from pyspark.sql import SparkSession
from pyspark.sql import Window
from pyspark.sql import functions as f
from enum import Enum
from pyspark.mllib.linalg.distributed import CoordinateMatrix
from tempfile import TemporaryDirectory
import pyarrow.dataset as ds
from scipy.sparse import csr_matrix, issparse
import pandas as pd
import numpy as np
from datetime import datetime
from pathlib import Path

class tf_weight(Enum):
    MaxTF = 1
    Norm05 = 2

infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet"
 
def reindex_tfidf_time_interval(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'

    spark = SparkSession.builder.getOrCreate()
    conf = spark.sparkContext.getConf()
    print(exclude_phrases)
    tfidf_weekly = spark.read.parquet(infile)

    # create the time interval
    if from_date is not None:
        if type(from_date) is str:
            from_date = datetime.fromisoformat(from_date)

        tfidf_weekly = tfidf_weekly.filter(tfidf_weekly.week >= from_date)

    if to_date is not None:
        if type(to_date) is str:
            to_date = datetime.fromisoformat(to_date)

        tfidf_weekly = tfidf_weekly.filter(tfidf_weekly.week < to_date)

    tfidf = tfidf_weekly.groupBy(["subreddit", "week", term_id, term]).agg(f.sum("tf").alias("tf"))
    tfidf = _calc_tfidf(tfidf, term_colname, tf_weight.Norm05)
    tempdir = prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits)
    tfidf = spark.read.parquet(tempdir.name)
    subreddit_names = tfidf.select(['subreddit', 'subreddit_id_new']).distinct().toPandas()
    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1
    return (tempdir, subreddit_names)
 
def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False):
    spark = SparkSession.builder.getOrCreate()
    conf = spark.sparkContext.getConf()
    print(exclude_phrases)

    tfidf = spark.read.parquet(infile)

    if included_subreddits is None:
        included_subreddits = select_topN_subreddits(topN)
    else:
        included_subreddits = set(map(str.strip, open(included_subreddits)))

    if exclude_phrases:
        tfidf = tfidf.filter(~f.col(term_colname).contains("_"))

    print("creating temporary parquet with matrix indices")
    tempdir = prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits)

    tfidf = spark.read.parquet(tempdir.name)
    subreddit_names = tfidf.select(['subreddit', 'subreddit_id_new']).distinct().toPandas()
    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1

    return (tempdir, subreddit_names)
 
def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None):

    if from_date is not None or to_date is not None:
        tempdir, subreddit_names = reindex_tfidf_time_interval(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=exclude_phrases, from_date=from_date, to_date=to_date)
    else:
        tempdir, subreddit_names = reindex_tfidf(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=exclude_phrases)

    print("loading matrix")
    mat = read_tfidf_matrix(tempdir.name, term_colname)
    print('computing similarities')
    sims = simfunc(mat)
    del mat

    if issparse(sims):
        sims = sims.todense()

    print(f"shape of sims:{sims.shape}")
    print(f"len(subreddit_names.subreddit.values):{len(subreddit_names.subreddit.values)}")
    sims = pd.DataFrame(sims)
    sims = sims.rename({i: sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1)
    sims['subreddit'] = subreddit_names.subreddit.values

    p = Path(outfile)

    output_feather = Path(str(p).replace("".join(p.suffixes), ".feather"))
    output_csv = Path(str(p).replace("".join(p.suffixes), ".csv"))
    output_parquet = Path(str(p).replace("".join(p.suffixes), ".parquet"))

    sims.to_feather(outfile)
    tempdir.cleanup()
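
# A hedged usage sketch (the paths are hypothetical): compute cosine
# similarities over comment-author overlap for the top 500 subreddits and
# write the resulting subreddit-by-subreddit matrix out as feather.
#
#   similarities(infile="/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet",
#                simfunc=column_similarities,
#                term_colname='author',
#                outfile="/gscratch/comdata/output/reddit_similarity/comment_authors_500.feather",
#                topN=500)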
 
def read_tfidf_matrix_weekly(path, term_colname, week):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'

    dataset = ds.dataset(path, format='parquet')
    entries = dataset.to_table(columns=['tf_idf', 'subreddit_id_new', term_id_new], filter=ds.field('week') == week).to_pandas()
    return csr_matrix((entries.tf_idf, (entries[term_id_new] - 1, entries.subreddit_id_new - 1)))
 
def write_weekly_similarities(path, sims, week, names):
    sims['week'] = week
    p = Path(path)
    p.mkdir(exist_ok=True, parents=True)

    # reformat as a pairwise list
    sims = sims.melt(id_vars=['subreddit', 'week'], value_vars=names.subreddit.values)
    sims.to_parquet(p / week.isoformat())
 
def read_tfidf_matrix(path, term_colname):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'

    dataset = ds.dataset(path, format='parquet')
    entries = dataset.to_table(columns=['tf_idf', 'subreddit_id_new', term_id_new]).to_pandas()
    return csr_matrix((entries.tf_idf, (entries[term_id_new] - 1, entries.subreddit_id_new - 1)))
 
def column_overlaps(mat):
    # Jaccard overlap between columns: |A ∩ B| / |A ∪ B| over nonzero entries
    non_zeros = (mat != 0).astype('double')

    intersection = non_zeros.T @ non_zeros
    card1 = non_zeros.sum(axis=0)
    den = np.add.outer(card1, card1) - intersection

    return intersection / den
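
# A small worked example of column_overlaps, defined as a function so importing
# this module stays side-effect free. The toy matrix is hypothetical; columns
# stand in for subreddits and rows for terms/authors.
def _example_column_overlaps():
    mat = csr_matrix(np.array([[1, 1, 0],
                               [0, 1, 0],
                               [2, 0, 1]]))
    # columns 0 and 1 share 1 nonzero row; their union covers 3 rows -> 1/3.
    # columns 0 and 2 share 1 nonzero row out of a union of 2 rows -> 1/2.
    print(column_overlaps(mat))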
 
def column_similarities(mat):
    # L2-normalize each column; cosine similarity is then a single matrix product
    norm = np.matrix(np.power(mat.power(2).sum(axis=0), 0.5, dtype=np.float32))
    mat = mat.multiply(1/norm)
    sims = mat.T @ mat
    return sims
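
# A hedged sketch checking column_similarities against an explicit cosine
# computation on a tiny hypothetical tf-idf matrix (again a function, not
# module-level code, so nothing runs on import).
def _example_column_similarities():
    mat = csr_matrix(np.array([[1.0, 2.0],
                               [0.0, 2.0]]))
    sims = column_similarities(mat)
    # cosine(col0, col1) = (1*2) / (1 * sqrt(8)) ~= 0.707 on the off-diagonal
    print(sims.todense() if issparse(sims) else sims)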
 
def prep_tfidf_entries_weekly(tfidf, term_colname, min_df, max_df, included_subreddits):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'

    if min_df is None:
        min_df = 0.1 * len(included_subreddits)

    tfidf = tfidf.filter(f.col('count') >= min_df)
    if max_df is not None:
        tfidf = tfidf.filter(f.col('count') <= max_df)

    tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits))

    # we might not have the same terms or subreddits each week, so we need to make unique ids for each week.
    sub_ids = tfidf.select(['subreddit_id', 'week']).distinct()
    sub_ids = sub_ids.withColumn("subreddit_id_new", f.row_number().over(Window.partitionBy('week').orderBy("subreddit_id")))
    tfidf = tfidf.join(sub_ids, ['subreddit_id', 'week'])

    # only use terms in at least min_df included subreddits in a given week
    new_count = tfidf.groupBy([term_id, 'week']).agg(f.count(term_id).alias('new_count'))
    tfidf = tfidf.join(new_count, [term_id, 'week'], how='inner')

    # reset the term ids
    term_ids = tfidf.select([term_id, 'week']).distinct()
    term_ids = term_ids.withColumn(term_id_new, f.row_number().over(Window.partitionBy('week').orderBy(term_id)))
    tfidf = tfidf.join(term_ids, [term_id, 'week'])

    tfidf = tfidf.withColumnRenamed("tf_idf", "tf_idf_old")
    tfidf = tfidf.withColumn("tf_idf", (tfidf.relative_tf * tfidf.idf).cast('float'))

    tempdir = TemporaryDirectory(suffix='.parquet', prefix='term_tfidf_entries', dir='.')

    tfidf = tfidf.repartition('week')

    tfidf.write.parquet(tempdir.name, mode='overwrite', compression='snappy')
    return tempdir
 
def prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'

    if min_df is None:
        min_df = 0.1 * len(included_subreddits)

    tfidf = tfidf.filter(f.col('count') >= min_df)
    if max_df is not None:
        tfidf = tfidf.filter(f.col('count') <= max_df)

    tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits))

    # reset the subreddit ids
    sub_ids = tfidf.select('subreddit_id').distinct()
    sub_ids = sub_ids.withColumn("subreddit_id_new", f.row_number().over(Window.orderBy("subreddit_id")))
    tfidf = tfidf.join(sub_ids, 'subreddit_id')

    # only use terms in at least min_df included subreddits
    new_count = tfidf.groupBy(term_id).agg(f.count(term_id).alias('new_count'))
    tfidf = tfidf.join(new_count, term_id, how='inner')

    # reset the term ids
    term_ids = tfidf.select([term_id]).distinct()
    term_ids = term_ids.withColumn(term_id_new, f.row_number().over(Window.orderBy(term_id)))
    tfidf = tfidf.join(term_ids, term_id)

    tfidf = tfidf.withColumnRenamed("tf_idf", "tf_idf_old")
    tfidf = tfidf.withColumn("tf_idf", (tfidf.relative_tf * tfidf.idf).cast('float'))

    tempdir = TemporaryDirectory(suffix='.parquet', prefix='term_tfidf_entries', dir='.')

    tfidf.write.parquet(tempdir.name, mode='overwrite', compression='snappy')
    return tempdir
 
# try computing cosine similarities using spark
def spark_cosine_similarities(tfidf, term_colname, min_df, included_subreddits, similarity_threshold):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'

    if min_df is None:
        min_df = 0.1 * len(included_subreddits)

    tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits))
    tfidf = tfidf.cache()

    # reset the subreddit ids
    sub_ids = tfidf.select('subreddit_id').distinct()
    sub_ids = sub_ids.withColumn("subreddit_id_new", f.row_number().over(Window.orderBy("subreddit_id")))
    tfidf = tfidf.join(sub_ids, 'subreddit_id')

    # only use terms in at least min_df included subreddits
    new_count = tfidf.groupBy(term_id).agg(f.count(term_id).alias('new_count'))
    tfidf = tfidf.join(new_count, term_id, how='inner')

    # reset the term ids
    term_ids = tfidf.select([term_id]).distinct()
    term_ids = term_ids.withColumn(term_id_new, f.row_number().over(Window.orderBy(term_id)))
    tfidf = tfidf.join(term_ids, term_id)

    tfidf = tfidf.withColumnRenamed("tf_idf", "tf_idf_old")
    tfidf = tfidf.withColumn("tf_idf", tfidf.relative_tf * tfidf.idf)

    # step 1: make an rdd of entries
    # sorted by (dense) spark subreddit id
    n_partitions = int(len(included_subreddits) * 2 / 5)

    entries = tfidf.select(f.col(term_id_new) - 1, f.col("subreddit_id_new") - 1, "tf_idf").rdd.repartition(n_partitions)

    # put around 10 subreddits in each partition

    # step 2: make it into a distributed.RowMatrix
    coordMat = CoordinateMatrix(entries)

    coordMat = CoordinateMatrix(coordMat.entries.repartition(n_partitions))

    # this needs to be an IndexedRowMatrix()
    mat = coordMat.toRowMatrix()

    # goal: build a matrix of subreddit columns and tf-idf rows
    sim_dist = mat.columnSimilarities(threshold=similarity_threshold)

    return (sim_dist, tfidf)
 
def build_weekly_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm05):
    term = term_colname
    term_id = term + '_id'

    # aggregate counts by week. now subreddit-term is distinct
    df = df.filter(df.subreddit.isin(include_subs))
    df = df.groupBy(['subreddit', term, 'week']).agg(f.sum('tf').alias('tf'))

    max_subreddit_terms = df.groupby(['subreddit', 'week']).max('tf') # subreddits are unique
    max_subreddit_terms = max_subreddit_terms.withColumnRenamed('max(tf)', 'sr_max_tf')
    df = df.join(max_subreddit_terms, on=['subreddit', 'week'])
    df = df.withColumn("relative_tf", df.tf / df.sr_max_tf)

    # group by term. term is unique
    idf = df.groupby([term, 'week']).count()

    N_docs = df.select(['subreddit', 'week']).distinct().groupby(['week']).agg(f.count("subreddit").alias("subreddits_in_week"))

    idf = idf.join(N_docs, on=['week'])

    # add a little smoothing to the idf
    idf = idf.withColumn('idf', f.log(idf.subreddits_in_week / (1 + f.col('count'))) + 1)

    # collect the dictionary to make a pydict of terms to indexes
    terms = idf.select([term, 'week']).distinct() # terms are distinct

    terms = terms.withColumn(term_id, f.row_number().over(Window.partitionBy('week').orderBy(term))) # term ids are distinct

    # make subreddit ids
    subreddits = df.select(['subreddit', 'week']).distinct()
    subreddits = subreddits.withColumn('subreddit_id', f.row_number().over(Window.partitionBy("week").orderBy("subreddit")))

    df = df.join(subreddits, on=['subreddit', 'week'])

    # map terms to indexes in the tfs and the idfs
    df = df.join(terms, on=[term, 'week']) # subreddit-term-id is unique

    idf = idf.join(terms, on=[term, 'week'])

    # join on subreddit/term to create tf/dfs indexed by term
    df = df.join(idf, on=[term_id, term, 'week'])

    # agg terms by subreddit to make sparse tf/df vectors
    if tf_family == tf_weight.MaxTF:
        df = df.withColumn("tf_idf", df.relative_tf * df.idf)
    else: # tf_family == tf_weight.Norm05
        df = df.withColumn("tf_idf", (0.5 + 0.5 * df.relative_tf) * df.idf)

    return df
 
def _calc_tfidf(df, term_colname, tf_family):
    term = term_colname
    term_id = term + '_id'

    max_subreddit_terms = df.groupby(['subreddit']).max('tf') # subreddits are unique
    max_subreddit_terms = max_subreddit_terms.withColumnRenamed('max(tf)', 'sr_max_tf')

    df = df.join(max_subreddit_terms, on='subreddit')

    df = df.withColumn("relative_tf", df.tf / df.sr_max_tf)

    # group by term. term is unique
    idf = df.groupby([term]).count()
    N_docs = df.select('subreddit').distinct().count()
    # add a little smoothing to the idf
    idf = idf.withColumn('idf', f.log(N_docs / (1 + f.col('count'))) + 1)

    # collect the dictionary to make a pydict of terms to indexes
    terms = idf.select(term).distinct() # terms are distinct
    terms = terms.withColumn(term_id, f.row_number().over(Window.orderBy(term))) # term ids are distinct

    # make subreddit ids
    subreddits = df.select(['subreddit']).distinct()
    subreddits = subreddits.withColumn('subreddit_id', f.row_number().over(Window.orderBy("subreddit")))

    df = df.join(subreddits, on='subreddit')

    # map terms to indexes in the tfs and the idfs
    df = df.join(terms, on=term) # subreddit-term-id is unique

    idf = idf.join(terms, on=term)

    # join on subreddit/term to create tf/dfs indexed by term
    df = df.join(idf, on=[term_id, term])

    # agg terms by subreddit to make sparse tf/df vectors
    if tf_family == tf_weight.MaxTF:
        df = df.withColumn("tf_idf", df.relative_tf * df.idf)
    else: # tf_family == tf_weight.Norm05
        df = df.withColumn("tf_idf", (0.5 + 0.5 * df.relative_tf) * df.idf)

    return df
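
# For reference, a worked instance of the Norm05 weighting computed above:
#   tf_idf(t, s) = (0.5 + 0.5 * tf(t,s) / max_t' tf(t',s)) * (log(N_docs / (1 + df(t))) + 1)
# e.g. a term at half its subreddit's max frequency, appearing in 9 of
# N_docs=100 subreddits, gets (0.5 + 0.25) * (log(10) + 1) ~= 2.48 (natural log).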
 
def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm05):
    term = term_colname

    # aggregate counts. now subreddit-term is distinct
    df = df.filter(df.subreddit.isin(include_subs))
    df = df.groupBy(['subreddit', term]).agg(f.sum('tf').alias('tf'))

    df = _calc_tfidf(df, term_colname, tf_family)

    return df
 
def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv"):
    rankdf = pd.read_csv(path)
    included_subreddits = set(rankdf.loc[rankdf.comments_rank <= topN, 'subreddit'].values)
    return included_subreddits
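
# A hedged end-to-end sketch (the input path is hypothetical; the input parquet
# is assumed to carry 'subreddit', 'author', and 'tf' columns). Guarded so that
# importing this module stays side-effect free.
if __name__ == "__main__":
    spark = SparkSession.builder.getOrCreate()
    df = spark.read.parquet("/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet")
    include_subs = select_topN_subreddits(topN=25000)
    tfidf = build_tfidf_dataset(df, include_subs, term_colname='author')
    tfidf.write.parquet("/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet", mode='overwrite')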