from pyspark.sql import functions as f
from pyspark.sql import SparkSession
from pyspark.sql import Window
import pyarrow.dataset as ds
import pandas as pd
import fire
from itertools import islice, chain
from pathlib import Path
from similarities_helper import pull_tfidf, column_similarities, write_weekly_similarities, lsi_column_similarities
from scipy.sparse import csr_matrix
from multiprocessing import Pool, cpu_count
from functools import partial
infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_10k.parquet"
tfidf_path = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_authors_compex.parquet"
included_subreddits = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/included_subreddits.txt"
# outfile = '/gscratch/comdata/output/reddit_similarity/weekly/comment_authors_test.parquet'
# included_subreddits=None
def _week_similarities(week, simfunc, tfidf_path, term_colname, min_df, max_df, included_subreddits, topN, outdir: Path, subreddit_names, nterms):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'
    print(f"loading matrix: {week}")

    entries = pull_tfidf(infile=tfidf_path,
                         term_colname=term_colname,
                         min_df=min_df,
                         max_df=max_df,
                         included_subreddits=included_subreddits,
                         topN=topN,
                         week=week)

    tfidf_colname = 'tf_idf'
    # if the max subreddit id we found is less than the number of subreddit names then we have to fill in 0s
    mat = csr_matrix((entries[tfidf_colname], (entries[term_id_new] - 1, entries.subreddit_id_new - 1)), shape=(nterms, subreddit_names.shape[0]))

    print('computing similarities')
    sims = simfunc(mat)
    del mat

    sims = pd.DataFrame(sims)
    sims = sims.rename({i: sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1)
    sims['_subreddit'] = subreddit_names.subreddit.values
    outfile = str(Path(outdir) / str(week))
    write_weekly_similarities(outfile, sims, week, subreddit_names)
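
# A minimal sketch (hypothetical, not part of the pipeline) of the matrix
# construction above. The -1 shifts suggest pull_tfidf returns 1-based dense
# ids; subtracting 1 gives 0-based csr_matrix coordinates, and any
# (term, subreddit) pair absent from `entries` becomes an implicit zero.
def _demo_csr_construction():
    entries = pd.DataFrame({'term_id_new': [1, 1, 2, 3],
                            'subreddit_id_new': [1, 2, 2, 3],
                            'tf_idf': [0.5, 0.25, 1.0, 0.75]})
    nterms, nsubreddits = 3, 3
    mat = csr_matrix((entries['tf_idf'],
                      (entries['term_id_new'] - 1, entries['subreddit_id_new'] - 1)),
                     shape=(nterms, nsubreddits))
    print(mat.toarray())  # columns are subreddit tf-idf vectors over terms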
def pull_weeks(batch):
    return set(batch.to_pandas()['week'])
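
# Sketch of how pull_weeks could enumerate the weeks without spark, which may
# be why pyarrow.dataset and chain are imported (an assumption; the live code
# below gets the weeks from spark instead):
# dataset = ds.dataset(tfidf_path, format='parquet')
# weeks = set(chain.from_iterable(pull_weeks(b) for b in dataset.to_batches(columns=['week'])))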
# This requires a prefit LSI model, since we shouldn't fit different LSI models for every week.
def cosine_similarities_weekly_lsi(*args, n_components=100, lsi_model=None, **kwargs):
    term_colname = kwargs.get('term_colname')
    # lsi_model = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_terms_compex_LSI/1000_term_LSIMOD.pkl"

    # pop the LSI-specific options out of kwargs so they aren't forwarded to
    # cosine_similarities_weekly, which doesn't accept them; absent keys fall
    # back to the defaults in lsi_column_similarities.
    lsi_kwargs = {k: kwargs.pop(k) for k in ('n_iter', 'random_state', 'algorithm') if k in kwargs}
    simfunc = partial(lsi_column_similarities, n_components=n_components, lsi_model_load=lsi_model, **lsi_kwargs)

    return cosine_similarities_weekly(*args, simfunc=simfunc, **kwargs)
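
# Hypothetical sketch of producing such a prefit model. It assumes
# lsi_column_similarities can load a pickled sklearn TruncatedSVD through
# lsi_model_load; the format it actually expects is defined in
# similarities_helper, so treat this as an illustration only.
def _demo_prefit_lsi(all_time_mat, outpath='lsi_model.pkl', n_components=100):
    import pickle
    from sklearn.decomposition import TruncatedSVD
    svd = TruncatedSVD(n_components=n_components, algorithm='randomized')
    svd.fit(all_time_mat.T)  # rows = subreddits, columns = terms
    with open(outpath, 'wb') as fh:
        pickle.dump(svd, fh)
    return outpath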
# tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_weekly.parquet')
def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, simfunc=column_similarities):
    # do this step in parallel if we have the memory for it.
    # should be doable with pool.map

    spark = SparkSession.builder.getOrCreate()
    df = spark.read.parquet(tfidf_path)

    # load the subreddit names, the vocabulary size, and the set of weeks
    subreddit_names = df.select(['subreddit', 'subreddit_id']).distinct().toPandas()
    subreddit_names = subreddit_names.sort_values("subreddit_id")
    nterms = df.select(f.max(f.col(term_colname + "_id")).alias('max')).collect()[0].max
    weeks = df.select(f.col("week")).distinct().toPandas().week.values

    print("computing weekly similarities")
    week_similarities_helper = partial(_week_similarities, simfunc=simfunc, tfidf_path=tfidf_path, term_colname=term_colname, outdir=outfile, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, subreddit_names=subreddit_names, nterms=nterms)
    # maybe it can be done with 40 cores on the huge machine?
    with Pool(cpu_count()) as pool:  # one task per week; the pool is closed and joined on exit
        list(pool.imap(week_similarities_helper, weeks))
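
# Example call (hypothetical output path): compute author similarities for each
# week in the 10k-subreddit TF-IDF file, writing one parquet per week under outfile.
# cosine_similarities_weekly(infile,
#                            outfile='/gscratch/comdata/output/reddit_similarity/weekly/comment_authors.parquet',
#                            term_colname='author',
#                            min_df=2,
#                            topN=500)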
def author_cosine_similarities_weekly(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet', min_df=2, max_df=None, included_subreddits=None, topN=500):
    return cosine_similarities_weekly(infile, outfile=outfile, term_colname='author',
                                      min_df=min_df, max_df=max_df,
                                      included_subreddits=included_subreddits, topN=topN)
def term_cosine_similarities_weekly(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', min_df=None, max_df=None, included_subreddits=None, topN=None):
    return cosine_similarities_weekly(infile, outfile=outfile, term_colname='term',
                                      min_df=min_df, max_df=max_df,
                                      included_subreddits=included_subreddits, topN=topN)
def author_cosine_similarities_weekly_lsi(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet', min_df=2, max_df=None, included_subreddits=None, topN=None, n_components=100, lsi_model=None):
    return cosine_similarities_weekly_lsi(infile, outfile=outfile, term_colname='author',
                                          min_df=min_df, max_df=max_df,
                                          included_subreddits=included_subreddits, topN=topN,
                                          n_components=n_components, lsi_model=lsi_model)
def term_cosine_similarities_weekly_lsi(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', min_df=None, max_df=None, included_subreddits=None, topN=500, n_components=100, lsi_model=None):
    return cosine_similarities_weekly_lsi(infile, outfile=outfile, term_colname='term',
                                          min_df=min_df, max_df=max_df,
                                          included_subreddits=included_subreddits, topN=topN,
                                          n_components=n_components, lsi_model=lsi_model)
if __name__ == "__main__":
    fire.Fire({'authors': author_cosine_similarities_weekly,
               'terms': term_cosine_similarities_weekly,
               'authors-lsi': author_cosine_similarities_weekly_lsi,
               'terms-lsi': term_cosine_similarities_weekly_lsi})
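
# Example invocations through python-fire (the script filename is assumed here):
#   python3 weekly_cosine_similarities.py authors --outfile=<outdir> --topN=500
#   python3 weekly_cosine_similarities.py terms-lsi --outfile=<outdir> \
#       --lsi_model=<path/to/LSIMOD.pkl> --n_components=100
# Each subcommand writes one similarities parquet per week under --outfile.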