[cdsc_reddit.git] / similarities / weekly_cosine_similarities.py
from pyspark.sql import functions as f
from pyspark.sql import SparkSession
from pyspark.sql import Window
import numpy as np
import pyarrow
import pandas as pd
import fire
from functools import partial
from itertools import islice
from pathlib import Path
from similarities_helper import *
from multiprocessing import Pool

# `week` comes first so the fixed arguments can be bound with functools.partial
# and the result mapped over weeks with Pool.map below.
def _week_similarities(week, tempdir_path, term_colname, subreddit_names, outfile):
    print(f"loading matrix: {week}")
    mat = read_tfidf_matrix_weekly(tempdir_path, term_colname, week)
    print('computing similarities')
    sims = column_similarities(mat)
    del mat

    # subreddit ids (and so matrix columns) are assigned per week,
    # so look up this week's names.
    names = subreddit_names.loc[subreddit_names.week == week]
    sims = pd.DataFrame(sims.todense())

    sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1)
    sims['_subreddit'] = names.subreddit.values

    write_weekly_similarities(outfile, sims, week, names)
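
# For reference: column_similarities (imported from similarities_helper, which
# is not shown here) computes cosine similarities between the columns of the
# sparse tf-idf matrix, i.e. between subreddits. A minimal sketch of that idea,
# assuming a scipy.sparse matrix with one column per subreddit (the actual
# helper may differ):
#
#   import numpy as np
#   def column_similarities_sketch(mat):
#       norms = np.sqrt(mat.power(2).sum(axis=0))  # L2 norm of each column
#       mat = mat.multiply(1 / norms)              # normalize columns to unit length
#       return mat.T @ mat                         # entry (i, j) = cosine(col_i, col_j)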
#tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_weekly.parquet')
def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df=None, included_subreddits=None, topN=500):
    spark = SparkSession.builder.getOrCreate()
    conf = spark.sparkContext.getConf()
    print(outfile)
    tfidf = spark.read.parquet(tfidf_path)

    if included_subreddits is None:
        included_subreddits = select_topN_subreddits(topN)
    else:
        # read one subreddit name per line, stripping trailing newlines.
        included_subreddits = set(map(str.strip, open(included_subreddits)))

    print(f"computing weekly similarities for {len(included_subreddits)} subreddits")

    print("creating temporary parquet with matrix indices")
    tempdir = prep_tfidf_entries_weekly(tfidf, term_colname, min_df, included_subreddits)

    tfidf = spark.read.parquet(tempdir.name)

    # the ids can change each week, so collect (subreddit, id, week) triples
    # and shift the 1-based ids to 0-based matrix column indices.
    subreddit_names = tfidf.select(['subreddit', 'subreddit_id_new', 'week']).distinct().toPandas()
    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1
    spark.stop()

    weeks = sorted(list(subreddit_names.week.drop_duplicates()))
    # do this step in parallel if we have the memory for it.
    # Pool.map needs a picklable callable, so bind the fixed arguments to the
    # module-level worker with functools.partial rather than using a closure.
    week_similarities_helper = partial(_week_similarities,
                                       tempdir_path=tempdir.name,
                                       term_colname=term_colname,
                                       subreddit_names=subreddit_names,
                                       outfile=outfile)

    with Pool(40) as pool:  # maybe it can be done with 40 cores on the huge machine?
        list(pool.map(week_similarities_helper, weeks))
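
# Note: Pool(40) above is sized for one particular machine. A more portable
# (hypothetical) alternative would size the pool to the host:
#
#   from multiprocessing import cpu_count
#   with Pool(cpu_count()) as pool:
#       list(pool.map(week_similarities_helper, weeks))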
def author_cosine_similarities_weekly(outfile, min_df=2, included_subreddits=None, topN=500):
    return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_100k.parquet',
                                      outfile,
                                      'author',
                                      min_df,
                                      included_subreddits,
                                      topN)

def term_cosine_similarities_weekly(outfile, min_df=None, included_subreddits=None, topN=500):
    return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms_100k.parquet',
                                      outfile,
                                      'term',
                                      min_df,
                                      included_subreddits,
                                      topN)

if __name__ == "__main__":
    fire.Fire({'authors': author_cosine_similarities_weekly,
               'terms': term_cosine_similarities_weekly})
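
# Example invocations via the fire CLI (output paths are hypothetical):
#   python3 weekly_cosine_similarities.py terms --outfile=/path/to/comment_terms_weekly.parquet
#   python3 weekly_cosine_similarities.py authors --outfile=/path/to/comment_authors_weekly.parquet --min_df=2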
