from pyspark.sql import functions as f
from pyspark.sql import SparkSession
from pyspark.sql import Window
import pandas as pd
import fire
from itertools import islice
from pathlib import Path
from similarities_helper import *

#tfidf = spark.read.parquet('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/subreddit_terms.parquet')
def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df=None, included_subreddits=None, topN=500):
    spark = SparkSession.builder.getOrCreate()
    conf = spark.sparkContext.getConf()

    tfidf = spark.read.parquet(tfidf_path)

    if included_subreddits is None:
        included_subreddits = select_topN_subreddits(topN)
    else:
        # read the list of subreddits (one per line) from the file named by included_subreddits
        included_subreddits = set(open(included_subreddits))

    print("creating temporary parquet with matrix indices")
    tempdir = prep_tfidf_entries_weekly(tfidf, term_colname, min_df, included_subreddits)

    tfidf = spark.read.parquet(tempdir.name)

    # the ids can change each week.
    subreddit_names = tfidf.select(['subreddit', 'subreddit_id_new', 'week']).distinct().toPandas()
    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
    # shift the 1-based ids to 0-based matrix indices
    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1

    weeks = list(subreddit_names.week.drop_duplicates())
    for week in weeks:
        print("loading matrix")
        mat = read_tfidf_matrix_weekly(tempdir.name, term_colname, week)
        print('computing similarities')
        sims = column_similarities(mat)

        names = subreddit_names.loc[subreddit_names.week == week]

        sims = pd.DataFrame(sims.todense())
        sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1)
        sims['subreddit'] = names.subreddit.values
        write_weekly_similarities(outfile, sims, week)
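
# Illustrative sketch (assumption): column_similarities, imported from
# similarities_helper, is expected to take the sparse term-by-subreddit tf-idf
# matrix, scale every column to unit L2 norm, and return the matrix of pairwise
# cosine similarities between columns. The helper's real implementation may
# differ; this sketch only documents the computation the calls above rely on.
def _column_cosine_similarities_sketch(mat):
    import numpy as np
    # L2 norm of each column of the sparse tf-idf matrix, shape (1, n_subreddits)
    norms = np.sqrt(mat.power(2).sum(axis=0))
    # scale each column to unit length, then take all pairwise dot products
    unit = mat.multiply(1 / norms).tocsc()
    return unit.T @ unit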
def cosine_similarities(outfile, min_df=None, included_subreddits=None, topN=500):
    '''
    Compute similarities between subreddits based on tf-idf vectors of author comments.

    included_subreddits : string
        Text file containing a list of subreddits to include (one per line).
        If included_subreddits is None, the topN (default 500) subreddits are used.

    min_df : int (default = 0.1 * number of included subreddits)
        Exclude terms that appear in fewer than this number of documents.

    outfile : string
        Where to output the csv and feather outputs.
    '''
    spark = SparkSession.builder.getOrCreate()
    conf = spark.sparkContext.getConf()

    tfidf = spark.read.parquet('/gscratch/comdata/output/reddit_similarity/tfidf/subreddit_comment_authors.parquet')

    if included_subreddits is None:
        included_subreddits = select_topN_subreddits(topN)
    else:
        included_subreddits = set(open(included_subreddits))

    print("creating temporary parquet with matrix indices")
    tempdir = prep_tfidf_entries(tfidf, 'author', min_df, included_subreddits)
    tfidf = spark.read.parquet(tempdir.name)
    subreddit_names = tfidf.select(['subreddit', 'subreddit_id_new']).distinct().toPandas()
    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
    # shift the 1-based ids to 0-based matrix indices
    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1

    print("loading matrix")
    mat = read_tfidf_matrix(tempdir.name, 'author')
    print('computing similarities')
    sims = column_similarities(mat)

    sims = pd.DataFrame(sims.todense())
    sims = sims.rename({i: sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1)
    sims['subreddit'] = subreddit_names.subreddit.values

    p = Path(outfile)
    output_feather = Path(str(p).replace("".join(p.suffixes), ".feather"))
    output_csv = Path(str(p).replace("".join(p.suffixes), ".csv"))
    output_parquet = Path(str(p).replace("".join(p.suffixes), ".parquet"))

    sims.to_feather(outfile)
if __name__ == '__main__':
    fire.Fire(cosine_similarities)
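
# Example invocation through fire's generated command-line interface (illustrative;
# the output filename below is an assumption, adjust it to your environment):
#   python cosine_similarities.py --outfile=subreddit_author_similarities.feather --topN=500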