# old/author_cosine_similarity.py (cdsc_reddit)
from pyspark.sql import functions as f
from pyspark.sql import SparkSession
from pyspark.sql import Window
import numpy as np
import pyarrow
import pandas as pd
import fire
from itertools import islice
from pathlib import Path
from similarities_helper import *
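
# The matrix and similarity helpers used below (prep_tfidf_entries*,
# read_tfidf_matrix*, column_similarities, select_topN_subreddits,
# write_weekly_similarities) come from similarities_helper. For orientation,
# here is a minimal sketch (not the actual implementation) of what
# column_similarities is assumed to compute: cosine similarity between the
# columns of a sparse term-by-subreddit tf-idf matrix.
def _column_similarities_sketch(mat):
    # L2-normalize each column (assumes no all-zero columns); the cosine
    # similarity between two columns is then just their dot product.
    norms = np.sqrt(mat.power(2).sum(axis=0))  # 1 x n_subreddits
    mat = mat.multiply(1 / norms).tocsc()
    return mat.T @ mat  # n_subreddits x n_subreddits similarity matrix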

#tfidf = spark.read.parquet('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/subreddit_terms.parquet')
def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df=None, included_subreddits=None, topN=500):
    spark = SparkSession.builder.getOrCreate()
    conf = spark.sparkContext.getConf()
    print(outfile)
    tfidf = spark.read.parquet(tfidf_path)

    if included_subreddits is None:
        included_subreddits = select_topN_subreddits(topN)
    else:
        # strip trailing newlines so subreddit names compare cleanly
        included_subreddits = {s.strip() for s in open(included_subreddits)}

    print("creating temporary parquet with matrix indices")
    tempdir = prep_tfidf_entries_weekly(tfidf, term_colname, min_df, included_subreddits)

    tfidf = spark.read.parquet(tempdir.name)

    # the ids can change each week.
    subreddit_names = tfidf.select(['subreddit', 'subreddit_id_new', 'week']).distinct().toPandas()
    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1  # shift to 0-based matrix indices
    spark.stop()

    weeks = list(subreddit_names.week.drop_duplicates())
    for week in weeks:
        print("loading matrix")
        mat = read_tfidf_matrix_weekly(tempdir.name, term_colname, week)
        print('computing similarities')
        sims = column_similarities(mat)
        del mat

        names = subreddit_names.loc[subreddit_names.week == week]

        # as in cosine_similarities below, densify the sparse similarity
        # matrix before labeling its columns
        sims = pd.DataFrame(sims.todense())
        sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1)
        sims['subreddit'] = names.subreddit.values
        write_weekly_similarities(outfile, sims, week)

    tempdir.cleanup()

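# For reference, a minimal sketch of what write_weekly_similarities is assumed
# to do: tag the similarity frame with its week and write one parquet file per
# week under outfile, treated here as a directory. The real helper lives in
# similarities_helper and may behave differently.
def _write_weekly_similarities_sketch(outfile, sims, week):
    sims['week'] = week
    p = Path(outfile)
    p.mkdir(exist_ok=True, parents=True)
    sims.to_parquet(p / f"{week}.parquet")
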
def cosine_similarities(outfile, min_df=None, included_subreddits=None, topN=500):
    '''
    Compute similarities between subreddits based on tf-idf vectors of author comments.

    outfile : string
        Where to write the csv and feather outputs.

    min_df : int (default = 0.1 * number of included subreddits)
        Exclude terms that appear in fewer than this number of documents.

    included_subreddits : string
        Text file containing a list of subreddits to include (one per line).
        If None, use the topN (default 500) subreddits.
    '''

    spark = SparkSession.builder.getOrCreate()
    conf = spark.sparkContext.getConf()
    print(outfile)

    tfidf = spark.read.parquet('/gscratch/comdata/output/reddit_similarity/tfidf/subreddit_comment_authors.parquet')

    if included_subreddits is None:
        included_subreddits = select_topN_subreddits(topN)
    else:
        # strip trailing newlines so subreddit names compare cleanly
        included_subreddits = {s.strip() for s in open(included_subreddits)}

    print("creating temporary parquet with matrix indices")
    tempdir = prep_tfidf_entries(tfidf, 'author', min_df, included_subreddits)
    tfidf = spark.read.parquet(tempdir.name)
    subreddit_names = tfidf.select(['subreddit', 'subreddit_id_new']).distinct().toPandas()
    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1  # shift to 0-based matrix indices
    spark.stop()

    print("loading matrix")
    mat = read_tfidf_matrix(tempdir.name, 'author')
    print('computing similarities')
    sims = column_similarities(mat)
    del mat

    sims = pd.DataFrame(sims.todense())
    sims = sims.rename({i: sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1)
    sims['subreddit'] = subreddit_names.subreddit.values

    # derive the output paths from outfile and write both formats
    # promised by the docstring
    p = Path(outfile)
    output_feather = Path(str(p).replace("".join(p.suffixes), ".feather"))
    output_csv = Path(str(p).replace("".join(p.suffixes), ".csv"))

    sims.to_feather(output_feather)
    sims.to_csv(output_csv)
    tempdir.cleanup()

if __name__ == '__main__':
    fire.Fire(cosine_similarities)
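
# Example invocations of the fire-generated command line (paths illustrative):
#   python author_cosine_similarity.py --outfile=/path/to/subreddit_author_sims.feather
#   python author_cosine_similarity.py --outfile=... --topN=1000 --min_df=50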
