# similarities/tfidf.py
import fire
from pyspark.sql import SparkSession
from pyspark.sql import functions as f
from similarities_helper import build_tfidf_dataset, build_weekly_tfidf_dataset, select_topN_subreddits

def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_subreddits):
    # Shared driver: read the counts dataset, drop excluded values of
    # term_colname, restrict to the chosen subreddits, then build and write
    # the tf-idf dataset using the supplied builder function.
    spark = SparkSession.builder.getOrCreate()

    df = spark.read.parquet(inpath)

    df = df.filter(~ f.col(term_colname).isin(exclude))

    if included_subreddits is not None:
        # One subreddit name per line; normalize to lowercase and strip whitespace.
        with open(included_subreddits) as infile:
            include_subs = set(map(str.strip, map(str.lower, infile)))
    else:
        include_subs = select_topN_subreddits(topN)

    df = func(df, include_subs, term_colname)

    df.write.parquet(outpath, mode='overwrite', compression='snappy')

    spark.stop()

def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits):
    # tf-idf over the full history of each subreddit.
    return _tfidf_wrapper(build_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)

def tfidf_weekly(inpath, outpath, topN, term_colname, exclude, included_subreddits):
    # tf-idf computed separately for each week.
    return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)

def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                  topN=25000,
                  included_subreddits=None):
    # Author-based tf-idf; exclude deleted accounts and AutoModerator.
    return tfidf("/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
                 outpath,
                 topN,
                 'author',
                 ['[deleted]','AutoModerator'],
                 included_subreddits=included_subreddits
                 )

def tfidf_terms(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet',
                topN=25000,
                included_subreddits=None):
    # Term-based tf-idf; no terms are excluded.
    return tfidf("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet",
                 outpath,
                 topN,
                 'term',
                 [],
                 included_subreddits=included_subreddits
                 )

def tfidf_authors_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet',
                         topN=25000,
                         included_subreddits=None):
    # Weekly author-based tf-idf; exclude deleted accounts and AutoModerator.
    return tfidf_weekly("/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
                        outpath,
                        topN,
                        'author',
                        ['[deleted]','AutoModerator'],
                        included_subreddits=included_subreddits
                        )

def tfidf_terms_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet',
                       topN=25000,
                       included_subreddits=None):
    # Weekly term-based tf-idf; no terms are excluded.
    return tfidf_weekly("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet",
                        outpath,
                        topN,
                        'term',
                        [],
                        included_subreddits=included_subreddits
                        )

if __name__ == "__main__":
    # Expose each entry point as a subcommand of a Fire command-line interface.
    fire.Fire({'authors':tfidf_authors,
               'terms':tfidf_terms,
               'authors_weekly':tfidf_authors_weekly,
               'terms_weekly':tfidf_terms_weekly})
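# Example invocations of the Fire CLI above (a sketch; it assumes the default
# /gscratch/comdata input and output paths are reachable from the machine this
# runs on, and <subreddit_list.txt> is a hypothetical file with one subreddit
# name per line):
#
#   python3 tfidf.py terms --topN=25000
#   python3 tfidf.py authors_weekly --included_subreddits=<subreddit_list.txt>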
