2 from pyspark.sql import SparkSession
3 from pyspark.sql import functions as f
4 from similarities_helper import tfidf_dataset, build_weekly_tfidf_dataset, select_topN_subreddits
5 from functools import partial
# Module-level default path (debugging leftover, it seems): none of the functions
# below read this global — each CLI entry point takes its own `inpath` argument.
# NOTE(review): kept for backward compatibility; confirm nothing imports it before removing.
inpath = '/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_authors_compex.parquet'
# include_terms is a path to a parquet file that contains a column of term_colname + '_id' to include.
def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_subreddits, included_terms=None, min_df=None, max_df=None):
    """Shared driver behind the tf-idf entry points in this module.

    Reads the counts parquet at `inpath`, drops rows whose `term_colname`
    value is in `exclude`, restricts to a subreddit set (an explicit file of
    names, or the overall top-N), optionally restricts to the vocabulary found
    in the parquet at `included_terms`, then applies `func` (a tf-idf builder
    from similarities_helper) and writes its result to `outpath`.

    min_df / max_df are accepted for signature compatibility but are not used
    here: callers bind them into `func` via functools.partial (see `tfidf`).
    """
    spark = SparkSession.builder.getOrCreate()

    df = spark.read.parquet(inpath)

    # Remove explicitly excluded term values (e.g. '[deleted]'/'AutoModerator'
    # when the "terms" are comment authors).
    df = df.filter(~ f.col(term_colname).isin(exclude))

    if included_subreddits is not None:
        # `included_subreddits` is a path to a text file, one subreddit per line.
        # `with` closes the handle — the original bare open() leaked it.
        with open(included_subreddits) as sub_file:
            include_subs = set(map(str.strip, sub_file))
    else:
        include_subs = select_topN_subreddits(topN)

    # Broadcast the (potentially large) subreddit set to the executors.
    include_subs = spark.sparkContext.broadcast(include_subs)

    #    term_id = term_colname + "_id"

    if included_terms is not None:
        # left_semi join keeps only rows whose term appears in the reference
        # parquet; used by the weekly variants to reuse the static vocabulary.
        terms_df = spark.read.parquet(included_terms)
        terms_df = terms_df.select(term_colname).distinct()
        df = df.join(terms_df, on=term_colname, how='left_semi')

    dfwriter = func(df, include_subs.value, term_colname)

    dfwriter.parquet(outpath, mode='overwrite', compression='snappy')
def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits, min_df, max_df):
    """Build a static (all-time) tf-idf dataset.

    Binds min_df/max_df into `tfidf_dataset` and delegates everything else
    to `_tfidf_wrapper`.
    """
    bound_builder = partial(tfidf_dataset, max_df=max_df, min_df=min_df)
    return _tfidf_wrapper(bound_builder, inpath, outpath, topN, term_colname, exclude, included_subreddits)
def tfidf_weekly(inpath, outpath, static_tfidf_path, topN, term_colname, exclude, included_subreddits):
    """Build a weekly tf-idf dataset, restricted to the vocabulary of the
    static tf-idf parquet at `static_tfidf_path` (passed as included_terms)."""
    return _tfidf_wrapper(build_weekly_tfidf_dataset,
                          inpath,
                          outpath,
                          topN,
                          term_colname,
                          exclude,
                          included_subreddits,
                          included_terms=static_tfidf_path)
def tfidf_authors(inpath="/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
                  outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                  topN=None,
                  included_subreddits=None,
                  min_df=None,
                  max_df=None):
    """CLI entry point: static tf-idf over comment authors.

    NOTE(review): the topN/min_df/max_df parameters and the positional
    arguments of the `tfidf(...)` call were reconstructed to match the
    visible `tfidf` signature — confirm defaults against the original file.
    """
    return tfidf(inpath,
                 outpath,
                 topN,
                 'author',
                 # '[deleted]' and 'AutoModerator' are not meaningful authors.
                 ['[deleted]','AutoModerator'],
                 included_subreddits=included_subreddits,
                 min_df=min_df,
                 max_df=max_df)
def tfidf_terms(inpath="/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet",
                outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet',
                topN=None,
                included_subreddits=None,
                min_df=None,
                max_df=None):
    """CLI entry point: static tf-idf over comment terms (ngrams).

    NOTE(review): interior lines of this definition were missing from this
    chunk; the 'term' column name, the empty exclusion list, and the
    topN/min_df/max_df defaults are reconstructed by analogy with
    `tfidf_authors` — confirm against the original file.
    """
    return tfidf(inpath,
                 outpath,
                 topN,
                 'term',
                 [],  # no term values are excluded, unlike the author variant
                 included_subreddits=included_subreddits,
                 min_df=min_df,
                 max_df=max_df)
def tfidf_authors_weekly(inpath="/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
                         static_tfidf_path="/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet",
                         outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet',
                         topN=None,
                         included_subreddits=None):
    """CLI entry point: weekly tf-idf over comment authors, reusing the
    vocabulary of the static author tf-idf at `static_tfidf_path`.

    NOTE(review): the topN default and the positional arguments of the
    `tfidf_weekly(...)` call were reconstructed to match its visible
    signature — confirm against the original file.
    """
    return tfidf_weekly(inpath,
                        outpath,
                        static_tfidf_path,
                        topN,
                        'author',
                        # same non-author accounts excluded as in tfidf_authors
                        ['[deleted]','AutoModerator'],
                        included_subreddits=included_subreddits)
def tfidf_terms_weekly(inpath="/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet",
                       static_tfidf_path="/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet",
                       outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet',
                       topN=None,
                       included_subreddits=None):
    """CLI entry point: weekly tf-idf over comment terms, reusing the
    vocabulary of the static term tf-idf at `static_tfidf_path`.

    NOTE(review): interior lines were missing from this chunk; the 'term'
    column name, empty exclusion list, and positional arguments are
    reconstructed by analogy with the other entry points — confirm against
    the original file.
    """
    return tfidf_weekly(inpath,
                        outpath,
                        static_tfidf_path,
                        topN,
                        'term',
                        [],  # no term values excluded for the term variant
                        included_subreddits=included_subreddits)
if __name__ == "__main__":
    # No module-level `import fire` is visible in this chunk, so import it
    # here; the dependency is only needed when run as a script anyway.
    import fire
    # python-fire turns each dict entry into a CLI subcommand.
    # The 'terms' subcommand was missing from this chunk's dict — restored so
    # all four entry points defined above are reachable from the CLI.
    fire.Fire({'authors': tfidf_authors,
               'terms': tfidf_terms,
               'authors_weekly': tfidf_authors_weekly,
               'terms_weekly': tfidf_terms_weekly})