[cdsc_reddit.git] / old / tfidf_comments.py
from pyspark.sql import functions as f
from pyspark.sql import SparkSession
from pyspark.sql import Window
import pandas as pd
from similarities_helper import build_tfidf_dataset

## TODO: need to exclude automoderator / bot posts.
## TODO: need to better handle hyperlinks.

spark = SparkSession.builder.getOrCreate()

# Per-subreddit comment term counts produced by the ngram pipeline.
df = spark.read.parquet("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet")

# Restrict to the top 25,000 subreddits ranked by number of comments.
include_subs = pd.read_csv("/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv")
include_subs = set(include_subs.loc[include_subs.comments_rank <= 25000]['subreddit'])

# Build the TF-IDF dataset over comment terms for the selected subreddits.
df = build_tfidf_dataset(df, include_subs, 'term')

df.write.parquet('/gscratch/comdata/output/reddit_similarity/reddit_similarity/subreddit_terms.parquet', mode='overwrite', compression='snappy')
spark.stop()
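
Note: build_tfidf_dataset is defined in similarities_helper and is not shown in this file. The snippet below is a minimal, hypothetical sketch of how such a TF-IDF computation could look in PySpark; the column names (subreddit, term, tf) and the exact weighting are assumptions for illustration, not the helper's actual implementation.

# Hypothetical sketch of a per-subreddit TF-IDF computation, assuming the
# input dataframe has one row per (subreddit, term) with a precomputed
# term count column named 'tf'.
from pyspark.sql import functions as f

def build_tfidf_sketch(df, include_subs, term_col='term'):
    # Keep only the selected subreddits.
    df = df.filter(df.subreddit.isin(list(include_subs)))

    # Treat each subreddit as a "document": count how many subreddits
    # each term appears in, then compute a log inverse document frequency.
    n_docs = df.select('subreddit').distinct().count()
    idf = (df.groupBy(term_col)
             .agg(f.countDistinct('subreddit').alias('df'))
             .withColumn('idf', f.log(f.lit(n_docs) / f.col('df'))))

    # Combine the per-subreddit term counts with the idf weights.
    return (df.join(idf, on=term_col)
              .withColumn('tf_idf', f.col('tf') * f.col('idf')))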
