from pyspark.sql import functions as f
from pyspark.sql import SparkSession
from pyspark.sql import Window
from similarities_helper import build_weekly_tfidf_dataset
import pandas as pd


## TODO: need to exclude AutoModerator / bot posts.
## TODO: need to better handle hyperlinks.

spark = SparkSession.builder.getOrCreate()
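# comment-level term counts produced by the reddit_ngrams pipeline
# (inferred from the input path; the exact schema is defined elsewhere)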
df = spark.read.parquet("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet")

include_subs = pd.read_csv("/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv")

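# keep only the subreddits ranked in the top 25,000 by number of comments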
include_subs = set(include_subs.loc[include_subs.comments_rank <= 25000]['subreddit'])

# remove [deleted] and AutoModerator (TODO remove other bots)
# df = df.filter(df.author != '[deleted]')
# df = df.filter(df.author != 'AutoModerator')
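# Illustrative sketch only (left commented out to preserve current behavior): one way to
# address the bot-exclusion TODO above, assuming df has an 'author' column as the commented
# filters imply. The 'endswith' heuristic for bot accounts is an assumption, not part of the
# original pipeline.
# bot_authors = ['[deleted]', 'AutoModerator']
# df = df.filter(~f.col('author').isin(bot_authors))
# df = df.filter(~f.lower(f.col('author')).endswith('bot'))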
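# build_weekly_tfidf_dataset comes from similarities_helper (not shown here); it is assumed
# to aggregate 'term' counts by subreddit and week and attach tf-idf weights for the
# included subreddits.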
df = build_weekly_tfidf_dataset(df, include_subs, 'term')


df.write.parquet('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', mode='overwrite', compression='snappy')
spark.stop()

