]> code.communitydata.science - cdsc_reddit.git/blob - old/tfidf_authors_weekly.py
We no longer need to get daily dumps
[cdsc_reddit.git] / old / tfidf_authors_weekly.py
"""Build a weekly TF-IDF dataset of comment authors per subreddit.

Reads author comment counts from parquet, restricts the data to the top
subreddits by comment volume, drops non-human author accounts, then
writes the weekly TF-IDF result back out as snappy-compressed parquet.
"""
from pyspark.sql import SparkSession
from similarities_helper import build_weekly_tfidf_dataset
import pandas as pd

# Input/output locations and tuning knobs, kept in one place instead of
# being repeated inline as magic strings.
INPUT_PARQUET = "/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet"
RANKS_CSV = "/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv"
OUTPUT_PARQUET = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet"
TOP_N_SUBREDDITS = 25000

# Authors that are not real users (TODO: remove other bots).
EXCLUDED_AUTHORS = ["[deleted]", "AutoModerator"]

spark = SparkSession.builder.getOrCreate()
try:
    df = spark.read.parquet(INPUT_PARQUET)

    # Keep only the top-ranked subreddits by number of comments.
    ranks = pd.read_csv(RANKS_CSV)
    include_subs = set(ranks.loc[ranks.comments_rank <= TOP_N_SUBREDDITS]["subreddit"])

    # Drop deleted accounts and known bots in a single isin() filter
    # rather than chained per-author filters.
    df = df.filter(~df.author.isin(EXCLUDED_AUTHORS))

    df = build_weekly_tfidf_dataset(df, include_subs, "author")

    df.write.parquet(OUTPUT_PARQUET, mode="overwrite", compression="snappy")
finally:
    # Always release the Spark session, even if the job fails partway —
    # the original only stopped it on the success path.
    spark.stop()

Community Data Science Collective || Want to submit a patch?