from pyspark.sql import functions as f
from pyspark.sql import SparkSession
from pyspark.sql import Window
from similarities_helper import build_weekly_tfidf_dataset
import pandas as pd


def tfidf_weekly(inpath, outpath, topN, term_colname, exclude):
    """Build a weekly TF-IDF dataset over per-subreddit comment terms.

    Reads a parquet of comment terms from *inpath*, restricts to the topN
    subreddits by comment count, runs build_weekly_tfidf_dataset on the
    *term_colname* column, and writes the result to *outpath* as snappy
    parquet.

    Parameters
    ----------
    inpath : str
        Path to the input comment-terms parquet
        (e.g. /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet).
    outpath : str
        Destination parquet path; overwritten if it exists.
    topN : int
        Keep subreddits whose comments_rank is <= topN
        (the original script used 25000).
    term_colname : str
        Name of the term column passed to build_weekly_tfidf_dataset
        (the original script used 'term').
    exclude :
        Currently unused; accepted for interface compatibility.
        # TODO(review): presumably a set of subreddits/authors to drop — confirm
        # against callers before wiring it in.

    Returns
    -------
    None. Side effect: writes the TF-IDF parquet and stops the Spark session.
    """
    spark = SparkSession.builder.getOrCreate()
    df = spark.read.parquet(inpath)

    # Rank file maps each subreddit to its rank by number of comments;
    # keep only the topN most-commented subreddits.
    include_subs = pd.read_csv("/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv")
    include_subs = set(include_subs.loc[include_subs.comments_rank <= topN]['subreddit'])

    # remove [deleted] and AutoModerator (TODO remove other bots)
    # df = df.filter(df.author != '[deleted]')
    # df = df.filter(df.author != 'AutoModerator')

    df = build_weekly_tfidf_dataset(df, include_subs, term_colname)

    df.write.parquet(outpath, mode='overwrite', compression='snappy')
    spark.stop()