Add code for running tf-idf at the weekly level.
diff --git a/tfidf_authors.py b/tfidf_authors.py
index f06a8ce72f4ec995d4dcaa8a2d4abf42b0d447df..6852fe833955eea01e62acf08d509399cfe4837c 100644
--- a/tfidf_authors.py
+++ b/tfidf_authors.py
@@ -1,19 +1,21 @@
 from pyspark.sql import SparkSession
 from similarities_helper import build_tfidf_dataset
+import pandas as pd
 
-## TODO:need to exclude automoderator / bot posts.
-## TODO:need to exclude better handle hyperlinks. 
 spark = SparkSession.builder.getOrCreate()
 
-df = spark.read.parquet("/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/part-00000-d61007de-9cbe-4970-857f-b9fd4b35b741-c000.snappy.parquet")
+df = spark.read.parquet("/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp")
 
-include_subs = set(open("/gscratch/comdata/users/nathante/cdsc-reddit/top_25000_subs_by_comments.txt"))
-include_subs = {s.strip('\n') for s in include_subs}
+include_subs = pd.read_csv("/gscratch/comdata/users/nathante/cdsc-reddit/subreddits_by_num_comments.csv")
+
+#include_subs = set(include_subs.loc[include_subs.comments_rank < 300]['subreddit'])
+
+# remove [deleted] and AutoModerator (TODO remove other bots)
 df = df.filter(df.author != '[deleted]')
 df = df.filter(df.author != 'AutoModerator')
 
 df = build_tfidf_dataset(df, include_subs, 'author')
 
-df.cache()
-
 df.write.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_authors.parquet',mode='overwrite',compression='snappy')
+
+spark.stop()
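
Note: build_tfidf_dataset is imported from similarities_helper and is not shown in this diff. The sketch below illustrates one plausible shape for it, assuming each subreddit is treated as a "document" and each distinct value of the term column (here 'author') as a "term", and assuming include_subs is a set of subreddit names; the real helper in similarities_helper.py may work differently. Also note that in the new code include_subs is left as the full pandas DataFrame read from subreddits_by_num_comments.csv, since the line that reduces it to a set of top-ranked subreddits is commented out.

# Hypothetical sketch of build_tfidf_dataset; the real implementation lives in
# similarities_helper.py and may differ. Assumes include_subs is a set of
# subreddit names and term_colname names the column treated as the "term".
from pyspark.sql import functions as f

def build_tfidf_dataset_sketch(df, include_subs, term_colname):
    # keep only comments from the subreddits of interest
    df = df.filter(df.subreddit.isin(list(include_subs)))

    # term frequency: how many comments each "term" (author) left in each subreddit
    tf = df.groupBy('subreddit', term_colname).agg(f.count('*').alias('tf'))

    # document frequency: how many subreddits each author appears in
    idf = tf.groupBy(term_colname).agg(f.countDistinct('subreddit').alias('df'))

    # inverse document frequency and the tf-idf weight itself
    n_docs = tf.select('subreddit').distinct().count()
    tfidf = (tf.join(idf, on=term_colname)
               .withColumn('idf', f.log(f.lit(n_docs) / f.col('df')))
               .withColumn('tf_idf', f.col('tf') * f.col('idf')))
    return tfidf

If the intent is to restrict the analysis to the most active subreddits, the commented-out line in the diff would do it, e.g. include_subs = set(include_subs.loc[include_subs.comments_rank < 300]['subreddit']).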
