X-Git-Url: https://code.communitydata.science/cdsc_reddit.git/blobdiff_plain/e6294b5b90135a5163441c8dc62252dd6a188412..9345f9de9437d5965ad4ee5874bc24199e077d48:/similarities/top_subreddits_by_comments.py?ds=sidebyside

diff --git a/similarities/top_subreddits_by_comments.py b/similarities/top_subreddits_by_comments.py
index 214c7e0..74ffb8d 100644
--- a/similarities/top_subreddits_by_comments.py
+++ b/similarities/top_subreddits_by_comments.py
@@ -1,17 +1,28 @@
 from pyspark.sql import functions as f
 from pyspark.sql import SparkSession
 from pyspark.sql import Window
+from datetime import datetime
+from pathlib import Path
 
 spark = SparkSession.builder.getOrCreate()
 conf = spark.sparkContext.getConf()
 
-df = spark.read.parquet("/gscratch/comdata/output/reddit_comments_by_subreddit.parquet")
+submissions = spark.read.parquet("../../data/reddit_submissions_by_subreddit.parquet")
+submissions = submissions.filter(f.col("CreatedAt") <= datetime(2020,4,13))
+
+prop_nsfw = submissions.select(['subreddit','over_18']).groupby('subreddit').agg(f.mean(f.col('over_18').astype('double')).alias('prop_nsfw'))
+
+df = spark.read.parquet("../../data/reddit_comments_by_subreddit.parquet")
+df = df.filter(f.col("CreatedAt") <= datetime(2020,4,13))
 
 # remove /u/ pages
 df = df.filter(~df.subreddit.like("u_%"))
 
 df = df.groupBy('subreddit').agg(f.count('id').alias("n_comments"))
 
+df = df.join(prop_nsfw,on='subreddit')
+df = df.filter(df.prop_nsfw < 0.5)
+
 win = Window.orderBy(f.col('n_comments').desc())
 
 df = df.withColumn('comments_rank', f.rank().over(win))
@@ -19,4 +30,6 @@ df = df.toPandas()
 
 df = df.sort_values("n_comments")
 
-df.to_csv('/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv', index=False)
+outpath = Path("../../data/reddit_similarity/subreddits_by_num_comments_nonsfw.csv")
+outpath.parent.mkdir(exist_ok=True, parents=True)
+df.to_csv(str(outpath), index=False)
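
The substantive change above is the prop_nsfw join: for each subreddit, the mean of the boolean over_18 flag across its submissions gives the fraction of NSFW posts, and subreddits where that fraction is 0.5 or higher are dropped before comments are counted and ranked. Below is a minimal sketch of that aggregation on made-up toy data (not part of the diff; column names mirror the script, subreddit names are hypothetical):

    from pyspark.sql import SparkSession, functions as f

    spark = SparkSession.builder.getOrCreate()

    # Toy submissions table: one row per submission, with its NSFW flag.
    toy = spark.createDataFrame(
        [("sub_a", False), ("sub_a", False), ("sub_b", True), ("sub_b", True)],
        ["subreddit", "over_18"],
    )

    # Mean of the boolean flag (cast to double) = proportion of NSFW submissions.
    prop_nsfw = (toy.groupBy("subreddit")
                    .agg(f.mean(f.col("over_18").cast("double")).alias("prop_nsfw")))

    # Keep only majority-SFW subreddits, as the script does with prop_nsfw < 0.5.
    prop_nsfw.filter(f.col("prop_nsfw") < 0.5).show()
    # -> only sub_a (prop_nsfw = 0.0) survives; sub_b (prop_nsfw = 1.0) is dropped.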