code.communitydata.science - cdsc_reddit.git/blobdiff - similarities/top_subreddits_by_comments.py
grid sweep selection for clustering hyperparameters
[cdsc_reddit.git] / similarities / top_subreddits_by_comments.py
index 214c7e0b91d7f720a43a685d05b05186769ee7f2..1197b512a9063904e7a566c1def394a28e52a519 100644 (file)
@@ -5,6 +5,10 @@ from pyspark.sql import Window
# Rank subreddits by comment volume, excluding user pages and
# predominantly-NSFW communities, so downstream similarity jobs can
# select the top-K subreddits.
# Requires (from the file header, above this hunk): SparkSession,
# pyspark.sql.functions as f, and Window from pyspark.sql.

spark = SparkSession.builder.getOrCreate()
conf = spark.sparkContext.getConf()

# Per-subreddit share of NSFW posts: over_18 is boolean, so the mean of
# its double cast is the proportion of submissions flagged over_18.
submissions = spark.read.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet")

prop_nsfw = submissions.select(['subreddit', 'over_18']) \
                       .groupby('subreddit') \
                       .agg(f.mean(f.col('over_18').astype('double')).alias('prop_nsfw'))

df = spark.read.parquet("/gscratch/comdata/output/reddit_comments_by_subreddit.parquet")

# remove /u/ pages (user-profile "subreddits" are named u_<username>)
df = df.filter(~df.subreddit.like("u_%"))

# Comment counts per subreddit.
df = df.groupBy('subreddit').agg(f.count('id').alias("n_comments"))

# Drop subreddits where a majority of submissions are NSFW.
# NOTE(review): inner join also drops subreddits with comments but no
# submissions in the submissions parquet — presumably intended; confirm.
df = df.join(prop_nsfw, on='subreddit')
df = df.filter(df.prop_nsfw < 0.5)

# Dense ranking by comment volume, most-commented first. The unpartitioned
# window forces all rows onto one executor, which is acceptable here since
# there is only one row per subreddit.
win = Window.orderBy(f.col('n_comments').desc())
df = df.withColumn('comments_rank', f.rank().over(win))

Community Data Science Collective || Want to submit a patch?