code.communitydata.science - cdsc_reddit.git/blob - top_subreddits_by_comments.py
Add code for running tf-idf at the weekly level.
[cdsc_reddit.git] / top_subreddits_by_comments.py
1 from pyspark.sql import functions as f
2 from pyspark.sql import SparkSession
3 from pyspark.sql import Window
4 from pyspark.mllib.linalg.distributed import RowMatrix, CoordinateMatrix
5 import numpy as np
6 import pyarrow
7 import pandas as pd
8 import fire
9 from itertools import islice
10 from pathlib import Path
11 from similarities_helper import cosine_similarities
12
# Rank all subreddits by their total number of comments and write the
# result to a CSV, most-commented subreddit first.
spark = SparkSession.builder.getOrCreate()
# NOTE(review): conf is fetched but never used here — presumably kept for
# interactive inspection; confirm before removing.
conf = spark.sparkContext.getConf()

df = spark.read.parquet("/gscratch/comdata/output/reddit_comments_by_subreddit.parquet")

# remove /u/ pages (user-profile pseudo-subreddits, stored as "u_<name>")
df = df.filter(~df.subreddit.like("u_%"))

# one row per subreddit with its total comment count
df = df.groupBy('subreddit').agg(f.count('id').alias("n_comments"))

# rank 1 = most comments (window ordered descending)
win = Window.orderBy(f.col('n_comments').desc())
df = df.withColumn('comments_rank', f.rank().over(win))

df = df.toPandas()

# BUG FIX: the original sorted ascending, putting the *least*-commented
# subreddits first — inconsistent with comments_rank and with the script's
# purpose ("top subreddits"). Sort descending so rank 1 is the first row.
df = df.sort_values("n_comments", ascending=False)

df.to_csv('/gscratch/comdata/users/nathante/cdsc-reddit/subreddits_by_num_comments.csv',index=False)

Community Data Science Collective || Want to submit a patch?