from pyspark.sql import functions as f
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.read.parquet("/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/")
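# assumed input schema (not defined in this script): one row per
# author/subreddit/week with a raw term-frequency column 'tf'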
max_subreddit_week_authors = df.groupby(['subreddit','week']).max('tf')
max_subreddit_week_authors = max_subreddit_week_authors.withColumnRenamed('max(tf)','sr_week_max_tf')
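# sr_week_max_tf is the largest author tf within each subreddit/week,
# used below to normalize tf so highly active subreddits don't dominate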
df = df.join(max_subreddit_week_authors, ['subreddit','week'])
df = df.withColumn("relative_tf", df.tf / df.sr_week_max_tf)
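# relative_tf falls in (0, 1]: each author's tf relative to the most
# active author in the same subreddit/week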
# group by author / week
idf = df.groupby(['author','week']).count()

idf = idf.withColumnRenamed('count','idf')
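# despite the name, 'idf' holds the author's document frequency for the
# week: the number of subreddits the author appears in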
# output: author | week | df
#idf.write.parquet("/gscratch/comdata/users/nathante/reddit_tfidf_test_sorted_tf.parquet_temp",mode='overwrite',compression='snappy')
# collect the distinct authors to assign each one an integer id
authors = idf.select('author').distinct()
authors = authors.withColumn('author_id',f.monotonically_increasing_id())
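# monotonically_increasing_id() gives ids that are unique but not
# consecutive; that's fine here since they only need to be distinct keys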
# map authors to their ids in the tfs and the idfs
df = df.join(authors, on='author')

idf = idf.join(authors, on='author')
# join on author/week to attach each author's df to their tf rows
df = df.join(idf, on=['author_id','week','author'])
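# each row now carries relative_tf (from df) and idf (from idf)
# for one author/subreddit/week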
# agg by subreddit to make sparse tf/df vectors;
# tf/df = the normalized tf divided by the author's document frequency
df = df.withColumn("tf_idf", df.relative_tf / df.idf)
df = df.groupby(['subreddit','week']).agg(f.collect_list(f.struct('author_id','tf_idf')).alias('tfidf_maps'))

df = df.withColumn('tfidf_vec', f.map_from_entries('tfidf_maps'))
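# map_from_entries turns the array<struct<author_id,tf_idf>> into a
# map<author_id, tf_idf>, e.g. [(3, 0.5), (7, 0.25)] -> {3: 0.5, 7: 0.25}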
# output: subreddit | week | tf/df
df.write.parquet('/gscratch/comdata/users/nathante/test_tfidf_authors.parquet',mode='overwrite',compression='snappy')
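# a quick sanity check on the result (hypothetical; same path as above):
#spark.read.parquet('/gscratch/comdata/users/nathante/test_tfidf_authors.parquet') \
#    .select('subreddit','week','tfidf_vec').show(5, truncate=False)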