]> code.communitydata.science - cdsc_reddit.git/blob - tfidf_comments.py
b3e5624a3a5ae6e8585a45e9ffe4ee756a1cde2c
[cdsc_reddit.git] / tfidf_comments.py
1 from pyspark.sql import functions as f
2 from pyspark.sql import SparkSession
3 from pyspark.sql import Window
4
5 ## TODO: need to exclude automoderator / bot posts.
6 ## TODO: need to better handle hyperlinks.
7
# Compute per-subreddit tf-idf scores over comment terms, restricted to the
# top 25,000 subreddits by comment count, and write the scored table to parquet.
spark = SparkSession.builder.getOrCreate()
df = spark.read.parquet("/gscratch/comdata/users/nathante/reddit_tfidf_test.parquet_temp")

# Load the subreddit whitelist. Use a context manager so the file handle is
# closed deterministically instead of leaking until GC.
with open("/gscratch/comdata/users/nathante/cdsc-reddit/top_25000_subs_by_comments.txt") as subs_file:
    include_subs = {line.strip('\n') for line in subs_file}

# Restrict to whitelisted subreddits, then aggregate counts so that each
# (subreddit, term) pair is distinct. Column.isin expects concrete values;
# pass a list rather than a set for API compatibility across Spark versions.
df = df.filter(df.subreddit.isin(list(include_subs)))
df = df.groupBy(['subreddit', 'term']).agg(f.sum('tf').alias('tf'))

# Maximum term frequency within each subreddit, used to normalize tf.
max_subreddit_terms = df.groupBy(['subreddit']).max('tf')  # one row per subreddit
max_subreddit_terms = max_subreddit_terms.withColumnRenamed('max(tf)', 'sr_max_tf')

df = df.join(max_subreddit_terms, on='subreddit')

# Term frequency scaled by the subreddit's most frequent term (in [0, 1]).
df = df.withColumn("relative_tf", df.tf / df.sr_max_tf)

# Document frequency: since (subreddit, term) rows are distinct, count() per
# term is the number of subreddits the term appears in.
idf = df.groupBy(['term']).count()

# Total number of "documents" (subreddits) for the idf numerator.
N_docs = df.select('subreddit').distinct().count()

idf = idf.withColumn('idf', f.log(N_docs / f.col('count')))

# Assign dense integer ids to terms (ids are distinct by row_number).
terms = idf.select('term').distinct()
terms = terms.withColumn('term_id', f.row_number().over(Window.orderBy("term")))

# Assign dense integer ids to subreddits.
subreddits = df.select(['subreddit']).distinct()
subreddits = subreddits.withColumn('subreddit_id', f.row_number().over(Window.orderBy("subreddit")))

df = df.join(subreddits, on='subreddit')

# Map terms to their integer ids in both the tf and idf tables.
df = df.join(terms, on='term')  # (subreddit, term_id) is now unique
idf = idf.join(terms, on='term')

# Join tf and idf on term so each row carries both statistics.
df = df.join(idf, on=['term_id', 'term'])

# Double-normalization (K = 0.5) tf-idf: the smoothed, max-normalized term
# frequency (0.5 + 0.5 * relative_tf) is multiplied by idf.
# BUGFIX: the smoothing constant must be grouped with relative_tf BEFORE the
# idf product; the previous form 0.5 + (0.5 * relative_tf) * idf gave every
# term a flat 0.5 floor independent of idf, defeating the weighting.
df = df.withColumn("tf_idf", (0.5 + 0.5 * df.relative_tf) * df.idf)

df.write.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf.parquet', mode='overwrite', compression='snappy')

Community Data Science Collective || Want to submit a patch?