# similarities_helper.py
from pyspark.sql import Window
from pyspark.sql import functions as f
from enum import Enum
from pyspark.mllib.linalg.distributed import CoordinateMatrix
from tempfile import TemporaryDirectory
import pyarrow
import pyarrow.dataset as ds
from scipy.sparse import csr_matrix
import pandas as pd
import numpy as np

class tf_weight(Enum):
    MaxTF = 1
    Norm05 = 2
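# The two weighting families used below: with relative_tf = tf / (max tf in
# the subreddit), MaxTF sets tf_idf = relative_tf * idf, while Norm05 uses the
# double-normalization tf_idf = (0.5 + 0.5 * relative_tf) * idf.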

def read_tfidf_matrix(path, term_colname):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'

    dataset = ds.dataset(path, format='parquet')
    entries = dataset.to_table(columns=['tf_idf', 'subreddit_id_new', term_id_new]).to_pandas()
    # row_number() ids are 1-based, so shift them to 0-based matrix indices
    return csr_matrix((entries.tf_idf, (entries[term_id_new] - 1, entries.subreddit_id_new - 1)))

def column_similarities(mat):
    # L2-normalize each column; cosine similarity is then a simple dot product
    norm = np.matrix(np.power(mat.power(2).sum(axis=0), 0.5, dtype=np.float32))
    mat = mat.multiply(1/norm)
    sims = mat.T @ mat
    return sims

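# A minimal sketch (not part of the original pipeline): with proportional
# columns, every pairwise cosine similarity should come out as ~1.0.
def _example_column_similarities():
    toy = csr_matrix(np.array([[1.0, 2.0], [2.0, 4.0]]))
    return column_similarities(toy)  # expect a 2x2 result of values ~1.0
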
def prep_tfidf_entries(tfidf, term_colname, min_df, included_subreddits):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'

    if min_df is None:
        min_df = 0.1 * len(included_subreddits)

    tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits))

    # reset the subreddit ids
    sub_ids = tfidf.select('subreddit_id').distinct()
    sub_ids = sub_ids.withColumn("subreddit_id_new", f.row_number().over(Window.orderBy("subreddit_id")))
    tfidf = tfidf.join(sub_ids, 'subreddit_id')

    # only use terms that appear in at least min_df included subreddits
    new_count = tfidf.groupBy(term_id).agg(f.count(term_id).alias('new_count'))
    new_count = new_count.filter(f.col('new_count') >= min_df)
    tfidf = tfidf.join(new_count, term_id, how='inner')

    # reset the term ids
    term_ids = tfidf.select([term_id]).distinct()
    term_ids = term_ids.withColumn(term_id_new, f.row_number().over(Window.orderBy(term_id)))
    tfidf = tfidf.join(term_ids, term_id)

    # recompute tf_idf on the filtered vocabulary
    tfidf = tfidf.withColumnRenamed("tf_idf", "tf_idf_old")
    tfidf = tfidf.withColumn("tf_idf", (tfidf.relative_tf * tfidf.idf).cast('float'))

    tempdir = TemporaryDirectory(suffix='.parquet', prefix='term_tfidf_entries', dir='.')

    tfidf.write.parquet(tempdir.name, mode='overwrite', compression='snappy')
    return tempdir

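# A hedged sketch of how the helpers above compose. `tfidf` is assumed to be a
# DataFrame from build_tfidf_dataset (defined below) built with
# term_colname='term'; the function name here is illustrative only.
def _example_prep_and_similarities(tfidf, included_subreddits):
    tempdir = prep_tfidf_entries(tfidf, 'term', min_df=None,
                                 included_subreddits=included_subreddits)
    mat = read_tfidf_matrix(tempdir.name, 'term')
    return column_similarities(mat)
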
def cosine_similarities(tfidf, term_colname, min_df, included_subreddits, similarity_threshold):
    term = term_colname
    term_id = term + '_id'
    term_id_new = term + '_id_new'

    if min_df is None:
        min_df = 0.1 * len(included_subreddits)

    tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits))
    tfidf = tfidf.cache()

    # reset the subreddit ids
    sub_ids = tfidf.select('subreddit_id').distinct()
    sub_ids = sub_ids.withColumn("subreddit_id_new", f.row_number().over(Window.orderBy("subreddit_id")))
    tfidf = tfidf.join(sub_ids, 'subreddit_id')

    # only use terms that appear in at least min_df included subreddits
    new_count = tfidf.groupBy(term_id).agg(f.count(term_id).alias('new_count'))
    new_count = new_count.filter(f.col('new_count') >= min_df)
    tfidf = tfidf.join(new_count, term_id, how='inner')

    # reset the term ids
    term_ids = tfidf.select([term_id]).distinct()
    term_ids = term_ids.withColumn(term_id_new, f.row_number().over(Window.orderBy(term_id)))
    tfidf = tfidf.join(term_ids, term_id)

    # recompute tf_idf on the filtered vocabulary
    tfidf = tfidf.withColumnRenamed("tf_idf", "tf_idf_old")
    tfidf = tfidf.withColumn("tf_idf", tfidf.relative_tf * tfidf.idf)

    # step 1: make an rdd of entries, sorted by (dense) spark subreddit id;
    # the 1-based ids are shifted to 0-based matrix coordinates
    n_partitions = int(len(included_subreddits) * 2 / 5)

    entries = tfidf.select(f.col(term_id_new) - 1, f.col("subreddit_id_new") - 1, "tf_idf").rdd.repartition(n_partitions)

    # step 2: build a distributed CoordinateMatrix, keeping each partition to a
    # handful of subreddits' worth of entries
    coordMat = CoordinateMatrix(entries)
    coordMat = CoordinateMatrix(coordMat.entries.repartition(n_partitions))

    # goal: a matrix with one column per subreddit and one row per term tf-idf;
    # note that toRowMatrix() discards row indices (an IndexedRowMatrix would keep them)
    mat = coordMat.toRowMatrix()

    sim_dist = mat.columnSimilarities(threshold=similarity_threshold)

    return (sim_dist, tfidf)

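# A hedged sketch of the distributed path. columnSimilarities uses DIMSUM
# sampling: a threshold of 0.0 computes exact cosines, larger values trade
# accuracy for speed. `tfidf` is again assumed to come from build_tfidf_dataset.
def _example_distributed_similarities(tfidf, included_subreddits):
    sim_dist, tfidf = cosine_similarities(tfidf, 'term', None,
                                          included_subreddits,
                                          similarity_threshold=0.0)
    # sim_dist is a CoordinateMatrix of upper-triangular similarity entries
    return sim_dist.entries.map(lambda e: (e.i, e.j, e.value))
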
def build_weekly_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm05):
    term = term_colname
    term_id = term + '_id'

    # aggregate counts by week; now subreddit-term-week is distinct
    df = df.filter(df.subreddit.isin(include_subs))
    df = df.groupBy(['subreddit', term, 'week']).agg(f.sum('tf').alias('tf'))

    max_subreddit_terms = df.groupby(['subreddit', 'week']).max('tf')  # subreddit-weeks are distinct
    max_subreddit_terms = max_subreddit_terms.withColumnRenamed('max(tf)', 'sr_max_tf')
    df = df.join(max_subreddit_terms, on=['subreddit', 'week'])
    df = df.withColumn("relative_tf", df.tf / df.sr_max_tf)

    # group by term and week to get document frequencies
    idf = df.groupby([term, 'week']).count()

    N_docs = df.select(['subreddit', 'week']).distinct().groupby(['week']).agg(f.count("subreddit").alias("subreddits_in_week"))

    idf = idf.join(N_docs, on=['week'])

    # add a little smoothing to the idf
    idf = idf.withColumn('idf', f.log(idf.subreddits_in_week / (1 + f.col('count'))) + 1)

    # assign dense term ids within each week
    terms = idf.select([term, 'week']).distinct()
    terms = terms.withColumn(term_id, f.row_number().over(Window.partitionBy('week').orderBy(term)))

    # make subreddit ids
    subreddits = df.select(['subreddit', 'week']).distinct()
    subreddits = subreddits.withColumn('subreddit_id', f.row_number().over(Window.partitionBy("week").orderBy("subreddit")))

    df = df.join(subreddits, on=['subreddit', 'week'])

    # map terms to indexes in the tfs and the idfs
    df = df.join(terms, on=[term, 'week'])  # subreddit-term-week is unique

    idf = idf.join(terms, on=[term, 'week'])

    # join on subreddit/term to create tf/dfs indexed by term
    df = df.join(idf, on=[term_id, term, 'week'])

    # compute the tf-idf weight under the chosen weighting family
    if tf_family == tf_weight.MaxTF:
        df = df.withColumn("tf_idf", df.relative_tf * df.idf)
    else:  # tf_family == tf_weight.Norm05
        df = df.withColumn("tf_idf", (0.5 + 0.5 * df.relative_tf) * df.idf)

    return df

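# A hedged sketch for the weekly variant; the parquet path is hypothetical and
# the input is assumed to carry (subreddit, term, week, tf) columns.
def _example_weekly_tfidf(spark, include_subs):
    df = spark.read.parquet('/path/to/term_counts_by_week.parquet')
    return build_weekly_tfidf_dataset(df, include_subs, 'term')
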
def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm05):
    term = term_colname
    term_id = term + '_id'

    # aggregate counts; now subreddit-term is distinct
    df = df.filter(df.subreddit.isin(include_subs))
    df = df.groupBy(['subreddit', term]).agg(f.sum('tf').alias('tf'))

    max_subreddit_terms = df.groupby(['subreddit']).max('tf')  # subreddits are distinct
    max_subreddit_terms = max_subreddit_terms.withColumnRenamed('max(tf)', 'sr_max_tf')

    df = df.join(max_subreddit_terms, on='subreddit')

    df = df.withColumn("relative_tf", df.tf / df.sr_max_tf)

    # group by term to get document frequencies
    idf = df.groupby([term]).count()

    N_docs = df.select('subreddit').distinct().count()

    # add a little smoothing to the idf
    idf = idf.withColumn('idf', f.log(N_docs / (1 + f.col('count'))) + 1)

    # assign dense term ids
    terms = idf.select(term).distinct()
    terms = terms.withColumn(term_id, f.row_number().over(Window.orderBy(term)))

    # make subreddit ids
    subreddits = df.select(['subreddit']).distinct()
    subreddits = subreddits.withColumn('subreddit_id', f.row_number().over(Window.orderBy("subreddit")))

    df = df.join(subreddits, on='subreddit')

    # map terms to indexes in the tfs and the idfs
    df = df.join(terms, on=term)  # subreddit-term is unique

    idf = idf.join(terms, on=term)

    # join on subreddit/term to create tf/dfs indexed by term
    df = df.join(idf, on=[term_id, term])

    # compute the tf-idf weight under the chosen weighting family
    if tf_family == tf_weight.MaxTF:
        df = df.withColumn("tf_idf", df.relative_tf * df.idf)
    else:  # tf_family == tf_weight.Norm05
        df = df.withColumn("tf_idf", (0.5 + 0.5 * df.relative_tf) * df.idf)

    return df

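# A hedged end-to-end sketch combining the pieces in this module; the path and
# SparkSession setup are illustrative assumptions, not project configuration.
def _example_end_to_end():
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    df = spark.read.parquet('/path/to/term_counts.parquet')  # (subreddit, term, tf)
    include_subs = [r.subreddit for r in df.select('subreddit').distinct().collect()]
    tfidf = build_tfidf_dataset(df, include_subs, 'term')
    tempdir = prep_tfidf_entries(tfidf, 'term', min_df=None,
                                 included_subreddits=include_subs)
    mat = read_tfidf_matrix(tempdir.name, 'term')
    return column_similarities(mat)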
