from pyspark.sql import functions as f
from pyspark.sql import SparkSession
from pyspark.sql import Window
from pyspark.mllib.linalg.distributed import CoordinateMatrix
import numpy as np
import pyarrow
import pyarrow.feather
import pandas as pd
import fire
from itertools import islice
from pathlib import Path

# NB: this module-level default is shadowed by the function parameter below
min_df = 1000

spark = SparkSession.builder.getOrCreate()
conf = spark.sparkContext.getConf()
# outfile = '/gscratch/comdata/users/nathante/test_similarities_500.feather'; min_df = None; included_subreddits=None; similarity_threshold=0;
def spark_similarities(outfile, min_df=None, included_subreddits=None, similarity_threshold=0):
    '''
    Compute cosine similarities between subreddits based on tf-idf vectors of comment texts.

    outfile : string
        Base path for the csv and feather outputs.

    min_df : int (default = 0.1 * number of included_subreddits)
        Exclude terms that appear in fewer than this number of documents (subreddits).

    included_subreddits : string
        Path to a text file listing the subreddits to include (one per line). If None, use the top 500 subreddits.

    similarity_threshold : double (default = 0)
        Set > 0 for large numbers of subreddits to get an approximate solution using the DIMSUM algorithm (https://stanford.edu/~rezab/papers/dimsum.pdf). With similarity_threshold=0 we get an exact solution using an O(N^2) algorithm.
    '''

    tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf.parquet')

    if included_subreddits is None:
        included_subreddits = list(islice(open("/gscratch/comdata/users/nathante/cdsc-reddit/top_25000_subs_by_comments.txt"), 500))
        included_subreddits = {s.strip('\n') for s in included_subreddits}
    else:
        # strip trailing newlines so the names match the `subreddit` column
        included_subreddits = {s.strip('\n') for s in open(included_subreddits)}
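
    # by default a term must appear in at least 10% of the included subreddits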
    if min_df is None:
        min_df = 0.1 * len(included_subreddits)

    tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits))
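
    # row_number() over an ordered window assigns dense, contiguous 1-based ids,
    # which are shifted to 0-based matrix coordinates further down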
    # reset the subreddit ids
    sub_ids = tfidf.select('subreddit_id').distinct()
    sub_ids = sub_ids.withColumn("subreddit_id_new", f.row_number().over(Window.orderBy("subreddit_id")))
    tfidf = tfidf.join(sub_ids, 'subreddit_id')
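
    # assuming one row per (term, subreddit) pair, counting rows per term gives
    # each term's document frequency among the included subreddits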
    # only use terms that appear in at least min_df included subreddits
    new_count = tfidf.groupBy('term_id').agg(f.count('term_id').alias('new_count'))
    term_ids = new_count.filter(f.col('new_count') >= min_df).select('term_id')

    # reset the term ids
    term_ids = term_ids.withColumn("term_id_new", f.row_number().over(Window.orderBy("term_id")))
    tfidf = tfidf.join(term_ids, 'term_id')  # inner join drops terms below min_df

    # step 1: make an RDD of (term index, subreddit index, tf_idf) entries,
    # shifting the 1-based dense ids to 0-based matrix coordinates
    entries = tfidf.select(f.col("term_id_new") - 1, f.col("subreddit_id_new") - 1, "tf_idf").rdd
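
    # CoordinateMatrix accepts an RDD of MatrixEntry or (row, col, value) tuples;
    # pyspark Row objects are tuple subclasses, so the raw .rdd works here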
    # step 2: make it into a distributed CoordinateMatrix
    coordMat = CoordinateMatrix(entries)

    # columnSimilarities() is defined on RowMatrix, so convert
    mat = coordMat.toRowMatrix()
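
    # with threshold > 0, Spark samples columns DIMSUM-style and the result is
    # approximate; columnSimilarities() returns only the upper triangle (i < j)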
    # goal: a matrix with a row per term and a column per subreddit, so that
    # columnSimilarities() yields cosine similarities between subreddits
    sim_dist = mat.columnSimilarities(threshold=similarity_threshold)

    print(sim_dist.numRows(), sim_dist.numCols())

    # instead of toLocalMatrix(), read the entries and put them straight into pandas
    sim_entries = sim_dist.entries.collect()
    sim_entries = pd.DataFrame([{'i': me.i, 'j': me.j, 'value': me.value} for me in sim_entries])
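
    # note: collect() pulls every entry to the driver; with 500 subreddits the
    # exact method yields at most 500*499/2 = 124,750 pairs, but this will not
    # scale to very large numbers of subreddits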
    df = tfidf.select('subreddit', 'subreddit_id_new').distinct().toPandas()
    df = df.sort_values('subreddit_id_new').reset_index(drop=True)

    # shift to 0-based ids so the index lines up with the matrix coordinates
    df['subreddit_id_new'] = df['subreddit_id_new'] - 1
    df = df.set_index('subreddit_id_new')
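
    # attach subreddit names to both endpoints of each similarity entry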
    similarities = sim_entries.join(df, on='i')
    similarities = similarities.rename(columns={'subreddit': "subreddit_i"})
    similarities = similarities.join(df, on='j')
    similarities = similarities.rename(columns={'subreddit': "subreddit_j"})
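
    # derive the .feather and .csv output paths from `outfile` by swapping its suffix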
    p = Path(outfile)
    output_feather = Path(str(p).replace("".join(p.suffixes), ".feather"))
    output_csv = Path(str(p).replace("".join(p.suffixes), ".csv"))

    pyarrow.feather.write_feather(similarities, str(output_feather))
    # pyarrow exposes csv writing under pyarrow.csv, not at top level; pandas is simpler here
    similarities.to_csv(output_csv, index=False)
    return similarities

if __name__ == '__main__':
    fire.Fire(spark_similarities)
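
# Example invocation via fire (the output path is illustrative):
# python3 term_cosine_similarity.py --outfile=/gscratch/comdata/users/nathante/subreddit_similarities.feather --min_df=1000 --similarity_threshold=0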
