]> code.communitydata.science - cdsc_reddit.git/blobdiff - similarities/tfidf.py
support passing in list of tfidf vectors.
[cdsc_reddit.git] / similarities / tfidf.py
index 5b1c0c94d450da2d7259bc828404c779c734d62c..98a283528e1066bafa2759cb6c548e5eae2caf26 100644 (file)
@@ -3,67 +3,78 @@ from pyspark.sql import SparkSession
 from pyspark.sql import functions as f
 from similarities_helper import build_tfidf_dataset, build_weekly_tfidf_dataset, select_topN_subreddits
 
 from pyspark.sql import functions as f
 from similarities_helper import build_tfidf_dataset, build_weekly_tfidf_dataset, select_topN_subreddits
 
-
def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_subreddits):
    """Shared driver for building tf-idf datasets with Spark.

    func: dataset builder applied to the filtered dataframe
        (e.g. build_tfidf_dataset or build_weekly_tfidf_dataset).
    inpath: parquet input path.
    outpath: parquet output path (overwritten, snappy-compressed).
    topN: number of top subreddits to include when included_subreddits is None.
    term_colname: name of the term column (e.g. 'author' or 'term').
    exclude: term values to drop before building the dataset.
    included_subreddits: optional path to a text file with one subreddit
        name per line; overrides topN selection when given.
    """
    spark = SparkSession.builder.getOrCreate()

    df = spark.read.parquet(inpath)

    # Drop excluded terms (e.g. '[deleted]', bot accounts).
    df = df.filter(~ f.col(term_colname).isin(exclude))

    if included_subreddits is not None:
        # Read the explicit subreddit list; lowercase + strip to normalize.
        # Use a context manager so the file handle is closed (original leaked it).
        with open(included_subreddits) as sub_file:
            include_subs = set(line.lower().strip() for line in sub_file)
    else:
        include_subs = select_topN_subreddits(topN)

    df = func(df, include_subs, term_colname)

    # Fix: keyword was misspelled 'copmression', which raises TypeError.
    df.write.parquet(outpath, mode='overwrite', compression='snappy')

    spark.stop()
def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits):
    """Build a (non-weekly) tf-idf dataset from inpath and write it to outpath."""
    return _tfidf_wrapper(build_tfidf_dataset,
                          inpath,
                          outpath,
                          topN,
                          term_colname,
                          exclude,
                          included_subreddits)
def tfidf_weekly(inpath, outpath, topN, term_colname, exclude, included_subreddits):
    """Build a weekly tf-idf dataset from inpath and write it to outpath."""
    return _tfidf_wrapper(build_weekly_tfidf_dataset,
                          inpath,
                          outpath,
                          topN,
                          term_colname,
                          exclude,
                          included_subreddits)
def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                  topN=25000,
                  included_subreddits=None):
    """Tf-idf over comment authors, excluding deleted accounts and AutoModerator."""
    source = "/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet"
    excluded_authors = ['[deleted]', 'AutoModerator']
    return tfidf(source,
                 outpath,
                 topN,
                 'author',
                 excluded_authors,
                 included_subreddits=included_subreddits)
def tfidf_terms(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet',
                topN=25000,
                included_subreddits=None):
    """Tf-idf over comment terms; no term exclusions."""
    source = "/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet"
    return tfidf(source,
                 outpath,
                 topN,
                 'term',
                 [],
                 included_subreddits=included_subreddits)
def tfidf_authors_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet',
                         topN=25000,
                         included_subreddits=None):
    """Weekly tf-idf over comment authors, excluding deleted accounts and AutoModerator."""
    source = "/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet"
    excluded_authors = ['[deleted]', 'AutoModerator']
    return tfidf_weekly(source,
                        outpath,
                        topN,
                        'author',
                        excluded_authors,
                        included_subreddits=included_subreddits)
+
def tfidf_terms_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet',
                       topN=25000,
                       included_subreddits=None):
    """Weekly tf-idf over comment terms; no term exclusions."""
    source = "/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet"
    return tfidf_weekly(source,
                        outpath,
                        topN,
                        'term',
                        [],
                        included_subreddits=included_subreddits)
 
 if __name__ == "__main__":
 
 
 if __name__ == "__main__":

Community Data Science Collective || Want to submit a patch?