code.communitydata.science - cdsc_reddit.git/blobdiff - similarities/tfidf.py
refactor clustering in object-oriented style
[cdsc_reddit.git] / similarities / tfidf.py
index 885dae2af76cc00f74f82a8dc133285a6281d9e2..002e89f785b37fd9df3c903775ab6f71846909d4 100644 (file)
@@ -3,27 +3,28 @@ from pyspark.sql import SparkSession
 from pyspark.sql import functions as f
 from similarities_helper import build_tfidf_dataset, build_weekly_tfidf_dataset, select_topN_subreddits
 
-
def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_subreddits):
    """Shared driver for the tf-idf builders.

    Reads a parquet dataset of term counts, drops excluded term values,
    restricts to a set of subreddits (either an explicit list file or the
    top-N most active), applies `func` to build the tf-idf dataset, and
    writes the result back out as snappy-compressed parquet.

    Parameters
    ----------
    func : callable
        build_tfidf_dataset or build_weekly_tfidf_dataset; must return an
        object exposing a `.parquet(path, ...)` writer.
    inpath : str
        Path of the input parquet dataset.
    outpath : str
        Destination parquet path (overwritten).
    topN : int
        Number of top subreddits to keep when `included_subreddits` is None.
    term_colname : str
        Column holding the term (e.g. 'term' or 'author').
    exclude : list
        Term values to filter out before computing tf-idf.
    included_subreddits : str or None
        Optional path to a newline-delimited file of subreddit names; when
        given it takes precedence over top-N selection.
    """
    spark = SparkSession.builder.getOrCreate()

    df = spark.read.parquet(inpath)

    df = df.filter(~ f.col(term_colname).isin(exclude))

    if included_subreddits is not None:
        # BUG FIX: `list(open(path))` kept the trailing newline on every
        # name (so they could never match the subreddit column) and leaked
        # the file handle. Strip each line and close the file deterministically.
        with open(included_subreddits) as subreddits_file:
            include_subs = [line.strip() for line in subreddits_file]
    else:
        include_subs = select_topN_subreddits(topN)

    dfwriter = func(df, include_subs, term_colname)

    dfwriter.parquet(outpath, mode='overwrite', compression='snappy')
    spark.stop()
 
def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits):
    """Build an all-time tf-idf dataset; see _tfidf_wrapper for argument details."""
    return _tfidf_wrapper(build_tfidf_dataset,
                          inpath,
                          outpath,
                          topN,
                          term_colname,
                          exclude,
                          included_subreddits)
 
def tfidf_weekly(inpath, outpath, topN, term_colname, exclude, included_subreddits):
    """Build a per-week tf-idf dataset; see _tfidf_wrapper for argument details."""
    return _tfidf_wrapper(build_weekly_tfidf_dataset,
                          inpath,
                          outpath,
                          topN,
                          term_colname,
                          exclude,
                          included_subreddits)
 
 def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                   topN=25000):
@@ -32,7 +33,8 @@ def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comm
                  outpath,
                  topN,
                  'author',
-                 ['[deleted]','AutoModerator']
+                 ['[deleted]','AutoModerator'],
+                 included_subreddits=None
                  )
 
 def tfidf_terms(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet',
@@ -42,28 +44,31 @@ def tfidf_terms(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/commen
                  outpath,
                  topN,
                  'term',
-                 []
+                 [],
+                 included_subreddits=None
                  )
 
def tfidf_authors_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet',
                         topN=25000):
    """Weekly author tf-idf over the top `topN` subreddits, written to `outpath`."""
    inpath = "/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet"
    # '[deleted]' and AutoModerator are not meaningful authors.
    excluded_authors = ['[deleted]', 'AutoModerator']
    return tfidf_weekly(inpath,
                        outpath,
                        topN,
                        'author',
                        excluded_authors,
                        included_subreddits=None)
 
def tfidf_terms_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet',
                       topN=25000):
    """Weekly term tf-idf over the top `topN` subreddits, written to `outpath`."""
    inpath = "/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet"
    # No term values are excluded for the terms dataset.
    return tfidf_weekly(inpath,
                        outpath,
                        topN,
                        'term',
                        [],
                        included_subreddits=None)
 
 

Community Data Science Collective || Want to submit a patch?