topN,
                                exclude_phrases,
                                from_date,
-                               to_date)
+                               to_date
+                               )
 
 def author_cosine_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None):
     return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                                topN,
                                exclude_phrases=False,
                                from_date=from_date,
-                               to_date=to_date)
+                               to_date=to_date
+                               )
 
 def author_tf_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None):
     return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                                topN,
                                exclude_phrases=False,
                                from_date=from_date,
                                to_date=to_date,
-                               tfidf_colname='relative_tf')
+                               tfidf_colname='relative_tf'
+                               )
 
 
 if __name__ == "__main__":
 
 from pyspark.sql import functions as f
 from similarities_helper import build_tfidf_dataset, build_weekly_tfidf_dataset, select_topN_subreddits
 
-
-def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude):
+def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_subreddits):
     spark = SparkSession.builder.getOrCreate()
 
     df = spark.read.parquet(inpath)
 
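+    # drop rows whose term/author appears in the exclusion list (e.g. '[deleted]', 'AutoModerator' for authors)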
     df = df.filter(~ f.col(term_colname).isin(exclude))
 
-    include_subs = select_topN_subreddits(topN)
+    if included_subreddits is not None:
+        # the file is expected to contain one subreddit name per line; strip newlines so names match
+        include_subs = [s.strip() for s in open(included_subreddits)]
+    else:
+        include_subs = select_topN_subreddits(topN)
 
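+    # build the tf-idf dataset (overall or weekly, depending on func) restricted to the selected subreddits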
     df = func(df, include_subs, term_colname)
 
+    # write the resulting tf-idf dataset to outpath
+    df.write.parquet(outpath, mode='overwrite')
     spark.stop()
 
-def tfidf(inpath, outpath, topN, term_colname, exclude):
-    return _tfidf_wrapper(build_tfidf_dataset, inpath, outpath, topN, term_colname, exclude)
+def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits):
+    return _tfidf_wrapper(build_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)
 
-def tfidf_weekly(inpath, outpath, topN, term_colname, exclude):
-    return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, exclude)
+def tfidf_weekly(inpath, outpath, topN, term_colname, exclude, included_subreddits):
+    return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)
 
 def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                   topN=25000):
     return tfidf("/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
                  outpath,
                  topN,
                  'author',
-                 ['[deleted]','AutoModerator']
+                 ['[deleted]','AutoModerator'],
+                 included_subreddits=None
                  )
 
 def tfidf_terms(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet',
                  topN=25000):
     return tfidf("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet",
                  outpath,
                  topN,
                  'term',
-                 []
+                 [],
+                 included_subreddits=None
                  )
 
 def tfidf_authors_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet',
-                  topN=25000):
+                         topN=25000):
 
     return tfidf_weekly("/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
-                 outpath,
-                 topN,
-                 'author',
-                 ['[deleted]','AutoModerator']
-                 )
+                        outpath,
+                        topN,
+                        'author',
+                        ['[deleted]','AutoModerator'],
+                        included_subreddits=None
+                        )
 
 def tfidf_terms_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet',
-                topN=25000):
+                       topN=25000):
 
 
     return tfidf_weekly("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet",
                         outpath,
                         topN,
                         'term',
-                        []
+                        [],
+                        included_subreddits=None
                         )