update pushshift dumps.
diff --git a/similarities/lsi_similarities.py b/similarities/lsi_similarities.py
index 7ab7e8c204e9f89408e24169859826d4da6334a7..eb89f55789c5dcb285c78737d8d280b935e2bb72 100644
--- a/similarities/lsi_similarities.py
+++ b/similarities/lsi_similarities.py
@@ -1,20 +1,41 @@
 import pandas as pd
 import fire
 from pathlib import Path
-from similarities_helper import similarities, lsi_column_similarities
+from similarities_helper import *
+#from similarities_helper import similarities, lsi_column_similarities
 from functools import partial
 
-def lsi_similarities(infile, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, from_date=None, to_date=None, tfidf_colname='tf_idf',n_components=100,n_iter=5,random_state=1968,algorithm='arpack'):
+inpath = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_terms_compex.parquet/" # NOTE: this and the module-level assignments below look like ad hoc defaults for interactive runs; the functions defined below take their own arguments
+term_colname='term'
+outfile='/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_terms_compex_LSI'
+n_components=[10,50,100]
+included_subreddits="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/included_subreddits.txt"
+n_iter=5
+random_state=1968
+algorithm='arpack'
+topN = None
+from_date=None
+to_date=None
+min_df=None
+max_df=None
+def lsi_similarities(inpath, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=None, from_date=None, to_date=None, tfidf_colname='tf_idf',n_components=100,n_iter=5,random_state=1968,algorithm='arpack',lsi_model=None):
     print(n_components,flush=True)
 
-    simfunc = partial(lsi_column_similarities,n_components=n_components,n_iter=n_iter,random_state=random_state,algorithm=algorithm)
+
+    if lsi_model is None:
+        if isinstance(n_components, list):
+            lsi_model = Path(outfile) / f'{max(n_components)}_{term_colname}_LSIMOD.pkl'
+        else:
+            lsi_model = Path(outfile) / f'{n_components}_{term_colname}_LSIMOD.pkl'
 
-    return similarities(infile=infile, simfunc=simfunc, term_colname=term_colname, outfile=outfile, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, from_date=from_date, to_date=to_date, tfidf_colname=tfidf_colname)
+    simfunc = partial(lsi_column_similarities,n_components=n_components,n_iter=n_iter,random_state=random_state,algorithm=algorithm,lsi_model_save=lsi_model)
+
+    return similarities(inpath=inpath, simfunc=simfunc, term_colname=term_colname, outfile=outfile, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, from_date=from_date, to_date=to_date, tfidf_colname=tfidf_colname)
 
 # change so that these take in an input as an optional argument (for speed, but also for idf).
-def term_lsi_similarities(outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, from_date=None, to_date=None, n_components=300,n_iter=5,random_state=1968,algorithm='arpack'):
+def term_lsi_similarities(inpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k.parquet',outfile=None, min_df=None, max_df=None, included_subreddits=None, topN=None, from_date=None, to_date=None, algorithm='arpack', n_components=300,n_iter=5,random_state=1968):
 
-    return lsi_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k.parquet',
+    res =  lsi_similarities(inpath,
                             'term',
                             outfile,
                             min_df,
@@ -23,11 +44,13 @@ def term_lsi_similarities(outfile, min_df=None, max_df=None, included_subreddits
                             topN,
                             from_date,
                             to_date,
-                            n_components=n_components
+                            n_components=n_components,
+                            algorithm = algorithm
                             )
+    return res
 
-def author_lsi_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None,n_components=300,n_iter=5,random_state=1968,algorithm='arpack'):
-    return lsi_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet',
+def author_lsi_similarities(inpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet',outfile=None, min_df=2, max_df=None, included_subreddits=None, topN=None, from_date=None, to_date=None,algorithm='arpack',n_components=300,n_iter=5,random_state=1968):
+    return lsi_similarities(inpath,
                             'author',
                             outfile,
                             min_df,
@@ -39,8 +62,8 @@ def author_lsi_similarities(outfile, min_df=2, max_df=None, included_subreddits=
                             n_components=n_components
                                )
 
-def author_tf_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None,n_components=300,n_iter=5,random_state=1968,algorithm='arpack'):
-    return lsi_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet',
+def author_tf_similarities(inpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet',outfile=None, min_df=2, max_df=None, included_subreddits=None, topN=None, from_date=None, to_date=None,algorithm='arpack',n_components=300,n_iter=5,random_state=1968):
+    return lsi_similarities(inpath,
                             'author',
                             outfile,
                             min_df,
@@ -50,7 +73,8 @@ def author_tf_similarities(outfile, min_df=2, max_df=None, included_subreddits=N
                             from_date=from_date,
                             to_date=to_date,
                             tfidf_colname='relative_tf',
-                            n_components=n_components
+                            n_components=n_components,
+                            algorithm=algorithm
                             )
 
 

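The fire-based command-line entry point lies outside the hunks shown above, so the sketch below is a hypothetical direct call rather than part of this change; the import form and the outfile value are assumptions. It illustrates the two behaviors the patch introduces: inpath is now an optional argument with a default, and n_components may be a list, in which case the saved LSI model's filename is derived from the largest value.

    # Hypothetical usage of the updated interface; outfile is a placeholder path.
    from lsi_similarities import term_lsi_similarities

    term_lsi_similarities(
        # inpath omitted: it now defaults to the 100k comment-terms TF-IDF parquet
        outfile="comment_terms_LSI",  # placeholder output directory; the fitted LSI model is saved inside it
        n_components=[10, 50, 100],   # a list is allowed; the model file name uses the largest value (100_term_LSIMOD.pkl)
        algorithm="arpack",
    )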