code.communitydata.science - cdsc_reddit.git/commitdiff
Merge remote-tracking branch 'refs/remotes/origin/excise_reindex' into excise_reindex
author Nathan TeBlunthuis <nathante@uw.edu>
Wed, 6 Apr 2022 18:14:13 +0000 (11:14 -0700)
committer Nathan TeBlunthuis <nathante@uw.edu>
Wed, 6 Apr 2022 18:14:13 +0000 (11:14 -0700)
density/overlap_density.py
similarities/lsi_similarities.py
similarities/tfidf.py
similarities/weekly_cosine_similarities.py
timeseries/cluster_timeseries.py

density/overlap_density.py
index 20368249cd72c210a91e5d639213ce6edba6feef..ef0eb26953f4c83775972be61afb36cffca814c7 100644 (file)
@@ -4,9 +4,9 @@ from pathlib import Path
 import fire
 import numpy as np
 import sys
-sys.path.append("..")
-sys.path.append("../similarities")
-from similarities.similarities_helper import reindex_tfidf
+sys.path.append("..")
+sys.path.append("../similarities")
+# from similarities.similarities_helper import pull_tfidf
 
 # this is the mean of the ratio of the overlap to the focal size.
 # mean shared membership per focal community member
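
The comment above describes the overlap density statistic: the mean, over community pairs, of shared membership divided by the focal community's size. A minimal sketch of that computation in pandas, with illustrative column names (shared_members and focal_size are hypothetical, not the repository's actual schema):

    import pandas as pd

    # One row per (focal, other) subreddit pair; columns are hypothetical.
    overlaps = pd.DataFrame({
        'focal':          ['a', 'a', 'b'],
        'other':          ['b', 'c', 'c'],
        'shared_members': [20, 5, 10],
        'focal_size':     [100, 100, 50],
    })

    # Ratio of the overlap to the focal size, averaged per focal community:
    # mean shared membership per focal community member.
    overlaps['ratio'] = overlaps['shared_members'] / overlaps['focal_size']
    density = overlaps.groupby('focal')['ratio'].mean()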
similarities/lsi_similarities.py
index 493755fbde9934d529196b22fe9d76eba6b888c4..57a2d0d6e25fb27d9a083df63b282ac01ecff9e5 100644 (file)
@@ -5,14 +5,14 @@ from similarities_helper import *
 #from similarities_helper import similarities, lsi_column_similarities
 from functools import partial
 
-# inpath = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_terms_compex.parquet/"
-# term_colname='term'
-# outfile='/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_terms_compex_LSI'
+# inpath = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_authors_compex.parquet"
+# term_colname='authors'
+# outfile='/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_test_compex_LSI'
 # n_components=[10,50,100]
 # included_subreddits="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/included_subreddits.txt"
 # n_iter=5
 # random_state=1968
-# algorithm='arpack'
+# algorithm='randomized'
 # topN = None
 # from_date=None
 # to_date=None
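
The commented-out test parameters above switch the SVD solver from 'arpack' to 'randomized'. Assuming the LSI step wraps scikit-learn's TruncatedSVD (the helper in similarities_helper is not shown in this diff), the trade-off looks like this sketch: the randomized solver scales to large sparse matrices, with accuracy tunable via n_iter, while arpack is exact but slower.

    from scipy.sparse import random as sparse_random
    from sklearn.decomposition import TruncatedSVD

    # Toy term-by-subreddit TF-IDF matrix; shapes are illustrative.
    mat = sparse_random(1000, 200, density=0.01, format='csr', random_state=1968)

    # algorithm='randomized' approximates the top components cheaply;
    # algorithm='arpack' computes them exactly but is slower on large inputs.
    svd = TruncatedSVD(n_components=100, algorithm='randomized',
                       n_iter=5, random_state=1968)
    subreddit_embeddings = svd.fit_transform(mat.T)  # one row per subreddit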
similarities/tfidf.py
index bbae528c0145fdcb98fcda7a9072400c43c60ebb..c44fd0ddbf14d49f7c96e9f4be92c03bcd5b4c96 100644 (file)
@@ -2,8 +2,11 @@ import fire
 from pyspark.sql import SparkSession
 from pyspark.sql import functions as f
 from similarities_helper import tfidf_dataset, build_weekly_tfidf_dataset, select_topN_subreddits
+from functools import partial
 
-def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_subreddits):
+inpath = '/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_authors_compex.parquet'
+# included_terms is a path to a parquet file with a term_colname column listing the terms to keep.
+def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_subreddits, included_terms=None, min_df=None, max_df=None):
     spark = SparkSession.builder.getOrCreate()
 
     df = spark.read.parquet(inpath)
@@ -15,50 +18,71 @@ def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_
     else:
         include_subs = select_topN_subreddits(topN)
 
-    dfwriter = func(df, include_subs, term_colname)
+    include_subs = spark.sparkContext.broadcast(include_subs)
+
+    #    term_id = term_colname + "_id"
+
+    if included_terms is not None:
+        terms_df = spark.read.parquet(included_terms)
+        terms_df = terms_df.select(term_colname).distinct()
+        df = df.join(terms_df, on=term_colname, how='left_semi')
+
+    dfwriter = func(df, include_subs.value, term_colname)
 
     dfwriter.parquet(outpath,mode='overwrite',compression='snappy')
     spark.stop()
 
-def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits):
-    return _tfidf_wrapper(tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)
+def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits, min_df, max_df):
+    tfidf_func = partial(tfidf_dataset, max_df=max_df, min_df=min_df)
+    return _tfidf_wrapper(tfidf_func, inpath, outpath, topN, term_colname, exclude, included_subreddits)
+
+def tfidf_weekly(inpath, outpath, static_tfidf_path, topN, term_colname, exclude, included_subreddits):
+    return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits, included_terms=static_tfidf_path)
 
-def tfidf_weekly(inpath, outpath, topN, term_colname, exclude, included_subreddits):
-    return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)
 
 def tfidf_authors(inpath="/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
                   outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                   topN=None,
-                  included_subreddits=None):
+                  included_subreddits=None,
+                  min_df=None,
+                  max_df=None):
 
     return tfidf(inpath,
                  outpath,
                  topN,
                  'author',
                  ['[deleted]','AutoModerator'],
-                 included_subreddits=included_subreddits
+                 included_subreddits=included_subreddits,
+                 min_df=min_df,
+                 max_df=max_df
                  )
 
 def tfidf_terms(inpath="/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet",
                 outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet',
                 topN=None,
-                included_subreddits=None):
+                included_subreddits=None,
+                min_df=None,
+                max_df=None):
 
     return tfidf(inpath,
                  outpath,
                  topN,
                  'term',
                  [],
-                 included_subreddits=included_subreddits
+                 included_subreddits=included_subreddits,
+                 min_df=min_df,
+                 max_df=max_df
                  )
 
 def tfidf_authors_weekly(inpath="/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
+                         static_tfidf_path="/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet",
                          outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet',
                          topN=None,
                          included_subreddits=None):
 
     return tfidf_weekly(inpath,
                         outpath,
+                        static_tfidf_path,
                         topN,
                         'author',
                         ['[deleted]','AutoModerator'],
@@ -66,6 +90,7 @@ def tfidf_authors_weekly(inpath="/gscratch/comdata/output/reddit_ngrams/comment_
                         )
 
 def tfidf_terms_weekly(inpath="/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet",
+                       static_tfidf_path="/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet",
                        outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet',
                        topN=None,
                        included_subreddits=None):
@@ -73,6 +98,7 @@ def tfidf_terms_weekly(inpath="/gscratch/comdata/output/reddit_ngrams/comment_te
 
     return tfidf_weekly(inpath,
                         outpath,
+                        static_tfidf_path,
                         topN,
                         'term',
                         [],
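
The new included_terms option restricts the weekly TF-IDF vocabulary to the terms present in the static TF-IDF dataset, using a left-semi join as in the hunk above. A self-contained sketch of that join pattern (the toy data is illustrative):

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()

    df = spark.createDataFrame(
        [('a', 'cat'), ('a', 'dog'), ('b', 'cat'), ('b', 'xyzzy')],
        ['subreddit', 'term'])
    terms_df = spark.createDataFrame([('cat',), ('dog',)], ['term'])

    # left_semi keeps only the rows of df whose term appears in terms_df,
    # without adding any columns from terms_df.
    filtered = df.join(terms_df.select('term').distinct(),
                       on='term', how='left_semi')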
similarities/weekly_cosine_similarities.py
index 6ce30b8e4642049d5fbd15de785f6b3aebfbd389..45327c731a32a1bb9ddcf91bb2200194671d3a40 100755 (executable)
@@ -13,18 +13,23 @@ from similarities_helper import pull_tfidf, column_similarities, write_weekly_si
 from scipy.sparse import csr_matrix
 from multiprocessing import Pool, cpu_count
 from functools import partial
-
-infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_10k.parquet"
-tfidf_path = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_authors_compex.parquet"
-min_df=None
-included_subreddits="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/included_subreddits.txt"
-max_df = None
-topN=100
-term_colname='author'
-# outfile = '/gscratch/comdata/output/reddit_similarity/weekly/comment_authors_test.parquet'
-# included_subreddits=None
-
-def _week_similarities(week, simfunc, tfidf_path, term_colname, min_df, max_df, included_subreddits, topN, outdir:Path, subreddit_names, nterms):
+import pickle
+
+# tfidf_path = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity_weekly/comment_authors_tfidf.parquet"
+# #tfidf_path = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data//comment_authors_compex.parquet"
+# min_df=2
+# included_subreddits="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/included_subreddits.txt"
+# max_df = None
+# topN=100
+# term_colname='author'
+# # outfile = '/gscratch/comdata/output/reddit_similarity/weekly/comment_authors_test.parquet'
+# # included_subreddits=None
+outfile="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity_weekly/comment_authors.parquet"; infile="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf_weekly/comment_authors_tfidf.parquet"; included_subreddits="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/included_subreddits.txt"; lsi_model="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_authors_compex_LSI/2000_authors_LSIMOD.pkl"; n_components=1500; algorithm="randomized"; term_colname='author'; tfidf_path=infile; random_state=1968;
+
+# static_tfidf = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_authors_compex.parquet"
+# dftest = spark.read.parquet(static_tfidf)
+
+def _week_similarities(week, simfunc, tfidf_path, term_colname, included_subreddits, outdir:Path, subreddit_names, nterms, topN=None, min_df=None, max_df=None):
     term = term_colname
     term_id = term + '_id'
     term_id_new = term + '_id_new'
@@ -32,20 +37,19 @@ def _week_similarities(week, simfunc, tfidf_path, term_colname, min_df, max_df,
 
     entries = pull_tfidf(infile = tfidf_path,
                          term_colname=term_colname,
-                         min_df=min_df,
-                         max_df=max_df,
                          included_subreddits=included_subreddits,
                          topN=topN,
-                         week=week,
+                         week=week.isoformat(),
                          rescale_idf=False)
     
     tfidf_colname='tf_idf'
     # if the max subreddit id we found is less than the number of subreddit names then we have to fill in 0s
     mat = csr_matrix((entries[tfidf_colname],(entries[term_id_new]-1, entries.subreddit_id_new-1)),shape=(nterms,subreddit_names.shape[0]))
-
     print('computing similarities')
+    print(simfunc)
     sims = simfunc(mat)
     del mat
+    sims = next(sims)[0]
     sims = pd.DataFrame(sims)
     sims = sims.rename({i: sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1)
     sims['_subreddit'] = subreddit_names.subreddit.values
@@ -56,18 +60,20 @@ def pull_weeks(batch):
     return set(batch.to_pandas()['week'])
 
 # This requires a prefit LSI model, since we shouldn't fit different LSI models for every week. 
-def cosine_similarities_weekly_lsi(n_components=100, lsi_model=None, *args, **kwargs):
+def cosine_similarities_weekly_lsi(*args, n_components=100, lsi_model=None, **kwargs):
+    print(args)
+    print(kwargs)
     term_colname= kwargs.get('term_colname')
-    #lsi_model = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_terms_compex_LSI/1000_term_LSIMOD.pkl"
-
-    # simfunc = partial(lsi_column_similarities,n_components=n_components,n_iter=n_iter,random_state=random_state,algorithm='randomized',lsi_model_load=lsi_model)
+    # lsi_model = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_authors_compex_LSI/1000_author_LSIMOD.pkl"
 
-    simfunc = partial(lsi_column_similarities,n_components=n_components,n_iter=kwargs.get('n_iter'),random_state=kwargs.get('random_state'),algorithm=kwargs.get('algorithm'),lsi_model_load=lsi_model)
+    lsi_model = pickle.load(open(lsi_model,'rb'))
+    #simfunc = partial(lsi_column_similarities,n_components=n_components,random_state=random_state,algorithm='randomized',lsi_model=lsi_model)
+    simfunc = partial(lsi_column_similarities,n_components=n_components,random_state=kwargs.get('random_state'),lsi_model=lsi_model)
 
     return cosine_similarities_weekly(*args, simfunc=simfunc, **kwargs)
 
 #tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_weekly.parquet')
-def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, max_df=None, included_subreddits = None, topN = 500, simfunc=column_similarities):
+def cosine_similarities_weekly(tfidf_path, outfile, term_colname, included_subreddits = None, topN = None, simfunc=column_similarities, min_df=None,max_df=None):
     print(outfile)
     # do this step in parallel if we have the memory for it.
     # should be doable with pool.map
@@ -84,12 +90,14 @@ def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None,
     spark.stop()
 
     print(f"computing weekly similarities")
-    week_similarities_helper = partial(_week_similarities,simfunc=simfunc, tfidf_path=tfidf_path, term_colname=term_colname, outdir=outfile, min_df=min_df,max_df=max_df,included_subreddits=included_subreddits,topN=topN, subreddit_names=subreddit_names,nterms=nterms)
+    week_similarities_helper = partial(_week_similarities,simfunc=simfunc, tfidf_path=tfidf_path, term_colname=term_colname, outdir=outfile, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=None, subreddit_names=subreddit_names,nterms=nterms)
 
-    pool = Pool(cpu_count())
-    
-    list(pool.imap(week_similarities_helper,weeks))
-    pool.close()
+    for week in weeks:
+        week_similarities_helper(week)
+    # pool = Pool(cpu_count())
+        
+    # list(pool.imap(week_similarities_helper, weeks))
+    # pool.close()
     #    with Pool(cpu_count()) as pool: # maybe it can be done with 40 cores on the huge machine?
 
 
@@ -97,10 +105,11 @@ def author_cosine_similarities_weekly(outfile, infile='/gscratch/comdata/output/
     return cosine_similarities_weekly(infile,
                                       outfile,
                                       'author',
-                                      min_df,
-                                      max_df,
-                                      included_subreddits,
-                                      topN)
+                                      max_df=max_df,
+                                      included_subreddits=included_subreddits,
+                                      topN=topN,
+                                      min_df=2)
 
 def term_cosine_similarities_weekly(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', min_df=None, max_df=None, included_subreddits=None, topN=None):
         return cosine_similarities_weekly(infile,
@@ -112,32 +121,29 @@ def term_cosine_similarities_weekly(outfile, infile='/gscratch/comdata/output/re
                                           topN)
 
 
-def author_cosine_similarities_weekly_lsi(outfile, infile = '/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet', min_df=2, max_df=None, included_subreddits=None, topN=None,n_components=100,lsi_model=None):
+def author_cosine_similarities_weekly_lsi(outfile, infile = '/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet', included_subreddits=None, n_components=100,lsi_model=None):
     return cosine_similarities_weekly_lsi(infile,
                                           outfile,
                                           'author',
-                                          min_df,
-                                          max_df,
-                                          included_subreddits,
-                                          topN,
+                                          included_subreddits=included_subreddits,
                                           n_components=n_components,
-                                          lsi_model=lsi_model)
+                                          lsi_model=lsi_model
+                                          )
 
 
-def term_cosine_similarities_weekly_lsi(outfile, infile = '/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', min_df=None, max_df=None, included_subreddits=None, topN=500,n_components=100,lsi_model=None):
+def term_cosine_similarities_weekly_lsi(outfile, infile = '/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', included_subreddits=None, n_components=100,lsi_model=None):
         return cosine_similarities_weekly_lsi(infile,
                                               outfile,
                                               'term',
-                                              min_df,
-                                              max_df,
-                                              included_subreddits,
-                                              topN,
+                                              included_subreddits=included_subreddits,
                                               n_components=n_components,
-                                              lsi_model=lsi_model)
+                                              lsi_model=lsi_model,
+                                              )
 
 if __name__ == "__main__":
     fire.Fire({'authors':author_cosine_similarities_weekly,
                'terms':term_cosine_similarities_weekly,
                'authors-lsi':author_cosine_similarities_weekly_lsi,
-               'terms-lsi':term_cosine_similarities_weekly
+               'terms-lsi':term_cosine_similarities_weekly_lsi
                })
+
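
The substantive change in this file is that the weekly LSI path now loads one prefit model with pickle instead of refitting per week, so every week's similarities live in the same latent space. A sketch of that fit-once, project-weekly pattern, again assuming a TruncatedSVD-style model (the file name and shapes are illustrative):

    import pickle
    import numpy as np
    from sklearn.decomposition import TruncatedSVD

    # Fit once on the static (all-time) TF-IDF matrix and save the model.
    static_mat = np.random.rand(500, 300)   # stand-in for the real matrix
    mod = TruncatedSVD(n_components=100, algorithm='randomized',
                       random_state=1968)
    mod.fit(static_mat)
    with open('authors_LSIMOD.pkl', 'wb') as f:
        pickle.dump(mod, f)

    # Later, project each week's matrix with the same model so that
    # weekly similarities are comparable across weeks.
    mod = pickle.load(open('authors_LSIMOD.pkl', 'rb'))
    week_embedding = mod.transform(np.random.rand(40, 300))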
timeseries/cluster_timeseries.py
index 91fa705af34f4242d05f8e7d28e4d212e0fc1419..2286ab0cad083307fbe977344f96a35f8b6a1c41 100644 (file)
@@ -12,10 +12,6 @@ def build_cluster_timeseries(term_clusters_path="/gscratch/comdata/output/reddit
          author_densities_path="/gscratch/comdata/output/reddit_density/comment_authors_10000.feather",
          output="data/subreddit_timeseries.parquet"):
 
-
-    clusters = load_clusters(term_clusters_path, author_clusters_path)
-    densities = load_densities(term_densities_path, author_densities_path)
-    
     spark = SparkSession.builder.getOrCreate()
     
     df = spark.read.parquet("/gscratch/comdata/output/reddit_comments_by_subreddit.parquet")
@@ -26,11 +22,15 @@ def build_cluster_timeseries(term_clusters_path="/gscratch/comdata/output/reddit
     ts = df.select(['subreddit','week','author']).distinct().groupby(['subreddit','week']).count()
     
     ts = ts.repartition('subreddit')
-    spk_clusters = spark.createDataFrame(clusters)
+
+    if term_densities_path is not None and author_densities_path is not None:
+        densities = load_densities(term_densities_path, author_densities_path)
+        spk_densities = spark.createDataFrame(densities)
+        ts = ts.join(spk_densities, on='subreddit', how='inner')
     
+    clusters = load_clusters(term_clusters_path, author_clusters_path)
+    spk_clusters = spark.createDataFrame(clusters)
     ts = ts.join(spk_clusters, on='subreddit', how='inner')
-    spk_densities = spark.createDataFrame(densities)
-    ts = ts.join(spk_densities, on='subreddit', how='inner')
     ts.write.parquet(output, mode='overwrite')
 
 if __name__ == "__main__":
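
The restructuring above defers loading until it is needed and makes the density join optional: the timeseries is built first, densities are joined only when both density paths are provided, and the cluster join always runs. A compact sketch of that guarded-join pattern (the helper name is hypothetical; names otherwise mirror the diff):

    def attach_metadata(ts, spark, clusters, densities=None):
        # Join densities only when they were loaded; clusters are required.
        if densities is not None:
            ts = ts.join(spark.createDataFrame(densities),
                         on='subreddit', how='inner')
        return ts.join(spark.createDataFrame(clusters),
                       on='subreddit', how='inner')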
