from itertools import islice
from pathlib import Path
from similarities_helper import *
+import pandas as pd
+import fire
+from functools import partial
+from pyspark.sql import SparkSession
+from multiprocessing import Pool, cpu_count
+def _week_similarities(week, tempdir_path, term_colname, subreddit_names, outfile):
+    # runs in a worker process: load one week's tfidf matrix, compute pairwise
+    # cosine similarities between its columns (subreddits), and write them out.
+    # everything it needs is passed in explicitly so it can be pickled for Pool.map.
+    print(f"loading matrix: {week}")
+    mat = read_tfidf_matrix_weekly(tempdir_path, term_colname, week)
+    print('computing similarities')
+    sims = column_similarities(mat)
+    del mat
+
+    names = subreddit_names.loc[subreddit_names.week == week]
+    sims = pd.DataFrame(sims.todense())
+
+    sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1)
+    sims['_subreddit'] = names.subreddit.values
+
+    write_weekly_similarities(outfile, sims, week, names)
#tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_weekly.parquet')
def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, included_subreddits = None, topN = 500):
+    spark = SparkSession.builder.getOrCreate()
+    tfidf = spark.read.parquet(tfidf_path)
+    if included_subreddits is None:
+        # assume select_topN_subreddits (from the similarities_helper star import)
+        # picks the topN most active subreddits
+        included_subreddits = select_topN_subreddits(topN)
    print(f"computing weekly similarities for {len(included_subreddits)} subreddits")
print("creating temporary parquet with matrix indicies")
- tempdir = prep_tfidf_entries_weekly(tfidf, term_colname, min_df, included_subreddits)
+ tempdir = prep_tfidf_entries_weekly(tfidf, term_colname, min_df, max_df=None, included_subreddits=included_subreddits)
    tfidf = spark.read.parquet(tempdir.name)
+    # subreddit ids can change from week to week, so build the week -> name mapping
+    # from the reindexed entries; subreddit_id_new is assumed to be the 1-based
+    # matrix index assigned by prep_tfidf_entries_weekly
+    subreddit_names = tfidf.select(['subreddit','subreddit_id_new','week']).distinct().toPandas()
+    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
+    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1
    spark.stop()
weeks = sorted(list(subreddit_names.week.drop_duplicates()))
- for week in weeks:
- print(f"loading matrix: {week}")
- mat = read_tfidf_matrix_weekly(tempdir.name, term_colname, week)
- print('computing similarities')
- sims = column_similarities(mat)
- del mat
+    # do the per-week step in parallel if we have the memory for it;
+    # pool.map distributes the weeks across worker processes
- names = subreddit_names.loc[subreddit_names.week == week]
- sims = pd.DataFrame(sims.todense())
-
- sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1)
- sims['subreddit'] = names.subreddit.values
-
- write_weekly_similarities(outfile, sims, week, names)
+    # a function defined inside another function can't be pickled for Pool.map,
+    # so bind the per-run arguments with functools.partial instead
+    week_similarities_helper = partial(_week_similarities, tempdir_path=tempdir.name,
+                                       term_colname=term_colname, subreddit_names=subreddit_names, outfile=outfile)
+    with Pool(cpu_count()) as pool: # maybe it can be done with 40 cores on the huge machine?
+        list(pool.map(week_similarities_helper, weeks))
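+    # note: each worker holds one week's dense similarity matrix in memory at once,
+    # so memory rather than cores is the likely constraint; a capped pool,
+    # e.g. Pool(min(cpu_count(), 8)), may be a safer default on shared machines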
-def author_cosine_similarities_weekly(outfile, min_df=None , included_subreddits=None, topN=500):
+def author_cosine_similarities_weekly(outfile, min_df=2, included_subreddits=None, topN=500):
return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet',
outfile,
'author',
-                                      topN)
+                                      min_df=min_df,
+                                      included_subreddits=included_subreddits,
+                                      topN=topN)
if __name__ == "__main__":
- fire.Fire({'author':author_cosine_similarities_weekly,
- 'term':term_cosine_similarities_weekly})
+    fire.Fire({'authors': author_cosine_similarities_weekly,
+               'terms': term_cosine_similarities_weekly})
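+
+# hypothetical invocation (the script name and output path are placeholders):
+#   python3 weekly_cosine_similarities.py authors /path/to/weekly_author_sims.parquet --topN=500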