X-Git-Url: https://code.communitydata.science/cdsc_reddit.git/blobdiff_plain/4dc949de5fb8d3eac04bae125c819100002c9522..53f5b8c03c55aab7fa535a851c61d47e5bf65857:/similarities/similarities_helper.py?ds=sidebyside

diff --git a/similarities/similarities_helper.py b/similarities/similarities_helper.py
index 9e33c9d..3ace8f2 100644
--- a/similarities/similarities_helper.py
+++ b/similarities/similarities_helper.py
@@ -60,7 +60,7 @@ def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subre
     if included_subreddits is None:
         included_subreddits = select_topN_subreddits(topN)
     else:
-        included_subreddits = set(open(included_subreddits))
+        included_subreddits = set(map(str.strip,map(str.lower,open(included_subreddits))))
 
     if exclude_phrases == True:
         tfidf = tfidf.filter(~f.col(term_colname).contains("_"))
@@ -89,7 +89,8 @@ def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=Non
     print("loading matrix")
     # mat = read_tfidf_matrix("term_tfidf_entries7ejhvnvl.parquet", term_colname)
     mat = read_tfidf_matrix(tempdir.name, term_colname, tfidf_colname)
-    print('computing similarities')
+    print(f'computing similarities on mat. mat.shape:{mat.shape}')
+    print(f"size of mat is:{mat.data.nbytes}")
     sims = simfunc(mat)
     del mat
@@ -387,7 +388,7 @@ def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm
 
     return df
 
-def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments_nonswf.csv"):
+def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments_nonsfw.csv"):
     rankdf = pd.read_csv(path)
     included_subreddits = set(rankdf.loc[rankdf.comments_rank <= topN,'subreddit'].values)
     return included_subreddits
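
Note (not part of the patch): a minimal standalone sketch of the two behaviors this diff introduces, namely normalizing the included-subreddits file to lowercased, whitespace-stripped names, and printing the shape and data-buffer size of the term matrix before computing similarities. The file path and the randomly generated SciPy sparse matrix below are placeholders for illustration, not objects from this repository.

    # Illustrative sketch only; "subreddits.txt" and the random matrix are
    # placeholders, not files or objects from this repository.
    from scipy.sparse import random as sparse_random

    def load_included_subreddits(path):
        # Lowercase and strip each line, mirroring
        # set(map(str.strip, map(str.lower, open(path)))) in the patch.
        with open(path) as f:
            return set(line.lower().strip() for line in f)

    # Report matrix shape and size of the nonzero-value buffer, as the new
    # print statements in similarities() do.
    mat = sparse_random(1000, 500, density=0.01, format='csr')
    print(f'computing similarities on mat. mat.shape:{mat.shape}')
    print(f"size of mat is:{mat.data.nbytes}")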