code.communitydata.science - cdsc_reddit.git/blobdiff - similarities/similarities_helper.py
Updating to support wang-style user overlaps.
[cdsc_reddit.git] / similarities / similarities_helper.py
index 88c830cacf7d5971e6e885882b1771d0c864183b..69516a6abfd69b28e70f7bbf02ec923745da10b3 100644 (file)
@@ -1,3 +1,4 @@
+from pyspark.sql import SparkSession
 from pyspark.sql import Window
 from pyspark.sql import functions as f
 from enum import Enum
@@ -5,15 +6,108 @@ from pyspark.mllib.linalg.distributed import CoordinateMatrix
 from tempfile import TemporaryDirectory
 import pyarrow
 import pyarrow.dataset as ds
-from scipy.sparse import csr_matrix
+from scipy.sparse import csr_matrix, issparse
 import pandas as pd
 import numpy as np
 import pathlib
+from datetime import datetime
+from pathlib import Path
 
 class tf_weight(Enum):
     MaxTF = 1
     Norm05 = 2
 
+infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet"
+
+def reindex_tfidf_time_interval(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None):
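+    # Build tf-idf entries over the window [from_date, to_date): sum the weekly
+    # term counts in the interval, recompute tf-idf, and write matrix-indexed
+    # entries to a temporary parquet directory.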
+    term = term_colname
+    term_id = term + '_id'
+    term_id_new = term + '_id_new'
+
+    spark = SparkSession.builder.getOrCreate()
+    conf = spark.sparkContext.getConf()
+    print(f"exclude_phrases: {exclude_phrases}")
+    tfidf_weekly = spark.read.parquet(infile)
+
+    # create the time interval
+    if from_date is not None:
+        if isinstance(from_date, str):
+            from_date = datetime.fromisoformat(from_date)
+
+        tfidf_weekly = tfidf_weekly.filter(tfidf_weekly.week >= from_date)
+
+    if to_date is not None:
+        if isinstance(to_date, str):
+            to_date = datetime.fromisoformat(to_date)
+        tfidf_weekly = tfidf_weekly.filter(tfidf_weekly.week < to_date)
+
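+    # collapse the weekly rows into a single count per (subreddit, term) over the interval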
+    tfidf = tfidf_weekly.groupBy(["subreddit", "subreddit_id", term_id, term]).agg(f.sum("tf").alias("tf"))
+    tfidf = _calc_tfidf(tfidf, term_colname, tf_weight.Norm05)
+    if included_subreddits is None:
+        included_subreddits = select_topN_subreddits(topN)
+    else:
+        included_subreddits = set(map(str.strip, open(included_subreddits)))
+
+    tempdir = prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits)
+    tfidf = spark.read.parquet(tempdir.name)
+    subreddit_names = tfidf.select(['subreddit','subreddit_id_new']).distinct().toPandas()
+    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
+    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1
+    return(tempdir, subreddit_names)
+
+def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False):
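+    # As reindex_tfidf_time_interval, but over the full tfidf dataset: resolve the
+    # included subreddits, optionally drop phrase terms, and write matrix-indexed entries.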
+    spark = SparkSession.builder.getOrCreate()
+    conf = spark.sparkContext.getConf()
+    print(f"exclude_phrases: {exclude_phrases}")
+
+    tfidf = spark.read.parquet(infile)
+
+    if included_subreddits is None:
+        included_subreddits = select_topN_subreddits(topN)
+    else:
+        included_subreddits = set(map(str.strip, open(included_subreddits)))
+
+    if exclude_phrases:
+        tfidf = tfidf.filter(~f.col(term_colname).contains("_"))
+
+    print("creating temporary parquet with matrix indicies")
+    tempdir = prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits)
+
+    tfidf = spark.read.parquet(tempdir.name)
+    subreddit_names = tfidf.select(['subreddit','subreddit_id_new']).distinct().toPandas()
+    subreddit_names = subreddit_names.sort_values("subreddit_id_new")
+    subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1
+    spark.stop()
+    return (tempdir, subreddit_names)
+
+def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None):
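+    # End-to-end driver: reindex the tfidf entries, load them as a sparse matrix,
+    # apply simfunc (e.g. column_similarities or column_overlaps), and save the
+    # resulting subreddit-by-subreddit table.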
+
+    if from_date is not None or to_date is not None:
+        tempdir, subreddit_names = reindex_tfidf_time_interval(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=exclude_phrases, from_date=from_date, to_date=to_date)
+
+    else:
+        tempdir, subreddit_names = reindex_tfidf(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=exclude_phrases)
+
+    print("loading matrix")
+    #    mat = read_tfidf_matrix("term_tfidf_entries7ejhvnvl.parquet", term_colname)
+    mat = read_tfidf_matrix(tempdir.name, term_colname)
+    print('computing similarities')
+    sims = simfunc(mat)
+    del mat
+
+    if issparse(sims):
+        sims = sims.todense()
+
+    print(f"shape of sims:{sims.shape}")
+    print(f"len(subreddit_names.subreddit.values):{len(subreddit_names.subreddit.values)}")
+    sims = pd.DataFrame(sims)
+    sims = sims.rename({i:sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1)
+    sims['subreddit'] = subreddit_names.subreddit.values
+
+    p = Path(outfile)
+
+    output_feather = Path(str(p).replace("".join(p.suffixes), ".feather"))
+    output_csv = Path(str(p).replace("".join(p.suffixes), ".csv"))
+    output_parquet = Path(str(p).replace("".join(p.suffixes), ".parquet"))
+
+    sims.to_feather(output_feather)
+    tempdir.cleanup()
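+
+# Example (hypothetical paths): cosine similarities between the top 1000 subreddits,
+# based on their commenting authors:
+#   similarities("/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet",
+#                column_similarities, 'author', "author_cosine.feather", topN=1000)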
+
 def read_tfidf_matrix_weekly(path, term_colname, week):
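+    # As read_tfidf_matrix below, but restricted to a single week's entries.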
     term = term_colname
     term_id = term + '_id'
@@ -33,8 +127,6 @@ def write_weekly_similarities(path, sims, week, names):
     sims = sims.melt(id_vars=['subreddit','week'],value_vars=names.subreddit.values)
     sims.to_parquet(p / week.isoformat())
 
-
-
 def read_tfidf_matrix(path,term_colname):
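+    # Load the matrix-indexed entries and build a sparse term-by-subreddit matrix
+    # (ids are 1-indexed on disk, hence the -1 shifts).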
     term = term_colname
     term_id = term + '_id'
@@ -44,6 +136,15 @@ def read_tfidf_matrix(path,term_colname):
     entries = dataset.to_table(columns=['tf_idf','subreddit_id_new',term_id_new]).to_pandas()
     return(csr_matrix((entries.tf_idf,(entries[term_id_new]-1, entries.subreddit_id_new-1))))
     
+def column_overlaps(mat):
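+    # Jaccard-style ("wang") overlap between columns: |A & B| / (|A| + |B| - |A & B|),
+    # where a column's set is the rows in which it is nonzero.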
+    non_zeros = (mat != 0).astype('double')
+    
+    intersection = non_zeros.T @ non_zeros
+    card1 = non_zeros.sum(axis=0)
+    den = np.add.outer(card1,card1) - intersection
+
+    return intersection / den
+    
 def column_similarities(mat):
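+    # Cosine similarity between columns: L2-normalize each column, then take the
+    # crossproduct of the normalized matrix with itself.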
     norm = np.matrix(np.power(mat.power(2).sum(axis=0),0.5,dtype=np.float32))
     mat = mat.multiply(1/norm)
@@ -51,13 +152,16 @@ def column_similarities(mat):
     return(sims)
 
 
-def prep_tfidf_entries_weekly(tfidf, term_colname, min_df, included_subreddits):
+def prep_tfidf_entries_weekly(tfidf, term_colname, min_df, max_df, included_subreddits):
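+    # Weekly variant of prep_tfidf_entries: filter terms by document frequency,
+    # restrict to included_subreddits, and assign dense matrix indices.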
     term = term_colname
     term_id = term + '_id'
     term_id_new = term + '_id_new'
 
     if min_df is None:
         min_df = 0.1 * len(included_subreddits)
+    tfidf = tfidf.filter(f.col('count') >= min_df)
+    if max_df is not None:
+        tfidf = tfidf.filter(f.col('count') <= max_df)
 
     tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits))
 
@@ -86,19 +190,22 @@ def prep_tfidf_entries_weekly(tfidf, term_colname, min_df, included_subreddits):
     return(tempdir)
     
 
-def prep_tfidf_entries(tfidf, term_colname, min_df, included_subreddits):
+def prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits):
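+    # Filter terms by document frequency, restrict to included_subreddits, and
+    # assign contiguous matrix indices for subreddits and terms.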
     term = term_colname
     term_id = term + '_id'
     term_id_new = term + '_id_new'
 
     if min_df is None:
         min_df = 0.1 * len(included_subreddits)
+    tfidf = tfidf.filter(f.col('count') >= min_df)
+    if max_df is not None:
+        tfidf = tfidf.filter(f.col('count') <= max_df)
 
     tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits))
 
     # reset the subreddit ids
     sub_ids = tfidf.select('subreddit_id').distinct()
-    sub_ids = sub_ids.withColumn("subreddit_id_new",f.row_number().over(Window.orderBy("subreddit_id")))
+    sub_ids = sub_ids.withColumn("subreddit_id_new", f.row_number().over(Window.orderBy("subreddit_id")))
     tfidf = tfidf.join(sub_ids,'subreddit_id')
 
     # only use terms in at least min_df included subreddits
@@ -221,15 +328,9 @@ def build_weekly_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weig
 
     return df
 
-
-
-def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm05):
-
+def _calc_tfidf(df, term_colname, tf_family):
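+    # Shared tf-idf core: compute each term's tf relative to its subreddit's max tf,
+    # a smoothed idf, and combine them according to tf_family.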
     term = term_colname
     term_id = term + '_id'
-    # aggregate counts by week. now subreddit-term is distinct
-    df = df.filter(df.subreddit.isin(include_subs))
-    df = df.groupBy(['subreddit',term]).agg(f.sum('tf').alias('tf'))
 
     max_subreddit_terms = df.groupby(['subreddit']).max('tf') # subreddits are unique
     max_subreddit_terms = max_subreddit_terms.withColumnRenamed('max(tf)','sr_max_tf')
@@ -240,9 +341,7 @@ def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm
 
     # group by term. term is unique
     idf = df.groupby([term]).count()
-
     N_docs = df.select('subreddit').distinct().count()
-
     # add a little smoothing to the idf
     idf = idf.withColumn('idf',f.log(N_docs/(1+f.col('count')))+1)
 
@@ -271,6 +370,18 @@ def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm
         df = df.withColumn("tf_idf",  (0.5 + 0.5 * df.relative_tf) * df.idf)
 
     return df
+    
+
+def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm05):
+    term = term_colname
+    term_id = term + '_id'
+    # aggregate counts across weeks so that (subreddit, term) is distinct
+    df = df.filter(df.subreddit.isin(include_subs))
+    df = df.groupBy(['subreddit',term]).agg(f.sum('tf').alias('tf'))
+
+    df = _calc_tfidf(df, term_colname, tf_family)
+
+    return df
 
 def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv"):
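+    # select the topN subreddits by number of comments from the rankings csv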
     rankdf = pd.read_csv(path)
