code.communitydata.science - cdsc_reddit.git/commitdiff
support isolates in visualization
author Nate E TeBlunthuis <nathante@klone-login01.hyak.local>
Fri, 14 May 2021 05:26:58 +0000 (22:26 -0700)
committer Nate E TeBlunthuis <nathante@klone-login01.hyak.local>
Fri, 14 May 2021 05:26:58 +0000 (22:26 -0700)
ngrams/tf_comments.py
similarities/Makefile
similarities/similarities_helper.py
visualization/tsne_vis.py

diff --git a/ngrams/tf_comments.py b/ngrams/tf_comments.py
index f86548a957a866b56d4dec6e9b4f813b2a4b5fa2..a40e5d93914a9dbda0f58853a549d5ffd5e98a4e 100755 (executable)
@@ -13,10 +13,7 @@ from nltk.corpus import stopwords
 from nltk.util import ngrams
 import string
 from random import random
-
-# remove urls
-# taken from https://stackoverflow.com/questions/3809401/what-is-a-good-regular-expression-to-match-a-url
-urlregex = re.compile(r"[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)")
+from redditcleaner import clean
 
 # compute term frequencies for comments in each subreddit by week
 def weekly_tf(partition, mwe_pass = 'first'):
@@ -95,8 +92,8 @@ def weekly_tf(partition, mwe_pass = 'first'):
         # lowercase        
         text = text.lower()
 
-        # remove urls
-        text = urlregex.sub("", text)
+        # redditcleaner removes reddit markdown (newlines, quotes, bullet points, links, strikethrough, spoiler, code, superscript, table, headings)
+        text = clean(text)
 
         # sentence tokenize
         sentences = sent_tokenize(text)
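The hunk above swaps the hand-rolled URL regex for redditcleaner's clean(). A minimal sketch, not part of this commit, of the same preprocessing order on a made-up comment; it assumes the redditcleaner package and NLTK's punkt data are installed.

# Sketch only: lowercase -> clean -> sentence tokenize, as in weekly_tf above.
from nltk.tokenize import sent_tokenize
from redditcleaner import clean

text = "Check [this](https://example.com) out.\n\n> quoted reply\n\n~~removed~~ **bold**"
text = text.lower()
# redditcleaner strips reddit markdown (links, quotes, strikethrough, code, ...)
text = clean(text)
sentences = sent_tokenize(text)
print(sentences)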
@@ -107,14 +104,13 @@ def weekly_tf(partition, mwe_pass = 'first'):
         # remove punctuation
                         
         sentences = map(remove_punct, sentences)
-
-        # remove sentences with less than 2 words
-        sentences = filter(lambda sentence: len(sentence) > 2, sentences)
-
         # datta et al. select relatively common phrases from the reddit corpus, but they don't really explain how. We'll try that in a second phase.
         # they say that they extract 1-4 grams from 10% of the sentences and then find phrases that appear often relative to the original terms
         # here we take a 10 percent sample of sentences 
         if mwe_pass == 'first':
+
+            # remove sentences with less than 2 words
+            sentences = filter(lambda sentence: len(sentence) > 2, sentences)
             sentences = list(sentences)
             for sentence in sentences:
                 if random() <= 0.1:
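A minimal sketch, not the repository's code, of the first-pass sampling described in the comments above: keep sentences longer than two tokens, sample roughly 10% of them, and emit 1-4 grams from each sampled sentence. The helper name and example tokens are made up.

from random import random
from nltk.util import ngrams

def sample_mwe_candidates(sentences, max_n=4, rate=0.1):
    # sentences are assumed to already be tokenized into lists of words
    for sentence in sentences:
        if len(sentence) > 2 and random() <= rate:
            for n in range(1, max_n + 1):
                for gram in ngrams(sentence, n):
                    yield gram

example = [["the", "quick", "brown", "fox", "jumps"], ["hi", "there"]]
print(list(sample_mwe_candidates(example, rate=1.0)))  # rate=1.0 only to make the toy output deterministic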
diff --git a/similarities/Makefile b/similarities/Makefile
index cfe8a49b97ffe5d9ac43c45dd5d58bb8423df159..f578fd5105a86af777fc4ee9c5868bb94f4405cc 100644 (file)
@@ -1,7 +1,7 @@
 #all: /gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_130k.parquet /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_130k.parquet /gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms_130k.parquet /gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_130k.parquet
 srun_singularity=source /gscratch/comdata/users/nathante/cdsc_reddit/bin/activate && srun_singularity.sh
 srun_singularity_huge=source /gscratch/comdata/users/nathante/cdsc_reddit/bin/activate && srun_singularity_huge.sh
-base_data=/gscratch/comdata/output/
+base_data=/gscratch/comdata/output
 similarity_data=${base_data}/reddit_similarity
 tfidf_data=${similarity_data}/tfidf
 tfidf_weekly_data=${similarity_data}/tfidf_weekly
@@ -97,7 +97,7 @@ ${tfidf_data}/tfidf_weekly/comment_authors_100k.parquet: /gscratch/comdata/outpu
        start_spark_and_run.sh 4 tfidf.py authors_weekly --topN=100000 --outpath=${tfidf_weekly_data}/comment_authors_100k.parquet
 
 ${tfidf_weekly_data}/comment_terms_30k.parquet:  /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
-       start_spark_and_run.sh 4 tfidf.py terms_weekly --topN=30000 --outpath=${tfidf_weekly_data}/comment_authors_30k.parquet
+       start_spark_and_run.sh 2 tfidf.py terms_weekly --topN=30000 --outpath=${tfidf_weekly_data}/comment_authors_30k.parquet
 
 ${tfidf_weekly_data}/comment_authors_30k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
        start_spark_and_run.sh 4 tfidf.py authors_weekly --topN=30000 --outpath=${tfidf_weekly_data}/comment_authors_30k.parquet
diff --git a/similarities/similarities_helper.py b/similarities/similarities_helper.py
index 7f8a639aeecf255ed3db0e47f4ad14769cb5ceb4..e59563e396bc0988cf645dc80a6cba27997a512e 100644 (file)
@@ -23,9 +23,6 @@ class tf_weight(Enum):
 infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet"
 cache_file = "/gscratch/comdata/users/nathante/cdsc_reddit/similarities/term_tfidf_entries_bak.parquet"
 
-def termauthor_tfidf(term_tfidf_callable, author_tfidf_callable):
-    
-
 # subreddits missing after this step don't have any terms that have a high enough idf
 # try rewriting without merges
 def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, week=None, from_date=None, to_date=None, rescale_idf=True, tf_family=tf_weight.MaxTF):
@@ -283,7 +280,7 @@ def build_weekly_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weig
         df = df.withColumn("tf_idf",  (0.5 + 0.5 * df.relative_tf) * df.idf)
 
     df = df.repartition(400,'subreddit','week')
-    dfwriter = df.write.partitionBy("week").sortBy("subreddit")
+    dfwriter = df.write.partitionBy("week")
     return dfwriter
 
 def _calc_tfidf(df, term_colname, tf_family):
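A minimal PySpark sketch, not part of this commit, of the smoothed weighting and week-partitioned write shown above; the toy rows and output path are made up. Note that DataFrameWriter.sortBy only works together with bucketBy, so a plain partitioned parquet write cannot keep the sortBy call.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("askreddit", "2020-01-06", 0.8, 2.1), ("science", "2020-01-06", 0.3, 3.4)],
    ["subreddit", "week", "relative_tf", "idf"])

# the MaxTF-style weighting from the hunk above
df = df.withColumn("tf_idf", (0.5 + 0.5 * df.relative_tf) * df.idf)
df = df.repartition(4, "subreddit", "week")
df.write.partitionBy("week").mode("overwrite").parquet("/tmp/tfidf_by_week.parquet")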
@@ -339,7 +336,7 @@ def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm
 
     df = _calc_tfidf(df, term_colname, tf_family)
     df = df.repartition('subreddit')
-    dfwriter = df.write.sortBy("subreddit","tf")
+    dfwriter = df.write
     return dfwriter
 
 def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments_nonsfw.csv"):
diff --git a/visualization/tsne_vis.py b/visualization/tsne_vis.py
index c39a7400e5e5c2ab726eb0a692e4180536d72ce5..eb6a6be840c0497810c49a8c84f0610fb828d9db 100644 (file)
@@ -22,8 +22,12 @@ def base_plot(plot_data):
     #
     #    subreddit_select = alt.selection_single(on='click',fields=['subreddit'],bind=subreddit_dropdown,name='subreddit_click')
     
+    base_scale = alt.Scale(scheme={"name":'category10',
+                                   "extent":[0,100],
+                                   "count":10})
+
     color = alt.condition(cluster_click_select ,
-                          alt.Color(field='color',type='nominal',scale=alt.Scale(scheme='category10')),
+                          alt.Color(field='color',type='nominal',scale=base_scale),
                           alt.value("lightgray"))
   
     
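A minimal Altair (v4-style) sketch, not part of this commit, of wiring a click selection to a conditional color with a scale like base_scale; the data frame and the selection here are made up stand-ins for plot_data and cluster_click_select.

import altair as alt
import pandas as pd

plot_data = pd.DataFrame({"x": [0, 1, 2], "y": [0, 1, 2], "color": [0, 5, 101]})
cluster_click_select = alt.selection_multi(fields=["color"], on="click")

base_scale = alt.Scale(scheme={"name": "category10",
                               "extent": [0, 100],
                               "count": 10})

chart = alt.Chart(plot_data).mark_point().encode(
    x="x", y="y",
    color=alt.condition(cluster_click_select,
                        alt.Color(field="color", type="nominal", scale=base_scale),
                        alt.value("lightgray"))
).add_selection(cluster_click_select)
chart.save("toy_clusters.html")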
@@ -84,6 +88,11 @@ def viewport_plot(plot_data):
     return chart
 
 def assign_cluster_colors(tsne_data, clusters, n_colors, n_neighbors = 4):
+    isolate_color = 101
+
+    cluster_sizes = clusters.groupby('cluster').count()
+    singletons = set(cluster_sizes.loc[cluster_sizes.subreddit == 1].reset_index().cluster)
+
     tsne_data = tsne_data.merge(clusters,on='subreddit')
     
     centroids = tsne_data.groupby('cluster').agg({'x':np.mean,'y':np.mean})
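A toy pandas sketch, not part of this commit, showing what the new singleton test above picks out; the cluster labels are made up, with -1 standing in for an unclustered/noise label (the loop further down also treats cluster -1 as an isolate).

import pandas as pd

clusters = pd.DataFrame({"subreddit": ["a", "b", "c", "d"],
                         "cluster":   [0,   0,   1,   -1]})

cluster_sizes = clusters.groupby("cluster").count()
singletons = set(cluster_sizes.loc[cluster_sizes.subreddit == 1].reset_index().cluster)
print(singletons)  # clusters 1 and -1 each have one member, so both later get isolate_color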
@@ -120,15 +129,17 @@ def assign_cluster_colors(tsne_data, clusters, n_colors, n_neighbors = 4):
     color_assignments = np.repeat(-1,len(centroids))
 
     for i in range(len(centroids)):
-        knn = indices[i]
-        knn_colors = color_assignments[knn]
-        available_colors = color_ids[list(set(color_ids) - set(knn_colors))]
-
-        if(len(available_colors) > 0):
-            color_assignments[i] = available_colors[0]
+        if (centroids.iloc[i].name == -1) or (i in singletons):
+            color_assignments[i] = isolate_color
         else:
-            raise Exception("Can't color this many neighbors with this many colors")
+            knn = indices[i]
+            knn_colors = color_assignments[knn]
+            available_colors = color_ids[list(set(color_ids) - set(knn_colors))]
 
+            if(len(available_colors) > 0):
+                color_assignments[i] = available_colors[0]
+            else:
+                raise Exception("Can't color this many neighbors with this many colors")
 
     centroids = centroids.reset_index()
     colors = centroids.loc[:,['cluster']]
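The loop above relies on indices and color_ids, which are built earlier in assign_cluster_colors and are not part of this hunk. A hedged sketch of one plausible setup (an assumption, not the repository's exact code): a k-nearest-neighbors query over the cluster centroids plus a palette index array.

import numpy as np
from sklearn.neighbors import NearestNeighbors

# toy centroid coordinates; the real ones come from the tsne_data groupby above
centroids_xy = np.array([[0.0, 0.0], [1.0, 0.1], [5.0, 5.0]])
n_colors, n_neighbors = 10, 2

nbrs = NearestNeighbors(n_neighbors=n_neighbors).fit(centroids_xy)
_, indices = nbrs.kneighbors(centroids_xy)  # indices[i] = the neighbors of centroid i
color_ids = np.arange(n_colors)             # candidate color indices 0..n_colors-1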
@@ -143,12 +154,13 @@ def build_visualization(tsne_data, clusters, output):
     # clusters = "/gscratch/comdata/output/reddit_clustering/subreddit_author_tf_similarities_10000.feather"
 
     tsne_data = pd.read_feather(tsne_data)
+    tsne_data = tsne_data.rename(columns={'_subreddit':'subreddit'})
     clusters = pd.read_feather(clusters)
 
     tsne_data = assign_cluster_colors(tsne_data,clusters,10,8)
 
-    sr_per_cluster = tsne_data.groupby('cluster').subreddit.count().reset_index()
-    sr_per_cluster = sr_per_cluster.rename(columns={'subreddit':'cluster_size'})
+    sr_per_cluster = tsne_data.groupby('cluster').subreddit.count().reset_index()
+    sr_per_cluster = sr_per_cluster.rename(columns={'subreddit':'cluster_size'})
 
     tsne_data = tsne_data.merge(sr_per_cluster,on='cluster')
 
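A toy pandas sketch, not part of this commit, of the rename and cluster-size merge in this last hunk; the data is made up.

import pandas as pd

tsne_data = pd.DataFrame({"_subreddit": ["a", "b", "c"],
                          "x": [0.1, 0.2, 5.0],
                          "y": [0.0, 0.1, 4.0],
                          "cluster": [0, 0, 1]})
tsne_data = tsne_data.rename(columns={'_subreddit': 'subreddit'})

sr_per_cluster = tsne_data.groupby('cluster').subreddit.count().reset_index()
sr_per_cluster = sr_per_cluster.rename(columns={'subreddit': 'cluster_size'})

tsne_data = tsne_data.merge(sr_per_cluster, on='cluster')
print(tsne_data[['subreddit', 'cluster', 'cluster_size']])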
