# clustering/umap_hdbscan_clustering.py
from clustering_base import clustering_result, clustering_job, twoway_clustering_job
# read_similarity_mat and sim_to_dist are used by make_KNN_plots below; both are
# assumed to live in clustering_base alongside the imports above.
from clustering_base import read_similarity_mat, sim_to_dist
from hdbscan_clustering import hdbscan_clustering_result
import umap
from grid_sweep import twoway_grid_sweep
from dataclasses import dataclass
import hdbscan
from sklearn.neighbors import NearestNeighbors
import plotnine as pn
import numpy as np
from itertools import product, starmap, chain
import pandas as pd
from multiprocessing import cpu_count
import fire

def test_select_hdbscan_clustering():
    # select_hdbscan_clustering("/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_30k_LSI",
    #                           "test_hdbscan_author30k",
    #                           min_cluster_sizes=[2],
    #                           min_samples=[1,2],
    #                           cluster_selection_epsilons=[0,0.05,0.1,0.15],
    #                           cluster_selection_methods=['eom','leaf'],
    #                           lsi_dimensions='all')
    inpath = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k_LSI"
    outpath = "test_umap_hdbscan_lsi"
    min_cluster_sizes = [2,3,4]
    min_samples = [1,2,3]
    cluster_selection_epsilons = [0,0.1,0.3,0.5]
    cluster_selection_methods = ['eom']
    lsi_dimensions = 'all'  # used by the LSI variant (see the commented select_hdbscan_clustering call above)
    n_neighbors = [5,10,15,25,35,70,100]
    learning_rate = [0.1,0.5,1,2]
    # note: umap-learn raises a ValueError when min_dist exceeds spread (default 1.0),
    # so the 1.5 and 2 settings here would need spread raised to match.
    min_dist = [0.5,1,1.5,2]
    local_connectivity = [1,2,3,4,5]

    # keys must match the keyword arguments expected by umap_hdbscan_job and the namer below
    hdbscan_params = {"min_cluster_size":min_cluster_sizes, "min_samples":min_samples, "cluster_selection_epsilon":cluster_selection_epsilons, "cluster_selection_method":cluster_selection_methods}
    umap_params = {"n_neighbors":n_neighbors, "learning_rate":learning_rate, "min_dist":min_dist, "local_connectivity":local_connectivity, "n_components":[2], "densmap":[False]}
    gs = umap_hdbscan_grid_sweep(inpath, outpath, umap_params, hdbscan_params)

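    # for scale: the full cross product is (3*3*4*1) = 36 hdbscan settings times
    # (7*4*4*5*1*1) = 560 umap settings, i.e. 20,160 jobs, which is presumably why
    # the run()/save() calls below are left commented out.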
    # gs.run(20)
    # gs.save("test_hdbscan/lsi_sweep.csv")


    # job1 = hdbscan_lsi_job(infile=inpath, outpath=outpath, name="test", lsi_dims=500, min_cluster_size=2, min_samples=1,cluster_selection_epsilon=0,cluster_selection_method='eom')
    # job1.run()
    # print(job1.get_info())

    # df = pd.read_csv("test_hdbscan/selection_data.csv")
    # check_clusters = pd.read_feather("test_hdbscan/500_2_2_0.1_eom.feather")
    # silscores = pd.read_feather("test_hdbscan/silhouette_samples500_2_2_0.1_eom.feather")
    # c = check_clusters.merge(silscores, on='subreddit')


class umap_hdbscan_grid_sweep(twoway_grid_sweep):
    def __init__(self,
                 inpath,
                 outpath,
                 umap_params,
                 hdbscan_params):

        super().__init__(umap_hdbscan_job, inpath, outpath, self.namer, umap_params, hdbscan_params)

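    # twoway_grid_sweep (from grid_sweep.py) is assumed to expand the cross
    # product of umap_params and hdbscan_params into one umap_hdbscan_job per
    # combination, using self.namer to label each job's output.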
    def namer(self,
              min_cluster_size,
              min_samples,
              cluster_selection_epsilon,
              cluster_selection_method,
              n_components,
              n_neighbors,
              learning_rate,
              min_dist,
              local_connectivity,
              densmap
              ):
        return f"mcs-{min_cluster_size}_ms-{min_samples}_cse-{cluster_selection_epsilon}_csm-{cluster_selection_method}_nc-{n_components}_nn-{n_neighbors}_lr-{learning_rate}_md-{min_dist}_lc-{local_connectivity}_dm-{densmap}"

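# For example, the umap_hdbscan_job defaults below would name their output
# "mcs-2_ms-1_cse-0_csm-eom_nc-2_nn-15_lr-1_md-1_lc-1_dm-False".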
@dataclass
class umap_hdbscan_clustering_result(hdbscan_clustering_result):
    n_components:int
    n_neighbors:int
    learning_rate:float
    min_dist:float
    local_connectivity:int
    densmap:bool

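# umap_hdbscan_job chains the two steps through twoway_clustering_job: call1 fits
# UMAP on the precomputed distance matrix, call2 clusters the embedding with HDBSCAN.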
class umap_hdbscan_job(twoway_clustering_job):
    def __init__(self, infile, outpath, name,
                 umap_args = {"n_components":2, "n_neighbors":15, "learning_rate":1, "min_dist":1, "local_connectivity":1, "densmap":False},
                 hdbscan_args = {"min_cluster_size":2, "min_samples":1, "cluster_selection_epsilon":0, "cluster_selection_method":'eom'},
                 *args,
                 **kwargs):
        super().__init__(infile,
                         outpath,
                         name,
                         call1=umap_hdbscan_job._umap_embedding,
                         call2=umap_hdbscan_job._hdbscan_clustering,
                         args1=umap_args,
                         args2=hdbscan_args,
                         *args,
                         **kwargs
                         )

        self.n_components = umap_args['n_components']
        self.n_neighbors = umap_args['n_neighbors']
        self.learning_rate = umap_args['learning_rate']
        self.min_dist = umap_args['min_dist']
        self.local_connectivity = umap_args['local_connectivity']
        self.densmap = umap_args['densmap']
        self.min_cluster_size = hdbscan_args['min_cluster_size']
        self.min_samples = hdbscan_args['min_samples']
        self.cluster_selection_epsilon = hdbscan_args['cluster_selection_epsilon']
        self.cluster_selection_method = hdbscan_args['cluster_selection_method']

    def after_run(self):
        # attach the first two UMAP embedding dimensions to the cluster data so
        # downstream plots can use x/y directly (assumes n_components >= 2).
        coords = self.step1.embedding_
        self.cluster_data['x'] = coords[:,0]
        self.cluster_data['y'] = coords[:,1]
        super().after_run()

    @staticmethod
    def _umap_embedding(mat, **umap_args):
        print(f"running umap embedding. umap_args:{umap_args}")
        umapmodel = umap.UMAP(metric='precomputed', **umap_args)
        umapmodel = umapmodel.fit(mat)
        return umapmodel

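    # note: _umap_embedding above takes a square *distance* matrix (metric='precomputed'),
    # while _hdbscan_clustering below runs on the euclidean embedding coordinates.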
    @staticmethod
    def _hdbscan_clustering(mat, umapmodel, **hdbscan_args):
        print(f"running hdbscan clustering. hdbscan_args:{hdbscan_args}")

        umap_coords = umapmodel.transform(mat)

        clusterer = hdbscan.HDBSCAN(metric='euclidean',
                                    core_dist_n_jobs=cpu_count(),
                                    **hdbscan_args
                                    )

        clustering = clusterer.fit(umap_coords)

        return clustering

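    # the fitted HDBSCAN object returned above exposes cluster assignments via
    # .labels_, with -1 marking noise points; twoway_clustering_job is assumed
    # to turn these into self.cluster_data.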
    def get_info(self):
        result = super().get_info()
        self.result = umap_hdbscan_clustering_result(**result.__dict__,
                                                     min_cluster_size=self.min_cluster_size,
                                                     min_samples=self.min_samples,
                                                     cluster_selection_epsilon=self.cluster_selection_epsilon,
                                                     cluster_selection_method=self.cluster_selection_method,
                                                     n_components=self.n_components,
                                                     n_neighbors=self.n_neighbors,
                                                     learning_rate=self.learning_rate,
                                                     min_dist=self.min_dist,
                                                     local_connectivity=self.local_connectivity,
                                                     densmap=self.densmap
                                                     )
        return self.result

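# get_info widens the generic result from twoway_clustering_job with this job's
# umap and hdbscan settings; the sweep's save() is assumed to collect these
# dataclasses into one table for model selection.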
def run_umap_hdbscan_grid_sweep(savefile, inpath, outpath, n_neighbors = [15], n_components=[2], learning_rate=[1], min_dist=[1], local_connectivity=[1],
                                densmap=[False],
                                min_cluster_sizes=[2], min_samples=[1], cluster_selection_epsilons=[0], cluster_selection_methods=['eom']):
    """Run umap + hdbscan clustering once or more with different parameters.

    Usage:
    umap_hdbscan_clustering.py --savefile=SAVEFILE --inpath=INPATH --outpath=OUTPATH --n_neighbors=<csv> --n_components=<csv> --learning_rate=<csv> --min_dist=<csv> --local_connectivity=<csv> --densmap=<csv bool> --min_cluster_sizes=<csv> --min_samples=<csv> --cluster_selection_epsilons=<csv> --cluster_selection_methods=<csv "eom"|"leaf">

    Keyword arguments:
    savefile: path to save the metadata and diagnostics
    inpath: path to feather data containing a labeled matrix of subreddit similarities.
    outpath: path to output fit umap + hdbscan clusterings.
    n_neighbors: umap parameter; takes integers greater than 1
    n_components: umap parameter; dimensionality of the embedding, takes integers greater than 1
    learning_rate: umap parameter; takes positive real values
    min_dist: umap parameter; takes positive real values
    local_connectivity: umap parameter; takes positive integers
    densmap: umap parameter; whether to use the densMAP variant
    min_cluster_sizes: one or more integers indicating the minimum cluster size
    min_samples: one or more integers indicating the minimum number of samples used in the algorithm
    cluster_selection_epsilons: one or more similarity thresholds for transition from dbscan to hdbscan
    cluster_selection_methods: "eom" or "leaf"; eom gives larger clusters.
    """

    umap_args = {'n_neighbors':list(map(int, n_neighbors)),
                 'learning_rate':list(map(float, learning_rate)),
                 'min_dist':list(map(float, min_dist)),
                 'local_connectivity':list(map(int, local_connectivity)),
                 'n_components':list(map(int, n_components)),
                 'densmap':list(map(bool, densmap))
                 }

    hdbscan_args = {'min_cluster_size':list(map(int, min_cluster_sizes)),
                    'min_samples':list(map(int, min_samples)),
                    'cluster_selection_epsilon':list(map(float, cluster_selection_epsilons)),
                    'cluster_selection_method':cluster_selection_methods}

    obj = umap_hdbscan_grid_sweep(inpath,
                                  outpath,
                                  umap_args,
                                  hdbscan_args)
    obj.run(cores=10)
    obj.save(savefile)

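# A hypothetical invocation via fire (paths and values are illustrative only):
#   python3 umap_hdbscan_clustering.py --savefile=umap_hdbscan_sweep.csv \
#       --inpath=similarities.feather --outpath=umap_hdbscan_clusters \
#       --n_neighbors=[15,25] --min_cluster_sizes=[2,3] --cluster_selection_methods=[eom]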
def KNN_distances_plot(mat, outname, k=2):
    nbrs = NearestNeighbors(n_neighbors=k, algorithm='auto', metric='precomputed').fit(mat)
    distances, indices = nbrs.kneighbors(mat)
    d2 = distances[:,-1]
    df = pd.DataFrame({'dist':d2})
    df = df.sort_values("dist", ascending=False)
    df['idx'] = np.arange(0, d2.shape[0]) + 1
    p = pn.qplot(x='idx', y='dist', data=df, geom='line') + pn.scales.scale_y_continuous(minor_breaks = np.arange(0,50)/50,
                                                                                         breaks = np.arange(0,10)/10)
    p.save(outname, width=16, height=10)

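# KNN_distances_plot draws the standard k-distance ("elbow") diagnostic from the
# DBSCAN literature: the knee of the sorted distance curve suggests a reasonable
# epsilon, which can inform the choice of cluster_selection_epsilon above.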
def make_KNN_plots():
    similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_terms_10k.feather"
    subreddits, mat = read_similarity_mat(similarities)
    mat = sim_to_dist(mat)

    KNN_distances_plot(mat, k=2, outname='terms_knn_dist2.png')

    similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_10k.feather"
    subreddits, mat = read_similarity_mat(similarities)
    mat = sim_to_dist(mat)
    KNN_distances_plot(mat, k=2, outname='authors_knn_dist2.png')

    similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k.feather"
    subreddits, mat = read_similarity_mat(similarities)
    mat = sim_to_dist(mat)
    KNN_distances_plot(mat, k=2, outname='authors-tf_knn_dist2.png')

if __name__ == "__main__":
    fire.Fire(run_umap_hdbscan_grid_sweep)

#    test_select_hdbscan_clustering()
#    fire.Fire(select_hdbscan_clustering)
