[cdsc_reddit.git] / clustering / affinity_clustering.py
from sklearn.metrics import silhouette_score
from sklearn.cluster import AffinityPropagation
from functools import partial
from dataclasses import dataclass
from clustering import read_similarity_mat, sim_to_dist, process_clustering_result, clustering_result
from multiprocessing import Pool, cpu_count, Array, Process
from pathlib import Path
from itertools import product
import numpy as np
import pandas as pd
import fire
import sys

# silhouette is the only scoring metric that doesn't need the feature matrix, so it's probably the only one worth trying.
@dataclass
class affinity_clustering_result(clustering_result):
    damping:float
    convergence_iter:int
    preference_quantile:float

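# Judging from how the result is constructed in do_clustering below, the fields
# inherited from clustering_result appear to include outpath, max_iter,
# silhouette_score, alt_silhouette_score, and name.
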
def affinity_clustering(similarities, output, *args, **kwargs):
    subreddits, mat = read_similarity_mat(similarities)
    clustering = _affinity_clustering(mat, subreddits, output, *args, **kwargs)
    cluster_data = process_clustering_result(clustering, subreddits)
    cluster_data['algorithm'] = 'affinity'
    return cluster_data

def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True):
    '''
    mat: matrix of similarity scores
    preference_quantile: parameter controlling how many clusters to make. Higher values = more clusters. 0.85 is a good value with 3000 subreddits.
    damping: parameter controlling how much each iteration is averaged with the previous one. Higher values make convergence more dependable. 0.85 is a good value for the 10,000-subreddit by-author similarities.
    '''
    print(f"damping:{damping}; convergenceIter:{convergence_iter}; preferenceQuantile:{preference_quantile}")

    # the preference controls how willing points are to become exemplars; taking a
    # quantile of the similarity matrix ties it to the scale of the data.
    preference = np.quantile(mat, preference_quantile)

    print(f"preference is {preference}")
    print("data loaded")
    sys.stdout.flush()
    clustering = AffinityPropagation(damping=damping,
                                     max_iter=max_iter,
                                     convergence_iter=convergence_iter,
                                     copy=False,
                                     preference=preference,
                                     affinity='precomputed',
                                     verbose=verbose,
                                     random_state=random_state).fit(mat)

    cluster_data = process_clustering_result(clustering, subreddits)
    output = Path(output)
    output.parent.mkdir(parents=True, exist_ok=True)
    cluster_data.to_feather(output)
    print(f"saved {output}")
    return clustering

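# A minimal sketch of calling _affinity_clustering directly (the file paths here are
# hypothetical; read_similarity_mat returns the subreddit names and the similarity matrix):
#
#   subreddits, mat = read_similarity_mat("similarities/comment_terms.feather")
#   clustering = _affinity_clustering(mat, subreddits, "clusters/comment_terms.feather",
#                                     damping=0.85, preference_quantile=0.85)
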
def do_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False):
    if name is None:
        name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{preference_quantile}"
    print(name)
    sys.stdout.flush()
    outpath = outdir / (str(name) + ".feather")
    outpath.parent.mkdir(parents=True, exist_ok=True)
    print(outpath)
    clustering = _affinity_clustering(mat, subreddits, outpath, damping, max_iter, convergence_iter, preference_quantile, random_state, verbose)
    cluster_data = process_clustering_result(clustering, subreddits)

    # silhouette_score with metric='precomputed' expects distances, not similarities.
    distances = sim_to_dist(clustering.affinity_matrix_)
    try:
        score = silhouette_score(distances, clustering.labels_, metric='precomputed')
    except ValueError:
        score = None

    alt_score = None
    if alt_mat is not None:
        alt_distances = sim_to_dist(alt_mat)
        try:
            alt_score = silhouette_score(alt_distances, clustering.labels_, metric='precomputed')
        except ValueError:
            alt_score = None

    res = affinity_clustering_result(outpath=outpath,
                                     damping=damping,
                                     max_iter=max_iter,
                                     convergence_iter=convergence_iter,
                                     preference_quantile=preference_quantile,
                                     silhouette_score=score,
                                     alt_silhouette_score=alt_score,
                                     name=str(name))

    return res

# alt similarities are for checking the silhouette coefficient against an alternative measure of similarity (e.g., topic similarities for user clustering).

def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max_iter=100000, convergence_iter=[30], preference_quantile=[0.5], random_state=1968, verbose=True, alt_similarities=None, J=None):

    damping = list(map(float, damping))
    convergence_iter = list(map(int, convergence_iter))
    preference_quantile = list(map(float, preference_quantile))

    if type(outdir) is str:
        outdir = Path(outdir)

    outdir.mkdir(parents=True, exist_ok=True)

    subreddits, mat = read_similarity_mat(similarities, use_threads=True)

    if alt_similarities is not None:
        # read_similarity_mat returns (names, matrix); only the matrix is needed here.
        _, alt_mat = read_similarity_mat(alt_similarities, use_threads=True)
    else:
        alt_mat = None

    if J is None:
        J = cpu_count()
    pool = Pool(J)

    # build the grid of hyperparameter combinations, tagging each with an index used as its name
    hyper_grid = product(damping, convergence_iter, preference_quantile)
    hyper_grid = (t + (str(i),) for i, t in enumerate(hyper_grid))

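    # For example (illustrative values, not from the repo), damping=[0.5, 0.85],
    # convergence_iter=[30], preference_quantile=[0.5] yields
    # (0.5, 30, 0.5, '0') and (0.85, 30, 0.5, '1'), matching do_clustering's
    # (damping, convergence_iter, preference_quantile, name) positional arguments.
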
    _do_clustering = partial(do_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat)

    #    similarities = Array('d', mat)
    # call pool.starmap
    print("running clustering selection")
    clustering_data = pool.starmap(_do_clustering, hyper_grid)
    clustering_data = pd.DataFrame(list(clustering_data))
    clustering_data.to_csv(outinfo)
    return clustering_data

if __name__ == "__main__":
    fire.Fire(select_affinity_clustering)
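# Example invocation via python-fire (the paths and hyperparameter values below are
# illustrative, not from the repo); list-valued options sweep the hyperparameter grid:
#
#   python3 affinity_clustering.py \
#       --similarities=similarities/comment_authors.feather \
#       --outdir=clusters/affinity \
#       --outinfo=clusters/affinity_selection.csv \
#       --damping='[0.5,0.85,0.95]' \
#       --convergence_iter='[30]' \
#       --preference_quantile='[0.1,0.5,0.9]'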
