from sklearn.metrics import silhouette_score
from sklearn.cluster import AffinityPropagation
from functools import partial
from dataclasses import dataclass
from clustering import read_similarity_mat, sim_to_dist, process_clustering_result, clustering_result
from multiprocessing import Pool, cpu_count
from pathlib import Path
from itertools import product
import numpy as np
import pandas as pd
import fire
# silhouette is the only one that doesn't need the feature matrix. So it's probably the only one that's worth trying.
@dataclass
class affinity_clustering_result(clustering_result):
    damping:float
    convergence_iter:int
    preference_quantile:float
def affinity_clustering(similarities, output, *args, **kwargs):
    subreddits, mat = read_similarity_mat(similarities)
    clustering = _affinity_clustering(mat, subreddits, output, *args, **kwargs)
    cluster_data = process_clustering_result(clustering, subreddits)
    cluster_data['algorithm'] = 'affinity'
    return cluster_data
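# A hypothetical invocation of the wrapper above (the paths and values are
# illustrative, not from this repo):
#
#   cluster_data = affinity_clustering("similarities/subreddit_terms.feather",
#                                      "clusters/affinity_terms.feather",
#                                      damping=0.85,
#                                      preference_quantile=0.85)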
def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True):
    '''
    mat: matrix of similarity scores
    preference_quantile: parameter controlling how many clusters to make. Higher values = more clusters. 0.85 is a good value with 3000 subreddits.
    damping: parameter controlling how iterations are merged. Higher values make convergence faster and more dependable. 0.85 is a good value for the 10000 subreddits by-author data.
    '''
    print(f"damping:{damping}; convergenceIter:{convergence_iter}; preferenceQuantile:{preference_quantile}")

    # the preference is each point's a priori suitability to serve as an exemplar;
    # taking a quantile of the similarities scales it to the data, so a higher
    # quantile yields more exemplars and therefore more clusters.
    preference = np.quantile(mat, preference_quantile)

    print(f"preference is {preference}")

    clustering = AffinityPropagation(damping=damping,
                                     max_iter=max_iter,
                                     convergence_iter=convergence_iter,
                                     preference=preference,
                                     affinity='precomputed',
                                     verbose=verbose,
                                     random_state=random_state).fit(mat)

    cluster_data = process_clustering_result(clustering, subreddits)
    output = Path(output)
    output.parent.mkdir(parents=True, exist_ok=True)
    cluster_data.to_feather(output)
    print(f"saved {output}")
    return clustering
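# A minimal sketch of running _affinity_clustering on a small synthetic
# similarity matrix (the names and output path below are made up):
#
#   rng = np.random.default_rng(0)
#   sims = rng.uniform(0, 1, (10, 10))
#   sims = (sims + sims.T) / 2       # affinity propagation expects a symmetric matrix
#   np.fill_diagonal(sims, 1.0)      # each subreddit is maximally similar to itself
#   names = [f"sub_{i}" for i in range(10)]
#   clustering = _affinity_clustering(sims, names, "/tmp/affinity_example.feather")
#   print(clustering.labels_)        # one cluster label per subreddit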
def do_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False):
    if name is None:
        name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{preference_quantile}"
    outpath = outdir / (str(name) + ".feather")
    outpath.parent.mkdir(parents=True, exist_ok=True)
    clustering = _affinity_clustering(mat, subreddits, outpath, damping, max_iter, convergence_iter, preference_quantile, random_state, verbose)
    cluster_data = process_clustering_result(clustering, subreddits)

    # silhouette_score with metric='precomputed' expects distances, not similarities
    distances = sim_to_dist(clustering.affinity_matrix_)
    try:
        score = silhouette_score(distances, clustering.labels_, metric='precomputed')
    except ValueError:  # e.g., everything landed in a single cluster
        score = None

    alt_score = None
    if alt_mat is not None:
        alt_distances = sim_to_dist(alt_mat)
        try:
            alt_score = silhouette_score(alt_distances, clustering.labels_, metric='precomputed')
        except ValueError:
            pass

    res = affinity_clustering_result(outpath=outpath,
                                     damping=damping,
                                     max_iter=max_iter,
                                     convergence_iter=convergence_iter,
                                     preference_quantile=preference_quantile,
                                     silhouette_score=score,
                                     alt_silhouette_score=alt_score,
                                     name=str(name))
    return res
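# do_clustering is normally driven by pool.starmap below, but a single
# hyperparameter combination can be evaluated directly (illustrative values;
# mat and subreddits come from read_similarity_mat):
#
#   res = do_clustering(0.9, 30, 0.5, None, mat, subreddits, max_iter=100000,
#                       outdir=Path("clusters"), random_state=1968,
#                       verbose=True, alt_mat=None)
#   print(res.silhouette_score)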
# alt_similarities is for checking the silhouette coefficient of an alternative measure of similarity (e.g., topic similarities for user clustering).
def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max_iter=100000, convergence_iter=[30], preference_quantile=[0.5], random_state=1968, verbose=True, alt_similarities=None, J=None):

    # hyperparameter lists may arrive from the command line as strings; coerce them
    damping = list(map(float, damping))
    convergence_iter = list(map(int, convergence_iter))
    preference_quantile = list(map(float, preference_quantile))

    if isinstance(outdir, str):
        outdir = Path(outdir)

    outdir.mkdir(parents=True, exist_ok=True)
    subreddits, mat = read_similarity_mat(similarities, use_threads=True)

    if alt_similarities is not None:
        _, alt_mat = read_similarity_mat(alt_similarities, use_threads=True)
    else:
        alt_mat = None
    if J is None:
        J = cpu_count()
    pool = Pool(J)

    # build the grid of hyperparameter combinations, appending each combination's
    # enumeration index as its run name, e.g. (0.9, 30, 0.5, '0')
    hyper_grid = product(damping, convergence_iter, preference_quantile)
    hyper_grid = (t + (str(i),) for i, t in enumerate(hyper_grid))

    _do_clustering = partial(do_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat)

    print("running clustering selection")
    clustering_data = pool.starmap(_do_clustering, hyper_grid)
    clustering_data = pd.DataFrame(list(clustering_data))
    clustering_data.to_csv(outinfo)
    return clustering_data
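# The selection CSV written above has one row per hyperparameter combination,
# so a winning configuration can be picked by sorting on silhouette_score.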
if __name__ == "__main__":
    fire.Fire(select_affinity_clustering)
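# Example command line (the script and file names are hypothetical; fire
# parses the bracketed lists into Python lists):
#
#   python select_affinity.py \
#       --similarities=similarities/subreddit_terms.feather \
#       --outdir=clusters/affinity \
#       --outinfo=clusters/affinity/selection.csv \
#       --damping='[0.5,0.85,0.95]' \
#       --preference_quantile='[0.5,0.85]'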