# TODO: replace prints with logging.
import fire
import numpy as np
from sklearn.cluster import AffinityPropagation
from pathlib import Path
from multiprocessing import cpu_count
from dataclasses import dataclass
from clustering_base import sim_to_dist, process_clustering_result, clustering_result, read_similarity_mat
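# clustering_base is a repo-local helper module; inferred from usage below,
# read_similarity_mat loads subreddit labels plus a similarity matrix, and
# process_clustering_result turns a fitted model into a per-subreddit table.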

def affinity_clustering(similarities, output, *args, **kwargs):
    subreddits, mat = read_similarity_mat(similarities)
    clustering = _affinity_clustering(mat, subreddits, output, *args, **kwargs)
    cluster_data = process_clustering_result(clustering, subreddits)
    cluster_data['algorithm'] = 'affinity'
    return cluster_data

def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True):
    '''
    mat: matrix of similarity scores
    preference_quantile: parameter controlling how many clusters to make; higher values yield more clusters. 0.85 is a good value with 3000 subreddits.
    damping: parameter controlling how much of the previous iteration's values is kept when updating; higher values damp oscillations and make convergence more dependable. 0.85 is a good value for the 10000 subreddits-by-author matrix.
    '''
26 print(f"damping:{damping}; convergenceIter:{convergence_iter}; preferenceQuantile:{preference_quantile}")
28 preference = np.quantile(mat,preference_quantile)
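    # In affinity propagation, the per-point "preference" controls how readily
    # points become exemplars: a higher preference yields more clusters. Taking
    # a quantile of the similarity matrix keeps this threshold on the same
    # scale as the data, which is what preference_quantile tunes.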
30 print(f"preference is {preference}")
    clustering = AffinityPropagation(damping=damping,
                                     max_iter=max_iter,
                                     convergence_iter=convergence_iter,
                                     preference=preference,
                                     affinity='precomputed',
                                     verbose=verbose,
                                     random_state=random_state).fit(mat)
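    # With affinity='precomputed', fit() treats mat directly as pairwise
    # similarities (rather than computing negative squared euclidean distances
    # from features), so mat must be a square subreddit-by-subreddit matrix.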

    cluster_data = process_clustering_result(clustering, subreddits)

    output = Path(output)
    output.parent.mkdir(parents=True, exist_ok=True)
    cluster_data.to_feather(output)
    print(f"saved {output}")
    return clustering

if __name__ == "__main__":
    fire.Fire(affinity_clustering)
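# Example invocation via fire's generated CLI (file paths are hypothetical;
# the similarity file must be whatever format read_similarity_mat expects):
#   python affinity_clustering.py --similarities=similarities.feather \
#       --output=clusters.feather --damping=0.85 --preference_quantile=0.85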