refactor clustering in object-oriented style
[cdsc_reddit.git] / clustering / clustering.py
#!/usr/bin/env python3
# TODO: replace prints with logging.
import sys
import pandas as pd
import numpy as np
from sklearn.cluster import AffinityPropagation
import fire
from pathlib import Path
from multiprocessing import cpu_count
from dataclasses import dataclass
from clustering_base import sim_to_dist, process_clustering_result, clustering_result, read_similarity_mat

def affinity_clustering(similarities, output, *args, **kwargs):
    # Load the precomputed similarity matrix, cluster it, and label the result.
    subreddits, mat = read_similarity_mat(similarities)
    clustering = _affinity_clustering(mat, subreddits, output, *args, **kwargs)
    cluster_data = process_clustering_result(clustering, subreddits)
    cluster_data['algorithm'] = 'affinity'
    return cluster_data

def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True):
    '''
    mat: precomputed matrix of similarity scores
    subreddits: labels for the rows of mat
    output: path to write the labeled clustering results to (feather format)
    preference_quantile: parameter controlling how many clusters to make. Higher values = more clusters. 0.85 is a good value with 3000 subreddits.
    damping: parameter controlling how much each iteration's messages are blended with the previous ones. Higher values make convergence faster and more dependable. 0.85 is a good value for the 10000-subreddit by-author similarity matrix.
    Returns the fitted AffinityPropagation object.
    '''
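    # Note: AffinityPropagation's 'preference' controls how readily each point
    # becomes an exemplar; taking it as a quantile of the observed similarities
    # (below) means preference_quantile directly tunes cluster granularity.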
    print(f"damping:{damping}; convergenceIter:{convergence_iter}; preferenceQuantile:{preference_quantile}")

    preference = np.quantile(mat, preference_quantile)

    print(f"preference is {preference}")
    print("data loaded")
    sys.stdout.flush()
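    # affinity='precomputed' means mat is interpreted directly as a similarity
    # matrix (higher = more similar); copy=False allows sklearn to modify mat
    # in place rather than copying it, which saves memory on large matrices.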
    clustering = AffinityPropagation(damping=damping,
                                     max_iter=max_iter,
                                     convergence_iter=convergence_iter,
                                     copy=False,
                                     preference=preference,
                                     affinity='precomputed',
                                     verbose=verbose,
                                     random_state=random_state).fit(mat)

    cluster_data = process_clustering_result(clustering, subreddits)
    output = Path(output)
    output.parent.mkdir(parents=True, exist_ok=True)
    cluster_data.to_feather(output)
    print(f"saved {output}")
    return clustering


if __name__ == "__main__":
    fire.Fire(affinity_clustering)
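# Example invocations (file paths and parameter values below are hypothetical,
# shown only to illustrate the interface):
#
#   python3 clustering.py /path/to/similarities.feather /path/to/clusters.feather \
#       --damping=0.85 --preference_quantile=0.85
#
# or, from Python:
#
#   cluster_data = affinity_clustering("/path/to/similarities.feather",
#                                      "/path/to/clusters.feather",
#                                      damping=0.85,
#                                      preference_quantile=0.85)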
