1 from pathlib import Path
4 from dataclasses import dataclass
5 from sklearn.metrics import silhouette_score, silhouette_samples
# this is meant to be an interface, not created directly
# NOTE(review): fragment — the original file's line numbering skips 11-13 and
# 15-18 here; the assignments for name/call/args/kwargs (used later as
# self.name / self.call / self.args / self.kwargs) presumably live there.
def __init__(self, infile, outpath, name, call, *args, **kwargs):
# Record job configuration.
#   infile: feather file of pairwise similarities (read by read_distance_mat)
#   outpath: directory where result feather files are written
#   call: clustering callable applied to the distance matrix with *args/**kwargs
self.outpath = Path(outpath)
self.infile = Path(infile)
# NOTE(review): fragment — the enclosing `def` line falls in a gap of this
# view; these statements read like the body of a run() method.
# Pipeline: load distance matrix -> cluster -> summarize -> score -> persist.
self.subreddits, self.mat = self.read_distance_mat(self.infile)
self.clustering = self.call(self.mat, *self.args, **self.kwargs)
self.cluster_data = self.process_clustering(self.clustering, self.subreddits)
self.score = self.silhouette()
self.outpath.mkdir(parents=True, exist_ok=True)
# cluster assignments written as "<name>.feather" under outpath
self.cluster_data.to_feather(self.outpath/(self.name + ".feather"))
# NOTE(review): fragment — the closing parenthesis of this call (and possibly
# further keyword arguments, e.g. a name= field at the missing original line
# 33) fall in a gap of this view.
# Bundle run outputs into a clustering_result record (dataclass defined below).
self.result = clustering_result(outpath=str(self.outpath.resolve()),
silhouette_score=self.score,
n_clusters=self.n_clusters,
n_isolates=self.n_isolates,
silhouette_samples = self.silsampout
# NOTE(review): fragment — the enclosing `def` line, the `else:` branch header
# (before the final assignment below), and the trailing `return` fall in gaps
# of this view.
# Convention from DBSCAN-style clusterers: label -1 marks isolates/noise.
isolates = self.clustering.labels_ == -1
# Score only the non-isolate rows/columns of the precomputed distance matrix.
scoremat = self.mat[~isolates][:,~isolates]
if scoremat.shape[0] > 0:
score = silhouette_score(scoremat, self.clustering.labels_[~isolates], metric='precomputed')
# Per-point silhouettes are computed over the FULL matrix, isolates included.
silhouette_samp = silhouette_samples(self.mat, self.clustering.labels_, metric='precomputed')
silhouette_samp = pd.DataFrame({'subreddit':self.subreddits,'score':silhouette_samp})
self.outpath.mkdir(parents=True, exist_ok=True)
# Per-point scores persisted as "silhouette_samples-<name>.feather".
silsampout = self.outpath / ("silhouette_samples-" + self.name + ".feather")
self.silsampout = silsampout.resolve()
silhouette_samp.to_feather(self.silsampout)
# (presumably the else-branch of the shape check, given the missing header)
self.silsampout = None
56 def read_distance_mat(self, similarities, use_threads=True):
57 df = pd.read_feather(similarities, use_threads=use_threads)
58 mat = np.array(df.drop('_subreddit',1))
60 mat[range(n),range(n)] = 1
61 return (df._subreddit,1-mat)
def process_clustering(self, clustering, subreddits):
# Summarize a fitted clustering: print diagnostics, record n_clusters and
# n_isolates on self, and build a subreddit->cluster DataFrame.
# NOTE(review): fragment — several original lines fall in gaps of this view,
# including the conditional that must choose between the two n_isolates
# assignments at the bottom and the final `return cluster_data`.
if hasattr(clustering,'n_iter_'):
print(f"clustering took {clustering.n_iter_} iterations")
clusters = clustering.labels_
self.n_clusters = len(set(clusters))
print(f"found {self.n_clusters} clusters")
cluster_data = pd.DataFrame({'subreddit': subreddits,'cluster':clustering.labels_})
# Cluster sizes via groupby-count; cluster -1 (isolates) is excluded from the
# "largest cluster" statistic.
cluster_sizes = cluster_data.groupby("cluster").count().reset_index()
print(f"the largest cluster has {cluster_sizes.loc[cluster_sizes.cluster!=-1].subreddit.max()} members")
print(f"the median cluster has {cluster_sizes.subreddit.median()} members")
# Isolates are counted two ways: singleton clusters vs. members of cluster -1.
n_isolates1 = (cluster_sizes.subreddit==1).sum()
print(f"{n_isolates1} clusters have 1 member")
n_isolates2 = (cluster_sizes.loc[cluster_sizes.cluster==-1,['subreddit']])
print(f"{n_isolates2} subreddits are in cluster -1",flush=True)
# NOTE(review): as written (missing conditional) the second assignment would
# always win — confirm the guard against the full file.
self.n_isolates = n_isolates2
self.n_isolates = n_isolates1
# NOTE(review): fragment — a @dataclass decorator and further fields (the
# construction earlier passes outpath=, n_clusters=, n_isolates= as well)
# appear to fall in gaps of this view.
class clustering_result:
# Result record for one clustering job.
# silhouette_score: overall score over non-isolate points
# silhouette_samples: path to the per-point silhouette feather file
silhouette_score:float
silhouette_samples:str