]> code.communitydata.science - cdsc_reddit.git/blob - clustering/clustering_base.py
Use latent semantic indexing and HDBSCAN
[cdsc_reddit.git] / clustering / clustering_base.py
1 from pathlib import Path
2 import numpy as np
3 import pandas as pd
4 from dataclasses import dataclass
5
def sim_to_dist(mat):
    """Convert a similarity matrix into a distance matrix via dist = 1 - sim.

    Any negative distances (from similarities greater than 1) are clamped
    to 0, and the diagonal is forced to 0 since every item is at distance
    0 from itself.
    """
    distances = 1 - mat
    np.clip(distances, 0, None, out=distances)
    np.fill_diagonal(distances, 0)
    return distances
11
def process_clustering_result(clustering, subreddits):
    """Summarize a fitted clustering and pair each subreddit with its label.

    Prints diagnostics (iteration count when available, number of clusters,
    largest/median cluster size, singleton count, and the size of the noise
    cluster -1) and returns a DataFrame with columns ['subreddit', 'cluster'].
    """
    if hasattr(clustering, 'n_iter_'):
        print(f"clustering took {clustering.n_iter_} iterations")

    clusters = clustering.labels_

    print(f"found {len(set(clusters))} clusters")

    cluster_data = pd.DataFrame({'subreddit': subreddits, 'cluster': clusters})

    # One row per cluster; 'subreddit' column holds the member count.
    cluster_sizes = cluster_data.groupby("cluster").count().reset_index()

    # Exclude the noise cluster (-1, e.g. from HDBSCAN) from "largest".
    print(f"the largest cluster has {cluster_sizes.loc[cluster_sizes.cluster != -1].subreddit.max()} members")

    print(f"the median cluster has {cluster_sizes.subreddit.median()} members")

    print(f"{(cluster_sizes.subreddit == 1).sum()} clusters have 1 member")

    # BUG FIX: the original interpolated a one-row DataFrame slice into the
    # message, printing a DataFrame repr instead of a number. Report the
    # actual count of subreddits assigned to cluster -1 (0 when absent).
    n_noise = cluster_sizes.loc[cluster_sizes.cluster == -1, 'subreddit'].sum()
    print(f"{n_noise} subreddits are in cluster -1", flush=True)

    return cluster_data
33
34
@dataclass
class clustering_result:
    """Record of a single clustering run's parameters and quality metrics."""
    outpath:Path                # destination path for this run's saved output
    max_iter:int                # iteration cap given to the clustering algorithm
    silhouette_score:float      # silhouette score of the resulting clustering
    alt_silhouette_score:float  # presumably a silhouette on an alternate similarity/distance matrix — TODO confirm against callers
    name:str                    # identifier for this parameter combination
    n_clusters:int              # number of clusters found
43
def read_similarity_mat(similarities, use_threads=True):
    """Load a subreddit similarity matrix from a feather file.

    Parameters
    ----------
    similarities : path to a feather file containing a '_subreddit' name
        column plus one numeric similarity column per subreddit.
    use_threads : passed through to pandas.read_feather.

    Returns
    -------
    (names, mat) : the '_subreddit' Series and the similarity matrix as a
        numpy array, with the diagonal forced to 1 (self-similarity).
    """
    df = pd.read_feather(similarities, use_threads=use_threads)
    # BUG FIX: df.drop('_subreddit', 1) relied on a positional `axis`
    # argument that was deprecated in pandas 1.x and removed in 2.0;
    # the keyword form is equivalent and version-safe.
    mat = np.array(df.drop(columns='_subreddit'))
    n = mat.shape[0]
    mat[range(n), range(n)] = 1
    return (df._subreddit, mat)

Community Data Science Collective || Want to submit a patch?