]> code.communitydata.science - cdsc_reddit.git/blob - clustering/pick_best_clustering.py
lsi support for weekly similarities
[cdsc_reddit.git] / clustering / pick_best_clustering.py
1 #!/usr/bin/env python3
2 import fire
3 import pandas as pd
4 from pathlib import Path
5 import shutil
# Scratch defaults from interactive development/debugging. When the script is
# run from the command line, `fire.Fire(pick_best_clustering)` passes CLI
# arguments straight to the function, so these module-level values are NOT
# used by that path (note `outpath` here is never read at all, and
# `selection_data` shadows the function parameter of the same name).
selection_data="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/clustering/comment_authors_compex_LSI/selection_data.csv"

outpath = 'test_best.feather'
min_clusters=50; max_isolates=7500; min_cluster_size=2
10
# pick the best clustering according to silhouette score subject to constraints
def pick_best_clustering(selection_data, output, min_clusters, max_isolates, min_cluster_size):
    """Select the best clustering by silhouette score, subject to constraints.

    Reads per-clustering statistics from the CSV at ``selection_data``, keeps
    only clusterings with at most ``max_isolates`` isolates, at least
    ``min_clusters`` clusters, and exactly ``min_cluster_size``, then copies
    the highest-silhouette survivor's ``<name>.feather`` file to ``output``.

    Parameters
    ----------
    selection_data : str or path
        CSV of clustering statistics; must contain the columns
        ``silhouette_score``, ``n_isolates``, ``n_clusters``,
        ``min_cluster_size``, ``lsi_dimensions``, ``outpath`` and ``name``.
    output : str or path
        Destination path for the chosen clustering's feather file.
    min_clusters, max_isolates, min_cluster_size : int
        Filtering constraints applied before picking the top silhouette score.

    Raises
    ------
    ValueError
        If no clustering satisfies all three constraints (previously this
        surfaced as an opaque ``IndexError`` from ``.iloc[0]``).
    """
    df = pd.read_csv(selection_data, index_col=0)
    df = df.sort_values("silhouette_score", ascending=False)

    # n_isolates was serialized as a stringified list (e.g. "[5]" or "[]");
    # strip the brackets and treat an empty list as zero isolates.
    # NOTE(review): a multi-element list like "[1, 2]" still fails to parse,
    # matching the original behavior — upstream presumably emits <= 1 element.
    isolates = df.n_isolates.astype(str).str.strip("[]")
    df['n_isolates'] = pd.to_numeric(isolates.where(isolates.str.len() > 0, "0")).astype(int)

    candidates = df[(df.n_isolates <= max_isolates)
                    & (df.n_clusters >= min_clusters)
                    & (df.min_cluster_size == min_cluster_size)]

    if candidates.empty:
        raise ValueError(
            f"no clustering satisfies n_isolates<={max_isolates}, "
            f"n_clusters>={min_clusters}, min_cluster_size=={min_cluster_size}"
        )

    # df is already sorted by silhouette score, so the first row is the best.
    best_cluster = candidates.iloc[0]

    best_lsi_dimensions = best_cluster.lsi_dimensions
    print(best_cluster.to_dict())
    best_path = Path(best_cluster.outpath) / (str(best_cluster['name']) + ".feather")
    shutil.copy(best_path, output)
    print(f"lsi dimensions:{best_lsi_dimensions}")
31     
if __name__ == "__main__":
    # Expose pick_best_clustering as a CLI: fire maps command-line arguments
    # onto the function's parameters.
    fire.Fire(pick_best_clustering)

Community Data Science Collective || Want to submit a patch?