#!/usr/bin/env python3
"""Pick the best clustering run, by silhouette score, subject to constraints.

Reads a ``selection_data.csv`` produced by the clustering grid search (one row
per clustering run, with columns including ``silhouette_score``, ``n_isolates``,
``n_clusters``, ``min_cluster_size``, ``lsi_dimensions``, ``outpath`` and
``name``), filters to runs satisfying the constraints, and copies the winning
run's ``<name>.feather`` cluster assignment file to ``output``.
"""
import pandas as pd
from pathlib import Path
import shutil

# Defaults for the fire CLI; every one can be overridden on the command line.
selection_data = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/clustering/comment_authors_compex_LSI/selection_data.csv"
outpath = 'test_best.feather'
min_clusters = 50
max_isolates = 7500
min_cluster_size = 2

# pick the best clustering according to silhouette score subject to constraints
def pick_best_clustering(selection_data=selection_data,
                         output=outpath,
                         min_clusters=min_clusters,
                         max_isolates=max_isolates,
                         min_cluster_size=min_cluster_size):
    """Copy the best admissible clustering's feather file to ``output``.

    Parameters
    ----------
    selection_data : str
        Path to the grid-search summary CSV (first column is the index).
    output : str
        Destination path for the winning run's ``.feather`` file.
    min_clusters : int
        Reject runs that produced fewer than this many clusters.
    max_isolates : int
        Reject runs with more than this many isolate (noise) subreddits.
    min_cluster_size : int
        Only consider runs with exactly this ``min_cluster_size`` setting.

    Raises
    ------
    ValueError
        If no run satisfies all three constraints.
    """
    df = pd.read_csv(selection_data, index_col=0)
    # Rank runs best-first so .iloc[0] below is the constrained optimum.
    # NOTE(review): this line and the n_isolates parsing were reconstructed
    # from diff context between hunks — confirm against the full file.
    df = df.sort_values("silhouette_score", ascending=False)

    # `n_isolates` is serialized like "[123]" ("[]" when absent); parse to int.
    df['n_isolates_str'] = df.n_isolates.str.strip("[]")
    df['n_isolates_0'] = df['n_isolates_str'].apply(lambda l: len(l) == 0)
    df.loc[df.n_isolates_0, 'n_isolates'] = 0
    df.loc[~df.n_isolates_0, 'n_isolates'] = df.loc[~df.n_isolates_0].n_isolates_str.apply(lambda l: int(l))

    candidates = df[(df.n_isolates <= max_isolates)
                    & (df.n_clusters >= min_clusters)
                    & (df.min_cluster_size == min_cluster_size)]
    # Fail loudly instead of letting .iloc[0] raise an opaque IndexError.
    if candidates.shape[0] == 0:
        raise ValueError(
            f"no clustering satisfies min_clusters>={min_clusters}, "
            f"max_isolates<={max_isolates}, min_cluster_size=={min_cluster_size}")
    best_cluster = candidates.iloc[0]

    best_lsi_dimensions = best_cluster.lsi_dimensions
    print(best_cluster.to_dict())
    best_path = Path(best_cluster.outpath) / (str(best_cluster['name']) + ".feather")
    shutil.copy(best_path, output)
    print(f"lsi dimensions:{best_lsi_dimensions}")

if __name__ == "__main__":
    # fire is only needed for CLI dispatch; importing it lazily keeps the
    # module importable (e.g. for tests) without the third-party package.
    import fire
    fire.Fire(pick_best_clustering)