# similarities/Makefile
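
# Slurm job launchers: a general-purpose allocation and a high-memory one
# used for the LSI similarity step.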
srun=srun -p compute-bigmem -A comdata --mem-per-cpu=9g --time=200:00:00 -c 40
srun_huge=srun -p compute-hugemem -A comdata --mem=724g --time=200:00:00 -c 40

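# Data locations and the list of LSI dimensionalities to fit.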
similarity_data=../../data/reddit_similarity
tfidf_data=${similarity_data}/tfidf
lsi_components=[10,50,100,200,300,400,500,600,700,850]

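# Entry points. lsi_similarities is the first real target and therefore the
# default goal. Note: this file defines no rule for the feather target that
# `all` depends on.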
.PHONY: all lsi_similarities

lsi_similarities: ${similarity_data}/subreddit_comment_authors-tf_10k_LSI

all: ${similarity_data}/subreddit_comment_authors-tf_10k.feather

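# Fit LSI similarities over comment-author term frequencies for the top 10k
# subreddits, at each dimensionality in ${lsi_components}. $< expands to the
# first prerequisite (the TF-IDF parquet).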
${similarity_data}/subreddit_comment_authors-tf_10k_LSI: ${tfidf_data}/comment_authors_100k.parquet similarities_helper.py ${similarity_data}/subreddits_by_num_comments_nonsfw.csv
	${srun_huge} /bin/bash -c "source ~/.bashrc; python3 lsi_similarities.py author-tf --outfile=${similarity_data}/subreddit_comment_authors-tf_10k_LSI --topN=10000 --n_components=${lsi_components} --min_df=10 --inpath=$<"

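# Rank subreddits by comment count (SFW only), using a Spark job.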
${similarity_data}/subreddits_by_num_comments_nonsfw.csv: ../../data/reddit_submissions_by_subreddit.parquet ../../data/reddit_comments_by_subreddit.parquet
	../start_spark_and_run.sh 3 top_subreddits_by_comments.py

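# Build the TF-IDF matrix of commenting authors for the top 100k subreddits.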
${tfidf_data}/comment_authors_100k.parquet: ../../data/reddit_ngrams/comment_authors_sorted.parquet ${similarity_data}/subreddits_by_num_comments_nonsfw.csv
	../start_spark_and_run.sh 3 tfidf.py authors --topN=100000 --inpath=$< --outpath=${tfidf_data}/comment_authors_100k.parquet

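# Upstream inputs are built by the sibling ngrams and datasets Makefiles.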
../../data/reddit_ngrams/comment_authors_sorted.parquet:
	$(MAKE) -C ../ngrams

../../data/reddit_submissions_by_subreddit.parquet:
	$(MAKE) -C ../datasets

../../data/reddit_comments_by_subreddit.parquet:
	$(MAKE) -C ../datasets
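
# Example invocation (assumes access to the Slurm partitions configured above):
#   make lsi_similarities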
