# Source: code.communitydata.science — cdsc_reddit.git / datasets / Makefile
# (repository blob-view header; commit note: "changes for archiving")
# Default goal: build both final per-subreddit parquet datasets.
# `all` is a command, not a file — declare it phony so a stray file named
# `all` can never make this rule appear up to date.
.PHONY: all
all: ../../data/reddit_comments_by_subreddit.parquet ../../data/reddit_submissions_by_subreddit.parquet
2
# Stage 2 for comments: repartition the per-job temp parquet by subreddit
# on a 4-node Spark cluster (added a space after ":" for consistency with
# the other rules in this file).
../../data/reddit_comments_by_subreddit.parquet: ../../data/temp/reddit_comments.parquet
	../start_spark_and_run.sh 4 comments_2_parquet_part2.py
5
# Stage 1 for comments: launch one Slurm array task per line of
# comments_task_list.sh; the array jobs produce the temp parquet shards.
# NOTE(review): unlike the submissions twin below, this rule does NOT
# `rm -rf` the old output first — presumably intentional, since the task
# list is generated with --overwrite=False (incremental resume); confirm.
../../data/temp/reddit_comments.parquet: comments_task_list.sh run_comments_jobs.sbatch
	mkdir -p comments_jobs
	mkdir -p ../../data/temp/
	# Array size = number of task lines; $$(...) defers the count to shell
	# run time and avoids the useless-cat `$(shell cat … | wc -l)` form.
	sbatch --wait --array=1-$$(wc -l < comments_task_list.sh) run_comments_jobs.sbatch 0
10
# Convenience alias: `make temp_reddit_comments.parquet` builds the real
# temp output under ../../data/temp/. No file of this name is ever
# created — NOTE(review): consider declaring this target .PHONY.
temp_reddit_comments.parquet: ../../data/temp/reddit_comments.parquet
12
# Generate the comment-ingest task list (one line per array job) on a
# bigmem compute node. Wraps the command in `bash -c` and sources
# ~/.bashrc, presumably to pick up the right python environment — the
# submissions rule below skips this; NOTE(review): confirm the asymmetry
# is intentional. --overwrite=False presumably limits the list to tasks
# whose outputs are missing — TODO confirm in comments_2_parquet_part1.py.
comments_task_list.sh: comments_2_parquet_part1.py
	srun -p compute-bigmem -A comdata --nodes=1 --mem-per-cpu=9g -c 40 --time=120:00:00 bash -c "source ~/.bashrc && python3 comments_2_parquet_part1.py gen_task_list --overwrite=False"
15
# Generate the submission-ingest task list (one line per array job) on a
# bigmem compute node. Runs python3 directly, without the `bash -c`/
# ~/.bashrc wrapper the comments rule uses — NOTE(review): confirm this
# finds the same python environment.
submissions_task_list.sh: submissions_2_parquet_part1.py
	srun -p compute-bigmem -A comdata --nodes=1 --mem-per-cpu=9g -c 40 --time=120:00:00 python3 submissions_2_parquet_part1.py gen_task_list
18
# Stage 2 for submissions: repartition the per-job temp parquet by
# subreddit on a 4-node Spark cluster (added a space after ":" for
# consistency with the other rules in this file).
../../data/reddit_submissions_by_subreddit.parquet: ../../data/temp/reddit_submissions.parquet
	../start_spark_and_run.sh 4 submissions_2_parquet_part2.py
21
# Stage 1 for submissions: remove any stale/partial output, then launch
# one Slurm array task per line of submissions_task_list.sh.
../../data/temp/reddit_submissions.parquet: submissions_task_list.sh run_submissions_jobs.sbatch
	mkdir -p submissions_jobs
	rm -rf ../../data/temp/reddit_submissions.parquet
	mkdir -p ../../data/temp/
	# Array size = number of task lines; $$(...) defers the count to shell
	# run time and avoids the useless-cat `$(shell cat … | wc -l)` form.
	sbatch --wait --array=1-$$(wc -l < submissions_task_list.sh) run_submissions_jobs.sbatch 0
27
# Convenience alias: `make temp_reddit_submissions.parquet` builds the
# real temp output under ../../data/temp/. No file of this name is ever
# created — NOTE(review): consider declaring this target .PHONY.
temp_reddit_submissions.parquet: ../../data/temp/reddit_submissions.parquet

# Community Data Science Collective || Want to submit a patch?