code.communitydata.science - cdsc_reddit.git/blobdiff - comments_2_parquet.sh
Update reddit comments data with daily dumps.
[cdsc_reddit.git] / comments_2_parquet.sh
index 802cc70b7a880220725ec7a443e8345b1ae27e74..e9818c19cfcd6c7f29f2c7bcc04b564b5471e3e6 100755 (executable)
@@ -1,9 +1,10 @@
 #!/usr/bin/env bash
+## needs to be run by hand since I don't have a nice way of waiting on a parallel-sql job to complete
 
-echo "!#/usr/bin/bash" > job_script.sh
+echo "#!/usr/bin/bash" > job_script.sh
 echo "source $(pwd)/../bin/activate" >> job_script.sh
 echo "python3 $(pwd)/comments_2_parquet_part1.py" >> job_script.sh
 
-srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 job_script.sh
+srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 --pty job_script.sh
 
 start_spark_and_run.sh 1 $(pwd)/comments_2_parquet_part2.py

Community Data Science Collective || Want to submit a patch?