Build comments dataset similarly to submissions and improve partitioning scheme
[cdsc_reddit.git] / submissions_2_parquet.sh
index d1c6bce4df0704e6c3924de079b3f89894841845..4ec4354ad8c345c2cdca9f83d1b3d1eb9df73075 100644 (file)
@@ -1,8 +1,10 @@
 #!/usr/bin/env bash
 
-# part2 should be run on one or more spark nodes
+echo "!#/usr/bin/bash" > job_script.sh
+echo "source $(pwd)/../bin/activate" >> job_script.sh
+echo "python3 $(pwd)/submissions_2_parquet_part1.py" >> job_script.sh
 
-./submissions_2_parquet_part1.py
+srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 job_script.sh
 
 start_spark_and_run.sh 1 $(pwd)/submissions_2_parquet_part2.py
 

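For reference, the three echo lines above generate a small job_script.sh that srun then runs on the allocated node. A minimal sketch of what that generated file contains, assuming the wrapper is invoked from the cdsc_reddit checkout so the $(pwd) substitutions expand to that directory (the /path/to/cdsc_reddit placeholder below stands in for whatever $(pwd) actually expands to):

#!/usr/bin/bash
# Activate the Python virtualenv that lives one level above the checkout.
source /path/to/cdsc_reddit/../bin/activate
# Run part 1 of the submissions-to-parquet pipeline on the allocated node.
python3 /path/to/cdsc_reddit/submissions_2_parquet_part1.py

Part 2 then runs under Spark via start_spark_and_run.sh, as shown in the final line of the diff.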