#!/usr/bin/env bash
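+# The lines below write job_script.sh: it activates the Python environment one
+# directory above the working directory and runs part 1 of the comments pipeline.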
 
-echo "!#/usr/bin/bash" > job_script.sh
+echo "#!/usr/bin/bash" > job_script.sh
 echo "source $(pwd)/../bin/activate" >> job_script.sh
 echo "python3 $(pwd)/comments_2_parquet_part1.py" >> job_script.sh
 
-srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 job_script.sh
+srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 --pty job_script.sh
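+# Finally, start_spark_and_run.sh (a helper script assumed to live elsewhere in
+# this project) brings up a one-node Spark cluster and submits part 2 below.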
 
 start_spark_and_run.sh 1 $(pwd)/comments_2_parquet_part2.py
 
 
 # spark script to make sorted and partitioned parquet files
 
+import pyspark
 from pyspark.sql import functions as f
 from pyspark.sql import SparkSession
+import os
 
-conf = SparkConf().setAppName("Reddit submissions to parquet")
+conf = pyspark.SparkConf().setAppName("Reddit submissions to parquet")
 conf = conf.set("spark.sql.shuffle.partitions",2000)
 conf = conf.set('spark.sql.crossJoin.enabled',"true")
 conf = conf.set('spark.debug.maxToStringFields',200)
 
+# build the session from this conf so the settings above take effect
+spark = SparkSession.builder.config(conf=conf).getOrCreate()
 
 sc = spark.sparkContext
 
 sqlContext = pyspark.SQLContext(sc)
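+# Load the submissions parquet produced upstream; it is re-sorted and rewritten below.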
 
-df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions.parquet_temp")
+df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet")
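+# Below: fold subreddit names to lowercase so differently-cased variants collapse
+# into a single value.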
 
 df = df.withColumn("subreddit_2", f.lower(f.col('subreddit')))
 df = df.drop('subreddit')
+df = df.withColumnRenamed('subreddit_2','subreddit')  # rename back so the code below can keep using 'subreddit'
 df = df.repartition("subreddit")
+# order rows within each partition by subreddit, CreatedAt, and id; per-file
+# ordering does not require a separate global sort
 df2 = df.sortWithinPartitions(["subreddit","CreatedAt","id"],ascending=True)
-df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet", partitionBy=["Year",'Month'], mode='overwrite')
+df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet2", mode='overwrite',compression='snappy')
 
 
 # we also want parquet files sorted by author (then CreatedAt and id).
 df = df.repartition("author")
+# same pattern as above: order rows within each partition by author, CreatedAt, and id
 df3 = df.sortWithinPartitions(["author","CreatedAt","id"],ascending=True)
-df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet", partitionBy=["Year",'Month'], mode='overwrite')
+df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet2", mode='overwrite',compression='snappy')
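+# Last step below: remove the temporary parquet written by part 1. Note that
+# os.remove only succeeds if the temp output is a single file rather than a
+# directory of part files.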
 
 os.remove("/gscratch/comdata/output/reddit_submissions.parquet_temp")
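+
+# Minimal sanity check (a sketch, not part of the pipeline): read the by-subreddit
+# output back; the path and column names are taken from the write call above.
+check = spark.read.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet2")
+check.select('subreddit','CreatedAt','id').show(5)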