# Spark script to write sorted, partitioned parquet files of reddit submissions.
 
import shutil

from pyspark.sql import functions as f
from pyspark.sql import SparkSession
 
# Configuration has to go on the builder before getOrCreate(); setting a
# SparkConf after the session already exists has no effect. The unused
# SparkContext and deprecated SQLContext handles are not needed.
spark = (SparkSession.builder
         .appName("Reddit submissions to parquet")
         .config("spark.sql.shuffle.partitions", 2000)
         .config("spark.sql.crossJoin.enabled", "true")
         .config("spark.debug.maxToStringFields", 200)
         .getOrCreate())
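# Optional sanity check (a sketch, not part of the pipeline): if a
# SparkSession already existed, getOrCreate() returns it and the configs
# above are silently ignored, so verify the shuffle-partition setting took.
assert spark.conf.get("spark.sql.shuffle.partitions") == "2000"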
 
df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet")
 
# Normalize subreddit names to lowercase; withColumn with an existing column
# name replaces that column in place, so no temporary column is needed.
df = df.withColumn("subreddit", f.lower(f.col("subreddit")))
df = df.withColumnRenamed("created_utc", "CreatedAt")
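# Assumption: month()/year()/dayofmonth() below need a date or timestamp
# column. If created_utc is still a raw unix epoch (bigint), cast it first:
#   df = df.withColumn("CreatedAt", f.col("CreatedAt").cast("timestamp"))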
 
df = df.withColumn("Month", f.month(f.col("CreatedAt")))
df = df.withColumn("Year", f.year(f.col("CreatedAt")))
df = df.withColumn("Day", f.dayofmonth(f.col("CreatedAt")))
# First 3 hex digits of the sha2 hash of the subreddit name.
df = df.withColumn("subreddit_hash", f.sha2(f.col("subreddit"), 256).substr(1, 3))
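# Hypothetical downstream use of the prefix (not done in this script): the
# 3 hex digits give 4096 buckets, which could shard work across jobs, e.g.:
#   df.filter(f.col("subreddit_hash") == "0a1")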
 
# Next, resort everything: cluster rows by subreddit and sort within each partition.
 
df = df.repartition("subreddit")
# Note: a global sort() here would shuffle again and undo the repartitioning,
# so sort only within the subreddit partitions.
df2 = df.sortWithinPartitions(["subreddit", "CreatedAt", "id"], ascending=True)
df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet2", mode='overwrite', compression='snappy')
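# Optional read-back check (a sketch; not part of the pipeline): confirm the
# output loads and has the expected columns.
#   spark.read.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet2").printSchema()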
 
# We also want parquet files partitioned by author and sorted by author, then creation time.
 
# Same pattern as above: cluster by author, sort within partitions, write.
df = df.repartition("author")
df3 = df.sortWithinPartitions(["author", "CreatedAt", "id"], ascending=True)
df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet2", mode='overwrite', compression='snappy')
 
# Spark writes parquet output as a directory, so the leftover temp output
# (presumably from an earlier step) needs shutil.rmtree, not os.remove.
shutil.rmtree("/gscratch/comdata/output/reddit_submissions.parquet_temp")