From 4ec9c1424711eb81edf6b6431d5dfae360013174 Mon Sep 17 00:00:00 2001
From: Nate E TeBlunthuis
Date: Mon, 6 Jul 2020 22:26:29 -0700
Subject: [PATCH] Move the spark part of submissions_2_parquet to a separate script.

---
 ...rquet.py => submissions_2_parquet_part1.py | 39 +----
 submissions_2_parquet_part2.py                | 42 +++++++++++++++++++
 2 files changed, 44 insertions(+), 37 deletions(-)
 rename submissions_2_parquet.py => submissions_2_parquet_part1.py (78%)
 create mode 100644 submissions_2_parquet_part2.py

diff --git a/submissions_2_parquet.py b/submissions_2_parquet_part1.py
similarity index 78%
rename from submissions_2_parquet.py
rename to submissions_2_parquet_part1.py
index 0142210..10bb5f0 100755
--- a/submissions_2_parquet.py
+++ b/submissions_2_parquet_part1.py
@@ -1,8 +1,8 @@
 #!/usr/bin/env python3
 
 # two stages:
-# 1. from gz to arrow parquet
-# 2. from arrow parquet to spark parquet
+# 1. from gz to arrow parquet (this script)
+# 2. from arrow parquet to spark parquet (submissions_2_parquet_part2.py)
 
 from collections import defaultdict
 from os import path
@@ -170,38 +170,3 @@ with pq.ParquetWriter("/gscratch/comdata/output/reddit_submissions.parquet_temp
 
 writer.close()
 
-import pyspark
-from pyspark.sql import functions as f
-from pyspark.sql.types import *
-from pyspark import SparkConf, SparkContext
-from pyspark.sql import SparkSession, SQLContext
-
-spark = SparkSession.builder.getOrCreate()
-sc = spark.sparkContext
-
-conf = SparkConf().setAppName("Reddit submissions to parquet")
-conf = conf.set('spark.sql.crossJoin.enabled',"true")
-
-sqlContext = pyspark.SQLContext(sc)
-
-df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions.parquet_temp")
-
-df = df.withColumn("subreddit_2", f.lower(f.col('subreddit')))
-df = df.drop('subreddit')
-df = df.withColumnRenamed('subreddit_2','subreddit')
-df = df.withColumnRenamed("created_utc","CreatedAt")
-df = df.withColumn("Month",f.month(f.col("CreatedAt")))
-df = df.withColumn("Year",f.year(f.col("CreatedAt")))
-df = df.withColumn("Day",f.dayofmonth(f.col("CreatedAt")))
-df = df.withColumn("subreddit_hash",f.sha2(f.col("subreddit"), 256)[0:3])
-
-# next we gotta resort it all.
-df2 = df.sort(["subreddit","author","id","Year","Month","Day"],ascending=True)
-df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet", partitionBy=["Year",'Month'], mode='overwrite')
-
-
-# we also want to have parquet files sorted by author then reddit.
-df3 = df.sort(["author","CreatedAt","subreddit","id","Year","Month","Day"],ascending=True)
-df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet", partitionBy=["Year",'Month'], mode='overwrite')
-
-os.remove("/gscratch/comdata/output/reddit_submissions.parquet_temp")
diff --git a/submissions_2_parquet_part2.py b/submissions_2_parquet_part2.py
new file mode 100644
index 0000000..1708548
--- /dev/null
+++ b/submissions_2_parquet_part2.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+
+# spark script to make sorted, and partitioned parquet files
+
+import pyspark
+from pyspark.sql import functions as f
+from pyspark.sql.types import *
+from pyspark import SparkConf, SparkContext
+from pyspark.sql import SparkSession, SQLContext
+import os
+
+spark = SparkSession.builder.getOrCreate()
+
+sc = spark.sparkContext
+
+conf = SparkConf().setAppName("Reddit submissions to parquet")
+conf = conf.set("spark.sql.shuffle.partitions",2000)
+conf = conf.set('spark.sql.crossJoin.enabled',"true")
+conf = conf.set('spark.debug.maxToStringFields',200)
+sqlContext = pyspark.SQLContext(sc)
+
+df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions.parquet_temp")
+
+df = df.withColumn("subreddit_2", f.lower(f.col('subreddit')))
+df = df.drop('subreddit')
+df = df.withColumnRenamed('subreddit_2','subreddit')
+df = df.withColumnRenamed("created_utc","CreatedAt")
+df = df.withColumn("Month",f.month(f.col("CreatedAt")))
+df = df.withColumn("Year",f.year(f.col("CreatedAt")))
+df = df.withColumn("Day",f.dayofmonth(f.col("CreatedAt")))
+df = df.withColumn("subreddit_hash",f.sha2(f.col("subreddit"), 256)[0:3])
+
+# next we gotta resort it all.
+df2 = df.sort(["subreddit","author","id","Year","Month","Day"],ascending=True)
+df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet", partitionBy=["Year",'Month'], mode='overwrite')
+
+
+# # we also want to have parquet files sorted by author then reddit.
+df3 = df.sort(["author","CreatedAt","subreddit","id","Year","Month","Day"],ascending=True)
+df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet", partitionBy=["Year",'Month'], mode='overwrite')
+
+os.remove("/gscratch/comdata/output/reddit_submissions.parquet_temp")
-- 
2.39.5
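For readers skimming the patch, the following is a minimal, self-contained sketch (not part of the repository) of the transform pattern the new submissions_2_parquet_part2.py applies: lowercase the subreddit, derive Year/Month/Day columns from the timestamp, keep the first three hex characters of a sha2-256 hash of the subreddit, then sort and write parquet partitioned by Year and Month. The toy rows, the local[2] master, and the /tmp output path are illustrative assumptions; the real script reads the arrow-parquet output of part 1 from /gscratch and would typically be launched on the cluster (e.g. via spark-submit) rather than run locally.

#!/usr/bin/env python3
# Illustrative sketch only: a tiny in-memory DataFrame run through the same
# transform pattern as submissions_2_parquet_part2.py. The toy rows, the
# local master, and the /tmp output path are assumptions for demonstration.
from pyspark.sql import SparkSession, functions as f

spark = SparkSession.builder.master("local[2]").getOrCreate()

toy = spark.createDataFrame(
    [("AskReddit", "alice", "t3_a1", "2020-07-06 22:26:29"),
     ("Python",    "bob",   "t3_b2", "2020-07-07 01:15:00")],
    ["subreddit", "author", "id", "created_utc"])

df = toy.withColumn("CreatedAt", f.to_timestamp(f.col("created_utc")))
df = df.withColumn("subreddit", f.lower(f.col("subreddit")))
df = df.withColumn("Year", f.year(f.col("CreatedAt")))
df = df.withColumn("Month", f.month(f.col("CreatedAt")))
df = df.withColumn("Day", f.dayofmonth(f.col("CreatedAt")))
# first three hex characters of the sha2-256 digest, as in the patch
df = df.withColumn("subreddit_hash", f.sha2(f.col("subreddit"), 256)[0:3])

# sort, then write parquet partitioned by Year and Month
(df.sort(["subreddit", "author", "id"], ascending=True)
   .write.parquet("/tmp/toy_submissions_by_subreddit.parquet",
                  partitionBy=["Year", "Month"], mode="overwrite"))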