From: Nate E TeBlunthuis
Date: Wed, 8 Jul 2020 06:29:36 +0000 (-0700)
Subject: Bugfixes in scripts.
X-Git-Url: https://code.communitydata.science/cdsc_reddit.git/commitdiff_plain/aa84a7df032dcb20bb284892f12cdac4853f31aa

Bugfixes in scripts.
---

diff --git a/comments_2_parquet.sh b/comments_2_parquet.sh
index 802cc70..096fa06 100755
--- a/comments_2_parquet.sh
+++ b/comments_2_parquet.sh
@@ -1,9 +1,9 @@
 #!/usr/bin/env bash

-echo "!#/usr/bin/bash" > job_script.sh
+echo "#!/usr/bin/bash" > job_script.sh
 echo "source $(pwd)/../bin/activate" >> job_script.sh
 echo "python3 $(pwd)/comments_2_parquet_part1.py" >> job_script.sh

-srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 job_script.sh
+srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 --pty job_script.sh

 start_spark_and_run.sh 1 $(pwd)/comments_2_parquet_part2.py
diff --git a/helper.py b/helper.py
index 4dc6210..b401cad 100644
--- a/helper.py
+++ b/helper.py
@@ -17,16 +17,8 @@ def find_dumps(dumpdir, base_pattern):
     ext_priority = ['.zst','.xz','.bz2']

     for base, exts in dumpext.items():
-        found = False
-        if len(exts) == 1:
-            yield base + exts[0]
-            found = True
-        else:
-            for ext in ext_priority:
-                if ext in exts:
-                    yield base + ext
-                    found = True
-        assert(found == True)
+        ext = [ext for ext in ext_priority if ext in exts][0]
+        yield base + ext

 def open_fileset(files):
     for fh in files:
diff --git a/submissions_2_parquet_part2.py b/submissions_2_parquet_part2.py
index bd538e2..b88764b 100644
--- a/submissions_2_parquet_part2.py
+++ b/submissions_2_parquet_part2.py
@@ -2,20 +2,22 @@

 # spark script to make sorted, and partitioned parquet files

+import pyspark
 from pyspark.sql import functions as f
 from pyspark.sql import SparkSession
+import os

 spark = SparkSession.builder.getOrCreate()
 sc = spark.sparkContext

-conf = SparkConf().setAppName("Reddit submissions to parquet")
+conf = pyspark.SparkConf().setAppName("Reddit submissions to parquet")
 conf = conf.set("spark.sql.shuffle.partitions",2000)
 conf = conf.set('spark.sql.crossJoin.enabled',"true")
 conf = conf.set('spark.debug.maxToStringFields',200)

 sqlContext = pyspark.SQLContext(sc)

-df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions.parquet_temp")
+df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet")

 df = df.withColumn("subreddit_2", f.lower(f.col('subreddit')))
 df = df.drop('subreddit')
@@ -30,13 +32,13 @@ df = df.withColumn("subreddit_hash",f.sha2(f.col("subreddit"), 256)[0:3])
 df = df.repartition("subreddit")
 df2 = df.sort(["subreddit","CreatedAt","id"],ascending=True)
 df2 = df.sortWithinPartitions(["subreddit","CreatedAt","id"],ascending=True)
-df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet", partitionBy=["Year",'Month'], mode='overwrite')
+df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet2", mode='overwrite',compression='snappy')

 #
 # we also want to have parquet files sorted by author then reddit.
 df = df.repartition("author")
 df3 = df.sort(["author","CreatedAt","id"],ascending=True)
 df3 = df.sortWithinPartitions(["author","CreatedAt","id"],ascending=True)
-df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet", partitionBy=["Year",'Month'], mode='overwrite')
+df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet2", mode='overwrite',compression='snappy')

 os.remove("/gscratch/comdata/output/reddit_submissions.parquet_temp")
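
A note on the helper.py hunk above: the refactored selection keeps only the
highest-priority extension for each dump, preferring .zst over .xz over .bz2,
and will now raise an IndexError rather than fail an assert when no known
extension is present. Below is a minimal standalone sketch of that behaviour;
the dump names and extension sets are made-up examples, not data from this
repository.

    # Sketch of the extension-priority selection now used in find_dumps().
    # The dumpext mapping below is a hypothetical example.
    ext_priority = ['.zst', '.xz', '.bz2']
    dumpext = {
        'RC_2019-01': ['.zst', '.bz2'],  # two formats available; .zst should win
        'RC_2019-02': ['.bz2'],          # only bzip2 available
    }

    for base, exts in dumpext.items():
        # pick the first (highest-priority) extension actually present for this dump;
        # raises IndexError if none of the known extensions is found
        ext = [e for e in ext_priority if e in exts][0]
        print(base + ext)  # -> RC_2019-01.zst, RC_2019-02.bz2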