code.communitydata.science - cdsc_reddit.git/commitdiff
Secondary sort for the by_author dataset should be CreatedAt.
author    Nate E TeBlunthuis <nathante@n2347.hyak.local>
Mon, 6 Jul 2020 06:27:18 +0000 (23:27 -0700)
committer Nate E TeBlunthuis <nathante@n2347.hyak.local>
Mon, 6 Jul 2020 06:29:35 +0000 (23:29 -0700)
comments_2_parquet.py
submissions_2_parquet.py

diff --git a/comments_2_parquet.py b/comments_2_parquet.py
index cff16342dede2f0d04f71aeb36d0c4e1f21ab999..bd853f72daf8899a371c06cde85150bf2fb3b418 100755 (executable)
@@ -1,7 +1,7 @@
 
 #!/usr/bin/env python3
 import pyspark
-nfrom pyspark.sql import functions as f
+from pyspark.sql import functions as f
 from pyspark.sql.types import *
 from pyspark import SparkConf, SparkContext
 from pyspark.sql import SparkSession, SQLContext
diff --git a/submissions_2_parquet.py b/submissions_2_parquet.py
index 6e46970a8139e58028d9504500be7bb640186fb0..014221044fb266a02d071b0106aeb59a02ed7e24 100755 (executable)
@@ -201,7 +201,7 @@ df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parq
 
 
 # we also want to have parquet files sorted by author then reddit. 
-df3 = df.sort(["author","subreddit","id","Year","Month","Day"],ascending=True)
+df3 = df.sort(["author","CreatedAt","subreddit","id","Year","Month","Day"],ascending=True)
 df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet", partitionBy=["Year",'Month'], mode='overwrite')
 
 os.remove("/gscratch/comdata/output/reddit_submissions.parquet_temp")
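For context, a minimal PySpark sketch (not part of the commit) of the effect of this change: with CreatedAt as the secondary sort key, each author's submissions come back in chronological order rather than grouped by subreddit. The toy rows and the session name are hypothetical; the column names mirror the script's schema.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("sort_demo").getOrCreate()

# Hypothetical rows; the real script reads Reddit submission dumps.
rows = [("alice", 1594000000, "r/apple", "t3_a2"),
        ("alice", 1593000000, "r/zebra", "t3_a1"),
        ("bob",   1592000000, "r/apple", "t3_b1")]
df = spark.createDataFrame(rows, ["author", "CreatedAt", "subreddit", "id"])

# Old key order ["author","subreddit",...] would put alice's newer r/apple
# submission first. With CreatedAt second, her older r/zebra submission
# sorts first, so rows within an author are chronological.
df3 = df.sort(["author", "CreatedAt", "subreddit", "id"], ascending=True)
df3.show()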
