Merge remote-tracking branch 'refs/remotes/origin/master' into master
[cdsc_reddit.git] / submissions_2_parquet_part1.py
index 10bb5f044f8dd4c196be201a45beea515075e820..16d1988f0e04ebbbf47796d97fd9b03f70a252f4 100755 (executable)
@@ -4,88 +4,30 @@
 # 1. from gz to arrow parquet (this script) 
 # 2. from arrow parquet to spark parquet (submissions_2_parquet_part2.py)
 
-from collections import defaultdict
-from os import path
-import glob
-import json
-import re
 from datetime import datetime
-from subprocess import Popen, PIPE
-from multiprocessing import Pool, SimpleQueue
-
-dumpdir = "/gscratch/comdata/raw_data/reddit_dumps/submissions"
-
-def find_json_files(dumpdir):
-    base_pattern = "RS_20*.*"
-
-    files = glob.glob(path.join(dumpdir,base_pattern))
-
-    # build a dictionary of possible extensions for each dump
-    dumpext = defaultdict(list)
-    for fpath in files:
-        fname, ext = path.splitext(fpath)
-        dumpext[fname].append(ext)
-
-    ext_priority = ['.zst','.xz','.bz2']
-
-    for base, exts in dumpext.items():
-        found = False
-        if len(exts) == 1:
-            yield base + exts[0]
-            found = True
-        else:
-            for ext in ext_priority:
-                if ext in exts:
-                    yield base + ext
-                    found = True
-        assert(found == True)
-
-files = list(find_json_files(dumpdir))
-
-def read_file(fh):
-    lines = open_input_file(fh)
-    for line in lines:
-        yield line
-
-def open_fileset(files):
-    for fh in files:
-        print(fh)
-        lines = open_input_file(fh)
-        for line in lines:
-            yield line
-
-def open_input_file(input_filename):
-    if re.match(r'.*\.7z$', input_filename):
-        cmd = ["7za", "x", "-so", input_filename, '*'] 
-    elif re.match(r'.*\.gz$', input_filename):
-        cmd = ["zcat", input_filename] 
-    elif re.match(r'.*\.bz2$', input_filename):
-        cmd = ["bzcat", "-dk", input_filename] 
-    elif re.match(r'.*\.bz', input_filename):
-        cmd = ["bzcat", "-dk", input_filename] 
-    elif re.match(r'.*\.xz', input_filename):
-        cmd = ["xzcat",'-dk', '-T 20',input_filename]
-    elif re.match(r'.*\.zst',input_filename):
-        cmd = ['zstd','-dck', input_filename]
-    try:
-        input_file = Popen(cmd, stdout=PIPE).stdout
-    except NameError as e:
-        print(e)
-        input_file = open(input_filename, 'r')
-    return input_file
+from multiprocessing import Pool
+from itertools import islice
+from helper import find_dumps, open_fileset
+import pandas as pd
+import pyarrow as pa
+import pyarrow.parquet as pq
+import simdjson
+import fire
+import os
 
 
+parser = simdjson.Parser()
 
 def parse_submission(post, names = None):
     if names is None:
-        names = ['id','author','subreddit','title','created_utc','permalink','url','domain','score','ups','downs','over_18','has_media','selftext','retrieved_on','num_comments','gilded','edited','time_edited','subreddit_type','subreddit_id','subreddit_subscribers','name','is_self','stickied','is_submitter','quarantine','error']
+        names = ['id','author','subreddit','title','created_utc','permalink','url','domain','score','ups','downs','over_18','has_media','selftext','retrieved_on','num_comments','gilded','edited','time_edited','subreddit_type','subreddit_id','subreddit_subscribers','name','is_self','stickied','quarantine','error']
 
     try:
-        post = json.loads(post)
-    except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e:
+        post = parser.parse(post)
+    except (ValueError) as e:
         #        print(e)
         #        print(post)
         row = [None for _ in names]
-        row[-1] = "json.decoder.JSONDecodeError|{0}|{1}".format(e,post)
+        row[-1] = "Error parsing json|{0}|{1}".format(e,post)
         return tuple(row)
 
     row = []
@@ -116,57 +58,61 @@ def parse_submission(post, names = None):
             row.append(post[name])
     return tuple(row)
 
-pool = Pool(28)
-
-stream = open_fileset(files)
-
-N = 100000
-
-rows = pool.imap_unordered(parse_submission, stream, chunksize=int(N/28))
-
-from itertools import islice
-import pandas as pd
-import pyarrow as pa
-import pyarrow.parquet as pq
-
-schema = pa.schema([
-    pa.field('id', pa.string(),nullable=True),
-    pa.field('author', pa.string(),nullable=True),
-    pa.field('subreddit', pa.string(),nullable=True),
-    pa.field('title', pa.string(),nullable=True),
-    pa.field('created_utc', pa.timestamp('ms'),nullable=True),
-    pa.field('permalink', pa.string(),nullable=True),
-    pa.field('url', pa.string(),nullable=True),
-    pa.field('domain', pa.string(),nullable=True),
-    pa.field('score', pa.int64(),nullable=True),
-    pa.field('ups', pa.int64(),nullable=True),
-    pa.field('downs', pa.int64(),nullable=True),
-    pa.field('over_18', pa.bool_(),nullable=True),
-    pa.field('has_media',pa.bool_(),nullable=True),
-    pa.field('selftext',pa.string(),nullable=True),
-    pa.field('retrieved_on', pa.timestamp('ms'),nullable=True),
-    pa.field('num_comments', pa.int64(),nullable=True),
-    pa.field('gilded',pa.int64(),nullable=True),
-    pa.field('edited',pa.bool_(),nullable=True),
-    pa.field('time_edited',pa.timestamp('ms'),nullable=True),
-    pa.field('subreddit_type',pa.string(),nullable=True),
-    pa.field('subreddit_id',pa.string(),nullable=True),
-    pa.field('subreddit_subscribers',pa.int64(),nullable=True),
-    pa.field('name',pa.string(),nullable=True),
-    pa.field('is_self',pa.bool_(),nullable=True),
-    pa.field('stickied',pa.bool_(),nullable=True),
-    pa.field('is_submitter',pa.bool_(),nullable=True),
-    pa.field('quarantine',pa.bool_(),nullable=True),
-    pa.field('error',pa.string(),nullable=True)])
-
-with  pq.ParquetWriter("/gscratch/comdata/output/reddit_submissions.parquet_temp",schema=schema,compression='snappy',flavor='spark') as writer:
-    while True:
-        chunk = islice(rows,N)
-        pddf = pd.DataFrame(chunk, columns=schema.names)
-        table = pa.Table.from_pandas(pddf,schema=schema)
-        if table.shape[0] == 0:
-            break
-        writer.write_table(table)
-
-    writer.close()
-
+def parse_dump(partition):
+
+    N=10000
+    stream = open_fileset([f"/gscratch/comdata/raw_data/reddit_dumps/submissions/{partition}"])
+    rows = map(parse_submission,stream)
+    schema = pa.schema([
+        pa.field('id', pa.string(),nullable=True),
+        pa.field('author', pa.string(),nullable=True),
+        pa.field('subreddit', pa.string(),nullable=True),
+        pa.field('title', pa.string(),nullable=True),
+        pa.field('created_utc', pa.timestamp('ms'),nullable=True),
+        pa.field('permalink', pa.string(),nullable=True),
+        pa.field('url', pa.string(),nullable=True),
+        pa.field('domain', pa.string(),nullable=True),
+        pa.field('score', pa.int64(),nullable=True),
+        pa.field('ups', pa.int64(),nullable=True),
+        pa.field('downs', pa.int64(),nullable=True),
+        pa.field('over_18', pa.bool_(),nullable=True),
+        pa.field('has_media',pa.bool_(),nullable=True),
+        pa.field('selftext',pa.string(),nullable=True),
+        pa.field('retrieved_on', pa.timestamp('ms'),nullable=True),
+        pa.field('num_comments', pa.int64(),nullable=True),
+        pa.field('gilded',pa.int64(),nullable=True),
+        pa.field('edited',pa.bool_(),nullable=True),
+        pa.field('time_edited',pa.timestamp('ms'),nullable=True),
+        pa.field('subreddit_type',pa.string(),nullable=True),
+        pa.field('subreddit_id',pa.string(),nullable=True),
+        pa.field('subreddit_subscribers',pa.int64(),nullable=True),
+        pa.field('name',pa.string(),nullable=True),
+        pa.field('is_self',pa.bool_(),nullable=True),
+        pa.field('stickied',pa.bool_(),nullable=True),
+        pa.field('quarantine',pa.bool_(),nullable=True),
+        pa.field('error',pa.string(),nullable=True)])
+
+    if not os.path.exists("/gscratch/comdata/output/temp/reddit_submissions.parquet/"):
+        os.mkdir("/gscratch/comdata/output/temp/reddit_submissions.parquet/")
+
+    with pq.ParquetWriter(f"/gscratch/comdata/output/temp/reddit_submissions.parquet/{partition}",schema=schema,compression='snappy',flavor='spark') as writer:
+        while True:
+            chunk = islice(rows,N)
+            pddf = pd.DataFrame(chunk, columns=schema.names)
+            table = pa.Table.from_pandas(pddf,schema=schema)
+            if table.shape[0] == 0:
+                break
+            writer.write_table(table)
+
+        writer.close()
+
+def gen_task_list(dumpdir="/gscratch/comdata/raw_data/reddit_dumps/submissions"):
+    files = list(find_dumps(dumpdir,base_pattern="RS_20*.*"))
+    with open("parse_submissions_task_list",'w') as of:
+        for fpath in files:
+            partition = os.path.split(fpath)[1]
+            of.write(f'python3 submissions_2_parquet_part1.py parse_dump {partition}\n')
+
+if __name__ == "__main__":
+    fire.Fire({'parse_dump':parse_dump,
+              'gen_task_list':gen_task_list})
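For reference, a minimal usage sketch of the new fire-based entry points introduced by this change, driven either through the generated task list or directly from Python. The partition name RS_2020-06.zst and the use of GNU parallel as the task runner are illustrative assumptions, not part of the repository.

# Minimal usage sketch (assumptions noted in the comments; not part of the repository).
# Stage 1 of the pipeline described at the top of the script:
#
#   python3 submissions_2_parquet_part1.py gen_task_list
#   parallel --jobs 8 < parse_submissions_task_list   # GNU parallel is one possible runner
#
# The same two functions can also be called directly from Python:
from submissions_2_parquet_part1 import gen_task_list, parse_dump

gen_task_list()               # writes parse_submissions_task_list, one parse_dump command per dump file
parse_dump("RS_2020-06.zst")  # hypothetical partition name; output lands under
                              # /gscratch/comdata/output/temp/reddit_submissions.parquet/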
