# This conversion runs in two stages:
# 1. from gz to arrow parquet (this script)
# 2. from arrow parquet to spark parquet (submissions_2_parquet_part2.py)
from datetime import datetime
from multiprocessing import Pool
from itertools import islice
from helper import find_dumps, open_fileset
import json
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
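
# helper is this project's own module; judging from how they are used below,
# find_dumps locates the compressed dump files under a directory and
# open_fileset streams their contents as one sequence of raw JSON lines.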
def parse_submission(post, names = None):
    if names is None:
        names = ['id','author','subreddit','title','created_utc','permalink','url','domain','score','ups','downs','over_18','has_media','selftext','retrieved_on','num_comments','gilded','edited','time_edited','subreddit_type','subreddit_id','subreddit_subscribers','name','is_self','stickied','is_submitter','quarantine','error']
    try:
        post = json.loads(post)
    except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e:
        # unparseable line: return an all-null row with the exception and the
        # offending text recorded in the final 'error' column
        row = [None for _ in names]
        row[-1] = "json.decoder.JSONDecodeError|{0}|{1}".format(e, post)
        return tuple(row)
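
    # happy path: build the row in the exact order given by `names`, which
    # must match the parquet schema declared below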
    row = []
    for name in names:
        if name == 'created_utc' or name == 'retrieved_on':
            val = post.get(name, None)
            if val is None:
                row.append(None)
            else:
                row.append(datetime.fromtimestamp(int(val), tz=None))
        elif name == 'edited':
            # in the raw JSON, 'edited' is either False or an edit timestamp;
            # split it into a boolean plus the 'time_edited' timestamp column
            val = post[name]
            if isinstance(val, bool):
                row.append(val)
                row.append(None)
            else:
                row.append(True)
                row.append(datetime.fromtimestamp(int(val), tz=None))
        elif name == "time_edited":
            # already appended alongside 'edited' above
            continue
        elif name == 'has_media':
            row.append(post.get('media', None) is not None)
        elif name not in post:
            row.append(None)
        else:
            row.append(post[name])
    return tuple(row)
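
# Parse every dump in parallel: workers pull raw JSON lines from a single
# stream and return row tuples as they finish.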
dumpdir = "/gscratch/comdata/raw_data/reddit_dumps/submissions"

files = list(find_dumps(dumpdir))

pool = Pool(28)

stream = open_fileset(files)

N = 100000  # rows accumulated per parquet write batch

rows = pool.imap_unordered(parse_submission, stream, chunksize=int(N/28))
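
# imap_unordered yields rows as soon as any worker finishes them; row order
# is not preserved, which is fine here because every row is self-contained.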
schema = pa.schema([
    pa.field('id', pa.string(), nullable=True),
    pa.field('author', pa.string(), nullable=True),
    pa.field('subreddit', pa.string(), nullable=True),
    pa.field('title', pa.string(), nullable=True),
    pa.field('created_utc', pa.timestamp('ms'), nullable=True),
    pa.field('permalink', pa.string(), nullable=True),
    pa.field('url', pa.string(), nullable=True),
    pa.field('domain', pa.string(), nullable=True),
    pa.field('score', pa.int64(), nullable=True),
    pa.field('ups', pa.int64(), nullable=True),
    pa.field('downs', pa.int64(), nullable=True),
    pa.field('over_18', pa.bool_(), nullable=True),
    pa.field('has_media', pa.bool_(), nullable=True),
    pa.field('selftext', pa.string(), nullable=True),
    pa.field('retrieved_on', pa.timestamp('ms'), nullable=True),
    pa.field('num_comments', pa.int64(), nullable=True),
    pa.field('gilded', pa.int64(), nullable=True),
    pa.field('edited', pa.bool_(), nullable=True),
    pa.field('time_edited', pa.timestamp('ms'), nullable=True),
    pa.field('subreddit_type', pa.string(), nullable=True),
    pa.field('subreddit_id', pa.string(), nullable=True),
    pa.field('subreddit_subscribers', pa.int64(), nullable=True),
    pa.field('name', pa.string(), nullable=True),
    pa.field('is_self', pa.bool_(), nullable=True),
    pa.field('stickied', pa.bool_(), nullable=True),
    pa.field('is_submitter', pa.bool_(), nullable=True),
    pa.field('quarantine', pa.bool_(), nullable=True),
    pa.field('error', pa.string(), nullable=True)])
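
# flavor='spark' makes pyarrow sanitize field names and timestamps for Spark
# compatibility, since stage 2 reads this file back with Spark.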
with pq.ParquetWriter("/gscratch/comdata/output/reddit_submissions.parquet_temp", schema=schema, compression='snappy', flavor='spark') as writer:
    # drain the result stream N rows at a time; an empty batch means the
    # dumps are exhausted
    while True:
        chunk = islice(rows, N)
        pddf = pd.DataFrame(chunk, columns=schema.names)
        table = pa.Table.from_pandas(pddf, schema=schema)
        if table.shape[0] == 0:
            break
        writer.write_table(table)