#!/usr/bin/env python3
import json
from datetime import datetime
from multiprocessing import Pool
from itertools import islice
from pathlib import Path

from helper import find_dumps, open_fileset

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
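
# Stream the monthly Reddit comment dumps (RC_20*) line by line, parse each
# JSON comment into a fixed-width tuple, and write the results out as a series
# of Spark-flavored Parquet part files. find_dumps and open_fileset are
# project-local helpers for locating the dump files and reading them as one
# stream of raw JSON lines.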
def parse_comment(comment, names=None):
    if names is None:
        names = ["id","subreddit","link_id","parent_id","created_utc","author","ups","downs","score","edited","subreddit_type","subreddit_id","stickied","is_submitter","body","error"]
    try:
        comment = json.loads(comment)
    except json.decoder.JSONDecodeError as e:
        # Keep malformed lines rather than dropping them: emit an all-null row
        # with the exception and the raw text recorded in the 'error' column.
        row = [None for _ in names]
        row[-1] = "json.decoder.JSONDecodeError|{0}|{1}".format(e, comment)
        return tuple(row)
    row = []
    for name in names:
        if name == 'created_utc':
            row.append(datetime.fromtimestamp(int(comment['created_utc']), tz=None))
        elif name == 'edited':
            val = comment[name]
            if isinstance(val, bool):
                # 'edited' is a bool for unedited comments; no edit timestamp.
                row.append(val)
                row.append(None)
            else:
                # Otherwise it is the epoch time of the edit: record the flag
                # and the 'time_edited' timestamp together.
                row.append(True)
                row.append(datetime.fromtimestamp(int(val), tz=None))
        elif name == "time_edited":
            # Already emitted alongside 'edited' above.
            continue
        elif name not in comment:
            row.append(None)
        else:
            row.append(comment[name])

    return tuple(row)
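
# A minimal sanity check (hypothetical input, not taken from the dumps):
# parse_comment('{"id":"c1","subreddit":"test","created_utc":1325376000,"edited":false,"body":"hi"}')
# returns a 17-element tuple aligned with the Arrow schema declared below.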
#    conf = sc._conf.setAll([('spark.executor.memory', '20g'), ('spark.app.name', 'extract_reddit_timeline'), ('spark.executor.cores', '26'), ('spark.cores.max', '26'), ('spark.driver.memory','84g'),('spark.driver.maxResultSize','0'),('spark.local.dir','/gscratch/comdata/spark_tmp')])
dumpdir = "/gscratch/comdata/raw_data/reddit_dumps/comments/"

files = list(find_dumps(dumpdir, base_pattern="RC_20*"))

pool = Pool(28)

stream = open_fileset(files)

N = 100000  # batch size: rows pulled from the pool per output chunk below

rows = pool.imap_unordered(parse_comment, stream, chunksize=int(N/28))
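
# Explicit Arrow schema for the output. Every field is nullable because any
# key can be missing from a given dump line; 'edited' is split into a bool
# flag plus a 'time_edited' timestamp, and 'error' records parse failures.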
schema = pa.schema([
    pa.field('id', pa.string(), nullable=True),
    pa.field('subreddit', pa.string(), nullable=True),
    pa.field('link_id', pa.string(), nullable=True),
    pa.field('parent_id', pa.string(), nullable=True),
    pa.field('created_utc', pa.timestamp('ms'), nullable=True),
    pa.field('author', pa.string(), nullable=True),
    pa.field('ups', pa.int64(), nullable=True),
    pa.field('downs', pa.int64(), nullable=True),
    pa.field('score', pa.int64(), nullable=True),
    pa.field('edited', pa.bool_(), nullable=True),
    pa.field('time_edited', pa.timestamp('ms'), nullable=True),
    pa.field('subreddit_type', pa.string(), nullable=True),
    pa.field('subreddit_id', pa.string(), nullable=True),
    pa.field('stickied', pa.bool_(), nullable=True),
    pa.field('is_submitter', pa.bool_(), nullable=True),
    pa.field('body', pa.string(), nullable=True),
    pa.field('error', pa.string(), nullable=True),
])
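
# Prepare the output directory: create it if needed, otherwise clear out any
# part files left behind by a previous run.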
p = Path("/gscratch/comdata/output/reddit_comments.parquet_temp2")

if not p.is_dir():
    if p.exists():
        p.unlink()
    p.mkdir()
else:
    list(map(Path.unlink, p.glob('*')))
part_size = int(1e7)  # rows per part file before rotating to a new one
part = 1
n_output = 0

writer = pq.ParquetWriter(f"/gscratch/comdata/output/reddit_comments.parquet_temp2/part_{part}.parquet", schema=schema, compression='snappy', flavor='spark')
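
# Drain the worker pool in batches of N rows, buffering each batch through a
# pandas DataFrame so it can be converted to an Arrow table and appended to
# the current part file. flavor='spark' keeps the files readable by Spark.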
while True:
    # Rotate to a fresh part file once the current one has enough rows.
    if n_output > part_size:
        writer.close()
        part = part + 1
        n_output = 0

        writer = pq.ParquetWriter(f"/gscratch/comdata/output/reddit_comments.parquet_temp2/part_{part}.parquet", schema=schema, compression='snappy', flavor='spark')
    n_output += N
    chunk = islice(rows, N)
    pddf = pd.DataFrame(chunk, columns=schema.names)
    table = pa.Table.from_pandas(pddf, schema=schema)
    if table.shape[0] == 0:
        # The stream is exhausted.
        break
    writer.write_table(table)

writer.close()
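
# The part files now form one Parquet dataset; as an optional sanity check
# they can be read back with e.g.
# pq.ParquetDataset("/gscratch/comdata/output/reddit_comments.parquet_temp2").read()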