Support passing in a list of tfidf vectors.
[cdsc_reddit.git] / examples / pyarrow_reading.py
index d67376db9c117f2f23a6b4af79fcfd7f203580a1..59f9fd91bfb536f2c181e5b5d3f79eac1b8214b0 100644
@@ -1,8 +1,8 @@
 import pyarrow.dataset as ds
-import pyarrow as pa
+
 # A pyarrow dataset abstracts reading, writing, or filtering a parquet file. It does not read data into memory.
 #dataset = ds.dataset(pathlib.Path('/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet/'), format='parquet', partitioning='hive')
-dataset = ds.dataset('/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet/', format='parquet', partitioning='hive')
+dataset = ds.dataset('/gscratch/comdata/output/reddit_comments_by_subreddit.parquet/', format='parquet')
 
 # let's get all the comments to two subreddits:
 subreddits_to_pull = ['seattle','seattlewa']

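A minimal sketch (not part of the diff above) of how the resulting dataset might then be filtered and read, assuming the parquet files expose a 'subreddit' column; the filter is applied at scan time, so only matching rows are loaded into memory:

import pyarrow.dataset as ds

# Same dataset path and subreddit list as in the example above.
dataset = ds.dataset('/gscratch/comdata/output/reddit_comments_by_subreddit.parquet/', format='parquet')
subreddits_to_pull = ['seattle','seattlewa']

# Build a filter expression; pyarrow pushes it down during the scan,
# so only rows whose 'subreddit' value matches are materialized.
table = dataset.to_table(filter=ds.field('subreddit').isin(subreddits_to_pull))
print(table.num_rows)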