--- /dev/null
+++ b/parallel_sql_job.sh
+#!/bin/bash
+## parallel_sql_job.sh
+#SBATCH --job-name=tf_subreddit_comments
+## Allocation Definition
+#SBATCH --account=comdata-ckpt
+#SBATCH --partition=ckpt
+## Resources
+## Nodes. This should always be 1 for parallel-sql.
+#SBATCH --nodes=1
+## Walltime (12 hours)
+#SBATCH --time=12:00:00
+## Memory per node
+#SBATCH --mem=100G
+#SBATCH --cpus-per-task=4
+#SBATCH --ntasks=1
+
+
+module load parallel_sql
+
+# Put commands to load any other modules here (e.g. matlab).
+# The command below tells parallel_sql to pull tasks from its database and
+# run them on this node in parallel; --jobs 4 matches --cpus-per-task=4 above,
+# so up to four tasks run at a time.
+parallel-sql --sql -a parallel --exit-on-term --jobs 4
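+
+## Usage sketch (illustrative; the psu flags follow UW Hyak's parallel-sql
+## tooling, so verify them on your cluster before relying on this):
+##   module load parallel_sql
+##   cat tf_task_list | psu --load                 # queue one command per line
+##   for i in $(seq 1 5); do sbatch parallel_sql_job.sh; done   # start workers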
subreddit_weeks = groupby(rows, lambda r: (r.subreddit, r.week))
- mwe_tokenize = MWETokenizer().tokenize
+ if mwe_pass != 'first':
+ mwe_dataset = ds.dataset('/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.parquet',format='parquet')
+ mwe_dataset = mwe_dataset.to_table(columns=['phrase','phraseCount','phrasePWMI']).to_pandas()
+ mwe_dataset = mwe_dataset.sort_values(['phrasePWMI'],ascending=False)
+ # keep the 1000 highest-PWMI phrases; MWETokenizer expects each expression
+ # as a sequence of tokens, not a raw string
+ mwe_phrases = list(mwe_dataset.phrase[0:1000])
+ mwe_phrases = [tuple(s.split(' ')) for s in mwe_phrases]
+
+
+ mwe_tokenize = MWETokenizer(mwe_phrases).tokenize
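+ # Illustrative example (hypothetical input, not data from this job):
+ # MWETokenizer merges known multiword expressions into single
+ # underscore-joined tokens, e.g.
+ #   MWETokenizer([("machine", "learning")]).tokenize(["machine", "learning", "rocks"])
+ #   -> ["machine_learning", "rocks"]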
+
def remove_punct(sentence):
new_sentence = []
else:
# apply the multiword-expression tokenizer, then remove stopWords
+ sentences = map(mwe_tokenize, sentences)
sentences = map(lambda s: filter(lambda token: token not in stopWords, s), sentences)
return chain(* sentences)
outchunksize = 10000
- with pq.ParquetWriter("/gscratch/comdata/users/nathante/reddit_tfidf_test.parquet_temp/{partition}",schema=schema,compression='snappy',flavor='spark') as writer, pq.ParquetWriter("/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/{partition}",schema=author_schema,compression='snappy',flavor='spark') as author_writer:
+ with pq.ParquetWriter(f"/gscratch/comdata/users/nathante/reddit_tfidf_test.parquet_temp/{partition}",schema=schema,compression='snappy',flavor='spark') as writer, pq.ParquetWriter(f"/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/{partition}",schema=author_schema,compression='snappy',flavor='spark') as author_writer:
while True:
chunk = islice(outrows,outchunksize)
pddf = pd.DataFrame(chunk, columns=["is_token"] + schema.names)
- print(pddf)
- author_pddf = pddf.loc[pddf.is_token == False]
+
+ author_pddf = pddf.loc[pddf.is_token == False, schema.names]
+ pddf = pddf.loc[pddf.is_token == True, schema.names]
+
author_pddf = author_pddf.rename({'term':'author'}, axis='columns')
author_pddf = author_pddf.loc[:,author_schema.names]
-
- pddf = pddf.loc[pddf.is_token == True, schema.names]
- print(pddf)
- print(author_pddf)
table = pa.Table.from_pandas(pddf,schema=schema)
author_table = pa.Table.from_pandas(author_pddf,schema=author_schema)
if table.shape[0] == 0:
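# Sketch of the assumed loop exit (following the chunked-read pattern above):
# an empty table means islice() has exhausted outrows, so the loop breaks;
# otherwise both tables are written and the loop repeats, e.g.
#     break
# writer.write_table(table)
# author_writer.write_table(author_table)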
with open("tf_task_list",'w') as outfile:
for f in files:
if f.endswith(".parquet"):
- outfile.write(f"source python3 tf_comments.py weekly_tf {f}\n")
+ outfile.write(f"python3 tf_comments.py weekly_tf {f}\n")
if __name__ == "__main__":
fire.Fire({"gen_task_list":gen_task_list,