code.communitydata.science - cdsc_reddit.git/commitdiff
Use groupby-joins instead of windows
author  Nate E TeBlunthuis <nathante@n2347.hyak.local>
Sun, 9 Aug 2020 07:21:50 +0000 (00:21 -0700)
committer  Nate E TeBlunthuis <nathante@n2347.hyak.local>
Sun, 9 Aug 2020 07:21:50 +0000 (00:21 -0700)
checkpoint_parallelsql.sbatch [new file with mode: 0644]
run_tf_jobs.sh [new file with mode: 0755]
tf_comments.py
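
The subject line names the technique: compute per-group totals with a groupBy followed by a join back onto the rows, rather than with a window function. The hunks below do not show that change directly, so what follows is only a generic sketch of the pattern, assuming Spark DataFrames and made-up column names (subreddit, tf) that are not taken from this commit:

    # Hypothetical sketch of "groupby + join instead of a window"; nothing here is from the diff.
    from pyspark.sql import SparkSession, functions as f
    from pyspark.sql.window import Window

    spark = SparkSession.builder.getOrCreate()
    df = spark.read.parquet("comments.parquet")  # made-up input path

    # Window version: every row carries its group total.
    w = Window.partitionBy("subreddit")
    with_window = df.withColumn("subreddit_tf", f.sum("tf").over(w))

    # groupby + join version: aggregate once, then join the totals back on.
    totals = df.groupBy("subreddit").agg(f.sum("tf").alias("subreddit_tf"))
    with_join = df.join(totals, on="subreddit", how="left")

Both yield the same columns; the join form can avoid the sort a wide window requires, which is presumably the motivation here.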

diff --git a/checkpoint_parallelsql.sbatch b/checkpoint_parallelsql.sbatch
new file mode 100644 (file)
index 0000000..a54aab1
--- /dev/null
+++ b/checkpoint_parallelsql.sbatch
@@ -0,0 +1,24 @@
+#!/bin/bash
+## parallel_sql_job.sh
+#SBATCH --job-name=tf_subreddit_comments
+## Allocation Definition
+#SBATCH --account=comdata-ckpt
+#SBATCH --partition=ckpt
+## Resources
+## Nodes. This should always be 1 for parallel-sql.
+#SBATCH --nodes=1    
+## Walltime (12 hours)
+#SBATCH --time=12:00:00
+## Memory per node
+#SBATCH --mem=100G
+#SBATCH --cpus-per-task=4
+#SBATCH --ntasks=1
+
+
+module load parallel_sql
+
+# Put commands to load other modules here (e.g. matlab etc.)
+# The command below makes parallel_sql fetch tasks from the database
+# and run them on the node in parallel, so a 16-core node will have
+# 16 tasks running at one time.
+parallel-sql --sql -a parallel --exit-on-term --jobs 4
diff --git a/run_tf_jobs.sh b/run_tf_jobs.sh
new file mode 100755 (executable)
index 0000000..fc191d4
--- /dev/null
+++ b/run_tf_jobs.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+module load parallel_sql
+source ../bin/activate
+python3 tf_comments.py gen_task_list
+psu --del --Y
+cat tf_task_list | psu --load
+
+for job in $(seq 1 50); do sbatch checkpoint_parallelsql.sbatch; done;
diff --git a/tf_comments.py b/tf_comments.py
index 85eebecc5d6ceed9482b13241d3420ae61d4bfe5..277b76fe84a4b745f91bfd9eccf7fed1d11b41c9 100644 (file)
--- a/tf_comments.py
+++ b/tf_comments.py
@@ -64,7 +64,15 @@ def weekly_tf(partition, mwe_pass = 'first'):
 
     subreddit_weeks = groupby(rows, lambda r: (r.subreddit, r.week))
 
-    mwe_tokenize = MWETokenizer().tokenize
+    if mwe_pass != 'first':
+        mwe_dataset = ds.dataset(f'/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.parquet',format='parquet')
+        mwe_dataset = mwe_dataset.to_pandas(columns=['phrase','phraseCount','phrasePWMI'])
+        mwe_dataset = mwe_dataset.sort_values(['phrasePWMI'],ascending=False)
+        mwe_phrases = list(mwe_dataset.phrase[0:1000])
+        
+        
+        mwe_tokenize = MWETokenizer(mwe_phrases).tokenize
+        
 
     def remove_punct(sentence):
         new_sentence = []
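
The hunk above makes the tokenizer data-driven: on any pass after the first, it loads the phrase/PWMI table produced earlier, keeps the 1,000 highest-PWMI phrases, and hands them to nltk's MWETokenizer. For reference, MWETokenizer expects each multi-word expression as a sequence of tokens and rejoins matches with its separator; a small standalone sketch with invented phrases:

    from nltk.tokenize import MWETokenizer

    # Each MWE is given as a tuple of tokens; matched spans are rejoined with `separator`.
    tokenizer = MWETokenizer([("new", "york"), ("machine", "learning")], separator="_")
    print(tokenizer.tokenize("i study machine learning in new york".split()))
    # ['i', 'study', 'machine_learning', 'in', 'new_york']

Because the constructor treats each entry as a sequence of tokens, phrases stored as single strings generally need to be split (for example on spaces) before being passed in.
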
@@ -119,6 +127,7 @@ def weekly_tf(partition, mwe_pass = 'first'):
 
         else:
             # remove stopWords
+            sentences = map(mwe_tokenize, sentences)
             sentences = map(lambda s: filter(lambda token: token not in stopWords, s), sentences)
             return chain(* sentences)
 
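The hunk above slots the MWE pass in just before stopword removal, and the pipeline stays lazy: each sentence is mapped through the tokenizer, filtered, and finally flattened into a single token stream by chain. A toy version of that map/filter/chain pattern, with invented sentences and stopwords:

    from itertools import chain

    stopWords = {"the", "a"}
    sentences = [["the", "new", "york", "times"], ["a", "good", "read"]]

    mwe_tokenize = lambda s: s  # identity stand-in for the real MWE tokenizer
    sentences = map(mwe_tokenize, sentences)
    sentences = map(lambda s: filter(lambda token: token not in stopWords, s), sentences)
    print(list(chain(*sentences)))
    # ['new', 'york', 'times', 'good', 'read']
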
@@ -142,19 +151,17 @@ def weekly_tf(partition, mwe_pass = 'first'):
 
     outchunksize = 10000
 
-    with pq.ParquetWriter("/gscratch/comdata/users/nathante/reddit_tfidf_test.parquet_temp/{partition}",schema=schema,compression='snappy',flavor='spark') as writer, pq.ParquetWriter("/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/{partition}",schema=author_schema,compression='snappy',flavor='spark') as author_writer:
+    with pq.ParquetWriter(f"/gscratch/comdata/users/nathante/reddit_tfidf_test.parquet_temp/{partition}",schema=schema,compression='snappy',flavor='spark') as writer, pq.ParquetWriter(f"/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/{partition}",schema=author_schema,compression='snappy',flavor='spark') as author_writer:
         while True:
             chunk = islice(outrows,outchunksize)
             pddf = pd.DataFrame(chunk, columns=["is_token"] + schema.names)
-            print(pddf)
-            author_pddf = pddf.loc[pddf.is_token == False]
+
+            author_pddf = pddf.loc[pddf.is_token == False, schema.names]
+            pddf = pddf.loc[pddf.is_token == True, schema.names]
+
             author_pddf = author_pddf.rename({'term':'author'}, axis='columns')
             author_pddf = author_pddf.loc[:,author_schema.names]
-            
-            pddf = pddf.loc[pddf.is_token == True, schema.names]
 
-            print(pddf)
-            print(author_pddf)
             table = pa.Table.from_pandas(pddf,schema=schema)
             author_table = pa.Table.from_pandas(author_pddf,schema=author_schema)
             if table.shape[0] == 0:
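
The hunk above fixes the output paths (the old strings were not f-strings, so every partition would have written to a literal "{partition}" file) and splits each chunk into term rows and author rows with the is_token flag before appending each piece to its own writer. A self-contained sketch of the same chunked ParquetWriter pattern, using a made-up two-column schema and stand-in rows:

    import pandas as pd
    import pyarrow as pa
    import pyarrow.parquet as pq
    from itertools import islice

    schema = pa.schema([("term", pa.string()), ("tf", pa.int64())])  # made-up schema
    rows = iter([("foo", 3), ("bar", 1), ("baz", 2)])                # stand-in row iterator
    outchunksize = 2

    with pq.ParquetWriter("example.parquet", schema=schema, compression="snappy") as writer:
        while True:
            chunk = list(islice(rows, outchunksize))
            if not chunk:  # iterator exhausted
                break
            pddf = pd.DataFrame(chunk, columns=schema.names)
            writer.write_table(pa.Table.from_pandas(pddf, schema=schema))
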
@@ -171,7 +178,7 @@ def gen_task_list():
     with open("tf_task_list",'w') as outfile:
         for f in files:
             if f.endswith(".parquet"):
-                outfile.write(f"source python3 tf_comments.py weekly_tf {f}\n")
+                outfile.write(f"python3 tf_comments.py weekly_tf {f}\n")
 
 if __name__ == "__main__":
     fire.Fire({"gen_task_list":gen_task_list,
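
The last hunk drops the "source" prefix from the generated task list, so every line of tf_task_list is a plain shell command that parallel-sql can run directly; each command re-enters this script through python-fire, which maps subcommand names to functions. A minimal sketch of that dispatch pattern (toy function bodies, not the real ones):

    import fire

    def gen_task_list():
        print("writing tf_task_list")  # stand-in body

    def weekly_tf(partition):
        print(f"computing term frequencies for {partition}")  # stand-in body

    if __name__ == "__main__":
        # `python3 tf_comments.py weekly_tf foo.parquet` calls weekly_tf("foo.parquet")
        fire.Fire({"gen_task_list": gen_task_list,
                   "weekly_tf": weekly_tf})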
