#!/usr/bin/env python3
import pandas as pd
import pyarrow as pa
import pyarrow.dataset as ds
import pyarrow.parquet as pq
from itertools import groupby, islice, chain
import fire
from collections import Counter
import os
from nltk import wordpunct_tokenize, MWETokenizer, sent_tokenize
from nltk.corpus import stopwords
from nltk.util import ngrams
import string
from random import random
from redditcleaner import clean

# compute term frequencies for comments in each subreddit by week
def weekly_tf(partition, mwe_pass='first'):
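    """Count weekly term frequencies and per-author comment counts for one
    partition of the comments dataset.

    partition -- file name of a parquet partition under
                 reddit_comments_by_subreddit.parquet/
    mwe_pass  -- on the 'first' pass, write out a 10% sample of 1-4 grams so
                 that multiword expressions can be detected; on any later pass,
                 load the detected multiword expressions and apply them while
                 tokenizing.
    """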
    dataset = ds.dataset(f'/gscratch/comdata/output/reddit_comments_by_subreddit.parquet/{partition}', format='parquet')

    # make sure the output directories exist; they must match the paths the
    # ngram sample file and the parquet writers below actually write to
    os.makedirs("/gscratch/comdata/output/reddit_ngrams/comment_ngrams_10p_sample/", exist_ok=True)
    os.makedirs("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet/", exist_ok=True)
    os.makedirs("/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet/", exist_ok=True)

    ngram_output = partition.replace("parquet", "txt")

    # on the first pass, start this partition's ngram sample file fresh
    if mwe_pass == 'first':
        if os.path.exists(f"/gscratch/comdata/output/reddit_ngrams/comment_ngrams_10p_sample/{ngram_output}"):
            os.remove(f"/gscratch/comdata/output/reddit_ngrams/comment_ngrams_10p_sample/{ngram_output}")

    batches = dataset.to_batches(columns=['CreatedAt', 'subreddit', 'body', 'author'])

    # output schema for weekly term frequencies
    schema = pa.schema([pa.field('subreddit', pa.string(), nullable=False),
                        pa.field('term', pa.string(), nullable=False),
                        pa.field('week', pa.date32(), nullable=False),
                        pa.field('tf', pa.int64(), nullable=False)])

    # output schema for weekly per-author comment counts
    author_schema = pa.schema([pa.field('subreddit', pa.string(), nullable=False),
                               pa.field('author', pa.string(), nullable=False),
                               pa.field('week', pa.date32(), nullable=False),
                               pa.field('tf', pa.int64(), nullable=False)])

    dfs = (b.to_pandas() for b in batches)

    # truncate each comment's timestamp to the Monday that starts its week
    def add_week(df):
        df['week'] = (df.CreatedAt - pd.to_timedelta(df.CreatedAt.dt.dayofweek, unit='d')).dt.date
        return df

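    # For example (a hypothetical timestamp): a comment created on Wednesday
    # 2015-07-08 is assigned to the Monday of that week:
    # >>> ts = pd.Series(pd.to_datetime(['2015-07-08']))
    # >>> (ts - pd.to_timedelta(ts.dt.dayofweek, unit='d')).dt.date[0]
    # datetime.date(2015, 7, 6)
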
    dfs = (add_week(df) for df in dfs)

    def iterate_rows(dfs):
        for df in dfs:
            for row in df.itertuples():
                yield row

    rows = iterate_rows(dfs)

    subreddit_weeks = groupby(rows, lambda r: (r.subreddit, r.week))

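    # N.B. itertools.groupby only groups *consecutive* rows that share a key,
    # so this assumes rows arrive already ordered by subreddit and week;
    # out-of-order rows would produce duplicate groups. For example:
    # >>> [(k, list(g)) for k, g in groupby('aaba')]
    # [('a', ['a', 'a']), ('b', ['b']), ('a', ['a'])]
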
    if mwe_pass != 'first':
        # load the multiword expressions found after the first pass, most
        # strongly associated phrases first
        mwe_dataset = pd.read_feather('/gscratch/comdata/output/reddit_ngrams/multiword_expressions.feather')
        mwe_dataset = mwe_dataset.sort_values(['phrasePWMI'], ascending=False)
        mwe_phrases = list(mwe_dataset.phrase)
        mwe_phrases = [tuple(s.split(' ')) for s in mwe_phrases]
        mwe_tokenizer = MWETokenizer(mwe_phrases)
        mwe_tokenize = mwe_tokenizer.tokenize

    else:
        # no known multiword expressions yet, so this tokenizer is a no-op
        mwe_tokenize = MWETokenizer().tokenize

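    # MWETokenizer merges known multiword phrases into single underscore-joined
    # tokens. A sketch with a made-up phrase list:
    # >>> MWETokenizer([('machine', 'learning')]).tokenize(['i', 'love', 'machine', 'learning'])
    # ['i', 'love', 'machine_learning']
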
    # strip punctuation characters out of each token; drop tokens that were
    # entirely punctuation
    def remove_punct(sentence):
        new_sentence = []
        for token in sentence:
            new_token = ''
            for c in token:
                if c not in string.punctuation:
                    new_token += c
            if len(new_token) > 0:
                new_sentence.append(new_token)
        return new_sentence

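    # e.g. remove_punct(['don', "'", 't', 'panic', '!']) returns
    # ['don', 't', 'panic']
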
    stopWords = set(stopwords.words('english'))

    # we follow the approach described in datta, phelan, adar 2017
    def my_tokenizer(text):
        # lowercase, strip reddit markdown, remove punctuation and stopwords
        text = text.lower()

        # redditcleaner removes reddit markdown (newlines, quotes, bullet points, links, strikethrough, spoiler, code, superscript, table, headings)
        text = clean(text)

        # sentence tokenize
        sentences = sent_tokenize(text)

        # wordpunct_tokenize
        sentences = map(wordpunct_tokenize, sentences)

        # remove punctuation
        sentences = map(remove_punct, sentences)

        # datta et al. select relatively common phrases from the reddit corpus, but they don't really explain how. We'll try that in a second phase.
        # they say that they extract 1-4 grams from 10% of the sentences and then find phrases that appear often relative to the original terms
        # here we take a 10 percent sample of sentences
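        # nltk.util.ngrams slides a window of size n over a token list, e.g.:
        # >>> list(ngrams(['a', 'b', 'c'], 2))
        # [('a', 'b'), ('b', 'c')]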
        if mwe_pass == 'first':

            # drop sentences with fewer than three words
            sentences = filter(lambda sentence: len(sentence) > 2, sentences)
            sentences = list(sentences)
            for sentence in sentences:
                if random() <= 0.1:
                    # collect 1- to 4-grams from this sampled sentence
                    grams = list(chain(*map(lambda i: ngrams(sentence, i), range(1, 5))))
                    with open(f'/gscratch/comdata/output/reddit_ngrams/comment_ngrams_10p_sample/{ngram_output}', 'a') as gram_file:
                        for ng in grams:
                            gram_file.write(' '.join(ng) + '\n')
                for token in sentence:
                    if token not in stopWords:
                        yield token

        else:
            # merge multiword expressions, then remove stopwords
            sentences = map(mwe_tokenize, sentences)
            sentences = map(lambda s: filter(lambda token: token not in stopWords, s), sentences)
            for sentence in sentences:
                for token in sentence:
                    yield token

    def tf_comments(subreddit_weeks):
        for key, posts in subreddit_weeks:
            subreddit, week = key
            tfs = Counter()
            authors = Counter()
            for post in posts:
                tokens = my_tokenizer(post.body)
                tfs.update(tokens)
                authors.update([post.author])

            # the leading boolean flags term rows (True) vs. author rows (False)
            for term, tf in tfs.items():
                yield [True, subreddit, term, week, tf]

            for author, tf in authors.items():
                yield [False, subreddit, author, week, tf]

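    # e.g. (hypothetical values) a term row looks like
    #   [True, 'AskReddit', 'cat', datetime.date(2015, 7, 6), 3]
    # and an author row like
    #   [False, 'AskReddit', 'someuser', datetime.date(2015, 7, 6), 5]
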
    outrows = tf_comments(subreddit_weeks)

    outchunksize = 10000

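    # islice repeatedly drains up to outchunksize rows from the generator, so
    # each iteration of the loop below materializes one bounded chunk, e.g.:
    # >>> g = iter(range(5))
    # >>> list(islice(g, 3)), list(islice(g, 3))
    # ([0, 1, 2], [3, 4])
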
    with pq.ParquetWriter(f"/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet/{partition}",
                          schema=schema, compression='snappy', flavor='spark') as writer, \
         pq.ParquetWriter(f"/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet/{partition}",
                          schema=author_schema, compression='snappy', flavor='spark') as author_writer:

        while True:

            chunk = islice(outrows, outchunksize)
            # skip rows with a null subreddit
            chunk = (c for c in chunk if c[1] is not None)
            pddf = pd.DataFrame(chunk, columns=["is_token"] + schema.names)

            # split the chunk into term rows and author rows
            author_pddf = pddf.loc[pddf.is_token == False, schema.names]
            pddf = pddf.loc[pddf.is_token == True, schema.names]
            author_pddf = author_pddf.rename({'term': 'author'}, axis='columns')
            author_pddf = author_pddf.loc[:, author_schema.names]

            table = pa.Table.from_pandas(pddf, schema=schema)
            author_table = pa.Table.from_pandas(author_pddf, schema=author_schema)

            # stop once the generator is exhausted and both tables come back empty
            do_break = True

            if table.shape[0] != 0:
                writer.write_table(table)
                do_break = False
            if author_table.shape[0] != 0:
                author_writer.write_table(author_table)
                do_break = False

            if do_break:
                break


# write one weekly_tf invocation per partition to a task list for batch execution
def gen_task_list(mwe_pass='first'):
    files = os.listdir("/gscratch/comdata/output/reddit_comments_by_subreddit.parquet/")
    with open("tf_task_list", 'w') as outfile:
        for f in files:
            if f.endswith(".parquet"):
                outfile.write(f"./tf_comments.py weekly_tf --mwe-pass {mwe_pass} {f}\n")

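# Each line of tf_task_list is a standalone shell command, e.g. (with a
# hypothetical partition file name):
#   ./tf_comments.py weekly_tf --mwe-pass first part-00000.parquet
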
if __name__ == "__main__":
    fire.Fire({"gen_task_list": gen_task_list,
               "weekly_tf": weekly_tf})
