#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import sys
import os, os.path
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mw.xml_dump import Iterator
from mw.lib import persistence
from mw.lib import reverts

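# Size of the sliding window used for persistence measures: each revision's
# token persistence is computed over the PERSISTENCE_RADIUS-1 revisions
# that follow it.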
PERSISTENCE_RADIUS=7

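# Given the list of tokens a revision added, return (1) the number of
# persistent token revisions (the total count of later revisions in which
# those tokens survived) and (2) the number of tokens added.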
def calculate_persistence(tokens_added):
    return(sum([(len(x.revisions)-1) for x in tokens_added]),
           len(tokens_added))

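# Wraps mw.xml_dump.Iterator and yields WikiqPage objects, optionally
# collapsing runs of consecutive revisions by the same user.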
class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Iterator.from_file(self.fh)
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page, collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)

class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, collapse_user=False):
        self.id = page.id
        self.title = page.title
        self.namespace = page.namespace
        self.redirect = page.redirect
        self.restrictions = page.restrictions

        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration   rev.user   prev_rev.user   add prev_rev?
        #         0          A            None           Never
        #         1          A               A           False
        #         2          B               A            True
        #         3          A               B            True
        #         4          A               A           False
        # Post-loop                          A          Always
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # if the user changed, the previous revision ended a
                    # sequence: yield it and reset the counter
                    if rev.contributor.user_text != prev_rev.contributor.user_text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev
        # also yield the final revision
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)

class WikiqParser():
    def __init__(self, input_file, output_file, collapse_user=False, persist=False):
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False
        self.namespaces = []

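    # Map a page title to a namespace id by prefix matching; e.g., with a
    # (hypothetical) mapping {"Talk": 1, None: 0}, "Talk:Example" returns 1.
    # Titles with no matching prefix get the namespace whose name is None.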
    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # the default namespace has no name to prefix-match against
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, return the default namespace
        return default_ns

    def process(self):
        print("Processing file: %s" % self.input_file.name, file=sys.stderr)

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                         r'output/wikiq-\1-\2.tsv',
        #                         input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract the mapping from namespace names to namespace ids
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}

        page_count = 0
        rev_count = 0
        # Iterate through pages
        for page in dump:
            if self.persist:
                state = persistence.State()
                window = deque(maxlen=PERSISTENCE_RADIUS)
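                # sliding window: a revision is buffered here until
                # PERSISTENCE_RADIUS-1 later revisions have been processed,
                # so its token persistence can be measured over them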

            rev_detector = reverts.Detector()
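            # flags a revision as an identity revert when its checksum
            # matches that of an earlier revision of the page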

            # Iterate through a page's revisions
            for rev in page:

                rev_data = {'revid' : rev.id,
                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                            'articleid' : page.id,
                            'editor_id' : "" if rev.contributor.id is None else rev.contributor.id,
                            'title' : '"' + page.title + '"',
                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
                            'deleted' : "TRUE" if rev.text.deleted else "FALSE" }

                # if the revision text is deleted, many fields will be missing
                if rev.text.deleted:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""

                else:
                    # use the sha1 from the dump if it exists; otherwise generate one
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)
                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if rev.contributor.user_text:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.contributor.user_text + '"'
                    rev_data['anon'] = "TRUE" if rev.contributor.id is None else "FALSE"

                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""

                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #    redirect = True
                #else:
                #    redirect = False

                #TODO missing: additions_size deletions_size

                # if collapse_user is on, report how many revisions were collapsed
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs

                if self.persist:
                    if rev.text.deleted:
                        # deleted revisions have no text to process; emit
                        # them immediately with empty persistence measures
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                        self.print_rev_data(rev_data)
                    else:
                        _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1

            if self.persist:
                # print out metadata for the last PERSISTENCE_RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window)-(i+1)

                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

    def print_rev_data(self, rev_data):
        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

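        # sorting by key here, as in the header above, keeps every row's
        # columns aligned with the header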
        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)

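# Open a dump for reading, streaming compressed files through an external
# decompressor (7za, zcat, or bzcat must be on the PATH); anything without
# a recognized extension is treated as uncompressed XML.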
def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename]
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", input_filename]
    else:
        cmd = None

    if cmd is not None:
        input_file = Popen(cmd, stdout=PIPE).stdout
    else:
        input_file = open(input_filename, 'r')

    return input_file

def open_output_file(input_filename):
    # derive the output filename by replacing the .xml(.7z|.gz|.bz2) suffix with .tsv
    output_filename = re.sub(r'\.xml(\.(7z|gz|bz2))?$', '', input_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file

parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

# arguments for the input files
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create output files)")

parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision made by a user within each sequence of consecutive edits by that user. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) the number of revisions used in computing the first measure.")

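# Example invocations (filenames are hypothetical):
#   ./wikiq enwiki-20180101-pages-meta-history1.xml.bz2
#   ./wikiq dump.xml.7z -o output/ --persistence
#   bzcat dump.xml.bz2 | ./wikiq    # read from stdin, write TSV to stdout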
args = parser.parse_args()

if len(args.dumpfiles) > 0:
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open file for output
        if args.stdout:
            output_file = sys.stdout
        else:
            if args.output_dir:
                output_dir = args.output_dir[0]
            else:
                output_dir = "."

            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)

        wikiq = WikiqParser(input_file, output_file,
                            collapse_user=args.collapse_user,
                            persist=args.persist)
        wikiq.process()

        # close things
        input_file.close()
        output_file.close()
else:
    wikiq = WikiqParser(sys.stdin, sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=args.persist)
    wikiq.process()

# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")
