#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import sys
import os, os.path
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mwxml import Dump

from deltas.tokenizers import wikitext_split
from deltas import SequenceMatcher
from deltas import SegmentMatcher
import mwpersistence
import mwreverts
from urllib.parse import quote

TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7

class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3

def calculate_persistence(tokens_added):
    return (sum([(len(x.revisions) - 1) for x in tokens_added]),
            len(tokens_added))
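# calculate_persistence returns (token_revs, tokens_added). Each element of
# tokens_added is an mwpersistence token whose .revisions list includes the
# revision that introduced it, hence the -1. For example, a revision adding
# two tokens, one surviving five later revisions and one removed immediately,
# yields (5, 2).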

def matchmake(scanned_content, rev_data, regex, label):
    p = re.compile(regex)

    temp_dict = {}
    # if there are named capture groups in the regex
    if bool(p.groupindex):
        capture_groups = list(p.groupindex.keys())

        # initialize the {capture_group_name:list} for each capture group
        for cap_group in capture_groups:
            temp_dict["{}_{}".format(label, cap_group)] = []

        # if there are matches of some sort in this revision content, fill the lists for each cap_group
        if p.search(scanned_content) is not None:
            m = p.finditer(scanned_content)
            matchobjects = list(m)

            for cap_group in capture_groups:
                temp_list = []
                for match in matchobjects:
                    # we only want to add the match for the capture group if the match is not None
                    if match.group(cap_group) is not None:
                        temp_list.append(match.group(cap_group))

                # if temp_list of matches is empty, just make that column None
                if len(temp_list) == 0:
                    temp_dict["{}_{}".format(label, cap_group)] = None
                # else we put in the list we made in the for-loop above
                else:
                    temp_dict["{}_{}".format(label, cap_group)] = ', '.join(temp_list)

        # there are no matches at all in this revision content, so we default all values to None
        else:
            for cap_group in capture_groups:
                temp_dict["{}_{}".format(label, cap_group)] = None

    # there are no capture groups, so we just search for all the matches of the regex
    else:
        # given that there are matches to be made
        if p.search(scanned_content) is not None:
            m = p.findall(scanned_content)
            temp_dict[label] = ', '.join(m)
        else:
            temp_dict[label] = None

    # update rev_data with our new columns
    rev_data.update(temp_dict)
    return rev_data
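# For example, matching the regex r"(?P<foo>foo)|(?P<bar>bar)" with label "x"
# against the content "foo bar foo" adds the columns x_foo = "foo, foo" and
# x_bar = "bar". A regex without named capture groups instead adds a single
# column under the label itself, holding all matches joined by ", ".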


class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = { ns.id : ns.name for ns in
                               self.mwiterator.site_info.namespaces }
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map = self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        # note: this must reference the name-mangled __pages set in __init__
        return next(self.__pages)

class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        self.namespace = page.namespace
        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            page.namespace = 0
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        else:
            self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration   rev.user   prev_rev.user   add prev_rev?
        #         0          A            None           Never
        #         1          A               A           False
        #         2          B               A            True
        #         3          A               B            True
        #         4          A               A           False
        # Post-loop                          A          Always
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a sequence by a user and reset;
                    # also yield if we don't know who the user is

                    if rev.deleted.user or prev_rev.deleted.user:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs

                    elif rev.user.text != prev_rev.user.text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev

        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)

class WikiqParser():
    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces=None, revert_radius=15):
        """
        Parameters:
           persist : what persistence method to use. Takes a PersistMethod value.
        """

        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False
        self.namespaces = []
        self.urlencode = urlencode
        self.revert_radius = revert_radius
        self.regex_match_revision = regex_match_revision
        self.regex_revision_label = regex_revision_label
        self.regex_match_comment = regex_match_comment
        self.regex_comment_label = regex_comment_label

        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns
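    # For example, if self.namespaces maps 'Talk' -> 1, then a page titled
    # 'Talk:Foo' resolves to namespace 1; a title with no recognized prefix
    # falls back to default_ns.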


    def process(self):

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                         r'output/wikiq-\1-\2.tsv',
        #                         input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract list of namespaces
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}

        page_count = 0
        rev_count = 0

        # Iterate through pages
        for page in dump:
            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)

            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if namespace not in self.namespace_filter:
                    continue

            rev_detector = mwreverts.Detector(radius = self.revert_radius)

            if self.persist != PersistMethod.none:
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if self.persist == PersistMethod.sequence:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                elif self.persist == PersistMethod.segment:
                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                # self.persist == PersistMethod.legacy
                else:
                    from mw.lib import persistence
                    state = persistence.State()
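            # Note: both matchers come from deltas; per the --persistence help
            # text below, the segment matcher is more robust to content moves
            # but slower, while the legacy path uses the older
            # mw.lib.persistence library.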

            # Iterate through a page's revisions
            for rev in page:

                # initialize rev_data
                rev_data = {}

                # if the command line args gave a label but no corresponding regular expression
                if (self.regex_revision_label is not None and self.regex_match_revision is None) or (self.regex_comment_label is not None and self.regex_match_comment is None):
                    sys.exit('The given regex label(s) have no corresponding regex to search for.')

                # if there's anything in the list of regex_match_revision
                if self.regex_match_revision is not None:
                    if (self.regex_revision_label is None) or (len(self.regex_match_revision) != len(self.regex_revision_label)):
                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')

                    # initialize and construct the list of regex-label tuples
                    pairs = []
                    for i in range(0, len(self.regex_match_revision)):
                        pairs.append((self.regex_match_revision[i], self.regex_revision_label[i]))

                    # for each regex/label pair, we now run matchmake to check and output columns
                    for pair in pairs:
                        # pair[0] corresponds to the regex, pair[1] to the label
                        rev_data = matchmake(rev.text, rev_data, pair[0], pair[1])

                # if there's anything in the list of regex_match_comment
                if self.regex_match_comment is not None:
                    if (self.regex_comment_label is None) or (len(self.regex_match_comment) != len(self.regex_comment_label)):
                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')

                    # initialize and construct the list of regex-label tuples
                    pairs = []
                    for i in range(0, len(self.regex_match_comment)):
                        pairs.append((self.regex_match_comment[i], self.regex_comment_label[i]))

                    # for each regex/label pair, we now run matchmake to check and output columns
                    for pair in pairs:
                        # pair[0] corresponds to the regex, pair[1] to the label
                        rev_data = matchmake(rev.comment, rev_data, pair[0], pair[1])

                # we fill out the rest of the data structure now
                rev_data['revid'] = rev.id
                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
                rev_data['articleid'] = page.id
                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
                rev_data['title'] = '"' + page.title + '"'
                rev_data['namespace'] = namespace
                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"

                # if revisions are deleted, /many/ things will be missing
                if rev.deleted.text:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""

                else:
                    # rev.text can be None if the page has no text
                    if not rev.text:
                        rev.text = ""

                    # if a sha1 already exists we use it; otherwise we generate one
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)

                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if not rev.deleted.user:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.user.text + '"'
                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"

                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""

                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #    redirect = True
                #else:
                #    redirect = False

                #TODO missing: additions_size deletions_size

                # if collapse user was on, let's run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs

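                # Persistence is computed over a sliding window of
                # PERSISTENCE_RADIUS revisions: a revision's row is only
                # printed once PERSISTENCE_RADIUS - 1 later revisions have
                # been seen, so the survival of its tokens can be measured.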
                if self.persist != PersistMethod.none:
                    if rev.deleted.text:
                        # there is no text to track, so the persistence fields are empty
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                        self.print_rev_data(rev_data)
                    else:

                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)

                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)

                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1

            if self.persist != PersistMethod.none:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window)-(i+1)

                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

    def print_rev_data(self, rev_data):
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)

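    # Rows are emitted with columns in sorted key order, and the header comes
    # from the first row printed; this assumes every revision carries the same
    # set of keys, which holds when the same flags apply to all rows.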


def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*']
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    # if cmd was never set (no recognized compression suffix),
    # fall back to reading the file directly
    except NameError:
        input_file = open(input_filename, 'r')

    return input_file
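# For example, "dump.xml.bz2" is streamed through bzcat, while a filename with
# no recognized compression suffix is opened directly as plain text.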

def open_output_file(input_filename):
    # derive the output filename from the input filename
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file
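# For example, "enwiki-20200101-pages.xml.bz2" becomes
# "enwiki-20200101-pages.tsv": the compression suffix and ".xml" are stripped
# and ".tsv" is appended.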

parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

# arguments for the input direction
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create dump file)")

parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision within each sequence of consecutive edits made by the same user. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")

parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")

parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                    help="ID number of a namespace to include. Can be specified more than once.")

parser.add_argument('-rr',
                    '--revert-radius',
                    dest="revert_radius",
                    type=int,
                    action='store',
                    default=15,
                    help="Number of edits to check when looking for reverts (default: 15)")

parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")

parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
                    help="The label for the output column based on matching the regex in revision text.")

parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
                    help="The regular expression to search for in comments of revisions.")

parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
                    help="The label for the output column based on matching the regex in comments.")

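# Example invocation (with a hypothetical dump filename), pairing each
# pattern with its label:
#   ./wikiq enwiki-20200101-pages.xml.bz2 -o output --collapse-user \
#       -RP 'https?://[^\s]+' -RPl url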
args = parser.parse_args()

# set persistence method

if args.persist is None:
    persist = PersistMethod.none
elif args.persist == "segment":
    persist = PersistMethod.segment
elif args.persist == "legacy":
    persist = PersistMethod.legacy
else:
    persist = PersistMethod.sequence

if args.namespace_filter is not None:
    namespaces = args.namespace_filter
else:
    namespaces = None

if len(args.dumpfiles) > 0:
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open directory for output
        if args.output_dir:
            output_dir = args.output_dir[0]
        else:
            output_dir = "."

        print("Processing file: %s" % filename, file=sys.stderr)

        if args.stdout:
            output_file = sys.stdout
        else:
            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)

        wikiq = WikiqParser(input_file,
                            output_file,
                            collapse_user=args.collapse_user,
                            persist=persist,
                            urlencode=args.urlencode,
                            namespaces=namespaces,
                            revert_radius=args.revert_radius,
                            regex_match_revision = args.regex_match_revision,
                            regex_revision_label = args.regex_revision_label,
                            regex_match_comment = args.regex_match_comment,
                            regex_comment_label = args.regex_comment_label)

        wikiq.process()

        # close things
        input_file.close()
        output_file.close()
else:
    wikiq = WikiqParser(sys.stdin,
                        sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=persist,
                        urlencode=args.urlencode,
                        namespaces=namespaces,
                        revert_radius=args.revert_radius,
                        regex_match_revision = args.regex_match_revision,
                        regex_revision_label = args.regex_revision_label,
                        regex_match_comment = args.regex_match_comment,
                        regex_comment_label = args.regex_comment_label)

    wikiq.process()

# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")