# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
import argparse
import sys
import os
import os.path
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mwxml import Dump

from deltas.tokenizers import wikitext_split
import mwpersistence
import mwreverts

from urllib.parse import quote
TO_ENCODE = ('title', 'editor')
# number of revisions in the sliding window used for persistence measures
PERSISTENCE_RADIUS = 7
from deltas import SequenceMatcher
from deltas import SegmentMatcher

class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3
def calculate_persistence(tokens_added):
    return(sum([(len(x.revisions)-1) for x in tokens_added]),
           len(tokens_added))
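
# A minimal sketch (not part of the original script) of what
# calculate_persistence returns. mwpersistence token objects carry a
# .revisions list of the revisions each token appears in, so a token that
# appears in 3 revisions persisted through 2 later revisions:
#
#   from types import SimpleNamespace
#   fake_tokens = [SimpleNamespace(revisions=[101, 102, 103]),
#                  SimpleNamespace(revisions=[103])]
#   calculate_persistence(fake_tokens)  # -> (2, 2): 2 token-revisions, 2 tokens added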
class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = { ns.id : ns.name for ns in
                               self.mwiterator.site_info.namespaces }
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map=self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)
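
# Usage sketch (hypothetical file handle; not part of the original script):
# WikiqIterator wraps mwxml.Dump and yields WikiqPage objects, each of which
# yields revisions, collapsed by user if collapse_user=True.
#
#   with open("dump.xml") as fh:
#       for page in WikiqIterator(fh, collapse_user=True):
#           for rev in page:
#               print(page.title, rev.id, rev.collapsed_revs)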
class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        self.namespace = page.namespace
        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            self.title = page.title
            page.namespace = 0
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        else:
            self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()
    def rev_list(self):
        # Outline for how we handle collapse_user=True: within a page, yield
        # the previous revision only when the user changes (or the user is
        # hidden), and always yield the final revision.
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs
            else:
                if self.collapse_user:
                    # yield if this is the last edit in a sequence by a user and reset;
                    # also yield if we do not know who the user is
                    if rev.deleted.user or prev_rev.deleted.user:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    elif rev.user.text != prev_rev.user.text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev
            prev_rev = rev

        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)
class RegexPair(object):
    def __init__(self, pattern, label):
        self.pattern = re.compile(pattern)
        self.label = label
        self.has_groups = bool(self.pattern.groupindex)
        if self.has_groups:
            self.capture_groups = list(self.pattern.groupindex.keys())

    def _make_key(self, cap_group):
        return ("{}_{}".format(self.label, cap_group))
    def matchmake(self, content, rev_data):
        temp_dict = {}
        # if there are named capture groups in the regex
        if self.has_groups:

            # if there are matches in this revision's content, fill a list for each capture group
            if self.pattern.search(content) is not None:
                m = self.pattern.finditer(content)
                matchobjects = list(m)

                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_list = []
                    for match in matchobjects:
                        # only add the match for this capture group if it is not None
                        if match.group(cap_group) is not None:
                            temp_list.append(match.group(cap_group))

                    # if the list of matches is empty, just make that column None
                    if len(temp_list) == 0:
                        temp_dict[key] = None
                    # else we put in the list we built in the for-loop above
                    else:
                        temp_dict[key] = ', '.join(temp_list)

            # if there are no matches at all in this revision's content, default the values to None
            else:
                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_dict[key] = None

        # if there are no capture groups, we just collect all matches of the regex
        else:
            # given that there are matches to be made
            if self.pattern.search(content) is not None:
                m = self.pattern.findall(content)
                temp_dict[self.label] = ', '.join(m)
            else:
                temp_dict[self.label] = None

        # update rev_data with our new columns
        rev_data.update(temp_dict)
        return rev_data
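
# Usage sketch (hypothetical pattern and text): with named capture groups,
# matchmake adds one column per group keyed "<label>_<group>"; without
# groups it adds a single "<label>" column of comma-joined matches.
#
#   pair = RegexPair(r"\[\[(?P<target>[^\]|]+)", "wikilink")
#   pair.matchmake("See [[Main Page]] and [[Help:Contents|help]].", {})
#   # -> {'wikilink_target': 'Main Page, Help:Contents'}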
class WikiqParser():
    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces=None, revert_radius=15):
        """
        Parameters:
           persist : what persistence method to use. Takes a PersistMethod value.
        """
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False
        self.namespaces = []
        self.urlencode = urlencode
        self.revert_radius = revert_radius

        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None

        self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
        self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
    def make_matchmake_pairs(self, patterns, labels):
        if (patterns is not None and labels is not None) and \
           (len(patterns) == len(labels)):
            return [RegexPair(pattern, label) for pattern, label in zip(patterns, labels)]
        elif (patterns is None and labels is None):
            return []
        else:
            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
    def matchmake(self, rev, rev_data):
        rev_data = self.matchmake_revision(rev.text, rev_data)
        rev_data = self.matchmake_comment(rev.comment, rev_data)
        return rev_data

    def matchmake_revision(self, text, rev_data):
        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)

    def matchmake_comment(self, comment, rev_data):
        return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)

    def matchmake_pairs(self, text, rev_data, pairs):
        for pair in pairs:
            rev_data = pair.matchmake(text, rev_data)
        return rev_data
    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, return the default namespace
        return default_ns
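
    # Lookup sketch (hypothetical values, not from the original script): with
    # self.namespaces set to {'Talk': 1, None: 0}, a title like "Talk:Sandbox"
    # returns 1, while "Sandbox" matches no prefix and falls through to the
    # default namespace, 0.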
    def process(self):
        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract list of namespaces
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}

        page_count = 0
        rev_count = 0
        # Iterate through pages
        for page in dump:
            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)

            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if namespace not in self.namespace_filter:
                    continue

            rev_detector = mwreverts.Detector(radius=self.revert_radius)
            if self.persist != PersistMethod.none:
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if self.persist == PersistMethod.sequence:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                elif self.persist == PersistMethod.segment:
                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                # self.persist == PersistMethod.legacy
                else:
                    from mw.lib import persistence
                    state = persistence.State()
            # Iterate through a page's revisions
            for rev in page:

                # initialize rev_data
                rev_data = {
                    'revid' : rev.id,
                    'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                    'articleid' : page.id,
                    'editor_id' : "" if rev.deleted.user or rev.user.id is None else rev.user.id,
                    'title' : '"' + page.title + '"',
                    'namespace' : namespace,
                    'deleted' : "TRUE" if rev.deleted.text else "FALSE"
                }
                rev_data = self.matchmake(rev, rev_data)
                # if revisions are deleted, /many/ things will be missing
                if rev.deleted.text:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""
                else:
                    # rev.text can be None if the page has no text
                    if not rev.text:
                        rev.text = ""
                    # if the dump already has a sha1, use it; otherwise generate one from the text
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO: rev.bytes doesn't work; looks like an upstream bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)

                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""
                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if not rev.deleted.user:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.user.text + '"'
                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"
                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""
                # TODO: detect redirects, e.g. via
                # re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I)

                # TODO missing: additions_size deletions_size
                # if collapse_user is on, report the number of collapsed revisions
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs
                if self.persist != PersistMethod.none:
                    if rev.deleted.text:
                        # null out the persistence columns for deleted revisions
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                    else:
                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)

                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            # the oldest revision in the window can now be scored and printed
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS - 1

                            self.print_rev_data(old_rev_data)
                else:
                    self.print_rev_data(rev_data)

                rev_count += 1
            if self.persist != PersistMethod.none:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window) - (i + 1)

                    self.print_rev_data(rev_data)

            page_count += 1
419 print("Done: %s revisions and %s pages." % (rev_count, page_count),
    def print_rev_data(self, rev_data):
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
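
    # Output sketch (hypothetical row, urlencode off): fields are emitted
    # tab-separated in sorted-key order, with the header printed once before
    # the first row.
    #
    #   self.print_rev_data({'revid': 1, 'anon': 'FALSE', 'minor': 'TRUE'})
    #   # header: anon<TAB>minor<TAB>revid
    #   # row:    FALSE<TAB>TRUE<TAB>1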
def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*']
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    # if none of the decompression cases matched, cmd is unbound and we fall
    # back to reading the file directly
    except NameError:
        input_file = open(input_filename, 'r')

    return input_file
def open_output_file(input_filename):
    # derive the output filename from the input filename
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file
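
# Example of the filename rewrite (hypothetical input): the compression and
# .xml suffixes are stripped and ".tsv" is appended, so
# "enwiki-20200101-pages.xml.bz2" becomes "enwiki-20200101-pages.tsv".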
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab-delimited data.')

# arguments for the input direction
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create dump file)")
parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision made by a user within each sequence of consecutive edits they make. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) the number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves but prone to bugs, and slower.")
parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                    help="Output url-encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")

parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                    help="Id number of namespace to include. Can be specified more than once.")

parser.add_argument('-rr',
                    '--revert-radius',
                    dest="revert_radius",
                    type=int,
                    action='store',
                    default=15,
                    help="Number of edits to check when looking for reverts (default: 15)")
parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")

parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
                    help="The label for the output column based on matching the regex in revision text.")

parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
                    help="The regular expression to search for in comments of revisions.")

parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
                    help="The label for the output column based on matching the regex in comments.")
args = parser.parse_args()
# set persistence method
if args.persist is None:
    persist = PersistMethod.none
elif args.persist == "segment":
    persist = PersistMethod.segment
elif args.persist == "legacy":
    persist = PersistMethod.legacy
else:
    persist = PersistMethod.sequence
if args.namespace_filter is not None:
    namespaces = args.namespace_filter
else:
    namespaces = None
if len(args.dumpfiles) > 0:
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open directory for output
        if args.output_dir:
            output_dir = args.output_dir[0]
        else:
            output_dir = "."

        print("Processing file: %s" % filename, file=sys.stderr)

        if args.stdout:
            output_file = sys.stdout
        else:
            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)
        wikiq = WikiqParser(input_file,
                            output_file,
                            collapse_user=args.collapse_user,
                            persist=persist,
                            urlencode=args.urlencode,
                            namespaces=namespaces,
                            revert_radius=args.revert_radius,
                            regex_match_revision=args.regex_match_revision,
                            regex_revision_label=args.regex_revision_label,
                            regex_match_comment=args.regex_match_comment,
                            regex_comment_label=args.regex_comment_label)

        wikiq.process()

        # close things
        input_file.close()
        output_file.close()
else:
    wikiq = WikiqParser(sys.stdin,
                        sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=persist,
                        urlencode=args.urlencode,
                        namespaces=namespaces,
                        revert_radius=args.revert_radius,
                        regex_match_revision=args.regex_match_revision,
                        regex_revision_label=args.regex_revision_label,
                        regex_match_comment=args.regex_match_comment,
                        regex_comment_label=args.regex_comment_label)

    wikiq.process()