#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
import argparse
import sys
import os
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mwxml import Dump, Page, LogItem
from mwxml.errors import MalformedXML

from deltas.tokenizers import wikitext_split
from mwdiffs.utilities import dump2diffs

import mwpersistence
import mwreverts
from mwpersistence.state import Version, apply_opdocs, apply_operations, persist_revision_once
from mwpersistence import Token
from mwpersistence.utilities import diffs2persistence

from urllib.parse import quote

from deltas import SequenceMatcher
from deltas import SegmentMatcher

TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7
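# (TO_ENCODE feeds print_rev_data's --url-encode handling; PERSISTENCE_RADIUS
# doubles as the persistence window size and the revert-detection radius)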

# this is a simple override of mwpersistence.DiffState that doesn't do anything special for reverts.
class WikiqDiffState(mwpersistence.DiffState):
    def _update(self, text=None, checksum=None, opdocs=None, revision=None):
        if checksum is None:
            if text is None:
                raise TypeError("Either 'text' or 'checksum' must be " +
                                "specified.")
            else:
                checksum = sha1(bytes(text, 'utf8')).hexdigest()

        current_version = Version()

        # the main difference we have is that we don't do anything special for reverts
        if opdocs is not None:
            transition = apply_opdocs(opdocs, self.last.tokens or [])
            current_version.tokens, _, _ = transition
        else:
            # NOTICE: HEAVY COMPUTATION HERE!!!
            #
            # Diffs usually run in O(n^2) -- O(n^3) time and most
            # tokenizers produce a lot of tokens.
            if self.diff_processor is None:
                raise RuntimeError("DiffState cannot process raw text " +
                                   "without a diff_engine specified.")
            operations, _, current_tokens = \
                self.diff_processor.process(text, token_class=Token)

            transition = apply_operations(operations,
                                          self.last.tokens or [],
                                          current_tokens)

            current_version.tokens, _, _ = transition

        persist_revision_once(current_version.tokens, revision)
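        # persist_revision_once marks each still-present token with this
        # revision; the version we just built becomes the baseline for the
        # next diff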
        self.last = current_version

        # Return the transitioned state
        return transition

class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3

def calculate_persistence(tokens_added):
    return(sum([(len(x.revisions)-1) for x in tokens_added]),
           len(tokens_added))
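# e.g. a token introduced in one revision and still present in the next three
# has len(x.revisions) == 4 and contributes 3 to the first value (token_revs);
# the second value is simply the number of tokens the revision added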

class WikiqIterator(Dump):

    @classmethod
    def from_file(cls, fh, collapse_user = False):
        cls = super(WikiqIterator, cls).from_file(fh)
        cls.collapse_user = collapse_user
        cls.namespace_map = { ns.id : ns.name for ns in
                              cls.site_info.namespaces }
        return cls

    @classmethod
    def process_item(cls, item_element, namespace_map, collapse_user = False):
        if item_element.tag == "page":
            return WikiqPage.from_element(item_element, namespace_map, collapse_user)
        elif item_element.tag == "logitem":
            return LogItem.from_element(item_element, namespace_map)
        else:
            raise MalformedXML("Expected to see <page> or <logitem>. " +
                               "Instead saw <{0}>".format(item_element.tag))

class WikiqPage(Page):
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'collapse_user')

    @classmethod
    def from_element(cls, item_element, namespace_map, collapse_user = False):
        inv_namespace_map = {ns.id:name for name,ns in namespace_map.items()}

        cls = super(WikiqPage, cls).from_element(item_element, namespace_map)

        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        # this undoes the "correction" of the namespace in mwxml
        if cls.namespace not in inv_namespace_map:
            cls.namespace = 0
        if cls.namespace != 0:
            cls.title = ':'.join([inv_namespace_map[cls.namespace], cls.title])

        cls.collapse_user = collapse_user
        cls.prev_rev = None
        cls.revisions = cls._Page__revisions
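        # _Page__revisions reaches mwxml.Page's name-mangled __revisions
        # iterator; prev_rev buffers one revision at a time so consecutive
        # edits by the same user can be collapsed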
        return cls

    @staticmethod
    def _correct_sha(rev_data):
        if rev_data.deleted.text:
            rev_data.text = ""
            rev_data.text_chars = 0
            rev_data.sha1 = ""
            rev_data.revert = ""
            rev_data.reverteds = ""
        else:
            if rev_data.text is None:
                rev_data.text = ""

            rev_data.text_chars = len(rev_data.text)
            if hasattr(rev_data, "sha1") and rev_data.sha1 is not None:
                text_sha1 = rev_data.sha1
            else:
                text_sha1 = sha1(bytes(rev_data.text, "utf8")).hexdigest()

            rev_data.sha1 = text_sha1

        return rev_data

    # Outline for how we want to handle collapse_user=True
    # iteration   rev.user   prev_rev.user   add prev_rev?
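    # (illustrative rows, reconstructed from the logic below)
    #         0          A            None   never: nothing is buffered yet
    #         1          A               A   no: same user, keep collapsing
    #         2          B               A   yes: user changed, flush A's last edit
    # post-loop                              always: flush whatever is buffered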
    def __find_next_revision(self):
        if self.prev_rev is None:
            prev_rev = WikiqPage._correct_sha(next(self.revisions))
            self.prev_rev = prev_rev
        else:
            prev_rev = self.prev_rev

        if self.collapse_user:
            collapsed_revs = 1
            prev_rev.collapsed_revs = collapsed_revs

        for rev in self.revisions:
            rev = WikiqPage._correct_sha(rev)
            if self.collapse_user:
                # yield if this is the last edit in a seq by a user and reset
                # also yield if we don't know who the user is
                if rev.deleted.user or prev_rev.deleted.user:
                    self.prev_rev = rev
                    if prev_rev is not None:
                        prev_rev.collapsed_revs = collapsed_revs
                        return prev_rev

                elif not rev.user.text == prev_rev.user.text:
                    self.prev_rev = rev
                    if prev_rev is not None:
                        prev_rev.collapsed_revs = collapsed_revs
                        return prev_rev

                # otherwise, add one to the counter
                else:
                    collapsed_revs += 1
                    rev.collapsed_revs = collapsed_revs
                    prev_rev = rev
                    self.prev_rev = rev

            # if collapse_user is false, we always yield
            else:
                self.prev_rev = rev
                if prev_rev is not None:
                    return prev_rev
        # revisions are exhausted: flush the final buffered revision
        prev_rev = self.prev_rev
        if self.collapse_user:
            prev_rev.collapsed_revs = collapsed_revs
        self.prev_rev = None
        return prev_rev

    def __next__(self):
        revision = self.__find_next_revision()
        return revision

    def __iter__(self):
        while True:
            try:
                revision = self.__find_next_revision()
            except StopIteration:
                return
            yield revision
    # def __iter__(self):
    #     return self.__revisions

    # def __next__(self):
    #     return next(self.__revisions)

class WikiqParser():
    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces=None):
        """
        Parameters:
           persist : what persistence method to use. Takes a PersistMethod value
        """
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False
        self.urlencode = urlencode
        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None
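        # namespace_filter is None when no -n flags were given; process() then
        # skips the membership check and reads every namespace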

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        self.dump = WikiqIterator.from_file(self.input_file, self.collapse_user)

        self.diff_engine = None

        if self.persist == PersistMethod.sequence:
            self.diff_engine = SequenceMatcher(tokenizer = wikitext_split)

        if self.persist == PersistMethod.segment:
            self.diff_engine = SegmentMatcher(tokenizer = wikitext_split)
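        # both matchers are deltas diff engines over wikitext_split tokens;
        # SegmentMatcher additionally detects moved segments, which is what
        # makes -p=segment robust to content moves (and slower)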

    # def __get_namespace_from_title(self, title):
    #     for ns in self.namespaces:
    #         # skip if the namespace is not defined
    #         default_ns = self.namespaces[ns]
    #
    #         if title.startswith(ns + ":"):
    #             return self.namespaces[ns]
    #
    #     # if we've made it this far with no matches, we return the default namespace

    # def _set_namespace(self, rev_docs):
    #     for rev_data in rev_docs:
    #         if 'namespace' not in rev_data['page']:
    #             namespace = self.__get_namespace_from_title(page['title'])
    #             rev_data['page']['namespace'] = namespace

    def process(self):
        page_count = 0
        rev_count = 0

        for page in self.dump:

            # skip pages not in the namespaces we want
            if self.namespace_filter is not None and page.namespace not in self.namespace_filter:
                continue

            rev_detector = mwreverts.Detector()

            if self.persist != PersistMethod.none:
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if self.persist == PersistMethod.sequence:
                    state = WikiqDiffState(SequenceMatcher(tokenizer = wikitext_split),
                                           revert_radius=PERSISTENCE_RADIUS)

                elif self.persist == PersistMethod.segment:
                    state = WikiqDiffState(SegmentMatcher(tokenizer = wikitext_split),
                                           revert_radius=PERSISTENCE_RADIUS)

                else:  # PersistMethod.legacy
                    from mw.lib import persistence
                    state = persistence.State()
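                    # legacy persistence comes from the older `mw` library
                    # rather than mwpersistence/deltas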

            # Iterate through a page's revisions
            for rev in page:

                rev_data = {'revid' : rev.id,
                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                            'articleid' : page.id,
                            'editor_id' : "" if rev.deleted.user or rev.user.id is None else rev.user.id,
                            'title' : '"' + page.title + '"',
                            'namespace' : page.namespace,
                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
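                # titles (like editor names below) are wrapped in quotes so
                # TSV readers such as R's fread parse them cleanly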

                # if revisions are deleted, /many/ things will be missing
                if rev.deleted.text:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""
                else:
                    # rev.text can be None if the page has no text
                    if rev.text is None:
                        rev.text = ""

                    # if text exists, we'll check for a sha1 and generate one otherwise
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)

                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if not rev.deleted.user:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.user.text + '"'
                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"
                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""

                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #    redirect = True
                #else:
                #    redirect = False

                #TODO missing: additions_size deletions_size

                # if collapse user was on, lets run that
                # if self.collapse_user:
                #     rev_data.collapsed_revs = rev.collapsed_revs

                if self.persist != PersistMethod.none:
                    if rev.deleted.text:
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                    else:
                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)

                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS - 1
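                            # the oldest revision in a full window has been
                            # followed by RADIUS-1 newer revisions, which is
                            # the span its persistence stats are measured over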

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1

            if self.persist != PersistMethod.none:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window) - (i + 1)
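                    # revisions near the end of a page never accumulate a full
                    # window, so tokens_window records the smaller span used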

                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

    def print_rev_data(self, rev_data):
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
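        # note: both the header and each row are emitted in sorted key order,
        # so columns stay aligned as long as every revision carries the same fields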

def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*']
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]
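    # if none of the suffixes matched, cmd is never bound: Popen below then
    # raises NameError and we fall back to reading the file uncompressed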
    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    except NameError:
        input_file = open(input_filename, 'r')

    return input_file

def open_output_file(input_filename):
    # create a regex that creates the output filename
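    # e.g. "enwiki-20200101-pages.xml.bz2" becomes "enwiki-20200101-pages.tsv"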
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file

def main():
    parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

    # arguments for the input direction
    parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                        help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

    parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                        help="Directory for output files.")

    parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                        help="Write output to standard out (do not create dump file)")

    parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                        help="Operate only on the final revision within each sequence of consecutive edits made by the same user. This can be useful for addressing issues with text persistence measures.")

    parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
                        help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. Use -p=segment for an advanced persistence calculation method that is robust to content moves, but can be very slow. Use -p=legacy for legacy behavior.")

    parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                        help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")

    parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                        help="Id number of namespace to include. Can be specified more than once.")

    args = parser.parse_args()

    # set persistence method
    if args.persist is None:
        persist = PersistMethod.none
    elif args.persist == "segment":
        persist = PersistMethod.segment
    elif args.persist == "legacy":
        persist = PersistMethod.legacy
    else:
        persist = PersistMethod.sequence

    if args.namespace_filter is not None:
        namespaces = args.namespace_filter
    else:
        namespaces = None

    if len(args.dumpfiles) > 0:
        for filename in args.dumpfiles:
            input_file = open_input_file(filename)

            # open directory for output
            if args.output_dir:
                output_dir = args.output_dir[0]
            else:
                output_dir = "."

            print("Processing file: %s" % filename, file=sys.stderr)

            if args.stdout:
                output_file = sys.stdout
            else:
                filename = os.path.join(output_dir, os.path.basename(filename))
                output_file = open_output_file(filename)

            wikiq = WikiqParser(input_file, output_file,
                                collapse_user=args.collapse_user,
                                persist=persist,
                                urlencode=args.urlencode,
                                namespaces=namespaces)

            wikiq.process()
            input_file.close()
            output_file.close()
    else:
        wikiq = WikiqParser(sys.stdin, sys.stdout,
                            collapse_user=args.collapse_user,
                            persist=persist,
                            urlencode=args.urlencode,
                            namespaces=namespaces)

        wikiq.process()

    # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
    # stop_words = stop_words.split(",")

if __name__ == "__main__":
    main()