# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
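# this version also adds a 'matches' column reporting regular expression
# matches found in revision comments and/or text (see matchmaker() below)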
import argparse
import sys
import os, os.path
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mwxml import Dump

from deltas.tokenizers import wikitext_split
import mwpersistence
import mwreverts

from urllib.parse import quote
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7

from deltas import SequenceMatcher
from deltas import SegmentMatcher

class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3
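# calculate_persistence() summarizes the persistence data attached to each
# token: it returns a pair (persistent token revisions, number of tokens
# added). A token that remains visible across 5 revisions contributes
# 5 - 1 = 4 persistent token revisions to the first element.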
def calculate_persistence(tokens_added):
    return(sum([(len(x.revisions)-1) for x in tokens_added]),
           len(tokens_added))
def matchmaker(rev_data, regular_expression, scanner, rev):
    # scanner holds the locations to search ('comment', 'text', or both),
    # built into a list by the -rs/--regex-scanner argument
    for location in scanner:
        if location == "comment":
            matching_string = rev.comment
        elif location == "text":
            matching_string = rev.text
        else:
            sys.exit("regex scanner location must be 'comment' or 'text'.")

        if re.search(regular_expression, matching_string) is not None:
            # collect all the matchObjects and join them into one string
            m = re.finditer(regular_expression, matching_string)
            blob = ""
            for result in m:
                blob = blob + "," + result.group(0)
            # the comma-separated list of matches, cleaned in post-processing
            rev_data['matches'] = blob
        else:
            rev_data['matches'] = None

    return rev_data
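# Example (pattern is illustrative): matchmaker({}, r"\[\[Category:[^\]]+\]\]",
# ["text"], rev) sets rev_data['matches'] to a comma-separated string of the
# category links found in rev.text, or to None if the pattern does not match.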
class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = { ns.id : ns.name for ns in
                               self.mwiterator.site_info.namespaces }
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map = self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)
class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        self.namespace = page.namespace
        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            self.title = page.title
            page.namespace = 0
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        else:
            self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()
    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration    rev.user    prev_rev.user    add prev_rev?
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a seq by a user and reset
                    # also yield if we don't know who the user is
                    if rev.deleted.user or prev_rev.deleted.user:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs

                    elif not rev.user.text == prev_rev.user.text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs

                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs

                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev

        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)
class WikiqParser():
    def __init__(self, input_file, output_file, scanner, match_regex, collapse_user=False, persist=None, urlencode=False, namespaces=None):
        """
        Parameters:
           persist : what persistence method to use. Takes a PersistMethod value.
        """
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False
        self.namespaces = []
        self.urlencode = urlencode
        self.scanner = scanner
        self.match_regex = match_regex

        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None
    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns
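    # self.namespaces maps namespace names to id numbers, so a title like
    # "Talk:Example" returns self.namespaces["Talk"] (namespace 1 on a
    # standard MediaWiki install); titles with no recognized prefix fall
    # through to the default namespace.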
    def process(self):
        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract list of namespaces
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
        page_count = 0
        rev_count = 0

        # Iterate through pages
        for page in dump:
            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)

            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if namespace not in self.namespace_filter:
                    continue

            rev_detector = mwreverts.Detector()
            if self.persist != PersistMethod.none:
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if self.persist == PersistMethod.sequence:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                elif self.persist == PersistMethod.segment:
                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                # self.persist == PersistMethod.legacy
                else:
                    from mw.lib import persistence
                    state = persistence.State()
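            # Persistence backends: 'sequence' diffs revisions with the deltas
            # SequenceMatcher, 'segment' uses SegmentMatcher, which also follows
            # content that moves within a page, and 'legacy' falls back to the
            # older mw library for compatibility with earlier research code.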
            # Iterate through a page's revisions
            for rev in page:
                rev_data = {}

                if self.scanner is not None: # we know we want to do a regex search
                    ## comment = want to look in comment attached to revision
                    ## text = want to look in revision text
                    ## (re.finditer() inside matchmaker finds all instances)

                    ### call the scanner function
                    rev_data = matchmaker(rev_data, self.match_regex, self.scanner, rev)

                if self.scanner is not None and rev_data['matches'] is None:
                    continue

                # we fill out the rest of the data structure now
                rev_data['revid'] = rev.id
                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
                rev_data['articleid'] = page.id
                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
                rev_data['title'] = '"' + page.title + '"'
                rev_data['namespace'] = namespace
                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"
                # if revisions are deleted, /many/ things will be missing
                if rev.deleted.text:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""
                else:
                    # rev.text can be None if the page has no text
                    if not rev.text:
                        rev.text = ""

                    # if text exists, we'll check for a sha1 and generate one otherwise
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)
                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)

                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""
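                    # mwreverts detects identity reverts: if this revision's
                    # checksum matches one seen earlier within the detector's
                    # radius, the intervening revisions are recorded as reverted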
                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if not rev.deleted.user:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.user.text + '"'
                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"
                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""
                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):

                #TODO missing: additions_size deletions_size
                # if collapse user was on, let's run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs
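                # collapsed_revs counts how many consecutive edits by the same
                # user were collapsed into this row (set in WikiqPage.rev_list)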
                if self.persist != PersistMethod.none:
                    if rev.deleted.text:
                        # deleted text means persistence can't be computed
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                    else:
                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1
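            # revisions still in the deque when the page ends are flushed below
            # with a smaller tokens_window, since fewer than PERSISTENCE_RADIUS-1
            # later revisions were available to judge their persistence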
            if self.persist != PersistMethod.none:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window)-(i+1)

                    self.print_rev_data(rev_data)

            page_count += 1
369 print("Done: %s revisions and %s pages." % (rev_count, page_count),
    def print_rev_data(self, rev_data):
        # URL-encode fields that can contain problematic characters
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
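    # the header and every row are emitted in sorted key order, so all
    # revisions must produce the same key set for the TSV columns to align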
def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*']
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    except NameError:
        # cmd was never assigned, so treat the file as uncompressed XML
        input_file = open(input_filename, 'r')

    return input_file
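# decompression is delegated to external tools, so 7za, zcat, and bzcat must
# be on the PATH for their respective formats to be readable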
def open_output_file(input_filename):
    # strip the compression and .xml extensions to derive the output filename
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file
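# e.g. an input named "enwiki-20200101-pages.xml.bz2" would be written out
# to "enwiki-20200101-pages.tsv"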
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

# arguments for the input direction
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create dump file).")

parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision made by a user within each sequence of consecutive edits by that user. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")

parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")

parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                    help="Id number of namespace to include. Can be specified more than once.")

parser.add_argument('-rs', '--regex-scanner', dest="scanner", type=str, action='append',
                    help="Find the regex match specified by -R/--match searching in: (1) comment (2) text.")

parser.add_argument('-R', '--match', dest="match_regex", type=str,
                    help="The regular expression to search for. Each revision's matches are reported in the 'matches' output column.")
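# Example invocation (filenames illustrative):
#   python3 wikiq enwiki-20200101-pages.xml.bz2 -o output -p sequence \
#       -R "\[\[Category:[^\]]+\]\]" -rs text -rs comment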
args = parser.parse_args()

# set persistence method
if args.persist is None:
    persist = PersistMethod.none
elif args.persist == "segment":
    persist = PersistMethod.segment
elif args.persist == "legacy":
    persist = PersistMethod.legacy
else:
    persist = PersistMethod.sequence

if args.namespace_filter is not None:
    namespaces = args.namespace_filter
else:
    namespaces = None
if len(args.dumpfiles) > 0:
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open directory for output
        if args.output_dir:
            output_dir = args.output_dir[0]
        else:
            output_dir = "."

        print("Processing file: %s" % filename, file=sys.stderr)

        if args.stdout:
            output_file = sys.stdout
        else:
            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)

        wikiq = WikiqParser(input_file, output_file,
                            collapse_user=args.collapse_user,
                            persist=persist,
                            urlencode=args.urlencode,
                            namespaces=namespaces,
                            match_regex=args.match_regex, # the two new regex-searching args
                            scanner=args.scanner)

        wikiq.process()

        # close things
        input_file.close()
        output_file.close()
else:
    wikiq = WikiqParser(sys.stdin, sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=persist,
                        #persist_legacy=args.persist_legacy,
                        urlencode=args.urlencode,
                        namespaces=namespaces,
                        match_regex=args.match_regex, # the two new regex-searching args
                        scanner=args.scanner)
    wikiq.process()
# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")