#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import sys
import os, os.path
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mwxml import Dump

from deltas.tokenizers import wikitext_split
import mwpersistence
import mwreverts
from urllib.parse import quote

TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7

from deltas import SequenceMatcher
from deltas import SegmentMatcher


class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3


def calculate_persistence(tokens_added):
    return(sum([(len(x.revisions) - 1) for x in tokens_added]),
           len(tokens_added))


def matchmake(scanned_content, rev_data, regex, label):
    p = re.compile(regex)

    temp_dict = {}
    # if there are named capture groups in the regex
    if bool(p.groupindex):
        capture_groups = list(p.groupindex.keys())

        # initialize the {capture_group_name:list} for each capture group
        for cap_group in capture_groups:
            temp_dict["{}_{}".format(label, cap_group)] = []

        # if there are matches of some sort in this revision content, fill the lists for each cap_group
        if p.search(scanned_content) is not None:
            m = re.finditer(p, scanned_content)
            matchobjects = list(m)

            for cap_group in capture_groups:
                temp_list = []
                for match in matchobjects:
                    # we only want to add the match for the capture group if the match is not None
                    if match.group(cap_group) is not None:
                        temp_list.append(match.group(cap_group))

                # if temp_list of matches is empty just make that column None
                if len(temp_list) == 0:
                    temp_dict["{}_{}".format(label, cap_group)] = None
                # else we put in the list we made in the for-loop above
                else:
                    temp_dict["{}_{}".format(label, cap_group)] = ', '.join(temp_list)

        # there are no matches at all in this revision content, we default values to None
        else:
            for cap_group in capture_groups:
                temp_dict["{}_{}".format(label, cap_group)] = None

    # there are no capture groups, we just search for all the matches of the regex
    else:
        # given that there are matches to be made
        if p.search(scanned_content) is not None:
            m = p.findall(scanned_content)
            temp_dict[label] = ', '.join(m)
        else:
            temp_dict[label] = None

    # update rev_data with our new columns
    rev_data.update(temp_dict)
    # print(rev_data.keys())
    return rev_data


class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = {ns.id: ns.name for ns in
                              self.mwiterator.site_info.namespaces}
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map=self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        # note: this must use the name-mangled __pages attribute;
        # referencing self._pages here would raise an AttributeError
        return next(self.__pages)


class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        self.namespace = page.namespace
        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            self.title = page.title
            page.namespace = 0
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        else:
            self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()
    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration   rev.user   prev_rev.user   add prev_rev?
        #         0          A            None           Never
        #         1          A               A           False
        #         2          B               A            True
        #         3          A               B            True
        #         4          A               A           False
        # Post-loop                          A          Always
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a sequence by a user and reset
                    # also yield if we do not know who the user is
                    if rev.deleted.user or prev_rev.deleted.user:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs

                    elif not rev.user.text == prev_rev.user.text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs

                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs

                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev

        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)


class WikiqParser():
    def __init__(self, input_file, output_file,
                 regex_match_revision, regex_match_comment,
                 regex_revision_label, regex_comment_label,
                 collapse_user=False,
                 persist=PersistMethod.none,
                 urlencode=False,
                 namespaces=None,
                 revert_radius=15):
        """
        Parameters:
           persist : what persistence method to use. Takes a PersistMethod value.
        """
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False
        self.namespaces = []
        self.urlencode = urlencode
        self.revert_radius = revert_radius

        self.regex_match_revision = regex_match_revision
        self.regex_revision_label = regex_revision_label
        self.regex_match_comment = regex_match_comment
        self.regex_comment_label = regex_comment_label

        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns

    def process(self):

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract list of namespaces
        self.namespaces = {ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces}

        page_count = 0
        rev_count = 0

        # Iterate through pages
        for page in dump:
            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)

            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if namespace not in self.namespace_filter:
                    continue

            # print(self.revert_radius)
            rev_detector = mwreverts.Detector(radius=self.revert_radius)

            if self.persist != PersistMethod.none:
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if self.persist == PersistMethod.sequence:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                elif self.persist == PersistMethod.segment:
                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                # self.persist == PersistMethod.legacy
                else:
                    from mw.lib import persistence
                    state = persistence.State()

            # Iterate through a page's revisions
            for rev in page:

                # initialize rev_data
                rev_data = {}

                # if the command line args only gave a label (and no regular expression is given)
                if (self.regex_revision_label is not None and self.regex_match_revision is None) or \
                   (self.regex_comment_label is not None and self.regex_match_comment is None):
                    sys.exit('The given regex label(s) has no corresponding regex to search for.')

                # if there's anything in the list of regex_match_revision
                if self.regex_match_revision is not None:
                    if (self.regex_revision_label is None) or (len(self.regex_match_revision) != len(self.regex_revision_label)):
                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')

                    # initialize and construct the list of regex-label tuples
                    pairs = []
                    for i in range(0, len(self.regex_match_revision)):
                        pairs.append((self.regex_match_revision[i], self.regex_revision_label[i]))

                    # for each regex/label pair, we now run matchmake to check and output columns
                    for pair in pairs:
                        # pair[0] corresponds to the regex, pair[1] to the label
                        rev_data = matchmake(rev.text, rev_data, pair[0], pair[1])

                # if there's anything in the list of regex_match_comment
                if self.regex_match_comment is not None:
                    if (self.regex_comment_label is None) or (len(self.regex_match_comment) != len(self.regex_comment_label)):
                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')

                    # initialize and construct the list of regex-label tuples
                    pairs = []
                    for i in range(0, len(self.regex_match_comment)):
                        pairs.append((self.regex_match_comment[i], self.regex_comment_label[i]))

                    # for each regex/label pair, we now run matchmake to check and output columns
                    for pair in pairs:
                        # pair[0] corresponds to the regex, pair[1] to the label
                        rev_data = matchmake(rev.comment, rev_data, pair[0], pair[1])

                # we fill out the rest of the data structure now
                rev_data['revid'] = rev.id
                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
                rev_data['articleid'] = page.id
                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
                rev_data['title'] = '"' + page.title + '"'
                rev_data['namespace'] = namespace
                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"

                # if revisions are deleted, /many/ things will be missing
                if rev.deleted.text:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""

                else:
                    # rev.text can be None if the page has no text
                    if not rev.text:
                        rev.text = ""
                    # if text exists, we'll check for a sha1 and generate one otherwise
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)

                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if not rev.deleted.user:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.user.text + '"'
                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"

                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""

                # if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #     redirect = True
                # else:
                #     redirect = False

                # TODO missing: additions_size deletions_size

                # if collapse user was on, lets run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs

                if self.persist != PersistMethod.none:
                    if rev.deleted.text:
                        # deleted text can't be diffed, so the persistence columns stay empty;
                        # print the row directly rather than writing into the (possibly
                        # unbound) old_rev_data left over from a previous window iteration
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                        self.print_rev_data(rev_data)

                    else:
                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)

                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS - 1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1

            if self.persist != PersistMethod.none:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window) - (i + 1)

                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)
    def print_rev_data(self, rev_data):
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)


def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*']
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    except NameError:
        # none of the compressed-file patterns matched, so cmd is unbound;
        # fall back to reading the file directly
        input_file = open(input_filename, 'r')

    return input_file


def open_output_file(input_filename):
    # create a regex that creates the output filename
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file


parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

# arguments for the input direction
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create dump file).")

parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision made by a user within each sequence of consecutive edits by that user. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for a more advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")

parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")

parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                    help="Id number of namespace to include. Can be specified more than once.")

parser.add_argument('-rr', '--revert-radius', dest="revert_radius", type=int, action='store', default=15,
                    help="Number of edits to check when looking for reverts (default: 15).")

parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
                    help="The label for the output column based on matching the regex in revision text.")

parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
                    help="The regular expression to search for in comments of revisions.")

parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
                    help="The label for the output column based on matching the regex in comments.")

args = parser.parse_args()

# set persistence method
if args.persist is None:
    persist = PersistMethod.none
elif args.persist == "segment":
    persist = PersistMethod.segment
elif args.persist == "legacy":
    persist = PersistMethod.legacy
else:
    persist = PersistMethod.sequence

if args.namespace_filter is not None:
    namespaces = args.namespace_filter
else:
    namespaces = None

if len(args.dumpfiles) > 0:
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open directory for output
        if args.output_dir:
            output_dir = args.output_dir[0]
        else:
            output_dir = "."

        print("Processing file: %s" % filename, file=sys.stderr)

        if args.stdout:
            output_file = sys.stdout
        else:
            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)

        wikiq = WikiqParser(input_file,
                            output_file,
                            collapse_user=args.collapse_user,
                            persist=persist,
                            urlencode=args.urlencode,
                            namespaces=namespaces,
                            revert_radius=args.revert_radius,
                            regex_match_revision=args.regex_match_revision,
                            regex_revision_label=args.regex_revision_label,
                            regex_match_comment=args.regex_match_comment,
                            regex_comment_label=args.regex_comment_label)

        wikiq.process()

        # close things
        input_file.close()
        output_file.close()
else:
    wikiq = WikiqParser(sys.stdin,
                        sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=persist,
                        # persist_legacy=args.persist_legacy,
                        urlencode=args.urlencode,
                        namespaces=namespaces,
                        revert_radius=args.revert_radius,
                        regex_match_revision=args.regex_match_revision,
                        regex_revision_label=args.regex_revision_label,
                        regex_match_comment=args.regex_match_comment,
                        regex_comment_label=args.regex_comment_label)

    wikiq.process()

# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")
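
# A minimal usage sketch. The script name "wikiq" and the dump filename below are
# assumptions for illustration only; they are not defined anywhere in this file.
#
#   # write a TSV next to the dump in output/, keeping only namespaces 0 and 1,
#   # with sequence-based persistence measures
#   ./wikiq enwiki-20200101-pages-meta-history1.xml.7z -o output/ -n 0 -n 1 -p sequence
#
#   # or stream an uncompressed dump on stdin and write TSV to stdout
#   cat dump.xml | ./wikiq -p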