diff --git a/wikiq b/wikiq
index f115fdc..d7bc0bd 100755
--- a/wikiq
+++ b/wikiq
@@ -13,11 +13,22 @@ from subprocess import Popen, PIPE
 from collections import deque
 from hashlib import sha1
 
-from mw.xml_dump import Iterator
-from mw.lib import persistence
-from mw.lib import reverts
+from mwxml import Dump
+from deltas.tokenizers import wikitext_split
+import mwpersistence
+import mwreverts
+from urllib.parse import quote
 
+TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
+from deltas import SequenceMatcher
+from deltas import SegmentMatcher
+
+class PersistMethod:
+    none = 0
+    sequence = 1
+    segment = 2
+    legacy = 3
 
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
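The PersistMethod constants above replace the old boolean persist flag with a choice of four strategies, while calculate_persistence keeps its old meaning: for each token added in a revision, count how many later revisions that token survived. A minimal self-contained sketch of that computation; the namedtuple is a hypothetical stand-in for the token objects mwpersistence yields, of which only the .revisions attribute matters here:

    from collections import namedtuple

    # Hypothetical stand-in for mwpersistence token objects.
    Token = namedtuple('Token', ['text', 'revisions'])

    def calculate_persistence(tokens_added):
        # (total later revisions survived, number of tokens added)
        return (sum(len(t.revisions) - 1 for t in tokens_added),
                len(tokens_added))

    tokens = [Token("quick", revisions=[10, 11, 12]),  # survived two later revisions
              Token("slow", revisions=[10])]           # removed immediately
    print(calculate_persistence(tokens))  # -> (2, 2)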
@@ -27,12 +38,16 @@ class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
         self.collapse_user = collapse_user
-        self.mwiterator = Iterator.from_file(self.fh)
+        self.mwiterator = Dump.from_file(self.fh)
+        self.namespace_map = { ns.id : ns.name for ns in
+                               self.mwiterator.site_info.namespaces }
         self.__pages = self.load_pages()
 
     def load_pages(self):
         for page in self.mwiterator:
-            yield WikiqPage(page, collapse_user=self.collapse_user)
+            yield WikiqPage(page,
+                            namespace_map = self.namespace_map,
+                            collapse_user=self.collapse_user)
 
     def __iter__(self):
         return self.__pages
@@ -44,14 +59,20 @@ class WikiqPage():
     __slots__ = ('id', 'title', 'namespace', 'redirect',
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')
-
-    def __init__(self, page, collapse_user=False):
+
+    def __init__(self, page, namespace_map, collapse_user=False):
         self.id = page.id
-        self.title = page.title
         self.namespace = page.namespace
-        self.redirect = page.redirect
+        # following mwxml, we assume namespace 0 in cases where
+        # page.namespace is inconsistent with namespace_map
+        if page.namespace not in namespace_map:
+            self.title = page.title
+            page.namespace = 0
+        if page.namespace != 0:
+            self.title = ':'.join([namespace_map[page.namespace], page.title])
+        else:
+            self.title = page.title
         self.restrictions = page.restrictions
         self.collapse_user = collapse_user
         self.mwpage = page
         self.__revisions = self.rev_list()
@@ -75,7 +96,14 @@ class WikiqPage():
         else:
             if self.collapse_user:
                 # yield if this is the last edit in a seq by a user and reset
-                if not rev.contributor.user_text == prev_rev.contributor.user_text:
+                # also yield if we don't know who the user is
+
+                if rev.deleted.user or prev_rev.deleted.user:
+                    yield prev_rev
+                    collapsed_revs = 1
+                    rev.collapsed_revs = collapsed_revs
+
+                elif not rev.user.text == prev_rev.user.text:
                     yield prev_rev
                     collapsed_revs = 1
                     rev.collapsed_revs = collapsed_revs
@@ -88,6 +116,7 @@ class WikiqPage():
                 yield prev_rev
             prev_rev = rev
 
+        # also yield the final time
         yield prev_rev
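The rev_list logic above collapses each run of consecutive edits by the same user into the run's final revision, recording the run length in collapsed_revs; a revision whose username is suppressed (rev.deleted.user) cannot be matched to its neighbors, so it always terminates a run. A rough sketch of the same rule over simplified (user, rev_id) pairs — illustrative only, since the real code streams mwxml revision objects:

    from itertools import groupby

    def collapse(revs):
        # Group consecutive revisions by username. A suppressed user (None)
        # gets a fresh sentinel key, so it never merges with its neighbors.
        grouped = groupby(revs, key=lambda r: r[0] if r[0] is not None else object())
        for _, run in grouped:
            run = list(run)
            user, last_id = run[-1]
            yield user, last_id, len(run)  # user, final rev id, collapsed_revs

    revs = [("alice", 1), ("alice", 2), ("bob", 3), (None, 4), (None, 5)]
    print(list(collapse(revs)))
    # [('alice', 2, 2), ('bob', 3, 1), (None, 4, 1), (None, 5, 1)]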
@@ -98,15 +127,41 @@ class WikiqPage():
         return next(self.__revisions)
 
 class WikiqParser():
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False):
+
+    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None):
+        """
+        Parameters:
+           persist : what persistence method to use. Takes a PersistMethod value
+        """
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
         self.persist = persist
         self.printed_header = False
+        self.namespaces = []
+        self.urlencode = urlencode
 
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
+
+    def __get_namespace_from_title(self, title):
+        default_ns = None
+
+        for ns in self.namespaces:
+            # skip if the namespace is not defined
+            if ns == None:
+                default_ns = self.namespaces[ns]
+                continue
+
+            if title.startswith(ns + ":"):
+                return self.namespaces[ns]
+
+        # if we've made it this far with no matches, we return the default namespace
+        return default_ns
 
     def process(self):
-        print("Processing file: %s" % self.input_file.name, file=sys.stderr)
 
         # create a regex that creates the output filename
         # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
@@ -116,15 +171,39 @@ class WikiqParser():
 
         # Construct dump file iterator
         dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
 
+        # extract list of namespaces
+        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
+
         page_count = 0
         rev_count = 0
+
+        # Iterate through pages
         for page in dump:
-            if self.persist:
-                state = persistence.State()
+            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
+
+            # skip namespaces not in the filter
+            if self.namespace_filter is not None:
+                if namespace not in self.namespace_filter:
+                    continue
+
+            rev_detector = mwreverts.Detector()
+
+            if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
-                rev_detector = reverts.Detector()
 
+                if self.persist == PersistMethod.sequence:
+                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                elif self.persist == PersistMethod.segment:
+                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                # self.persist == PersistMethod.legacy
+                else:
+                    from mw.lib import persistence
+                    state = persistence.State()
 
             # Iterate through a page's revisions
             for rev in page:
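For orientation, here is roughly how the mwpersistence.DiffState object configured above behaves when driven by hand over a toy revision history. The state.update(text, rev_id) call is the same one the new code makes; the assumption that the returned token objects expose a .text attribute (alongside the .revisions list used by calculate_persistence) reflects my reading of mwpersistence and should be checked against the installed version:

    import mwpersistence
    from deltas import SequenceMatcher
    from deltas.tokenizers import wikitext_split

    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                    revert_radius=7)

    history = [(1, "the slow fox"),
               (2, "the quick fox"),
               (3, "the quick brown fox")]

    for rev_id, text in history:
        # update() returns a 3-tuple; wikiq discards the first element
        _, added, removed = state.update(text, rev_id)
        print(rev_id, [t.text for t in added], [t.text for t in removed])
    # revision 2 should report "quick" added and "slow" removed,
    # while "the" and "fox" persist across all three revisions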
@@ -132,23 +211,28 @@ class WikiqParser():
 
             rev_data = {'revid' : rev.id,
                         'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                         'articleid' : page.id,
-                        'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
+                        'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
                         'title' : '"' + page.title + '"',
-                        'namespace' : page.namespace,
-                        'deleted' : "TRUE" if rev.text.deleted else "FALSE" }
+                        'namespace' : namespace,
+                        'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
 
             # if revisions are deleted, /many/ things will be missing
-            if rev.text.deleted:
+            if rev.deleted.text:
                 rev_data['text_chars'] = ""
                 rev_data['sha1'] = ""
                 rev_data['revert'] = ""
                 rev_data['reverteds'] = ""
 
             else:
+                # rev.text can be None if the page has no text
+                if not rev.text:
+                    rev.text = ""
                 # if text exists, we'll check for a sha1 and generate one otherwise
+
                 if rev.sha1:
                     text_sha1 = rev.sha1
                 else:
+
                     text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
 
                 rev_data['sha1'] = text_sha1
@@ -158,6 +242,7 @@ class WikiqParser():
 
                 # generate revert data
                 revert = rev_detector.process(text_sha1, rev.id)
+
                 if revert:
                     rev_data['revert'] = "TRUE"
                     rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
@@ -168,10 +253,10 @@ class WikiqParser():
             # if the fact that the edit was minor can be hidden, this might be an issue
             rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
 
-            if rev.contributor.user_text:
+            if not rev.deleted.user:
                 # wrap user-defined editors in quotes for fread
-                rev_data['editor'] = '"' + rev.contributor.user_text + '"'
-                rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
+                rev_data['editor'] = '"' + rev.user.text + '"'
+                rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
 
             else:
                 rev_data['anon'] = ""
@@ -188,12 +273,18 @@ class WikiqParser():
             if self.collapse_user:
                 rev_data['collapsed_revs'] = rev.collapsed_revs
 
-            if self.persist:
-                if rev.text.deleted:
+            if self.persist != PersistMethod.none:
+                if rev.deleted.text:
                     for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                         old_rev_data[k] = None
                 else:
-                    _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
+                    if self.persist != PersistMethod.legacy:
+                        _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
+
+                    else:
+                        _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
                     window.append((rev.id, rev_data, tokens_added, tokens_removed))
 
                     if len(window) == PERSISTENCE_RADIUS:
@@ -213,7 +304,7 @@ class WikiqParser():
 
             rev_count += 1
 
-        if self.persist:
+        if self.persist != PersistMethod.none:
             # print out metadata for the last RADIUS revisions
             for i, item in enumerate(window):
                 # if the window was full, we've already printed item 0
@@ -227,7 +318,7 @@ class WikiqParser():
                 rev_data["tokens_added"] = num_tokens
                 rev_data["tokens_removed"] = len(tokens_removed)
                 rev_data["tokens_window"] = len(window)-(i+1)
-                
+
                 self.print_rev_data(rev_data)
 
         page_count += 1
@@ -237,6 +328,10 @@ class WikiqParser():
 
     def print_rev_data(self, rev_data):
         # if it's the first time through, print the header
+        if self.urlencode:
+            for field in TO_ENCODE:
+                rev_data[field] = quote(str(rev_data[field]))
+
         if not self.printed_header:
             print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
             self.printed_header = True
@@ -245,12 +340,12 @@ class WikiqParser():
 
 
 def open_input_file(input_filename):
-    if re.match(r'.*\.7z', input_filename):
-        cmd = ["7za", "x", "-so", input_filename]
-    elif re.match(r'.*\.gz', input_filename):
-        cmd = ["zcat", input_filename]
-    elif re.match(r'.*\.bz2', input_filename):
+    if re.match(r'.*\.7z$', input_filename):
+        cmd = ["7za", "x", "-so", input_filename, '*']
+    elif re.match(r'.*\.gz$', input_filename):
         cmd = ["zcat", input_filename]
+    elif re.match(r'.*\.bz2$', input_filename):
+        cmd = ["bzcat", "-dk", input_filename]
 
     try:
         input_file = Popen(cmd, stdout=PIPE).stdout
@@ -261,7 +356,8 @@ def open_input_file(input_filename):
 
 def open_output_file(input_filename):
     # create a regex that creates the output filename
-    output_filename = re.sub(r'\.xml(\.(7z|gz|bz2))?$', '', input_filename)
+    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
+    output_filename = re.sub(r'\.xml', '', output_filename)
     output_filename = output_filename + ".tsv"
     output_file = open(output_filename, "w")
@@ -282,30 +378,59 @@ parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
 
 parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                     help="Operate only on the final revision made by a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
 
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. Use -p=segment for an advanced persistence calculation method that is robust to content moves but can be very slow. Use -p=legacy for the legacy behavior.")
+
+parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
+                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of namespace to include.")
+
+
 args = parser.parse_args()
 
+# set persistence method
+
+if args.persist is None:
+    persist = PersistMethod.none
+elif args.persist == "segment":
+    persist = PersistMethod.segment
+elif args.persist == "legacy":
+    persist = PersistMethod.legacy
+else:
+    persist = PersistMethod.sequence
+
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
 
-        # open file for output
+        # open directory for output
+        if args.output_dir:
+            output_dir = args.output_dir[0]
+        else:
+            output_dir = "."
+
+        print("Processing file: %s" % filename, file=sys.stderr)
+
         if args.stdout:
             output_file = sys.stdout
         else:
-            if args.output_dir:
-                output_dir = args.output_dir[0]
-            else:
-                output_dir = "."
-
             filename = os.path.join(output_dir, os.path.basename(filename))
             output_file = open_output_file(filename)
 
         wikiq = WikiqParser(input_file, output_file,
-                            collapse_user=args.collapse_user,
-                            persist=args.persist)
+                            collapse_user=args.collapse_user,
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces = namespaces)
+
         wikiq.process()
 
         # close things
@@ -313,8 +438,11 @@ if len(args.dumpfiles) > 0:
         output_file.close()
 else:
     wikiq = WikiqParser(sys.stdin, sys.stdout,
-                        collapse_user=args.collapse_user,
-                        persist=args.persist)
+                        collapse_user=args.collapse_user,
+                        persist=persist,
+                        urlencode=args.urlencode,
+                        namespaces = namespaces)
     wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
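A closing note on the new --url-encode option: editor names and titles may legally contain characters such as newlines or tabs that would corrupt tab-separated output, and print_rev_data works around this by passing the TO_ENCODE fields through urllib.parse.quote. The encoding is lossless, so a downstream reader of the TSV can reverse it with unquote:

    from urllib.parse import quote, unquote

    editor = 'Editor\nwith\todd "characters"'
    encoded = quote(str(editor))       # what print_rev_data does for TO_ENCODE fields
    print(encoded)                     # Editor%0Awith%09odd%20%22characters%22
    assert unquote(encoded) == editor  # lossless round trip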