X-Git-Url: https://code.communitydata.science/mediawiki_dump_tools.git/blobdiff_plain/3f9da4074733981fa1dda6e88af75a19054e6c52..d77b0a4965e59a3e5f84956b27dad58d98110c06:/wikiq?ds=sidebyside

diff --git a/wikiq b/wikiq
index f25874e..9260f35 100755
--- a/wikiq
+++ b/wikiq
@@ -3,7 +3,7 @@
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
-
+import pdb
 import argparse
 import sys
 import os, os.path
@@ -14,16 +14,20 @@ from collections import deque
 from hashlib import sha1
 from mw.xml_dump import Iterator
-from mw.lib import persistence
-from mw.lib import reverts
+
+from deltas.tokenizers import wikitext_split
+import mwpersistence
+import mwreverts
 
 from urllib.parse import quote
 TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
+from deltas import SequenceMatcher
 
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
+
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
@@ -101,12 +105,13 @@ class WikiqPage():
 
 class WikiqParser():
 
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
+    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
 
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
         self.persist = persist
+        self.persist_legacy = persist_legacy
         self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
@@ -141,13 +146,22 @@ class WikiqParser():
 
         page_count = 0
         rev_count = 0
+
+
         # Iterate through pages
         for page in dump:
-            if self.persist:
-                state = persistence.State()
+            rev_detector = mwreverts.Detector()
+
+            if self.persist or self.persist_legacy:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
-                rev_detector = reverts.Detector()
+                if not self.persist_legacy:
+                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                else:
+                    from mw.lib import persistence
+                    state = persistence.State()
 
             # Iterate through a page's revisions
             for rev in page:
@@ -181,6 +195,7 @@
 
                 # generate revert data
                 revert = rev_detector.process(text_sha1, rev.id)
+
                 if revert:
                     rev_data['revert'] = "TRUE"
                     rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
@@ -211,12 +226,18 @@
                 if self.collapse_user:
                     rev_data['collapsed_revs'] = rev.collapsed_revs
 
-                if self.persist:
+                if self.persist or self.persist_legacy:
                     if rev.text.deleted:
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
-                        _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
+                        if not self.persist_legacy:
+                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
+
+                        else:
+                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
                         window.append((rev.id, rev_data, tokens_added, tokens_removed))
 
                         if len(window) == PERSISTENCE_RADIUS:
@@ -236,7 +257,7 @@
 
                 rev_count += 1
 
-        if self.persist:
+        if self.persist or self.persist_legacy:
             # print out metadata for the last RADIUS revisions
             for i, item in enumerate(window):
                 # if the window was full, we've already printed item 0
@@ -272,12 +293,12 @@ class WikiqParser():
 
 
 def open_input_file(input_filename):
-    if re.match(r'.*\.7z', input_filename):
-        cmd = ["7za", "x", "-so", input_filename, '*.xml']
-    elif re.match(r'.*\.gz', input_filename):
-        cmd = ["zcat", input_filename]
-    elif re.match(r'.*\.bz2', input_filename):
+    if re.match(r'.*\.7z$', input_filename):
+        cmd = ["7za", "x", "-so", input_filename, '*']
+    elif re.match(r'.*\.gz$', input_filename):
         cmd = ["zcat", input_filename]
+    elif re.match(r'.*\.bz2$', input_filename):
+        cmd = ["bzcat", "-dk", input_filename]
 
     try:
         input_file = Popen(cmd, stdout=PIPE).stdout
@@ -316,30 +337,35 @@ parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
+parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
+                    help="Legacy behavior for persistence calculation.")
+
 args = parser.parse_args()
 
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
 
-        # open file for output
+        # open directory for output
+        if args.output_dir:
+            output_dir = args.output_dir[0]
+        else:
+            output_dir = "."
+
+        print("Processing file: %s" % filename, file=sys.stderr)
+
        if args.stdout:
            output_file = sys.stdout
        else:
-            if args.output_dir:
-                output_dir = args.output_dir[0]
-            else:
-                output_dir = "."
-
            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)
 
        wikiq = WikiqParser(input_file, output_file,
-                            collapse_user=args.collapse_user,
+                            collapse_user=args.collapse_user,
                            persist=args.persist,
+                            persist_legacy=args.persist_legacy,
                            urlencode=args.urlencode)
 
-        print("Processing file: %s" % filename, file=sys.stderr)
 
        wikiq.process()
 
@@ -348,8 +374,9 @@ if len(args.dumpfiles) > 0:
        output_file.close()
 else:
    wikiq = WikiqParser(sys.stdin, sys.stdout,
-                        collapse_user=args.collapse_user,
+                        collapse_user=args.collapse_user,
                        persist=args.persist,
+                        persist_legacy=args.persist_legacy,
                        urlencode=args.urlencode)
 
    wikiq.process()
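
The substantive change in this patch is the move from the old mw.lib.persistence / mw.lib.reverts modules to the mwreverts and mwpersistence packages, with diffs computed by a deltas SequenceMatcher over wikitext_split tokens; the old code path is kept behind the new --persistence-legacy flag. Below is a minimal sketch of the new pipeline outside of wikiq, assuming mwreverts, mwpersistence, and deltas are installed; the revision ids and texts are invented for illustration:

# Sketch: revert detection + token persistence with the new libraries.
# Assumes mwreverts, mwpersistence, and deltas are installed; the revisions
# below are made-up examples rather than real dump data.
from hashlib import sha1

import mwreverts
import mwpersistence
from deltas import SequenceMatcher
from deltas.tokenizers import wikitext_split

PERSISTENCE_RADIUS = 7

# one detector and one diff state per page, mirroring the loop in wikiq
rev_detector = mwreverts.Detector()
state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                revert_radius=PERSISTENCE_RADIUS)

revisions = [(1, "The quick brown fox."),
             (2, "The quick brown fox jumps."),
             (3, "The quick brown fox.")]  # identical to rev 1, so rev 3 is a revert

for rev_id, text in revisions:
    text_sha1 = sha1(text.encode("utf8")).hexdigest()

    # revert detection is keyed on the text checksum, as in wikiq
    revert = rev_detector.process(text_sha1, rev_id)
    if revert:
        print("rev %d reverts %s" % (rev_id, list(revert.reverteds)))

    # state.update() replaces the legacy state.process(); it returns the
    # current token list plus the tokens added and removed by this revision
    _, tokens_added, tokens_removed = state.update(text, rev_id)
    token_revs = sum(len(t.revisions) - 1 for t in tokens_added)
    print("rev %d: %d added, %d removed, %d persisting token-revisions"
          % (rev_id, len(tokens_added), len(tokens_removed), token_revs))

Note that state.update() no longer takes the text checksum that the legacy state.process() required; in the patched wikiq the sha1 is only used for revert detection.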
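Both persistence paths share the PERSISTENCE_RADIUS deque because a revision's persistence can only be scored after later revisions have been diffed against it, so each output row is buffered and flushed once the window fills (the last RADIUS rows are flushed after the page loop, as the hunk at line 257 shows). A rough sketch of that deferral using the field names the patch lists (token_revs, tokens_added, tokens_removed, tokens_window); the emit callback and the exact assignments are illustrative assumptions, not code taken from the patch:

from collections import deque

PERSISTENCE_RADIUS = 7

def calculate_persistence(tokens_added):
    # (total later revisions the added tokens survived into, number of tokens added)
    return (sum(len(x.revisions) - 1 for x in tokens_added), len(tokens_added))

window = deque(maxlen=PERSISTENCE_RADIUS)

def buffer_revision(rev_id, rev_data, tokens_added, tokens_removed, emit):
    # Hold the row until PERSISTENCE_RADIUS newer revisions have been processed.
    window.append((rev_id, rev_data, tokens_added, tokens_removed))
    if len(window) == PERSISTENCE_RADIUS:
        # the oldest buffered revision now has a full window of follow-ups
        old_rev_id, old_rev_data, old_added, old_removed = window[0]
        num_token_revs, num_tokens = calculate_persistence(old_added)
        old_rev_data["token_revs"] = num_token_revs
        old_rev_data["tokens_added"] = num_tokens
        old_rev_data["tokens_removed"] = len(old_removed)
        old_rev_data["tokens_window"] = PERSISTENCE_RADIUS - 1
        emit(old_rev_data)  # hypothetical hook; wikiq writes a TSV row here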