# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
-
import argparse
import sys
import os, os.path
from hashlib import sha1
from mw.xml_dump import Iterator
-from mw.lib import persistence
+
+from deltas.tokenizers import wikitext_split
+import mwpersistence
import mwreverts
from urllib.parse import quote
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7
+from deltas import SequenceMatcher
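# For the tokens a revision added, count how many later revisions each token
# survived (len(x.revisions) - 1 per token) and how many tokens were added in total.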
def calculate_persistence(tokens_added):
return(sum([(len(x.revisions)-1) for x in tokens_added]),
len(tokens_added))
+
class WikiqIterator():
def __init__(self, fh, collapse_user=False):
self.fh = fh
class WikiqParser():
- def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
+ def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
self.input_file = input_file
self.output_file = output_file
self.collapse_user = collapse_user
self.persist = persist
+ self.persist_legacy = persist_legacy
self.printed_header = False
self.namespaces = []
self.urlencode = urlencode
page_count = 0
rev_count = 0
+
+
# Iterate through pages
for page in dump:
- if self.persist:
- state = persistence.State()
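+ # detect reverts by matching each revision's text checksum (SHA1) against earlier revisions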
+ rev_detector = mwreverts.Detector()
+
+ if self.persist or self.persist_legacy:
window = deque(maxlen=PERSISTENCE_RADIUS)
- rev_detector = mwreverts.Detector()
+ if not self.persist_legacy:
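+ # mwpersistence tracks token persistence by diffing successive revisions with the wikitext_split tokenizer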
+ state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
+ revert_radius=PERSISTENCE_RADIUS)
+
+ else:
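+ # fall back to the older mw.lib.persistence implementation; imported lazily so it is
+ # only required when --persistence-legacy is requested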
+ from mw.lib import persistence
+ state = persistence.State()
# Iterate through a page's revisions
for rev in page:
# generate revert data
revert = rev_detector.process(text_sha1, rev.id)
+
if revert:
rev_data['revert'] = "TRUE"
rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
if self.collapse_user:
rev_data['collapsed_revs'] = rev.collapsed_revs
- if self.persist:
+ if self.persist or self.persist_legacy:
if rev.text.deleted:
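# the revision text is unavailable, so no persistence statistics can be computed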
for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
old_rev_data[k] = None
else:
- _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
+ if not self.persist_legacy:
+ _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
+
+ else:
+ _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
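+ # hold each revision until PERSISTENCE_RADIUS newer revisions have been seen,
+ # so its persistence counts are complete before it is written out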
window.append((rev.id, rev_data, tokens_added, tokens_removed))
if len(window) == PERSISTENCE_RADIUS:
rev_count += 1
- if self.persist:
+ if self.persist or self.persist_legacy:
# print out metadata for the last RADIUS revisions
for i, item in enumerate(window):
# if the window was full, we've already printed item 0
parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
+ help="Legacy behavior for the persistence calculation, using mw.lib.persistence instead of mwpersistence.")
+
args = parser.parse_args()
if len(args.dumpfiles) > 0:
wikiq = WikiqParser(input_file, output_file,
collapse_user=args.collapse_user,
persist=args.persist,
+ persist_legacy=args.persist_legacy,
urlencode=args.urlencode)
wikiq = WikiqParser(sys.stdin, sys.stdout,
collapse_user=args.collapse_user,
persist=args.persist,
+ persist_legacy=args.persist_legacy,
urlencode=args.urlencode)
wikiq.process()