# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
-
import argparse
import sys
import os, os.path
import re
from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1
-from mw.xml_dump import Iterator
-from mw.lib import persistence
-from mw.lib import reverts
+from mwxml import Dump
+from deltas.tokenizers import wikitext_split
+from deltas import SequenceMatcher
+import mwpersistence
+import mwreverts
+from urllib.parse import quote
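+# fields that may contain tabs or newlines; percent-encoded when --url-encode is set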
+TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7
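+# returns (total later revisions in which added tokens persist, number of tokens added)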
def calculate_persistence(tokens_added):
return(sum([(len(x.revisions)-1) for x in tokens_added]),
len(tokens_added))
+
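+# wraps an mwxml Dump so pages can be iterated; consecutive revisions by the
+# same user can optionally be collapsed into one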
class WikiqIterator():
def __init__(self, fh, collapse_user=False):
self.fh = fh
self.collapse_user = collapse_user
- self.mwiterator = Iterator.from_file(self.fh)
+ self.mwiterator = Dump.from_file(self.fh)
self.__pages = self.load_pages()
def load_pages(self):
__slots__ = ('id', 'title', 'namespace', 'redirect',
'restrictions', 'mwpage', '__revisions',
'collapse_user')
-
+
def __init__(self, page, collapse_user=False):
self.id = page.id
self.title = page.title
else:
if self.collapse_user:
# yield if this is the last edit in a seq by a user and reset
- if not rev.contributor.user_text == prev_rev.contributor.user_text:
+                    # also yield if we don't know who the user is
+
+ if rev.deleted.user or prev_rev.deleted.user:
+ yield prev_rev
+ collapsed_revs = 1
+ rev.collapsed_revs = collapsed_revs
+
+                    elif rev.user.text != prev_rev.user.text:
yield prev_rev
collapsed_revs = 1
rev.collapsed_revs = collapsed_revs
yield prev_rev
prev_rev = rev
+
# also yield the final time
yield prev_rev
return next(self.__revisions)
class WikiqParser():
- def __init__(self, input_file, output_file, collapse_user=False, persist=False):
+
+
+ def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
+
self.input_file = input_file
self.output_file = output_file
self.collapse_user = collapse_user
self.persist = persist
+ self.persist_legacy = persist_legacy
self.printed_header = False
self.namespaces = []
-
+ self.urlencode = urlencode
+
def __get_namespace_from_title(self, title):
default_ns = None
return default_ns
def process(self):
- print("Processing file: %s" % self.input_file.name, file=sys.stderr)
dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
        # build a map from namespace name to namespace id
- self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
+ self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
page_count = 0
rev_count = 0
+
+
# Iterate through pages
for page in dump:
- if self.persist:
- state = persistence.State()
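+            # identity reverts are detected by matching sha1 checksums of revision text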
+ rev_detector = mwreverts.Detector()
+
+ if self.persist or self.persist_legacy:
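+                # buffer the last PERSISTENCE_RADIUS revisions so token survival can be measured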
window = deque(maxlen=PERSISTENCE_RADIUS)
- rev_detector = reverts.Detector()
+ if not self.persist_legacy:
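+                    # mwpersistence tracks tokens by diffing successive revision texts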
+ state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+ revert_radius=PERSISTENCE_RADIUS)
+
+ else:
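+                        # fall back to the old mw library's persistence implementation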
+ from mw.lib import persistence
+ state = persistence.State()
# Iterate through a page's revisions
for rev in page:
rev_data = {'revid' : rev.id,
'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
'articleid' : page.id,
- 'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
+                        'editor_id' : "" if rev.deleted.user or rev.user.id is None else rev.user.id,
'title' : '"' + page.title + '"',
- 'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
- 'deleted' : "TRUE" if rev.text.deleted else "FALSE" }
+ 'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
+ 'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
# if revisions are deleted, /many/ things will be missing
- if rev.text.deleted:
+ if rev.deleted.text:
rev_data['text_chars'] = ""
rev_data['sha1'] = ""
rev_data['revert'] = ""
rev_data['reverteds'] = ""
else:
+ # rev.text can be None if the page has no text
+ if not rev.text:
+ rev.text = ""
# if text exists, we'll check for a sha1 and generate one otherwise
+
if rev.sha1:
text_sha1 = rev.sha1
else:
text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
rev_data['sha1'] = text_sha1
# generate revert data
revert = rev_detector.process(text_sha1, rev.id)
+
if revert:
rev_data['revert'] = "TRUE"
rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
# if the fact that the edit was minor can be hidden, this might be an issue
rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
- if rev.contributor.user_text:
+ if not rev.deleted.user:
# wrap user-defined editors in quotes for fread
- rev_data['editor'] = '"' + rev.contributor.user_text + '"'
- rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
+ rev_data['editor'] = '"' + rev.user.text + '"'
+                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"
else:
rev_data['anon'] = ""
if self.collapse_user:
rev_data['collapsed_revs'] = rev.collapsed_revs
- if self.persist:
- if rev.text.deleted:
+ if self.persist or self.persist_legacy:
+ if rev.deleted.text:
for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
else:
- _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
+ if not self.persist_legacy:
+ _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
+
+ else:
+ _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
window.append((rev.id, rev_data, tokens_added, tokens_removed))
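+                        # once the window is full, the oldest revision's statistics are final and can be printed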
if len(window) == PERSISTENCE_RADIUS:
rev_count += 1
- if self.persist:
+ if self.persist or self.persist_legacy:
# print out metadata for the last RADIUS revisions
for i, item in enumerate(window):
# if the window was full, we've already printed item 0
rev_data["tokens_added"] = num_tokens
rev_data["tokens_removed"] = len(tokens_removed)
rev_data["tokens_window"] = len(window)-(i+1)
-
+
self.print_rev_data(rev_data)
page_count += 1
def print_rev_data(self, rev_data):
# if it's the first time through, print the header
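+        # percent-encode fields that may contain tabs or newlines, which would break the TSV output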
+ if self.urlencode:
+ for field in TO_ENCODE:
+ rev_data[field] = quote(str(rev_data[field]))
+
if not self.printed_header:
print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
self.printed_header = True
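+# decompress the dump with an external tool and read from its stdout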
def open_input_file(input_filename):
- if re.match(r'.*\.7z', input_filename):
- cmd = ["7za", "x", "-so", input_filename]
- elif re.match(r'.*\.gz', input_filename):
+ if re.match(r'.*\.7z$', input_filename):
+ cmd = ["7za", "x", "-so", input_filename, '*']
+ elif re.match(r'.*\.gz$', input_filename):
cmd = ["zcat", input_filename]
- elif re.match(r'.*\.bz2', input_filename):
- cmd = ["bzcat", input_filename]
+ elif re.match(r'.*\.bz2$', input_filename):
+ cmd = ["bzcat", "-dk", input_filename]
try:
input_file = Popen(cmd, stdout=PIPE).stdout
def open_output_file(input_filename):
    # derive the output filename by stripping the compression and .xml extensions
- output_filename = re.sub(r'\.xml(\.(7z|gz|bz2))?$', '', input_filename)
+ output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
+ output_filename = re.sub(r'\.xml', '', output_filename)
output_filename = output_filename + ".tsv"
output_file = open(output_filename, "w")
parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure.")
+parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
+ help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+
+parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
+                    help="Use the legacy persistence calculation based on the old mw library instead of mwpersistence.")
+
args = parser.parse_args()
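+# process each dump file named on the command line, writing one .tsv per input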
if len(args.dumpfiles) > 0:
for filename in args.dumpfiles:
input_file = open_input_file(filename)
- # open file for output
+        # resolve the output directory
+ if args.output_dir:
+ output_dir = args.output_dir[0]
+ else:
+ output_dir = "."
+
+ print("Processing file: %s" % filename, file=sys.stderr)
+
if args.stdout:
output_file = sys.stdout
else:
- if args.output_dir:
- output_dir = args.output_dir[0]
- else:
- output_dir = "."
-
filename = os.path.join(output_dir, os.path.basename(filename))
output_file = open_output_file(filename)
wikiq = WikiqParser(input_file, output_file,
- collapse_user=args.collapse_user,
- persist=args.persist)
+ collapse_user=args.collapse_user,
+ persist=args.persist,
+ persist_legacy=args.persist_legacy,
+ urlencode=args.urlencode)
+
wikiq.process()
# close things
output_file.close()
else:
wikiq = WikiqParser(sys.stdin, sys.stdout,
- collapse_user=args.collapse_user,
- persist=args.persist)
+ collapse_user=args.collapse_user,
+ persist=args.persist,
+ persist_legacy=args.persist_legacy,
+ urlencode=args.urlencode)
wikiq.process()
# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"