import sys
import os, os.path
import re
+from datetime import datetime,timezone
+import json
from subprocess import Popen, PIPE
from collections import deque
from deltas import SequenceMatcher
from deltas import SegmentMatcher
+import dataclasses as dc
+from dataclasses import dataclass
+import pyarrow as pa
+import pyarrow.parquet as pq
+from itertools import chain
+
class PersistMethod:
none = 0
sequence = 1
return(sum([(len(x.revisions)-1) for x in tokens_added]),
len(tokens_added))
-
class WikiqIterator():
def __init__(self, fh, collapse_user=False):
self.fh = fh
return next(self.__revisions)
+"""
+A RegexPair is defined by a regular expression (pattern) and a label.
+The pattern can include named capture groups. If it does, then each named capture group will have a resulting column in the output.
+If the pattern does not include a named capture group, then only one output column will result.
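+
+For example (an illustrative case, not from the original code): with the label
+'wikilink' and the pattern r'\[\[(?P<target>[^\]|]+)', the single named group
+'target' produces one output column named 'wikilink_target' holding the list of
+matched link targets; the same pattern without a named group would instead
+produce a single column named 'wikilink'.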
+"""
class RegexPair(object):
def __init__(self, pattern, label):
- self.pattern = re.compile(pattern)
+        self.pattern = pattern
+
+        # accept either a pattern string or an already-compiled regular expression
+        if isinstance(self.pattern, str):
+            self.pattern = re.compile(pattern)
+
self.label = label
self.has_groups = bool(self.pattern.groupindex)
if self.has_groups:
self.capture_groups = list(self.pattern.groupindex.keys())
+ def get_pyarrow_fields(self):
+ if self.has_groups:
+ fields = [pa.field(self._make_key(cap_group),pa.list_(pa.string()))
+ for cap_group in self.capture_groups]
+ else:
+ fields = [pa.field(self.label, pa.list_(pa.string()))]
+
+ return fields
+
def _make_key(self, cap_group):
return ("{}_{}".format(self.label, cap_group))
temp_dict = {}
# if there are named capture groups in the regex
if self.has_groups:
- # initialize the {capture_group_name:list} for each capture group
+
# if there are matches of some sort in this revision content, fill the lists for each cap_group
if self.pattern.search(content) is not None:
m = self.pattern.finditer(content)
# there are no capture groups, we just search for all the matches of the regex
else:
#given that there are matches to be made
- if self.pattern.search(content) is not None:
- m = self.pattern.findall(content)
- temp_dict[self.label] = ', '.join(m)
- else:
- temp_dict[self.label] = None
+        if type(content) in (str, bytes):
+ if self.pattern.search(content) is not None:
+ m = self.pattern.findall(content)
+ temp_dict[self.label] = m
+ else:
+ temp_dict[self.label] = None
+
# update rev_data with our new columns
- rev_data.update(temp_dict)
+ for k, v in temp_dict.items():
+ setattr(rev_data, k, v)
+
return rev_data
+"""
+
+We used to use a dictionary to collect fields for the output.
+Now we use dataclasses. Compared to a dictionary, this should help:
+- prevent some bugs
+- make it easier to output parquet data.
+- use class attribute '.' syntax instead of dictionary syntax.
+- improve support for tooling (autocomplete, type hints)
+- use type information to define formatting rules
+
+Depending on the parameters passed into Wikiq, the output schema can be different.
+Therefore, we need to end up constructing a dataclass with the correct output schema.
+It also needs to have the correct pyarrow schema so we can write parquet files.
+
+The RevDataBase type has all the fields that will be output no matter how wikiq is invoked.
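+
+For example (an illustrative sketch, not taken from a real dump):
+
+    rd = RevDataBase(revid=1, date_time=datetime(2010, 1, 1, tzinfo=timezone.utc),
+                     articleid=7, editorid=None, title="Example", namespace=0,
+                     deleted=False)
+    rd.header_row()   # tab-separated field names, in dataclass field order
+    rd.to_tsv_row()   # tab-separated values in the same order; None becomes ""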
+"""
+@dataclass()
+class RevDataBase():
+ revid: int
+ date_time: datetime
+ articleid: int
+ editorid: int
+ title: str
+ namespace: int
+ deleted: bool
+ text_chars: int = None
+ revert: bool = None
+ reverteds: list[int] = None
+ sha1: str = None
+ minor: bool = None
+ editor: str = None
+ anon: bool = None
+
+ # toggles url encoding. this isn't a dataclass field since it doesn't have a type annotation
+ urlencode = False
+
+ # defines pyarrow schema.
+ # each field in the data class needs an entry in this array.
+ # the names should match and be in the same order.
+ # this isn't a dataclass field since it doesn't have a type annotation
+ pa_schema_fields = [
+ pa.field("revid", pa.int64()),
+ pa.field("date_time", pa.timestamp('ms')),
+ pa.field("articleid",pa.int64()),
+ pa.field("editorid",pa.int64()),
+ pa.field("title",pa.string()),
+ pa.field("namespace",pa.int32()),
+ pa.field("deleted",pa.bool_()),
+ pa.field("text_chars",pa.int32()),
+ pa.field("revert",pa.bool_()),
+ pa.field("reverteds",pa.list_(pa.int64())),
+ pa.field("sha1",pa.string()),
+ pa.field("minor",pa.bool_()),
+ pa.field("editor",pa.string()),
+ pa.field("anon",pa.bool_()),
+ ]
+
+ # pyarrow is a columnar format, so most of the work happens in the flush_parquet_buffer function
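+    # dc.astuple returns the field values in dc.fields() order, so pa_schema_fields
+    # must list the same fields in the same order for the parquet columns to line up.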
+ def to_pyarrow(self):
+ return dc.astuple(self)
+
+    # logic to convert each field into the wikiq tsv format goes here.
+    def to_tsv_row(self):
+        row = []
+        for f in dc.fields(self):
+            val = getattr(self, f.name)
+            if val is None:
+                row.append("")
+ elif f.type == bool:
+ row.append("TRUE" if val else "FALSE")
+
+ elif f.type == datetime:
+ row.append(val.strftime('%Y-%m-%d %H:%M:%S'))
+
+ elif f.name in {'editor','title'}:
+ s = '"' + val + '"'
+ if self.urlencode and f.name in TO_ENCODE:
+ row.append(quote(str(s)))
+ else:
+ row.append(s)
+
+ elif f.type == list[int]:
+ row.append('"' + ",".join([str(x) for x in val]) + '"')
+
+ elif f.type == list[str]:
+ row.append('"' + ",".join([(x) for x in val]) + '"')
+
+ elif f.type == str:
+ if self.urlencode and f.name in TO_ENCODE:
+ row.append(quote(str(val)))
+ else:
+ row.append(val)
+ else:
+ row.append(val)
+
+ return '\t'.join(map(str,row))
+
+ def header_row(self):
+ return '\t'.join(map(lambda f: f.name, dc.fields(self)))
+
+"""
+
+If collapse=True we'll use a RevDataCollapse dataclass.
+This class inherits from RevDataBase. This means that it has all the same fields and functions.
+It just adds a new field and updates the pyarrow schema.
+
+"""
+@dataclass()
+class RevDataCollapse(RevDataBase):
+ collapsed_revs:int = None
+
+ pa_collapsed_revs_schema = pa.field('collapsed_revs',pa.int64())
+ pa_schema_fields = RevDataBase.pa_schema_fields + [pa_collapsed_revs_schema]
+
+"""
+
+If persistence data is to be computed we'll need the fields added by RevDataPersistence.
+
+"""
+@dataclass()
+class RevDataPersistence(RevDataBase):
+ token_revs:int = None
+ tokens_added:int = None
+ tokens_removed:int = None
+ tokens_window:int = None
+
+ pa_persistence_schema_fields = [
+ pa.field("token_revs", pa.int64()),
+ pa.field("tokens_added", pa.int64()),
+ pa.field("tokens_removed", pa.int64()),
+ pa.field("tokens_window", pa.int64())]
+ pa_schema_fields = RevDataBase.pa_schema_fields + pa_persistence_schema_fields
+
+"""
+class RevDataCollapsePersistence uses multiple inheritance to make a class that has both persistence and collapse fields.
+
+"""
+@dataclass()
+class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
+    # dataclass fields are collected in reverse MRO order (RevDataBase, then
+    # RevDataPersistence, then RevDataCollapse), so the pyarrow schema lists the
+    # persistence fields before collapsed_revs to keep to_pyarrow() aligned with it.
+    pa_schema_fields = RevDataPersistence.pa_schema_fields + [RevDataCollapse.pa_collapsed_revs_schema]
+
+
+
class WikiqParser():
- def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15):
+ def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000, siteinfo_file=None):
"""
Parameters:
persist : what persistence method to use. Takes a PersistMethod value
"""
self.input_file = input_file
- self.output_file = output_file
+
self.collapse_user = collapse_user
self.persist = persist
- self.printed_header = False
self.namespaces = []
self.urlencode = urlencode
self.revert_radius = revert_radius
else:
self.namespace_filter = None
+ self.regex_schemas = []
self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
+
+ if siteinfo_file is not None:
+ siteinfo = open_siteinfo(siteinfo_file)
+ siteinfo = json.loads(siteinfo.read())
+
+ magicwords = siteinfo.get('query').get('magicwords')
+
+ if magicwords:
+ redirect_config = list(filter(lambda obj: obj.get("name") == "redirect", magicwords))
+ redirect_aliases = chain(* map(lambda obj: obj.get("aliases"), redirect_config))
+ redirect_aliases = list(map(lambda s: s.lstrip('#'), redirect_aliases))
+ redirect_aliases.append('REDIRECT') # just in case
+
+ # this regular expression is copied from pywikibot
+ pattern = '(?:' + '|'.join(redirect_aliases) + ')'
+ redirect_regex = re.compile(r'\s*#{pattern}\s*:?\s*\[\[(.+?)(?:\|.*?)?\]\]'
+ .format(pattern=pattern), re.IGNORECASE | re.DOTALL)
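+                # e.g. with the English alias, this matches revision text such as
+                # "#REDIRECT [[Target page|label]]" and captures "Target page",
+                # which will end up in the 'redirect' output column (illustrative example).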
+
+ self.regex_revision_pairs.extend(self.make_matchmake_pairs([redirect_regex], ["redirect"]))
+
+ # This is where we set the type for revdata.
+
+ if self.collapse_user is True:
+ if self.persist == PersistMethod.none:
+ revdata_type = RevDataCollapse
+ else:
+ revdata_type = RevDataCollapsePersistence
+ elif self.persist != PersistMethod.none:
+ revdata_type = RevDataPersistence
+ else:
+ revdata_type = RevDataBase
+
+ # if there are regex fields, we need to add them to the revdata type.
+ regex_fields = [(field.name, list[str], dc.field(default=None)) for field in self.regex_schemas]
+
+ # make_dataclass is a function that defines a new dataclass type.
+ # here we extend the type we have already chosen and add the regular expression types
+ self.revdata_type = dc.make_dataclass('RevData_Parser',
+ fields=regex_fields,
+ bases=(revdata_type,))
+ # we also need to make sure that we have the right pyarrow schema
+ self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas
+
+ self.revdata_type.urlencode = self.urlencode
+
+ self.schema = pa.schema(self.revdata_type.pa_schema_fields)
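+
+        # For example (illustrative, not from the original code): running with
+        # collapse_user=True, persist=PersistMethod.none, and one revision regex
+        # labeled 'foo' (with no named groups) makes RevData_Parser inherit from
+        # RevDataCollapse and adds a 'foo: list[str] = None' field, with
+        # pa.field('foo', pa.list_(pa.string())) appended to the pyarrow schema.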
+
+ # here we initialize the variables we need for output.
+ if output_parquet is True:
+ self.output_parquet = True
+ self.pq_writer = None
+ self.output_file = output_file
+ self.parquet_buffer = []
+ self.parquet_buffer_size = parquet_buffer_size
+ else:
+ self.print_header = True
+            if output_file == sys.stdout:
+                self.output_file = output_file
+            else:
+                self.output_file = open(output_file, 'w')
+ self.output_parquet = False
def make_matchmake_pairs(self, patterns, labels):
if (patterns is not None and labels is not None) and \
(len(patterns) == len(labels)):
- return [RegexPair(pattern, label) for pattern, label in zip(patterns, labels)]
+ result = []
+ for pattern, label in zip(patterns, labels):
+ rp = RegexPair(pattern, label)
+ result.append(rp)
+ self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
+ return result
elif (patterns is None and labels is None):
return []
else:
sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
- def matchmake(self, rev, rev_data):
- rev_data = self.matchmake_revision(rev.text, rev_data)
+ def matchmake_revision(self, rev, rev_data):
+ rev_data = self.matchmake_text(rev.text, rev_data)
rev_data = self.matchmake_comment(rev.comment, rev_data)
return rev_data
- def matchmake_revision(self, text, rev_data):
- return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
+ def matchmake_text(self, text, rev_data):
+ return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
def matchmake_comment(self, comment, rev_data):
return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)
page_count = 0
rev_count = 0
-
# Iterate through pages
for page in dump:
namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
if self.persist != PersistMethod.none:
window = deque(maxlen=PERSISTENCE_RADIUS)
-
+
if self.persist == PersistMethod.sequence:
state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
# Iterate through a page's revisions
for rev in page:
- # initialize rev_data
- rev_data = {
- 'revid':rev.id,
- 'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
- 'articleid' : page.id,
- 'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
- 'title' : '"' + page.title + '"',
- 'namespace' : namespace,
- 'deleted' : "TRUE" if rev.deleted.text else "FALSE"
- }
-
- rev_data = self.matchmake(rev, rev_data)
-
- # if revisions are deleted, /many/ things will be missing
- if rev.deleted.text:
- rev_data['text_chars'] = ""
- rev_data['sha1'] = ""
- rev_data['revert'] = ""
- rev_data['reverteds'] = ""
-
- else:
+ # create a new data object instead of a dictionary.
+ rev_data = self.revdata_type(revid = rev.id,
+ date_time = datetime.fromtimestamp(rev.timestamp.unix(), tz=timezone.utc),
+ articleid = page.id,
+                                             editorid = None if rev.deleted.user is True or rev.user.id is None else rev.user.id,
+ title = page.title,
+ deleted = rev.deleted.text,
+ namespace = namespace
+ )
+
+ rev_data = self.matchmake_revision(rev, rev_data)
+
+ if not rev.deleted.text:
# rev.text can be None if the page has no text
if not rev.text:
rev.text = ""
if rev.sha1:
text_sha1 = rev.sha1
else:
-
text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
- rev_data['sha1'] = text_sha1
+ rev_data.sha1 = text_sha1
# TODO rev.bytes doesn't work.. looks like a bug
- rev_data['text_chars'] = len(rev.text)
+ rev_data.text_chars = len(rev.text)
# generate revert data
revert = rev_detector.process(text_sha1, rev.id)
if revert:
- rev_data['revert'] = "TRUE"
- rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
+ rev_data.revert = True
+ rev_data.reverteds = revert.reverteds
else:
- rev_data['revert'] = "FALSE"
- rev_data['reverteds'] = ""
+ rev_data.revert = False
# if the fact that the edit was minor can be hidden, this might be an issue
- rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
+ rev_data.minor = rev.minor
if not rev.deleted.user:
-                    # wrap user-defined editors in quotes for fread
+                    # quoting of editor names for fread now happens in to_tsv_row
- rev_data['editor'] = '"' + rev.user.text + '"'
- rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
-
- else:
- rev_data['anon'] = ""
- rev_data['editor'] = ""
-
- #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
- # redirect = True
- #else:
- # redirect = False
+ rev_data.editor = rev.user.text
+ rev_data.anon = rev.user.id is None
#TODO missing: additions_size deletions_size
# if collapse user was on, lets run that
if self.collapse_user:
- rev_data['collapsed_revs'] = rev.collapsed_revs
+ rev_data.collapsed_revs = rev.collapsed_revs
+            # compute persistence statistics (token-level data) if a persistence method is enabled
if self.persist != PersistMethod.none:
- if rev.deleted.text:
- for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
- old_rev_data[k] = None
- else:
+ if not rev.deleted.text:
if self.persist != PersistMethod.legacy:
_, tokens_added, tokens_removed = state.update(rev.text, rev.id)
num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
- old_rev_data["token_revs"] = num_token_revs
- old_rev_data["tokens_added"] = num_tokens
- old_rev_data["tokens_removed"] = len(old_tokens_removed)
- old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1
+ old_rev_data.token_revs = num_token_revs
+ old_rev_data.tokens_added = num_tokens
+ old_rev_data.tokens_removed = len(old_tokens_removed)
+ old_rev_data.tokens_window = PERSISTENCE_RADIUS-1
self.print_rev_data(old_rev_data)
rev_id, rev_data, tokens_added, tokens_removed = item
num_token_revs, num_tokens = calculate_persistence(tokens_added)
- rev_data["token_revs"] = num_token_revs
- rev_data["tokens_added"] = num_tokens
- rev_data["tokens_removed"] = len(tokens_removed)
- rev_data["tokens_window"] = len(window)-(i+1)
-
+ rev_data.token_revs = num_token_revs
+ rev_data.tokens_added = num_tokens
+ rev_data.tokens_removed = len(tokens_removed)
+ rev_data.tokens_window = len(window)-(i+1)
self.print_rev_data(rev_data)
page_count += 1
print("Done: %s revisions and %s pages." % (rev_count, page_count),
file=sys.stderr)
+ # remember to flush the parquet_buffer if we're done
+ if self.output_parquet is True:
+ self.flush_parquet_buffer()
+ self.pq_writer.close()
+
+ else:
+ self.output_file.close()
+
+
+ """
+ For performance reasons it's better to write parquet in batches instead of one row at a time.
+ So this function just puts the data on a buffer. If the buffer is full, then it gets flushed (written).
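+    With the default parquet_buffer_size of 2000 (set in WikiqParser.__init__), this means rows are written in batches of up to 2000.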
+ """
+ def write_parquet_row(self, rev_data):
+ padata = rev_data.to_pyarrow()
+ self.parquet_buffer.append(padata)
+
+ if len(self.parquet_buffer) >= self.parquet_buffer_size:
+ self.flush_parquet_buffer()
+
+
+ """
+ Function that actually writes data to the parquet file.
+ It needs to transpose the data from row-by-row to column-by-column
+ """
+    def flush_parquet_buffer(self):
+        # nothing to flush if no rows have been buffered since the last write
+        if len(self.parquet_buffer) == 0:
+            return
+
+        def rows_to_table(rg, schema):
+            """
+            Returns the pyarrow table that we'll write
+            """
+            # seed one column list per field from the first buffered row
+            cols = []
+            first = rg[0]
+            for col in first:
+                cols.append([col])
+
+            # append the values from the remaining rows, column by column
+            for row in rg[1:]:
+                for j in range(len(cols)):
+                    cols[j].append(row[j])
+
+            # build one typed pyarrow array per column and assemble the table
+            arrays = []
+            for col, typ in zip(cols, schema.types):
+                arrays.append(pa.array(col, typ))
+            return pa.Table.from_arrays(arrays, schema=schema)
+
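+        # e.g. two buffered rows (1, "a", ...) and (2, "b", ...) become the columns
+        # [1, 2], ["a", "b"], ... before being handed to pyarrow (illustrative values).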
+ outtable = rows_to_table(self.parquet_buffer, self.schema)
+ if self.pq_writer is None:
+ self.pq_writer = pq.ParquetWriter(self.output_file, self.schema, flavor='spark')
+
+ self.pq_writer.write_table(outtable)
+ self.parquet_buffer = []
+
+    # depending on whether we are configured to write tsv or parquet, we'll call a different function.
def print_rev_data(self, rev_data):
- # if it's the first time through, print the header
- if self.urlencode:
- for field in TO_ENCODE:
- rev_data[field] = quote(str(rev_data[field]))
-
- if not self.printed_header:
- print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
- self.printed_header = True
+
+ if self.output_parquet is False:
+ printfunc = self.write_tsv_row
+ else:
+ printfunc = self.write_parquet_row
- print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
+ printfunc(rev_data)
+
+ def write_tsv_row(self, rev_data):
+ if self.print_header:
+ print(rev_data.header_row(), file=self.output_file)
+ self.print_header = False
+
+ line = rev_data.to_tsv_row()
+ print(line, file=self.output_file)
+
+def open_siteinfo(siteinfo_file):
+ if re.match(r'.*\.7z$', siteinfo_file):
+ cmd = ["7za", "x", "-so", siteinfo_file, "*.json"]
+ elif re.match(r'.*\.gz$', siteinfo_file):
+ cmd = ["zcat", siteinfo_file]
+ elif re.match(r'.*\.bz2$', siteinfo_file):
+ cmd = ["bzcat", "-dk", siteinfo_file]
+
+    # if the filename matched none of the compression extensions above, `cmd` is
+    # never bound, so the NameError fallback opens the file as plain text
+    try:
+        input_file = Popen(cmd, stdout=PIPE).stdout
+    except NameError:
+        input_file = open(siteinfo_file, 'r')
+
+ return input_file
def open_input_file(input_filename):
if re.match(r'.*\.7z$', input_filename):
- cmd = ["7za", "x", "-so", input_filename, '*']
+ cmd = ["7za", "x", "-so", input_filename, "*.xml"]
elif re.match(r'.*\.gz$', input_filename):
cmd = ["zcat", input_filename]
elif re.match(r'.*\.bz2$', input_filename):
return input_file
-def open_output_file(input_filename):
- # create a regex that creates the output filename
+def get_output_filename(input_filename, parquet = False):
output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
output_filename = re.sub(r'\.xml', '', output_filename)
- output_filename = output_filename + ".tsv"
- output_file = open(output_filename, "w")
+ if parquet is False:
+ output_filename = output_filename + ".tsv"
+ else:
+ output_filename = output_filename + ".parquet"
+ return output_filename
+def open_output_file(input_filename):
+ # create a regex that creates the output filename
+ output_filename = get_output_filename(input_filename, parquet = False)
+ output_file = open(output_filename, "w")
return output_file
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimitted data.')
help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
- help="Directory for output files.")
+                    help="Directory for output files. If it ends with .parquet, output will be in parquet format.")
parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
help="Write output to standard out (do not create dump file)")
parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
help="The label for the outputted column based on matching the regex in comments.")
+parser.add_argument('--SI', '--siteinfo', dest="siteinfo", default=None, type=str,
+                    help="Path to archive containing siteinfo JSON. This is required for resolving redirects.")
+
+
+
args = parser.parse_args()
+
+
# set persistence method
if args.persist is None:
namespaces = None
if len(args.dumpfiles) > 0:
+ output_parquet = False
for filename in args.dumpfiles:
input_file = open_input_file(filename)
else:
output_dir = "."
+ if output_dir.endswith(".parquet"):
+ output_parquet = True
+
print("Processing file: %s" % filename, file=sys.stderr)
if args.stdout:
output_file = sys.stdout
else:
filename = os.path.join(output_dir, os.path.basename(filename))
- output_file = open_output_file(filename)
+ output_file = get_output_filename(filename, parquet = output_parquet)
+        print("Siteinfo file: %s" % args.siteinfo, file=sys.stderr)
wikiq = WikiqParser(input_file,
output_file,
collapse_user=args.collapse_user,
regex_match_revision = args.regex_match_revision,
regex_revision_label = args.regex_revision_label,
regex_match_comment = args.regex_match_comment,
- regex_comment_label = args.regex_comment_label)
+ regex_comment_label = args.regex_comment_label,
+ output_parquet=output_parquet,
+ siteinfo_file = args.siteinfo)
wikiq.process()
# close things
input_file.close()
- output_file.close()
+
else:
wikiq = WikiqParser(sys.stdin,
sys.stdout,
regex_match_revision = args.regex_match_revision,
regex_revision_label = args.regex_revision_label,
regex_match_comment = args.regex_match_comment,
- regex_comment_label = args.regex_comment_label)
+ regex_comment_label = args.regex_comment_label,
+ siteinfo_file = args.siteinfo)
wikiq.process()