X-Git-Url: https://code.communitydata.science/mediawiki_dump_tools.git/blobdiff_plain/26f6d8f984283fa858bae1fdb34d41d793d47150..refs/heads/parquet_support:/wikiq

diff --git a/wikiq b/wikiq
index 03b3147..75c1af8 100755
--- a/wikiq
+++ b/wikiq
@@ -8,7 +8,7 @@ import argparse
 import sys
 import os, os.path
 import re
-from datetime import datetime
+from datetime import datetime,timezone
 
 from subprocess import Popen, PIPE
 from collections import deque
@@ -25,6 +25,7 @@ PERSISTENCE_RADIUS=7
 from deltas import SequenceMatcher
 from deltas import SegmentMatcher
 
+import dataclasses as dc
 from dataclasses import dataclass
 import pyarrow as pa
 import pyarrow.parquet as pq
@@ -39,7 +40,6 @@ def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
-
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
@@ -133,6 +133,11 @@ class WikiqPage():
         return next(self.__revisions)
 
 
+"""
+A RegexPair is defined by a regular expression (pattern) and a label.
+The pattern can include capture groups. If it does then each capture group will have a resulting column in the output.
+If the pattern does not include a capture group, then only one output column will result.
+"""
 class RegexPair(object):
     def __init__(self, pattern, label):
         self.pattern = re.compile(pattern)
@@ -141,6 +146,15 @@ class RegexPair(object):
         if self.has_groups:
             self.capture_groups = list(self.pattern.groupindex.keys())
 
+    def get_pyarrow_fields(self):
+        if self.has_groups:
+            fields = [pa.field(self._make_key(cap_group), pa.list_(pa.string()))
+                      for cap_group in self.capture_groups]
+        else:
+            fields = [pa.field(self.label, pa.list_(pa.string()))]
+
+        return fields
+
     def _make_key(self, cap_group):
         return ("{}_{}".format(self.label, cap_group))
 
@@ -187,11 +201,27 @@ class RegexPair(object):
                 temp_dict[self.label] = None
 
         # update rev_data with our new columns
-        for k, v in temp_dict:
-            rev_data.setattr(k,v)
+        for k, v in temp_dict.items():
+            setattr(rev_data, k, v)
 
         return rev_data
 
+"""
+
+We used to use a dictionary to collect fields for the output.
+Now we use dataclasses. Compared to a dictionary, this should help:
+- prevent some bugs
+- make it easier to output parquet data.
+- use class attribute '.' syntax instead of dictionary syntax.
+- improve support for tooling (autocomplete, type hints)
+- use type information to define formatting rules
+
+Depending on the parameters passed into Wikiq, the output schema can be different.
+Therefore, we need to end up constructing a dataclass with the correct output schema.
+It also needs to have the correct pyarrow schema so we can write parquet files.
+
+The RevDataBase type has all the fields that will be output no matter how wikiq is invoked.
+"""
 @dataclass()
 class RevDataBase():
     revid: int
@@ -208,33 +238,40 @@ class RevDataBase():
     minor: bool = None
     editor: str = None
     anon: bool = None
-    collapsed_revs:int = None
 
+    # toggles url encoding. this isn't a dataclass field since it doesn't have a type annotation
+    urlencode = False
+
+    # defines pyarrow schema.
+    # each field in the data class needs an entry in this array.
+    # the names should match and be in the same order.
+    # this isn't a dataclass field since it doesn't have a type annotation
     pa_schema_fields = [
-        pa.field("revid", pa.int64),
-        pa.field("date_time",pa.timestamp('ms')),
+        pa.field("revid", pa.int64()),
+        pa.field("date_time", pa.timestamp('ms')),
         pa.field("articleid",pa.int64()),
-        pa.field("editorid",pa.int64()),
+        pa.field("editorid",pa.int64(), nullable=True),
         pa.field("title",pa.string()),
         pa.field("namespace",pa.int32()),
-        pa.field("deleted",pa.binary()),
-        pa.field("test_chars",pa.int32()),
-        pa.field("revert",pa.binary()),
-        pa.field("reverteds",pa.list_(pa.int64())),
+        pa.field("deleted",pa.bool_()),
+        pa.field("text_chars",pa.int32()),
+        pa.field("revert",pa.bool_(), nullable=True),
+        pa.field("reverteds",pa.list_(pa.int64()), nullable=True),
         pa.field("sha1",pa.string()),
-        pa.field("minor",pa.binary()),
+        pa.field("minor",pa.bool_()),
         pa.field("editor",pa.string()),
-        pa.field("anon",pa.binary())
+        pa.field("anon",pa.bool_())
     ]
 
+    # pyarrow is a columnar format, so most of the work happens in the flush_parquet_buffer function
     def to_pyarrow(self):
-        return pa.array(self.astuple(), map(self.pa_schema_fields, pa.field.type))
-
+        return dc.astuple(self)
 
+    # logic to convert each field into the wikiq tsv format goes here.
     def to_tsv_row(self):
         row = []
-        for f in self.fields():
+        for f in dc.fields(self):
            val = getattr(self, f.name)
            if getattr(self, f.name) is None:
                row.append("")
@@ -246,61 +283,69 @@
             elif f.name in {'editor','title'}:
                 s = '"' + val + '"'
-                if f.name in TO_ENCODE:
-                    row.append(quote(str(val)))
+                if self.urlencode and f.name in TO_ENCODE:
+                    row.append(quote(str(s)))
+                else:
+                    row.append(s)
 
             elif f.type == list[int]:
                 row.append('"' + ",".join([str(x) for x in val]) + '"')
 
             elif f.type == str:
-                if f.name in TO_ENCODE:
+                if self.urlencode and f.name in TO_ENCODE:
                     row.append(quote(str(val)))
+                else:
+                    row.append(val)
 
             else:
                 row.append(val)
 
-        return '\t'.join(row)
-
-    # def __init__(revid: int,
-    #              date_time: datetime,
-    #              articleid: int,
-    #              editorid: int,
-    #              title: str,
-    #              namespace: int,
-    #              deleted: bool,
-    #              test_chars: int,
-    #              revert: bool,
-    #              reverteds: list[bool],
-    #              sha1: str,
-    #              minor: bool,
-    #              editor: str,
-    #              anon: bool):
+        return '\t'.join(map(str,row))
 
-
+    def header_row(self):
+        return '\t'.join(map(lambda f: f.name, dc.fields(self)))
+
+"""
+
+If collapse=True we'll use a RevDataCollapse dataclass.
+This class inherits from RevDataBase. This means that it has all the same fields and functions.
+It just adds a new field and updates the pyarrow schema.
+
+"""
 @dataclass()
 class RevDataCollapse(RevDataBase):
     collapsed_revs:int = None
+
     pa_collapsed_revs_schema = pa.field('collapsed_revs',pa.int64())
-    pa_schema_fields = RevDataBase.pa_schema_fields + pa_collapsed_revs_schema
-    pa_schema = pa.schema(pa_schema_fields)
+    pa_schema_fields = RevDataBase.pa_schema_fields + [pa_collapsed_revs_schema]
+
+"""
+If persistence data is to be computed we'll need the fields added by RevDataPersistence.
+ +""" @dataclass() class RevDataPersistence(RevDataBase): token_revs:int = None tokens_added:int = None tokens_removed:int = None tokens_window:int = None + pa_persistence_schema_fields = [ - pa.field(token_revs, pa.int64()), - pa.field(tokens_added, pa.int64()), - pa.field(tokens_removed, pa.int64()), - pa.tokens_window, pa.int64()] + pa.field("token_revs", pa.int64()), + pa.field("tokens_added", pa.int64()), + pa.field("tokens_removed", pa.int64()), + pa.field("tokens_window", pa.int64())] pa_schema_fields = RevDataBase.pa_schema_fields + pa_persistence_schema_fields +""" +class RevDataCollapsePersistence uses multiple inheritence to make a class that has both persistence and collapse fields. + +""" @dataclass() class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence): - pa_scehma_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields + pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields class WikiqParser(): def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000): @@ -316,9 +361,6 @@ class WikiqParser(): self.urlencode = urlencode self.revert_radius = revert_radius - self.output_buffer = [] - self.output_buffer_size = output_buffer_size - if namespaces is not None: self.namespace_filter = set(namespaces) else: @@ -328,6 +370,9 @@ class WikiqParser(): self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label) self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label) + + # This is where we set the type for revdata. + if self.collapse_user is True: if self.persist == PersistMethod.none: revdata_type = RevDataCollapse @@ -338,44 +383,58 @@ class WikiqParser(): else: revdata_type = RevDataBase - regex_fields = [(field.name, list[str]), for field in self.regex_schemas] - self.revdata_type = dataclasses.make_dataclass('RevData_Parser', - fields=map(regex_fields, - lambda pa_field: (pa_field.name, - list[string], - field(default=None))), - bases=(revdata_type)) + # if there are regex fields, we need to add them to the revdata type. + regex_fields = [(field.name, list[str], dc.field(default=None)) for field in self.regex_schemas] + + # make_dataclass is a function that defines a new dataclass type. + # here we extend the type we have already chosen and add the regular expression types + self.revdata_type = dc.make_dataclass('RevData_Parser', + fields=regex_fields, + bases=(revdata_type,)) - self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + regex_fields + # we also need to make sure that we have the right pyarrow schema + self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas + self.revdata_type.urlencode = self.urlencode + + self.schema = pa.schema(self.revdata_type.pa_schema_fields) + + # here we initialize the variables we need for output. 
         if output_parquet is True:
             self.output_parquet = True
             self.pq_writer = None
             self.output_file = output_file
+            self.parquet_buffer = []
+            self.parquet_buffer_size = parquet_buffer_size
         else:
-            self.output_file = open(output_file,'w')
-
+            self.print_header = True
+            if output_file == sys.stdout:
+
+                self.output_file = output_file
+            else:
+                self.output_file = open(output_file,'w')
+            self.output_parquet = False
 
     def make_matchmake_pairs(self, patterns, labels):
         if (patterns is not None and labels is not None) and \
            (len(patterns) == len(labels)):
             result = []
             for pattern, label in zip(patterns, labels):
-                result.append(RegexPair(pattern, label))
-                self.regex_schemas.append(pa.field(label, pa.list_(pa.string())))
-
+                rp = RegexPair(pattern, label)
+                result.append(rp)
+                self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
             return result
         elif (patterns is None and labels is None):
             return []
         else:
             sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
 
-    def matchmake(self, rev, rev_data):
-        rev_data = self.matchmake_revision(rev.text, rev_data)
+    def matchmake_revision(self, rev, rev_data):
+        rev_data = self.matchmake_text(rev.text, rev_data)
         rev_data = self.matchmake_comment(rev.comment, rev_data)
         return rev_data
 
-    def matchmake_revision(self, text, rev_data):
+    def matchmake_text(self, text, rev_data):
         return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
 
     def matchmake_comment(self, comment, rev_data):
@@ -432,7 +491,7 @@
 
             if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
-
+
                 if self.persist == PersistMethod.sequence:
                     state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
                                                     revert_radius=PERSISTENCE_RADIUS)
@@ -449,15 +508,17 @@
 
             # Iterate through a page's revisions
             for rev in page:
 
+                # create a new data object instead of a dictionary.
                 rev_data = self.revdata_type(revid = rev.id,
-                                             date_time = rev.timestamp,
+                                             date_time = datetime.fromtimestamp(rev.timestamp.unix(), tz=timezone.utc),
                                              articleid = page.id,
                                              editorid = "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
                                              title = page.title,
-                                             deleted = rev.deleted.text
+                                             deleted = rev.deleted.text,
+                                             namespace = namespace
                                              )
 
-                rev_data = self.matchmake(rev, rev_data)
+                rev_data = self.matchmake_revision(rev, rev_data)
 
                 if not rev.deleted.text:
                     # rev.text can be None if the page has no text
@@ -468,7 +529,6 @@
                     if rev.sha1:
                         text_sha1 = rev.sha1
                     else:
-
                         text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
 
                     rev_data.sha1 = text_sha1
@@ -477,10 +537,13 @@
                     rev_data.text_chars = len(rev.text)
 
                     # generate revert data
-                    rev_data.revert = rev_detector.process(text_sha1, rev.id)
+                    revert = rev_detector.process(text_sha1, rev.id)
 
                     if revert:
+                        rev_data.revert = True
                         rev_data.reverteds = revert.reverteds
+                    else:
+                        rev_data.revert = False
 
                 # if the fact that the edit was minor can be hidden, this might be an issue
                 rev_data.minor = rev.minor
@@ -488,8 +551,8 @@
                 if not rev.deleted.user:
                     # wrap user-defined editors in quotes for fread
                     rev_data.editor = rev.user.text
-                    rev_data.anon = rev.user.id == None
-
+                    rev_data.anon = rev.user.id is None
+
                 #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                 #    redirect = True
                 #else:
@@ -501,8 +564,8 @@
                 if self.collapse_user:
                     rev_data.collapsed_revs = rev.collapsed_revs
 
+                # get the persistence data
                 if self.persist != PersistMethod.none:
-
                     if not rev.deleted.text:
 
                         if self.persist != PersistMethod.legacy:
@@ -518,12 +581,12 @@
 
                             num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
 
-                            rev_data.token_revs = num_token_revs
-                            rev_data.tokens_added = num_tokens
-                            rev_data.tokens_removed = len(old_tokens_removed)
-                            rev_data.tokens_window = PERSISTENCE_RADIUS-1
+                            old_rev_data.token_revs = num_token_revs
+                            old_rev_data.tokens_added = num_tokens
+                            old_rev_data.tokens_removed = len(old_tokens_removed)
+                            old_rev_data.tokens_window = PERSISTENCE_RADIUS-1
 
-                            self.print_rev_data(rev_data)
+                            self.print_rev_data(old_rev_data)
 
                 else:
                     self.print_rev_data(rev_data)
@@ -551,31 +614,59 @@
 
         print("Done: %s revisions and %s pages." % (rev_count, page_count),
               file=sys.stderr)
 
+        # remember to flush the parquet_buffer if we're done
         if self.output_parquet is True:
             self.flush_parquet_buffer()
             self.pq_writer.close()
         else:
-            output_file.close()
+            self.output_file.close()
 
+    """
+    For performance reasons it's better to write parquet in batches instead of one row at a time.
+    So this function just puts the data on a buffer. If the buffer is full, then it gets flushed (written).
+    """
     def write_parquet_row(self, rev_data):
         padata = rev_data.to_pyarrow()
-        self.output_buffer.append(padata)
+        self.parquet_buffer.append(padata)
 
-        if len(self.output_buffer) >= self.output_buffer_size:
+        if len(self.parquet_buffer) >= self.parquet_buffer_size:
             self.flush_parquet_buffer()
 
+    """
+    Function that actually writes data to the parquet file.
+    It needs to transpose the data from row-by-row to column-by-column.
+    """
     def flush_parquet_buffer(self):
-        outtable = pa.table.concat_arrays(self.output_buffer)
+
+        """
+        Returns the pyarrow table that we'll write
+        """
+        def rows_to_table(rg, schema):
+            cols = []
+            first = rg[0]
+            for col in first:
+                cols.append([col])
+
+            for row in rg[1:]:
+                for j in range(len(cols)):
+                    cols[j].append(row[j])
+
+            arrays = []
+            for col, typ in zip(cols, schema.types):
+                arrays.append(pa.array(col, typ))
+            return pa.Table.from_arrays(arrays, schema=schema)
+
+        outtable = rows_to_table(self.parquet_buffer, self.schema)
         if self.pq_writer is None:
-            schema = pa.schema(self.revdata_type.pa_schema_field)
-            self.pq_writer = pq.ParquetWriter(self.output_file, schema, flavor='spark')
+            self.pq_writer = pq.ParquetWriter(self.output_file, self.schema, flavor='spark')
 
         self.pq_writer.write_table(outtable)
-        self.output_buffer = []
+        self.parquet_buffer = []
 
+    # depending on whether we are configured to write tsv or parquet, we'll call a different function.
     def print_rev_data(self, rev_data):
         if self.output_parquet is False:
             printfunc = self.write_tsv_row
@@ -585,15 +676,13 @@ class WikiqParser():
         printfunc(rev_data)
 
     def write_tsv_row(self, rev_data):
-
-        self.output_buffer.append(rev_data.to_tsv_line())
-
-        if len(self.output_buffer) >= self.output_buffer_size:
-            self.flush_tsv_buffer()
+        if self.print_header:
+            print(rev_data.header_row(), file=self.output_file)
+            self.print_header = False
+
+        line = rev_data.to_tsv_row()
+        print(line, file=self.output_file)
 
-    def flush_tsv_buffer():
-        if self.output_header:
 
 def open_input_file(input_filename):
     if re.match(r'.*\.7z$', input_filename):
@@ -724,7 +813,6 @@ if len(args.dumpfiles) > 0:
                            regex_comment_label = args.regex_comment_label,
                            output_parquet=output_parquet)
 
-        print(wikiq.output_parquet)
         wikiq.process()
 
         # close things
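The core pattern the patch introduces — a base dataclass extended at runtime with dc.make_dataclass, a pyarrow schema whose fields mirror the dataclass field order, and buffered row tuples transposed into columns before being handed to a ParquetWriter — can be seen end to end in the following minimal, self-contained sketch. The names here (BaseRow, ExampleRow, mylabel, demo.parquet) are hypothetical illustrations, not identifiers from wikiq:

    import dataclasses as dc
    import pyarrow as pa
    import pyarrow.parquet as pq

    # a fixed base type, standing in for RevDataBase (hypothetical example)
    @dc.dataclass
    class BaseRow:
        revid: int = None
        editor: str = None

    # extend the base type at runtime with one regex-derived column,
    # the way WikiqParser.__init__ builds RevData_Parser
    ExampleRow = dc.make_dataclass('ExampleRow',
                                   fields=[('mylabel', list[str], dc.field(default=None))],
                                   bases=(BaseRow,))

    # the pyarrow schema must name the same fields in the same order
    schema = pa.schema([pa.field("revid", pa.int64()),
                        pa.field("editor", pa.string()),
                        pa.field("mylabel", pa.list_(pa.string()))])

    # buffer rows as plain tuples, as to_pyarrow/write_parquet_row do
    buffer = [dc.astuple(ExampleRow(revid=1, editor="alice", mylabel=["x"])),
              dc.astuple(ExampleRow(revid=2, editor="bob"))]

    # transpose row tuples into columns and build typed arrays,
    # as flush_parquet_buffer's rows_to_table does
    cols = [list(col) for col in zip(*buffer)]
    arrays = [pa.array(col, typ) for col, typ in zip(cols, schema.types)]
    table = pa.Table.from_arrays(arrays, schema=schema)

    with pq.ParquetWriter("demo.parquet", schema, flavor='spark') as writer:
        writer.write_table(table)

Keeping the schema's field order identical to the dataclass field order is what lets the dc.astuple() output line up with schema.types during the transpose — the same invariant the patch's comments insist on for pa_schema_fields.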
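The RegexPair behavior described in the docstring above can be illustrated the same way: a pattern with named capture groups yields one list-of-strings column per group, keyed label_group (mirroring _make_key), while a groupless pattern yields a single column named after the label. The pattern, label, and comment below are invented for illustration:

    import re

    # hypothetical regex and label, in the style wikiq accepts on the command line
    pattern = re.compile(r'(?P<policy>WP:\w+)')
    label = "shortcut"

    comment = "revert per WP:BRD and WP:3RR"

    # one column per capture group, named label_group; None when nothing matches
    columns = {"{}_{}".format(label, g): [m.group(g) for m in pattern.finditer(comment)] or None
               for g in pattern.groupindex}
    print(columns)  # {'shortcut_policy': ['WP:BRD', 'WP:3RR']}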