import sys
import os, os.path
import re
-from datetime import datetime
+from datetime import datetime,timezone
from subprocess import Popen, PIPE
from collections import deque
from deltas import SequenceMatcher
from deltas import SegmentMatcher
-from dataclasses import dataclass
+import dataclasses as dc
+from dataclasses import dataclass, make_dataclass
import pyarrow as pa
import pyarrow.parquet as pq
return(sum([(len(x.revisions)-1) for x in tokens_added]),
len(tokens_added))
-
class WikiqIterator():
def __init__(self, fh, collapse_user=False):
self.fh = fh
if self.has_groups:
self.capture_groups = list(self.pattern.groupindex.keys())
+ def get_pyarrow_fields(self):
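+ # one list<string> column per capture group (named "<label>_<group>"),
+ # or a single column named by the label when the pattern has no groups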
+ if self.has_groups:
+ fields = [pa.field(self._make_key(cap_group),pa.list_(pa.string()))
+ for cap_group in self.capture_groups]
+ else:
+ fields = [pa.field(self.label, pa.list_(pa.string()))]
+
+ return fields
+
def _make_key(self, cap_group):
return ("{}_{}".format(self.label, cap_group))
temp_dict[self.label] = None
# update rev_data with our new columns
- for k, v in temp_dict:
- rev_data.setattr(k,v)
+ for k, v in temp_dict.items():
+ setattr(rev_data, k, v)
return rev_data
minor: bool = None
editor: str = None
anon: bool = None
- collapsed_revs:int = None
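+ # plain class attribute (no type annotation), so @dataclass ignores it
+ # and it never becomes a row field; WikiqParser sets it per run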
+ urlencode = False
pa_schema_fields = [
- pa.field("revid", pa.int64),
+ pa.field("revid", pa.int64()),
pa.field("date_time",pa.timestamp('ms')),
pa.field("articleid",pa.int64()),
pa.field("editorid",pa.int64()),
pa.field("title",pa.string()),
pa.field("namespace",pa.int32()),
- pa.field("deleted",pa.binary()),
+ pa.field("deleted",pa.bool_()),
pa.field("test_chars",pa.int32()),
- pa.field("revert",pa.binary()),
+ pa.field("revert",pa.bool_()),
pa.field("reverteds",pa.list_(pa.int64())),
pa.field("sha1",pa.string()),
- pa.field("minor",pa.binary()),
+ pa.field("minor",pa.bool_()),
pa.field("editor",pa.string()),
- pa.field("anon",pa.binary())
+ pa.field("anon",pa.bool_())
]
def to_pyarrow(self):
- return pa.array(self.astuple(), map(self.pa_schema_fields, pa.field.type))
-
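+ # dc.astuple yields values in field-declaration order, which must
+ # match the column order of pa_schema_fields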
+ return dc.astuple(self)
def to_tsv_row(self):
row = []
- for f in self.fields():
+ for f in dc.fields(self):
val = getattr(self, f.name)
if getattr(self, f.name) is None:
row.append("")
elif f.name in {'editor','title'}:
s = '"' + val + '"'
- if f.name in TO_ENCODE:
- row.append(quote(str(val)))
+ if self.urlencode and f.name in TO_ENCODE:
+ row.append(quote(str(s)))
+ else:
+ row.append(s)
elif f.type == list[int]:
row.append('"' + ",".join([str(x) for x in val]) + '"')
elif f.type == str:
- if f.name in TO_ENCODE:
+ if self.urlencode and f.name in TO_ENCODE:
row.append(quote(str(val)))
+ else:
+ row.append(val)
else:
row.append(val)
- return '\t'.join(row)
-
- # def __init__(revid: int,
- # date_time: datetime,
- # articleid: int,
- # editorid: int,
- # title: str,
- # namespace: int,
- # deleted: bool,
- # test_chars: int,
- # revert: bool,
- # reverteds: list[bool],
- # sha1: str,
- # minor: bool,
- # editor: str,
- # anon: bool):
+ return '\t'.join(map(str,row))
-
+ def header_row(self):
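+ # tab-separated column names, in the same field order as to_tsv_row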
+ return '\t'.join(f.name for f in dc.fields(self))
@dataclass()
class RevDataCollapse(RevDataBase):
collapsed_revs:int = None
pa_collapsed_revs_schema = pa.field('collapsed_revs',pa.int64())
- pa_schema_fields = RevDataBase.pa_schema_fields + pa_collapsed_revs_schema
- pa_schema = pa.schema(pa_schema_fields)
+ pa_schema_fields = RevDataBase.pa_schema_fields + [pa_collapsed_revs_schema]
@dataclass()
class RevDataPersistence(RevDataBase):
tokens_added:int = None
tokens_removed:int = None
tokens_window:int = None
+
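+ # extra columns for the token persistence statistics computed in process()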
pa_persistence_schema_fields = [
- pa.field(token_revs, pa.int64()),
- pa.field(tokens_added, pa.int64()),
- pa.field(tokens_removed, pa.int64()),
- pa.tokens_window, pa.int64()]
+ pa.field("token_revs", pa.int64()),
+ pa.field("tokens_added", pa.int64()),
+ pa.field("tokens_removed", pa.int64()),
+ pa.field("tokens_window", pa.int64())]
pa_schema_fields = RevDataBase.pa_schema_fields + pa_persistence_schema_fields
@dataclass()
class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
- pa_scehma_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields
+ pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields
class WikiqParser():
def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000):
self.urlencode = urlencode
self.revert_radius = revert_radius
- self.output_buffer = []
- self.output_buffer_size = output_buffer_size
-
if namespaces is not None:
self.namespace_filter = set(namespaces)
else:
else:
revdata_type = RevDataBase
- regex_fields = [(field.name, list[str]), for field in self.regex_schemas]
- self.revdata_type = dataclasses.make_dataclass('RevData_Parser',
- fields=map(regex_fields,
- lambda pa_field: (pa_field.name,
- list[string],
- field(default=None))),
- bases=(revdata_type))
+ regex_fields = [(field.name, list[str], dc.field(default=None)) for field in self.regex_schemas]
+
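+ # build the row dataclass dynamically: one list[str] field per regex
+ # label so matchmake() has a place to store capture results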
+ self.revdata_type = make_dataclass('RevData_Parser',
+ fields=regex_fields,
+ bases=(revdata_type,))
- self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + regex_fields
+ self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas
+ self.revdata_type.urlencode = self.urlencode
+
if output_parquet is True:
self.output_parquet = True
self.pq_writer = None
self.output_file = output_file
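+ # rows are buffered and handed to the parquet writer in batches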
+ self.parquet_buffer = []
+ self.parquet_buffer_size = parquet_buffer_size
else:
- self.output_file = open(output_file,'w')
-
+ self.print_header = True
+ if output_file is sys.stdout:
+ self.output_file = output_file
+ else:
+ self.output_file = open(output_file,'w')
+ self.output_parquet = False
def make_matchmake_pairs(self, patterns, labels):
if (patterns is not None and labels is not None) and \
(len(patterns) == len(labels)):
result = []
for pattern, label in zip(patterns, labels):
- result.append(RegexPair(pattern, label))
- self.regex_schemas.append(pa.field(label, pa.list_(pa.string())))
-
+ rp = RegexPair(pattern, label)
+ result.append(rp)
+ self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
return result
elif (patterns is None and labels is None):
return []
if self.persist != PersistMethod.none:
window = deque(maxlen=PERSISTENCE_RADIUS)
if self.persist == PersistMethod.sequence:
state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
for rev in page:
rev_data = self.revdata_type(revid = rev.id,
- date_time = rev.timestamp,
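+ # convert the dump's UTC timestamp to a timezone-aware datetime
+ # so the pa.timestamp('ms') column is unambiguous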
+ date_time = datetime.fromtimestamp(rev.timestamp.unix(), tz=timezone.utc),
articleid = page.id,
editorid = "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
title = page.title,
- deleted = rev.deleted.text
+ deleted = rev.deleted.text,
+ namespace = namespace
)
rev_data = self.matchmake(rev, rev_data)
if rev.sha1:
text_sha1 = rev.sha1
else:
-
text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
rev_data.sha1 = text_sha1
rev_data.text_chars = len(rev.text)
# generate revert data
- rev_data.revert = rev_detector.process(text_sha1, rev.id)
+ revert = rev_detector.process(text_sha1, rev.id)
if revert:
+ rev_data.revert = True
rev_data.reverteds = revert.reverteds
+ else:
+ rev_data.revert = False
# if the fact that the edit was minor can be hidden, this might be an issue
rev_data.minor = rev.minor
if not rev.deleted.user:
# wrap user-defined editors in quotes for fread
rev_data.editor = rev.user.text
- rev_data.anon = rev.user.id == None
-
+ rev_data.anon = rev.user.id is None
+
#if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
# redirect = True
#else:
if self.collapse_user:
rev_data.collapsed_revs = rev.collapsed_revs
+ # get the token persistence statistics for this revision, if enabled
if self.persist != PersistMethod.none:
-
if not rev.deleted.text:
if self.persist != PersistMethod.legacy:
num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
- rev_data.token_revs = num_token_revs
- rev_data.tokens_added = num_tokens
- rev_data.tokens_removed = len(old_tokens_removed)
- rev_data.tokens_window = PERSISTENCE_RADIUS-1
+ old_rev_data.token_revs = num_token_revs
+ old_rev_data.tokens_added = num_tokens
+ old_rev_data.tokens_removed = len(old_tokens_removed)
+ old_rev_data.tokens_window = PERSISTENCE_RADIUS-1
- self.print_rev_data(rev_data)
+ self.print_rev_data(old_rev_data)
else:
self.print_rev_data(rev_data)
self.pq_writer.close()
else:
- output_file.close()
+ self.output_file.close()
def write_parquet_row(self, rev_data):
padata = rev_data.to_pyarrow()
- self.output_buffer.append(padata)
+ self.parquet_buffer.append(padata)
- if len(self.output_buffer) >= self.output_buffer_size:
+ if len(self.parquet_buffer) >= self.parquet_buffer_size:
self.flush_parquet_buffer()
def flush_parquet_buffer(self):
- outtable = pa.table.concat_arrays(self.output_buffer)
+ schema = pa.schema(self.revdata_type.pa_schema_fields)
+
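+ # pivot the buffered row tuples into per-column lists,
+ # e.g. [(1, "a"), (2, "b")] -> [[1, 2], ["a", "b"]],
+ # since pa.Table.from_arrays wants one array per column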
+ def row_to_col(rows, types):
+ # seed one column list per field from the first row
+ cols = [[value] for value in rows[0]]
+ # append each remaining row's values to their columns
+ for row in rows[1:]:
+ for j, value in enumerate(row):
+ cols[j].append(value)
+ # build one typed pa.array per column
+ return [pa.array(col, typ) for col, typ in zip(cols, types)]
+
+ outtable = pa.Table.from_arrays(row_to_col(self.parquet_buffer, schema.types), schema=schema)
if self.pq_writer is None:
- schema = pa.schema(self.revdata_type.pa_schema_field)
self.pq_writer = pq.ParquetWriter(self.output_file, schema, flavor='spark')
self.pq_writer.write_table(outtable)
- self.output_buffer = []
+ self.parquet_buffer = []
def print_rev_data(self, rev_data):
if self.output_parquet is False:
printfunc(rev_data)
def write_tsv_row(self, rev_data):
-
- self.output_buffer.append(rev_data.to_tsv_line())
-
- if len(self.output_buffer) >= self.output_buffer_size:
- self.flush_tsv_buffer()
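+ # write the header line once, before the first data row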
+ if self.print_header:
+ print(rev_data.header_row(), file=self.output_file)
+ self.print_header = False
+ line = rev_data.to_tsv_row()
+ print(line, file=self.output_file)
- def flush_tsv_buffer():
- if self.output_header:
def open_input_file(input_filename):
if re.match(r'.*\.7z$', input_filename):
regex_comment_label = args.regex_comment_label,
output_parquet=output_parquet)
- print(wikiq.output_parquet)
wikiq.process()
# close things