import sys
import os, os.path
import re
-from datetime import datetime
+from datetime import datetime, timezone
from subprocess import Popen, PIPE
from collections import deque
if self.has_groups:
self.capture_groups = list(self.pattern.groupindex.keys())
+ def get_pyarrow_fields(self):
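+ """Return the pyarrow fields this regex adds to the schema: one list<string> field per capture group (named label_group), or a single field named by the label when there are no groups."""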
+ if self.has_groups:
+ fields = [pa.field(self._make_key(cap_group), pa.list_(pa.string()))
+ for cap_group in self.capture_groups]
+ else:
+ fields = [pa.field(self.label, pa.list_(pa.string()))]
+
+ return fields
+
def _make_key(self, cap_group):
return ("{}_{}".format(self.label, cap_group))
editor: str = None
anon: bool = None
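+ # urlencode is a plain class attribute, not a dataclass field; the writer sets it on the row class when URL-encoding of text fields is requested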
+ urlencode = False
pa_schema_fields = [
pa.field("revid", pa.int64()),
pa.field("date_time",pa.timestamp('ms')),
def to_tsv_row(self):
row = []
- for f in self.fields():
+ for f in dc.fields(self):
val = getattr(self, f.name)
if getattr(self, f.name) is None:
row.append("")
elif f.name in {'editor','title'}:
s = '"' + val + '"'
- if f.name in TO_ENCODE:
- row.append(quote(str(val)))
+ if self.urlencode and f.name in TO_ENCODE:
+ row.append(quote(str(s)))
+ else:
+ row.append(s)
elif f.type == list[int]:
row.append('"' + ",".join([str(x) for x in val]) + '"')
elif f.type == str:
- if f.name in TO_ENCODE:
+ if self.urlencode and f.name in TO_ENCODE:
row.append(quote(str(val)))
+ else:
+ row.append(val)
else:
row.append(val)
- return '\t'.join(row)
-
- # def __init__(revid: int,
- # date_time: datetime,
- # articleid: int,
- # editorid: int,
- # title: str,
- # namespace: int,
- # deleted: bool,
- # test_chars: int,
- # revert: bool,
- # reverteds: list[bool],
- # sha1: str,
- # minor: bool,
- # editor: str,
- # anon: bool):
+ return '\t'.join(map(str,row))
-
+ def header_row(self):
+ return '\t'.join(f.name for f in dc.fields(self))
@dataclass()
class RevDataCollapse(RevDataBase):
self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas
-
- # print(list(map(lambda d: d.name, dc.fields(self.revdata_type))))
- # print(self.revdata_type.pa_schema_fields)
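+ # propagate the urlencode option to the row dataclass so to_tsv_row() honors it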
+ self.revdata_type.urlencode = self.urlencode
if output_parquet is True:
self.output_parquet = True
self.parquet_buffer = []
self.parquet_buffer_size = parquet_buffer_size
else:
- self.output_file = open(output_file,'w')
-
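+ # TSV output: emit a header before the first row, and only open a file when not writing to stdout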
+ self.print_header = True
+ if output_file is sys.stdout:
+ self.output_file = output_file
+ else:
+ self.output_file = open(output_file, 'w')
+ self.output_parquet = False
def make_matchmake_pairs(self, patterns, labels):
if (patterns is not None and labels is not None) and \
(len(patterns) == len(labels)):
result = []
for pattern, label in zip(patterns, labels):
- result.append(RegexPair(pattern, label))
- self.regex_schemas.append(pa.field(label, pa.list_(pa.string())))
-
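+ # build the RegexPair first so its pyarrow fields (one per capture group) can extend the output schema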
+ rp = RegexPair(pattern, label)
+ result.append(rp)
+ self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
return result
elif (patterns is None and labels is None):
return []
if self.persist != PersistMethod.none:
window = deque(maxlen=PERSISTENCE_RADIUS)
-
+
if self.persist == PersistMethod.sequence:
state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
for rev in page:
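+ # build the row dataclass for this revision; the timestamp is converted to a timezone-aware UTC datetime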
rev_data = self.revdata_type(revid = rev.id,
- date_time = datetime.fromtimestamp(rev.timestamp.unix()),
+ date_time = datetime.fromtimestamp(rev.timestamp.unix(), tz=timezone.utc),
articleid = page.id,
editorid = "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
title = page.title,
if rev.sha1:
text_sha1 = rev.sha1
else:
-
text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
rev_data.sha1 = text_sha1
if not rev.deleted.user:
# wrap user-defined editors in quotes for fread
rev_data.editor = rev.user.text
- rev_data.anon = rev.user.id == None
-
+ rev_data.anon = rev.user.id is None
+
#if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
# redirect = True
#else:
if self.collapse_user:
rev_data.collapsed_revs = rev.collapsed_revs
+ # compute token persistence statistics for this revision if persistence tracking is enabled
if self.persist != PersistMethod.none:
-
if not rev.deleted.text:
if self.persist != PersistMethod.legacy:
num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
- rev_data.token_revs = num_token_revs
- rev_data.tokens_added = num_tokens
- rev_data.tokens_removed = len(old_tokens_removed)
- rev_data.tokens_window = PERSISTENCE_RADIUS-1
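+ # persistence stats are attributed to old_rev_data, the revision falling out of the persistence window, rather than the current revision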
+ old_rev_data.token_revs = num_token_revs
+ old_rev_data.tokens_added = num_tokens
+ old_rev_data.tokens_removed = len(old_tokens_removed)
+ old_rev_data.tokens_window = PERSISTENCE_RADIUS-1
- self.print_rev_data(rev_data)
+ self.print_rev_data(old_rev_data)
else:
self.print_rev_data(rev_data)
self.pq_writer.close()
else:
- output_file.close()
+ self.output_file.close()
def write_parquet_row(self, rev_data):
printfunc(rev_data)
def write_tsv_row(self, rev_data):
- line = rev_data.to_tsv_line()
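+ # write the column header once, before the first data row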
+ if self.print_header:
+ print(rev_data.header_row(), file=self.output_file)
+ self.print_header = False
+
+ line = rev_data.to_tsv_row()
print(line, file=self.output_file)
regex_comment_label = args.regex_comment_label,
output_parquet=output_parquet)
- print(wikiq.output_parquet)
wikiq.process()
# close things