import os, os.path
import re
from datetime import datetime,timezone
+import json
from subprocess import Popen, PIPE
from collections import deque
from dataclasses import dataclass
import pyarrow as pa
import pyarrow.parquet as pq
+from itertools import chain
class PersistMethod:
none = 0
"""
class RegexPair(object):
def __init__(self, pattern, label):
- self.pattern = re.compile(pattern)
+        self.pattern = pattern
+
+        # accept either a regex string or an already-compiled pattern
+        if isinstance(self.pattern, str):
+            self.pattern = re.compile(pattern)
+
self.label = label
self.has_groups = bool(self.pattern.groupindex)
if self.has_groups:
pa.field("sha1",pa.string()),
pa.field("minor",pa.bool_()),
pa.field("editor",pa.string()),
- pa.field("anon",pa.bool_())
+ pa.field("anon",pa.bool_()),
]
# pyarrow is a columnar format, so most of the work happens in the flush_parquet_buffer function
class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields
+
+
class WikiqParser():
- def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000):
+ def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000, siteinfo_file=None):
"""
Parameters:
persist : what persistence method to use. Takes a PersistMethod value
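+           siteinfo_file : path to the siteinfo json dump; used to build a regex for detecting redirects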
self.namespaces = []
self.urlencode = urlencode
self.revert_radius = revert_radius
-
+
if namespaces is not None:
self.namespace_filter = set(namespaces)
else:
self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
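+        # if a siteinfo dump was supplied, use its "redirect" magic-word aliases to build a regex that labels redirect revisions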
+ if siteinfo_file is not None:
+ siteinfo = open_siteinfo(siteinfo_file)
+ siteinfo = json.loads(siteinfo.read())
+
+ magicwords = siteinfo.get('query').get('magicwords')
+
+ if magicwords:
+                # gather the localized redirect magic-word aliases (e.g. "#REDIRECT", "#WEITERLEITUNG")
+                redirect_config = list(filter(lambda obj: obj.get("name") == "redirect", magicwords))
+                redirect_aliases = chain(*map(lambda obj: obj.get("aliases"), redirect_config))
+                # strip the leading '#' and escape each alias so they can be joined into one alternation
+                redirect_aliases = list(map(lambda s: re.escape(s.lstrip('#')), redirect_aliases))
+                redirect_aliases.append('REDIRECT') # just in case the English alias is missing
+                pattern = '(?:' + '|'.join(redirect_aliases) + ')'
+                # match "#ALIAS[:] [[target]]" at the start of the text and capture the redirect target
+                redirect_regex = re.compile(r'\s*#{pattern}\s*:?\s*\[\[(.+?)(?:\|.*?)?\]\]'.format(pattern=pattern),
+                                            re.IGNORECASE | re.DOTALL)
+
+ self.regex_revision_pairs.extend(self.make_matchmake_pairs([redirect_regex], ["redirect"]))
# This is where we set the type for revdata.
page_count = 0
rev_count = 0
-
# Iterate through pages
for page in dump:
namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
rev_data.sha1 = text_sha1
# TODO rev.bytes doesn't work.. looks like a bug
- rev_data.text_chars = len(rev.text)
+ rev_data.text_chars = len(rev.text)
# generate revert data
revert = rev_detector.process(text_sha1, rev.id)
rev_data.editor = rev.user.text
rev_data.anon = rev.user.id is None
- #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
- # redirect = True
- #else:
- # redirect = False
-
#TODO missing: additions_size deletions_size
# if collapse user was on, lets run that
line = rev_data.to_tsv_row()
print(line, file=self.output_file)
+def open_siteinfo(siteinfo_file):
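+    # pick a decompression command based on the siteinfo file's extension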
+ if re.match(r'.*\.7z$', siteinfo_file):
+ cmd = ["7za", "x", "-so", siteinfo_file, "*.json"]
+ elif re.match(r'.*\.gz$', siteinfo_file):
+ cmd = ["zcat", siteinfo_file]
+ elif re.match(r'.*\.bz2$', siteinfo_file):
+ cmd = ["bzcat", "-dk", siteinfo_file]
+
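+    # if no extension matched, cmd is undefined and the NameError fallback opens the file as plain text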
+ try:
+ input_file = Popen(cmd, stdout=PIPE).stdout
+ except NameError:
+ input_file = open(siteinfo_file, 'r')
+
+ return input_file
+
def open_input_file(input_filename):
if re.match(r'.*\.7z$', input_filename):
parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
help="The label for the outputted column based on matching the regex in comments.")
+parser.add_argument('-SI', '--siteinfo', dest="siteinfo", default=None, type=str,
+                    help="Path to an archive containing the siteinfo json. This is required for resolving redirects.")
+
args = parser.parse_args()
filename = os.path.join(output_dir, os.path.basename(filename))
output_file = get_output_filename(filename, parquet = output_parquet)
wikiq = WikiqParser(input_file,
output_file,
collapse_user=args.collapse_user,
regex_revision_label = args.regex_revision_label,
regex_match_comment = args.regex_match_comment,
regex_comment_label = args.regex_comment_label,
- output_parquet=output_parquet)
+ output_parquet=output_parquet,
+ siteinfo_file = args.siteinfo)
wikiq.process()
regex_match_revision = args.regex_match_revision,
regex_revision_label = args.regex_revision_label,
regex_match_comment = args.regex_match_comment,
- regex_comment_label = args.regex_comment_label)
+ regex_comment_label = args.regex_comment_label,
+ siteinfo_file = args.siteinfo)
wikiq.process()