from collections import deque
from hashlib import sha1
-from mw.xml_dump import Iterator
-from mw.lib import persistence
+from mwxml import Dump
+
+from deltas.tokenizers import wikitext_split
+import mwpersistence
import mwreverts
from urllib.parse import quote
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7
+from deltas import SequenceMatcher
+from deltas import SegmentMatcher
+
+from dataclasses import dataclass
+from datetime import datetime
+import pandas as pd
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+from typing import List
+
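+# Enum-like constants naming the supported persistence calculation methods
+# (selected via the -p/--persistence command line flag below).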
+class PersistMethod:
+ none = 0
+ sequence = 1
+ segment = 2
+ legacy = 3
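+
+# returns (sum over added tokens of the number of later revisions each token persists in,
+#          number of tokens added)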
def calculate_persistence(tokens_added):
return(sum([(len(x.revisions)-1) for x in tokens_added]),
len(tokens_added))
+
class WikiqIterator():
def __init__(self, fh, collapse_user=False):
self.fh = fh
self.collapse_user = collapse_user
- self.mwiterator = Iterator.from_file(self.fh)
+ self.mwiterator = Dump.from_file(self.fh)
+ self.namespace_map = { ns.id : ns.name for ns in
+ self.mwiterator.site_info.namespaces }
self.__pages = self.load_pages()
def load_pages(self):
for page in self.mwiterator:
- yield WikiqPage(page, collapse_user=self.collapse_user)
+ yield WikiqPage(page,
+ namespace_map = self.namespace_map,
+ collapse_user=self.collapse_user)
def __iter__(self):
return self.__pages
'restrictions', 'mwpage', '__revisions',
'collapse_user')
- def __init__(self, page, collapse_user=False):
+ def __init__(self, page, namespace_map, collapse_user=False):
self.id = page.id
- self.title = page.title
self.namespace = page.namespace
- self.redirect = page.redirect
+ # following mwxml, we assume namespace 0 in cases where
+ # page.namespace is inconsistent with namespace_map
+ if page.namespace not in namespace_map:
+ self.title = page.title
+ page.namespace = 0
+ if page.namespace != 0:
+ self.title = ':'.join([namespace_map[page.namespace], page.title])
+ else:
+ self.title = page.title
self.restrictions = page.restrictions
-
self.collapse_user = collapse_user
self.mwpage = page
self.__revisions = self.rev_list()
else:
if self.collapse_user:
# yield if this is the last edit in a seq by a user and reset
- if not rev.contributor.user_text == prev_rev.contributor.user_text:
+                # also yield if we do not know who one of the users is
+
+ if rev.deleted.user or prev_rev.deleted.user:
+ yield prev_rev
+ collapsed_revs = 1
+ rev.collapsed_revs = collapsed_revs
+
+ elif not rev.user.text == prev_rev.user.text:
yield prev_rev
collapsed_revs = 1
rev.collapsed_revs = collapsed_revs
yield prev_rev
prev_rev = rev
+
# also yield the final time
yield prev_rev
def __next__(self):
return next(self.__revisions)
-class WikiqParser():
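+# Pairs a compiled regular expression with an output column label. Matches found in
+# revision text or comments are reported under the label (or, for named capture
+# groups, under "<label>_<group>").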
+class RegexPair(object):
+ def __init__(self, pattern, label):
+ self.pattern = re.compile(pattern)
+ self.label = label
+ self.has_groups = bool(self.pattern.groupindex)
+ if self.has_groups:
+ self.capture_groups = list(self.pattern.groupindex.keys())
+
+ def _make_key(self, cap_group):
+ return ("{}_{}".format(self.label, cap_group))
- def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
+ def matchmake(self, content, rev_data):
+ temp_dict = {}
+ # if there are named capture groups in the regex
+ if self.has_groups:
+
+ # if there are matches of some sort in this revision content, fill the lists for each cap_group
+ if self.pattern.search(content) is not None:
+ m = self.pattern.finditer(content)
+ matchobjects = list(m)
+
+ for cap_group in self.capture_groups:
+ key = self._make_key(cap_group)
+ temp_list = []
+ for match in matchobjects:
+ # we only want to add the match for the capture group if the match is not None
+                    if match.group(cap_group) is not None:
+ temp_list.append(match.group(cap_group))
+
+ # if temp_list of matches is empty just make that column None
+ if len(temp_list)==0:
+ temp_dict[key] = None
+ # else we put in the list we made in the for-loop above
+ else:
+ temp_dict[key] = ', '.join(temp_list)
+
+ # there are no matches at all in this revision content, we default values to None
+ else:
+ for cap_group in self.capture_groups:
+ key = self._make_key(cap_group)
+ temp_dict[key] = None
+
+ # there are no capture groups, we just search for all the matches of the regex
+ else:
+            # only search content that is a string or bytes
+            if type(content) in (str, bytes):
+ if self.pattern.search(content) is not None:
+ m = self.pattern.findall(content)
+ temp_dict[self.label] = ', '.join(m)
+ else:
+ temp_dict[self.label] = None
+
+ # update rev_data with our new columns
+ rev_data.update(temp_dict)
+ return rev_data
+
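+# Columns produced for each revision row in the tsv/parquet output.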
+@dataclass
+class RevData():
+    revid: int
+    date_time: datetime
+    articleid: int
+    editorid: int
+    title: str
+    namespace: int
+    deleted: bool
+    text_chars: int
+    revert: bool
+    reverteds: List[int]
+    sha1: str
+    minor: bool
+    editor: str
+    anon: bool
+    collapsed_revs: int
+    token_revs: int
+    tokens_added: int
+    tokens_removed: int
+    tokens_window: int
+
+class WikiqParser():
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment,
+                 regex_revision_label, regex_comment_label, collapse_user=False, persist=None,
+                 urlencode=False, namespaces=None, revert_radius=15,
+                 output_parquet=True, parquet_buffer_size=2000):
+ """
+ Parameters:
+ persist : what persistence method to use. Takes a PersistMethod value
+ """
self.input_file = input_file
- self.output_file = output_file
+
self.collapse_user = collapse_user
self.persist = persist
self.printed_header = False
self.namespaces = []
self.urlencode = urlencode
+ self.revert_radius = revert_radius
+ self.parquet_buffer = []
+ self.parquet_buffer_size = parquet_buffer_size
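+        # rows destined for parquet are buffered and flushed in batches of parquet_buffer_size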
+
+ if namespaces is not None:
+ self.namespace_filter = set(namespaces)
+ else:
+ self.namespace_filter = None
+
+ self.regex_schemas = []
+ self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
+ self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
+
+
+ if output_parquet is True:
+ self.output_parquet = True
+ self.pq_writer = None
+ self.output_file = output_file
+        else:
+            self.output_parquet = False
+            self.output_file = open(output_file, 'w')
+
+
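+    # build a RegexPair for each (pattern, label) and register a pyarrow field for the label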
+ def make_matchmake_pairs(self, patterns, labels):
+ if (patterns is not None and labels is not None) and \
+ (len(patterns) == len(labels)):
+ result = []
+ for pattern, label in zip(patterns, labels):
+ result.append(RegexPair(pattern, label))
+ self.regex_schemas.append(pa.field(label, pa.list_(pa.string())))
+
+ return result
+ elif (patterns is None and labels is None):
+ return []
+ else:
+ sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+ def matchmake(self, rev, rev_data):
+ rev_data = self.matchmake_revision(rev.text, rev_data)
+ rev_data = self.matchmake_comment(rev.comment, rev_data)
+ return rev_data
+
+ def matchmake_revision(self, text, rev_data):
+ return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
+
+ def matchmake_comment(self, comment, rev_data):
+ return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)
+
+ def matchmake_pairs(self, text, rev_data, pairs):
+ for pair in pairs:
+ rev_data = pair.matchmake(text, rev_data)
+ return rev_data
+
def __get_namespace_from_title(self, title):
default_ns = None
# if we've made it this far with no matches, we return the default namespace
return default_ns
+
def process(self):
        # construct the dump file iterator
dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
        # extract list of namespaces
- self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
+ self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
page_count = 0
rev_count = 0
+
+
# Iterate through pages
for page in dump:
- if self.persist:
- state = persistence.State()
+ namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
+
+ # skip namespaces not in the filter
+ if self.namespace_filter is not None:
+ if namespace not in self.namespace_filter:
+ continue
+
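+            # identity reverts are detected by matching revision sha1s within a window of revert_radius edits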
+ rev_detector = mwreverts.Detector(radius = self.revert_radius)
+
+ if self.persist != PersistMethod.none:
window = deque(maxlen=PERSISTENCE_RADIUS)
- rev_detector = mwreverts.Detector()
+ if self.persist == PersistMethod.sequence:
+ state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+ revert_radius=PERSISTENCE_RADIUS)
+
+ elif self.persist == PersistMethod.segment:
+ state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+ revert_radius=PERSISTENCE_RADIUS)
+
+ # self.persist == PersistMethod.legacy
+ else:
+ from mw.lib import persistence
+ state = persistence.State()
# Iterate through a page's revisions
for rev in page:
-
- rev_data = {'revid' : rev.id,
- 'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
- 'articleid' : page.id,
- 'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
- 'title' : '"' + page.title + '"',
- 'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
- 'deleted' : "TRUE" if rev.text.deleted else "FALSE" }
+
+ # initialize rev_data
+ rev_data = {
+ 'revid':rev.id,
+ 'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
+ 'articleid' : page.id,
+                        'editorid' : "" if rev.deleted.user or rev.user.id is None else rev.user.id,
+ 'title' : '"' + page.title + '"',
+ 'namespace' : namespace,
+ 'deleted' : "TRUE" if rev.deleted.text else "FALSE"
+ }
+
+ rev_data = self.matchmake(rev, rev_data)
# if revisions are deleted, /many/ things will be missing
- if rev.text.deleted:
+ if rev.deleted.text:
rev_data['text_chars'] = ""
rev_data['sha1'] = ""
rev_data['revert'] = ""
rev_data['reverteds'] = ""
else:
+ # rev.text can be None if the page has no text
+ if not rev.text:
+ rev.text = ""
# if text exists, we'll check for a sha1 and generate one otherwise
+
if rev.sha1:
text_sha1 = rev.sha1
else:
+
text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
rev_data['sha1'] = text_sha1
# TODO rev.bytes doesn't work.. looks like a bug
rev_data['text_chars'] = len(rev.text)
-
+
# generate revert data
revert = rev_detector.process(text_sha1, rev.id)
+
if revert:
rev_data['revert'] = "TRUE"
rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
# if the fact that the edit was minor can be hidden, this might be an issue
rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
- if rev.contributor.user_text:
+ if not rev.deleted.user:
# wrap user-defined editors in quotes for fread
- rev_data['editor'] = '"' + rev.contributor.user_text + '"'
- rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
+ rev_data['editor'] = '"' + rev.user.text + '"'
+ rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
else:
rev_data['anon'] = ""
if self.collapse_user:
rev_data['collapsed_revs'] = rev.collapsed_revs
- if self.persist:
- if rev.text.deleted:
+ if self.persist != PersistMethod.none:
+ if rev.deleted.text:
for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
old_rev_data[k] = None
else:
- _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
+ if self.persist != PersistMethod.legacy:
+ _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
+
+ else:
+ _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
window.append((rev.id, rev_data, tokens_added, tokens_removed))
if len(window) == PERSISTENCE_RADIUS:
rev_count += 1
- if self.persist:
+ if self.persist != PersistMethod.none:
# print out metadata for the last RADIUS revisions
for i, item in enumerate(window):
# if the window was full, we've already printed item 0
rev_data["tokens_added"] = num_tokens
rev_data["tokens_removed"] = len(tokens_removed)
rev_data["tokens_window"] = len(window)-(i+1)
-
self.print_rev_data(rev_data)
page_count += 1
print("Done: %s revisions and %s pages." % (rev_count, page_count),
file=sys.stderr)
+ if self.output_parquet is True:
+ self.flush_parquet_buffer()
+ self.pq_writer.close()
+
+ else:
+            self.output_file.close()
+
+
+ def write_parquet_row(self, rev_data):
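+        # coerce the tsv-style "TRUE"/"FALSE" strings to booleans so parquet stores real bools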
+ if 'deleted' in rev_data.keys():
+ rev_data['deleted'] = True if rev_data['deleted'] == "TRUE" else False
+
+ if 'minor' in rev_data.keys():
+ rev_data['minor'] = True if rev_data['minor'] == "TRUE" else False
+
+
+ if 'anon' in rev_data.keys():
+ rev_data['anon'] = True if rev_data['anon'] == "TRUE" else False
+
+
+ self.parquet_buffer.append(rev_data)
+
+ if len(self.parquet_buffer) >= self.parquet_buffer_size:
+ self.flush_parquet_buffer()
+
+ def flush_parquet_buffer(self):
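+        # convert the buffered rows to an arrow table and append it to the parquet file,
+        # creating the writer (and its schema) on the first flush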
+ outtable = pd.DataFrame.from_records(self.parquet_buffer)
+ outtable = pa.Table.from_pandas(outtable)
+ if self.pq_writer is None:
+            schema = outtable.schema
+            for regex_schema in self.regex_schemas:
+                # pyarrow schemas are immutable; append() returns a new schema
+                schema = schema.append(regex_schema)
+
+ self.pq_writer = pq.ParquetWriter(self.output_file, schema, flavor='spark')
+
+        self.pq_writer.write_table(outtable)
+
+        # clear the buffer so rows are not written more than once
+        self.parquet_buffer = []
+
def print_rev_data(self, rev_data):
+
+ if self.output_parquet is False:
+            printfunc = lambda rev_data: print("\t".join([str(v) for k, v in sorted(rev_data.items())]),
+                                               file=self.output_file)
+ else:
+ printfunc = self.write_parquet_row
+
# if it's the first time through, print the header
if self.urlencode:
for field in TO_ENCODE:
rev_data[field] = quote(str(rev_data[field]))
-
+
if not self.printed_header:
- print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
+            # only the tsv output needs an explicit header row; parquet carries its own schema
+            if self.output_parquet is False:
+                print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
self.printed_header = True
- print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
+ printfunc(rev_data)
def open_input_file(input_filename):
if re.match(r'.*\.7z$', input_filename):
- cmd = ["7za", "x", "-so", input_filename, '*']
+ cmd = ["7za", "x", "-so", input_filename, "*.xml"]
elif re.match(r'.*\.gz$', input_filename):
cmd = ["zcat", input_filename]
elif re.match(r'.*\.bz2$', input_filename):
return input_file
-def open_output_file(input_filename):
- # create a regex that creates the output filename
+def get_output_filename(input_filename, parquet = False):
output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
output_filename = re.sub(r'\.xml', '', output_filename)
- output_filename = output_filename + ".tsv"
- output_file = open(output_filename, "w")
+ if parquet is False:
+ output_filename = output_filename + ".tsv"
+ else:
+ output_filename = output_filename + ".parquet"
+ return output_filename
+def open_output_file(input_filename):
+ # create a regex that creates the output filename
+ output_filename = get_output_filename(input_filename, parquet = False)
+ output_file = open(output_filename, "w")
return output_file
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')
help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
- help="Directory for output files.")
+ help="Directory for output files. If it ends with .parquet output will be in parquet format.")
parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
help="Write output to standard out (do not create dump file)")
parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision made by a user within each sequence of consecutive edits by that user. This can be useful for addressing issues with text persistence measures.")
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
- help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. Passing -p with no value is equivalent to -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of namespace to include. Can be specified more than once.")
+
+parser.add_argument('-rr',
+ '--revert-radius',
+ dest="revert_radius",
+ type=int,
+ action='store',
+ default=15,
+ help="Number of edits to check when looking for reverts (default: 15)")
+
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+ help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+                    help="The label for the output column produced by matching the regex against revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+ help="The regular expression to search for in comments of revisions.")
+
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+                    help="The label for the output column produced by matching the regex against revision comments.")
+
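+# Example invocation (a sketch, not part of the tool's documented interface; assumes this
+# script is saved as wikiq.py and that dump.xml.bz2 exists):
+#
+#   python3 wikiq.py dump.xml.bz2 -o out.parquet -p sequence -n 0 \
+#       -RP '\[\[Category:(?P<cat>.*?)\]\]' -RPl category
+#
+# Here -o names an output directory; because it ends with .parquet, output is written in
+# parquet format. Processing is restricted to namespace 0, sequence-based persistence
+# measures are computed, and a "category_cat" column holds the text captured by the
+# named group.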
args = parser.parse_args()
+
+
+# set persistence method
+
+if args.persist is None:
+ persist = PersistMethod.none
+elif args.persist == "segment":
+ persist = PersistMethod.segment
+elif args.persist == "legacy":
+ persist = PersistMethod.legacy
+else:
+ persist = PersistMethod.sequence
+
+if args.namespace_filter is not None:
+ namespaces = args.namespace_filter
+else:
+ namespaces = None
+
if len(args.dumpfiles) > 0:
+ output_parquet = False
for filename in args.dumpfiles:
input_file = open_input_file(filename)
else:
output_dir = "."
+ if output_dir.endswith(".parquet"):
+ output_parquet = True
+
print("Processing file: %s" % filename, file=sys.stderr)
if args.stdout:
output_file = sys.stdout
else:
filename = os.path.join(output_dir, os.path.basename(filename))
- output_file = open_output_file(filename)
+ output_file = get_output_filename(filename, parquet = output_parquet)
- wikiq = WikiqParser(input_file, output_file,
+ wikiq = WikiqParser(input_file,
+ output_file,
collapse_user=args.collapse_user,
- persist=args.persist,
- urlencode=args.urlencode)
-
-
+ persist=persist,
+ urlencode=args.urlencode,
+ namespaces=namespaces,
+ revert_radius=args.revert_radius,
+ regex_match_revision = args.regex_match_revision,
+ regex_revision_label = args.regex_revision_label,
+ regex_match_comment = args.regex_match_comment,
+ regex_comment_label = args.regex_comment_label,
+ output_parquet=output_parquet)
+
wikiq.process()
# close things
input_file.close()
- output_file.close()
+
else:
- wikiq = WikiqParser(sys.stdin, sys.stdout,
+ wikiq = WikiqParser(sys.stdin,
+ sys.stdout,
collapse_user=args.collapse_user,
- persist=args.persist,
- urlencode=args.urlencode)
- wikiq.process()
+ persist=persist,
+ urlencode=args.urlencode,
+ namespaces=namespaces,
+ revert_radius=args.revert_radius,
+ regex_match_revision = args.regex_match_revision,
+ regex_revision_label = args.regex_revision_label,
+ regex_match_comment = args.regex_match_comment,
+                        regex_comment_label = args.regex_comment_label,
+                        output_parquet=False)
+
+ wikiq.process()
# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")