fix bugs and unit tests
[mediawiki_dump_tools.git] / wikiq
diff --git a/wikiq b/wikiq
index 9260f35710d029e99f2c4bffe470aa000e7f1615..1a951e8c16228867a1a2a0e8dac43712e0eada4e 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -3,17 +3,18 @@
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
-import pdb
+
 import argparse
 import sys
 import os, os.path
 import re
+from datetime import datetime,timezone
 
 from subprocess import Popen, PIPE
 from collections import deque
 from hashlib import sha1
 
-from mw.xml_dump import Iterator
+from mwxml import Dump
 
 from deltas.tokenizers import wikitext_split
 import mwpersistence
@@ -22,22 +23,37 @@ from urllib.parse import quote
 TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
 from deltas import SequenceMatcher
+from deltas import SegmentMatcher
+
+import dataclasses as dc
+from dataclasses import dataclass, make_dataclass
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+class PersistMethod:
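+    # enumeration of the supported persistence calculation methods,
+    # selected with the -p/--persistence command line option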
+    none = 0
+    sequence = 1
+    segment = 2
+    legacy = 3
 
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
-
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
         self.collapse_user = collapse_user
-        self.mwiterator = Iterator.from_file(self.fh)
+        self.mwiterator = Dump.from_file(self.fh)
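+        # map namespace ids to names so WikiqPage can build namespace-prefixed titles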
+        self.namespace_map = { ns.id : ns.name for ns in
+                               self.mwiterator.site_info.namespaces }
         self.__pages = self.load_pages()
 
     def load_pages(self):
         for page in self.mwiterator:
-            yield WikiqPage(page, collapse_user=self.collapse_user)
+            yield WikiqPage(page,
+                            namespace_map = self.namespace_map,
+                            collapse_user=self.collapse_user)
 
     def __iter__(self):
         return self.__pages
@@ -50,13 +66,19 @@ class WikiqPage():
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')
     
-    def __init__(self, page, collapse_user=False):
+    def __init__(self, page, namespace_map, collapse_user=False):
         self.id = page.id
-        self.title = page.title
         self.namespace = page.namespace
-        self.redirect = page.redirect
+        # following mwxml, we assume namespace 0 in cases where
+        # page.namespace is inconsistent with namespace_map
+        if page.namespace not in namespace_map:
+            page.namespace = 0
+        if page.namespace != 0:
+            self.title = ':'.join([namespace_map[page.namespace], page.title])
+        else:
+            self.title = page.title
         self.restrictions = page.restrictions
-        
         self.collapse_user = collapse_user
         self.mwpage = page
         self.__revisions = self.rev_list()
@@ -80,7 +102,14 @@ class WikiqPage():
             else:
                 if self.collapse_user:
                     # yield if this is the last edit in a seq by a user and reset
-                    if not rev.contributor.user_text == prev_rev.contributor.user_text:
+                    # also yield if we don't know who the user is (deleted or suppressed)
+
+                    if rev.deleted.user or prev_rev.deleted.user:
+                        yield prev_rev
+                        collapsed_revs = 1
+                        rev.collapsed_revs = collapsed_revs
+
+                    elif not rev.user.text == prev_rev.user.text:
                         yield prev_rev
                         collapsed_revs = 1
                         rev.collapsed_revs = collapsed_revs
@@ -93,6 +122,7 @@ class WikiqPage():
                     yield prev_rev
 
             prev_rev = rev
+
         # also yield the final time
         yield prev_rev
 
@@ -102,20 +132,262 @@ class WikiqPage():
     def __next__(self):
         return next(self.__revisions)
 
-class WikiqParser():
 
+class RegexPair(object):
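+    # a compiled regular expression plus the label used to name the output
+    # column(s) it produces: one column per named capture group, or a single
+    # column named after the label when there are no groups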
+    def __init__(self, pattern, label):
+        self.pattern = re.compile(pattern)
+        self.label = label
+        self.has_groups = bool(self.pattern.groupindex)
+        if self.has_groups:
+            self.capture_groups = list(self.pattern.groupindex.keys())
+            
+    def get_pyarrow_fields(self):
+        if self.has_groups:
+            fields = [pa.field(self._make_key(cap_group),pa.list_(pa.string()))
+                      for cap_group in self.capture_groups]
+        else:
+            fields = [pa.field(self.label, pa.list_(pa.string()))]
+
+        return fields
+
+    def _make_key(self, cap_group):
+        return ("{}_{}".format(self.label, cap_group))
+
+    def matchmake(self, content, rev_data):
+        
+        temp_dict = {}
+        # if there are named capture groups in the regex
+        if self.has_groups:
+
+            # if there are matches of some sort in this revision content, fill the lists for each cap_group
+            if self.pattern.search(content) is not None:
+                m = self.pattern.finditer(content)
+                matchobjects = list(m)
+
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_list = []
+                    for match in matchobjects:
+                        # we only want to add the match for the capture group if the match is not None
+                        if match.group(cap_group) is not None:
+                            temp_list.append(match.group(cap_group))
+
+                    # if temp_list of matches is empty just make that column None
+                    if len(temp_list)==0:
+                        temp_dict[key] = None
+                    # else we put in the list we made in the for-loop above
+                    else:
+                        temp_dict[key] = ', '.join(temp_list)
+
+            # there are no matches at all in this revision content, we default values to None
+            else:
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_dict[key] = None
 
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
+        # there are no capture groups, we just search for all the matches of the regex
+        else:
+            #given that there are matches to be made
+            if type(content) in (str, bytes):
+                if self.pattern.search(content) is not None:
+                    m = self.pattern.findall(content)
+                    temp_dict[self.label] = ', '.join(m)
+                else:
+                    temp_dict[self.label] = None
+
+        # update rev_data with our new columns
+        for k, v in temp_dict.items():
+            setattr(rev_data, k, v)
+
+        return rev_data
+
+@dataclass()
+class RevDataBase():
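+    # base dataclass holding the output fields for a single revision; it can
+    # render itself as a TSV row or as a tuple of field values for the parquet writer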
+    revid: int 
+    date_time: datetime
+    articleid: int
+    editorid: int
+    title: str
+    namespace: int
+    deleted: bool
+    text_chars: int = None
+    revert: bool = None
+    reverteds: list[int] = None
+    sha1: str = None
+    minor: bool = None
+    editor: str = None
+    anon: bool = None
+
+    urlencode = False
+    pa_schema_fields = [
+        pa.field("revid", pa.int64()),
+        pa.field("date_time",pa.timestamp('ms')),
+        pa.field("articleid",pa.int64()),
+        pa.field("editorid",pa.int64()),
+        pa.field("title",pa.string()),
+        pa.field("namespace",pa.int32()),
+        pa.field("deleted",pa.bool_()),
+        pa.field("test_chars",pa.int32()),
+        pa.field("revert",pa.bool_()),
+        pa.field("reverteds",pa.list_(pa.int64())),
+        pa.field("sha1",pa.string()),
+        pa.field("minor",pa.bool_()),
+        pa.field("editor",pa.string()),
+        pa.field("anon",pa.bool_())
+    ]
+
+    def to_pyarrow(self):
+        return dc.astuple(self)
+
+    def to_tsv_row(self):
         
+        row = []
+        for f in dc.fields(self):
+            val = getattr(self, f.name)
+            if getattr(self, f.name) is None:
+                row.append("")
+            elif f.type == bool:
+                row.append("TRUE" if val else "FALSE")
+
+            elif f.type == datetime:
+                row.append(val.strftime('%Y-%m-%d %H:%M:%S'))
+
+            elif f.name in {'editor','title'}:
+                s = '"' + val + '"'
+                if self.urlencode and f.name in TO_ENCODE:
+                    row.append(quote(str(s)))
+                else:
+                    row.append(s)
+
+            elif f.type == list[int]:
+                row.append('"' + ",".join([str(x) for x in val]) + '"')
+
+            elif f.type == str:
+                if self.urlencode and f.name in TO_ENCODE:
+                    row.append(quote(str(val)))
+                else:
+                    row.append(val)
+            else:
+                row.append(val)
+
+        return '\t'.join(map(str,row))
+
+    def header_row(self):
+        return '\t'.join(map(lambda f: f.name, dc.fields(self)))
+
+@dataclass()
+class RevDataCollapse(RevDataBase):
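+    # adds the collapsed_revs column produced by --collapse-user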
+    collapsed_revs:int = None
+    pa_collapsed_revs_schema = pa.field('collapsed_revs',pa.int64())
+    pa_schema_fields = RevDataBase.pa_schema_fields + [pa_collapsed_revs_schema]
+
+@dataclass()
+class RevDataPersistence(RevDataBase):
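+    # adds the token persistence columns produced by -p/--persistence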
+    token_revs:int = None
+    tokens_added:int = None
+    tokens_removed:int = None
+    tokens_window:int = None
+
+    pa_persistence_schema_fields = [
+        pa.field("token_revs", pa.int64()),
+        pa.field("tokens_added", pa.int64()),
+        pa.field("tokens_removed", pa.int64()),
+        pa.field("tokens_window", pa.int64())]
+        
+    pa_schema_fields = RevDataBase.pa_schema_fields  + pa_persistence_schema_fields
+
+@dataclass()
+class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
+    pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields
+
+class WikiqParser():
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000):
+        """ 
+        Parameters:
+           persist : what persistence method to use. Takes a PersistMethod value
+        """
         self.input_file = input_file
-        self.output_file = output_file
+
         self.collapse_user = collapse_user
         self.persist = persist
-        self.persist_legacy = persist_legacy
-        self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
+        self.revert_radius = revert_radius
+        
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
+
+        self.regex_schemas = []
+        self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
+        self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
+
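+        # pick the revision dataclass that matches the combination of
+        # --collapse-user and --persistence options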
+        if self.collapse_user is True:
+            if self.persist == PersistMethod.none:
+                revdata_type = RevDataCollapse
+            else:
+                revdata_type = RevDataCollapsePersistence
+        elif self.persist != PersistMethod.none:
+            revdata_type = RevDataPersistence
+        else:
+            revdata_type = RevDataBase
+
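+        # extend the chosen dataclass with one list[str] field per regex
+        # label / capture group supplied on the command line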
+        regex_fields = [(field.name, list[str], dc.field(default=None)) for field in self.regex_schemas]
+
+        self.revdata_type = make_dataclass('RevData_Parser',
+                                           fields=regex_fields,
+                                           bases=(revdata_type,))
         
+        self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas
+                        
+        self.revdata_type.urlencode = self.urlencode
+
+        if output_parquet is True:
+            self.output_parquet = True
+            self.pq_writer = None
+            self.output_file = output_file
+            self.parquet_buffer = []
+            self.parquet_buffer_size = parquet_buffer_size
+        else:
+            self.print_header = True
+            if output_file == sys.stdout:
+                self.output_file = output_file
+            else:
+                self.output_file = open(output_file,'w')
+            self.output_parquet = False
+
+    def make_matchmake_pairs(self, patterns, labels):
+        if (patterns is not None and labels is not None) and \
+           (len(patterns) == len(labels)):
+            result = []
+            for pattern, label in zip(patterns, labels):
+                rp = RegexPair(pattern, label)
+                result.append(rp)
+                self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
+            return result
+        elif (patterns is None and labels is None):
+            return []
+        else:
+            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+    def matchmake(self, rev, rev_data):
+        rev_data = self.matchmake_revision(rev.text, rev_data)
+        rev_data = self.matchmake_comment(rev.comment, rev_data)
+        return rev_data
+
+    def matchmake_revision(self, text, rev_data):
+        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
+
+    def matchmake_comment(self, comment, rev_data):
+        return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)
+
+    def matchmake_pairs(self, text, rev_data, pairs):
+        for pair in pairs:
+            rev_data = pair.matchmake(text, rev_data)
+        return rev_data
+
     def __get_namespace_from_title(self, title):
         default_ns = None
 
@@ -131,6 +403,7 @@ class WikiqParser():
         # if we've made it this far with no matches, we return the default namespace
         return default_ns
 
+
     def process(self):
 
         # create a regex that creates the output filename
@@ -142,7 +415,7 @@ class WikiqParser():
         dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
 
         # extract list of namespaces
-        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
+        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
 
         page_count = 0
         rev_count = 0
@@ -150,71 +423,78 @@ class WikiqParser():
 
         # Iterate through pages
         for page in dump:
-            rev_detector = mwreverts.Detector()
+            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
 
-            if self.persist or self.persist_legacy:
-                window = deque(maxlen=PERSISTENCE_RADIUS)
+            # skip namespaces not in the filter
+            if self.namespace_filter is not None:
+                if namespace not in self.namespace_filter:
+                    continue
+
+            rev_detector = mwreverts.Detector(radius = self.revert_radius)
 
-                if not self.persist_legacy:
+            if self.persist != PersistMethod.none:
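+                # keep a sliding window of recent revisions so each revision's token
+                # persistence can be measured against the edits that follow it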
+                window = deque(maxlen=PERSISTENCE_RADIUS)
+                
+                if self.persist == PersistMethod.sequence:
                     state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
                                                     revert_radius=PERSISTENCE_RADIUS)
 
+                elif self.persist == PersistMethod.segment:
+                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                # self.persist == PersistMethod.legacy
                 else:
                     from mw.lib import persistence
                     state = persistence.State()
 
             # Iterate through a page's revisions
             for rev in page:
-
-                rev_data = {'revid' : rev.id,
-                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
-                            'articleid' : page.id,
-                            'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
-                            'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
-                            'deleted' : "TRUE" if rev.text.deleted else "FALSE" } 
-
-                # if revisions are deleted, /many/ things will be missing
-                if rev.text.deleted:
-                    rev_data['text_chars'] = ""
-                    rev_data['sha1'] = ""
-                    rev_data['revert'] = ""
-                    rev_data['reverteds'] = ""
-
-                else:
+                
+                rev_data = self.revdata_type(revid = rev.id,
+                                             date_time = datetime.fromtimestamp(rev.timestamp.unix(), tz=timezone.utc),
+                                             articleid = page.id,
+                                             editorid = None if rev.deleted.user or rev.user.id is None else rev.user.id,
+                                             title =  page.title,
+                                             deleted = rev.deleted.text,
+                                             namespace = namespace
+                                             )
+
+                rev_data = self.matchmake(rev, rev_data)
+
+                if not rev.deleted.text:
+                    # rev.text can be None if the page has no text
+                    if not rev.text:
+                        rev.text = ""
                     # if text exists, we'll check for a sha1 and generate one otherwise
+
                     if rev.sha1:
                         text_sha1 = rev.sha1
                     else:
                         text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
                     
-                    rev_data['sha1'] = text_sha1
+                    rev_data.sha1 = text_sha1
 
                     # TODO rev.bytes doesn't work.. looks like a bug
-                    rev_data['text_chars'] = len(rev.text)
-               
+                    rev_data.text_chars = len(rev.text)
+
                     # generate revert data
                     revert = rev_detector.process(text_sha1, rev.id)
                     
                     if revert:
-                        rev_data['revert'] = "TRUE"
-                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
+                        rev_data.revert = True
+                        rev_data.reverteds = revert.reverteds
                     else:
-                        rev_data['revert'] = "FALSE"
-                        rev_data['reverteds'] = ""
+                        rev_data.revert = False
 
                 # if the fact that the edit was minor can be hidden, this might be an issue
-                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
+                rev_data.minor = rev.minor
 
-                if rev.contributor.user_text:
+                if not rev.deleted.user:
                     # wrap user-defined editors in quotes for fread
-                    rev_data['editor'] = '"' + rev.contributor.user_text + '"'
-                    rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
-                    
-                else:
-                    rev_data['anon'] = ""
-                    rev_data['editor'] = ""
-
+                    rev_data.editor = rev.user.text 
+                    rev_data.anon = rev.user.id is None
+                
                 #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                 #    redirect = True
                 #else:
@@ -224,19 +504,17 @@ class WikiqParser():
                 
                 # if collapse user was on, lets run that
                 if self.collapse_user:
-                    rev_data['collapsed_revs'] = rev.collapsed_revs
+                    rev_data.collapsed_revs = rev.collapsed_revs
 
-                if self.persist or self.persist_legacy:
-                    if rev.text.deleted:
-                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
-                            old_rev_data[k] = None
-                    else:
+                # compute persistence statistics if a persistence method is enabled
+                if self.persist != PersistMethod.none:
+                    if not rev.deleted.text:
 
-                        if not self.persist_legacy:
+                        if self.persist != PersistMethod.legacy:
                             _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
 
                         else:
-                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id,text_sha1)
+                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
                             
                         window.append((rev.id, rev_data, tokens_added, tokens_removed))
                         
@@ -245,10 +523,10 @@ class WikiqParser():
                             
                             num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
 
-                            old_rev_data["token_revs"] = num_token_revs
-                            old_rev_data["tokens_added"] = num_tokens
-                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
-                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1
+                            old_rev_data.token_revs = num_token_revs
+                            old_rev_data.tokens_added = num_tokens
+                            old_rev_data.tokens_removed = len(old_tokens_removed)
+                            old_rev_data.tokens_window = PERSISTENCE_RADIUS-1
 
                             self.print_rev_data(old_rev_data)
 
@@ -257,7 +535,7 @@ class WikiqParser():
 
                 rev_count += 1
 
-            if self.persist or self.persist_legacy:
+            if self.persist != PersistMethod.none:
                 # print out metadata for the last RADIUS revisions
                 for i, item in enumerate(window):
                     # if the window was full, we've already printed item 0
@@ -267,11 +545,10 @@ class WikiqParser():
                     rev_id, rev_data, tokens_added, tokens_removed = item
                     num_token_revs, num_tokens = calculate_persistence(tokens_added)
 
-                    rev_data["token_revs"] = num_token_revs
-                    rev_data["tokens_added"] = num_tokens
-                    rev_data["tokens_removed"] = len(tokens_removed)
-                    rev_data["tokens_window"] = len(window)-(i+1)
-                    
+                    rev_data.token_revs = num_token_revs
+                    rev_data.tokens_added = num_tokens
+                    rev_data.tokens_removed = len(tokens_removed)
+                    rev_data.tokens_window = len(window)-(i+1)
                     self.print_rev_data(rev_data)
 
             page_count += 1
@@ -279,22 +556,67 @@ class WikiqParser():
         print("Done: %s revisions and %s pages." % (rev_count, page_count),
               file=sys.stderr)
 
+        if self.output_parquet is True:
+            self.flush_parquet_buffer()
+            self.pq_writer.close()
+
+        else:
+            self.output_file.close()
+
+
+    def write_parquet_row(self, rev_data):
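+        # rows are buffered and flushed to the parquet writer in batches of
+        # parquet_buffer_size to limit memory use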
+        padata = rev_data.to_pyarrow()
+        self.parquet_buffer.append(padata)
+
+        if len(self.parquet_buffer) >= self.parquet_buffer_size:
+            self.flush_parquet_buffer()
+
+
+    def flush_parquet_buffer(self):
+        schema = pa.schema(self.revdata_type.pa_schema_fields)
+
+        def row_to_col(rg, types):
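+            # transpose the buffered row tuples into per-column lists so that
+            # pyarrow can build one array per schema field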
+            cols = []
+            first = rg[0]
+            for col in first:
+                cols.append([col])
+
+            for row in rg[1:]:
+                for j in range(len(cols)):
+                    cols[j].append(row[j])
+
+            arrays = []
+            for col, typ in zip(cols, types):
+                arrays.append(pa.array(col, typ))
+            return arrays
+
+        outtable = pa.Table.from_arrays(row_to_col(self.parquet_buffer, schema.types), schema=schema)
+        if self.pq_writer is None:
+            self.pq_writer = pq.ParquetWriter(self.output_file, schema, flavor='spark')
+
+        self.pq_writer.write_table(outtable)
+        self.parquet_buffer = []
+        
     def print_rev_data(self, rev_data):
-        # if it's the first time through, print the header
-        if self.urlencode:
-            for field in TO_ENCODE:
-                rev_data[field] = quote(str(rev_data[field]))
-            
-        if not self.printed_header:
-            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
-            self.printed_header = True
+        if self.output_parquet is False:
+            printfunc = self.write_tsv_row
+        else:
+            printfunc = self.write_parquet_row
         
-        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
+        printfunc(rev_data)
+
+    def write_tsv_row(self, rev_data):
+        if self.print_header:
+            print(rev_data.header_row(), file=self.output_file)
+            self.print_header = False
+
+        line = rev_data.to_tsv_row()
+        print(line, file=self.output_file)
 
 
 def open_input_file(input_filename):
     if re.match(r'.*\.7z$', input_filename):
-        cmd = ["7za", "x", "-so", input_filename, '*'
+        cmd = ["7za", "x", "-so", input_filename, "*.xml"
     elif re.match(r'.*\.gz$', input_filename):
         cmd = ["zcat", input_filename] 
     elif re.match(r'.*\.bz2$', input_filename):
@@ -307,13 +629,19 @@ def open_input_file(input_filename):
 
     return input_file
 
-def open_output_file(input_filename):
-    # create a regex that creates the output filename
+def get_output_filename(input_filename, parquet = False):
     output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
     output_filename = re.sub(r'\.xml', '', output_filename)
-    output_filename = output_filename + ".tsv"
-    output_file = open(output_filename, "w")
+    if parquet is False:
+        output_filename = output_filename + ".tsv"
+    else:
+        output_filename = output_filename + ".parquet"
+    return output_filename
 
+def open_output_file(input_filename):
+    # create a regex that creates the output filename
+    output_filename = get_output_filename(input_filename, parquet = False)
+    output_file = open(output_filename, "w")
     return output_file
 
 parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')
@@ -323,7 +651,7 @@ parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                     help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
 
 parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
-                    help="Directory for output files.")
+                    help="Directory for output files. If it ends with .parquet, output will be written in Parquet format.")
 
 parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                     help="Write output to standard out (do not create dump file)")
@@ -331,18 +659,57 @@ parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
 parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                     help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
 
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves but prone to bugs, and slower.")
 
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
-parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
-                    help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of namespace to include. Can be specified more than once.")
+
+parser.add_argument('-rr',
+                    '--revert-radius',
+                    dest="revert_radius",
+                    type=int,
+                    action='store',
+                    default=15,
+                    help="Number of edits to check when looking for reverts (default: 15)")
+
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+                    help="The label for the output column produced by matching the regex in revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+                    help="The regular expression to search for in comments of revisions.")
+
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+                    help="The label for the output column produced by matching the regex in revision comments.")
 
 args = parser.parse_args()
 
+
+
+# set persistence method
+
+if args.persist is None:
+    persist = PersistMethod.none
+elif args.persist == "segment":
+    persist = PersistMethod.segment
+elif args.persist == "legacy":
+    persist = PersistMethod.legacy
+else:
+    persist = PersistMethod.sequence
+
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
 if len(args.dumpfiles) > 0:
+    output_parquet = False
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
 
@@ -352,33 +719,50 @@ if len(args.dumpfiles) > 0:
         else:
             output_dir = "."
 
+        if output_dir.endswith(".parquet"):
+            output_parquet = True
+
         print("Processing file: %s" % filename, file=sys.stderr)
 
         if args.stdout:
             output_file = sys.stdout
         else:
             filename = os.path.join(output_dir, os.path.basename(filename))
-            output_file = open_output_file(filename)
+            output_file = get_output_filename(filename, parquet = output_parquet)
 
-        wikiq = WikiqParser(input_file, output_file, 
+        wikiq = WikiqParser(input_file,
+                            output_file,
                             collapse_user=args.collapse_user,
-                            persist=args.persist,
-                            persist_legacy=args.persist_legacy,
-                            urlencode=args.urlencode)
-
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces=namespaces,
+                            revert_radius=args.revert_radius,
+                            regex_match_revision = args.regex_match_revision,
+                            regex_revision_label = args.regex_revision_label,
+                            regex_match_comment = args.regex_match_comment,
+                            regex_comment_label = args.regex_comment_label,
+                            output_parquet=output_parquet)
 
         wikiq.process()
 
         # close things 
         input_file.close()
-        output_file.close()
+
 else:
-    wikiq = WikiqParser(sys.stdin, sys.stdout,
+    wikiq = WikiqParser(sys.stdin,
+                        sys.stdout,
                         collapse_user=args.collapse_user,
-                        persist=args.persist,
-                        persist_legacy=args.persist_legacy,
-                        urlencode=args.urlencode)
-    wikiq.process()
+                        persist=persist,
+                        urlencode=args.urlencode,
+                        namespaces=namespaces,
+                        revert_radius=args.revert_radius,
+                        regex_match_revision = args.regex_match_revision,
+                        regex_revision_label = args.regex_revision_label,
+                        regex_match_comment = args.regex_match_comment,
+                        regex_comment_label = args.regex_comment_label)
+
+    wikiq.process() 
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
 # stop_words = stop_words.split(",")
