remove dependency on pandas.
diff --git a/wikiq b/wikiq
index 8a12d90980f6379d096b6fac0fb70d8f274bdb3f..03b3147446b5af284efc01d6d2435e0ee1ba9c5c 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -3,10 +3,12 @@
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
+
 import argparse
 import sys
 import os, os.path
 import re
+from datetime import datetime
 
 from subprocess import Popen, PIPE
 from collections import deque
 
@@ -21,6 +23,17 @@ from urllib.parse import quote
 TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
 from deltas import SequenceMatcher
+from deltas import SegmentMatcher
+
+import dataclasses
+from dataclasses import dataclass
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+class PersistMethod:
+    none = 0
+    sequence = 1
+    segment = 2
+    legacy = 3
 
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
 
@@ -32,11 +45,15 @@ class WikiqIterator():
         self.fh = fh
         self.collapse_user = collapse_user
         self.mwiterator = Dump.from_file(self.fh)
+        self.namespace_map = { ns.id : ns.name for ns in
+                               self.mwiterator.site_info.namespaces }
         self.__pages = self.load_pages()
 
     def load_pages(self):
         for page in self.mwiterator:
-            yield WikiqPage(page, collapse_user=self.collapse_user)
+            yield WikiqPage(page,
+                            namespace_map = self.namespace_map,
+                            collapse_user=self.collapse_user)
 
     def __iter__(self):
         return self.__pages
 
@@ -49,13 +66,19 @@ class WikiqPage():
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')
     
-    def __init__(self, page, collapse_user=False):
+    def __init__(self, page, namespace_map, collapse_user=False):
         self.id = page.id
-        self.title = page.title
         self.namespace = page.namespace
-        self.redirect = page.redirect
+        # following mwxml, we assume namespace 0 in cases where
+        # page.namespace is inconsistent with namespace_map
+        if page.namespace not in namespace_map:
+            page.namespace = 0
+        if page.namespace != 0:
+            self.title = ':'.join([namespace_map[page.namespace], page.title])
+        else:
+            self.title = page.title
         self.restrictions = page.restrictions
-        
         self.collapse_user = collapse_user
         self.mwpage = page
         self.__revisions = self.rev_list()
@@ -109,20 +132,260 @@ class WikiqPage():
     def __next__(self):
         return next(self.__revisions)
 
-class WikiqParser():
 
 
+class RegexPair(object):
+    def __init__(self, pattern, label):
+        self.pattern = re.compile(pattern)
+        self.label = label
+        self.has_groups = bool(self.pattern.groupindex)
+        if self.has_groups:
+            self.capture_groups = list(self.pattern.groupindex.keys())
+            
+    def _make_key(self, cap_group):
+        return ("{}_{}".format(self.label, cap_group))
+
+    def matchmake(self, content, rev_data):
+        
+        temp_dict = {}
+        # if there are named capture groups in the regex
+        if self.has_groups:
+
+            # if there are matches of some sort in this revision content, fill the lists for each cap_group
+            if self.pattern.search(content) is not None:
+                m = self.pattern.finditer(content)
+                matchobjects = list(m)
+
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_list = []
+                    for match in matchobjects:
+                        # we only want to add the match for the capture group if the match is not None
+                        if match.group(cap_group) is not None:
+                            temp_list.append(match.group(cap_group))
+
+                    # if temp_list of matches is empty just make that column None
+                    if len(temp_list)==0:
+                        temp_dict[key] = None
+                    # else we put in the list we made in the for-loop above
+                    else:
+                        temp_dict[key] = ', '.join(temp_list)
+
+            # there are no matches at all in this revision content, we default values to None
+            else:
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_dict[key] = None
+
+        # there are no capture groups, we just search for all the matches of the regex
+        else:
+            #given that there are matches to be made
+            if type(content) in (str, bytes):
+                if self.pattern.search(content) is not None:
+                    m = self.pattern.findall(content)
+                    temp_dict[self.label] = ', '.join(m)
+                else:
+                    temp_dict[self.label] = None
+
+        # update rev_data with our new columns
+        for k, v in temp_dict.items():
+            setattr(rev_data, k, v)
+
+        return rev_data
+
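
For orientation, a minimal sketch of how RegexPair is meant to be used, assuming the class as patched above (the pattern, the 'wikilink' label, and the SimpleNamespace stand-in for rev_data are all hypothetical):

    from types import SimpleNamespace

    pair = RegexPair(r'\[\[(?P<link>[^\]|]+)', 'wikilink')
    rev_data = pair.matchmake("See [[Main Page]] and [[Help:Contents|help]].",
                              SimpleNamespace())
    print(rev_data.wikilink_link)   # -> Main Page, Help:Contents
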
+@dataclass()
+class RevDataBase():
+    revid: int 
+    date_time: datetime
+    articleid: int
+    editorid: int
+    title: str
+    namespace: int
+    deleted: bool
+    text_chars: int = None
+    revert: bool = None
+    reverteds: list[int] = None
+    sha1: str = None
+    minor: bool = None
+    editor: str = None
+    anon: bool = None
+
+    pa_schema_fields = [
+        pa.field("revid", pa.int64()),
+        pa.field("date_time", pa.timestamp('ms')),
+        pa.field("articleid", pa.int64()),
+        pa.field("editorid", pa.int64()),
+        pa.field("title", pa.string()),
+        pa.field("namespace", pa.int32()),
+        pa.field("deleted", pa.bool_()),
+        pa.field("text_chars", pa.int32()),
+        pa.field("revert", pa.bool_()),
+        pa.field("reverteds", pa.list_(pa.int64())),
+        pa.field("sha1", pa.string()),
+        pa.field("minor", pa.bool_()),
+        pa.field("editor", pa.string()),
+        pa.field("anon", pa.bool_())
+    ]
+
+    def to_pyarrow(self):
+        # represent the revision as a plain dict; flush_parquet_buffer
+        # assembles the buffered dicts into a pyarrow Table
+        return {f.name: getattr(self, f.name) for f in dataclasses.fields(self)}
+        
+
+    def to_tsv_row(self):
+        row = []
+        for f in dataclasses.fields(self):
+            val = getattr(self, f.name)
+            if val is None:
+                row.append("")
+            elif f.type == bool:
+                row.append("TRUE" if val else "FALSE")
+
+            elif f.type == datetime:
+                row.append(val.strftime('%Y-%m-%d %H:%M:%S'))
+
+            elif f.name in {'editor', 'title'}:
+                if f.name in TO_ENCODE:
+                    row.append(quote(str(val)))
+                else:
+                    # wrap user-supplied strings in quotes for fread
+                    row.append('"' + val + '"')
+
+            elif f.type == list[int]:
+                row.append('"' + ",".join([str(x) for x in val]) + '"')
+
+            elif f.type == str:
+                if f.name in TO_ENCODE:
+                    row.append(quote(str(val)))
+                else:
+                    row.append(val)
+            else:
+                row.append(str(val))
 
 
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
+        return '\t'.join(row)
+    
+@dataclass()
+class RevDataCollapse(RevDataBase):
+    collapsed_revs:int = None
+    pa_collapsed_revs_schema = pa.field('collapsed_revs',pa.int64())
+    pa_schema_fields = RevDataBase.pa_schema_fields + [pa_collapsed_revs_schema]
+    pa_schema = pa.schema(pa_schema_fields)    
+
+@dataclass()
+class RevDataPersistence(RevDataBase):
+    token_revs:int = None
+    tokens_added:int = None
+    tokens_removed:int = None
+    tokens_window:int = None
+    pa_persistence_schema_fields = [
+        pa.field("token_revs", pa.int64()),
+        pa.field("tokens_added", pa.int64()),
+        pa.field("tokens_removed", pa.int64()),
+        pa.field("tokens_window", pa.int64())]
         
+    pa_schema_fields = RevDataBase.pa_schema_fields  + pa_persistence_schema_fields
+
+@dataclass()
+class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
+    pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields
+
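
As a quick illustration of the TSV path, assuming the dataclasses as patched above (all values are made up): unset fields serialize as empty strings, booleans as TRUE/FALSE, and timestamps as '%Y-%m-%d %H:%M:%S'.

    from datetime import datetime

    rd = RevDataCollapse(revid=1, date_time=datetime(2020, 1, 1), articleid=7,
                         editorid=3, title="Example", namespace=0,
                         deleted=False, collapsed_revs=2)
    print(rd.to_tsv_row())
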
+class WikiqParser():
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=PersistMethod.none, urlencode=False, namespaces=None, revert_radius=15, output_parquet=False, output_buffer_size=2000):
+        """ 
+        Parameters:
+           persist : what persistence method to use. Takes a PersistMethod value
+        """
         self.input_file = input_file
-        self.output_file = output_file
+
         self.collapse_user = collapse_user
         self.persist = persist
-        self.persist_legacy = persist_legacy
-        self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
+        self.revert_radius = revert_radius
+        
+        self.output_buffer = []
+        self.output_buffer_size = output_buffer_size
+
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
+
+        self.regex_schemas = []
+        self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
+        self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
+
+        if self.collapse_user is True:
+            if self.persist == PersistMethod.none:
+                revdata_type = RevDataCollapse
+            else:
+                revdata_type = RevDataCollapsePersistence
+        elif self.persist != PersistMethod.none:
+            revdata_type = RevDataPersistence
+        else:
+            revdata_type = RevDataBase
+
+        regex_fields = [(field.name, str, dataclasses.field(default=None))
+                        for field in self.regex_schemas]
+        self.revdata_type = dataclasses.make_dataclass('RevData_Parser',
+                                                       fields=regex_fields,
+                                                       bases=(revdata_type,))
         
+        self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas
+                        
+        if output_parquet is True:
+            self.output_parquet = True
+            self.pq_writer = None
+            self.output_file = output_file
+        else:
+            self.output_parquet = False
+            self.printed_header = False
+            # output_file may be a path or an already-open file (e.g. sys.stdout)
+            if isinstance(output_file, str):
+                self.output_file = open(output_file, 'w')
+            else:
+                self.output_file = output_file
+
+
+    def make_matchmake_pairs(self, patterns, labels):
+        if (patterns is not None and labels is not None) and \
+           (len(patterns) == len(labels)):
+            result = []
+            for pattern, label in zip(patterns, labels):
+                result.append(RegexPair(pattern, label))
+                # matchmake() stores matches as a comma-joined string; note that
+                # patterns with named capture groups produce {label}_{group} columns
+                self.regex_schemas.append(pa.field(label, pa.string()))
+
+            return result
+        elif (patterns is None and labels is None):
+            return []
+        else:
+            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+    def matchmake(self, rev, rev_data):
+        rev_data = self.matchmake_revision(rev.text, rev_data)
+        rev_data = self.matchmake_comment(rev.comment, rev_data)
+        return rev_data
+
+    def matchmake_revision(self, text, rev_data):
+        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
+
+    def matchmake_comment(self, comment, rev_data):
+        return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)
+
+    def matchmake_pairs(self, text, rev_data, pairs):
+        for pair in pairs:
+            rev_data = pair.matchmake(text, rev_data)
+        return rev_data
+
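
For context, a hypothetical construction of the parser with a single revision regex (the file names and pattern are made up; persist defaults to PersistMethod.none):

    wikiq = WikiqParser(open("dump.xml"), "dump.parquet",
                        regex_match_revision=[r"\bcopyedit\b"],
                        regex_revision_label=["copyedit"],
                        regex_match_comment=None,
                        regex_comment_label=None,
                        output_parquet=True)
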
     def __get_namespace_from_title(self, title):
         default_ns = None
 
@@ -138,6 +401,7 @@ class WikiqParser():
         # if we've made it this far with no matches, we return the default namespace
         return default_ns
 
+
     def process(self):
 
         # create a regex that creates the output filename
@@ -157,38 +421,45 @@ class WikiqParser():
 
         # Iterate through pages
         for page in dump:
-            rev_detector = mwreverts.Detector()
+            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
+
+            # skip namespaces not in the filter
+            if self.namespace_filter is not None:
+                if namespace not in self.namespace_filter:
+                    continue
+
+            rev_detector = mwreverts.Detector(radius = self.revert_radius)
 
-            if self.persist or self.persist_legacy:
+            if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
 
-                if not self.persist_legacy:
+                if self.persist == PersistMethod.sequence:
                     state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
                                                     revert_radius=PERSISTENCE_RADIUS)
 
+                elif self.persist == PersistMethod.segment:
+                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                # self.persist == PersistMethod.legacy
                 else:
                     from mw.lib import persistence
                     state = persistence.State()
 
             # Iterate through a page's revisions
             for rev in page:
+                
+                rev_data = self.revdata_type(revid = rev.id,
+                                             date_time = datetime.fromtimestamp(rev.timestamp.unix()),
+                                             articleid = page.id,
+                                             editorid = None if rev.deleted.user or rev.user.id is None else rev.user.id,
+                                             title = page.title,
+                                             namespace = namespace,
+                                             deleted = rev.deleted.text
+                                             )
 
-                rev_data = {'revid' : rev.id,
-                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
-                            'articleid' : page.id,
-                            'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
-                            'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
-                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" } 
-
-                # if revisions are deleted, /many/ things will be missing
-                if rev.deleted.text:
-                    rev_data['text_chars'] = ""
-                    rev_data['sha1'] = ""
-                    rev_data['revert'] = ""
-                    rev_data['reverteds'] = ""
+                rev_data = self.matchmake(rev, rev_data)
 
-                else:
+                if not rev.deleted.text:
                     # rev.text can be None if the page has no text
                     if not rev.text:
                         rev.text = ""
@@ -200,33 +471,25 @@ class WikiqParser():
 
                         text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
                     
-                    rev_data['sha1'] = text_sha1
+                    rev_data.sha1 = text_sha1
 
                     # TODO rev.bytes doesn't work.. looks like a bug
-                    rev_data['text_chars'] = len(rev.text)
-               
+                    rev_data.text_chars = len(rev.text)
+
                     # generate revert data
-                    revert = rev_detector.process(text_sha1, rev.id)
+                    revert = rev_detector.process(text_sha1, rev.id)
+                    rev_data.revert = revert is not None
                     
                     if revert:
-                        rev_data['revert'] = "TRUE"
-                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
-                    else:
-                        rev_data['revert'] = "FALSE"
-                        rev_data['reverteds'] = ""
+                        rev_data.reverteds = revert.reverteds
 
                 # if the fact that the edit was minor can be hidden, this might be an issue
-                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
+                rev_data.minor = rev.minor
 
                 if not rev.deleted.user:
                     # wrap user-defined editors in quotes for fread
-                    rev_data['editor'] = '"' + rev.user.text + '"'
-                    rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
+                    rev_data.editor = rev.user.text
+                    rev_data.anon = rev.user.id is None
                     
-                else:
-                    rev_data['anon'] = ""
-                    rev_data['editor'] = ""
-
                 #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                 #    redirect = True
                 #else:
@@ -236,16 +499,13 @@ class WikiqParser():
                 
                 # if collapse user was on, lets run that
                 if self.collapse_user:
-                    rev_data['collapsed_revs'] = rev.collapsed_revs
+                    rev_data.collapsed_revs = rev.collapsed_revs
 
-                if self.persist or self.persist_legacy:
-                    if rev.deleted.text:
+                if self.persist != PersistMethod.none:
 
-                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
-                            old_rev_data[k] = None
-                    else:
+                    if not rev.deleted.text:
 
-                        if not self.persist_legacy:
+                        if self.persist != PersistMethod.legacy:
                             _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
 
                         else:
@@ -258,19 +518,19 @@ class WikiqParser():
                             
                             num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
 
-                            old_rev_data["token_revs"] = num_token_revs
-                            old_rev_data["tokens_added"] = num_tokens
-                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
-                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1
+                            old_rev_data.token_revs = num_token_revs
+                            old_rev_data.tokens_added = num_tokens
+                            old_rev_data.tokens_removed = len(old_tokens_removed)
+                            old_rev_data.tokens_window = PERSISTENCE_RADIUS-1
 
                             self.print_rev_data(old_rev_data)
 
                 else:
                     self.print_rev_data(rev_data)
 
                 rev_count += 1
 
-            if self.persist or self.persist_legacy:
+            if self.persist != PersistMethod.none:
                 # print out metadata for the last RADIUS revisions
                 for i, item in enumerate(window):
                     # if the window was full, we've already printed item 0
@@ -280,11 +540,10 @@ class WikiqParser():
                     rev_id, rev_data, tokens_added, tokens_removed = item
                     num_token_revs, num_tokens = calculate_persistence(tokens_added)
 
-                    rev_data["token_revs"] = num_token_revs
-                    rev_data["tokens_added"] = num_tokens
-                    rev_data["tokens_removed"] = len(tokens_removed)
-                    rev_data["tokens_window"] = len(window)-(i+1)
-                    
+                    rev_data.token_revs = num_token_revs
+                    rev_data.tokens_added = num_tokens
+                    rev_data.tokens_removed = len(tokens_removed)
+                    rev_data.tokens_window = len(window)-(i+1)
                     self.print_rev_data(rev_data)
 
             page_count += 1
@@ -292,22 +551,53 @@ class WikiqParser():
         print("Done: %s revisions and %s pages." % (rev_count, page_count),
               file=sys.stderr)
 
+        if self.output_parquet is True:
+            self.flush_parquet_buffer()
+            self.pq_writer.close()
+
+        else:
+            self.flush_tsv_buffer()
+            self.output_file.close()
+
+
+    def write_parquet_row(self, rev_data):
+        padata = rev_data.to_pyarrow()
+        self.output_buffer.append(padata)
+
+        if len(self.output_buffer) >= self.output_buffer_size:
+            self.flush_parquet_buffer()
+
+
+    def flush_parquet_buffer(self):
+        schema = pa.schema(self.revdata_type.pa_schema_fields)
+        outtable = pa.Table.from_pylist(self.output_buffer, schema=schema)
+        if self.pq_writer is None:
+            self.pq_writer = pq.ParquetWriter(self.output_file, schema, flavor='spark')
+
+        self.pq_writer.write_table(outtable)
+        self.output_buffer = []
+        
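
A minimal end-to-end sketch of the buffered parquet path, assuming the patched RevDataBase above (the output file name is hypothetical):

    from datetime import datetime
    import pyarrow as pa
    import pyarrow.parquet as pq

    rows = [RevDataBase(revid=1, date_time=datetime(2020, 1, 1), articleid=7,
                        editorid=3, title="Example", namespace=0,
                        deleted=False).to_pyarrow()]
    schema = pa.schema(RevDataBase.pa_schema_fields)
    with pq.ParquetWriter("example.parquet", schema, flavor='spark') as writer:
        writer.write_table(pa.Table.from_pylist(rows, schema=schema))
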
     def print_rev_data(self, rev_data):
-        # if it's the first time through, print the header
-        if self.urlencode:
-            for field in TO_ENCODE:
-                rev_data[field] = quote(str(rev_data[field]))
-
-        if not self.printed_header:
-            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
-            self.printed_header = True
+        if self.output_parquet is False:
+            printfunc = self.write_tsv_row
+        else:
+            printfunc = self.write_parquet_row
         
-        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
+        printfunc(rev_data)
+
+    def write_tsv_row(self, rev_data):
+        self.output_buffer.append(rev_data.to_tsv_row())
+
+        if len(self.output_buffer) >= self.output_buffer_size:
+            self.flush_tsv_buffer()
 
 
+    def flush_tsv_buffer(self):
+        # write the TSV header the first time through, then the buffered rows
+        if not self.printed_header:
+            print('\t'.join([f.name for f in dataclasses.fields(self.revdata_type)]),
+                  file=self.output_file)
+            self.printed_header = True
+        for row in self.output_buffer:
+            print(row, file=self.output_file)
+        self.output_buffer = []
+
 def open_input_file(input_filename):
     if re.match(r'.*\.7z$', input_filename):
-        cmd = ["7za", "x", "-so", input_filename, '*']
+        cmd = ["7za", "x", "-so", input_filename, "*.xml"]
     elif re.match(r'.*\.gz$', input_filename):
         cmd = ["zcat", input_filename] 
     elif re.match(r'.*\.bz2$', input_filename):
@@ -320,13 +610,19 @@ def open_input_file(input_filename):
 
     return input_file
 
-def open_output_file(input_filename):
-    # create a regex that creates the output filename
+def get_output_filename(input_filename, parquet = False):
     output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
     output_filename = re.sub(r'\.xml', '', output_filename)
-    output_filename = output_filename + ".tsv"
-    output_file = open(output_filename, "w")
+    if parquet is False:
+        output_filename = output_filename + ".tsv"
+    else:
+        output_filename = output_filename + ".parquet"
+    return output_filename
 
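For example, with a hypothetical dump name:

    assert get_output_filename("enwiki-pages.xml.bz2") == "enwiki-pages.tsv"
    assert get_output_filename("enwiki-pages.xml.bz2", parquet=True) == "enwiki-pages.parquet"
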
+def open_output_file(input_filename):
+    # create a regex that creates the output filename
+    output_filename = get_output_filename(input_filename, parquet = False)
+    output_file = open(output_filename, "w")
     return output_file
 
 parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimitted data.')
@@ -336,7 +632,7 @@ parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                     help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
 
 parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
-                    help="Directory for output files.")
+                    help="Directory for output files. If it ends with .parquet, output will be written in parquet format.")
 
 parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                     help="Write output to standard out (do not create dump file)")
 
@@ -344,18 +640,57 @@ parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
 parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                     help="Operate only on the final revision made by a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
 
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
 
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
 
-parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
-                    help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of namespace to include. Can be specified more than once.")
+
+parser.add_argument('-rr',
+                    '--revert-radius',
+                    dest="revert_radius",
+                    type=int,
+                    action='store',
+                    default=15,
+                    help="Number of edits to check when looking for reverts (default: 15)")
+
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+                    help="The label for the outputted column based on matching the regex in revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+                    help="The regular expression to search for in comments of revisions.")
+
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+                    help="The label for the outputted column based on matching the regex in comments.")
 
 args = parser.parse_args()
 
 
+
+# set persistence method
+
+if args.persist is None:
+    persist = PersistMethod.none
+elif args.persist == "segment":
+    persist = PersistMethod.segment
+elif args.persist == "legacy":
+    persist = PersistMethod.legacy
+else:
+    persist = PersistMethod.sequence
+
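To make the flag-to-method mapping concrete (illustration only, exercising the parser defined above):

    assert parser.parse_args([]).persist is None                    # -> PersistMethod.none
    assert parser.parse_args(['-p']).persist == ''                  # -> PersistMethod.sequence
    assert parser.parse_args(['-p', 'legacy']).persist == 'legacy'  # -> PersistMethod.legacy
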
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
 if len(args.dumpfiles) > 0:
+    output_parquet = False
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
 
@@ -365,33 +700,51 @@ if len(args.dumpfiles) > 0:
         else:
             output_dir = "."
 
+        if output_dir.endswith(".parquet"):
+            output_parquet = True
+
         print("Processing file: %s" % filename, file=sys.stderr)
 
         if args.stdout:
             output_file = sys.stdout
         else:
             filename = os.path.join(output_dir, os.path.basename(filename))
-            output_file = open_output_file(filename)
+            output_file = get_output_filename(filename, parquet = output_parquet)
 
-        wikiq = WikiqParser(input_file, output_file, 
+        wikiq = WikiqParser(input_file,
+                            output_file,
                             collapse_user=args.collapse_user,
-                            persist=args.persist,
-                            persist_legacy=args.persist_legacy,
-                            urlencode=args.urlencode)
-
-
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces=namespaces,
+                            revert_radius=args.revert_radius,
+                            regex_match_revision = args.regex_match_revision,
+                            regex_revision_label = args.regex_revision_label,
+                            regex_match_comment = args.regex_match_comment,
+                            regex_comment_label = args.regex_comment_label,
+                            output_parquet=output_parquet)
+
         wikiq.process()
 
         # close things 
         input_file.close()
-        output_file.close()
+
 else:
-    wikiq = WikiqParser(sys.stdin, sys.stdout,
+    wikiq = WikiqParser(sys.stdin,
+                        sys.stdout,
                         collapse_user=args.collapse_user,
-                        persist=args.persist,
-                        persist_legacy=args.persist_legacy,
-                        urlencode=args.urlencode)
-    wikiq.process()
+                        persist=persist,
+                        urlencode=args.urlencode,
+                        namespaces=namespaces,
+                        revert_radius=args.revert_radius,
+                        regex_match_revision = args.regex_match_revision,
+                        regex_revision_label = args.regex_revision_label,
+                        regex_match_comment = args.regex_match_comment,
+                        regex_comment_label = args.regex_comment_label)
+
+    wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
 # stop_words = stop_words.split(",")
 