merging pull containing revert-radius with 2nd version of regex scanner w/ unit tests
diff --git a/wikiq b/wikiq
index 8a12d90980f6379d096b6fac0fb70d8f274bdb3f..7a1b846dc08f2f86ff292ceae694ff55ef9a7382 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -3,6 +3,7 @@
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
+
 import argparse
 import sys
 import os, os.path
@@ -21,22 +22,82 @@ from urllib.parse import quote
 TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
 from deltas import SequenceMatcher
+from deltas import SegmentMatcher
+
+class PersistMethod:
+    none = 0
+    sequence = 1
+    segment = 2
+    legacy = 3
 
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
 
+def matchmake(scanned_content, rev_data, regex, label):
+    p = re.compile(regex)
+
+    temp_dict = {}
+    # if there are named capture groups in the regex
+    if bool(p.groupindex):
+        capture_groups = list(p.groupindex.keys())
+
+        # initialize the {capture_group_name:list} for each capture group
+        for cap_group in capture_groups:
+            temp_dict["{}_{}".format(label, cap_group)] = []
+
+        # if there are matches of some sort in this revision content, fill the lists for each cap_group
+        if p.search(scanned_content) is not None:
+            m = re.finditer(p,scanned_content)
+            matchobjects = list(m)
+
+            for cap_group in capture_groups:
+                temp_list = []
+                for match in matchobjects:
+                    # we only want to add the match for the capture group if the match is not None
+                    if match.group(cap_group) != None:
+                        temp_list.append(match.group(cap_group))
+
+                # if temp_list of matches is empty just make that column None
+                if len(temp_list)==0:
+                    temp_dict["{}_{}".format(label, cap_group)] = None
+                # else we put in the list we made in the for-loop above
+                else:
+                    temp_dict["{}_{}".format(label, cap_group)] = ', '.join(temp_list)
+        
+        # there are no matches at all in this revision content, we default values to None
+        else:
+            for cap_group in capture_groups:
+                temp_dict["{}_{}".format(label, cap_group)] = None
+
+    # there are no capture groups, we just search for all the matches of the regex
+    else:
+        #given that there are matches to be made
+        if p.search(scanned_content) is not None:
+            m = p.findall(scanned_content)
+            temp_dict[label] = ', '.join(m)
+        else:
+            temp_dict[label] = None    
+    # update rev_data with our new columns
+    rev_data.update(temp_dict)
+    print(rev_data.keys())
+    return rev_data
+
 
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
         self.collapse_user = collapse_user
         self.mwiterator = Dump.from_file(self.fh)
 
+        self.namespace_map = { ns.id : ns.name for ns in
+                               self.mwiterator.site_info.namespaces }
         self.__pages = self.load_pages()
 
     def load_pages(self):
         for page in self.mwiterator:
-            yield WikiqPage(page, collapse_user=self.collapse_user)
+            yield WikiqPage(page,
+                            namespace_map = self.namespace_map,
+                            collapse_user=self.collapse_user)
 
     def __iter__(self):
         return self.__pages
 
@@ -49,13 +110,19 @@ class WikiqPage():
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')
     
-    def __init__(self, page, collapse_user=False):
+    def __init__(self, page, namespace_map, collapse_user=False):
         self.id = page.id
-        self.title = page.title
         self.namespace = page.namespace
-        self.redirect = page.redirect
+        # following mwxml, we assume namespace 0 in cases where
+        # page.namespace is inconsistent with namespace_map
+        if page.namespace not in namespace_map:
+            self.title = page.title
+            page.namespace = 0
+        if page.namespace != 0:
+            self.title = ':'.join([namespace_map[page.namespace], page.title])
+        else:
+            self.title = page.title
         self.restrictions = page.restrictions
-        
         self.collapse_user = collapse_user
         self.mwpage = page
         self.__revisions = self.rev_list()
@@ -110,19 +177,30 @@ class WikiqPage():
         return next(self.__revisions)
 
 class WikiqParser():
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15):
+        """ 
+        Parameters:
+           persist : what persistence method to use. Takes a PersistMethod value
+        """
 
 
-
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
-        
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
         self.persist = persist
-        self.persist_legacy = persist_legacy
         self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
-        
+        self.revert_radius = revert_radius
+        self.regex_match_revision = regex_match_revision
+        self.regex_revision_label = regex_revision_label
+        self.regex_match_comment = regex_match_comment
+        self.regex_comment_label = regex_comment_label
+
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
+
     def __get_namespace_from_title(self, title):
         default_ns = None
 
@@ -138,6 +216,7 @@ class WikiqParser():
         # if we've made it this far with no matches, we return the default namespace
         return default_ns
 
+
     def process(self):
 
         # create a regex that creates the output filename
@@ -157,29 +236,81 @@ class WikiqParser():
 
         # Iterate through pages
         for page in dump:
 
-            rev_detector = mwreverts.Detector()
+            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
 
 
-            if self.persist or self.persist_legacy:
+            # skip namespaces not in the filter
+            if self.namespace_filter is not None:
+                if namespace not in self.namespace_filter:
+                    continue
+
+            print(self.revert_radius)
+            rev_detector = mwreverts.Detector(radius = self.revert_radius)
+
+
+            if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
 
-                if not self.persist_legacy:
+                if self.persist == PersistMethod.sequence:
                     state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
                                                     revert_radius=PERSISTENCE_RADIUS)
 
+                elif self.persist == PersistMethod.segment:
+                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                # self.persist == PersistMethod.legacy
                 else:
                     from mw.lib import persistence
                     state = persistence.State()
 
             # Iterate through a page's revisions
             for rev in page:
+                
+                # initialize rev_data
+                rev_data = {}
 
 
-                rev_data = {'revid' : rev.id,
-                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
-                            'articleid' : page.id,
-                            'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
-                            'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
-                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" } 
+                # if the command line args only gave a label (and no regular expression is given)
+                if (self.regex_revision_label != None and self.regex_match_revision == None) or (self.regex_comment_label != None and self.regex_match_comment == None):
+                    sys.exit('The given regex label(s) has no corresponding regex to search for.')
+                
+                # if there's anything in the list of regex_match_revision
+                if self.regex_match_revision is not None:
+                    if (self.regex_revision_label == None) or (len(self.regex_match_revision) != len(self.regex_revision_label)):
+                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+                    
+                    # initialize and construct the list of regex-label tuples
+                    pairs = []
+                    for i in range(0,len(self.regex_match_revision)):
+                        pairs.append((self.regex_match_revision[i], self.regex_revision_label[i]))
+
+                    # for each regex/label pair, we now run matchmake to check and output columns
+                    for pair in pairs:
+                        # pair[0] corresponds to the regex, pair[1] to the label
+                        rev_data = matchmake(rev.text, rev_data, pair[0], pair[1])
+                
+                # if there's anything in the list of regex_match_comment
+                if self.regex_match_comment is not None:
+                    if (self.regex_comment_label == None) or (len(self.regex_match_comment) != len(self.regex_comment_label)):
+                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+                    
+                    # initialize and construct the list of regex-label tuples
+                    pairs = []
+                    for i in range(0,len(self.regex_match_comment)):
+                        pairs.append((self.regex_match_comment[i], self.regex_comment_label[i]))
+
+                    # for each regex/label pair, we now run matchmake to check and output columns
+                    for pair in pairs:
+                        # pair[0] corresponds to the regex, pair[1] to the label
+                        rev_data = matchmake(rev.comment, rev_data, pair[0], pair[1])
+
+                # we fill out the rest of the data structure now
+                rev_data['revid'] = rev.id
+                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
+                rev_data['articleid'] = page.id
+                rev_data['editor_id'] = "" if rev.deleted.user == True or rev.user.id is None else rev.user.id
+                rev_data['title'] = '"' + page.title + '"'
+                rev_data['namespace'] = namespace
+                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"
 
                 # if revisions are deleted, /many/ things will be missing
                 if rev.deleted.text:
 
@@ -204,7 +335,7 @@ class WikiqParser():
 
                     # TODO rev.bytes doesn't work.. looks like a bug
                     rev_data['text_chars'] = len(rev.text)
 
-               
+
                     # generate revert data
                     revert = rev_detector.process(text_sha1, rev.id)
                     
@@ -238,14 +369,13 @@ class WikiqParser():
                 if self.collapse_user:
                     rev_data['collapsed_revs'] = rev.collapsed_revs
 
-                if self.persist or self.persist_legacy:
+                if self.persist != PersistMethod.none:
                     if rev.deleted.text:
-
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
 
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
 
-                        if not self.persist_legacy:
+                        if self.persist != PersistMethod.legacy:
                             _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
 
                         else:
@@ -270,7 +400,7 @@ class WikiqParser():
 
                 rev_count += 1
 
 
-            if self.persist or self.persist_legacy:
+            if self.persist != PersistMethod.none:
                 # print out metadata for the last RADIUS revisions
                 for i, item in enumerate(window):
                     # if the window was full, we've already printed item 0
@@ -344,17 +474,53 @@ parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
 parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                     help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
 
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow.  The defualt is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
 
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
 
-parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
-                    help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of namspace to include. Can be specified more than once.")
+
+parser.add_argument('-rr',
+                    '--revert-radius',
+                    dest="revert_radius",
+                    type=int,
+                    action='store',
+                    default=15,
+                    help="Number of edits to check when looking for reverts (default: 15)")
+
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+                    help="The label for the outputted column based on matching the regex in revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+                    help="The regular expression to search for in comments of revisions.")
+
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+                    help="The label for the outputted column based on matching the regex in comments.")
 
 args = parser.parse_args()
 
 
+# set persistence method
+
+if args.persist is None:
+    persist = PersistMethod.none
+elif args.persist == "segment":
+    persist = PersistMethod.segment
+elif args.persist == "legacy":
+    persist = PersistMethod.legacy
+else:
+    persist = PersistMethod.sequence
+
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
@@ -373,12 +539,17 @@ if len(args.dumpfiles) > 0:
             filename = os.path.join(output_dir, os.path.basename(filename))
             output_file = open_output_file(filename)
 
-        wikiq = WikiqParser(input_file, output_file, 
+        wikiq = WikiqParser(input_file,
+                            output_file,
                             collapse_user=args.collapse_user,
-                            persist=args.persist,
-                            persist_legacy=args.persist_legacy,
-                            urlencode=args.urlencode)
-
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces=namespaces,
+                            revert_radius=args.revert_radius,
+                            regex_match_revision = args.regex_match_revision,
+                            regex_revision_label = args.regex_revision_label,
+                            regex_match_comment = args.regex_match_comment,
+                            regex_comment_label = args.regex_comment_label)
 
         wikiq.process()
 
 
@@ -386,12 +557,20 @@ if len(args.dumpfiles) > 0:
         input_file.close()
         output_file.close()
 else:
-    wikiq = WikiqParser(sys.stdin, sys.stdout,
+    wikiq = WikiqParser(sys.stdin,
+                        sys.stdout,
                         collapse_user=args.collapse_user,
-                        persist=args.persist,
-                        persist_legacy=args.persist_legacy,
-                        urlencode=args.urlencode)
-    wikiq.process()
+                        persist=persist,
+                        #persist_legacy=args.persist_legacy,
+                        urlencode=args.urlencode,
+                        namespaces=namespaces,
+                        revert_radius=args.revert_radius,
+                        regex_match_revision = args.regex_match_revision,
+                        regex_revision_label = args.regex_revision_label,
+                        regex_match_comment = args.regex_match_comment,
+                        regex_comment_label = args.regex_comment_label)
+
+    wikiq.process() 
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
 # stop_words = stop_words.split(",")
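
For reference, the matchmake() helper added above produces one output column per regex label, or one column per "<label>_<capture group>" pair when the pattern defines named capture groups, joining multiple matches with ", " and falling back to None when nothing matches. Below is a minimal, self-contained sketch of that behavior; the label, pattern, and sample text are hypothetical and not taken from the commit:

    import re

    label = "cite"                              # hypothetical, e.g. supplied via -RPl
    regex = r"<ref>(?P<source>.*?)</ref>"       # hypothetical, e.g. supplied via -RP
    content = "a claim<ref>Smith 2008</ref> and another<ref>Jones 2011</ref>"

    p = re.compile(regex)
    row = {}
    if p.groupindex:
        # named capture groups: one column per "<label>_<group>"
        for cap_group in p.groupindex:
            hits = [m.group(cap_group) for m in p.finditer(content)
                    if m.group(cap_group) is not None]
            row["{}_{}".format(label, cap_group)] = ', '.join(hits) if hits else None
    else:
        # no named groups: a single "<label>" column holding all matches
        hits = p.findall(content)
        row[label] = ', '.join(hits) if hits else None

    print(row)  # {'cite_source': 'Smith 2008, Jones 2011'}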
 

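A hypothetical invocation combining the options introduced in this diff might look like the following (the dump filename is invented; the positional dump-file argument already exists in wikiq):

    ./wikiq enwiki-sample.xml.bz2 -n 0 -rr 20 -p sequence -RP '<ref>(?P<source>.*?)</ref>' -RPl cite

Each -RP/-CP pattern must be paired with a corresponding -RPl/-CPl label; process() exits with an error if the counts do not match.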