Merge branch 'master' into regex_scanner
diff --git a/wikiq b/wikiq
index 9260f35710d029e99f2c4bffe470aa000e7f1615..b982eaa5f0a8bc32dd7f5dcae77941339ad38304 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -3,7 +3,7 @@
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
-import pdb
+
 import argparse
 import sys
 import os, os.path
@@ -13,7 +13,7 @@ from subprocess import Popen, PIPE
 from collections import deque
 from hashlib import sha1
 
-from mw.xml_dump import Iterator
+from mwxml import Dump
 
 from deltas.tokenizers import wikitext_split
 import mwpersistence
@@ -22,22 +22,55 @@ from urllib.parse import quote
 TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
 from deltas import SequenceMatcher
+from deltas import SegmentMatcher
+
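+# enum-like constants selecting the persistence algorithm; process() below
+# dispatches on these values when constructing the diff/persistence state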
+class PersistMethod:
+    none = 0
+    sequence = 1
+    segment = 2
+    legacy = 3
 
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
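+# for example, a token added in revision r that survives the next three
+# revisions within the persistence window contributes 3 to the first sum;
+# the second element is simply the number of tokens added in r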
 
+def matchmaker(rev_data, regular_expression, scanner, rev):
+    # scanner is the list of locations ('comment', 'text') accumulated by
+    # repeated -rs/--regex-scanner arguments
+    blob = ""
+    for location in scanner:
+        if location == "comment":
+            matching_string = rev.comment
+        elif location == "text":
+            matching_string = rev.text
+        else:
+            sys.exit("regex scanner location must be 'comment' or 'text'.")
+
+        # deleted or empty fields come through as None; skip them
+        if matching_string is not None:
+            for result in re.finditer(regular_expression, matching_string):
+                blob = blob + "," + result.group(0)
+
+    # the comma-separated match list is gleaned in post-processing;
+    # None signals that no location matched
+    rev_data['matches'] = blob if blob else None
+
+    return rev_data
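+
+# For example, with -R '[Bb]ot' and -rs comment, a revision comment of
+# "bot revert of another bot" yields matches == ",bot,bot" (the comma-joined
+# match list built above), while non-matching revisions get matches == None.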
+
 
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
         self.collapse_user = collapse_user
-        self.mwiterator = Iterator.from_file(self.fh)
+        self.mwiterator = Dump.from_file(self.fh)
+        self.namespace_map = { ns.id : ns.name for ns in
+                               self.mwiterator.site_info.namespaces }
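+        # e.g. {0: '', 1: 'Talk', 2: 'User', ...} for a standard MediaWiki
+        # dump, where the article namespace 0 has the empty string as its name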
         self.__pages = self.load_pages()
 
     def load_pages(self):
         for page in self.mwiterator:
-            yield WikiqPage(page, collapse_user=self.collapse_user)
+            yield WikiqPage(page,
+                            namespace_map = self.namespace_map,
+                            collapse_user=self.collapse_user)
 
     def __iter__(self):
         return self.__pages
@@ -50,13 +83,19 @@ class WikiqPage():
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')
     
-    def __init__(self, page, collapse_user=False):
+    def __init__(self, page, namespace_map, collapse_user=False):
         self.id = page.id
-        self.title = page.title
         self.namespace = page.namespace
-        self.redirect = page.redirect
+        # following mwxml, we assume namespace 0 in cases where
+        # page.namespace is inconsistent with namespace_map
+        if page.namespace not in namespace_map:
+            page.namespace = 0
+        if page.namespace != 0:
+            self.title = ':'.join([namespace_map[page.namespace], page.title])
+        else:
+            self.title = page.title
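+        # e.g. a page in namespace 1 titled "Foo" is rendered as "Talk:Foo"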
         self.restrictions = page.restrictions
-        
         self.collapse_user = collapse_user
         self.mwpage = page
         self.__revisions = self.rev_list()
@@ -80,7 +119,14 @@ class WikiqPage():
             else:
                 if self.collapse_user:
                     # yield if this is the last edit in a seq by a user and reset
-                    if not rev.contributor.user_text == prev_rev.contributor.user_text:
+                    # also yield if we don't know who the user is
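+                    # (a run of consecutive same-user edits collapses to its
+                    # final revision; collapsed_revs appears to record the
+                    # length of that run, per the resets to 1 below)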
+
+                    if rev.deleted.user or prev_rev.deleted.user:
+                        yield prev_rev
+                        collapsed_revs = 1
+                        rev.collapsed_revs = collapsed_revs
+
+                    elif not rev.user.text == prev_rev.user.text:
                         yield prev_rev
                         collapsed_revs = 1
                         rev.collapsed_revs = collapsed_revs
@@ -93,6 +139,7 @@ class WikiqPage():
                     yield prev_rev
 
             prev_rev = rev
+
         # also yield the final time
         yield prev_rev
 
@@ -103,19 +150,28 @@ class WikiqPage():
         return next(self.__revisions)
 
 class WikiqParser():
+
+    def __init__(self, input_file, output_file, scanner, match_regex, collapse_user=False, persist=None, urlencode=False, namespaces=None):
+        """
+        Parameters:
+           scanner : list of locations ('comment', 'text') to search with match_regex
+           match_regex : regular expression whose matches fill the 'matches' column
+           persist : which persistence method to use. Takes a PersistMethod value
+           namespaces : list of namespace ids to include; None means all namespaces
+        """
 
-
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
-        
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
         self.persist = persist
-        self.persist_legacy = persist_legacy
         self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
-        
+        self.scanner = scanner
+        self.match_regex = match_regex
+
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
+
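+    # maps a title prefix like "Talk:" onto its id via self.namespaces;
+    # falls back to default_ns when no prefix matches (per the comment below)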
     def __get_namespace_from_title(self, title):
         default_ns = None
 
@@ -131,6 +187,7 @@ class WikiqParser():
         # if we've made it this far with no matches, we return the default namespace
         return default_ns
 
+
     def process(self):
 
         # create a regex that creates the output filename
@@ -142,7 +199,7 @@ class WikiqParser():
         dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
 
         # extract list of namespaces
-        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
+        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
 
         page_count = 0
         rev_count = 0
@@ -150,49 +207,82 @@ class WikiqParser():
 
         # Iterate through pages
         for page in dump:
+            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
+
+            # skip namespaces not in the filter
+            if self.namespace_filter is not None:
+                if namespace not in self.namespace_filter:
+                    continue
+
             rev_detector = mwreverts.Detector()
 
-            if self.persist or self.persist_legacy:
+            if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
 
-                if not self.persist_legacy:
+                if self.persist == PersistMethod.sequence:
                     state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
                                                     revert_radius=PERSISTENCE_RADIUS)
 
+                elif self.persist == PersistMethod.segment:
+                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                # self.persist == PersistMethod.legacy
                 else:
                     from mw.lib import persistence
                     state = persistence.State()
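+                # whichever method is selected, the state object's update()/
+                # process() calls below return (operations, tokens_added,
+                # tokens_removed); the deque holds the trailing
+                # PERSISTENCE_RADIUS revisions so a token's persistence can be
+                # settled once the window is full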
 
             # Iterate through a page's revisions
             for rev in page:
-
-                rev_data = {'revid' : rev.id,
-                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
-                            'articleid' : page.id,
-                            'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
-                            'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
-                            'deleted' : "TRUE" if rev.text.deleted else "FALSE" } 
+                # initialize rev_data
+                rev_data = {}
+
+                if self.scanner is not None:
+                    # scan each requested location ('comment' and/or 'text') for the regex
+                    rev_data = matchmaker(rev_data, self.match_regex, self.scanner, rev)
+
+                    # skip revisions where nothing matched
+                    if rev_data['matches'] is None:
+                        continue
+
+                # we fill out the rest of the data structure now
+                rev_data['revid'] = rev.id
+                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
+                rev_data['articleid'] = page.id
+                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
+                rev_data['title'] = '"' + page.title + '"'
+                rev_data['namespace'] = namespace
+                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"
 
                 # if revisions are deleted, /many/ things will be missing
-                if rev.text.deleted:
+                if rev.deleted.text:
                     rev_data['text_chars'] = ""
                     rev_data['sha1'] = ""
                     rev_data['revert'] = ""
                     rev_data['reverteds'] = ""
 
                 else:
+                    # rev.text can be None if the page has no text
+                    if not rev.text:
+                        rev.text = ""
                     # if text exists, we'll check for a sha1 and generate one otherwise
                     if rev.sha1:
                         text_sha1 = rev.sha1
                     else:
                         text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
                     
                     rev_data['sha1'] = text_sha1
 
                     # TODO rev.bytes doesn't work.. looks like a bug
                     rev_data['text_chars'] = len(rev.text)
-               
+
                     # generate revert data
                     revert = rev_detector.process(text_sha1, rev.id)
                     
@@ -206,10 +296,10 @@ class WikiqParser():
                 # if the fact that the edit was minor can be hidden, this might be an issue
                 rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
 
-                if rev.contributor.user_text:
+                if not rev.deleted.user:
                     # wrap user-defined editors in quotes for fread
-                    rev_data['editor'] = '"' + rev.contributor.user_text + '"'
-                    rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
+                    rev_data['editor'] = '"' + rev.user.text + '"'
+                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"
                     
                 else:
                     rev_data['anon'] = ""
@@ -226,17 +316,17 @@ class WikiqParser():
                 if self.collapse_user:
                     rev_data['collapsed_revs'] = rev.collapsed_revs
 
-                if self.persist or self.persist_legacy:
-                    if rev.text.deleted:
+                if self.persist != PersistMethod.none:
+                    if rev.deleted.text:
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
 
-                        if not self.persist_legacy:
+                        if self.persist != PersistMethod.legacy:
                             _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
 
                         else:
-                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id,text_sha1)
+                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
                             
                         window.append((rev.id, rev_data, tokens_added, tokens_removed))
                         
@@ -257,7 +347,7 @@ class WikiqParser():
 
                 rev_count += 1
 
-            if self.persist or self.persist_legacy:
+            if self.persist != PersistMethod.none:
                 # print out metadata for the last RADIUS revisions
                 for i, item in enumerate(window):
                     # if the window was full, we've already printed item 0
@@ -284,7 +374,7 @@ class WikiqParser():
         if self.urlencode:
             for field in TO_ENCODE:
                 rev_data[field] = quote(str(rev_data[field]))
-            
+
         if not self.printed_header:
             print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
             self.printed_header = True
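+            # the header (and each row below) is emitted in sorted-key order,
+            # keeping column order stable however rev_data was populated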
@@ -331,17 +421,39 @@ parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
 parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                     help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
 
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) the number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves but prone to bugs, and slower.")
 
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
-parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
-                    help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of namespace to include. Can be specified more than once.")
+
+parser.add_argument('-rs', '--regex-scanner', dest="scanner", type=str, action='append',
+                    help="Location to search for the regex given by -R/--match: 'comment' or 'text'. Can be specified more than once.")
+
+parser.add_argument('-R', '--match', dest="match_regex", type=str,
+                    help="Regular expression to search for; all matches found in the chosen locations are reported in the 'matches' column.")
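+
+# example invocation (hypothetical dump filename):
+#   wikiq --collapse-user -p segment -n 0 -n 1 -R '[Bb]ot' -rs comment -rs text enwiki.xml.bz2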
 
 args = parser.parse_args()
 
+# set persistence method
+
+if args.persist is None:
+    persist = PersistMethod.none
+elif args.persist == "segment":
+    persist = PersistMethod.segment
+elif args.persist == "legacy":
+    persist = PersistMethod.legacy
+else:
+    persist = PersistMethod.sequence
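+# a bare -p (argparse const='') falls through to the sequence method here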
+
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
@@ -362,10 +474,11 @@ if len(args.dumpfiles) > 0:
 
         wikiq = WikiqParser(input_file, output_file, 
                             collapse_user=args.collapse_user,
-                            persist=args.persist,
-                            persist_legacy=args.persist_legacy,
-                            urlencode=args.urlencode)
-
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces=namespaces,
+                            match_regex=args.match_regex,
+                            scanner=args.scanner)
 
         wikiq.process()
 
@@ -375,9 +488,12 @@ if len(args.dumpfiles) > 0:
 else:
     wikiq = WikiqParser(sys.stdin, sys.stdout,
                         collapse_user=args.collapse_user,
-                        persist=args.persist,
-                        persist_legacy=args.persist_legacy,
-                        urlencode=args.urlencode)
+                        persist=persist,
+                        urlencode=args.urlencode,
+                        namespaces=namespaces,
+                        match_regex=args.match_regex,
+                        scanner=args.scanner)
     wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
