Merge branch 'master' into regex_scanner
diff --git a/wikiq b/wikiq
index 8b741e3ee9a1f326290f3e48633c90386cb57e98..b982eaa5f0a8bc32dd7f5dcae77941339ad38304 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -34,6 +34,29 @@ def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
+def matchmaker(rev_data, regular_expression, scanner, rev):
+    # scanner holds the locations passed via -rs: 'comment' and/or 'text'
+    for location in scanner:
+        if location == "comment":
+            matching_string = rev.comment
+        elif location == "text":
+            matching_string = rev.text
+        else:
+            sys.exit("regex scanner location must be 'comment' or 'text'.")
+
+        # collect every match in this location; guard against deleted (None) fields
+        found = [m.group(0) for m in re.finditer(regular_expression, matching_string or "")]
+        if found:
+            # join the matched substrings into one comma-separated output field
+            rev_data['matches'] = ",".join(found)
+        else:
+            rev_data['matches'] = None
+        # NB: when several locations are given, the last one's result wins
+
+    return rev_data
+
+
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
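Review note: a minimal standalone sketch of the behavior matchmaker is intended to have, runnable on its own (the Rev namedtuple and the sample pattern are hypothetical stand-ins, not wikiq or mwxml objects):

    import re
    from collections import namedtuple

    # hypothetical stand-in for an mwxml revision with the two scannable fields
    Rev = namedtuple('Rev', ['comment', 'text'])

    def matchmaker(rev_data, regular_expression, scanner, rev):
        for location in scanner:
            matching_string = rev.comment if location == "comment" else rev.text
            found = [m.group(0) for m in re.finditer(regular_expression, matching_string or "")]
            # last location given wins, mirroring the diff above
            rev_data['matches'] = ",".join(found) if found else None
        return rev_data

    rev = Rev(comment="copyedit", text="see [[WP:NPOV]] and [[WP:V]]")
    print(matchmaker({}, r"\[\[WP:\w+\]\]", ["text"], rev))
    # -> {'matches': '[[WP:NPOV]],[[WP:V]]'}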
@@ -128,7 +151,7 @@ class WikiqPage():
 
 class WikiqParser():
     
-    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None):
+    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces=None, scanner=None, match_regex=None):
         """ 
         Parameters:
            persist : what persistence method to use. Takes a PersistMethod value
@@ -141,6 +164,9 @@ class WikiqParser():
         self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
+        self.scanner = scanner
+        self.match_regex = match_regex
+
         if namespaces is not None:
             self.namespace_filter = set(namespaces)
         else:
@@ -161,6 +187,7 @@ class WikiqParser():
         # if we've made it this far with no matches, we return the default namespace
         return default_ns
 
+
     def process(self):
 
         # create a regex that creates the output filename
@@ -207,14 +234,30 @@ class WikiqParser():
 
             # Iterate through a page's revisions
             for rev in page:
-
-                rev_data = {'revid' : rev.id,
-                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
-                            'articleid' : page.id,
-                            'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
-                            'title' : '"' + page.title + '"',
-                            'namespace' : namespace,
-                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" } 
+                # initialize rev_data
+                rev_data = {}
+
+                if self.scanner is not None:
+                    # search the locations requested with -rs ('comment' and/or
+                    # 'text') for the pattern given with -R/--match
+                    rev_data = matchmaker(rev_data, self.match_regex, self.scanner, rev)
+
+                    # skip revisions that matched nowhere ('continue', not a bare 'next')
+                    if rev_data['matches'] is None:
+                        continue
+
+                # we fill out the rest of the data structure now
+                rev_data['revid'] = rev.id
+                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
+                rev_data['articleid'] = page.id
+                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
+                rev_data['title'] = '"' + page.title + '"'
+                rev_data['namespace'] = namespace
+                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"
 
                 # if revisions are deleted, /many/ things will be missing
                 if rev.deleted.text:
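Review note: a runnable sketch of the match-then-skip flow the revision loop above implements (Rev, revisions, and the pattern are hypothetical stand-ins):

    import re
    from collections import namedtuple

    Rev = namedtuple('Rev', ['id', 'text'])
    revisions = [Rev(1, "plain prose"), Rev(2, "see [[WP:NPOV]]")]

    for rev in revisions:
        m = re.search(r"\[\[WP:\w+\]\]", rev.text)
        if m is None:
            continue  # 'continue' skips the revision; a bare 'next' would not
        print(rev.id, m.group(0))  # only revision 2 is printed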
@@ -239,7 +282,7 @@ class WikiqParser():
 
                     # TODO rev.bytes doesn't work.. looks like a bug
                     rev_data['text_chars'] = len(rev.text)
-               
+
                     # generate revert data
                     revert = rev_detector.process(text_sha1, rev.id)
                     
@@ -378,7 +421,7 @@ parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
 parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                     help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
 
-parser.add_argument('-p', '--persistence', dest="persist", default="sequence", const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
                     help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow.  The defualt is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
 
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
@@ -387,7 +430,11 @@ parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
 parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                     help="Id number of namspace to include. Can be specified more than once.")
 
+parser.add_argument('-rs', '--regex-scanner', dest="scanner", type=str, action='append',
+                    help="Where to search for the regex given with -R/--match: 'comment' and/or 'text'. Can be specified more than once.")
 
+parser.add_argument('-R', '--match', dest="match_regex", type=str,
+                    help="The regular expression to search for. All matching substrings are reported in the 'matches' output column.")
 
 args = parser.parse_args()
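Review note: a hedged example invocation of the two new options (the dump filename is hypothetical). Because -rs uses action='append', it can be passed once per location to scan both fields:

    ./wikiq enwiki-sample.xml.bz2 -R '\[\[WP:\w+\]\]' -rs text -rs comment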
 
@@ -429,7 +476,9 @@ if len(args.dumpfiles) > 0:
                             collapse_user=args.collapse_user,
                             persist=persist,
                             urlencode=args.urlencode,
-                            namespaces = namespaces)
+                            namespaces = namespaces,
+                            match_regex=args.match_regex, # pass through the two new regex-scanning arguments
+                            scanner=args.scanner)
 
         wikiq.process()
 
@@ -440,9 +489,11 @@ else:
     wikiq = WikiqParser(sys.stdin, sys.stdout,
                         collapse_user=args.collapse_user,
                         persist=persist,
-                        persist_legacy=args.persist_legacy,
                         urlencode=args.urlencode,
-                        namespaces = namespaces)
+                        namespaces = namespaces,
+                        match_regex=args.match_regex, # pass through the two new regex-scanning arguments
+                        scanner=args.scanner)
     wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
