]> code.communitydata.science - mediawiki_dump_tools.git/commitdiff
Merge branch 'master' into regex_scanner
authorgroceryheist <nathante@uw.edu>
Sun, 6 Oct 2019 01:17:03 +0000 (18:17 -0700)
committergroceryheist <nathante@uw.edu>
Sun, 6 Oct 2019 01:17:03 +0000 (18:17 -0700)
1  2 
wikiq

diff --combined wikiq
index ca65114416b644b05dd0e9aca484217cbb23d05d,632b05cfcad7d7544cf802db1ef37511293f68cf..b982eaa5f0a8bc32dd7f5dcae77941339ad38304
--- 1/wikiq
--- 2/wikiq
+++ b/wikiq
@@@ -34,29 -34,6 +34,29 @@@ def calculate_persistence(tokens_added)
      return(sum([(len(x.revisions)-1) for x in tokens_added]),
             len(tokens_added))
  
def matchmaker(rev_data, regular_expression, scanner, rev):
    """Scan the requested fields of a revision for regex matches.

    Parameters:
       rev_data : dict of output fields for this revision; mutated in place
                  and also returned.
       regular_expression : pattern (string or compiled) passed to re.finditer.
       scanner : list of locations to search; each must be 'comment' or 'text'
                 (anything else aborts via sys.exit, as before).
       rev : revision object; only its .comment and .text attributes are read.

    Sets rev_data['matches'] to a comma-prefixed string of every match found
    across ALL requested locations (e.g. ",m1,m2"), or None when nothing
    matched anywhere.  The original code overwrote rev_data['matches'] on
    each loop iteration, so only the last location's result survived; matches
    are now accumulated across locations.
    """
    found = []
    for location in scanner:  # e.g. ['comment'], ['text'], or both
        if location == "comment":
            matching_string = rev.comment
        elif location == "text":
            matching_string = rev.text
        else:
            sys.exit("regex scanner location must be 'comment' or 'text'.")

        # Deleted/suppressed revisions can leave comment or text as None
        # (presumably — TODO confirm against the dump reader); treat that
        # as "nothing to match" rather than crashing re.finditer.
        if matching_string is None:
            continue

        # One finditer pass collects all matches; the original scanned the
        # string twice (re.search, then re.finditer over the same input).
        found.extend(m.group(0) for m in re.finditer(regular_expression, matching_string))

    if found:
        # Preserve the original output format: a leading comma, then the
        # comma-joined match strings ("gleaned in post-processing").
        rev_data['matches'] = "," + ",".join(found)
    else:
        rev_data['matches'] = None

    return rev_data
 +
 +
 +
  class WikiqIterator():
      def __init__(self, fh, collapse_user=False):
          self.fh = fh
@@@ -151,7 -128,7 +151,7 @@@ class WikiqPage()
  
  class WikiqParser():
      
 -    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None):
 +    def __init__(self, input_file, output_file, scanner, match_regex, collapse_user=False, persist=None, urlencode=False, namespaces = None):
          """ 
          Parameters:
             persist : what persistence method to use. Takes a PersistMethod value
          self.printed_header = False
          self.namespaces = []
          self.urlencode = urlencode
 +        self.scanner = scanner
 +        self.match_regex = match_regex
 +
          if namespaces is not None:
              self.namespace_filter = set(namespaces)
          else:
          # if we've made it this far with no matches, we return the default namespace
          return default_ns
  
 +
      def process(self):
  
          # create a regex that creates the output filename
  
              # Iterate through a page's revisions
              for rev in page:
 -
 -                rev_data = {'revid' : rev.id,
 -                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
 -                            'articleid' : page.id,
 -                            'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
 -                            'title' : '"' + page.title + '"',
 -                            'namespace' : namespace,
 -                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" } 
 +                ## m = re.finditer() #so we can find all instances
 +                ## m.groupdict() #so we can look at them all with their names
 +
 +                # initialize rev_dat
 +                rev_data = {}
 +
 +                if self.scanner is not None: # we know we want to do a regex search 
 +                    ## comment = want to look in comment attached to revision
 +                    ## text = want to look in revision text
 +
 +                    ### call the scanner function
 +                    rev_data = matchmaker(rev_data, self.match_regex, self.scanner, rev)
 +      
 +                if self.scanner is not None and rev_data['matches'] is None:
 +                    next
 +
 +                # we fill out the rest of the data structure now
 +                rev_data['revid'] = rev.id
 +                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
 +                rev_data['articleid'] = page.id
 +                rev_data['editor_id'] = "" if rev.deleted.user == True or rev.user.id is None else rev.user.id
 +                rev_data['title'] = '"' + page.title + '"'
 +                rev_data['namespace'] = namespace
 +                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"
  
                  # if revisions are deleted, /many/ things will be missing
                  if rev.deleted.text:
  
                      # TODO rev.bytes doesn't work.. looks like a bug
                      rev_data['text_chars'] = len(rev.text)
 -               
 +
                      # generate revert data
                      revert = rev_detector.process(text_sha1, rev.id)
                      
@@@ -421,7 -378,7 +421,7 @@@ parser.add_argument('-s', '--stdout', d
  parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                      help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
  
- parser.add_argument('-p', '--persistence', dest="persist", default="", const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+ parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may be slow.  The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
  
  parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
  parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                    help="Id number of namespace to include. Can be specified more than once.")
  
 +parser.add_argument('-rs', '--regex-scanner', dest="scanner",type=str, action='append',
 +                    help="Find the regex match specified by -R/--match searching in: (1) comment (2) text.")
  
 +parser.add_argument('-R', '--match', dest="match_regex", type=str, 
 +                    help="The regular expression you would like to find in the string and put in capture group")
  
  args = parser.parse_args()
  
@@@ -476,9 -429,7 +476,9 @@@ if len(args.dumpfiles) > 0
                              collapse_user=args.collapse_user,
                              persist=persist,
                              urlencode=args.urlencode,
 -                            namespaces = namespaces)
 +                            namespaces = namespaces,
 +                            match_regex=args.match_regex, # adding in the new 2 args for regex searching
 +                            scanner=args.scanner)
  
          wikiq.process()
  
@@@ -489,11 -440,9 +489,11 @@@ else
      wikiq = WikiqParser(sys.stdin, sys.stdout,
                          collapse_user=args.collapse_user,
                          persist=persist,
 -                        persist_legacy=args.persist_legacy,
 +                        #persist_legacy=args.persist_legacy,
                          urlencode=args.urlencode,
 -                        namespaces = namespaces)
 +                        namespaces = namespaces,
 +                        match_regex=args.match_regex, # adding in the new 2 args for regex searching
 +                        scanner=args.scanner)
      wikiq.process()
  
  # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"

Community Data Science Collective || Want to submit a patch?