From: sohyeonhwang
Date: Thu, 7 Nov 2019 19:28:17 +0000 (-0600)
Subject: merging pull containing revert-radius with 2nd version of regex scanner w/ unit tests
X-Git-Url: https://code.communitydata.science/mediawiki_dump_tools.git/commitdiff_plain/f147e1d899ea269dad715aea393153683353e946?hp=c84844cfb58f343e6632f1a6c57f702bd96c616d

merging pull containing revert-radius with 2nd version of regex scanner w/ unit tests
---

diff --git a/test/Wikiq_Unit_Test.py b/test/Wikiq_Unit_Test.py
index 9011d79..159fd10 100644
--- a/test/Wikiq_Unit_Test.py
+++ b/test/Wikiq_Unit_Test.py
@@ -282,6 +282,119 @@ class Test_Stdout(unittest.TestCase):
         test = pd.read_table(StringIO(outs))
         baseline = pd.read_table(baseline_file)
         assert_frame_equal(test,baseline)
-
+
+class Test_Regex(unittest.TestCase):
+
+    def setUp(self):
+        self.wiki = 'regextest'
+        self.wikiq_out_name = self.wiki + '.tsv'
+        self.infile = "{0}.xml.bz2".format(self.wiki)
+
+        self.input_dir = "dumps"
+        self.input_file = os.path.join(".", self.input_dir, self.infile)
+
+        if not os.path.exists("test_output"):
+            os.mkdir("test_output")
+
+        self.test_output_dir = os.path.join(".", "test_output")
+        self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)
+        # we have two base calls: one for checking inputs and the other for checking outputs
+        self.base_call = "../wikiq {0}"
+        self.base_call_outs = "../wikiq {0} -o {1}"
+
+        self.baseline_output_dir = "baseline_output"
+
+        # sample inputs for checking that bad inputs get rejected (see test_regex_inputs)
+        self.bad_input1 = "-RP '\\b\\d+\\b'" # label is missing
+        self.bad_input2 = "-RP 'NPO V' -RP THE -RPl testlabel" # number of regexes and number of labels do not match
+        self.bad_input3 = "-CP '(Tamil|Li)' -RPl testlabel" # comment regex (-CP) paired with a revision label (-RPl)
+        self.bad_input4 = "-CPl testlabel" # regex is missing
+        self.bad_input5 = "-RP '\\b\\w{3}\\b' -RPl threeletters -CP '\\b\\w{3}\\b'" # -CP regex has no -CPl label
+
+        self.bad_inputs_list = [self.bad_input1, self.bad_input2, self.bad_input3, self.bad_input4, self.bad_input5]
+
+        # sample inputs for checking the outcomes of good inputs (see test_basic_regex)
+        self.good_input1 = "-RP '\\b\\d{3}\\b' -RPl threedigits"
+        self.good_input2 = "-RP 'TestCase' -RP 'page' -RPl testcases -RPl page_word"
+        self.good_input3 = "-CP 'Chevalier' -CPl chev_com -RP 'welcome to Wikipedia' -RPl wiki_welcome -CP 'Warning' -CPl warning"
+        self.good_input4 = "-CP 'WP:EVADE' -CPl wp_evade"
+
+        self.good_inputs_list = [self.good_input1, self.good_input2, self.good_input3, self.good_input4]
+
+        # and with capture group(s) (see test_capturegroup_regex)
+        self.cap_input1 = "-RP 'Li Chevalier' -RPl li_cheval -CP '(?P<letters>\\b[a-zA-Z]{3}\\b)|(?P<numbers>\\b\\d+\\b)|(?P<cat>\\bcat\\b)' -CPl three"
+        self.cap_input2 = "-CP '(?P<case_a>\\bTestCaseA\\b)|(?P<case_b>\\bTestCaseB\\b)|(?P<case_c>\\bTestCaseC\\b)|(?P<case_d>\\bTestCaseD\\b)' -CPl testcase -RP '(?P<npov_abbr>npov|NPOV)|(?P<npov_spelled>neutral point of view)' -RPl npov"
+
+        self.cap_inputs_list = [self.cap_input1, self.cap_input2]
+
+    def test_regex_inputs(self):
+        for input in self.bad_inputs_list:
+            call = self.base_call.format(self.input_file)
+            call = call + " --stdout " + input
+            print(call)
+            proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+            stdout, stderr = proc.communicate()
+            #print(proc.returncode)
+
+            # check that the bad input was caught and that sys.exit stopped the run
+            print(stderr.decode("utf-8"))
+            self.assertNotEqual(proc.returncode, 0)
+
+    def test_basic_regex(self):
+        i = 1
+        for input in self.good_inputs_list:
+
+            test_filename = "basic_{0}_{1}.tsv".format(self.wikiq_out_name[:-4], str(i))
+            #print(test_filename)
+            test_file = os.path.join(self.test_output_dir, test_filename)
+            if os.path.exists(test_file):
+                os.remove(test_file)
+
+            call = self.base_call_outs.format(self.input_file, self.test_output_dir)
+            call = call + " " + input
+            print(call)
+
+            proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+            proc.wait()
+
+            copyfile(self.call_output, test_file)
+            # truncate the shared output file so the next run starts clean
+            f = open(self.call_output, 'w')
+            f.close()
+
+            # TODO: no baseline file exists yet to compare this output against
+            # baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+            i += 1
+
+        # TODO: a proper assertion still needs to go here; for now the generated
+        # files were inspected manually and look correct
+
+    def test_capturegroup_regex(self):
+        i = 1
+        for input in self.cap_inputs_list:
+            test_filename = "capturegroup_{0}_{1}.tsv".format(self.wikiq_out_name[:-4], str(i))
+            print(test_filename)
+            test_file = os.path.join(self.test_output_dir, test_filename)
+            if os.path.exists(test_file):
+                os.remove(test_file)
+
+            call = self.base_call_outs.format(self.input_file, self.test_output_dir)
+            call = call + " " + input
+            print(call)
+
+            proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+            proc.wait()
+
+            copyfile(self.call_output, test_file)
+            # truncate the shared output file so the next run starts clean
+            f = open(self.call_output, 'w')
+            f.close()
+
+            # TODO: no baseline file exists yet to compare this output against
+            # baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+            i += 1
+
+        # TODO: a proper assertion still needs to go here; for now the generated
+        # files were inspected manually and look correct
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/wikiq b/wikiq
index 67785fe..7a1b846 100755
--- a/wikiq
+++ b/wikiq
@@ -34,6 +34,56 @@ def calculate_persistence(tokens_added):
 
     return(sum([(len(x.revisions)-1) for x in tokens_added]), len(tokens_added))
 
+def matchmake(scanned_content, rev_data, regex, label):
+    p = re.compile(regex)
+
+    temp_dict = {}
+    # if there are named capture groups in the regex
+    if bool(p.groupindex):
+        capture_groups = list(p.groupindex.keys())
+
+        # initialize the {capture_group_name: list} for each capture group
+        for cap_group in capture_groups:
+            temp_dict["{}_{}".format(label, cap_group)] = []
+
+        # if there are matches of some sort in this revision content, fill the lists for each cap_group
+        if p.search(scanned_content) is not None:
+            matchobjects = list(p.finditer(scanned_content))
+
+            for cap_group in capture_groups:
+                temp_list = []
+                for match in matchobjects:
+                    # we only want to add the match for the capture group if the match is not None
+                    if match.group(cap_group) is not None:
+                        temp_list.append(match.group(cap_group))
+
+                # if the list of matches is empty, just make that column None
+                if len(temp_list) == 0:
+                    temp_dict["{}_{}".format(label, cap_group)] = None
+                # else we put in the list we made in the for-loop above
+                else:
+                    temp_dict["{}_{}".format(label, cap_group)] = ', '.join(temp_list)
+
+        # there are no matches at all in this revision content, so default the values to None
+        else:
+            for cap_group in capture_groups:
+                temp_dict["{}_{}".format(label, cap_group)] = None
+
+    # there are no capture groups, so we just collect all the matches of the regex
+    else:
+        # given that there are matches to be made
+        if p.search(scanned_content) is not None:
+            m = p.findall(scanned_content)
+            temp_dict[label] = ', '.join(m)
+        else:
+            temp_dict[label] = None
+
+    # update rev_data with our new columns
+    rev_data.update(temp_dict)
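+    # at this point rev_data has one new column per named capture group
+    # ("label_groupname") or a single "label" column; each value is a
+    # comma-joined string of the matches, or None where nothing matched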
+    return rev_data
+
+
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
@@ -127,8 +177,7 @@ class WikiqPage():
         return next(self.__revisions)
 
 class WikiqParser():
-
-    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15):
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces=None, revert_radius=15):
         """
         Parameters:
            persist : what persistence method to use. Takes a PersistMethod value
@@ -142,6 +191,11 @@ class WikiqParser():
         self.namespaces = []
         self.urlencode = urlencode
         self.revert_radius = revert_radius
+        self.regex_match_revision = regex_match_revision
+        self.regex_revision_label = regex_revision_label
+        self.regex_match_comment = regex_match_comment
+        self.regex_comment_label = regex_comment_label
+
         if namespaces is not None:
             self.namespace_filter = set(namespaces)
         else:
@@ -162,6 +216,7 @@ class WikiqParser():
 
         # if we've made it this far with no matches, we return the default namespace
         return default_ns
+
     def process(self):
 
         # create a regex that creates the output filename
@@ -210,14 +265,52 @@ class WikiqParser():
 
             # Iterate through a page's revisions
             for rev in page:
+
+                # initialize rev_data
+                rev_data = {}
 
-                rev_data = {'revid' : rev.id,
-                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
-                            'articleid' : page.id,
-                            'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
-                            'title' : '"' + page.title + '"',
-                            'namespace' : namespace,
-                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
+                # if the command line args gave a label but no corresponding regular expression
+                if (self.regex_revision_label is not None and self.regex_match_revision is None) or (self.regex_comment_label is not None and self.regex_match_comment is None):
+                    sys.exit('The given regex label(s) have no corresponding regex to search for.')
+
+                # if there's anything in the list of regex_match_revision
+                if self.regex_match_revision is not None:
+                    if (self.regex_revision_label is None) or (len(self.regex_match_revision) != len(self.regex_revision_label)):
+                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+                    # construct the list of regex-label tuples
+                    pairs = list(zip(self.regex_match_revision, self.regex_revision_label))
+
+                    # for each regex/label pair, run matchmake to add the output columns
+                    for regex, label in pairs:
+                        rev_data = matchmake(rev.text, rev_data, regex, label)
+
+                # if there's anything in the list of regex_match_comment
+                if self.regex_match_comment is not None:
+                    if (self.regex_comment_label is None) or (len(self.regex_match_comment) != len(self.regex_comment_label)):
+                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+                    # construct the list of regex-label tuples
+                    pairs = list(zip(self.regex_match_comment, self.regex_comment_label))
+
+                    # for each regex/label pair, run matchmake to add the output columns
+                    for regex, label in pairs:
+                        rev_data = matchmake(rev.comment, rev_data, regex, label)
+
+                # we fill out the rest of the data structure now
+                rev_data['revid'] = rev.id
+                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
+                rev_data['articleid'] = page.id
+                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
+                rev_data['title'] = '"' + page.title + '"'
+                rev_data['namespace'] = namespace
+                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"
 
                 # if revisions are deleted, /many/ things will be missing
                 if rev.deleted.text:
@@ -242,7 +335,7 @@ class WikiqParser():
 
                 # TODO rev.bytes doesn't work.. looks like a bug
                 rev_data['text_chars'] = len(rev.text)
-
+
                 # generate revert data
                 revert = rev_detector.process(text_sha1, rev.id)
 
@@ -398,7 +491,17 @@ parser.add_argument('-rr',
                     default=15,
                     help="Number of edits to check when looking for reverts (default: 15)")
 
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+                    help="The label for the output column produced by matching the regex in revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+                    help="The regular expression to search for in comments of revisions.")
+
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+                    help="The label for the output column produced by matching the regex in comments.")
 
 args = parser.parse_args()
 
@@ -442,7 +545,11 @@ if len(args.dumpfiles) > 0:
                             persist=persist,
                             urlencode=args.urlencode,
                             namespaces=namespaces,
-                            revert_radius=args.revert_radius)
+                            revert_radius=args.revert_radius,
+                            regex_match_revision=args.regex_match_revision,
+                            regex_revision_label=args.regex_revision_label,
+                            regex_match_comment=args.regex_match_comment,
+                            regex_comment_label=args.regex_comment_label)
 
         wikiq.process()
 
@@ -454,11 +561,16 @@ else:
                         sys.stdout,
                         collapse_user=args.collapse_user,
                         persist=persist,
-                        persist_legacy=args.persist_legacy,
+                        #persist_legacy=args.persist_legacy,
                         urlencode=args.urlencode,
                         namespaces=namespaces,
-                        revert_radius=args.revert_radius)
-    wikiq.process()
+                        revert_radius=args.revert_radius,
+                        regex_match_revision=args.regex_match_revision,
+                        regex_revision_label=args.regex_revision_label,
+                        regex_match_comment=args.regex_match_comment,
+                        regex_comment_label=args.regex_comment_label)
+
+    wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
 # stop_words = stop_words.split(",")
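A minimal usage sketch of the new options (not part of the commit; the dump path, output directory, patterns, and labels are taken from the unit-test fixtures above):

    ./wikiq dumps/regextest.xml.bz2 -o test_output \
        -RP '\b\d{3}\b' -RPl threedigits \
        -CP 'WP:EVADE' -CPl wp_evade

-RP/-CP can be repeated (action='append'); every pattern must be paired with an -RPl/-CPl label, and each pair yields one column in the output TSV, or one column per named capture group (named label_groupname).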