X-Git-Url: https://code.communitydata.science/mediawiki_dump_tools.git/blobdiff_plain/776b73519ac8af42b64875a5cc27125ceff7c861..HEAD:/wikiq

diff --git a/wikiq b/wikiq
index 47dd687..0543a33 100755
--- a/wikiq
+++ b/wikiq
@@ -3,7 +3,7 @@
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
-import pdb
+
 import argparse
 import sys
 import os, os.path
@@ -34,6 +34,7 @@ def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
+
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
@@ -126,14 +127,69 @@ class WikiqPage():
     def __next__(self):
         return next(self.__revisions)
 
+
+class RegexPair(object):
+    def __init__(self, pattern, label):
+        self.pattern = re.compile(pattern)
+        self.label = label
+        self.has_groups = bool(self.pattern.groupindex)
+        if self.has_groups:
+            self.capture_groups = list(self.pattern.groupindex.keys())
+
+    def _make_key(self, cap_group):
+        return ("{}_{}".format(self.label, cap_group))
+
+    def matchmake(self, content, rev_data):
+
+        temp_dict = {}
+        # if there are named capture groups in the regex
+        if self.has_groups:
+
+            # if there are matches of some sort in this revision content, fill the lists for each cap_group
+            if self.pattern.search(content) is not None:
+                m = self.pattern.finditer(content)
+                matchobjects = list(m)
+
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_list = []
+                    for match in matchobjects:
+                        # we only want to add the match for the capture group if the match is not None
+                        if match.group(cap_group) is not None:
+                            temp_list.append(match.group(cap_group))
+
+                    # if temp_list of matches is empty just make that column None
+                    if len(temp_list) == 0:
+                        temp_dict[key] = None
+                    # else we put in the list we made in the for-loop above
+                    else:
+                        temp_dict[key] = ', '.join(temp_list)
+
+            # there are no matches at all in this revision content, we default values to None
+            else:
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_dict[key] = None
+
+        # there are no capture groups, we just search for all the matches of the regex
+        else:
+            # given that there are matches to be made
+            if self.pattern.search(content) is not None:
+                m = self.pattern.findall(content)
+                temp_dict[self.label] = ', '.join(m)
+            else:
+                temp_dict[self.label] = None
+
+        # update rev_data with our new columns
+        rev_data.update(temp_dict)
+        return rev_data
+
+
 class WikiqParser():
-
-    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False):
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces=None, revert_radius=15):
+        """
+        Parameters:
+            persist : what persistence method to use. Takes a PersistMethod value.
+        """
-
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
@@ -141,7 +197,42 @@ class WikiqParser():
         self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
+        self.revert_radius = revert_radius
+
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
+
+        self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
+        self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
+
+    def make_matchmake_pairs(self, patterns, labels):
+        if (patterns is not None and labels is not None) and \
+           (len(patterns) == len(labels)):
+            return [RegexPair(pattern, label) for pattern, label in zip(patterns, labels)]
+        elif (patterns is None and labels is None):
+            return []
+        else:
+            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+    def matchmake(self, rev, rev_data):
+        rev_data = self.matchmake_revision(rev.text, rev_data)
+        rev_data = self.matchmake_comment(rev.comment, rev_data)
+        return rev_data
+
+    def matchmake_revision(self, text, rev_data):
+        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
+
+    def matchmake_comment(self, comment, rev_data):
+        return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)
+
+    def matchmake_pairs(self, text, rev_data, pairs):
+        for pair in pairs:
+            rev_data = pair.matchmake(text, rev_data)
+        return rev_data
+
     def __get_namespace_from_title(self, title):
         default_ns = None
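
The RegexPair plumbing above is easiest to see with a concrete call. A minimal sketch, assuming the class as defined in this diff is available in a REPL; the pattern, label, and content are invented for illustration. A named capture group produces a "<label>_<group>" column, and multiple matches are joined with ", ":

    # Hypothetical demo of RegexPair (not part of wikiq); requires the class above.
    pair = RegexPair(r"\[\[(?P<target>[^\]\|]+)(?:\|[^\]]*)?\]\]", "wikilink")
    rev_data = pair.matchmake("See [[Dog]] and [[Cat|cats]].", {'revid': 1})
    print(rev_data)  # {'revid': 1, 'wikilink_target': 'Dog, Cat'}
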
@@ -157,6 +248,7 @@ class WikiqParser():
         # if we've made it this far with no matches, we return the default namespace
         return default_ns
 
+
     def process(self):
 
         # create a regex that creates the output filename
@@ -176,7 +268,14 @@ class WikiqParser():
 
         # Iterate through pages
         for page in dump:
-            rev_detector = mwreverts.Detector()
+            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
+
+            # skip namespaces not in the filter
+            if self.namespace_filter is not None:
+                if namespace not in self.namespace_filter:
+                    continue
+
+            rev_detector = mwreverts.Detector(radius=self.revert_radius)
 
             if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
@@ -196,14 +295,19 @@ class WikiqParser():
 
             # Iterate through a page's revisions
             for rev in page:
-
-                rev_data = {'revid' : rev.id,
-                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
-                            'articleid' : page.id,
-                            'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
-                            'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
-                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
+
+                # initialize rev_data
+                rev_data = {
+                    'revid': rev.id,
+                    'date_time': rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
+                    'articleid': page.id,
+                    'editor_id': "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
+                    'title': '"' + page.title + '"',
+                    'namespace': namespace,
+                    'deleted': "TRUE" if rev.deleted.text else "FALSE"
+                }
+
+                rev_data = self.matchmake(rev, rev_data)
 
                 # if revisions are deleted, /many/ things will be missing
                 if rev.deleted.text:
@@ -228,7 +332,7 @@ class WikiqParser():
 
                 # TODO rev.bytes doesn't work.. looks like a bug
                 rev_data['text_chars'] = len(rev.text)
-
+
                 # generate revert data
                 revert = rev_detector.process(text_sha1, rev.id)
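
The revert_radius wired in above bounds how many recent revisions mwreverts scans for a matching checksum. A minimal sketch of the Detector API as wikiq uses it; the checksums and revision ids here are invented:

    import mwreverts

    detector = mwreverts.Detector(radius=15)
    detector.process("deadbeef", 1)            # original text
    detector.process("c0ffee00", 2)            # an intervening edit
    revert = detector.process("deadbeef", 3)   # same checksum recurs: a revert
    if revert is not None:
        print(revert.reverting, revert.reverteds, revert.reverted_to)  # 3 [2] 1
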
@@ -368,13 +472,33 @@ parser.add_argument('--collapse-user', dest="collapse_user", action="store_true"
                     help="Operate only on the final revision made by a user within all sequences of consecutive edits made by that user. This can be useful for addressing issues with text persistence measures.")
 
 parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. Use -p=segment for advanced persistence calculation method that is robust to content moves. This might be very slow. Use -p=legacy for legacy behavior.")
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) the number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
 
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
-parser.add_argument('-ns', '--namespace-filter', dest="namespace_filter", type=str, help="Comma-seperate list of namespaces numbers to include")
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of namespace to include. Can be specified more than once.")
+
+parser.add_argument('-rr',
+                    '--revert-radius',
+                    dest="revert_radius",
+                    type=int,
+                    action='store',
+                    default=15,
+                    help="Number of edits to check when looking for reverts (default: 15)")
+
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+                    help="The label for the output column based on matching the regex in revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+                    help="The regular expression to search for in comments of revisions.")
+
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+                    help="The label for the output column based on matching the regex in comments.")
 
 args = parser.parse_args()
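
Because the pattern and label flags all use action='append', repeating an option accumulates values into parallel lists, which make_matchmake_pairs zips into RegexPair objects. A self-contained sketch of that argparse behavior; the option names are copied from above, the example patterns are invented:

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('-RP', dest="regex_match_revision", default=None, type=str, action='append')
    p.add_argument('-RPl', dest="regex_revision_label", default=None, type=str, action='append')
    args = p.parse_args(['-RP', r'\[\[Category:[^\]]*\]\]', '-RPl', 'category',
                         '-RP', r'#REDIRECT', '-RPl', 'redirect'])
    print(args.regex_match_revision)  # ['\\[\\[Category:[^\\]]*\\]\\]', '#REDIRECT']
    print(args.regex_revision_label)  # ['category', 'redirect']
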
@@ -389,6 +513,11 @@ elif args.persist == "legacy":
 else:
     persist = PersistMethod.sequence
 
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
@@ -407,11 +536,17 @@ if len(args.dumpfiles) > 0:
             filename = os.path.join(output_dir, os.path.basename(filename))
             output_file = open_output_file(filename)
 
-        wikiq = WikiqParser(input_file, output_file,
+        wikiq = WikiqParser(input_file,
+                            output_file,
                             collapse_user=args.collapse_user,
-                            persist=persist,
-                            urlencode=args.urlencode)
-
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces=namespaces,
+                            revert_radius=args.revert_radius,
+                            regex_match_revision=args.regex_match_revision,
+                            regex_revision_label=args.regex_revision_label,
+                            regex_match_comment=args.regex_match_comment,
+                            regex_comment_label=args.regex_comment_label)
 
         wikiq.process()
@@ -419,12 +554,20 @@ if len(args.dumpfiles) > 0:
         input_file.close()
         output_file.close()
 else:
-    wikiq = WikiqParser(sys.stdin, sys.stdout,
+    wikiq = WikiqParser(sys.stdin,
+                        sys.stdout,
                         collapse_user=args.collapse_user,
                         persist=persist,
-                        persist_legacy=args.persist_legacy,
-                        urlencode=args.urlencode)
-    wikiq.process()
+                        #persist_legacy=args.persist_legacy,
+                        urlencode=args.urlencode,
+                        namespaces=namespaces,
+                        revert_radius=args.revert_radius,
+                        regex_match_revision=args.regex_match_revision,
+                        regex_revision_label=args.regex_revision_label,
+                        regex_match_comment=args.regex_match_comment,
+                        regex_comment_label=args.regex_comment_label)
+
+    wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
 # stop_words = stop_words.split(",")
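
Putting the pieces together, a minimal sketch that mirrors the stdin/stdout branch above with the new keyword arguments; the dump filename is hypothetical, and PersistMethod is defined earlier in wikiq:

    import sys

    wikiq = WikiqParser(open("sample-dump.xml"), sys.stdout,
                        collapse_user=False,
                        persist=PersistMethod.none,   # defined earlier in wikiq
                        urlencode=False,
                        namespaces=[0],               # keep only the main namespace
                        revert_radius=15,
                        regex_match_revision=None,
                        regex_revision_label=None,
                        regex_match_comment=None,
                        regex_comment_label=None)
    wikiq.process()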