# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
-import pdb
+
import argparse
import sys
import os, os.path
return(sum([(len(x.revisions)-1) for x in tokens_added]),
len(tokens_added))
+def matchmake(scanned_content, rev_data, regex, label):
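+    """
+    Scan scanned_content with the regex and record matches as new columns in rev_data.
+
+    If the regex contains named capture groups, one column is added per group,
+    keyed "{label}_{group_name}"; otherwise a single column keyed by label is
+    added. Multiple matches are joined with ", "; no match yields None.
+    Returns the updated rev_data dict.
+    """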
+ p = re.compile(regex)
+
+ temp_dict = {}
+ # if there are named capture groups in the regex
+ if bool(p.groupindex):
+ capture_groups = list(p.groupindex.keys())
+
+ # initialize the {capture_group_name:list} for each capture group
+ for cap_group in capture_groups:
+ temp_dict["{}_{}".format(label, cap_group)] = []
+
+        # collect any matches in this revision content, then fill the lists for each cap_group
+        matchobjects = list(p.finditer(scanned_content))
+        if len(matchobjects) > 0:
+
+ for cap_group in capture_groups:
+ temp_list = []
+ for match in matchobjects:
+ # we only want to add the match for the capture group if the match is not None
+                    if match.group(cap_group) is not None:
+ temp_list.append(match.group(cap_group))
+
+ # if temp_list of matches is empty just make that column None
+                if len(temp_list) == 0:
+ temp_dict["{}_{}".format(label, cap_group)] = None
+ # else we put in the list we made in the for-loop above
+ else:
+ temp_dict["{}_{}".format(label, cap_group)] = ', '.join(temp_list)
+
+        # if there are no matches at all in this revision content, default the values to None
+ else:
+ for cap_group in capture_groups:
+ temp_dict["{}_{}".format(label, cap_group)] = None
+
+    # if there are no capture groups, just search for all matches of the regex
+ else:
+        # collect all matches of the regex, if any
+        m = p.findall(scanned_content)
+        if len(m) > 0:
+            temp_dict[label] = ', '.join(m)
+ else:
+ temp_dict[label] = None
+ # update rev_data with our new columns
+ rev_data.update(temp_dict)
+ return rev_data
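+
+# Illustrative examples of matchmake's behavior (hypothetical inputs):
+#   matchmake("foo bar", {}, r"(?P<word>foo)", "m")  ->  {"m_word": "foo"}
+#   matchmake("foo bar", {}, r"baz", "m")            ->  {"m": None}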
+
+
class WikiqIterator():
def __init__(self, fh, collapse_user=False):
self.fh = fh
return next(self.__revisions)
class WikiqParser():
-
- def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None):
+ def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15):
"""
Parameters:
persist : what persistence method to use. Takes a PersistMethod value
self.printed_header = False
self.namespaces = []
self.urlencode = urlencode
+ self.revert_radius = revert_radius
+ self.regex_match_revision = regex_match_revision
+ self.regex_revision_label = regex_revision_label
+ self.regex_match_comment = regex_match_comment
+ self.regex_comment_label = regex_comment_label
+
if namespaces is not None:
self.namespace_filter = set(namespaces)
else:
# if we've made it this far with no matches, we return the default namespace
return default_ns
+
def process(self):
# create a regex that creates the output filename
# skip namespaces not in the filter
if self.namespace_filter is not None:
- if namespace in self.namespace_filter:
+ if namespace not in self.namespace_filter:
continue
- rev_detector = mwreverts.Detector()
+            rev_detector = mwreverts.Detector(radius=self.revert_radius)
+
if self.persist != PersistMethod.none:
window = deque(maxlen=PERSISTENCE_RADIUS)
from mw.lib import persistence
state = persistence.State()
-
-
# Iterate through a page's revisions
for rev in page:
+
+ # initialize rev_data
+ rev_data = {}
- rev_data = {'revid' : rev.id,
- 'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
- 'articleid' : page.id,
- 'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
- 'title' : '"' + page.title + '"',
- 'namespace' : namespace,
- 'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
+                # if the command line args gave a label but no corresponding regular expression
+                if (self.regex_revision_label is not None and self.regex_match_revision is None) or (self.regex_comment_label is not None and self.regex_match_comment is None):
+                    sys.exit('The given regex label(s) have no corresponding regex to search for.')
+
+ # if there's anything in the list of regex_match_revision
+ if self.regex_match_revision is not None:
+                    if (self.regex_revision_label is None) or (len(self.regex_match_revision) != len(self.regex_revision_label)):
+ sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+                    # construct the list of (regex, label) tuples
+                    pairs = list(zip(self.regex_match_revision, self.regex_revision_label))
+
+                    # for each (regex, label) pair, run matchmake to add the output columns
+                    for regex, label in pairs:
+                        # rev.text can be None for deleted revisions, so guard the scan
+                        rev_data = matchmake(rev.text or "", rev_data, regex, label)
+
+ # if there's anything in the list of regex_match_comment
+ if self.regex_match_comment is not None:
+                    if (self.regex_comment_label is None) or (len(self.regex_match_comment) != len(self.regex_comment_label)):
+ sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+                    # construct the list of (regex, label) tuples
+                    pairs = list(zip(self.regex_match_comment, self.regex_comment_label))
+
+                    # for each (regex, label) pair, run matchmake to add the output columns
+                    for regex, label in pairs:
+                        # rev.comment can be None (e.g. deleted or absent comments), so guard the scan
+                        rev_data = matchmake(rev.comment or "", rev_data, regex, label)
+
+ # we fill out the rest of the data structure now
+ rev_data['revid'] = rev.id
+ rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
+ rev_data['articleid'] = page.id
+                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
+ rev_data['title'] = '"' + page.title + '"'
+ rev_data['namespace'] = namespace
+ rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"
# if revisions are deleted, /many/ things will be missing
if rev.deleted.text:
# TODO rev.bytes doesn't work.. looks like a bug
rev_data['text_chars'] = len(rev.text)
-
+
# generate revert data
revert = rev_detector.process(text_sha1, rev.id)
help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
- help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. Use -p=segment for advanced persistence calculation method that is robust to content moves. This might be very slow. Use -p=legacy for legacy behavior.")
+ help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. The defualt is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
- help="Id number of namspace to include.")
+ help="Id number of namspace to include. Can be specified more than once.")
+
+parser.add_argument('-rr',
+ '--revert-radius',
+ dest="revert_radius",
+ type=int,
+ action='store',
+ default=15,
+ help="Number of edits to check when looking for reverts (default: 15)")
+
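+# Note on the radius (per mwreverts' documented semantics): it is the maximum
+# number of revisions a revert can reach back over, so a larger value catches
+# longer-range reverts at the cost of more comparisons per revision.
+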
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+ help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+ help="The label for the outputted column based on matching the regex in revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+ help="The regular expression to search for in comments of revisions.")
+
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+ help="The label for the outputted column based on matching the regex in comments.")
args = parser.parse_args()
persist = PersistMethod.sequence
if args.namespace_filter is not None:
- namespaces = [int(ns) for ns in args.namespace_filter.split(',')]
+ namespaces = args.namespace_filter
else:
namespaces = None
filename = os.path.join(output_dir, os.path.basename(filename))
output_file = open_output_file(filename)
- wikiq = WikiqParser(input_file, output_file,
- collapse_user=args.collapse_user,
- persist=persist,
- urlencode=args.urlencode,
- namespaces = namespaces)
-
+ wikiq = WikiqParser(input_file,
+ output_file,
+ collapse_user=args.collapse_user,
+ persist=persist,
+ urlencode=args.urlencode,
+ namespaces=namespaces,
+ revert_radius=args.revert_radius,
+                            regex_match_revision=args.regex_match_revision,
+                            regex_revision_label=args.regex_revision_label,
+                            regex_match_comment=args.regex_match_comment,
+                            regex_comment_label=args.regex_comment_label)
wikiq.process()
input_file.close()
output_file.close()
else:
- wikiq = WikiqParser(sys.stdin, sys.stdout,
+ wikiq = WikiqParser(sys.stdin,
+ sys.stdout,
collapse_user=args.collapse_user,
persist=persist,
- persist_legacy=args.persist_legacy,
- urlencode=args.urlencode)
- wikiq.process()
+ urlencode=args.urlencode,
+ namespaces=namespaces,
+ revert_radius=args.revert_radius,
+                        regex_match_revision=args.regex_match_revision,
+                        regex_revision_label=args.regex_revision_label,
+                        regex_match_comment=args.regex_match_comment,
+                        regex_comment_label=args.regex_comment_label)
+
+ wikiq.process()
# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")