# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
+
import argparse
import sys
import os, os.path
+import re
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7
from deltas import SequenceMatcher
+from deltas import SegmentMatcher
+
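+# integer constants naming the persistence method selected by -p/--persistence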
+class PersistMethod:
+ none = 0
+ sequence = 1
+ segment = 2
+ legacy = 3
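+
+# returns (token_revs, tokens_added): token_revs sums, over each added token,
+# the number of subsequent revisions in which that token persisted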
def calculate_persistence(tokens_added):
return(sum([(len(x.revisions)-1) for x in tokens_added]),
len(tokens_added))
+def matchmaker(rev_data, regular_expression, scanner, rev):
+    # scanner is the list of locations built by the -rs/--regex-scanner flag
+    for location in scanner:
+        if location == "comment":
+            # guard against absent comments, which may be None
+            matching_string = rev.comment or ""
+        elif location == "text":
+            matching_string = rev.text or ""
+        else:
+            sys.exit("regex scanner location must be 'comment' or 'text'.")
+
+        # collect every matched substring into one comma-separated field
+        matches = [m.group(0) for m in re.finditer(regular_expression, matching_string)]
+        if matches:
+            rev_data['matches'] = ",".join(matches)
+        else:
+            rev_data['matches'] = None
+
+    # note: when several locations are scanned, the last location sets 'matches'
+    return rev_data
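+
+# illustrative example (pattern hypothetical): matchmaker({}, r"foo\d", ["text"], rev)
+# yields {'matches': 'foo1,foo2'} when rev.text contains "foo1" and "foo2",
+# or {'matches': None} when nothing matches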
+
+
class WikiqIterator():
def __init__(self, fh, collapse_user=False):
self.fh = fh
self.collapse_user = collapse_user
self.mwiterator = Dump.from_file(self.fh)
+ self.namespace_map = { ns.id : ns.name for ns in
+ self.mwiterator.site_info.namespaces }
self.__pages = self.load_pages()
def load_pages(self):
for page in self.mwiterator:
- yield WikiqPage(page, collapse_user=self.collapse_user)
+ yield WikiqPage(page,
+ namespace_map = self.namespace_map,
+ collapse_user=self.collapse_user)
def __iter__(self):
return self.__pages
'restrictions', 'mwpage', '__revisions',
'collapse_user')
- def __init__(self, page, collapse_user=False):
+ def __init__(self, page, namespace_map, collapse_user=False):
self.id = page.id
- self.title = page.title
self.namespace = page.namespace
- self.redirect = page.redirect
+ # following mwxml, we assume namespace 0 in cases where
+ # page.namespace is inconsistent with namespace_map
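+        # e.g. with the standard MediaWiki namespace map, a page "Foo" in
+        # namespace 1 gets the title "Talk:Foo"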
+        if page.namespace not in namespace_map:
+            self.title = page.title
+            page.namespace = 0
+        elif page.namespace != 0:
+            self.title = ':'.join([namespace_map[page.namespace], page.title])
+        else:
+            self.title = page.title
self.restrictions = page.restrictions
-
self.collapse_user = collapse_user
self.mwpage = page
self.__revisions = self.rev_list()
return next(self.__revisions)
class WikiqParser():
+
+ def __init__(self, input_file, output_file, scanner, match_regex, collapse_user=False, persist=None, urlencode=False, namespaces = None):
+        """
+        Parameters:
+           persist : what persistence method to use. Takes a PersistMethod value
+           scanner : list of locations ('comment' and/or 'text') to search with match_regex
+           match_regex : regular expression to search for in each revision
+           namespaces : list of namespace ids to include; None means no filtering
+        """
-
- def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
-
self.input_file = input_file
self.output_file = output_file
self.collapse_user = collapse_user
self.persist = persist
- self.persist_legacy = persist_legacy
self.printed_header = False
self.namespaces = []
self.urlencode = urlencode
-
+ self.scanner = scanner
+ self.match_regex = match_regex
+
+ if namespaces is not None:
+ self.namespace_filter = set(namespaces)
+ else:
+ self.namespace_filter = None
+
def __get_namespace_from_title(self, title):
default_ns = None
# if we've made it this far with no matches, we return the default namespace
return default_ns
+
def process(self):
# create a regex that creates the output filename
# Iterate through pages
for page in dump:
+ namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
+
+ # skip namespaces not in the filter
+ if self.namespace_filter is not None:
+ if namespace not in self.namespace_filter:
+ continue
+
rev_detector = mwreverts.Detector()
- if self.persist or self.persist_legacy:
+ if self.persist != PersistMethod.none:
window = deque(maxlen=PERSISTENCE_RADIUS)
- if not self.persist_legacy:
+ if self.persist == PersistMethod.sequence:
state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
+ elif self.persist == PersistMethod.segment:
+ state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+ revert_radius=PERSISTENCE_RADIUS)
+
+ # self.persist == PersistMethod.legacy
else:
from mw.lib import persistence
state = persistence.State()
# Iterate through a page's revisions
for rev in page:
-
- rev_data = {'revid' : rev.id,
- 'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
- 'articleid' : page.id,
- 'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
- 'title' : '"' + page.title + '"',
- 'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
- 'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
+                # initialize rev_data
+                rev_data = {}
+
+                if self.scanner is not None:
+                    # search the locations requested by -rs ('comment' and/or
+                    # 'text') for the regex given by -R
+                    rev_data = matchmaker(rev_data, self.match_regex, self.scanner, rev)
+
+                    # skip revisions in which the regex matched nothing
+                    if rev_data['matches'] is None:
+                        continue
+
+ # we fill out the rest of the data structure now
+ rev_data['revid'] = rev.id
+ rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
+ rev_data['articleid'] = page.id
+                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
+ rev_data['title'] = '"' + page.title + '"'
+ rev_data['namespace'] = namespace
+ rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"
# if revisions are deleted, /many/ things will be missing
if rev.deleted.text:
# TODO rev.bytes doesn't work.. looks like a bug
rev_data['text_chars'] = len(rev.text)
-
+
# generate revert data
revert = rev_detector.process(text_sha1, rev.id)
if self.collapse_user:
rev_data['collapsed_revs'] = rev.collapsed_revs
- if self.persist or self.persist_legacy:
+ if self.persist != PersistMethod.none:
if rev.deleted.text:
-
for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
old_rev_data[k] = None
else:
- if not self.persist_legacy:
+ if self.persist != PersistMethod.legacy:
_, tokens_added, tokens_removed = state.update(rev.text, rev.id)
else:
rev_count += 1
- if self.persist or self.persist_legacy:
+ if self.persist != PersistMethod.none:
# print out metadata for the last RADIUS revisions
for i, item in enumerate(window):
# if the window was full, we've already printed item 0
parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
- help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as before, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in earlier research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves, but slower and more prone to bugs.")
parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
-parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
- help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of the namespace to include. Can be specified more than once.")
+
+parser.add_argument('-rs', '--regex-scanner', dest="scanner", type=str, action='append',
+                    help="Where to search for the regex given by -R/--match: 'comment' or 'text'. Can be specified more than once.")
+
+parser.add_argument('-R', '--match', dest="match_regex", type=str,
+                    help="The regular expression to search for in the locations given by -rs/--regex-scanner.")
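+
+# illustrative invocation (filename hypothetical):
+#   python wikiq enwiki-dump.xml.bz2 -n 0 -n 1 -p segment -R 'https?://\S+' -rs comment -rs text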
args = parser.parse_args()
+# set persistence method
+
+if args.persist is None:
+ persist = PersistMethod.none
+elif args.persist == "segment":
+ persist = PersistMethod.segment
+elif args.persist == "legacy":
+ persist = PersistMethod.legacy
+else: # a bare -p, or -p=sequence, selects the sequence method
+    persist = PersistMethod.sequence
+
+if args.namespace_filter is not None:
+ namespaces = args.namespace_filter
+else:
+ namespaces = None
+
if len(args.dumpfiles) > 0:
for filename in args.dumpfiles:
input_file = open_input_file(filename)
wikiq = WikiqParser(input_file, output_file,
collapse_user=args.collapse_user,
- persist=args.persist,
- persist_legacy=args.persist_legacy,
- urlencode=args.urlencode)
-
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces=namespaces,
+                            match_regex=args.match_regex,
+                            scanner=args.scanner)
wikiq.process()
else:
wikiq = WikiqParser(sys.stdin, sys.stdout,
collapse_user=args.collapse_user,
- persist=args.persist,
- persist_legacy=args.persist_legacy,
- urlencode=args.urlencode)
+                        persist=persist,
+                        urlencode=args.urlencode,
+                        namespaces=namespaces,
+                        match_regex=args.match_regex,
+                        scanner=args.scanner)
wikiq.process()
# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"