from mw.xml_dump import Iterator
from mw.lib import persistence
from mw.lib import reverts
-
+from urllib.parse import quote
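+# field names whose values get percent-encoded before output (see print_rev_data)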
+TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7
def calculate_persistence(tokens_added):
__slots__ = ('id', 'title', 'namespace', 'redirect',
'restrictions', 'mwpage', '__revisions',
'collapse_user')
-
+
def __init__(self, page, collapse_user=False):
self.id = page.id
self.title = page.title
return next(self.__revisions)
class WikiqParser():
- def __init__(self, input_file, output_file, collapse_user=False, persist=False):
+
+
+ def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
+
self.input_file = input_file
self.output_file = output_file
self.collapse_user = collapse_user
self.persist = persist
self.printed_header = False
+        self.namespaces = {}
+ self.urlencode = urlencode
+
+ def __get_namespace_from_title(self, title):
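+        # resolve a namespace id from the title's "Prefix:", e.g. "Talk:Foo" -> 1
+        # when self.namespaces maps {None: 0, 'Talk': 1, ...}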
+ default_ns = None
+
+ for ns in self.namespaces:
+            # a namespace with no name marks the default (main) namespace;
+            # remember its id and keep scanning for an explicit prefix match
+            if ns is None:
+ default_ns = self.namespaces[ns]
+ continue
+
+ if title.startswith(ns + ":"):
+ return self.namespaces[ns]
+
+ # if we've made it this far with no matches, we return the default namespace
+ return default_ns
def process(self):
- print("Processing file: %s" % self.input_file.name, file=sys.stderr)
# create a regex that creates the output filename
# output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
# Construct dump file iterator
dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
+        # extract the mapping of namespace names to ids
+ self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
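+        # e.g. {None: 0, 'Talk': 1, 'User': 2, ...}; the unnamed entry is the main namespace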
+
page_count = 0
rev_count = 0
# Iterate through pages
'articleid' : page.id,
'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
'title' : '"' + page.title + '"',
- 'namespace' : page.namespace,
+                            'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
'deleted' : "TRUE" if rev.text.deleted else "FALSE" }
# if revisions are deleted, /many/ things will be missing
rev_data["tokens_added"] = num_tokens
rev_data["tokens_removed"] = len(tokens_removed)
rev_data["tokens_window"] = len(window)-(i+1)
-
+
self.print_rev_data(rev_data)
page_count += 1
def print_rev_data(self, rev_data):
# if it's the first time through, print the header
+ if self.urlencode:
+ for field in TO_ENCODE:
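+                # quote() percent-encodes separators, e.g. 'a\tb\nc' -> 'a%09b%0Ac',
+                # so stray tabs and newlines cannot break the TSV layout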
+ rev_data[field] = quote(str(rev_data[field]))
+
if not self.printed_header:
print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
self.printed_header = True
def open_input_file(input_filename):
if re.match(r'.*\.7z', input_filename):
- cmd = ["7za", "x", "-so", input_filename]
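+        # the "*.xml" filter tells 7za to extract only the XML member of the archive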
+        cmd = ["7za", "x", "-so", input_filename, "*.xml"]
elif re.match(r'.*\.gz', input_filename):
cmd = ["zcat", input_filename]
elif re.match(r'.*\.bz2', input_filename):
def open_output_file(input_filename):
# create a regex that creates the output filename
- output_filename = re.sub(r'\.xml(\.(7z|gz|bz2))?$', '', input_filename)
+    output_filename = re.sub(r'\.(7z|gz|bz2)$', '', input_filename)
+    output_filename = re.sub(r'\.xml$', '', output_filename)
output_filename = output_filename + ".tsv"
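+    # e.g. "enwiki-20150112-pages-meta-history1.xml.7z" becomes
+    # "enwiki-20150112-pages-meta-history1.tsv"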
output_file = open(output_filename, "w")
parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
+                    help="Output URL-encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
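+# e.g. `wikiq -u -p enwiki-dump.xml.7z` would enable both URL encoding and
+# persistence measures; output lands next to the input as a .tsv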
+
args = parser.parse_args()
if len(args.dumpfiles) > 0:
wikiq = WikiqParser(input_file, output_file,
collapse_user=args.collapse_user,
- persist=args.persist)
+ persist=args.persist,
+ urlencode=args.urlencode)
+
+ print("Processing file: %s" % filename, file=sys.stderr)
+
wikiq.process()
# close things
else:
wikiq = WikiqParser(sys.stdin, sys.stdout,
collapse_user=args.collapse_user,
- persist=args.persist)
+ persist=args.persist,
+ urlencode=args.urlencode)
wikiq.process()
# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"