#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
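# wikiq streams a MediaWiki XML database dump and emits one tab-separated
# row of metadata per revision (see the argparse options near the bottom
# of this file). A typical invocation might look like the following; the
# dump filename is just an illustrative placeholder:
#
#   ./wikiq enwiki-20200101-pages-meta-history.xml.bz2 -o output/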
import argparse
import sys
import os, os.path
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mw.xml_dump import Iterator
from mw.lib import persistence
from mw.lib import reverts

from urllib.parse import quote

TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7

def calculate_persistence(tokens_added):
    # returns a pair: (persistent token revisions, number of tokens added)
    return(sum([(len(x.revisions)-1) for x in tokens_added]),
           len(tokens_added))
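# For example, if a revision adds two tokens and one of them survives the
# next three revisions while the other is removed immediately, the result
# is (3 + 0, 2): each token counts the revisions it persisted through,
# since len(x.revisions) - 1 excludes the revision that introduced it.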
class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Iterator.from_file(self.fh)
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page, collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)
class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, collapse_user=False):
        self.id = page.id
        self.title = page.title
        self.namespace = page.namespace
        self.redirect = page.redirect
        self.restrictions = page.restrictions

        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()
    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration   rev.user   prev_rev.user   add prev_rev?
        #         0          A            None           Never
        #         1          A               A           False
        #         2          B               A            True
        #         3          A               B            True
        #         4          A               A           False
        # Post-loop                          A          Always
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a seq by a user and reset
                    if not rev.contributor.user_text == prev_rev.contributor.user_text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev

        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)
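# WikiqParser drives the whole pipeline: it wraps an input dump and an
# output file handle, and process() streams every page and revision
# through to print_rev_data() below.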
class WikiqParser():

    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False
        self.namespaces = []
        self.urlencode = urlencode
    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns
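    # For example, with the standard MediaWiki namespace map, a title like
    # "Talk:Sandbox" returns the id registered for "Talk" (1), while a
    # title with no matching prefix falls back to the default namespace.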
    def process(self):

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract list of namespaces
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}

        page_count = 0
        rev_count = 0
        # Iterate through pages
        for page in dump:

            # persistence state is tracked per-page
            if self.persist:
                state = persistence.State()
                window = deque(maxlen=PERSISTENCE_RADIUS)

            rev_detector = reverts.Detector()

            # Iterate through a page's revisions
            for rev in page:
                rev_data = {'revid' : rev.id,
                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                            'articleid' : page.id,
                            'editor_id' : "" if rev.contributor.id is None else rev.contributor.id,
                            'title' : '"' + page.title + '"',
                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
                            'deleted' : "TRUE" if rev.text.deleted else "FALSE" }
                # if revisions are deleted, /many/ things will be missing
                if rev.text.deleted:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""

                else:
                    # if text exists, we'll check for a sha1 and generate one otherwise
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)
                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""
                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if rev.contributor.user_text:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.contributor.user_text + '"'
                    rev_data['anon'] = "TRUE" if rev.contributor.id is None else "FALSE"

                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""
                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #    redirect = True
                #else:
                #    redirect = False

                #TODO missing: additions_size deletions_size

                # if collapse user was on, lets run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs
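                # Persistence is computed over a sliding window of
                # PERSISTENCE_RADIUS revisions so that each token's fate
                # can be observed for several revisions after it is added;
                # a row is only emitted once it leaves the window (or when
                # the page's revisions run out, below).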
                if self.persist:
                    # deleted revisions have no text to process, so emit
                    # empty persistence fields for them
                    if rev.text.deleted:
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                        self.print_rev_data(rev_data)

                    else:
                        _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1
            if self.persist:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    # these rows had fewer than RADIUS later revisions to
                    # observe, so record how many were actually seen
                    rev_data["tokens_window"] = len(window)-(i+1)

                    self.print_rev_data(rev_data)

            page_count += 1
258 print("Done: %s revisions and %s pages." % (rev_count, page_count),
    def print_rev_data(self, rev_data):
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
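# Note: print_rev_data emits fields in sorted key order, so the header
# line and every data line stay aligned; all rows in a run share the same
# key set because the optional fields (collapsed_revs, the persistence
# measures) are controlled by flags that are constant for the whole run.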
def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*']
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    except NameError:
        # cmd is unbound for uncompressed files, so open them directly
        input_file = open(input_filename, 'r')

    return input_file
def open_output_file(input_filename):
    # build the output filename from the input filename: strip the
    # compression suffix and the .xml extension, then append .tsv
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file
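# For example, "dump.xml.bz2" becomes "dump.tsv": the first substitution
# strips ".bz2", the second strips ".xml", and ".tsv" is appended.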
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab-delimited data.')

# arguments for the input direction
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create dump file).")

parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision in each sequence of consecutive edits by the same user. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure.")

parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                    help="Output URL-encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")

args = parser.parse_args()
if len(args.dumpfiles) > 0:
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open directory for output
        if args.output_dir:
            output_dir = args.output_dir[0]
        else:
            output_dir = "."

        print("Processing file: %s" % filename, file=sys.stderr)

        if args.stdout:
            output_file = sys.stdout
        else:
            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)

        wikiq = WikiqParser(input_file, output_file,
                            collapse_user=args.collapse_user,
                            persist=args.persist,
                            urlencode=args.urlencode)

        wikiq.process()

        input_file.close()
        output_file.close()
else:
    wikiq = WikiqParser(sys.stdin, sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=args.persist,
                        urlencode=args.urlencode)

    wikiq.process()
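# With no DUMPFILE arguments the script reads the dump from stdin and
# writes rows to stdout, e.g. (an illustrative pipeline):
#
#   bzcat enwiki-sample.xml.bz2 | ./wikiq > enwiki-sample.tsv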