import re
import sys
from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1
from mwxml import Dump
from deltas.tokenizers import wikitext_split
from deltas import SequenceMatcher
from urllib.parse import quote
import mwpersistence
import mwreverts

TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7  # window size used for token persistence calculations
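
# summarize the tokens added by one revision: returns (total number of later
# revisions those tokens persisted through, number of tokens added)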
def calculate_persistence(tokens_added):
    return(sum([(len(x.revisions)-1) for x in tokens_added]),
           len(tokens_added))
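
# WikiqIterator wraps an mwxml Dump and yields WikiqPage objects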
class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = { ns.id : ns.name for ns in
                               self.mwiterator.site_info.namespaces }
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map=self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)
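
# WikiqPage wraps an mwxml page; iterating it yields revisions, optionally
# collapsing consecutive edits by the same editor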
class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        self.namespace = page.namespace
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        else:
            self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()
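
    # rev_list() generates the revisions to yield for this page; with
    # collapse_user=True, runs of consecutive edits by one editor are merged
    # and the run length is recorded in rev.collapsed_revs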
    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration   rev.user   prev_rev.user   add prev_rev?
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs
            else:
                if self.collapse_user:
                    # yield if this is the last edit in a seq by a user and reset
                    # also yield if we don't know who the user is
                    if rev.deleted.user or prev_rev.deleted.user:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    elif rev.user.text != prev_rev.user.text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev

        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)
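
# WikiqParser walks a dump's pages and revisions and writes one
# tab-separated row per revision to the output file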
class WikiqParser():
    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.persist_legacy = persist_legacy
        self.printed_header = False
        self.namespaces = []
        self.urlencode = urlencode

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, return the default namespace
        return default_ns

    def process(self):
        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract list of namespaces
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}

        page_count = 0
        rev_count = 0

        # Iterate through pages
        for page in dump:
            rev_detector = mwreverts.Detector()

            if self.persist or self.persist_legacy:
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if not self.persist_legacy:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)
                else:
                    from mw.lib import persistence
                    state = persistence.State()
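
            # when persistence is computed, each revision waits in `window` until
            # PERSISTENCE_RADIUS later revisions have been seen, so that token
            # persistence statistics can be attached before it is printed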

            # Iterate through a page's revisions
            for rev in page:
                rev_data = {'revid' : rev.id,
                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                            'articleid' : page.id,
                            'editor_id' : "" if rev.deleted.user or rev.user.id is None else rev.user.id,
                            'title' : '"' + page.title + '"',
                            'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" }

                # if revisions are deleted, /many/ things will be missing
                if rev.deleted.text:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""

                else:
                    # rev.text can be None if the page has no text
                    if not rev.text:
                        rev.text = ""

                    # if text exists, we'll check for a sha1 and generate one otherwise
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)
                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if not rev.deleted.user:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.user.text + '"'
                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"
                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""

                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):

                #TODO missing: additions_size deletions_size

                # if collapse user was on, let's run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs
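
                # when persistence is enabled, printing of each revision is
                # deferred until PERSISTENCE_RADIUS later revisions have been seen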
                if self.persist or self.persist_legacy:
                    if rev.deleted.text:
                        # persistence can't be computed when the text is deleted
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                    else:
                        if not self.persist_legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)

                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1

            if self.persist or self.persist_legacy:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window)-(i+1)

                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)
    def print_rev_data(self, rev_data):
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
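
# open the (possibly compressed) dump file, piping .7z/.gz/.bz2 archives
# through an external decompressor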
def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*']
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    except NameError:
        # no decompressor matched the extension, so cmd is unbound; read directly
        input_file = open(input_filename, 'r')

    return input_file

def open_output_file(input_filename):
    # build the output filename from the input filename
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml$', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file