]> code.communitydata.science - mediawiki_dump_tools.git/blob - wikiq_util.py
495c31c0ed45d59af23ad92a5022f36942b12cb9
[mediawiki_dump_tools.git] / wikiq_util.py
1 import sys
2 import re
3 from subprocess import Popen, PIPE
4 from collections import deque
5 from hashlib import sha1
6 from deltas.tokenizers import wikitext_split
7 from mwxml import Dump
8 import mwpersistence
9 import mwreverts
10 from urllib.parse import quote
11 from deltas import SequenceMatcher
12
13 TO_ENCODE = ('title', 'editor')
14 PERSISTENCE_RADIUS=7
15
def calculate_persistence(tokens_added):
    """Summarize persistence for the tokens added by one revision.

    Returns a (token_revs, num_tokens) pair where token_revs is the total
    number of later revisions each token survived (len(revisions) - 1 per
    token) and num_tokens is how many tokens were added.
    """
    token_revs = 0
    for token in tokens_added:
        token_revs += len(token.revisions) - 1
    return token_revs, len(tokens_added)
19
class WikiqIterator():
    """Iterate over the pages of a MediaWiki XML dump.

    Wraps mwxml's Dump iterator and yields each page as a WikiqPage,
    passing along the dump's namespace id -> name mapping.
    """

    def __init__(self, fh, collapse_user=False):
        # fh: open file object containing the XML dump
        # collapse_user: forwarded to every WikiqPage
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        # namespace id -> name, taken from the dump's siteinfo block
        self.namespace_map = { ns.id : ns.name for ns in
                               self.mwiterator.site_info.namespaces }
        self.__pages = self.load_pages()

    def load_pages(self):
        """Generator yielding one WikiqPage per page in the dump."""
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map = self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        # BUG FIX: was `next(self._pages)`; the attribute is the
        # name-mangled `self.__pages`, so next(iterator) always raised
        # AttributeError before this fix.
        return next(self.__pages)
40
41
class WikiqPage():
    """Wrap an mwxml page object.

    Prefixes titles outside the main namespace with the namespace name and,
    when collapse_user is set, collapses each run of consecutive revisions
    by the same user into the run's last revision (annotated with a
    collapsed_revs count).
    """
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        # page: an mwxml page object
        # namespace_map: {namespace id: namespace name}
        self.id = page.id
        self.namespace = page.namespace
        # the main namespace (0) has no prefix; everything else is "Name:Title"
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        else:
            self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()

    def rev_list(self):
        """Generator over this page's revisions.

        With collapse_user=False every revision is yielded in order. With
        collapse_user=True only the last revision of each same-user run is
        yielded, tagged with collapsed_revs = length of the run.

        Outline for how we want to handle collapse_user=True
        iteration   rev.user   prev_rev.user   add prev_rev?
                0          A            None           Never
                1          A               A           False
                2          B               A            True
                3          A               B            True
                4          A               A           False
        Post-loop                          A          Always
        """
        # BUG FIX: prev_rev was previously unbound when a page had no
        # revisions, so the final yield raised NameError.
        prev_rev = None
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a seq by a user and reset
                    # also yield if we don't know who the user is

                    if rev.deleted.user or prev_rev.deleted.user:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs

                    elif not rev.user.text == prev_rev.user.text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev

        # also yield the final time (skip if the page had no revisions)
        if prev_rev is not None:
            yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)
107
class WikiqParser():
    """Parse a MediaWiki XML dump and write one TSV row per revision.

    Optionally collapses consecutive same-user revisions and/or computes
    token-persistence statistics over a sliding window of revisions.
    """

    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
        # input_file: open file object containing the XML dump
        # output_file: open file object that receives the TSV rows
        # collapse_user: collapse consecutive revisions by one user
        # persist: compute token persistence with mwpersistence
        # persist_legacy: use the legacy mw.lib.persistence engine instead
        # urlencode: percent-encode the fields named in TO_ENCODE
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.persist_legacy = persist_legacy
        self.printed_header = False
        self.namespaces = []
        self.urlencode = urlencode

    def __get_namespace_from_title(self, title):
        """Return the namespace id whose name prefixes `title`.

        Falls back to the id stored under a None key (treated as the
        default namespace) when no prefix matches.
        """
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            # (identity test: was `ns == None`)
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns

    def process(self):
        """Run the full parse: iterate every page and revision, emit TSV
        rows via print_rev_data(), and print a summary line to stderr."""

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract list of namespaces (name -> id)
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}

        page_count = 0
        rev_count = 0

        # Iterate through pages
        for page in dump:
            rev_detector = mwreverts.Detector()

            if self.persist or self.persist_legacy:
                # sliding window of the most recent revisions: a revision's
                # persistence stats are only final once PERSISTENCE_RADIUS
                # later revisions have been processed
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if not self.persist_legacy:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                else:
                    from mw.lib import persistence
                    state = persistence.State()

            # Iterate through a page's revisions
            for rev in page:

                rev_data = {'revid' : rev.id,
                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                            'articleid' : page.id,
                            'editor_id' : "" if rev.deleted.user or rev.user.id is None else rev.user.id,
                            'title' : '"' + page.title + '"',
                            'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" }

                # if revisions are deleted, /many/ things will be missing
                if rev.deleted.text:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""

                else:
                    # rev.text can be None if the page has no text
                    if not rev.text:
                        rev.text = ""
                    # if text exists, we'll check for a sha1 and generate one otherwise

                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)

                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if not rev.deleted.user:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.user.text + '"'
                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"

                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""

                #TODO missing: additions_size deletions_size

                # if collapse user was on, lets run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs

                if self.persist or self.persist_legacy:
                    if rev.deleted.text:
                        # BUG FIX: this previously assigned into old_rev_data,
                        # which is unbound until the window first fills
                        # (NameError on an early deleted revision) and
                        # otherwise clobbered a row already printed.
                        # NOTE(review): deleted revisions are still never
                        # appended to the window, so they produce no output
                        # row in persistence mode — confirm that's intended.
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                    else:

                        if not self.persist_legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)

                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)

                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            # the oldest revision in the window now has a full
                            # radius of later context, so its stats are final
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1

            if self.persist or self.persist_legacy:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window)-(i+1)

                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

    def print_rev_data(self, rev_data):
        """Write one TSV row with fields sorted by key; emit the header
        line the first time through."""
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
301
302
def open_input_file(input_filename):
    """Open a (possibly compressed) dump file and return a readable stream.

    .7z, .gz and .bz2 files are streamed through an external decompressor
    (7za / zcat / bzcat must be on PATH); anything else is opened directly
    as a text file.
    """
    # BUG FIX: the original left `cmd` unbound for uncompressed files and
    # relied on `except NameError` around Popen to fall back to open(),
    # which would also have masked any genuine NameError. Make the
    # fallback explicit instead.
    cmd = None
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*']
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]

    if cmd is None:
        # uncompressed: read the file directly
        return open(input_filename, 'r')

    return Popen(cmd, stdout=PIPE).stdout
317
def open_output_file(input_filename):
    """Derive "<basename>.tsv" from the input dump filename and open it
    for writing.

    Strips a trailing compression extension (.7z/.gz/.bz2) and then a
    trailing .xml, e.g. "dump.xml.bz2" -> "dump.tsv".
    """
    # BUG FIX: the compression pattern was r'\.(7z|gz|bz2)?$' — the stray
    # `?` also stripped a bare trailing dot — and r'\.xml' was unanchored,
    # so a name like "my.xmlfiles.gz" would have been mangled. Both
    # patterns are now anchored to the end of the name.
    output_filename = re.sub(r'\.(7z|gz|bz2)$', '', input_filename)
    output_filename = re.sub(r'\.xml$', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file

Community Data Science Collective || Want to submit a patch?