[mediawiki_dump_tools.git] / wikiq
#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import sys
import os, os.path
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mw.xml_dump import Iterator
from mw.lib import persistence
from mw.lib import reverts

# Sliding window size (in revisions) used when computing token persistence.
PERSISTENCE_RADIUS = 7

def calculate_persistence(tokens_added):
    return (sum(len(x.revisions) - 1 for x in tokens_added),
            len(tokens_added))

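# For example, if tokens_added holds three tokens that appear in 5, 3, and 1
# revisions respectively (counting the revision that introduced each token),
# calculate_persistence returns (4 + 2 + 0, 3) == (6, 3).
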
class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Iterator.from_file(self.fh)
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page, collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)

class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, collapse_user=False):
        self.id = page.id
        self.title = page.title
        self.namespace = page.namespace
        self.redirect = page.redirect
        self.restrictions = page.restrictions

        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration   rev.user   prev_rev.user   add prev_rev?
        #         0          A            None           Never
        #         1          A               A           False
        #         2          B               A            True
        #         3          A               B            True
        #         4          A               A           False
        # Post-loop                          A          Always
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a sequence by a user and reset
                    if rev.contributor.user_text != prev_rev.contributor.user_text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev
        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)

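# A minimal sketch of how these wrappers compose (fh is an assumed open
# file handle for an XML dump):
#
#   dump = WikiqIterator(fh, collapse_user=True)
#   for page in dump:          # WikiqPage objects
#       for rev in page:       # mw.xml_dump revisions, annotated with
#           ...                # rev.collapsed_revs when collapse_user is on
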
class WikiqParser():
    def __init__(self, input_file, output_file, collapse_user=False, persist=False):
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False

    def process(self):
        print("Processing file: %s" % self.input_file.name, file=sys.stderr)

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                         r'output/wikiq-\1-\2.tsv',
        #                         input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        page_count = 0
        rev_count = 0
        # Iterate through pages
        for page in dump:
            if self.persist:
                state = persistence.State()
                window = deque(maxlen=PERSISTENCE_RADIUS)

            rev_detector = reverts.Detector()

            # Iterate through a page's revisions
            for rev in page:

                rev_data = {'revid' : rev.id,
                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                            'articleid' : page.id,
                            'editor_id' : "" if rev.contributor.id is None else rev.contributor.id,
                            'title' : '"' + page.title + '"',
                            'namespace' : page.namespace,
                            'deleted' : "TRUE" if rev.text.deleted else "FALSE" }

                # if revisions are deleted, /many/ things will be missing
                if rev.text.deleted:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""

                else:
                    # if text exists, we'll check for a sha1 and generate one otherwise
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)
                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if rev.contributor.user_text:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.contributor.user_text + '"'
                    rev_data['anon'] = "TRUE" if rev.contributor.id is None else "FALSE"

                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""

                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #    redirect = True
                #else:
                #    redirect = False

                #TODO missing: additions_size deletions_size

                # if collapse user was on, let's run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs

                if self.persist:
                    if rev.text.deleted:
                        # deleted revisions have no text, so persistence
                        # measures are undefined for them
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                        self.print_rev_data(rev_data)
                    else:
                        _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        # once the window is full, the oldest revision has
                        # PERSISTENCE_RADIUS-1 subsequent revisions to measure
                        # against, so we can print it
                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1

            if self.persist:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window)-(i+1)

                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

    def print_rev_data(self, rev_data):
        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)

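# Because the header comes from the sorted keys of the first rev_data dict,
# the default columns are, in order:
#   anon articleid date_time deleted editor editor_id minor namespace
#   revert reverteds revid sha1 text_chars title
# --collapse-user adds collapsed_revs; --persistence adds token_revs,
# tokens_added, tokens_removed, and tokens_window.
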
def open_input_file(input_filename):
    # pick a decompressor based on the file extension (7za, zcat, and
    # bzcat must be available on the PATH)
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename]
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", input_filename]

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    except NameError:
        # cmd was never bound: unrecognized extension, so treat the
        # file as an uncompressed dump
        input_file = open(input_filename, 'r')

    return input_file

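# As a hypothetical example, open_input_file("dump.xml.7z") runs
# `7za x -so dump.xml.7z` and returns its stdout pipe, while an
# unrecognized extension falls back to a plain open().
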
def open_output_file(input_filename):
    # derive the output filename by stripping the .xml(.7z|.gz|.bz2)
    # suffix and appending .tsv
    output_filename = re.sub(r'\.xml(\.(7z|gz|bz2))?$', '', input_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file

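# e.g. open_output_file("dump.xml.bz2") opens "dump.tsv" for writing
# (hypothetical filename).
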
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

# arguments for the input files
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create dump file)")

parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision within each sequence of consecutive edits made by the same user. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) the number of revisions used in computing the first measure.")

args = parser.parse_args()

if len(args.dumpfiles) > 0:
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open file for output
        if args.stdout:
            output_file = sys.stdout
        else:
            if args.output_dir:
                output_dir = args.output_dir[0]
            else:
                output_dir = "."

            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)

        wikiq = WikiqParser(input_file, output_file,
                            collapse_user=args.collapse_user,
                            persist=args.persist)
        wikiq.process()

        # close things
        input_file.close()
        output_file.close()
else:
    wikiq = WikiqParser(sys.stdin, sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=args.persist)
    wikiq.process()

# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")
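
# Example invocations (hypothetical filenames):
#   ./wikiq dump.xml.bz2                    # writes ./dump.tsv
#   ./wikiq --collapse-user -p dump.xml.7z -o out/
#   bzcat dump.xml.bz2 | ./wikiq            # stdin in, stdout out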
