Migrate to mwpersistence. This fixes many issues. We preserve legacy persistence...
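In short, the change swaps the old mw.lib.persistence / mw.lib.reverts modules for mwpersistence and mwreverts, tokenizes revision text with deltas' wikitext_split, and keeps the old code path behind a new --persistence-legacy flag. Below is a minimal sketch, not part of the commit, of how the new API is driven in the patch; the revisions list and its (rev_id, text, sha1) tuples are hypothetical stand-ins for wikiq's per-page revision loop.

import mwpersistence
import mwreverts
from deltas import SequenceMatcher
from deltas.tokenizers import wikitext_split

PERSISTENCE_RADIUS = 7

# hypothetical (rev_id, text, sha1) tuples standing in for one page's revisions
revisions = [
    (1, "Some initial text.", "sha-a"),
    (2, "Some initial text, plus an edit.", "sha-b"),
    (3, "Some initial text.", "sha-a"),   # identical checksum -> detected as a revert
]

# one revert detector and one diff state per page, as in the patch
rev_detector = mwreverts.Detector()
state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                revert_radius=PERSISTENCE_RADIUS)

for rev_id, text, sha1 in revisions:
    # None, or a Revert object whose .reverteds lists the reverted revision ids
    revert = rev_detector.process(sha1, rev_id)

    # replaces the legacy state.process(text, rev_id, sha1) call;
    # update() returns (operations, tokens_added, tokens_removed)
    _, tokens_added, tokens_removed = state.update(text, rev_id)

    # the measure behind calculate_persistence(): for each token added here,
    # count how many later revisions it persists through
    token_revs = sum(len(t.revisions) - 1 for t in tokens_added)
    print(rev_id, bool(revert), token_revs, len(tokens_added), len(tokens_removed))

As in the patch itself, wikiq additionally holds the last PERSISTENCE_RADIUS revisions in a deque, so a revision's persistence counts are only printed once enough later revisions have been seen.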
diff --git a/wikiq b/wikiq
index f115fdc94b4ca1f25cdbcf18846d299a91bee673..9260f35710d029e99f2c4bffe470aa000e7f1615 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -3,7 +3,7 @@
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
-
+import pdb
 import argparse
 import sys
 import os, os.path
@@ -14,15 +14,20 @@ from collections import deque
 from hashlib import sha1
 
 from mw.xml_dump import Iterator
-from mw.lib import persistence
-from mw.lib import reverts
 
+from deltas.tokenizers import wikitext_split
+import mwpersistence
+import mwreverts
+from urllib.parse import quote
+TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
+from deltas import SequenceMatcher
 
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
+
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
@@ -44,7 +49,7 @@ class WikiqPage():
     __slots__ = ('id', 'title', 'namespace', 'redirect',
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')
-
+    
     def __init__(self, page, collapse_user=False):
         self.id = page.id
         self.title = page.title
@@ -98,15 +103,35 @@ class WikiqPage():
         return next(self.__revisions)
 
 class WikiqParser():
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False):
+
+
+    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
+        
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
         self.persist = persist
+        self.persist_legacy = persist_legacy
         self.printed_header = False
+        self.namespaces = []
+        self.urlencode = urlencode
+        
+    def __get_namespace_from_title(self, title):
+        default_ns = None
+
+        for ns in self.namespaces:
+            # skip if the namespace is not defined
+            if ns == None:
+                default_ns = self.namespaces[ns]
+                continue
+
+            if title.startswith(ns + ":"):
+                return self.namespaces[ns]
+
+        # if we've made it this far with no matches, we return the default namespace
+        return default_ns
 
     def process(self):
-        print("Processing file: %s" % self.input_file.name, file=sys.stderr)
 
         # create a regex that creates the output filename
         # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
@@ -116,15 +141,27 @@ class WikiqParser():
         # Construct dump file iterator
         dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
 
+        # extract list of namespaces
+        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
+
         page_count = 0
         rev_count = 0
+
+
         # Iterate through pages
         for page in dump:
-            if self.persist:
-                state = persistence.State()
+            rev_detector = mwreverts.Detector()
+
+            if self.persist or self.persist_legacy:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
 
-            rev_detector = reverts.Detector()
+                if not self.persist_legacy:
+                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                else:
+                    from mw.lib import persistence
+                    state = persistence.State()
 
             # Iterate through a page's revisions
             for rev in page:
@@ -134,7 +171,7 @@ class WikiqParser():
                             'articleid' : page.id,
                             'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
                             'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace,
+                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
                             'deleted' : "TRUE" if rev.text.deleted else "FALSE" } 
 
                 # if revisions are deleted, /many/ things will be missing
@@ -158,6 +195,7 @@ class WikiqParser():
                
                     # generate revert data
                     revert = rev_detector.process(text_sha1, rev.id)
+                    
                     if revert:
                         rev_data['revert'] = "TRUE"
                         rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
@@ -188,12 +226,18 @@ class WikiqParser():
                 if self.collapse_user:
                     rev_data['collapsed_revs'] = rev.collapsed_revs
 
-                if self.persist:
+                if self.persist or self.persist_legacy:
                     if rev.text.deleted:
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
-                        _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
+                        if not self.persist_legacy:
+                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
+
+                        else:
+                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id,text_sha1)
+                            
                         window.append((rev.id, rev_data, tokens_added, tokens_removed))
                         
                         if len(window) == PERSISTENCE_RADIUS:
@@ -213,7 +257,7 @@ class WikiqParser():
 
                 rev_count += 1
 
-            if self.persist:
+            if self.persist or self.persist_legacy:
                 # print out metadata for the last RADIUS revisions
                 for i, item in enumerate(window):
                     # if the window was full, we've already printed item 0
@@ -227,7 +271,7 @@ class WikiqParser():
                     rev_data["tokens_added"] = num_tokens
                     rev_data["tokens_removed"] = len(tokens_removed)
                     rev_data["tokens_window"] = len(window)-(i+1)
-                                           
+                    
                     self.print_rev_data(rev_data)
 
             page_count += 1
@@ -237,6 +281,10 @@ class WikiqParser():
 
     def print_rev_data(self, rev_data):
         # if it's the first time through, print the header
+        if self.urlencode:
+            for field in TO_ENCODE:
+                rev_data[field] = quote(str(rev_data[field]))
+            
         if not self.printed_header:
             print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
             self.printed_header = True
@@ -245,12 +293,12 @@ class WikiqParser():
 
 
 def open_input_file(input_filename):
-    if re.match(r'.*\.7z', input_filename):
-        cmd = ["7za", "x", "-so", input_filename] 
-    elif re.match(r'.*\.gz', input_filename):
-        cmd = ["zcat", input_filename] 
-    elif re.match(r'.*\.bz2', input_filename):
+    if re.match(r'.*\.7z$', input_filename):
+        cmd = ["7za", "x", "-so", input_filename, '*'] 
+    elif re.match(r'.*\.gz$', input_filename):
         cmd = ["zcat", input_filename] 
+    elif re.match(r'.*\.bz2$', input_filename):
+        cmd = ["bzcat", "-dk", input_filename] 
 
     try:
         input_file = Popen(cmd, stdout=PIPE).stdout
@@ -261,7 +309,8 @@ def open_input_file(input_filename):
 
 def open_output_file(input_filename):
     # create a regex that creates the output filename
-    output_filename = re.sub(r'\.xml(\.(7z|gz|bz2))?$', '', input_filename)
+    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
+    output_filename = re.sub(r'\.xml', '', output_filename)
     output_filename = output_filename + ".tsv"
     output_file = open(output_filename, "w")
 
@@ -285,27 +334,39 @@ parser.add_argument('--collapse-user', dest="collapse_user", action="store_true"
 parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
                     help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
 
+parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
+                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+
+parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
+                    help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+
 args = parser.parse_args()
 
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
 
-        # open file for output
+        # open directory for output
+        if args.output_dir:
+            output_dir = args.output_dir[0]
+        else:
+            output_dir = "."
+
+        print("Processing file: %s" % filename, file=sys.stderr)
+
         if args.stdout:
             output_file = sys.stdout
         else:
-            if args.output_dir:
-                output_dir = args.output_dir[0]
-            else:
-                output_dir = "."
-
             filename = os.path.join(output_dir, os.path.basename(filename))
             output_file = open_output_file(filename)
 
         wikiq = WikiqParser(input_file, output_file, 
-                           collapse_user=args.collapse_user,
-                           persist=args.persist)
+                            collapse_user=args.collapse_user,
+                            persist=args.persist,
+                            persist_legacy=args.persist_legacy,
+                            urlencode=args.urlencode)
+
+
         wikiq.process()
 
         # close things 
@@ -313,8 +374,10 @@ if len(args.dumpfiles) > 0:
         output_file.close()
 else:
     wikiq = WikiqParser(sys.stdin, sys.stdout,
-                       collapse_user=args.collapse_user,
-                       persist=args.persist)
+                        collapse_user=args.collapse_user,
+                        persist=args.persist,
+                        persist_legacy=args.persist_legacy,
+                        urlencode=args.urlencode)
     wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
