add note to readme about dependency on compression software
diff --git a/wikiq b/wikiq
index f115fdc94b4ca1f25cdbcf18846d299a91bee673..e693cd2ba2dd3ab19ed61505a529f8d216219e8b 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -16,7 +16,8 @@ from hashlib import sha1
 from mw.xml_dump import Iterator
 from mw.lib import persistence
 from mw.lib import reverts
-
+from urllib.parse import quote
+TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
 
 def calculate_persistence(tokens_added):
@@ -44,7 +45,7 @@ class WikiqPage():
     __slots__ = ('id', 'title', 'namespace', 'redirect',
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')
-
+    
     def __init__(self, page, collapse_user=False):
         self.id = page.id
         self.title = page.title
@@ -98,15 +99,34 @@ class WikiqPage():
         return next(self.__revisions)
 
 class WikiqParser():
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False):
+
+
+    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
+        
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
         self.persist = persist
         self.printed_header = False
+        self.namespaces = []
+        self.urlencode = urlencode
+        
+    def __get_namespace_from_title(self, title):
+        default_ns = None
+
+        for ns in self.namespaces:
+            # skip if the namespace is not defined
+            if ns == None:
+                default_ns = self.namespaces[ns]
+                continue
+
+            if title.startswith(ns + ":"):
+                return self.namespaces[ns]
+
+        # if we've made it this far with no matches, we return the default namespace
+        return default_ns
 
     def process(self):
-        print("Processing file: %s" % self.input_file.name, file=sys.stderr)
 
         # create a regex that creates the output filename
         # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
@@ -116,6 +136,9 @@ class WikiqParser():
         # Construct dump file iterator
         dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
 
+        # extract list of namespaces
+        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
+
         page_count = 0
         rev_count = 0
         # Iterate through pages
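
The two namespace additions work together: the parser first builds a name-to-id map from the dump's siteinfo namespaces, then (in the next hunk) falls back to matching title prefixes whenever a revision arrives without an explicit namespace. A minimal sketch of that lookup, using illustrative namespace values rather than ones read from a real dump:

    # Sketch of the namespace fallback; the map mirrors the
    # {ns.name: ns.id} comprehension built from dump.mwiterator.namespaces,
    # where the unnamed main namespace is assumed to have key None.
    namespaces = {None: 0, "Talk": 1, "User": 2}  # illustrative values

    def namespace_from_title(title, namespaces):
        default_ns = None
        for name, ns_id in namespaces.items():
            if name is None:            # the unnamed (main) namespace
                default_ns = ns_id
                continue
            if title.startswith(name + ":"):
                return ns_id
        return default_ns               # no prefix matched: use the default

    assert namespace_from_title("Talk:Foo", namespaces) == 1
    assert namespace_from_title("Foo", namespaces) == 0
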
@@ -134,7 +157,7 @@ class WikiqParser():
                             'articleid' : page.id,
                             'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
                             'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace,
+                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
                             'deleted' : "TRUE" if rev.text.deleted else "FALSE" } 
 
                 # if revisions are deleted, /many/ things will be missing
                             'deleted' : "TRUE" if rev.text.deleted else "FALSE" } 
 
                 # if revisions are deleted, /many/ things will be missing
@@ -227,7 +250,7 @@ class WikiqParser():
                     rev_data["tokens_added"] = num_tokens
                     rev_data["tokens_removed"] = len(tokens_removed)
                     rev_data["tokens_window"] = len(window)-(i+1)
                     rev_data["tokens_added"] = num_tokens
                     rev_data["tokens_removed"] = len(tokens_removed)
                     rev_data["tokens_window"] = len(window)-(i+1)
-                                           
+                    
                     self.print_rev_data(rev_data)
 
             page_count += 1
@@ -237,6 +260,10 @@ class WikiqParser():
 
     def print_rev_data(self, rev_data):
         # if it's the first time through, print the header
+        if self.urlencode:
+            for field in TO_ENCODE:
+                rev_data[field] = quote(str(rev_data[field]))
+            
         if not self.printed_header:
             print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
             self.printed_header = True
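
This is where the TO_ENCODE tuple and the urllib.parse.quote import from the first hunk pay off: percent-encoding the title and editor fields keeps embedded tabs and newlines from breaking the tab-separated output. A quick demonstration of the behavior relied on here:

    from urllib.parse import quote

    # Editor names and titles can contain characters that corrupt TSV rows.
    editor = "some\nuser\twith odd whitespace"
    print(quote(editor))     # some%0Auser%09with%20odd%20whitespace

    # quote(str(None)) yields the literal string 'None', so missing fields
    # are emitted as text rather than raising.
    print(quote(str(None)))  # None
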
@@ -245,12 +272,12 @@ class WikiqParser():
 
 
 def open_input_file(input_filename):
-    if re.match(r'.*\.7z', input_filename):
-        cmd = ["7za", "x", "-so", input_filename] 
-    elif re.match(r'.*\.gz', input_filename):
-        cmd = ["zcat", input_filename] 
-    elif re.match(r'.*\.bz2', input_filename):
+    if re.match(r'.*\.7z$', input_filename):
+        cmd = ["7za", "x", "-so", input_filename, '*'] 
+    elif re.match(r'.*\.gz$', input_filename):
         cmd = ["zcat", input_filename] 
         cmd = ["zcat", input_filename] 
+    elif re.match(r'.*\.bz2$', input_filename):
+        cmd = ["bzcat", "-dk", input_filename] 
 
     try:
         input_file = Popen(cmd, stdout=PIPE).stdout
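
This dispatch is what the commit subject's README note is about: wikiq shells out to external decompressors rather than decompressing in-process, so 7za, zcat, and bzcat all need to be installed and on PATH. A standalone sketch of the same pattern; unlike the code above, it fails loudly on an unrecognized extension instead of reaching Popen with cmd unbound:

    from subprocess import Popen, PIPE

    def open_compressed(path):
        # Extension-based dispatch; each tool must be on PATH.
        if path.endswith(".7z"):
            cmd = ["7za", "x", "-so", path, "*"]
        elif path.endswith(".gz"):
            cmd = ["zcat", path]
        elif path.endswith(".bz2"):
            cmd = ["bzcat", path]
        else:
            raise ValueError("unrecognized extension: %s" % path)
        return Popen(cmd, stdout=PIPE).stdout
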
@@ -261,7 +288,8 @@ def open_input_file(input_filename):
 
 def open_output_file(input_filename):
     # create a regex that creates the output filename
-    output_filename = re.sub(r'\.xml(\.(7z|gz|bz2))?$', '', input_filename)
+    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
+    output_filename = re.sub(r'\.xml', '', output_filename)
     output_filename = output_filename + ".tsv"
     output_file = open(output_filename, "w")
 
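
Splitting the old single regex into two substitutions lets the suffix stripping handle both ".xml.bz2"-style names and bare ".xml" names. A sketch of the same two-step rewrite on illustrative filenames:

    import re

    def tsv_name(input_filename):
        # Mirrors the two-step suffix stripping above.
        name = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
        name = re.sub(r'\.xml', '', name)
        return name + ".tsv"

    print(tsv_name("dump.xml.bz2"))  # dump.tsv
    print(tsv_name("dump.xml"))      # dump.tsv
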
@@ -285,27 +313,35 @@ parser.add_argument('--collapse-user', dest="collapse_user", action="store_true"
 parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
                     help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) the number of revisions used in computing the first measure.")
 
+parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
+                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+
 args = parser.parse_args()
 
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
 
-        # open file for output
+        # open directory for output
+        if args.output_dir:
+            output_dir = args.output_dir[0]
+        else:
+            output_dir = "."
+
+        print("Processing file: %s" % filename, file=sys.stderr)
+
         if args.stdout:
             output_file = sys.stdout
         else:
-            if args.output_dir:
-                output_dir = args.output_dir[0]
-            else:
-                output_dir = "."
-
             filename = os.path.join(output_dir, os.path.basename(filename))
             output_file = open_output_file(filename)
 
         wikiq = WikiqParser(input_file, output_file, 
-                           collapse_user=args.collapse_user,
-                           persist=args.persist)
+                            collapse_user=args.collapse_user,
+                            persist=args.persist,
+                            urlencode=args.urlencode)
+
+
         wikiq.process()
 
         # close things 
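
With the new option wired through both WikiqParser construction sites, a typical run might look like the following (filenames hypothetical; only flags visible in this diff are used). The second form exercises the stdin/stdout path updated in the next hunk:

    ./wikiq -u -p --collapse-user dump.xml.bz2
    bzcat dump.xml.bz2 | ./wikiq -u > dump.tsv
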
@@ -313,8 +349,9 @@ if len(args.dumpfiles) > 0:
         output_file.close()
 else:
     wikiq = WikiqParser(sys.stdin, sys.stdout,
-                       collapse_user=args.collapse_user,
-                       persist=args.persist)
+                        collapse_user=args.collapse_user,
+                        persist=args.persist,
+                        urlencode=args.urlencode)
     wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
