add test files
diff --git a/wikiq b/wikiq
index 7cf4be26e60db633db9d210e7f5ec74addf1f357..731f59ae27b7dc93035c70734a34ab9d43c0e7d4 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -3,7 +3,6 @@
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
-import pdb
 import argparse
 import sys
 import os, os.path
@@ -13,16 +12,25 @@ from subprocess import Popen, PIPE
 from collections import deque
 from hashlib import sha1
 
-from mwxml import Dump
+from mwxml import Dump, Page
+from mwxml.errors import MalformedXML  # raised by WikiqIterator.process_item
 
 from deltas.tokenizers import wikitext_split
+from mwdiffs.utilities import dump2diffs
 import mwpersistence
+from mwpersistence.state import DiffState
+
+from mwpersistence import Token
+from mwpersistence.utilities import diffs2persistence
 import mwreverts
 from urllib.parse import quote
-TO_ENCODE = ('title', 'editor')
-PERSISTENCE_RADIUS=7
+
 from deltas import SequenceMatcher
 from deltas import SegmentMatcher
+TO_ENCODE = ('title', 'editor')
+PERSISTENCE_RADIUS=7
+
+ws_lex = ['break','whitespace']
+punct_lex = ['period','qmark','epoint','comma','colon','scolon',
+             'paren_open','paren_close','brack_open','brack_close',
+             'dbrack_close','dbrack_open','tab_close','tab_open',
+             'dcurly_close','dcurly_open','equals','bar','etc','bold',
+             'italic','tag','comment_end','comment_start']
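+# these names are assumed to match the lexeme types emitted by the deltas
+# wikitext_split tokenizer; calculate_persistence uses them to optionally
+# exclude whitespace and punctuation tokens from persistence counts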
 
 class PersistMethod:
     none = 0
@@ -30,105 +38,166 @@ class PersistMethod:
     segment = 2
     legacy = 3
 
-def calculate_persistence(tokens_added):
+def calculate_persistence(tokens_added, tokens_removed, exclude_ws = False, exclude_punct = False, legacy = False):
+
+    if not legacy:
+        cond = lambda t: not (exclude_punct and (t.type in punct_lex)) \
+               and not (exclude_ws and (t.type in ws_lex))
+
+        tokens_added = [t for t in tokens_added if cond(t)]
+        tokens_removed = [t for t in tokens_removed if cond(t)]
+
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
-           len(tokens_added))
+           len(tokens_added),
+           len(tokens_removed)
+    )
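+
+# rough sketch of the accounting above (hypothetical token): a token whose
+# .revisions list holds 5 revision ids persisted through 4 revisions beyond
+# the one that introduced it, contributing 4 to token_revs; with
+# exclude_ws=True, tokens typed 'break' or 'whitespace' are dropped from
+# all three returned counts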
+
+class WikiqIterator(Dump):
+
+    @classmethod
+    def from_file(cls, fh, collapse_user = False):
+        cls.fh = fh
+        cls.collapse_user = collapse_user
+        cls = super(WikiqIterator, cls).from_file(fh)
+        return cls
+
+    @classmethod
+    def process_item(cls, item_element, namespace_map):
+        if not hasattr(cls,'inv_namespace_map'):
+            cls.inv_namespace_map = {ns.id:name for name, ns in namespace_map.items()}
+
+        if item_element.tag == "page":
+            return WikiqPage.from_element(item_element, namespace_map, cls.inv_namespace_map, cls.collapse_user)
+        elif item_element.tag == "logitem":
+            return LogItem.from_element(item_element, namespace_map)
+        else:
+            raise MalformedXML("Expected to see <page> or <logitem>.  " +
+                               "Instead saw <{0}>".format(item_element.tag))
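+
+    # usage sketch (assumes fh is an open MediaWiki XML dump file handle):
+    #   dump = WikiqIterator.from_file(fh, collapse_user=True)
+    #   for page in dump:
+    #       ...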
 
-class WikiqIterator():
-    def __init__(self, fh, collapse_user=False):
-        self.fh = fh
-        self.collapse_user = collapse_user
-        self.mwiterator = Dump.from_file(self.fh)
-        self.namespace_map = { ns.id : ns.name for ns in
-                               self.mwiterator.site_info.namespaces }
-        self.__pages = self.load_pages()
+class WikiqPage(Page):
+    __slots__ = ('id', 'title', 'namespace', 'redirect',
+                 'restrictions','collapse_user')
+        
+    @classmethod
+    def from_element(cls, item_element, namespace_map, inv_namespace_map, collapse_user = False):
+        cls.prev_rev = None
 
-    def load_pages(self):
-        for page in self.mwiterator:
-            yield WikiqPage(page,
-                            namespace_map = self.namespace_map,
-                            collapse_user=self.collapse_user)
+        cls = super(WikiqPage, cls).from_element(item_element, namespace_map)
 
-    def __iter__(self):
-        return self.__pages
+        # following mwxml, we assume namespace 0 in cases where
+        # page.namespace is inconsistent with namespace_map
+        # this undoes the "correction" of the namespace in mwxml
+        
+        if cls.namespace not in inv_namespace_map:
+            cls.namespace = 0
+        if cls.namespace != 0:
+            cls.title = ':'.join([inv_namespace_map[cls.namespace], cls.title])
 
-    def __next__(self):
-        return next(self._pages)
+        cls.collapse_user = collapse_user
+        cls.revisions = cls._Page__revisions
+        return cls
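+
+    # e.g. a page in namespace 1 titled "Foo" comes back as "Talk:Foo"
+    # (assuming the dump names namespace 1 "Talk"); ids missing from
+    # inv_namespace_map fall back to namespace 0, leaving the title bare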
+
+    @staticmethod
+    def _correct_sha(rev_data):
+
+        if rev_data.deleted.text:
+            rev_data.text = ""
+            rev_data.text_chars = 0
+            rev_data.sha1 = ""
+            rev_data.revert = ""
+            rev_data.reverteds = ""
 
-class WikiqPage():
-    __slots__ = ('id', 'title', 'namespace', 'redirect',
-                 'restrictions', 'mwpage', '__revisions',
-                 'collapse_user')
-    
-    def __init__(self, page, namespace_map, collapse_user=False):
-        self.id = page.id
-        self.namespace = page.namespace
-        if page.namespace != 0:
-            self.title = ':'.join([namespace_map[page.namespace], page.title])
         else:
-            self.title = page.title
-        self.restrictions = page.restrictions
-        self.collapse_user = collapse_user
-        self.mwpage = page
-        self.__revisions = self.rev_list()
-
-    def rev_list(self):
-        # Outline for how we want to handle collapse_user=True
-        # iteration   rev.user   prev_rev.user   add prev_rev?
-        #         0          A            None           Never
-        #         1          A               A           False
-        #         2          B               A            True
-        #         3          A               B            True
-        #         4          A               A           False
-        # Post-loop                          A          Always
-        for i, rev in enumerate(self.mwpage):
-            # never yield the first time
-            if i == 0:
-                if self.collapse_user: 
-                    collapsed_revs = 1
-                    rev.collapsed_revs = collapsed_revs
+            if rev_data.text is None:
+                rev_data.text = ""
+
+        rev_data.text_chars = len(rev_data.text)
 
-            else:
-                if self.collapse_user:
-                    # yield if this is the last edit in a seq by a user and reset
-                    # also yield if we do know who the user is
-
-                    if rev.deleted.user or prev_rev.deleted.user:
-                        yield prev_rev
-                        collapsed_revs = 1
-                        rev.collapsed_revs = collapsed_revs
-
-                    elif not rev.user.text == prev_rev.user.text:
-                        yield prev_rev
-                        collapsed_revs = 1
-                        rev.collapsed_revs = collapsed_revs
-                    # otherwise, add one to the counter
-                    else:
-                        collapsed_revs += 1
-                        rev.collapsed_revs = collapsed_revs
-                # if collapse_user is false, we always yield
-                else:
-                    yield prev_rev
+        if hasattr(rev_data,"sha1") and rev_data.sha1 is not None:
+            text_sha1 = rev_data.sha1
+
+        else:
+            text_sha1 = sha1(bytes(rev_data.text, "utf8")).hexdigest()
+
+        rev_data.sha1 = text_sha1
 
+        return rev_data 
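+
+    # sketch of the intended contract: a revision with deleted text comes
+    # back with empty text/sha1/revert fields; otherwise a missing sha1 is
+    # recomputed as sha1(bytes(rev_data.text, "utf8")).hexdigest()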
+
+    # Outline for how we want to handle collapse_user=True
+    # iteration   rev.user   prev_rev.user   add prev_rev?
+    #         0          A            None           Never
+    #         1          A               A           False
+    #         2          B               A            True
+    #         3          A               B            True
+    #         4          A               A           False
+    # Post-loop                          A          Always
+    def __find_next_revision(self):
+
+        if self.prev_rev is None:
+            prev_rev = WikiqPage._correct_sha(next(self.revisions))
+            self.prev_rev = prev_rev
+        else:
+            prev_rev = self.prev_rev
+
+        if self.collapse_user: 
+            collapsed_revs = 1
+            self.prev_rev.collapsed_revs = collapsed_revs
+            prev_rev = self.prev_rev
+
+        for rev in self.revisions:
+            rev = WikiqPage._correct_sha(rev)
+            if self.collapse_user:
+                # return if this is the last edit in a sequence by a user and reset
+                # also return if we don't know who the user is
+
+                if rev.deleted.user or prev_rev.deleted.user:
+                    self.prev_rev = rev
+                    if prev_rev is not None:
+                        prev_rev.collapsed_revs = collapsed_revs
+                        return prev_rev
+
+                elif not rev.user.text == prev_rev.user.text:
+                    self.prev_rev = rev
+                    if prev_rev is not None:
+                        prev_rev.collapsed_revs = collapsed_revs
+                        return prev_rev
+
+                # otherwise, add one to the counter
+                else:
+                    collapsed_revs += 1
+                    rev.collapsed_revs = collapsed_revs
+                # if collapse_user is false, we always return the revision
+            else:
+                self.prev_rev = rev
+                if prev_rev is not None:
+                    return prev_rev
             prev_rev = rev
 
-        # also yield the final time
-        yield prev_rev
+        self.prev_rev = None
+
+        if self.collapse_user:
+            prev_rev.collapsed_revs = collapsed_revs
+        return prev_rev
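+
+    # e.g. with collapse_user=True, the edit sequence A,A,B,A,A is intended
+    # (per the outline above) to come back as three revisions with
+    # collapsed_revs of 2, 1, and 2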
 
-    def __iter__(self):
-        return self.__revisions
 
     def __next__(self):
-        return next(self.__revisions)
+        revision = self.__find_next_revision()
+        revision.page = self
+        return revision
+
+    def __iter__(self):
+        while True:
+            # catch the StopIteration from __find_next_revision so this
+            # generator ends cleanly instead of raising under PEP 479
+            try:
+                revision = self.__find_next_revision()
+            except StopIteration:
+                return
+            revision.page = self
+            yield revision
 
 class WikiqParser():
     
-    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False):
+    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None):
         """ 
         Parameters:
            persist : what persistence method to use. Takes a PersistMethod value
+           namespaces : optional list of namespace ids to include; pages in
+                        other namespaces are skipped
         """
-
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
@@ -136,23 +205,10 @@ class WikiqParser():
         self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
-        
-    def __get_namespace_from_title(self, title):
-        default_ns = None
-
-        for ns in self.namespaces:
-            # skip if the namespace is not defined
-            if ns == None:
-                default_ns = self.namespaces[ns]
-                continue
-
-            if title.startswith(ns + ":"):
-                return self.namespaces[ns]
-
-        # if we've made it this far with no matches, we return the default namespace
-        return default_ns
-
-    def process(self):
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
 
         # create a regex that creates the output filename
         # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
@@ -160,44 +216,74 @@ class WikiqParser():
         #                         input_filename)
 
         # Construct dump file iterator
-        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
+        self.dump = WikiqIterator.from_file(self.input_file, self.collapse_user)
+
+        self.diff_engine = None
+
+        if self.persist == PersistMethod.sequence:  
+            self.diff_engine = SequenceMatcher(tokenizer = wikitext_split)
+
+        if self.persist == PersistMethod.segment:
+            self.diff_engine = SegmentMatcher(tokenizer = wikitext_split)
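+
+        # usage sketch (hypothetical file handles):
+        #   WikiqParser(fh, sys.stdout, persist=PersistMethod.segment,
+        #               namespaces=[0]).process()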
 
-        # extract list of namspaces
-        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
+    # def __get_namespace_from_title(self, title):
+    #     default_ns = None
 
+    #     for ns in self.namespaces:
+    #         # skip if the namespace is not defined
+    #         if ns == None:
+    #             default_ns = self.namespaces[ns]
+    #             continue
+
+    #         if title.startswith(ns + ":"):
+    #             return self.namespaces[ns]
+
+    #     # if we've made it this far with no matches, we return the default namespace
+    #     return default_ns
+
+    # def _set_namespace(self, rev_docs):
+        
+    #     for rev_data in rev_docs:
+    #         if 'namespace' not in rev_data['page']:
+    #             namespace = self.__get_namespace_from_title(page['title'])
+    #             rev_data['page']['namespace'] = namespace
+    #         yield rev_data
+
+    def process(self):
         page_count = 0
         rev_count = 0
 
+        for page in self.dump:
+
+            # skip pages not in the namespaces we want
+            if self.namespace_filter is not None and page.namespace not in self.namespace_filter:
+                continue
 
-        # Iterate through pages
-        for page in dump:
             rev_detector = mwreverts.Detector()
 
             if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
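+                # the deque is a sliding window: a revision's persistence
+                # stats are finalized only after PERSISTENCE_RADIUS-1 later
+                # revisions are seen (or the page ends)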
 
                 if self.persist == PersistMethod.sequence:
-                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
-                                                    revert_radius=PERSISTENCE_RADIUS)
+                    state = DiffState(SequenceMatcher(tokenizer = wikitext_split),
+                                      revert_radius=PERSISTENCE_RADIUS)
 
                 elif self.persist == PersistMethod.segment:
-                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
-                                                    revert_radius=PERSISTENCE_RADIUS)
+                    state = DiffState(SegmentMatcher(tokenizer = wikitext_split),
+                                      revert_radius=PERSISTENCE_RADIUS)
 
-                # self.persist == PersistMethod.legacy
                 else:
                     from mw.lib import persistence
                     state = persistence.State()
 
             # Iterate through a page's revisions
             for rev in page:
-
                 rev_data = {'revid' : rev.id,
                             'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                             'articleid' : page.id,
                             'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
                             'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
+                            'namespace' : page.namespace,
                             'deleted' : "TRUE" if rev.deleted.text else "FALSE" } 
 
                 # if revisions are deleted, /many/ things will be missing
@@ -254,15 +340,15 @@ class WikiqParser():
                 #TODO missing: additions_size deletions_size
                 
                 # if collapse user was on, lets run that
-                if self.collapse_user:
-                    rev_data['collapsed_revs'] = rev.collapsed_revs
+                if self.collapse_user:
+                    rev_data['collapsed_revs'] = rev.collapsed_revs
 
                 if self.persist != PersistMethod.none:
                     if rev.deleted.text:
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
-
                         if self.persist != PersistMethod.legacy:
                             _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
 
@@ -274,11 +360,11 @@ class WikiqParser():
                         if len(window) == PERSISTENCE_RADIUS:
                             old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]
                             
-                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
+                            num_token_revs, num_tokens_added, num_tokens_removed = \
+                                calculate_persistence(old_tokens_added,
+                                                      old_tokens_removed,
+                                                      legacy=(self.persist == PersistMethod.legacy))
 
                             old_rev_data["token_revs"] = num_token_revs
-                            old_rev_data["tokens_added"] = num_tokens
-                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
+                            old_rev_data["tokens_added"] = num_tokens_added
+                            old_rev_data["tokens_removed"] = num_tokens_removed
                             old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1
 
                             self.print_rev_data(old_rev_data)
@@ -296,11 +382,12 @@ class WikiqParser():
                         continue
 
                     rev_id, rev_data, tokens_added, tokens_removed = item
-                    num_token_revs, num_tokens = calculate_persistence(tokens_added)
+
+                    num_token_revs, num_tokens_added, num_tokens_removed = \
+                        calculate_persistence(tokens_added,
+                                              tokens_removed,
+                                              legacy=(self.persist == PersistMethod.legacy))
 
                     rev_data["token_revs"] = num_token_revs
-                    rev_data["tokens_added"] = num_tokens
-                    rev_data["tokens_removed"] = len(tokens_removed)
+                    rev_data["tokens_added"] = num_tokens_added
+                    rev_data["tokens_removed"] = num_tokens_removed
                     rev_data["tokens_window"] = len(window)-(i+1)
                     
                     self.print_rev_data(rev_data)
@@ -368,6 +455,11 @@ parser.add_argument('-p', '--persistence', dest="persist", default=None, const='
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="ID number of namespace to include. Can be specified more than once.")
+
 args = parser.parse_args()
 
 # set persistence method
@@ -381,6 +473,11 @@ elif args.persist == "legacy":
 else:
     persist = PersistMethod.sequence
 
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
@@ -399,11 +496,11 @@ if len(args.dumpfiles) > 0:
             filename = os.path.join(output_dir, os.path.basename(filename))
             output_file = open_output_file(filename)
 
-            wikiq = WikiqParser(input_file, output_file, 
+        wikiq = WikiqParser(input_file, output_file, 
                             collapse_user=args.collapse_user,
-                                persist=persist,
-                            urlencode=args.urlencode)
-
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces = namespaces)
 
         wikiq.process()
 
@@ -415,7 +512,8 @@ else:
                         collapse_user=args.collapse_user,
                         persist=persist,
-                        persist_legacy=args.persist_legacy,
-                        urlencode=args.urlencode)
+                        urlencode=args.urlencode,
+                        namespaces = namespaces)
     wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
