checking in work to deepen migration to new mediawikiutils
diff --git a/wikiq b/wikiq
index 9260f35710d029e99f2c4bffe470aa000e7f1615..f8e5fd616aeeb094258c2523a6daa39b8064c437 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -1,9 +1,9 @@
 #!/usr/bin/env python3
 
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
 import pdb
 import argparse
 import sys
 import os, os.path
@@ -13,125 +13,237 @@ from subprocess import Popen, PIPE
 from collections import deque
 from hashlib import sha1
 
-from mw.xml_dump import Iterator
+from mwxml import Dump, Page, LogItem
+from mwxml.errors import MalformedXML
 
 from deltas.tokenizers import wikitext_split
+from mwdiffs.utilities import dump2diffs
 import mwpersistence
+from mwpersistence.state import Version, apply_opdocs, apply_operations, persist_revision_once
+
+from mwpersistence import Token
+from mwpersistence.utilities import diffs2persistence
 import mwreverts
 from urllib.parse import quote
+
+from deltas import SequenceMatcher
+from deltas import SegmentMatcher
 TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
-from deltas import SequenceMatcher
+
+# this is a simple override of mwpersistence.DiffState that doesn't do anything special for reverts. 
+class WikiqDiffState(mwpersistence.DiffState):
+    def _update(self, text=None, checksum=None, opdocs=None, revision=None):
+        if checksum is None:
+            if text is None:
+                raise TypeError("Either 'text' or 'checksum' must be " +
+                                "specified.")
+            else:
+                checksum = sha1(bytes(text, 'utf8')).hexdigest()
+
+        current_version = Version()
+
+        # the main difference we have is that we don't do anything special for reverts
+        if opdocs is not None:
+            transition = apply_opdocs(opdocs, self.last.tokens or [])
+            current_version.tokens, _, _ = transition
+        else:
+            # NOTICE: HEAVY COMPUTATION HERE!!!
+            #
+            # Diffs usually run in O(n^2) -- O(n^3) time and most
+            # tokenizers produce a lot of tokens.
+            if self.diff_processor is None:
+                raise RuntimeError("DiffState cannot process raw text " +
+                                   "without a diff_engine specified.")
+            operations, _, current_tokens = \
+                self.diff_processor.process(text, token_class=Token)
+
+            transition = apply_operations(operations,
+                                          self.last.tokens or [],
+                                          current_tokens)
+            current_version.tokens, _, _ = transition
+
+        # Record persistence
+        persist_revision_once(current_version.tokens, revision)
+
+        # Update last version
+        self.last = current_version
+
+        # Return the transitioned state
+        return transition
+
+class PersistMethod:
+    none = 0
+    sequence = 1
+    segment = 2
+    legacy = 3
 
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
+class WikiqIterator(Dump):
+
+    @classmethod
+    def from_file(cls, fh, collapse_user = False):
+        dump = super(WikiqIterator, cls).from_file(fh)
+        dump.fh = fh
+        dump.collapse_user = collapse_user
+        dump.namespace_map = { ns.id : ns.name for ns in
+                               dump.site_info.namespaces }
+        return dump
+
+    @classmethod
+    def process_item(cls, item_element, namespace_map, collapse_user = False):
+        if item_element.tag == "page":
+            return WikiqPage.from_element(item_element, namespace_map, collapse_user)
+        elif item_element.tag == "logitem":
+            return LogItem.from_element(item_element, namespace_map)
+        else:
+            raise MalformedXML("Expected to see <page> or <logitem>.  " +
+                               "Instead saw <{0}>".format(item_element.tag))
 
-class WikiqIterator():
-    def __init__(self, fh, collapse_user=False):
-        self.fh = fh
-        self.collapse_user = collapse_user
-        self.mwiterator = Iterator.from_file(self.fh)
-        self.__pages = self.load_pages()
+class WikiqPage(Page):
+    __slots__ = ('id', 'title', 'namespace', 'redirect',
+                 'restrictions','collapse_user')
+        
+    @classmethod
+    def from_element(cls, item_element, namespace_map, collapse_user = False):
+        inv_namespace_map = {ns.id: name for name, ns in namespace_map.items()}

-    def load_pages(self):
-        for page in self.mwiterator:
-            yield WikiqPage(page, collapse_user=self.collapse_user)
+        page = super(WikiqPage, cls).from_element(item_element, namespace_map)
+        page.prev_rev = None

-    def __iter__(self):
-        return self.__pages
+        # following mwxml, we assume namespace 0 in cases where
+        # page.namespace is inconsistent with namespace_map;
+        # this undoes the "correction" of the namespace in mwxml
+        if page.namespace not in inv_namespace_map:
+            page.namespace = 0
+        if page.namespace != 0:
+            page.title = ':'.join([inv_namespace_map[page.namespace], page.title])

-    def __next__(self):
-        return next(self._pages)
+        page.collapse_user = collapse_user
+        page.revisions = page._Page__revisions
+        return page
 
-class WikiqPage():
-    __slots__ = ('id', 'title', 'namespace', 'redirect',
-                 'restrictions', 'mwpage', '__revisions',
-                 'collapse_user')
-    
-    def __init__(self, page, collapse_user=False):
-        self.id = page.id
-        self.title = page.title
-        self.namespace = page.namespace
-        self.redirect = page.redirect
-        self.restrictions = page.restrictions
-        
-        self.collapse_user = collapse_user
-        self.mwpage = page
-        self.__revisions = self.rev_list()
-
-    def rev_list(self):
-        # Outline for how we want to handle collapse_user=True
-        # iteration   rev.user   prev_rev.user   add prev_rev?
-        #         0          A            None           Never
-        #         1          A               A           False
-        #         2          B               A            True
-        #         3          A               B            True
-        #         4          A               A           False
-        # Post-loop                          A          Always
-        for i, rev in enumerate(self.mwpage):
-            # never yield the first time
-            if i == 0:
-                if self.collapse_user: 
-                    collapsed_revs = 1
-                    rev.collapsed_revs = collapsed_revs
+    @staticmethod
+    def _correct_sha(rev_data):
 
-            else:
-                if self.collapse_user:
-                    # yield if this is the last edit in a seq by a user and reset
-                    if not rev.contributor.user_text == prev_rev.contributor.user_text:
-                        yield prev_rev
-                        collapsed_revs = 1
-                        rev.collapsed_revs = collapsed_revs
-                    # otherwise, add one to the counter
-                    else:
-                        collapsed_revs += 1
-                        rev.collapsed_revs = collapsed_revs
-                # if collapse_user is false, we always yield
-                else:
-                    yield prev_rev
+        if rev_data.deleted.text:
+            rev_data.text = ""
+            rev_data.text_chars = 0
+            rev_data.sha1 = ""
+            rev_data.revert = ""
+            rev_data.reverteds = ""
+
+        else:
+            if rev_data.text is None :
+                rev_data.text = ""
+                
+        rev_data.text_chars = len(rev_data.text)
+
+        if hasattr(rev_data,"sha1") and rev_data.sha1 is not None:
+            text_sha1 = rev_data.sha1
+
+        else:
+            text_sha1 = sha1(bytes(rev_data.text, "utf8")).hexdigest()
+
+        rev_data.sha1 = text_sha1
+
+        return rev_data 
 
+    # Outline for how we want to handle collapse_user=True
+    # iteration   rev.user   prev_rev.user   add prev_rev?
+    #         0          A            None           Never
+    #         1          A               A           False
+    #         2          B               A            True
+    #         3          A               B            True
+    #         4          A               A           False
+    # Post-loop                          A          Always
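+    # (a standalone sketch of this policy appears after this hunk)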
+    def __find_next_revision(self):
+
+        if self.prev_rev is None:
+            prev_rev = WikiqPage._correct_sha(next(self.revisions))
+            self.prev_rev = prev_rev
+        else:
+            prev_rev = self.prev_rev
+
+        if self.collapse_user:
+            collapsed_revs = 1
+            prev_rev.collapsed_revs = collapsed_revs
+
+        for rev in self.revisions:
+            rev = WikiqPage._correct_sha(rev)
+            if self.collapse_user:
+                # yield if this is the last edit in a seq by a user and reset;
+                # also yield if we don't know who the user is
+
+                if rev.deleted.user or prev_rev.deleted.user:
+                    self.prev_rev = rev
+                    if prev_rev is not None:
+                        prev_rev.collapsed_revs = collapsed_revs
+                        return prev_rev
+
+                elif not rev.user.text == prev_rev.user.text:
+                    self.prev_rev = rev
+                    if prev_rev is not None:
+                        prev_rev.collapsed_revs = collapsed_revs
+                        return prev_rev
+
+                # otherwise, add one to the counter
+                else:
+                    collapsed_revs += 1
+                    rev.collapsed_revs = collapsed_revs
+                # if collapse_user is false, we always yield
+            else:
+                self.prev_rev = rev
+                if prev_rev is not None:
+                    return prev_rev
             prev_rev = rev
-        # also yield the final time
-        yield prev_rev
 
-    def __iter__(self):
-        return self.__revisions
+        self.prev_rev = None
+
+        if self.collapse_user:
+            prev_rev.collapsed_revs = collapsed_revs
+        return prev_rev
+
 
     def __next__(self):
-        return next(self.__revisions)
+        revision = self.__find_next_revision()
+        revision.page = self
+        return revision
 
-class WikiqParser():
+    def __iter__(self):
+        while True:
+            try:
+                revision = self.__find_next_revision()
+            except StopIteration:
+                # under PEP 479, a StopIteration escaping a generator becomes
+                # a RuntimeError, so turn exhaustion into a clean stop
+                return
+            revision.page = self
+            yield revision
 
+    # def __iter__(self):
+    #     return self.__revisions
 
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
-        
+    # def __next__(self):
+    #     return next(self.__revisions)
+
+class WikiqParser():
+    
+    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None):
+        """ 
+        Parameters:
+           persist : what persistence method to use. Takes a PersistMethod value
+        """
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
         self.persist = persist
-        self.persist_legacy = persist_legacy
         self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
-        
-    def __get_namespace_from_title(self, title):
-        default_ns = None
-
-        for ns in self.namespaces:
-            # skip if the namespace is not defined
-            if ns == None:
-                default_ns = self.namespaces[ns]
-                continue
-
-            if title.startswith(ns + ":"):
-                return self.namespaces[ns]
-
-        # if we've made it this far with no matches, we return the default namespace
-        return default_ns
-
-    def process(self):
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
 
         # create a regex that creates the output filename
         # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
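
A standalone sketch (not part of the patch) of the collapse_user policy tabulated in the WikiqPage hunk above: only the last revision in each consecutive run of edits by the same user is emitted, tagged with the length of its run. The Rev namedtuple is a hypothetical stand-in for mwxml revision objects.

    from collections import namedtuple

    Rev = namedtuple('Rev', ['id', 'user'])

    def collapse(revs):
        prev_rev = None
        collapsed_revs = 0
        for rev in revs:
            if prev_rev is not None and rev.user != prev_rev.user:
                yield prev_rev, collapsed_revs  # run ended: emit its final revision
                collapsed_revs = 0
            collapsed_revs += 1
            prev_rev = rev
        if prev_rev is not None:  # post-loop: always emit the final run
            yield prev_rev, collapsed_revs

    revs = [Rev(0, 'A'), Rev(1, 'A'), Rev(2, 'B'), Rev(3, 'A'), Rev(4, 'A')]
    print(list(collapse(revs)))
    # -> [(Rev(id=1, user='A'), 2), (Rev(id=2, user='B'), 1), (Rev(id=4, user='A'), 2)]
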
@@ -139,24 +251,60 @@ class WikiqParser():
         #                         input_filename)
 
         # Construct dump file iterator
-        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
+        self.dump = WikiqIterator.from_file(self.input_file, self.collapse_user)
+      
+        self.diff_engine = None
+
+        if self.persist == PersistMethod.sequence:
+            self.diff_engine = SequenceMatcher(tokenizer = wikitext_split)
+
+        elif self.persist == PersistMethod.segment:
+            self.diff_engine = SegmentMatcher(tokenizer = wikitext_split)
+
+    # def __get_namespace_from_title(self, title):
+    #     default_ns = None
+
+    #     for ns in self.namespaces:
+    #         # skip if the namespace is not defined
+    #         if ns == None:
+    #             default_ns = self.namespaces[ns]
+    #             continue
+
+    #         if title.startswith(ns + ":"):
+    #             return self.namespaces[ns]
 
-        # extract list of namspaces
-        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
+    #     # if we've made it this far with no matches, we return the default namespace
+    #     return default_ns
 
+    # def _set_namespace(self, rev_docs):
+        
+    #     for rev_data in rev_docs:
+    #         if 'namespace' not in rev_data['page']:
+    #             namespace = self.__get_namespace_from_title(page['title'])
+    #             rev_data['page']['namespace'] = namespace
+    #         yield rev_data
+
+    def process(self):
         page_count = 0
         rev_count = 0
 
+        for page in self.dump:
+
+            # skip pages not in the namespaces we want
+            if self.namespace_filter is not None and page.namespace not in self.namespace_filter:
+                continue
 
-        # Iterate through pages
-        for page in dump:
             rev_detector = mwreverts.Detector()
 
-            if self.persist or self.persist_legacy:
+            if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
 
-                if not self.persist_legacy:
-                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+                if self.persist == PersistMethod.sequence:
+                    state = WikiqDiffState(SequenceMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                elif self.persist == PersistMethod.segment:
+                    state = WikiqDiffState(SegmentMatcher(tokenizer = wikitext_split),
                                                     revert_radius=PERSISTENCE_RADIUS)
 
                 else:
@@ -165,27 +313,31 @@ class WikiqParser():
 
             # Iterate through a page's revisions
             for rev in page:
-
                 rev_data = {'revid' : rev.id,
                             'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                             'articleid' : page.id,
-                            'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
+                            'editor_id' : "" if rev.deleted.user or rev.user.id is None else rev.user.id,
                             'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
-                            'deleted' : "TRUE" if rev.text.deleted else "FALSE" } 
+                            'namespace' : page.namespace,
+                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" } 
 
                 # if revisions are deleted, /many/ things will be missing
-                if rev.text.deleted:
+                if rev.deleted.text:
                     rev_data['text_chars'] = ""
                     rev_data['sha1'] = ""
                     rev_data['revert'] = ""
                     rev_data['reverteds'] = ""
 
                 else:
+                    # rev.text can be None if the page has no text
+                    if not rev.text:
+                        rev.text = ""
                     # if text exists, we'll check for a sha1 and generate one otherwise
                     if rev.sha1:
                         text_sha1 = rev.sha1
                     else:
                         text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
                     
                     rev_data['sha1'] = text_sha1
@@ -206,10 +358,10 @@ class WikiqParser():
                 # if the fact that the edit was minor can be hidden, this might be an issue
                 rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
 
-                if rev.contributor.user_text:
+                if not rev.deleted.user:
                     # wrap user-defined editors in quotes for fread
-                    rev_data['editor'] = '"' + rev.contributor.user_text + '"'
-                    rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
+                    rev_data['editor'] = '"' + rev.user.text + '"'
+                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"
                     
                 else:
                     rev_data['anon'] = ""
@@ -223,20 +375,20 @@ class WikiqParser():
                 #TODO missing: additions_size deletions_size
                 
                 # if collapse user was on, let's run that
                 if self.collapse_user:
                     rev_data['collapsed_revs'] = rev.collapsed_revs
 
-                if self.persist or self.persist_legacy:
-                    if rev.text.deleted:
+                if self.persist != PersistMethod.none:
+                    if rev.deleted.text:
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
-
-                        if not self.persist_legacy:
+                        if self.persist != PersistMethod.legacy:
                             _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
 
                         else:
-                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id,text_sha1)
+                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
                             
                         window.append((rev.id, rev_data, tokens_added, tokens_removed))
                         
@@ -257,7 +409,7 @@ class WikiqParser():
 
                 rev_count += 1
 
-            if self.persist or self.persist_legacy:
+            if self.persist != PersistMethod.none:
                 # print out metadata for the last RADIUS revisions
                 for i, item in enumerate(window):
                     # if the window was full, we've already printed item 0
@@ -284,7 +436,7 @@ class WikiqParser():
         if self.urlencode:
             for field in TO_ENCODE:
                 rev_data[field] = quote(str(rev_data[field]))
-            
+
         if not self.printed_header:
             print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
             self.printed_header = True
@@ -331,17 +483,35 @@ parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
 parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                     help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
 
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. Use -p=segment for an advanced persistence calculation method that is robust to content moves; this might be very slow. Use -p=legacy for legacy behavior.")
 
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
-parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
-                    help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="ID number of a namespace to include. Can be specified more than once.")
 
 args = parser.parse_args()
 
+# set persistence method
+
+if args.persist is None:
+    persist = PersistMethod.none
+elif args.persist == "segment":
+    persist = PersistMethod.segment
+elif args.persist == "legacy":
+    persist = PersistMethod.legacy
+else:
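+    # both a bare -p ('' via const) and an explicit -p=sequence land here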
+    persist = PersistMethod.sequence
+
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
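
A standalone check (not part of the patch) of the -p flag defined above: with nargs='?' and const='', a bare -p parses as the empty string, which the mapping above routes to PersistMethod.sequence.

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('-p', '--persistence', dest="persist", default=None, const='',
                   type=str, choices=['', 'segment', 'sequence', 'legacy'], nargs='?')

    print(p.parse_args([]).persist)                 # None      -> PersistMethod.none
    print(p.parse_args(['-p']).persist)             # ''        -> PersistMethod.sequence
    print(p.parse_args(['-p', 'segment']).persist)  # 'segment' -> PersistMethod.segment
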
@@ -362,10 +532,9 @@ if len(args.dumpfiles) > 0:
 
         wikiq = WikiqParser(input_file, output_file, 
                             collapse_user=args.collapse_user,
-                            persist=args.persist,
-                            persist_legacy=args.persist_legacy,
-                            urlencode=args.urlencode)
-
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces = namespaces)
 
         wikiq.process()
 
@@ -375,9 +544,10 @@ if len(args.dumpfiles) > 0:
 else:
     wikiq = WikiqParser(sys.stdin, sys.stdout,
                         collapse_user=args.collapse_user,
-                        persist=args.persist,
-                        persist_legacy=args.persist_legacy,
-                        urlencode=args.urlencode)
+                        persist=persist,
+                        urlencode=args.urlencode,
+                        namespaces = namespaces)
     wikiq.process()
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
