# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
-import pdb
import argparse
import sys
import os, os.path
from collections import deque
from hashlib import sha1
-from mwxml import Dump
+from mwxml import Dump, Page
+from mwxml.errors import MalformedXML
from deltas.tokenizers import wikitext_split
-import mwpersistence
+from mwpersistence.state import DiffState
import mwreverts
from urllib.parse import quote
+
+from deltas import SequenceMatcher
+from deltas import SegmentMatcher
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7
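+# number of revisions over which token persistence is measured; also used
+# as the revert radius for revert detection and DiffState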
-from deltas import SequenceMatcher
-def calculate_persistence(tokens_added):
- return(sum([(len(x.revisions)-1) for x in tokens_added]),
- len(tokens_added))
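+
+# token types emitted by deltas' wikitext_split tokenizer; these lists let
+# calculate_persistence optionally ignore whitespace and punctuation/markup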
+ws_lex = ['break', 'whitespace']
+punct_lex = ['period', 'qmark', 'epoint', 'comma', 'colon', 'scolon',
+             'paren_open', 'paren_close', 'brack_open', 'brack_close',
+             'dbrack_close', 'dbrack_open', 'tab_close', 'tab_open',
+             'dcurly_close', 'dcurly_open', 'equals', 'bar', 'etc',
+             'bold', 'italic', 'tag', 'comment_end', 'comment_start']
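+
+# enum-like constants selecting the persistence computation method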
+class PersistMethod:
+ none = 0
+ sequence = 1
+ segment = 2
+ legacy = 3
-class WikiqIterator():
- def __init__(self, fh, collapse_user=False):
- self.fh = fh
- self.collapse_user = collapse_user
- self.mwiterator = Dump.from_file(self.fh)
- self.namespace_map = { ns.id : ns.name for ns in
- self.mwiterator.site_info.namespaces }
- self.__pages = self.load_pages()
+def calculate_persistence(tokens_added, tokens_removed, exclude_ws = False, exclude_punct = False, legacy = False):
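+    """
+    Return a tuple (persistent token revisions, tokens added, tokens removed).
+
+    A minimal doctest sketch; Tok is a hypothetical stand-in for the token
+    objects produced by mwpersistence, which carry a .type string and a
+    .revisions list of the revisions the token has survived:
+
+    >>> class Tok:
+    ...     def __init__(self, type, n_revs):
+    ...         self.type = type
+    ...         self.revisions = list(range(n_revs))
+    >>> calculate_persistence([Tok('word', 5)], [Tok('word', 1)])
+    (4, 1, 1)
+    >>> calculate_persistence([Tok('word', 5), Tok('whitespace', 5)],
+    ...                       [], exclude_ws = True)
+    (4, 1, 0)
+    """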
- def load_pages(self):
- for page in self.mwiterator:
- yield WikiqPage(page,
- namespace_map = self.namespace_map,
- collapse_user=self.collapse_user)
+ if not legacy:
+        cond = lambda t: (not (exclude_punct and t.type in punct_lex)
+                          and not (exclude_ws and t.type in ws_lex))
- def __iter__(self):
- return self.__pages
+ tokens_added = [t for t in tokens_added if cond(t)]
+ tokens_removed = [t for t in tokens_removed if cond(t)]
- def __next__(self):
- return next(self._pages)
+ return(sum([(len(x.revisions)-1) for x in tokens_added]),
+ len(tokens_added),
+ len(tokens_removed)
+ )
+
+class WikiqIterator(Dump):
+
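+    # mwxml's Dump drives parsing through classmethods, so there is no
+    # instance to configure before iteration begins; we stash collapse_user
+    # on the class itself and let process_item read it back.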
+ @classmethod
+ def from_file(cls, fh, collapse_user = False):
+ cls.fh = fh
+ cls.collapse_user = collapse_user
+ cls = super(WikiqIterator, cls).from_file(fh)
+ return cls
+
+ @classmethod
+ def process_item(cls, item_element, namespace_map):
+ if not hasattr(cls,'inv_namespace_map'):
+ cls.inv_namespace_map = {ns.id:name for name, ns in namespace_map.items()}
+
+ if item_element.tag == "page":
+ return WikiqPage.from_element(item_element, namespace_map, cls.inv_namespace_map, cls.collapse_user)
+ elif item_element.tag == "logitem":
+ return LogItem.from_element(item_element, namespace_map)
+ else:
+ raise MalformedXML("Expected to see <page> or <logitem>. " +
+ "Instead saw <{0}>".format(item_element.tag))
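+
+# Rough usage sketch (assuming `fh` is an open XML dump file handle):
+#
+#   dump = WikiqIterator.from_file(fh, collapse_user=True)
+#   for page in dump:
+#       for rev in page:
+#           print(rev.id, rev.collapsed_revs)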
-class WikiqPage():
+class WikiqPage(Page):
__slots__ = ('id', 'title', 'namespace', 'redirect',
- 'restrictions', 'mwpage', '__revisions',
- 'collapse_user')
-
- def __init__(self, page, namespace_map, collapse_user=False):
- self.id = page.id
- self.namespace = page.namespace
- if page.namespace != 0:
- self.title = ':'.join([namespace_map[page.namespace], page.title])
+ 'restrictions','collapse_user')
+
+ @classmethod
+ def from_element(cls, item_element, namespace_map, inv_namespace_map, collapse_user = False):
+ cls.prev_rev = None
+
+ cls = super(WikiqPage, cls).from_element(item_element, namespace_map)
+
+ # following mwxml, we assume namespace 0 in cases where
+ # page.namespace is inconsistent with namespace_map
+ # this undoes the "correction" of the namespace in mwxml
+
+ if cls.namespace not in inv_namespace_map:
+ cls.namespace = 0
+ if cls.namespace != 0:
+ cls.title = ':'.join([inv_namespace_map[cls.namespace], cls.title])
+
+ cls.collapse_user = collapse_user
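+        # expose mwxml.Page's name-mangled revisions iterator under a
+        # friendlier name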
+ cls.revisions = cls._Page__revisions
+ return cls
+
+ @staticmethod
+ def _correct_sha(rev_data):
+
+ if rev_data.deleted.text:
+ rev_data.text = ""
+ rev_data.text_chars = 0
+ rev_data.sha1 = ""
+ rev_data.revert = ""
+ rev_data.reverteds = ""
+
else:
- self.title = page.title
- self.restrictions = page.restrictions
- self.collapse_user = collapse_user
- self.mwpage = page
- self.__revisions = self.rev_list()
-
- def rev_list(self):
- # Outline for how we want to handle collapse_user=True
- # iteration rev.user prev_rev.user add prev_rev?
- # 0 A None Never
- # 1 A A False
- # 2 B A True
- # 3 A B True
- # 4 A A False
- # Post-loop A Always
- for i, rev in enumerate(self.mwpage):
- # never yield the first time
- if i == 0:
- if self.collapse_user:
- collapsed_revs = 1
- rev.collapsed_revs = collapsed_revs
+            if rev_data.text is None:
+ rev_data.text = ""
+
+ rev_data.text_chars = len(rev_data.text)
- else:
- if self.collapse_user:
- # yield if this is the last edit in a seq by a user and reset
- # also yield if we do know who the user is
-
- if rev.deleted.user or prev_rev.deleted.user:
- yield prev_rev
- collapsed_revs = 1
- rev.collapsed_revs = collapsed_revs
-
- elif not rev.user.text == prev_rev.user.text:
- yield prev_rev
- collapsed_revs = 1
- rev.collapsed_revs = collapsed_revs
- # otherwise, add one to the counter
- else:
- collapsed_revs += 1
- rev.collapsed_revs = collapsed_revs
- # if collapse_user is false, we always yield
- else:
- yield prev_rev
+ if hasattr(rev_data,"sha1") and rev_data.sha1 is not None:
+ text_sha1 = rev_data.sha1
+
+ else:
+ text_sha1 = sha1(bytes(rev_data.text, "utf8")).hexdigest()
+
+ rev_data.sha1 = text_sha1
+ return rev_data
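+
+    # Hedged sketch of _correct_sha, with SimpleNamespace standing in for
+    # mwxml revision objects:
+    #
+    #   from types import SimpleNamespace as NS
+    #   rev = NS(deleted=NS(text=False), text="foo", sha1=None)
+    #   rev = WikiqPage._correct_sha(rev)
+    #   # rev.sha1 is now the hex sha1 of "foo"; rev.text_chars == 3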
+
+ # Outline for how we want to handle collapse_user=True
+ # iteration rev.user prev_rev.user add prev_rev?
+ # 0 A None Never
+ # 1 A A False
+ # 2 B A True
+ # 3 A B True
+ # 4 A A False
+ # Post-loop A Always
+ def __find_next_revision(self):
+
+ if self.prev_rev is None:
+ prev_rev = WikiqPage._correct_sha(next(self.revisions))
+ self.prev_rev = prev_rev
+ else:
+ prev_rev = self.prev_rev
+
+ if self.collapse_user:
+ collapsed_revs = 1
+ self.prev_rev.collapsed_revs = collapsed_revs
+ prev_rev = self.prev_rev
+
+ for rev in self.revisions:
+ rev = WikiqPage._correct_sha(rev)
+ if self.collapse_user:
+            # return if this is the last edit in a sequence by a user, and reset
+            # also return if we don't know who the user is
+
+ if rev.deleted.user or prev_rev.deleted.user:
+ self.prev_rev = rev
+ if prev_rev is not None:
+ prev_rev.collapsed_revs = collapsed_revs
+ return prev_rev
+
+ elif not rev.user.text == prev_rev.user.text:
+ self.prev_rev = rev
+ if prev_rev is not None:
+ prev_rev.collapsed_revs = collapsed_revs
+ return prev_rev
+
+ # otherwise, add one to the counter
+ else:
+ collapsed_revs += 1
+ rev.collapsed_revs = collapsed_revs
+            # if collapse_user is false, we always return
+ else:
+ self.prev_rev = rev
+ if prev_rev is not None:
+ return prev_rev
prev_rev = rev
- # also yield the final time
- yield prev_rev
+        # revisions exhausted: hand back the final revision exactly once
+        self.prev_rev = None
+
+        if self.collapse_user:
+            prev_rev.collapsed_revs = collapsed_revs
+        return prev_rev
- def __iter__(self):
- return self.__revisions
def __next__(self):
- return next(self.__revisions)
+ revision = self.__find_next_revision()
+ revision.page = self
+ return revision
-class WikiqParser():
+    def __iter__(self):
+        while True:
+            try:
+                revision = self.__find_next_revision()
+            except StopIteration:
+                # PEP 479: don't let StopIteration escape a generator
+                return
+            revision.page = self
+            yield revision
- def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
-
+class WikiqParser():
+
+    def __init__(self, input_file, output_file, collapse_user=False, persist=PersistMethod.none, urlencode=False, namespaces=None):
+        """
+        Parameters:
+           persist : which persistence method to use; takes a PersistMethod value
+        """
self.input_file = input_file
self.output_file = output_file
self.collapse_user = collapse_user
self.persist = persist
- self.persist_legacy = persist_legacy
self.printed_header = False
-        self.namespaces = []
self.urlencode = urlencode
-
- def __get_namespace_from_title(self, title):
- default_ns = None
-
- for ns in self.namespaces:
- # skip if the namespace is not defined
- if ns == None:
- default_ns = self.namespaces[ns]
- continue
-
- if title.startswith(ns + ":"):
- return self.namespaces[ns]
-
- # if we've made it this far with no matches, we return the default namespace
- return default_ns
-
- def process(self):
+ if namespaces is not None:
+ self.namespace_filter = set(namespaces)
+ else:
+ self.namespace_filter = None
-        # create a regex that creates the output filename
-        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
-        #                         input_filename)
# Construct dump file iterator
- dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
+ self.dump = WikiqIterator.from_file(self.input_file, self.collapse_user)
+
+ self.diff_engine = None
+
+ if self.persist == PersistMethod.sequence:
+ self.diff_engine = SequenceMatcher(tokenizer = wikitext_split)
+
+        elif self.persist == PersistMethod.segment:
+ self.diff_engine = SegmentMatcher(tokenizer = wikitext_split)
- # extract list of namspaces
- self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
+
+
+ def process(self):
page_count = 0
rev_count = 0
+ for page in self.dump:
+
+ # skip pages not in the namespaces we want
+ if self.namespace_filter is not None and page.namespace not in self.namespace_filter:
+ continue
- # Iterate through pages
- for page in dump:
rev_detector = mwreverts.Detector()
- if self.persist or self.persist_legacy:
+ if self.persist != PersistMethod.none:
window = deque(maxlen=PERSISTENCE_RADIUS)
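+                # `window` buffers each revision until PERSISTENCE_RADIUS - 1
+                # subsequent revisions have been seen, so persistence can be
+                # measured over the revisions that follow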
- if not self.persist_legacy:
- state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+ if self.persist == PersistMethod.sequence:
+ state = DiffState(SequenceMatcher(tokenizer = wikitext_split),
+ revert_radius=PERSISTENCE_RADIUS)
+
+ elif self.persist == PersistMethod.segment:
+ state = DiffState(SegmentMatcher(tokenizer = wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
else:
# Iterate through a page's revisions
for rev in page:
-
rev_data = {'revid' : rev.id,
'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
'articleid' : page.id,
'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
'title' : '"' + page.title + '"',
- 'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
+ 'namespace' : page.namespace,
'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
# if revisions are deleted, /many/ things will be missing
#TODO missing: additions_size deletions_size
                # if collapse user was on, let's run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs
- if self.persist or self.persist_legacy:
+ if self.persist != PersistMethod.none:
if rev.deleted.text:
-
for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
-                            old_rev_data[k] = None
+                            rev_data[k] = None
else:
-
- if not self.persist_legacy:
+
+ if self.persist != PersistMethod.legacy:
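+                        # DiffState.update(text, revision_id) returns
+                        # (operations, tokens_added, tokens_removed); each
+                        # token records the revisions it has persisted through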
_, tokens_added, tokens_removed = state.update(rev.text, rev.id)
else:
if len(window) == PERSISTENCE_RADIUS:
old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]
- num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
+                            num_token_revs, num_tokens_added, num_tokens_removed = \
+                                calculate_persistence(old_tokens_added,
+                                                      old_tokens_removed,
+                                                      legacy=(self.persist == PersistMethod.legacy))
old_rev_data["token_revs"] = num_token_revs
- old_rev_data["tokens_added"] = num_tokens
- old_rev_data["tokens_removed"] = len(old_tokens_removed)
+ old_rev_data["tokens_added"] = num_tokens_added
+ old_rev_data["tokens_removed"] = num_tokens_removed
old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1
self.print_rev_data(old_rev_data)
rev_count += 1
- if self.persist or self.persist_legacy:
+ if self.persist != PersistMethod.none:
# print out metadata for the last RADIUS revisions
for i, item in enumerate(window):
# if the window was full, we've already printed item 0
continue
rev_id, rev_data, tokens_added, tokens_removed = item
- num_token_revs, num_tokens = calculate_persistence(tokens_added)
+
+            num_token_revs, num_tokens_added, num_tokens_removed = \
+                calculate_persistence(tokens_added,
+                                      tokens_removed,
+                                      legacy=(self.persist == PersistMethod.legacy))
rev_data["token_revs"] = num_token_revs
- rev_data["tokens_added"] = num_tokens
- rev_data["tokens_removed"] = len(tokens_removed)
+ rev_data["tokens_added"] = num_tokens_added
+ rev_data["tokens_removed"] = num_tokens_removed
rev_data["tokens_window"] = len(window)-(i+1)
self.print_rev_data(rev_data)
parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision made by a user within each sequence of consecutive edits by that user. This can be useful for addressing issues with text persistence measures.")
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
- help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str,
+                    choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. Use -p=segment for an advanced calculation method that is robust to content moves but may be very slow. Use -p=legacy for the legacy behavior.")
parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
-parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
- help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of a namespace to include. Can be specified more than once.")
+
+
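+# Example invocations (dump path is illustrative):
+#   ./wikiq enwiki-sample.xml.bz2 -p segment -n 0 -n 1
+#   bzcat enwiki-sample.xml.bz2 | ./wikiq --collapse-user -p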
args = parser.parse_args()
+# set persistence method
+
+if args.persist is None:
+ persist = PersistMethod.none
+elif args.persist == "segment":
+ persist = PersistMethod.segment
+elif args.persist == "legacy":
+ persist = PersistMethod.legacy
+else:
+ persist = PersistMethod.sequence
+
+if args.namespace_filter is not None:
+ namespaces = args.namespace_filter
+else:
+ namespaces = None
+
if len(args.dumpfiles) > 0:
for filename in args.dumpfiles:
input_file = open_input_file(filename)
wikiq = WikiqParser(input_file, output_file,
collapse_user=args.collapse_user,
- persist=args.persist,
- persist_legacy=args.persist_legacy,
- urlencode=args.urlencode)
-
+ persist=persist,
+ urlencode=args.urlencode,
+ namespaces = namespaces)
wikiq.process()
else:
wikiq = WikiqParser(sys.stdin, sys.stdout,
collapse_user=args.collapse_user,
- persist=args.persist,
+ persist=persist,
-                    persist_legacy=args.persist_legacy,
- urlencode=args.urlencode)
+ urlencode=args.urlencode,
+ namespaces = namespaces)
wikiq.process()