self.assertEqual(test['tokens_added'][0],7)
self.assertEqual(test['tokens_added'][1],10)
self.assertEqual(test['tokens_added'][2],0)
- self.assertEqual(test['tokens_added'][3],11)
+ self.assertEqual(test['tokens_added'][3],8)
+ self.assertEqual(test['tokens_added'][4],0)
+ self.assertEqual(test['tokens_removed'][0],0)
+ self.assertEqual(test['tokens_removed'][1],0)
+ self.assertEqual(test['tokens_removed'][2],10)
+ self.assertEqual(test['tokens_removed'][3],4)
+ self.assertEqual(test['tokens_removed'][4],0)
+ self.assertEqual(test['token_revs'][0],8*3)
+ self.assertEqual(test['token_revs'][1],0)
+ self.assertEqual(test['token_revs'][2],0)
+ self.assertEqual(test['token_revs'][3],0)
+ self.assertEqual(test['token_revs'][4],0)
+
+ baseline = pd.read_table(baseline_file)
+ assert_frame_equal(test,baseline)
+
+ def test_segment_persistence_exclude_ws(self):
+ test_filename = "segment_excludews_" + self.wikiq_out_name
+ test_file = os.path.join(self.test_output_dir, test_filename)
+ if os.path.exists(test_file):
+ os.remove(test_file)
+
+ call = self.base_call.format(self.input_file, self.test_output_dir)
+ call = call + " --url-encode --persistence segment --exclude-whitespace"
+ print(os.path.abspath('.'))
+ print(call)
+ proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
+ proc.wait()
+
+ copyfile(self.call_output, test_file)
+ baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+
+ # as a test let's make sure that we get equal data frames
+ test = pd.read_table(test_file)
+ self.assertEqual(test['tokens_added'][0],4)
+ self.assertEqual(test['tokens_added'][1],5)
+ self.assertEqual(test['tokens_added'][2],0)
+ self.assertEqual(test['tokens_added'][3],6)
self.assertEqual(test['tokens_added'][4],0)
self.assertEqual(test['tokens_removed'][0],0)
self.assertEqual(test['tokens_removed'][1],0)
self.assertEqual(test['tokens_removed'][2],0)
- self.assertEqual(test['tokens_removed'][3],7)
+ self.assertEqual(test['tokens_removed'][3],4)
self.assertEqual(test['tokens_removed'][4],0)
- self.assertEqual(test['token_revs'][0],7*3)
+ self.assertEqual(test['token_revs'][0],4*3)
self.assertEqual(test['token_revs'][1],0)
self.assertEqual(test['token_revs'][2],0)
self.assertEqual(test['token_revs'][3],0)
baseline = pd.read_table(baseline_file)
assert_frame_equal(test,baseline)
+
+ def test_pwr_segment_collapse(self):
+ test_filename = "persistence_segment_collapse_" + self.wikiq_out_name
+ test_file = os.path.join(self.test_output_dir, test_filename)
+ if os.path.exists(test_file):
+ os.remove(test_file)
+
+ call = self.base_call.format(self.input_file, self.test_output_dir)
+ call = call + " --persistence segment --collapse-user"
+ print(call)
+ proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
+ proc.wait()
+
+ copyfile(self.call_output, test_file)
+
+ baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+
+ test = pd.read_table(test_file)
+ print(test)
+ baseline = pd.read_table(baseline_file)
+ assert_frame_equal(test,baseline)
+
+
def test_pwr_legacy(self):
test_filename = "persistence_legacy_" + self.wikiq_out_name
test_file = os.path.join(self.test_output_dir, test_filename)
# 2 B A True
# 3 A B True
# 4 A A False
- # Post-loop A Always
+ # Post-loop A Always
def __find_next_revision(self):
-
if self.prev_rev is None:
prev_rev = WikiqPage._correct_sha(next(self.revisions))
self.prev_rev = prev_rev
class WikiqParser():
- def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None):
+ def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None, exclude_punct = False, exclude_ws = False):
"""
Parameters:
persist : what persistence method to use. Takes a PersistMethod value
else:
self.namespace_filter = None
- # create a regex that creates the output filename
- # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
- # r'output/wikiq-\1-\2.tsv',
- # input_filename)
-
+ self.exclude_punct = exclude_punct
+ self.exclude_ws = exclude_ws
+
# Construct dump file iterator
self.dump = WikiqIterator.from_file(self.input_file, self.collapse_user)
if self.persist == PersistMethod.segment:
self.diff_engine = SegmentMatcher(tokenizer = wikitext_split)
- # def __get_namespace_from_title(self, title):
- # default_ns = None
-
- # for ns in self.namespaces:
- # # skip if the namespace is not defined
- # if ns == None:
- # default_ns = self.namespaces[ns]
- # continue
-
- # if title.startswith(ns + ":"):
- # return self.namespaces[ns]
-
- # # if we've made it this far with no matches, we return the default namespace
- # return default_ns
-
- # def _set_namespace(self, rev_docs):
-
- # for rev_data in rev_docs:
- # if 'namespace' not in rev_data['page']:
- # namespace = self.__get_namespace_from_title(page['title'])
- # rev_data['page']['namespace'] = namespace
- # yield rev_data
-
def process(self):
page_count = 0
rev_count = 0
rev_data['anon'] = ""
rev_data['editor'] = ""
- #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
- # redirect = True
- #else:
- # redirect = False
-
- #TODO missing: additions_size deletions_size
+ # we can easily add redirect info
+ # rev_data['redirect'] = rev.page.redirect
- # if collapse user was on, lets run that
- # if self.collapse_user:
- # rev_data.collapsed_revs = rev.collapsed_revs
+ if self.collapse_user:
+ rev_data['collapsed_revs'] = rev.collapsed_revs
if self.persist != PersistMethod.none:
if rev.deleted.text:
if len(window) == PERSISTENCE_RADIUS:
old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]
- num_token_revs, num_tokens_added, num_tokens_removed = calculate_persistence(old_tokens_added, old_tokens_removed, legacy = self.persist == PersistMethod.legacy)
-
+ num_token_revs, \
+ num_tokens_added, \
+ num_tokens_removed = \
+ calculate_persistence(
+ old_tokens_added,
+ old_tokens_removed,
+ exclude_ws = self.exclude_ws,
+ exclude_punct = self.exclude_punct,
+ legacy = self.persist == PersistMethod.legacy)
+
old_rev_data["token_revs"] = num_token_revs
old_rev_data["tokens_added"] = num_tokens_added
old_rev_data["tokens_removed"] = num_tokens_removed
rev_id, rev_data, tokens_added, tokens_removed = item
- num_token_revs, num_tokens_added, num_tokens_removed = calculate_persistence(tokens_added, tokens_removed, legacy = self.persist == PersistMethod.legacy)
+ num_token_revs, \
+ num_tokens_added, \
+ num_tokens_removed = calculate_persistence(
+ tokens_added,
+ tokens_removed,
+ exclude_ws = self.exclude_ws,
+ exclude_punct = self.exclude_punct,
+ legacy = self.persist == PersistMethod.legacy)
+
rev_data["token_revs"] = num_token_revs
rev_data["tokens_added"] = num_tokens_added
parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
help="Id number of namspace to include. Can be specified more than once.")
+parser.add_argument('--exclude-whitespace', dest="exclude_ws", action="store_true",
+ help="Flag to remove whitespace from persistence measures.")
+parser.add_argument('--exclude-punctuation', dest="exclude_punct", action="store_true",
+ help="Flag to remove punctuation from persistence measures.")
args = parser.parse_args()
collapse_user=args.collapse_user,
persist=persist,
urlencode=args.urlencode,
- namespaces = namespaces)
+ namespaces = namespaces,
+ exclude_punct = args.exclude_punct,
+ exclude_ws = args.exclude_ws)
wikiq.process()