# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
-import pdb
+
import argparse
import sys
import os, os.path
# skip namespaces not in the filter
if self.namespace_filter is not None:
- if namespace in self.namespace_filter:
+ if namespace not in self.namespace_filter:
continue
rev_detector = mwreverts.Detector()
from mw.lib import persistence
state = persistence.State()
-
-
# Iterate through a page's revisions
for rev in page:
parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
-parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
- help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. Use -p=segment for advanced persistence calculation method that is robust to content moves. This might be very slow. Use -p=legacy for legacy behavior.")
+parser.add_argument('-p', '--persistence', dest="persist", default="sequence", const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower."
parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
-parser.add_argument('-ns', '--namespace-filter', dest="namespace_filter", type=str, help="Comma-seperate list of namespaces numbers to include", default=None)
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of namespace to include. Can be specified more than once.")
+
args = parser.parse_args()
persist = PersistMethod.sequence
if args.namespace_filter is not None:
- namespaces = [int(ns) for ns in args.namespace_filter.split(',')]
+ namespaces = args.namespace_filter
else:
namespaces = None
filename = os.path.join(output_dir, os.path.basename(filename))
output_file = open_output_file(filename)
- wikiq = WikiqParser(input_file, output_file,
- collapse_user=args.collapse_user,
- persist=persist,
- urlencode=args.urlencode,
- namespaces = namespaces)
-
+ wikiq = WikiqParser(input_file, output_file,
+ collapse_user=args.collapse_user,
+ persist=persist,
+ urlencode=args.urlencode,
+ namespaces = namespaces)
wikiq.process()