X-Git-Url: https://code.communitydata.science/mediawiki_dump_tools.git/blobdiff_plain/e4222c45ddea22e543fe4a4bdbcc38b4ac3645bb..e871023ff54425af61097a4a95174a42c8245c99:/wikiq diff --git a/wikiq b/wikiq index dc9d772..8b741e3 100755 --- a/wikiq +++ b/wikiq @@ -3,7 +3,7 @@ # original wikiq headers are: title articleid revid date_time anon # editor editor_id minor text_size text_entropy text_md5 reversion # additions_size deletions_size -import pdb + import argparse import sys import os, os.path @@ -184,7 +184,7 @@ class WikiqParser(): # skip namespaces not in the filter if self.namespace_filter is not None: - if namespace in self.namespace_filter: + if namespace not in self.namespace_filter: continue rev_detector = mwreverts.Detector() @@ -205,8 +205,6 @@ class WikiqParser(): from mw.lib import persistence state = persistence.State() - - # Iterate through a page's revisions for rev in page: @@ -380,13 +378,15 @@ parser.add_argument('-s', '--stdout', dest="stdout", action="store_true", parser.add_argument('--collapse-user', dest="collapse_user", action="store_true", help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.") -parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?', - help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. Use -p=segment for advanced persistence calculation method that is robust to content moves. This might be very slow. 
Use -p=legacy for legacy behavior.") +parser.add_argument('-p', '--persistence', dest="persist", default="sequence", const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?', +                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.") parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true", help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.") -parser.add_argument('-ns', '--namespace-filter', dest="namespace_filter", type=str, help="Comma-seperate list of namespaces numbers to include", default=None) +parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append', +                    help="Id number of namespace to include. 
Can be specified more than once.") + args = parser.parse_args() @@ -403,7 +403,7 @@ else: persist = PersistMethod.sequence if args.namespace_filter is not None: - namespaces = [int(ns) for ns in args.namespace_filter.split(',')] + namespaces = args.namespace_filter else: namespaces = None @@ -425,12 +425,11 @@ if len(args.dumpfiles) > 0: filename = os.path.join(output_dir, os.path.basename(filename)) output_file = open_output_file(filename) - wikiq = WikiqParser(input_file, output_file, - collapse_user=args.collapse_user, - persist=persist, - urlencode=args.urlencode, - namespaces = namespaces) - + wikiq = WikiqParser(input_file, output_file, + collapse_user=args.collapse_user, + persist=persist, + urlencode=args.urlencode, + namespaces = namespaces) wikiq.process()