#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import sys
import os, os.path
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mwxml import Dump

from deltas.tokenizers import wikitext_split
import mwpersistence
import mwreverts
from urllib.parse import quote
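# fields that may contain tabs or newlines; url-encoded when -u is passed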
TO_ENCODE = ('title', 'editor')
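# number of revisions held in the moving window used for persistence statistics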
PERSISTENCE_RADIUS=7
from deltas import SequenceMatcher
from deltas import SegmentMatcher

class PersistMethod:
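    """Enumeration of the persistence algorithms selectable with -p/--persistence."""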
    none = 0
    sequence = 1
    segment = 2
    legacy = 3

def calculate_persistence(tokens_added):
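    """Return (token_revs, tokens_added): the total number of revisions, beyond
    the one that introduced them, in which the added tokens appear, and the
    number of tokens added."""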
    return(sum([(len(x.revisions)-1) for x in tokens_added]),
           len(tokens_added))

def matchmaker(rev_data, regular_expression, scanner, rev):
    """Search the requested locations of a revision for regex matches.

    Sets rev_data['matches'] to a comma-separated string of all matched
    substrings, or None if nothing matched in any location.
    """
    all_matches = []
    for location in scanner: # 'comment' and/or 'text', from the -rs arguments
        if location == "comment":
            matching_string = rev.comment
        elif location == "text":
            matching_string = rev.text
        else:
            sys.exit("regex scanner location must be 'comment' or 'text'.")

        # deleted or missing fields come through as None; skip them
        if matching_string is None:
            continue

        all_matches.extend(m.group(0) for m in re.finditer(regular_expression, matching_string))

    # store every matched substring; these can be split apart in post-processing
    rev_data['matches'] = ",".join(all_matches) if all_matches else None

    return rev_data



class WikiqIterator():
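    """Iterate over the pages of a MediaWiki XML dump, yielding WikiqPage objects."""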
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = { ns.id : ns.name for ns in
                               self.mwiterator.site_info.namespaces }
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map = self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)

class WikiqPage():
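    """A page from the dump; iterating it yields revisions, optionally
    collapsing consecutive edits by the same user."""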
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')
    
    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        self.namespace = page.namespace
        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            page.namespace = 0
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        else:
            self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration   rev.user   prev_rev.user   add prev_rev?
        #         0          A            None           Never
        #         1          A               A           False
        #         2          B               A            True
        #         3          A               B            True
        #         4          A               A           False
        # Post-loop                          A          Always
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user: 
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a sequence by a user and reset;
                    # also yield if this or the previous username is deleted (unknown)

                    if rev.deleted.user or prev_rev.deleted.user:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs

                    elif not rev.user.text == prev_rev.user.text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev

        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)

class WikiqParser():
    
    def __init__(self, input_file, output_file, scanner, match_regex, collapse_user=False, persist=None, urlencode=False, namespaces = None):
        """ 
        Parameters:
           persist : what persistence method to use. Takes a PersistMethod value
        """

        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False
        self.namespaces = []
        self.urlencode = urlencode
        self.scanner = scanner
        self.match_regex = match_regex

        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None

    def __get_namespace_from_title(self, title):
        default_ns = None

        # self.namespaces maps namespace names to ids
        for ns in self.namespaces:
            # the unnamed namespace is the default
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, return the default namespace
        return default_ns


    def process(self):

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract the mapping of namespace names to ids
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}

        page_count = 0
        rev_count = 0


        # Iterate through pages
        for page in dump:
            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)

            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if namespace not in self.namespace_filter:
                    continue

            rev_detector = mwreverts.Detector()  # detects reverts by sha1 identity within this page

            if self.persist != PersistMethod.none:
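                # a revision's persistence statistics are computed once
                # PERSISTENCE_RADIUS subsequent revisions have been seen,
                # so recent revisions wait in a window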
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if self.persist == PersistMethod.sequence:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                elif self.persist == PersistMethod.segment:
                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                # self.persist == PersistMethod.legacy
                else:
                    from mw.lib import persistence
                    state = persistence.State()

            # Iterate through a page's revisions
            for rev in page:
                # initialize rev_data
                rev_data = {}

                if self.scanner is not None:
                    # search the requested locations ('comment' and/or 'text')
                    # for the regex and record any matches
                    rev_data = matchmaker(rev_data, self.match_regex, self.scanner, rev)

                    # skip revisions with no match anywhere
                    if rev_data['matches'] is None:
                        continue

                # we fill out the rest of the data structure now
                rev_data['revid'] = rev.id
                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
                rev_data['articleid'] = page.id
                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
                rev_data['title'] = '"' + page.title + '"'
                rev_data['namespace'] = namespace
                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"

                # if revisions are deleted, /many/ things will be missing
                if rev.deleted.text:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""

                else:
                    # rev.text can be None if the page has no text
                    if not rev.text:
                        rev.text = ""
                    # mwxml usually supplies a sha1; compute one when it is missing
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
                    
                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)
                    
                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if not rev.deleted.user:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.user.text + '"'
                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"
                    
                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""

                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #    redirect = True
                #else:
                #    redirect = False
                
                #TODO missing: additions_size deletions_size
                
                # if collapse user was on, lets run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs

                if self.persist != PersistMethod.none:
                    if rev.deleted.text:
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                    else:

                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)

                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
                            
                        window.append((rev.id, rev_data, tokens_added, tokens_removed))
                        
                        if len(window) == PERSISTENCE_RADIUS:
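                            # the oldest revision in the window now has
                            # PERSISTENCE_RADIUS-1 subsequent revisions: emit it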
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]
                            
                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1

            if self.persist != PersistMethod.none:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window)-(i+1)
                    
                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

    def print_rev_data(self, rev_data):
        # url-encode fields that can contain tabs or newlines (e.g. editor names)
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True
        
        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)


def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*'] 
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename] 
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename] 

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    except NameError:
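        # cmd is unbound when the filename has no recognized compression
        # extension; fall back to reading it as uncompressed XML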
        input_file = open(input_filename, 'r')

    return input_file

def open_output_file(input_filename):
    # create a regex that creates the output filename
    output_filename = re.sub(r'\.(7z|gz|bz2)$', '', input_filename)
    output_filename = re.sub(r'\.xml$', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file

parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab-delimited data.')

# arguments for the input direction
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str, 
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create dump file)")

parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow.  The defualt is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")

parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")

parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                    help="Id number of namspace to include. Can be specified more than once.")

parser.add_argument('-rs', '--regex-scanner', dest="scanner",type=str, action='append',
                    help="Find the regex match specified by -R/--match searching in: (1) comment (2) text.")

parser.add_argument('-R', '--match', dest="match_regex", type=str, 
                    help="The regular expression you would like to find in the string and put in capture group")

args = parser.parse_args()

# set persistence method

if args.persist is None:
    persist = PersistMethod.none
elif args.persist == "segment":
    persist = PersistMethod.segment
elif args.persist == "legacy":
    persist = PersistMethod.legacy
else:
    persist = PersistMethod.sequence

if args.namespace_filter is not None:
    namespaces = args.namespace_filter
else:
    namespaces = None

if len(args.dumpfiles) > 0:
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open directory for output
        if args.output_dir:
            output_dir = args.output_dir[0]
        else:
            output_dir = "."

        print("Processing file: %s" % filename, file=sys.stderr)

        if args.stdout:
            output_file = sys.stdout
        else:
            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)

        wikiq = WikiqParser(input_file, output_file, 
                            collapse_user=args.collapse_user,
                            persist=persist,
                            urlencode=args.urlencode,
                            namespaces = namespaces,
                            match_regex=args.match_regex, # regex scanning arguments
                            scanner=args.scanner)

        wikiq.process()

        # close things 
        input_file.close()
        output_file.close()
else:
    wikiq = WikiqParser(sys.stdin, sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=persist,
                        urlencode=args.urlencode,
                        namespaces = namespaces,
                        match_regex=args.match_regex, # regex scanning arguments
                        scanner=args.scanner)
    wikiq.process()
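
# Example invocations (hypothetical filenames):
#   python3 wikiq enwiki-20200101-pages-meta-history1.xml.bz2 -o output/
#   python3 wikiq dump.xml.7z --collapse-user -p segment -n 0 -n 1
#   bzcat dump.xml.bz2 | python3 wikiq > dump.tsv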

# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")