code.communitydata.science - mediawiki_dump_tools.git/blobdiff - wikiq
remove dependency on pandas.
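The patch drops the pandas-based output path and writes parquet directly with pyarrow: rows are buffered as plain Python values and flushed through pyarrow.parquet.ParquetWriter against an explicit schema. Below is a minimal, self-contained sketch of that approach, separate from wikiq itself; the file name, columns, and row values are illustrative only, and it assumes a pyarrow release that provides Table.from_pylist.

    import pyarrow as pa
    import pyarrow.parquet as pq

    # explicit schema instead of letting pandas infer dtypes
    schema = pa.schema([
        pa.field("revid", pa.int64()),
        pa.field("title", pa.string()),
        pa.field("minor", pa.bool_()),
    ])

    # rows buffered by the caller, one dict per revision
    rows = [
        {"revid": 1, "title": "Example page", "minor": False},
        {"revid": 2, "title": "Example page", "minor": True},
    ]

    writer = pq.ParquetWriter("example.parquet", schema, flavor="spark")
    writer.write_table(pa.Table.from_pylist(rows, schema=schema))  # one flushed buffer
    writer.close()

In the command-line interface this path is selected when the output directory name ends in .parquet (for example, wikiq dump.xml.bz2 -o out.parquet); otherwise wikiq keeps writing TSV.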
diff --git a/wikiq b/wikiq
index ae6b40f07970b08975afbd545827c09b5509d08a..03b3147446b5af284efc01d6d2435e0ee1ba9c5c 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -8,6 +8,7 @@ import argparse
 import sys
 import os, os.path
 import re
+from datetime import datetime
 
 from subprocess import Popen, PIPE
 from collections import deque
@@ -24,6 +25,10 @@ PERSISTENCE_RADIUS=7
 from deltas import SequenceMatcher
 from deltas import SegmentMatcher
 
+from dataclasses import dataclass, field, fields, asdict, make_dataclass
+import pyarrow as pa
+import pyarrow.parquet as pq
+
 class PersistMethod:
     none = 0
     sequence = 1
@@ -34,6 +39,7 @@ def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
+
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
@@ -126,26 +132,260 @@ class WikiqPage():
     def __next__(self):
         return next(self.__revisions)
 
-class WikiqParser():
+
+class RegexPair(object):
+    def __init__(self, pattern, label):
+        self.pattern = re.compile(pattern)
+        self.label = label
+        self.has_groups = bool(self.pattern.groupindex)
+        if self.has_groups:
+            self.capture_groups = list(self.pattern.groupindex.keys())
+            
+    def _make_key(self, cap_group):
+        return ("{}_{}".format(self.label, cap_group))
+
+    def matchmake(self, content, rev_data):
+        
+        temp_dict = {}
+        # if there are named capture groups in the regex
+        if self.has_groups:
+
+            # if there are matches of some sort in this revision content, fill the lists for each cap_group
+            if content is not None and self.pattern.search(content) is not None:
+                m = self.pattern.finditer(content)
+                matchobjects = list(m)
+
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_list = []
+                    for match in matchobjects:
+                        # we only want to add the match for the capture group if the match is not None
+                        if match.group(cap_group) is not None:
+                            temp_list.append(match.group(cap_group))
+
+                    # if temp_list of matches is empty just make that column None
+                    if len(temp_list)==0:
+                        temp_dict[key] = None
+                    # else we put in the list we made in the for-loop above
+                    else:
+                        temp_dict[key] = ', '.join(temp_list)
+
+            # there are no matches at all in this revision content, we default values to None
+            else:
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_dict[key] = None
+
+        # there are no capture groups, we just search for all the matches of the regex
+        else:
+            #given that there are matches to be made
+            if isinstance(content, (str, bytes)):
+                if self.pattern.search(content) is not None:
+                    m = self.pattern.findall(content)
+                    temp_dict[self.label] = ', '.join(m)
+                else:
+                    temp_dict[self.label] = None
+
+        # update rev_data with our new columns
+        for k, v in temp_dict.items():
+            setattr(rev_data, k, v)
+
+        return rev_data
+
+@dataclass()
+class RevDataBase():
+    revid: int 
+    date_time: datetime
+    articleid: int
+    editorid: int
+    title: str
+    namespace: int
+    deleted: bool
+    text_chars: int = None
+    revert: bool = None
+    reverteds: list[int] = None
+    sha1: str = None
+    minor: bool = None
+    editor: str = None
+    anon: bool = None
+
+    pa_schema_fields = [
+        pa.field("revid", pa.int64),
+        pa.field("date_time",pa.timestamp('ms')),
+        pa.field("articleid",pa.int64()),
+        pa.field("editorid",pa.int64()),
+        pa.field("title",pa.string()),
+        pa.field("namespace",pa.int32()),
+        pa.field("deleted",pa.binary()),
+        pa.field("test_chars",pa.int32()),
+        pa.field("revert",pa.binary()),
+        pa.field("reverteds",pa.list_(pa.int64())),
+        pa.field("sha1",pa.string()),
+        pa.field("minor",pa.binary()),
+        pa.field("editor",pa.string()),
+        pa.field("anon",pa.binary())
+    ]
+
+    def to_pyarrow(self):
+        # return the revision as a dict keyed by field name; the parquet writer
+        # assembles buffered dicts into a pyarrow Table against pa_schema_fields
+        return asdict(self)
+        
+
+    def to_tsv_row(self):
+        
+        row = []
+        for f in fields(self):
+            val = getattr(self, f.name)
+            if getattr(self, f.name) is None:
+                row.append("")
+            elif f.type == bool:
+                row.append("TRUE" if val else "FALSE")
+
+            elif f.type == datetime:
+                row.append(val.strftime('%Y-%m-%d %H:%M:%S'))
+
+            elif f.name in {'editor','title'}:
+                s = '"' + val + '"'
+                if f.name in TO_ENCODE:
+                    row.append(quote(str(val)))
+                else:
+                    row.append(s)
+
+            elif f.type == list[int]:
+                row.append('"' + ",".join([str(x) for x in val]) + '"')
+
+            elif f.type == str:
+                if f.name in TO_ENCODE:
+                    row.append(quote(str(val)))
+                else:
+                    row.append(val)
+            else:
+                row.append(str(val))
+
+        return '\t'.join(row)
     
-    def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None):
+
+@dataclass()
+class RevDataCollapse(RevDataBase):
+    collapsed_revs:int = None
+    pa_collapsed_revs_schema = pa.field('collapsed_revs',pa.int64())
+    pa_schema_fields = RevDataBase.pa_schema_fields + [pa_collapsed_revs_schema]
+    pa_schema = pa.schema(pa_schema_fields)    
+
+@dataclass()
+class RevDataPersistence(RevDataBase):
+    token_revs:int = None
+    tokens_added:int = None
+    tokens_removed:int = None
+    tokens_window:int = None
+    pa_persistence_schema_fields = [
+        pa.field("token_revs", pa.int64()),
+        pa.field("tokens_added", pa.int64()),
+        pa.field("tokens_removed", pa.int64()),
+        pa.field("tokens_window", pa.int64())]
+        
+    pa_schema_fields = RevDataBase.pa_schema_fields  + pa_persistence_schema_fields
+
+@dataclass()
+class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
+    pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields
+
+class WikiqParser():
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000):
         """ 
         Parameters:
            persist : what persistence method to use. Takes a PersistMethod value
         """
-
         self.input_file = input_file
-        self.output_file = output_file
+
         self.collapse_user = collapse_user
         self.persist = persist
-        self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
+        self.revert_radius = revert_radius
+        
+        self.output_buffer = []
+        self.output_buffer_size = parquet_buffer_size
+
         if namespaces is not None:
             self.namespace_filter = set(namespaces)
         else:
             self.namespace_filter = None
 
+        self.regex_schemas = []
+        self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
+        self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
+
+        if self.collapse_user is True:
+            if self.persist == PersistMethod.none:
+                revdata_type = RevDataCollapse
+            else:
+                revdata_type = RevDataCollapsePersistence
+        elif self.persist != PersistMethod.none:
+            revdata_type = RevDataPersistence
+        else:
+            revdata_type = RevDataBase
+
+        regex_fields = [(f.name, str, field(default=None)) for f in self.regex_schemas]
+        self.revdata_type = make_dataclass('RevData_Parser',
+                                           fields=regex_fields,
+                                           bases=(revdata_type,))
+
+        self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas
+
+        if output_parquet is True:
+            self.output_parquet = True
+            self.pq_writer = None
+            self.output_file = output_file
+        else:
+            self.output_parquet = False
+            self.output_header = True
+            self.output_file = open(output_file,'w')
+
+
+    def make_matchmake_pairs(self, patterns, labels):
+        if (patterns is not None and labels is not None) and \
+           (len(patterns) == len(labels)):
+            result = []
+            for pattern, label in zip(patterns, labels):
+                result.append(RegexPair(pattern, label))
+                self.regex_schemas.append(pa.field(label, pa.string()))
+
+            return result
+        elif (patterns is None and labels is None):
+            return []
+        else:
+            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+    def matchmake(self, rev, rev_data):
+        rev_data = self.matchmake_revision(rev.text, rev_data)
+        rev_data = self.matchmake_comment(rev.comment, rev_data)
+        return rev_data
+
+    def matchmake_revision(self, text, rev_data):
+        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
+
+    def matchmake_comment(self, comment, rev_data):
+        return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)
+
+    def matchmake_pairs(self, text, rev_data, pairs):
+        for pair in pairs:
+            rev_data = pair.matchmake(text, rev_data)
+        return rev_data
+
     def __get_namespace_from_title(self, title):
         default_ns = None
 
@@ -161,6 +401,7 @@ class WikiqParser():
         # if we've made it this far with no matches, we return the default namespace
         return default_ns
 
+
     def process(self):
 
         # create a regex that creates the output filename
@@ -187,7 +428,7 @@ class WikiqParser():
                 if namespace not in self.namespace_filter:
                     continue
 
-            rev_detector = mwreverts.Detector()
+            rev_detector = mwreverts.Detector(radius = self.revert_radius)
 
             if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
@@ -207,23 +448,18 @@ class WikiqParser():
 
             # Iterate through a page's revisions
             for rev in page:
+                
+                rev_data = self.revdata_type(revid = rev.id,
+                                             date_time = datetime.fromtimestamp(rev.timestamp.unix()),
+                                             articleid = page.id,
+                                             editorid = None if rev.deleted.user or rev.user.id is None else rev.user.id,
+                                             title = page.title,
+                                             namespace = namespace,
+                                             deleted = rev.deleted.text
+                                             )
 
-                rev_data = {'revid' : rev.id,
-                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
-                            'articleid' : page.id,
-                            'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
-                            'title' : '"' + page.title + '"',
-                            'namespace' : namespace,
-                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" } 
-
-                # if revisions are deleted, /many/ things will be missing
-                if rev.deleted.text:
-                    rev_data['text_chars'] = ""
-                    rev_data['sha1'] = ""
-                    rev_data['revert'] = ""
-                    rev_data['reverteds'] = ""
+                rev_data = self.matchmake(rev, rev_data)
 
-                else:
+                if not rev.deleted.text:
                     # rev.text can be None if the page has no text
                     if not rev.text:
                         rev.text = ""
@@ -235,33 +471,25 @@ class WikiqParser():
 
                         text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
                     
-                    rev_data['sha1'] = text_sha1
+                    rev_data.sha1 = text_sha1
 
                     # TODO rev.bytes doesn't work.. looks like a bug
-                    rev_data['text_chars'] = len(rev.text)
-               
+                    rev_data.text_chars = len(rev.text)
+
                     # generate revert data
-                    revert = rev_detector.process(text_sha1, rev.id)
+                    revert = rev_detector.process(text_sha1, rev.id)
+                    rev_data.revert = revert is not None
                     
                     if revert:
-                        rev_data['revert'] = "TRUE"
-                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
-                    else:
-                        rev_data['revert'] = "FALSE"
-                        rev_data['reverteds'] = ""
+                        rev_data.reverteds = revert.reverteds
 
                 # if the fact that the edit was minor can be hidden, this might be an issue
-                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
+                rev_data.minor = rev.minor
 
                 if not rev.deleted.user:
                     # wrap user-defined editors in quotes for fread
-                    rev_data['editor'] = '"' + rev.user.text + '"'
-                    rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
+                    rev_data.editor = rev.user.text
+                    rev_data.anon = rev.user.id is None
                     
-                else:
-                    rev_data['anon'] = ""
-                    rev_data['editor'] = ""
-
                 #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                 #    redirect = True
                 #else:
@@ -271,13 +499,11 @@ class WikiqParser():
                 
                 # if collapse user was on, lets run that
                 if self.collapse_user:
-                    rev_data['collapsed_revs'] = rev.collapsed_revs
+                    rev_data.collapsed_revs = rev.collapsed_revs
 
                 if self.persist != PersistMethod.none:
-                    if rev.deleted.text:
-                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
-                            old_rev_data[k] = None
-                    else:
+
+                    if not rev.deleted.text:
 
                         if self.persist != PersistMethod.legacy:
                             _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
@@ -292,12 +518,12 @@ class WikiqParser():
                             
                             num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
 
-                            old_rev_data["token_revs"] = num_token_revs
-                            old_rev_data["tokens_added"] = num_tokens
-                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
-                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1
+                            old_rev_data.token_revs = num_token_revs
+                            old_rev_data.tokens_added = num_tokens
+                            old_rev_data.tokens_removed = len(old_tokens_removed)
+                            old_rev_data.tokens_window = PERSISTENCE_RADIUS-1
 
                             self.print_rev_data(old_rev_data)
 
                 else:
                     self.print_rev_data(rev_data)
@@ -314,11 +540,10 @@ class WikiqParser():
                     rev_id, rev_data, tokens_added, tokens_removed = item
                     num_token_revs, num_tokens = calculate_persistence(tokens_added)
 
-                    rev_data["token_revs"] = num_token_revs
-                    rev_data["tokens_added"] = num_tokens
-                    rev_data["tokens_removed"] = len(tokens_removed)
-                    rev_data["tokens_window"] = len(window)-(i+1)
-                    
+                    rev_data.token_revs = num_token_revs
+                    rev_data.tokens_added = num_tokens
+                    rev_data.tokens_removed = len(tokens_removed)
+                    rev_data.tokens_window = len(window)-(i+1)
                     self.print_rev_data(rev_data)
 
             page_count += 1
@@ -326,22 +551,53 @@ class WikiqParser():
         print("Done: %s revisions and %s pages." % (rev_count, page_count),
               file=sys.stderr)
 
+        if self.output_parquet is True:
+            self.flush_parquet_buffer()
+            self.pq_writer.close()
+
+        else:
+            self.flush_tsv_buffer()
+            self.output_file.close()
+
+
+    def write_parquet_row(self, rev_data):
+        padata = rev_data.to_pyarrow()
+        self.output_buffer.append(padata)
+
+        if len(self.output_buffer) >= self.output_buffer_size:
+            self.flush_parquet_buffer()
+
+
+    def flush_parquet_buffer(self):
+        # build a table from the buffered row dicts and stream it to the parquet writer
+        schema = pa.schema(self.revdata_type.pa_schema_fields)
+        outtable = pa.Table.from_pylist(self.output_buffer, schema=schema)
+        if self.pq_writer is None:
+            self.pq_writer = pq.ParquetWriter(self.output_file, schema, flavor='spark')
+
+        self.pq_writer.write_table(outtable)
+        self.output_buffer = []
+
     def print_rev_data(self, rev_data):
-        # if it's the first time through, print the header
-        if self.urlencode:
-            for field in TO_ENCODE:
-                rev_data[field] = quote(str(rev_data[field]))
-
-        if not self.printed_header:
-            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
-            self.printed_header = True
+        if self.output_parquet is False:
+            printfunc = self.write_tsv_row
+        else:
+            printfunc = self.write_parquet_row
         
-        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
+        printfunc(rev_data)
+
+    def write_tsv_row(self, rev_data):
+        
+        self.output_buffer.append(rev_data.to_tsv_row())
+
+        if len(self.output_buffer) >= self.output_buffer_size:
+            self.flush_tsv_buffer()
 
 
+    def flush_tsv_buffer(self):
+        # write the header once, then the buffered rows, then clear the buffer
+        if self.output_header:
+            self.output_file.write('\t'.join([f.name for f in fields(self.revdata_type)]) + '\n')
+            self.output_header = False
+        for row in self.output_buffer:
+            self.output_file.write(row + '\n')
+        self.output_buffer = []
+
 def open_input_file(input_filename):
     if re.match(r'.*\.7z$', input_filename):
-        cmd = ["7za", "x", "-so", input_filename, '*'
+        cmd = ["7za", "x", "-so", input_filename, "*.xml"
     elif re.match(r'.*\.gz$', input_filename):
         cmd = ["zcat", input_filename] 
     elif re.match(r'.*\.bz2$', input_filename):
@@ -354,13 +610,19 @@ def open_input_file(input_filename):
 
     return input_file
 
-def open_output_file(input_filename):
-    # create a regex that creates the output filename
+def get_output_filename(input_filename, parquet = False):
     output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
     output_filename = re.sub(r'\.xml', '', output_filename)
-    output_filename = output_filename + ".tsv"
-    output_file = open(output_filename, "w")
+    if parquet is False:
+        output_filename = output_filename + ".tsv"
+    else:
+        output_filename = output_filename + ".parquet"
+    return output_filename
 
+def open_output_file(input_filename):
+    # create a regex that creates the output filename
+    output_filename = get_output_filename(input_filename, parquet = False)
+    output_file = open(output_filename, "w")
     return output_file
 
 parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimitted data.')
@@ -370,7 +632,7 @@ parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                     help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
 
 parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
-                    help="Directory for output files.")
+                    help="Directory for output files. If it ends with .parquet output will be in parquet format.")
 
 parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                     help="Write output to standard out (do not create dump file)")
@@ -379,7 +641,7 @@ parser.add_argument('--collapse-user', dest="collapse_user", action="store_true"
                     help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
 
 parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow.  Use -p=segment for advanced persistence calculation method that is robust to content moves. This might be very slow. Use -p=legacy for legacy behavior.")
+                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow.  The defualt is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
 
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
@@ -387,10 +649,30 @@ parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
 parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                     help="Id number of namspace to include. Can be specified more than once.")
 
+parser.add_argument('-rr',
+                    '--revert-radius',
+                    dest="revert_radius",
+                    type=int,
+                    action='store',
+                    default=15,
+                    help="Number of edits to check when looking for reverts (default: 15)")
+
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+                    help="The label for the outputted column based on matching the regex in revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+                    help="The regular expression to search for in comments of revisions.")
 
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+                    help="The label for the outputted column based on matching the regex in comments.")
 
 args = parser.parse_args()
 
+
+
 # set persistence method
 
 if args.persist is None:
@@ -408,6 +690,7 @@ else:
     namespaces = None
 
 if len(args.dumpfiles) > 0:
+    output_parquet = False
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
 
@@ -417,33 +700,51 @@ if len(args.dumpfiles) > 0:
         else:
             output_dir = "."
 
+        if output_dir.endswith(".parquet"):
+            output_parquet = True
+
         print("Processing file: %s" % filename, file=sys.stderr)
 
         if args.stdout:
             output_file = sys.stdout
         else:
             filename = os.path.join(output_dir, os.path.basename(filename))
-            output_file = open_output_file(filename)
+            output_file = get_output_filename(filename, parquet = output_parquet)
 
-        wikiq = WikiqParser(input_file, output_file, 
+        wikiq = WikiqParser(input_file,
+                            output_file,
                             collapse_user=args.collapse_user,
                             persist=persist,
                             urlencode=args.urlencode,
-                            namespaces = namespaces)
-
+                            namespaces=namespaces,
+                            revert_radius=args.revert_radius,
+                            regex_match_revision = args.regex_match_revision,
+                            regex_revision_label = args.regex_revision_label,
+                            regex_match_comment = args.regex_match_comment,
+                            regex_comment_label = args.regex_comment_label,
+                            output_parquet=output_parquet)
+
         wikiq.process()
 
         # close things 
         input_file.close()
-        output_file.close()
+
 else:
-    wikiq = WikiqParser(sys.stdin, sys.stdout,
+    wikiq = WikiqParser(sys.stdin,
+                        sys.stdout,
                         collapse_user=args.collapse_user,
                         persist=persist,
-                        persist_legacy=args.persist_legacy,
+                        #persist_legacy=args.persist_legacy,
                         urlencode=args.urlencode,
-                        namespaces = namespaces)
-    wikiq.process()
+                        namespaces=namespaces,
+                        revert_radius=args.revert_radius,
+                        regex_match_revision = args.regex_match_revision,
+                        regex_revision_label = args.regex_revision_label,
+                        regex_match_comment = args.regex_match_comment,
+                        regex_comment_label = args.regex_comment_label)
+
+    wikiq.process() 
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
 # stop_words = stop_words.split(",")
