added regex scanner v2's dump unit test file regextest.xml.bz2
diff --git a/wikiq b/wikiq
index e693cd2ba2dd3ab19ed61505a529f8d216219e8b..7a1b846dc08f2f86ff292ceae694ff55ef9a7382 100755 (executable)
--- a/wikiq
+++ b/wikiq
@@ -13,27 +13,91 @@ from subprocess import Popen, PIPE
 from collections import deque
 from hashlib import sha1
 
-from mw.xml_dump import Iterator
-from mw.lib import persistence
-from mw.lib import reverts
+from mwxml import Dump
+
+from deltas.tokenizers import wikitext_split
+import mwpersistence
+import mwreverts
 from urllib.parse import quote
 TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
+from deltas import SequenceMatcher
+from deltas import SegmentMatcher
+
+class PersistMethod:
+    none = 0
+    sequence = 1
+    segment = 2
+    legacy = 3
 
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
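For reference, a hedged sketch of what calculate_persistence returns: each element of tokens_added is expected to carry a .revisions list naming every revision the token appears in, so len(x.revisions) - 1 counts the later revisions it persisted through. The Tok stand-in below is hypothetical.

    # Hypothetical stand-in for a persistence token; only .revisions matters here.
    class Tok:
        def __init__(self, revisions):
            self.revisions = revisions

    # One token surviving two later revisions, one surviving none:
    token_revs, num_tokens = calculate_persistence([Tok([1, 2, 3]), Tok([1])])
    assert (token_revs, num_tokens) == (2, 2)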
 
+def matchmake(scanned_content, rev_data, regex, label):
+    p = re.compile(regex)
+
+    temp_dict = {}
+    # if there are named capture groups in the regex
+    if bool(p.groupindex):
+        capture_groups = list(p.groupindex.keys())
+
+        # initialize the {capture_group_name:list} for each capture group
+        for cap_group in capture_groups:
+            temp_dict["{}_{}".format(label, cap_group)] = []
+
+        # if there are matches of some sort in this revision content, fill the lists for each cap_group
+        if p.search(scanned_content) is not None:
+            m = re.finditer(p,scanned_content)
+            matchobjects = list(m)
+
+            for cap_group in capture_groups:
+                temp_list = []
+                for match in matchobjects:
+                    # we only want to add the match for the capture group if the match is not None
+                    if match.group(cap_group) is not None:
+                        temp_list.append(match.group(cap_group))
+
+                # if temp_list of matches is empty just make that column None
+                if len(temp_list)==0:
+                    temp_dict["{}_{}".format(label, cap_group)] = None
+                # else we put in the list we made in the for-loop above
+                else:
+                    temp_dict["{}_{}".format(label, cap_group)] = ', '.join(temp_list)
+        
+        # there are no matches at all in this revision content, we default values to None
+        else:
+            for cap_group in capture_groups:
+                temp_dict["{}_{}".format(label, cap_group)] = None
+
+    # there are no capture groups, we just search for all the matches of the regex
+    else:
+        #given that there are matches to be made
+        if p.search(scanned_content) is not None:
+            m = p.findall(scanned_content)
+            temp_dict[label] = ', '.join(m)
+        else:
+            temp_dict[label] = None    
+    # update rev_data with our new columns
+    rev_data.update(temp_dict)
+    return rev_data
+
+
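A minimal sketch of the columns matchmake builds, reimplemented inline with re (the "categories" label, the sample text, and the regex are hypothetical): a pattern with named capture groups yields one "<label>_<group>" column per group, holding a comma-joined string of matches, or None when nothing matched.

    import re

    scanned_content = "added [[Category:Dogs]] and [[Category:Cats]]"
    p = re.compile(r"\[\[Category:(?P<cat>[^\]]+)\]\]")
    rev_data = {}
    for cap_group in p.groupindex.keys():
        # collect every non-None capture for this group across all matches
        hits = [m.group(cap_group) for m in p.finditer(scanned_content)
                if m.group(cap_group) is not None]
        rev_data["{}_{}".format("categories", cap_group)] = ', '.join(hits) if hits else None
    assert rev_data == {'categories_cat': 'Dogs, Cats'}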
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
         self.collapse_user = collapse_user
-        self.mwiterator = Iterator.from_file(self.fh)
+        self.mwiterator = Dump.from_file(self.fh)
+        self.namespace_map = { ns.id : ns.name for ns in
+                               self.mwiterator.site_info.namespaces }
         self.__pages = self.load_pages()
 
     def load_pages(self):
         for page in self.mwiterator:
-            yield WikiqPage(page, collapse_user=self.collapse_user)
+            yield WikiqPage(page,
+                            namespace_map = self.namespace_map,
+                            collapse_user=self.collapse_user)
 
     def __iter__(self):
         return self.__pages
@@ -46,13 +110,19 @@ class WikiqPage():
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')
     
-    def __init__(self, page, collapse_user=False):
+    def __init__(self, page, namespace_map, collapse_user=False):
         self.id = page.id
-        self.title = page.title
         self.namespace = page.namespace
-        self.redirect = page.redirect
+        # following mwxml, we assume namespace 0 in cases where
+        # page.namespace is inconsistent with namespace_map
+        if page.namespace not in namespace_map:
+            self.title = page.title
+            page.namespace = 0
+        if page.namespace != 0:
+            self.title = ':'.join([namespace_map[page.namespace], page.title])
+        else:
+            self.title = page.title
         self.restrictions = page.restrictions
-        
         self.collapse_user = collapse_user
         self.mwpage = page
         self.__revisions = self.rev_list()
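A small sketch of the title-prefixing rule above, with a hypothetical namespace_map of the kind built from the dump's site_info: titles outside the main namespace get a "Namespace:" prefix, and namespace ids missing from the map fall back to 0, following mwxml.

    namespace_map = {0: "", 1: "Talk", 2: "User"}  # hypothetical map

    def full_title(namespace, title):
        if namespace not in namespace_map:  # mwxml-style fallback to main
            namespace = 0
        if namespace != 0:
            return ':'.join([namespace_map[namespace], title])
        return title

    assert full_title(1, "Dogs") == "Talk:Dogs"
    assert full_title(99, "Dogs") == "Dogs"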
@@ -76,7 +146,14 @@ class WikiqPage():
             else:
                 if self.collapse_user:
                     # yield if this is the last edit in a seq by a user and reset
-                    if not rev.contributor.user_text == prev_rev.contributor.user_text:
+                    # also yield if we don't know who the user is
+
+                    if rev.deleted.user or prev_rev.deleted.user:
+                        yield prev_rev
+                        collapsed_revs = 1
+                        rev.collapsed_revs = collapsed_revs
+
+                    elif not rev.user.text == prev_rev.user.text:
                         yield prev_rev
                         collapsed_revs = 1
                         rev.collapsed_revs = collapsed_revs
@@ -89,6 +166,7 @@ class WikiqPage():
                     yield prev_rev
 
             prev_rev = rev
+
         # also yield the final time
         yield prev_rev
 
@@ -99,10 +177,12 @@ class WikiqPage():
         return next(self.__revisions)
 
 class WikiqParser():
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15):
+        """ 
+        Parameters:
+           persist : what persistence method to use. Takes a PersistMethod value
+        """
 
-
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
-        
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
@@ -110,7 +190,17 @@ class WikiqParser():
         self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
-        
+        self.revert_radius = revert_radius
+        self.regex_match_revision = regex_match_revision
+        self.regex_revision_label = regex_revision_label
+        self.regex_match_comment = regex_match_comment
+        self.regex_comment_label = regex_comment_label
+
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
+
     def __get_namespace_from_title(self, title):
         default_ns = None
 
@@ -126,6 +216,7 @@ class WikiqParser():
         # if we've made it this far with no matches, we return the default namespace
         return default_ns
 
+
     def process(self):
 
         # create a regex that creates the output filename
@@ -137,50 +228,117 @@ class WikiqParser():
         dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
 
         # extract list of namespaces
-        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
+        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
 
         page_count = 0
         rev_count = 0
+
+
         # Iterate through pages
         for page in dump:
-            if self.persist:
-                state = persistence.State()
+            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
+
+            # skip namespaces not in the filter
+            if self.namespace_filter is not None:
+                if namespace not in self.namespace_filter:
+                    continue
+
+            rev_detector = mwreverts.Detector(radius = self.revert_radius)
+
+
+            if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
 
-            rev_detector = reverts.Detector()
+                if self.persist == PersistMethod.sequence:
+                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                elif self.persist == PersistMethod.segment:
+                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                # self.persist == PersistMethod.legacy
+                else:
+                    from mw.lib import persistence
+                    state = persistence.State()
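A hedged, minimal sketch of the mwpersistence interface these branches set up, with two toy revisions standing in for real wikitext; update() is the same call the non-legacy paths make further down.

    import mwpersistence
    from deltas import SequenceMatcher
    from deltas.tokenizers import wikitext_split

    # PERSISTENCE_RADIUS is defined above as 7
    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                    revert_radius=PERSISTENCE_RADIUS)
    for rev_id, text in [(1, "foo bar"), (2, "foo bar baz")]:
        _, tokens_added, tokens_removed = state.update(text, rev_id)
    # after revision 2, tokens_added holds only the tokens that revision introduced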
 
             # Iterate through a page's revisions
             for rev in page:
+                
+                # initialize rev_data
+                rev_data = {}
 
-                rev_data = {'revid' : rev.id,
-                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
-                            'articleid' : page.id,
-                            'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
-                            'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
-                            'deleted' : "TRUE" if rev.text.deleted else "FALSE" } 
+                # if the command line args only gave a label (and no regular expression is given)
+                if (self.regex_revision_label is not None and self.regex_match_revision is None) or (self.regex_comment_label is not None and self.regex_match_comment is None):
+                    sys.exit('The given regex label(s) has no corresponding regex to search for.')
+                
+                # if there's anything in the list of regex_match_revision
+                if self.regex_match_revision is not None:
+                    if (self.regex_revision_label is None) or (len(self.regex_match_revision) != len(self.regex_revision_label)):
+                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+                    
+                    # initialize and construct the list of regex-label tuples
+                    pairs = []
+                    for i in range(0,len(self.regex_match_revision)):
+                        pairs.append((self.regex_match_revision[i], self.regex_revision_label[i]))
+
+                    # for each regex/label pair, we now run matchmake to check and output columns
+                    for pair in pairs:
+                        # pair[0] corresponds to the regex, pair[1] to the label
+                        rev_data = matchmake(rev.text, rev_data, pair[0], pair[1])
+                
+                # if there's anything in the list of regex_match_comment
+                if self.regex_match_comment is not None:
+                    if (self.regex_comment_label is None) or (len(self.regex_match_comment) != len(self.regex_comment_label)):
+                        sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+                    
+                    # initialize and construct the list of regex-label tuples
+                    pairs = []
+                    for i in range(0,len(self.regex_match_comment)):
+                        pairs.append((self.regex_match_comment[i], self.regex_comment_label[i]))
+
+                    # for each regex/label pair, we now run matchmake to check and output columns
+                    for pair in pairs:
+                        # pair[0] corresponds to the regex, pair[1] to the label
+                        rev_data = matchmake(rev.comment, rev_data, pair[0], pair[1])
+
+                # we fill out the rest of the data structure now
+                rev_data['revid'] = rev.id
+                rev_data['date_time'] = rev.timestamp.strftime('%Y-%m-%d %H:%M:%S')
+                rev_data['articleid'] = page.id
+                rev_data['editor_id'] = "" if rev.deleted.user or rev.user.id is None else rev.user.id
+                rev_data['title'] = '"' + page.title + '"'
+                rev_data['namespace'] = namespace
+                rev_data['deleted'] = "TRUE" if rev.deleted.text else "FALSE"
 
                 # if revisions are deleted, /many/ things will be missing
-                if rev.text.deleted:
+                if rev.deleted.text:
                     rev_data['text_chars'] = ""
                     rev_data['sha1'] = ""
                     rev_data['revert'] = ""
                     rev_data['reverteds'] = ""
 
                 else:
+                    # rev.text can be None if the page has no text
+                    if not rev.text:
+                        rev.text = ""
                     # if text exists, we'll check for a sha1 and generate one otherwise
+
                     if rev.sha1:
                         text_sha1 = rev.sha1
                     else:
+
                         text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
                     
                     rev_data['sha1'] = text_sha1
 
                     # TODO rev.bytes doesn't work.. looks like a bug
                     rev_data['text_chars'] = len(rev.text)
-               
+
                     # generate revert data
                     revert = rev_detector.process(text_sha1, rev.id)
+                    
                     if revert:
                         rev_data['revert'] = "TRUE"
                         rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
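A minimal sketch of the identity-revert detection used here, with hypothetical sha1 stand-ins: the Detector reports a revert when a checksum it has seen within its radius reappears, and revert.reverteds carries whatever ids were passed in for the undone revisions.

    import mwreverts

    detector = mwreverts.Detector(radius=15)
    revert = None
    for checksum, rev_id in [("aaa", 1), ("bbb", 2), ("aaa", 3)]:
        revert = detector.process(checksum, rev_id)
    # revision 3 restored revision 1's checksum, so revision 2 was reverted
    assert revert is not None and list(revert.reverteds) == [2]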
@@ -191,10 +349,10 @@ class WikiqParser():
                 # if the fact that the edit was minor can be hidden, this might be an issue
                 rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
 
-                if rev.contributor.user_text:
+                if not rev.deleted.user:
                     # wrap user-defined editors in quotes for fread
-                    rev_data['editor'] = '"' + rev.contributor.user_text + '"'
-                    rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
+                    rev_data['editor'] = '"' + rev.user.text + '"'
+                    rev_data['anon'] = "TRUE" if rev.user.id is None else "FALSE"
                     
                 else:
                     rev_data['anon'] = ""
@@ -211,12 +369,18 @@ class WikiqParser():
                 if self.collapse_user:
                     rev_data['collapsed_revs'] = rev.collapsed_revs
 
-                if self.persist:
-                    if rev.text.deleted:
+                if self.persist != PersistMethod.none:
+                    if rev.deleted.text:
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
-                        _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
+                        if self.persist != PersistMethod.legacy:
+                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
+
+                        else:
+                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+                            
                         window.append((rev.id, rev_data, tokens_added, tokens_removed))
                         
                         if len(window) == PERSISTENCE_RADIUS:
@@ -236,7 +400,7 @@ class WikiqParser():
 
                 rev_count += 1
 
-            if self.persist:
+            if self.persist != PersistMethod.none:
                 # print out metadata for the last RADIUS revisions
                 for i, item in enumerate(window):
                     # if the window was full, we've already printed item 0
@@ -263,7 +427,7 @@ class WikiqParser():
         if self.urlencode:
             for field in TO_ENCODE:
                 rev_data[field] = quote(str(rev_data[field]))
-            
+
         if not self.printed_header:
             print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
             self.printed_header = True
@@ -310,14 +474,53 @@ parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
 parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                     help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
 
-parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure.")
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) the number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves but prone to bugs, and slower.")
 
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="ID number of the namespace to include. Can be specified more than once.")
+
+parser.add_argument('-rr',
+                    '--revert-radius',
+                    dest="revert_radius",
+                    type=int,
+                    action='store',
+                    default=15,
+                    help="Number of edits to check when looking for reverts (default: 15)")
+
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+                    help="The label for the output column based on matching the regex in revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+                    help="The regular expression to search for in comments of revisions.")
+
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+                    help="The label for the output column based on matching the regex in comments.")
+
 args = parser.parse_args()
 
+# set persistence method
+
+if args.persist is None:
+    persist = PersistMethod.none
+elif args.persist == "segment":
+    persist = PersistMethod.segment
+elif args.persist == "legacy":
+    persist = PersistMethod.legacy
+else:
+    persist = PersistMethod.sequence
+
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
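Putting the new flags together, one possible hedged invocation (the dump file name is hypothetical) that keeps only the main namespace, widens the revert window, uses sequence persistence, and scans revision text for category links:

    ./wikiq dump.xml.bz2 -n 0 -rr 30 -p sequence -RP '\[\[Category:[^\]]+\]\]' -RPl category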
 if len(args.dumpfiles) > 0:
     for filename in args.dumpfiles:
         input_file = open_input_file(filename)
@@ -336,11 +539,17 @@ if len(args.dumpfiles) > 0:
             filename = os.path.join(output_dir, os.path.basename(filename))
             output_file = open_output_file(filename)
 
-        wikiq = WikiqParser(input_file, output_file, 
+        wikiq = WikiqParser(input_file,
+                            output_file,
                             collapse_user=args.collapse_user,
-                            persist=args.persist,
-                            urlencode=args.urlencode)
-
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces=namespaces,
+                            revert_radius=args.revert_radius,
+                            regex_match_revision = args.regex_match_revision,
+                            regex_revision_label = args.regex_revision_label,
+                            regex_match_comment = args.regex_match_comment,
+                            regex_comment_label = args.regex_comment_label)
 
         wikiq.process()
 
@@ -348,11 +557,20 @@ if len(args.dumpfiles) > 0:
         input_file.close()
         output_file.close()
 else:
-    wikiq = WikiqParser(sys.stdin, sys.stdout,
+    wikiq = WikiqParser(sys.stdin,
+                        sys.stdout,
                         collapse_user=args.collapse_user,
-                        persist=args.persist,
-                        urlencode=args.urlencode)
-    wikiq.process()
+                        persist=persist,
+                        urlencode=args.urlencode,
+                        namespaces=namespaces,
+                        revert_radius=args.revert_radius,
+                        regex_match_revision = args.regex_match_revision,
+                        regex_revision_label = args.regex_revision_label,
+                        regex_match_comment = args.regex_match_comment,
+                        regex_comment_label = args.regex_comment_label)
+
+    wikiq.process() 
 
 # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
 # stop_words = stop_words.split(",")
 