code.communitydata.science - covid19.git/commitdiff
monitor pages from dsaez's wikidata crawler
author Nathan TeBlunthuis <nathante@uw.edu>
Sat, 4 Apr 2020 22:23:33 +0000 (15:23 -0700)
committer Nathan TeBlunthuis <nathante@uw.edu>
Sat, 4 Apr 2020 22:23:33 +0000 (15:23 -0700)
wikipedia/scripts/fetch_daily_views.py [changed mode: 0755->0644]
wikipedia/scripts/fetch_revisions.py [changed mode: 0755->0644]

old mode 100755 (executable)
new mode 100644 (file)
index 9f147e0..8f56e83
 #!/usr/bin/env python3
-
-###############################################################################
-#
-# This script assumes the presence of the COVID-19 repo.
-# 
-# It (1) reads in the article list and then (2) calls the Wikimedia API to 
-# fetch view information for each article. Output is to (3) JSON and TSV.
-#
-###############################################################################
-
-import sys
-import requests
 import argparse
-import json
-import time
-import os.path
-import datetime
+import sqlite3
+import requests
+from datetime import datetime, timedelta
 import logging
-from csv import DictWriter
 import digobs
-#import feather #TBD
+from os import path, mkdir
+from functools import partial
+from itertools import chain
 
-def parse_args():
-    parser = argparse.ArgumentParser(description='Call the views API to collect Wikipedia view data.')
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser(description="Get a list of pages related to COVID19, pandemic, and SARS-COV2 virus related entities.")
     parser.add_argument('-o', '--output_folder', help='Where to save output', default="wikipedia/data", type=str)
-    parser.add_argument('-i', '--article_file', help='File listing article names', default="wikipedia/resources/enwp_wikiproject_covid19_articles.txt", type=str)
-    parser.add_argument('-d', '--query_date', help='Date if not yesterday, in YYYYMMDD format.', type=str)
-    parser.add_argument('-L', '--logging_level', help='Logging level. Options are debug, info, warning, error, critical. Default: info.', default='info', type=digobs.get_loglevel), 
-    parser.add_argument('-W', '--logging_destination', help='Logging destination file. (default: standard error)', type=str), 
-    args = parser.parse_args()
-    return(args)
+    parser.add_argument('-L', '--logging_level', help='Logging level. Options are debug, info, warning, error, critical. Default: info.', default='info', type=digobs.get_loglevel)
+    parser.add_argument('-W', '--logging_destination', help='Logging destination file. (default: standard error)', type=argparse.FileType('a'))
+    parser.add_argument('-d', '--query_date', help='Date if not yesterday, in YYYYMMDD format.', type=lambda s: datetime.strptime(s, "%Y%m%d"))
 
-def main():
+    parser.add_argument('-i', '--input_file', help="Input a file of page names from the English Wikiproject.", type=argparse.FileType('r'), default='./wikipedia/resources/enwp_wikiproject_covid19_articles.txt')
 
-    args = parse_args()
+    parser.add_argument('-b', '--input_db', help="Input a path to a sqlite3 database from the real-time-covid-tracker project", type=sqlite3.connect, default='real-time-wiki-covid-tracker/AllWikidataItems.sqlite')
 
-    outputPath = args.output_folder
-    articleFile = args.article_file
+    args = parser.parse_args()
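+    # -b's type=sqlite3.connect means argparse already opened args.input_db as a Connection; Row gives name-based access to columns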
+    conn = args.input_db
+    conn.row_factory = sqlite3.Row
 
     #handle -d
     if args.query_date:
-        query_date = args.query_date
+        query_date = args.query_date.strftime("%Y%m%d")
     else:
-        yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
+        yesterday = datetime.today() - timedelta(days=1)
         query_date = yesterday.strftime("%Y%m%d")
 
-    #handle -W
-    if args.logging_destination:
-        logging.basicConfig(filename=args.logging_destination, filemode='a', level=args.logging_level)
-    else:
-        logging.basicConfig(level=args.logging_level)
-
-    export_time = str(datetime.datetime.now())
-    export_date = datetime.datetime.today().strftime("%Y%m%d")
+    digobs.init_logging(args)
 
-    logging.info(f"Starting run at {export_time}")
-    logging.info(f"Last commit: {digobs.git_hash()}")
+    logging.info(f"Destructively outputting results to {args.output_folder}")
 
     #1 Load up the list of article names
-    j_outfilename = os.path.join(outputPath, f"digobs_covid19-wikipedia-enwiki_dailyviews-{query_date}.json")
-    t_outfilename = os.path.join(outputPath, f"digobs_covid19-wikipedia-enwiki_dailyviews-{query_date}.tsv")
 
-    with open(articleFile, 'r') as infile:
-        articleList = list(map(str.strip, infile))
+    logging.info("loading info from database")
+    projects = [row['project'] for row in conn.execute("SELECT DISTINCT project from pagesPerProjectTable;").fetchall()]
+
+    successes = 0
+    failures = 0
 
-    success = 0 #for logging how many work/fail
-    failure = 0 
+    for project in projects:
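+        # results land in <output_folder>/<project>/<query_date>/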
+        project_folder = path.join(args.output_folder, project)
+        if not path.exists(project_folder):
+            mkdir(project_folder)
 
-    #3 Save results as a JSON and TSV
-    with open(j_outfilename, 'w') as j_outfile, \
-         open(t_outfilename, 'w') as t_outfile:
+        dump_folder = path.join(project_folder, query_date)
+        if not path.exists(dump_folder):
+            mkdir(dump_folder)
 
-        #2 Repeatedly call the API with that list of names
-        for a in articleList:
-            url= f"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents/{a}/daily/{query_date}00/{query_date}00"
+        logging.info(f"Getting page views for {project}")
+        rows = conn.execute(f"SELECT DISTINCT page from pagesPerProjectTable WHERE project='{project}';").fetchall()
+        pages = (row['page'] for row in rows)
 
-            response = requests.get(url)
-            if response.ok:
-                jd = response.json()["items"][0]
-                success = success + 1
-            else:
-                failure = failure + 1
-                logging.warning(f"Failure: {response.status_code} from {url}")
-                continue
+        # special case for english, we have a wikiproject input file
+        if project == "en.wikipedia":
+            pages = chain(pages, map(str.strip, args.input_file))
 
-            # start writing the CSV File if it doesn't exist yet
-            try:
-                dw
-            except NameError:
-                dw = DictWriter(t_outfile, sorted(jd.keys()), delimiter='\t')
-                dw.writeheader()
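+        # bind project and query_date so the view-API helper can be mapped over bare page titles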
+        call_view_api = partial(digobs.call_view_api, project=project, query_date=query_date)
 
-            logging.debug(f"printing data: {jd}")
+        responses = map(call_view_api, pages)
+        
+        j_outfilename = path.join(dump_folder, f"digobs_covid19_{project}_dailyviews-{query_date}.json")
+        t_outfilename = path.join(dump_folder, f"digobs_covid19_{project}_dailyviews-{query_date}.tsv")
 
-            # write out the line of the json file
-            print(json.dumps(jd), file=j_outfile)
+        with open(j_outfilename, 'w') as j_outfile, \
+             open(t_outfilename, 'w') as t_outfile:
 
-            # write out of the csv file
-            dw.writerow(jd)
+            proj_successes, proj_failures = digobs.process_view_responses(responses, j_outfile, t_outfile, logging)
+        logging.info(f"(Processed {proj_successes} successes and {proj_failures} for {project}")
+        successes = proj_successes + successes
+        failures = proj_failures + failures
 
+    conn.close()
     # f_Out = outputPath + "dailyviews" + query_date + ".feather"
     # read the json back in and make a feather file? 
-    logging.debug(f"Run complete at {datetime.datetime.now()}")
-    logging.info(f"Processed {success} successful URLs and {failure} failures.")
-
-
-if __name__ == "__main__":
+    logging.debug(f"Run complete at {datetime.now()}")
+    logging.info(f"Processed {successes} successful URLs and {failures} failures.")
 
-    main()
old mode 100755 (executable)
new mode 100644 (file)
index 2d25e85..9acaef1
 
 import argparse
 import logging
-import os.path
+from os import path, mkdir
 import json
 import datetime
-
-from requests import Request
+import sqlite3
+from functools import partial
+from itertools import chain
 from csv import DictWriter
-from mw import api
 import digobs
 
-
-def parse_args():
+def main():
 
     parser = argparse.ArgumentParser(description='Call the views API to collect Wikipedia revision data.')
     parser.add_argument('-o', '--output_folder', help='Where to save output', default="wikipedia/data", type=str)
-    parser.add_argument('-i', '--article_file', help='File listing article names', default="wikipedia/resources/enwp_wikiproject_covid19_articles.txt", type=str)
+    parser.add_argument('-i', '--input_file', help="Input a file of page names from the English Wikiproject.", type=argparse.FileType('r'), default='./wikipedia/resources/enwp_wikiproject_covid19_articles.txt')
+    parser.add_argument('-d', '--input_db', help="Input a path to a sqlite3 database from the real-time-covid-tracker project", type=sqlite3.connect, default='real-time-wiki-covid-tracker/AllWikidataItems.sqlite')
 parser.add_argument('-L', '--logging_level', help='Logging level. Options are debug, info, warning, error, critical. Default: info.', default='info', type=digobs.get_loglevel)
-    parser.add_argument('-W', '--logging_destination', help='Logging destination file. (default: standard error)', type=str), 
-    args = parser.parse_args()
-    return(args)
-
-def main():
-    args = parse_args()
+    parser.add_argument('-W', '--logging_destination', help='Logging destination file. (default: standard error)', type=argparse.FileType('a'))
 
-    output_path = args.output_folder
-    article_filename = args.article_file
+    args = parser.parse_args()
 
-    #handle -W
-    if args.logging_destination:
-        logging.basicConfig(filename=args.logging_destination, filemode='a', level=args.logging_level)
-    else:
-        logging.basicConfig(level=args.logging_level)
+    logging = digobs.init_logging(args)
 
-    export_time = str(datetime.datetime.now())
-    export_date = datetime.datetime.today().strftime("%Y%m%d")
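+    # as in fetch_daily_views.py, type=sqlite3.connect means args.input_db is already an open Connection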
+    conn = args.input_db
+    conn.row_factory = sqlite3.Row
 
-    logging.info(f"Starting run at {export_time}")
-    logging.info(f"Last commit: {digobs.git_hash()}")
+    projects = (row['project'] for row in conn.execute("SELECT DISTINCT project from pagesPerProjectTable;").fetchall())
 
-    json_output_filename = os.path.join(output_path, f"digobs_covid19-wikipedia-enwiki_revisions-{export_date}.json")
-    tsv_output_filename =  os.path.join(output_path, f"digobs_covid19-wikipedia-enwiki_revisions-{export_date}.tsv")
-    
-    api_session = api.Session("https://en.wikipedia.org/w/api.php")
+    tsv_fields = ['title', 'pageid', 'namespace']
 
     # list of properties from the API we want to gather (basically all of
     # them supported by mediawik-utilities)
@@ -68,81 +54,67 @@ def main():
                  'flags' : 'flags',
                  'comment' : 'comment',
                  'content' : 'content' }
+    
+    def get_project_pages(project):
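+        # distinct page titles for this project, straight from the crawler's database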
+        return (row['page'] for row in conn.execute("SELECT DISTINCT page FROM pagesPerProjectTable WHERE project = ?;", (project,)).fetchall())
 
-    exclude_from_tsv = ['tags', 'comment', 'content', 'flags']
-
-    # load the list of articles
-    with open(article_filename, 'r') as infile:
-        article_list= list(map(str.strip, infile))
-
-    def get_revisions_for_page(title):
-        return api_session.revisions.query(properties=rv_props.values(),
-                                           titles={title},
-                                           direction="newer")
+    def get_project_revisions(project):
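+        # batches of revisions for every page in the project; enwiki also merges in the wikiproject article list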
+        pages = get_project_pages(project)
+        if project=="en.wikipedia":
+            pages = chain(pages, map(str.strip, args.input_file))
+        return digobs.get_pages_revisions(pages, project=project, logging=logging, rv_props=rv_props)
 
-    tsv_fields = ['title', 'pageid', 'namespace']
     tsv_fields = tsv_fields + list(rv_props.keys())
 
+    exclude_from_tsv = ['tags', 'comment', 'content', 'flags']
+
     # drop fields that we identified for exclusion
     tsv_fields = [e for e in tsv_fields if e not in exclude_from_tsv]
 
     # add special export fields
     tsv_fields = tsv_fields + ['anon', 'minor', 'url', 'export_timestamp', 'export_commit']
+    
+    export_time = str(datetime.datetime.now())
+
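+    # pre-bind the TSV schema and export metadata; the per-batch loop below supplies only the batch, project, and writer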
+    rev_batch_to_tsv = partial(digobs.rev_batch_to_tsv,
+                         tsv_fields = tsv_fields,
+                         export_info={'export_timestamp':export_time,
+                                      'export_commit':digobs.git_hash(short=True)})    
 
     export_info = { 'git_commit' : digobs.git_hash(),
                     'timestamp' : export_time }
 
-    with open(json_output_filename, 'w') as json_output, \
-         open(tsv_output_filename, 'w') as tsv_output:
-
-        tsv_writer = DictWriter(tsv_output, fieldnames=tsv_fields, delimiter="\t")
-        tsv_writer.writeheader()
-
-        for article in article_list:
-            logging.info(f"pulling revisions for: {article}")
-            for rev in get_revisions_for_page(article):
-                logging.debug(f"processing raw revision: {rev}")
-
-                # add export metadata
-                rev['exported'] = export_info
-
-                # save the json version of the code
-                print(json.dumps(rev), file=json_output)
-
-                # handle missing data
-                if "sha1" not in rev:
-                    rev["sha1"] = ""
-
-                if "userhidden" in rev:
-                    rev["user"] = ""
-                    rev["userid"] = ""
-
-                # recode anon so it's true or false instead of present/missing
-                if "anon" in rev:
-                    rev["anon"] = True
-                else:
-                    rev["anon"] = False
-                    
-                # let's recode "minor" in the same way
-                if "minor" in rev:
-                    rev["minor"] = True
-                else:
-                    rev["minor"] = False
-
-                # add page title information
-                rev['title'] = rev['page']['title']
-                rev['pageid'] = rev['page']['pageid']
-                rev['namespace'] = rev['page']['ns']
-
-                # construct a URL
-                rev['url'] = Request('GET', 'https://en.wikipedia.org/w/index.php',
-                                     params={'title' : rev['title'].replace(" ", "_"),
-                                            'oldid' : rev['revid']}).prepare().url
-
-                rev['export_timestamp'] = export_time
-                rev['export_commit'] = digobs.git_hash(short=True)
-
-                tsv_writer.writerow({k: rev[k] for k in tsv_fields})
+    export_date = datetime.datetime.today().strftime("%Y%m%d")
+
+    rev_batch_to_json = partial(digobs.rev_batch_to_json,
+                                export_info = export_info)
+
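+    # create <output_folder>/<project>/<export_date>/ and stream each revision batch to JSON and TSV files there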
+    def write_project_pages(project):
+        project_folder = path.join(args.output_folder, project)
+        if not path.exists(project_folder):
+            mkdir(project_folder)
+
+        dump_folder = path.join(project_folder, export_date)
+        if not path.exists(dump_folder):
+            mkdir(dump_folder)
+
+        project_revs = get_project_revisions(project)
+        
+        json_output_filename = path.join(dump_folder, f"digobs_covid19_{project}_revisions-{export_date}.json")
+        tsv_output_filename =  path.join(dump_folder, f"digobs_covid19_{project}_revisions-{export_date}.tsv")
+
+        with open(json_output_filename, 'w') as json_output, \
+             open(tsv_output_filename, 'w') as tsv_output:
+            tsv_writer = DictWriter(tsv_output, fieldnames=tsv_fields, delimiter="\t")
+            tsv_writer.writeheader()
+  
+            for rev_batch in project_revs:
+                logging.debug(f"processing raw revision: {rev_batch}")
+                rev_batch_to_json(rev_batch, json_output=json_output)
+                rev_batch_to_tsv(rev_batch, project=project, tsv_writer=tsv_writer)
+
+    for project in projects:
+        write_project_pages(project)
 
 if __name__ == "__main__":
     main()
