diff --git a/wikipedia_views/scripts/fetch_daily_views.py b/wikipedia_views/scripts/fetch_daily_views.py
index b604e26..ed36129 100755
--- a/wikipedia_views/scripts/fetch_daily_views.py
+++ b/wikipedia_views/scripts/fetch_daily_views.py
@@ -9,28 +9,29 @@
 #
 ###############################################################################
 
-
+import sys
+import subprocess
 import requests
 import argparse
 import json
-import csv
 import time
 import os.path
 import datetime
-#import feather
-
+import logging
+from csv import DictWriter
+#import feather #TBD
 
 def parse_args():
 
-    parser = argparse.ArgumentParser(description='Call the views API repeatedly.')
-    parser.add_argument('-o', '--output_folder', help='Where to save output', default="../data/", type=str)
-    parser.add_argument('-i', '--article_file', help='File listing article names', default="../resources/articles.txt", type=str)
-    parser.add_argument('-d', '--query_date', help='Date if not yesterday, in YYYYMMDD format please.', type=str)
+    parser = argparse.ArgumentParser(description='Call the views API to collect view data.')
+    parser.add_argument('-o', '--output_folder', help='Where to save output', default="wikipedia_views/data", type=str)
+    parser.add_argument('-i', '--article_file', help='File listing article names', default="wikipedia_views/resources/enwp_wikiproject_covid19_articles.txt", type=str)
+    parser.add_argument('-d', '--query_date', help='Date if not yesterday, in YYYYMMDD format.', type=str)
+    parser.add_argument('-L', '--logging_level', help='Logging level. Options are debug, info, warning, error, critical. Default: info.', default='info', type=str)
+    parser.add_argument('-W', '--logging_destination', help='Logging destination file. (default: standard error)', type=str)
     args = parser.parse_args()
-
     return(args)
 
-
 def main():
 
     args = parse_args()
@@ -38,7 +39,8 @@ def main():
     outputPath = args.output_folder
     articleFile = args.article_file
 
-    if (args.query_date):
+    #handle -d
+    if args.query_date:
         queryDate = args.query_date
     else:
         yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
@@ -46,47 +48,79 @@ def main():
     queryDate = queryDate + "00" #requires specifying hours
 
 
+    #handle -L
+    loglevel_mapping = { 'debug' : logging.DEBUG,
+                         'info' : logging.INFO,
+                         'warning' : logging.WARNING,
+                         'error' : logging.ERROR,
+                         'critical' : logging.CRITICAL }
 
-    articleList = []
-    #1 Load up the list of article names
+    if args.logging_level in loglevel_mapping:
+        loglevel = loglevel_mapping[args.logging_level]
+    else:
+        print("Choose a valid log level: debug, info, warning, error, or critical")
+        sys.exit(1)
 
-    j_Out = f"{outputPath}dailyviews{queryDate}.json"
-    t_Out = f"{outputPath}dailyviews{queryDate}.tsv"
+    #handle -W
+    if args.logging_destination:
+        logging.basicConfig(filename=args.logging_destination, filemode='a', level=loglevel)
+    else:
+        logging.basicConfig(level=loglevel)
+
+    export_git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
+    export_git_short_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode().strip()
+    export_time = str(datetime.datetime.now())
+
+    logging.info(f"Starting run at {export_time}")
+    logging.info(f"Last commit: {export_git_hash}")
+
+    #1 Load up the list of article names
+    j_outfilename = os.path.join(outputPath, f"digobs_covid19-wikipedia-enwiki_dailyviews-{queryDate}.json")
+    t_outfilename = os.path.join(outputPath, f"digobs_covid19-wikipedia-enwiki_dailyviews-{queryDate}.tsv")
 
     with open(articleFile, 'r') as infile:
-        next(infile) #skip header
         articleList = list(infile)
 
-    j = []
+    success = 0 #for logging how many work/fail
+    failure = 0
 
-    #2 Repeatedly call the API with that list of names
+    #3 Save results as a JSON and TSV
+    with open(j_outfilename, 'w') as j_outfile, \
+         open(t_outfilename, 'w') as t_outfile:
 
-    for a in articleList:
-        a = a.strip("\"\n") #destringify
-        url= f"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents/{a}/daily/{queryDate}/{queryDate}"
+        #2 Repeatedly call the API with that list of names
+        for a in articleList:
+            a = a.strip("\"\n") #destringify
+            url = f"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents/{a}/daily/{queryDate}/{queryDate}"
 
-        response = requests.get(url)
-        if response.ok:
-            jd = json.loads(response.content)
-            j.append(jd["items"][0])
-            time.sleep(.1)
-        else:
-            print(f"Not ok response: {response.status_code} from {url}")
+            response = requests.get(url)
+            if response.ok:
+                jd = response.json()["items"][0]
+                success = success + 1
+            else:
+                failure = failure + 1
+                logging.warning(f"Failure: {response.status_code} from {url}")
+                continue
 
-    #3 Save results as a JSON and TSV
+            # start writing the TSV file if it doesn't exist yet
+            try:
+                dw
+            except NameError:
+                dw = DictWriter(t_outfile, sorted(jd.keys()), delimiter='\t')
+                dw.writeheader()
 
-    #all data in j now, make json file
-    with open(j_Out, 'w') as j_outfile:
-        json.dump(j, j_outfile, indent=2)
+            logging.debug(f"printing data: {jd}")
 
-    with open(t_Out, 'w') as t_outfile:
-        dw = csv.DictWriter(t_outfile, sorted(j[0].keys()), delimiter='\t')
-        dw.writeheader()
-        dw.writerows(j)
+            # write out the line of the json file
+            print(json.dumps(jd), file=j_outfile)
 
+            # write out the row of the tsv file
+            dw.writerow(jd)
 
 #    f_Out = outputPath + "dailyviews" + queryDate + ".feather"
     # read the json back in and make a feather file?
 
+    logging.debug(f"Run complete at {datetime.datetime.now()}")
+    logging.info(f"Processed {success} successful URLs and {failure} failures.")
 
 if __name__ == "__main__":
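
A note on running the updated script: the new defaults ("wikipedia_views/data", "wikipedia_views/resources/enwp_wikiproject_covid19_articles.txt") are paths relative to the repository root, so the script is presumably meant to be launched from there. The -d, -L, and -W flags compose as in the following invocation, where the date and log-file name are only examples:

    python3 wikipedia_views/scripts/fetch_daily_views.py -d 20200322 -L debug -W fetch_daily_views.log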
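
For readers unfamiliar with the endpoint the loop calls: the per-article pageviews API returns a JSON object whose "items" array holds one record per requested day, which is why the script takes ["items"][0] for a single-day query. Below is a minimal, self-contained sketch of one such request; the article title and date are arbitrary examples, not values from the article file:

    import requests

    article = "Pandemic"  # example title; multi-word titles use underscores
    day = "2020032200"    # YYYYMMDDHH, matching how the script builds queryDate
    url = (f"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/"
           f"en.wikipedia/all-access/all-agents/{article}/daily/{day}/{day}")
    item = requests.get(url).json()["items"][0]
    print(item["article"], item["views"])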
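
The try/except NameError block in the loop is a lazy-initialization idiom: the TSV header can only be written after the first successful response reveals the field names, so the code probes whether dw has been bound yet. Here is a sketch of the same idea using an explicit sentinel instead of the NameError probe (write_tsv is a hypothetical helper, not part of the script):

    import sys
    from csv import DictWriter

    def write_tsv(records, outfile):
        dw = None
        for record in records:
            if dw is None:  # first record: the column names are now known
                dw = DictWriter(outfile, sorted(record.keys()), delimiter='\t')
                dw.writeheader()
            dw.writerow(record)

    write_tsv([{"article": "A", "views": 3}, {"article": "B", "views": 5}], sys.stdout)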