#!/usr/bin/env python3

###############################################################################
#
# This script scrapes the COVID-19 WikiProject.
#
# It (1) hits the fcgi once to find out how many rounds of requests are
# needed, then (2) hits the fcgi that many times, paring each response down
# to a list of article names, and then (3) saves the list out.
#
# At time of writing, the fcgi returns at most 1000 results per request, no
# matter what you put in the limit parameter. Page 1 looks like this:
# https://tools.wmflabs.org/enwp10/cgi-bin/list2.fcgi?run=yes&projecta=COVID-19&namespace=&pagename=&quality=&importance=&score=&limit=1000&offset=1&sorta=Importance&sortb=Quality
#
# and page 2 looks like this:
# https://tools.wmflabs.org/enwp10/cgi-bin/list2.fcgi?namespace=&run=yes&projecta=COVID-19&score=&sorta=Importance&importance=&limit=1000&pagename=&quality=&sortb=Quality&&offset=1001
#
###############################################################################
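
# A typical invocation, run from the repository root so that the relative
# default output path and the git provenance calls below resolve (the log
# file name here is illustrative):
#
#   python3 wikipedia/scripts/wikiproject_scraper.py -L debug -W scrape.log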

import argparse
import subprocess
import requests
import datetime
import logging
import re
import math
from bs4 import BeautifulSoup
import digobs  # sibling helper module providing get_loglevel

def parse_args():

    parser = argparse.ArgumentParser(description='Get a list of pages tracked by the COVID-19 WikiProject.')
    parser.add_argument('-o', '--output_file', help='Where to save output', default="wikipedia/resources/enwp_wikiproject_covid19_articles.txt", type=str)
    parser.add_argument('-L', '--logging_level', help='Logging level. Options are debug, info, warning, error, critical. Default: info.', default='info', type=digobs.get_loglevel)
    parser.add_argument('-W', '--logging_destination', help='Logging destination file. (default: standard error)', type=str)
    args = parser.parse_args()

    return args

def main():

    args = parse_args()
    outputFile = args.output_file

    # handle -W
    if args.logging_destination:
        logging.basicConfig(filename=args.logging_destination, filemode='a', level=args.logging_level)
    else:
        logging.basicConfig(level=args.logging_level)

    export_git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
    export_git_short_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode().strip()
    export_time = str(datetime.datetime.now())

    logging.info(f"Starting at {export_time} and destructively outputting article list to {outputFile}.")
    logging.info(f"Last commit: {export_git_hash}")
    #1 How many hits to the fcgi?
    session = requests.Session()

    originalURL = "https://tools.wmflabs.org/enwp10/cgi-bin/list2.fcgi?run=yes&projecta=COVID-19&namespace=&pagename=&quality=&importance=&score=&limit=1000&offset=1&sorta=Importance&sortb=Quality"
    headURL = "https://tools.wmflabs.org/enwp10/cgi-bin/list2.fcgi?run=yes&projecta=COVID-19&namespace=&pagename=&quality=&importance=&score=&limit=1000&offset="
    tailURL = "&sorta=Importance&sortb=Quality" # head + offset + tail = original when offset = 1

    # find out how many results we have
    response = session.get(originalURL)

    soup = BeautifulSoup(response.text, features="html.parser")
    nodes = soup.find_all('div', class_="navbox")
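    # the first navbox div contains a line like "Total results: 1234"; pull
    # the count out with a regex (assumes the fcgi's current HTML layout)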
    rx = re.compile(r"Total results:\D*(\d+)")
    m = rx.search(nodes[0].get_text())
    numResults = int(m.group(1))

    logging.debug(f"fcgi returned {numResults}")
    rounds = math.ceil(numResults/1000)
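    # e.g. a hypothetical numResults of 2500 gives rounds = ceil(2.5) = 3,
    # so the loop below requests offsets 1, 1001, and 2001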

    #2 Fetch and parse down to just the article names
    articleNames = []

    for i in range(1, rounds+1):
        offset = (i - 1)*1000 + 1 # offset is 1, then 1001, then 2001
        url = f"{headURL}{offset}{tailURL}"
        response = session.get(url)
        soup = BeautifulSoup(response.text, features="html.parser") # make fresh soup
        article_rows = soup.find_all('tr', class_="list-odd") # just the odds first
        for row in article_rows:
            a = row.find('a')
            articleNames.append(a.get_text())
        article_rows = soup.find_all('tr', class_="list-even") # now the evens
        for row in article_rows:
            a = row.find('a')
            articleNames.append(a.get_text())
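    # note: rows are collected odds-first then evens, so within-page ordering
    # of article names is not preserved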

    #3 Save the list to a file, one article name per line

    with open(outputFile, 'w') as f:
        f.write('\n'.join(articleNames)+'\n')
    logging.debug(f"Finished scrape and made a new article file at {datetime.datetime.now()}")


if __name__ == "__main__":

    main()
