articlequality_ordinal.git / ores_scores_sample.py
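
"""Score a sample of English Wikipedia article revisions with the ORES
articlequality model and save the scored sample as feather, csv, and
newline-delimited json."""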
import mwapi
from revscoring import Model
import articlequality
import pyarrow
import pandas as pd
import scoring_utils
from itertools import chain, zip_longest
from multiprocessing import Pool
from functools import partial
from pyRemembeR import Remember
import fire
from pathlib import Path
import tqdm
import json

remember = Remember("score_sample_articles.RDS")

def get_revision_text(revid_batch, api):
    # Drop the None fill values that grouper() may have padded the batch with.
    revid_batch = [rid for rid in revid_batch if rid is not None]
    doc = api.get(action='query',
                  prop='revisions',
                  revids=revid_batch,
                  rvprop=['ids', 'content'],
                  rvslots=['main'])
    pages = doc.get('query', {}).get('pages', {})
    for pageid, page in pages.items():
        revisions = page.get('revisions', [])
        for revision in revisions:
            text = revision.get('slots', {}).get('main', {}).get('*', '')
            yield {'revid': revision.get('revid'), 'text': text}

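# Example (hypothetical revision ids):
#   api = mwapi.Session("https://en.wikipedia.org", user_agent="<contact info>")
#   for rev in get_revision_text([123456, 654321], api):
#       print(rev['revid'], len(rev['text']))
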
def grouper(n, iterable, fillvalue=None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)

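# e.g. list(grouper(3, range(7))) --> [(0, 1, 2), (3, 4, 5), (6, None, None)]
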
def pull_revision_texts(revids, api, api_batch_size):
    # Fetch revision content from the API in batches of api_batch_size.
    batches = grouper(api_batch_size, revids)
    get_revision_text_2 = partial(get_revision_text, api=api)
    revs = chain.from_iterable(map(get_revision_text_2, batches))
    yield from revs

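# Example (hypothetical revision ids):
#   revs = pull_revision_texts([123456, 654321], api, api_batch_size=50)
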
def score_revisions(revids, api, api_batch_size=50, parallel=True):

    revs = pull_revision_texts(revids, api, api_batch_size)

    # Load the enwiki article quality model shipped with the articlequality package.
    scorer_model = Model.load(open('articlequality/models/enwiki.nettrom_wp10.gradient_boosting.model', 'rb'))
    add_score = partial(scoring_utils.add_score, scorer_model=scorer_model)

    if parallel:
        ncores = 48
        pool = Pool(ncores)
        revs = pool.imap_unordered(add_score, revs, chunksize=api_batch_size*4)
    else:
        revs = map(add_score, revs)

    to_pddict = partial(scoring_utils.to_pddict, kept_keys=['revid'])
    revs = map(to_pddict, revs)
    yield from revs

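# Example (hypothetical revision ids): each yielded dict has a 'revid' key
# plus the model's score fields, ready for pandas.DataFrame:
#   scores = list(score_revisions([123456, 654321], api, parallel=False))
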
# Example input/output paths used during development:
# sample_file_parquet = "data/article_sample_set.parquet"; output_feather = "data/scored_article_sample.feather"
# sample_file = "/data/nti9383home/production_functions/data/20200301_article_labelings_sample.feather"; output = "/data/nti9383home/production_functions/data/scored_article_sample.feather"

def score_sample(sample_file="data/article_sample_set.feather", output="data/scored_article_sample.feather"):

    sample = pd.read_feather(sample_file)

    revids = set(sample.revid)
    user_agent = "Nate TeBlunthuis <nathante@uw.edu>. What's the relationship between contributors and article quality?"
    api = mwapi.Session("https://en.wikipedia.org", user_agent=user_agent)

    scores = tqdm.tqdm(score_revisions(revids, api, 50, True), total=len(revids), miniters=100, smoothing=0.2)

    # Derive sibling output paths (.csv, .json, .feather) from the requested output path.
    p = Path(output)
    output_csv = Path(str(p).replace("".join(p.suffixes), ".csv"))
    output_json = Path(str(p).replace("".join(p.suffixes), ".json"))
    output_feather = Path(str(p).replace("".join(p.suffixes), ".feather"))

    # Stream scores to newline-delimited json as they arrive so partial
    # progress survives an interruption. Assumes each score dict is
    # json-serializable.
    saved_scores = list()
    with open(output_json, 'w') as of:
        for score in scores:
            of.write(json.dumps(score) + '\n')
            saved_scores.append(score)

    scored_revids = pd.DataFrame(saved_scores)
    remember(sample.shape[0], "sample_size_unscored")

    sample_1 = sample.merge(scored_revids, left_on="revid", right_on="revid")
    remember(sample_1.shape[0], "sample_size_scored")
    sample_1.to_feather(output_feather)
    sample_1.to_csv(output_csv)

if __name__ == "__main__":
    fire.Fire(score_sample)
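
# Example command-line invocation via fire (default paths shown):
#   python ores_scores_sample.py --sample_file=data/article_sample_set.feather \
#       --output=data/scored_article_sample.feather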
