]> code.communitydata.science - articlequality_ordinal.git/blob - load_data.R
add the rest of the code.
[articlequality_ordinal.git] / load_data.R
1 library(MASS)
2 library(brms)
3 options(mc.cores=28)
4
5 library(data.table)
6 library(arrow)
7
## Sampling parameters saved by the earlier labeling/sampling step; provides
## per-class population counts used below to build weights.
sample.params <- readRDS("remember_sample_quality_labels.RDS")

df <- data.table(read_feather("data/scored_article_sample.feather"))

## Map integer-coded predicted classes to their labels.
## NOTE(review): index order here ('start' before 'stub') disagrees with the
## factor level order below ('stub' < 'start') — confirm this matches the
## scorer's class-index order before changing it.
wp10dict <- c('start','stub','c','b','a','ga','fa')
## := modifies df by reference; no `df <- df[...]` reassignment needed.
df[, wp10 := wp10dict[wp10]]
df[, wp10 := factor(wp10, levels=c('stub','start','c','b','a','ga','fa'), ordered=TRUE)]

## remove 'a' class articles for a fair comparison.
df <- df[wp10 != 'a']

## Parse the YYYYMMDDHHMMSS timestamp, then rescale to [0, 1].
df[, datetime := as.POSIXct(timestamp, format="%Y%m%d%H%M%S")]
## BUG FIX: was as.numeric(timestamp), which treats the digit string
## (e.g. 20200101000000) as a number — that scale is nonlinear in time
## (month/day rollovers create jumps). Use seconds from the parsed datetime.
df[, datetime.numeric := as.numeric(datetime)]
df[, datetime.numeric := datetime.numeric - min(datetime.numeric)]
df[, datetime.numeric := datetime.numeric / max(datetime.numeric)]
20
## Compare class frequencies in the labeled population (from the sampling
## step) with those in the scored sample, and build inverse-probability
## weights so models can reweight back to the population distribution.
data.counts <- data.table(sample.params$label_sample_counts)
data.counts[, wp10 := factor(wp10, levels=c('stub','start','c','b','a','ga','fa'), ordered=TRUE)]

sample.counts <- df[, .N, by=.(wp10)][order(wp10)]
## 'a' was dropped from df above, so its level is absent here.
sample.counts[, wp10 := factor(wp10, levels=c('stub','start','c','b','ga','fa'), ordered=TRUE)]

## Right join: keep only the classes present in the sample (drops the 'a'
## row that data.counts still carries).
weights <- data.counts[sample.counts, on=.(wp10)]
## weight = (population share of class) / (sample share of class).
## Sums are computed inside j over the joined table (was sum(weights$...),
## which is fragile under chaining and recomputes the same totals).
weights[, article_weight := (n_articles/sum(n_articles)) / (N/sum(N))]
weights[, revision_weight := (n_revisions/sum(n_revisions)) / (N/sum(N))]
df <- df[weights, on=.(wp10)]
## Expected quality score on an evenly-spaced 1..6 scale, using the class
## probabilities. Ordering follows the script's own factor levels:
## stub < start < c < b < ga < fa.
## BUG FIX: columns were listed as (Stub, Start, B, C, GA, FA), giving B a
## LOWER weight (3) than C (4) even though 'c' < 'b' in the ordered levels
## above. Corrected so Stub=1, Start=2, C=3, B=4, GA=5, FA=6. Also replaces
## the slow row-wise apply() (which coerces the data.table to a matrix per
## call) with a single matrix product.
df[, quality.even6 := as.vector(as.matrix(df[, .(Stub, Start, C, B, GA, FA)]) %*% (1:6))]

Community Data Science Collective || Want to submit a patch?