code.communitydata.science - articlequality_ordinal.git/commitdiff
add the rest of the code. master
author Nathan TeBlunthuis <nathante@uw.edu>
Tue, 12 Mar 2024 16:39:12 +0000 (09:39 -0700)
committer Nathan TeBlunthuis <nathante@uw.edu>
Tue, 12 Mar 2024 16:39:12 +0000 (09:39 -0700)
15 files changed:
Makefile [new file with mode: 0644]
add_quality_scores.R [new file with mode: 0644]
analyze_quality_models.R [new file with mode: 0644]
analyze_quality_models_revisions.R [new file with mode: 0644]
analyze_quality_models_unweighted.R [new file with mode: 0644]
load_data.R [new file with mode: 0644]
ordinal_quality_models.R [new file with mode: 0644]
ores_score_sample.sh [new file with mode: 0755]
ores_scores_sample.py [new file with mode: 0644]
run_ordinal_quality.sh [new file with mode: 0755]
run_wikiq.tar.gz [new file with mode: 0644]
sample_training_labels.py [new file with mode: 0755]
score_sample_articles.RDS [new symlink]
wikiq [new file with mode: 0755]
wikiq_to_parquet.py [new file with mode: 0644]

diff --git a/Makefile b/Makefile
new file mode 100644 (file)
index 0000000..a9cb94a
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,37 @@
+SHELL:=/bin/bash
+
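+# Overall flow encoded by the targets below: extract article labelings, sample
+# training labels and articles, fetch revision text, score revisions with ORES,
+# and finally fit the ordinal quality models.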
+data/20200301_article_labelings.json_SUCCESS:
+       ./run_aql_jobs.sh
+
+data/20200301_article_labelings_sample.json:sample_training_labels.py
+       source ./bin/activate; \
+       ./sample_training_labels.py
+
+
+data/article_sample.csv:sample_articles.py
+       source ./bin/activate; \
+       start_spark_and_run.sh 1 sample_articles.py
+
+data/scored_article_sample.feather:data/article_sample_set.csv ores_scores_sample.py
+       source ./bin/activate; \
+       python3 ores_scores_sample.py data/article_sample_set.parquet data/scored_article_sample.feather
+
+# run this step on kibo
+data/20200301_al_sample_revisions.w_text.json:data/20200301_article_labelings_sample.json
+       source ./bin/activate; \
+       python3 articlequality/utility fetch_text \
+         --api-host=https://en.wikipedia.org \
+         --labelings=data/20200301_article_labelings_sample.json \
+         --output=data/20200301_al_sample_revisions.w_text.json
+
+# run this step on kibo
+data/20200301_al_sample_revisions.w_scores.json:data/20200301_al_sample_revisions.w_text.json
+       python3 score_sample_labels.py
+
+models/ordinal_quality.RDS:data/20200301_al_sample_revisions.w_text.json ordinal_quality_models.R
+       Rscript ordinal_quality_models.R
+
+
+
+
+.PHONY: data/20200301_article_labelings.json
diff --git a/add_quality_scores.R b/add_quality_scores.R
new file mode 100644 (file)
index 0000000..be4fbc5
--- /dev/null
@@ -0,0 +1,59 @@
+#!/usr/bin/env Rscript
+library(arrow)
+library(brms)
+library(data.table)
+library(ggplot2)
+library(parallel)
+library(doParallel)
+options(mc.cores=26)
+registerDoParallel(cores=26)
+
+dataset <- as.data.table(read_feather("data/scored_article_sample.feather"))
+dataset <- dataset[order(articleid,time_session_end)]
+quality_model <- readRDS("models/ordinal_quality_noFa.RDS")
+posterior_coefs <- as.data.table(quality_model)
+
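+## f projects one revision's ORES class probabilities (Stub..GA) onto the posterior
+## draws of the ordinal model coefficients, giving a posterior distribution over
+## that revision's latent quality; we keep only its median, mean, and sd.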
+f <- function(cols){
+    post_qual <- as.matrix(posterior_coefs[,.(b_Stub, b_Start, b_C, b_B, b_GA)]) %*% as.numeric(cols)
+    list(med_quality = median(post_qual),
+         mean_quality = mean(post_qual),
+         sd_quality = sd(post_qual)
+         )
+
+}
+
+cl <- makeForkCluster(nnodes=26)
+res <- rbindlist(parApply(cl,dataset[,.(Stub,Start,C,B,GA)],1,f))
+dataset[,names(res):=res]
+
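+## f2 does the same projection for all revisions of one article at once, then takes
+## first and second differences of posterior quality along the revision history;
+## the leading NAs pad each difference series back to the article's length.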
+f2 <- function(revscores){
+    posterior_quality <- as.matrix(posterior_coefs[,.(b_Stub,b_Start,b_C,b_B,b_GA)]) %*% t(as.matrix(revscores))
+    posterior_quality_diff <- apply(posterior_quality, 1, function(x) diff(x,1,1))
+    posterior_quality_diff2 <- apply(posterior_quality, 1, function(x) diff(x,1,2))
+    list(
+         mean_quality_diff1 = c(NA,apply(posterior_quality_diff,1,mean)),
+         sd_quality_diff1 = c(NA,apply(posterior_quality_diff,1,sd)),
+         median_quality_diff1 = c(NA,apply(posterior_quality_diff,1,median)),
+         mean_quality_diff2 = c(c(NA,NA),apply(posterior_quality_diff2,1,mean)),
+         sd_quality_diff2 = c(c(NA,NA),apply(posterior_quality_diff2,1,sd)),
+         median_quality_diff2 = c(c(NA,NA),apply(posterior_quality_diff2,1,median)))
+}
+
+
+dataset[,c("mean_qual_diff1","sd_qual_diff1","median_qual_diff1","mean_qual_diff2","sd_qual_diff2","median_qual_diff2"):=f2(.SD),by=.(articleid),.SDcols=c("Stub","Start","C","B","GA")]
+
+write_feather(dataset,'data/ordinal_scored_article_sample.feather')
+
+## In an earlier version I computed the full posterior of quality for the dataset, but it took too much memory.
+## The lines below checked (and confirmed) that the posteriors were approximately normal.
+## We can check that the means and the medians are close as a clue that normality is a good assumption.
+## mean(med_quality/mean_quality)
+## mean(med_quality - mean_quality)
+## mean((med_quality - mean_quality)^2)
+
+## ## plot some of the posteriors to check.
+## quality_post <- dataset[1:8]
+## quality_post <- melt(quality_post)
+## p <- ggplot(quality_post, aes(x=value,group=variable)) + geom_histogram(bins=50) + facet_wrap(.~variable)
+## ggsave("plots/quality_posterior_normality.pdf",device='pdf')
+
+
diff --git a/analyze_quality_models.R b/analyze_quality_models.R
new file mode 100644 (file)
index 0000000..077cec6
--- /dev/null
@@ -0,0 +1,167 @@
+library(MASS)
+library(brms)
+options(mc.cores=28)
+library(ggplot2)
+library(data.table)
+library(arrow)
+library(wCorr)
+
+source("RemembR/R/RemembeR.R")
+
+change.remember.file("ordinal.quality.analysis.RDS")
+
+#model.1 <- readRDS("models/ordinal_quality_intercept.RDS")
+model.main.pca <- readRDS("models/ordinal_quality_pca.RDS")
+model.main.pca.cumulative <- readRDS("models/ordinal_quality_pca.cumulative.RDS")
+model.qe6 <- readRDS("models/ordinal_quality_qe6.RDS")
+df <- readRDS("data/training_quality_labels.RDS")
+
+# then compare them with loo
+comparison.loo <- loo_compare(model.main.pca,model.qe6,model.main.pca.cumulative)
+#comparison.waic <- loo_compare(model.main.noC,model.main.noB,model.main.noFa,model.main.noGa,model.main.noStart,model.main.noStub,criterion='waic')
+print(comparison.loo,simplify=F,digits=2)
+remember(comparison.loo,"comparison.loo")
+
+# best model according to the loo comparison above
+best.model <- model.main.pca.cumulative
+
+pca_features <- readRDS("data/ores_pca_features.RDS")
+pca_features_unweighted <- readRDS("data/ores_pca_features.noweights.RDS")
+
+test.df <- readRDS("data/holdout_quality_labels.RDS")
+
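+## Apply a stored weighted-PCA transform (weighted means + eigenvector basis, as
+## saved by wpca in ordinal_quality_models.R) to new data, so the holdout set is
+## projected into the same principal-component space as the training data.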
+wpca_transform <- function(wpca, x){
+    x <- as.matrix(x)
+    centered <- as.matrix(t(t(x) - wpca$means))
+    return(centered %*% wpca$basis)
+}
+
+new_pca_features <- wpca_transform(pca_features, test.df[,.(Stub, Start, C, B, GA, FA)])
+
+test.df<-test.df[,":="(pca1 = new_pca_features[,1],
+                       pca2 = new_pca_features[,2],
+                       pca3 = new_pca_features[,3],
+                       pca4 = new_pca_features[,4],
+                       pca5 = new_pca_features[,5])]
+
+unweighted.pca <- wpca_transform(pca_features_unweighted, test.df[,.(Stub, Start, C, B, GA, FA)])
+
+test.df <- test.df[,":="(pca1.noweights = unweighted.pca[,1],
+                         pca2.noweights = unweighted.pca[,2],
+                         pca3.noweights = unweighted.pca[,3],
+                         pca4.noweights = unweighted.pca[,4],
+                         pca5.noweights = unweighted.pca[,5],
+                         pca6.noweights = unweighted.pca[,6])]
+
+draws <- as.data.table(best.model)
+
+test.df <- test.df[,idx.max:=.(apply(test.df[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+test.df <- test.df[,MPQC:=.(apply(test.df[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+top.preds <- test.df[,MPQC]
+
+#ordinal.fitted.1 <- fitted(best.model, test.df, scale='response')
+ordinal.fitted <- data.table(fitted(best.model, test.df, scale='linear'))
+ordinal.pred <- ordinal.fitted$Estimate
+remember(ordinal.fitted,'ordinal.fitted')
+ordinal.quality <- ordinal.pred
+quality.even6 <- apply(test.df[,.(Stub,Start,B,C,GA,FA)],1,function(r) r %*% c(0,1,2,3,4,5))
+quality.even5 <- apply(test.df[,.(Stub,Start,B,GA,FA)],1,function(r) r %*% c(1,2,3,4,5))
+
+test.df <- test.df[,quality.ordinal := ordinal.quality]
+test.df <- test.df[,quality.even6 := quality.even6]
+
+(spearcor <- weightedCorr(test.df$quality.ordinal, test.df$quality.even6, method='spearman', weights=test.df$article_weight))
+remember(spearcor, 'spearman.corr')
+(pearsoncor <- weightedCorr(test.df$quality.ordinal, test.df$quality.even6, method='pearson', weights=test.df$article_weight))
+remember(pearsoncor, 'pearson.corr')
+
+ordinal.preds <- data.table(predict(best.model, test.df, robust=F))
+#names(ordinal.preds) <- c("Stub","Start","C","B","A","GA","FA")
+names(ordinal.preds) <- c("Stub","Start","C","B","GA","FA")
+ordinal.preds <- ordinal.preds[,idx.max:=.(apply(ordinal.preds[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+#ordinal.preds <- ordinal.preds[,predicted:=.(apply(ordinal.preds[,.(idx.max)],1,function(idx) c("stub","start","c","b",'a',"ga","fa")[idx]))]
+ordinal.preds <- ordinal.preds[,predicted:=.(apply(ordinal.preds[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+pred.qe6 <- data.table(predict(model.qe6,test.df))
+names(pred.qe6) <- c("Stub","Start","C","B","GA","FA")
+pred.qe6 <- pred.qe6[,idx.max:=.(apply(pred.qe6[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+#pred.qe6 <- pred.qe6[,predicted:=.(apply(pred.qe6[,.(idx.max)],1,function(idx) c("stub","start","c","b",'a',"ga","fa")[idx]))]
+pred.qe6 <- pred.qe6[,predicted:=.(apply(pred.qe6[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+
+test.df <- test.df[,ordinal.pred := ordinal.preds$predicted]
+test.df <- test.df[,pred.qe6 := pred.qe6$predicted]
+test.df <- test.df[,idx.max:=.(apply(test.df[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+test.df <- test.df[,MPQC:=.(apply(test.df[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+
+(top.pred.accuracy <- weighted.mean(test.df[,.(MPQC)] == test.df[,.(wp10)],test.df$article_weight))
+remember(top.pred.accuracy, "top.pred.accuracy")
+(ordinal.pred.accuracy <- weighted.mean(test.df[,.(ordinal.pred)] == test.df[,.(wp10)], test.df$article_weight))
+remember(ordinal.pred.accuracy, "ordinal.pred.accuracy")
+quality.even6 <- apply(df[,.(Stub,Start,B,C,GA,FA)],1,function(r) r %*% c(1,2,3,4,5,6))
+(pred.qe6.accuracy <- weighted.mean(test.df[,.(pred.qe6)] == test.df[,.(wp10)], test.df$article_weight))
+remember(pred.qe6.accuracy, "pred.qe6.accuracy")
+remember(best.model, "best.model")
+remember(test.df,'test.df')
+
+ordinal.preds[,wp10:=test.df$wp10]
+ordinal.preds[,weight:=test.df$article_weight]
+total.weight <- sum(ordinal.preds$weight)
+library(modi)
+print("Calibration stats 1")
+calibration.stats.1 <- ordinal.preds[,.(prob.predicted=apply(.SD,2,function(c) weighted.mean(c,weight)),
+                                      var.predicted=apply(.SD,2,function(c) weighted.var(c,weight))),.SDcols=c("Stub","Start","C","B","GA","FA")]
+
+calibration.stats.1[,wp10:=c("stub","start","c","b","ga","fa")]
+calip.data = ordinal.preds[order(wp10),.(prob.data=sum(weight)/total.weight,
+                                         var.data=var(weight)/total.weight),by=.(wp10)]
+
+calibration.stats.1 <- calibration.stats.1[calip.data,on=.(wp10)]
+
+calibration.stats.1$weighttype <- 'Article weight'
+
+ordinal.preds[,weight:=test.df$revision_weight]
+total.weight <- sum(ordinal.preds$weight)
+print("Calibration stats 2")
+calibration.stats.2 <- ordinal.preds[,.(prob.predicted=apply(.SD,2,function(c) weighted.mean(c,weight)),
+                                        var.predicted=apply(.SD,2,function(c) weighted.var(c,weight))),.SDcols=c("Stub","Start","C","B","GA","FA")]
+
+
+calibration.stats.2[,wp10:=c("stub","start","c","b","ga","fa")]
+calip.data = ordinal.preds[order(wp10),.(prob.data=sum(weight)/total.weight,
+                                         var.data=var(weight)/total.weight),by=.(wp10)]
+
+calibration.stats.2 <- calibration.stats.2[calip.data,on=.(wp10)]
+
+calibration.stats.2$weighttype <- 'Revision weight'
+
+
+ordinal.preds[,weight:=rep(1,nrow(ordinal.preds))]
+total.weight <- sum(ordinal.preds$weight)
+print("Calibration stats 3")
+calibration.stats.3 <- ordinal.preds[,.(prob.predicted=apply(.SD,2,function(c) weighted.mean(c,weight)),
+                                        var.predicted=apply(.SD,2,function(c) weighted.var(c,weight))),.SDcols=c("Stub","Start","C","B","GA","FA")]
+
+
+calibration.stats.3[,wp10:=c("stub","start","c","b","ga","fa")]
+calip.data = ordinal.preds[order(wp10),.(prob.data=sum(weight)/total.weight,
+                                         var.data=var(weight)/total.weight),by=.(wp10)]
+
+calibration.stats.3 <- calibration.stats.3[calip.data,on=.(wp10)]
+
+calibration.stats.3$weighttype <- 'No weight'
+
+calibration.stats <- rbind(calibration.stats.1,calibration.stats.2,calibration.stats.3)
+
+calibration.stats[,calibration:=prob.data - prob.predicted]
+
+remember(calibration.stats, "calibration.stats")
+
+
+## p <- ggplot(data.frame(quality.ordinal, quality.even6, quality.even5))
+## p <- p + geom_point(aes(x=quality.even6,y=quality.ordinal)) + geom_smooth(aes(x=quality.even6,y=quality.ordinal))
+
+## print(p)
+## dev.off()
+
+## post.pred <- posterior_predict(model.main)
+## preds <- as.character(predict(polrmodel))
+## polrmodel.accuracy <- weighted.mean(preds==df$wp10,df$weight)
diff --git a/analyze_quality_models_revisions.R b/analyze_quality_models_revisions.R
new file mode 100644 (file)
index 0000000..74d7f54
--- /dev/null
@@ -0,0 +1,160 @@
+library(MASS)
+library(brms)
+options(mc.cores=28)
+library(ggplot2)
+library(data.table)
+library(arrow)
+library(wCorr)
+
+source("RemembR/R/RemembeR.R")
+
+change.remember.file("ordinal.quality.analysis_revisions.RDS")
+
+#model.1 <- readRDS("models/ordinal_quality_intercept.RDS")
+model.main.pca <- readRDS("models/ordinal_quality_pca_revision.RDS")
+model.main.pca.cumulative <- readRDS("models/ordinal_quality_pca_revision.cumulative.RDS")
+model.qe6 <- readRDS("models/ordinal_quality_qe6_revision.RDS")
+df <- readRDS("data/training_quality_labels.RDS")
+
+# then compare them with loo
+comparison.loo <- loo_compare(model.main.pca,model.qe6,model.main.pca.cumulative)
+#comparison.waic <- loo_compare(model.main.noC,model.main.noB,model.main.noFa,model.main.noGa,model.main.noStart,model.main.noStub,criterion='waic')
+print(comparison.loo,simplify=F,digits=2)
+remember(comparison.loo,"comparison.loo")
+
+# best model according to the loo comparison above
+best.model <- model.main.pca.cumulative
+
+pca_features <- readRDS("data/ores_pca_features_revisions.RDS")
+pca_features_unweighted <- readRDS("data/ores_pca_features.noweights.RDS")
+
+test.df <- readRDS("data/holdout_quality_labels.RDS")
+
+wpca_transform <- function(wpca, x){
+    x <- as.matrix(x)
+    centered <- as.matrix(t(t(x) - wpca$means))
+    return(centered %*% wpca$basis)
+}
+
+new_pca_features <- wpca_transform(pca_features, test.df[,.(Stub, Start, C, B, GA, FA)])
+
+test.df<-test.df[,":="(pca1.revision = new_pca_features[,1],
+                       pca2.revision = new_pca_features[,2],
+                       pca3.revision = new_pca_features[,3],
+                       pca4.revision = new_pca_features[,4],
+                       pca5.revision = new_pca_features[,5])]
+
+draws <- as.data.table(best.model)
+
+test.df <- test.df[,idx.max:=.(apply(test.df[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+test.df <- test.df[,MPQC:=.(apply(test.df[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+top.preds <- test.df[,MPQC]
+
+#ordinal.fitted.1 <- fitted(best.model, test.df, scale='response')
+ordinal.fitted <- data.table(fitted(best.model, test.df, scale='linear'))
+remember(ordinal.fitted,'ordinal.fitted')
+ordinal.pred <- ordinal.fitted$Estimate
+
+quality.ordinal <- ordinal.pred
+quality.even6 <- apply(test.df[,.(Stub,Start,B,C,GA,FA)],1,function(r) r %*% c(0,1,2,3,4,5))
+quality.even5 <- apply(test.df[,.(Stub,Start,B,GA,FA)],1,function(r) r %*% c(1,2,3,4,5))
+
+test.df <- test.df[,quality.ordinal := quality.ordinal]
+test.df <- test.df[,quality.even6 := quality.even6]
+
+(spearcor <- weightedCorr(test.df$quality.ordinal, test.df$quality.even6, method='spearman', weights=test.df$revision_weight))
+remember(spearcor, 'spearman.corr')
+(pearsoncor <- weightedCorr(test.df$quality.ordinal, test.df$quality.even6, method='pearson', weights=test.df$revision_weight))
+remember(pearsoncor, 'pearson.corr')
+
+ordinal.preds <- data.table(predict(best.model, test.df, robust=F))
+#names(ordinal.preds) <- c("Stub","Start","C","B","A","GA","FA")
+names(ordinal.preds) <- c("Stub","Start","C","B","GA","FA")
+ordinal.preds <- ordinal.preds[,idx.max:=.(apply(ordinal.preds[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+#ordinal.preds <- ordinal.preds[,predicted:=.(apply(ordinal.preds[,.(idx.max)],1,function(idx) c("stub","start","c","b",'a',"ga","fa")[idx]))]
+ordinal.preds <- ordinal.preds[,predicted:=.(apply(ordinal.preds[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+pred.qe6 <- data.table(predict(model.qe6,test.df))
+names(pred.qe6) <- c("Stub","Start","C","B","GA","FA")
+pred.qe6 <- pred.qe6[,idx.max:=.(apply(pred.qe6[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+#pred.qe6 <- pred.qe6[,predicted:=.(apply(pred.qe6[,.(idx.max)],1,function(idx) c("stub","start","c","b",'a',"ga","fa")[idx]))]
+pred.qe6 <- pred.qe6[,predicted:=.(apply(pred.qe6[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+
+test.df <- test.df[,ordinal.pred := ordinal.preds$predicted]
+test.df <- test.df[,pred.qe6 := pred.qe6$predicted]
+test.df <- test.df[,idx.max:=.(apply(test.df[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+test.df <- test.df[,MPQC:=.(apply(test.df[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+
+(top.pred.accuracy <- weighted.mean(test.df[,.(MPQC)] == test.df[,.(wp10)],test.df$revision_weight))
+remember(top.pred.accuracy, "top.pred.accuracy")
+(ordinal.pred.accuracy <- weighted.mean(test.df[,.(ordinal.pred)] == test.df[,.(wp10)], test.df$revision_weight))
+remember(ordinal.pred.accuracy, "ordinal.pred.accuracy")
+quality.even6 <- apply(df[,.(Stub,Start,B,C,GA,FA)],1,function(r) r %*% c(1,2,3,4,5,6))
+(pred.qe6.accuracy <- weighted.mean(test.df[,.(pred.qe6)] == test.df[,.(wp10)], test.df$revision_weight))
+remember(pred.qe6.accuracy, "pred.qe6.accuracy")
+remember(best.model, "best.model")
+
+remember(test.df,'test.df')
+
+
+ordinal.preds[,wp10:=test.df$wp10]
+ordinal.preds[,weight:=test.df$article_weight]
+total.weight <- sum(ordinal.preds$weight)
+library(modi)
+calibration.stats.1 <- ordinal.preds[,.(prob.predicted=apply(.SD,2,function(c) weighted.mean(c,weight)),
+                                      var.predicted=apply(.SD,2,function(c) weighted.var(c,weight))),.SDcols=c("Stub","Start","C","B","GA","FA")]
+
+calibration.stats.1[,wp10:=c("stub","start","c","b","ga","fa")]
+calip.data = ordinal.preds[order(wp10),.(prob.data=sum(weight)/total.weight,
+                                         var.data=var(weight)/total.weight),by=.(wp10)]
+
+calibration.stats.1 <- calibration.stats.1[calip.data,on=.(wp10)]
+
+calibration.stats.1$weighttype <- 'Article weight'
+
+ordinal.preds[,weight:=test.df$revision_weight]
+total.weight <- sum(ordinal.preds$weight)
+
+calibration.stats.2 <- ordinal.preds[,.(prob.predicted=apply(.SD,2,function(c) weighted.mean(c,weight)),
+                                        var.predicted=apply(.SD,2,function(c) weighted.var(c,weight))),.SDcols=c("Stub","Start","C","B","GA","FA")]
+
+
+calibration.stats.2[,wp10:=c("stub","start","c","b","ga","fa")]
+calip.data = ordinal.preds[order(wp10),.(prob.data=sum(weight)/total.weight,
+                                         var.data=var(weight)/total.weight),by=.(wp10)]
+
+calibration.stats.2 <- calibration.stats.2[calip.data,on=.(wp10)]
+
+calibration.stats.2$weighttype <- 'Revision weight'
+
+
+ordinal.preds[,weight:=rep(1,nrow(ordinal.preds))]
+total.weight <- sum(ordinal.preds$weight)
+
+calibration.stats.3 <- ordinal.preds[,.(prob.predicted=apply(.SD,2,function(c) weighted.mean(c,weight)),
+                                        var.predicted=apply(.SD,2,function(c) weighted.var(c,weight))),.SDcols=c("Stub","Start","C","B","GA","FA")]
+
+
+calibration.stats.3[,wp10:=c("stub","start","c","b","ga","fa")]
+calip.data = ordinal.preds[order(wp10),.(prob.data=sum(weight)/total.weight,
+                                         var.data=var(weight)/total.weight),by=.(wp10)]
+
+calibration.stats.3 <- calibration.stats.3[calip.data,on=.(wp10)]
+
+calibration.stats.3$weighttype <- 'No weight'
+
+calibration.stats <- rbind(calibration.stats.1,calibration.stats.2,calibration.stats.3)
+
+calibration.stats[,'calibration':=prob.data - prob.predicted]
+
+remember(calibration.stats, "calibration.stats")
+
+
+## p <- ggplot(data.frame(quality.ordinal, quality.even6, quality.even5))
+## p <- p + geom_point(aes(x=quality.even6,y=quality.ordinal)) + geom_smooth(aes(x=quality.even6,y=quality.ordinal))
+
+## print(p)
+## dev.off()
+
+## post.pred <- posterior_predict(model.main)
+## preds <- as.character(predict(polrmodel))
+## polrmodel.accuracy <- weighted.mean(preds==df$wp10,df$weight)
diff --git a/analyze_quality_models_unweighted.R b/analyze_quality_models_unweighted.R
new file mode 100644 (file)
index 0000000..80d9f01
--- /dev/null
@@ -0,0 +1,166 @@
+library(MASS)
+library(brms)
+options(mc.cores=28)
+library(ggplot2)
+library(data.table)
+library(arrow)
+library(wCorr)
+
+source("RemembR/R/RemembeR.R")
+
+change.remember.file("ordinal.quality.analysis.noweights.RDS")
+
+#model.1 <- readRDS("models/ordinal_quality_intercept.RDS")
+model.main.pca <- readRDS("models/ordinal_quality_pca.noweights.RDS")
+model.main.pca.cumulative <- readRDS("models/ordinal_quality_pca.noweights.cumulative.RDS")
+model.qe6 <- readRDS("models/ordinal_quality_qe6.noweights.RDS")
+df <- readRDS("data/training_quality_labels.RDS")
+
+# then compare them with loo
+comparison.loo <- loo_compare(model.main.pca,model.qe6,model.main.pca.cumulative)
+#comparison.waic <- loo_compare(model.main.noC,model.main.noB,model.main.noFa,model.main.noGa,model.main.noStart,model.main.noStub,criterion='waic')
+print(comparison.loo,simplify=F,digits=2)
+remember(comparison.loo,"comparison.loo")
+
+# best model according to the loo comparison above
+best.model <- model.main.pca.cumulative
+
+pca_features_unweighted <- readRDS("data/ores_pca_features.noweights.RDS")
+
+test.df <- readRDS("data/holdout_quality_labels.RDS")
+
+wpca_transform <- function(wpca, x){
+    x <- as.matrix(x)
+    centered <- as.matrix(t(t(x) - wpca$means))
+    return(centered %*% wpca$basis)
+}
+
+unweighted.pca <- wpca_transform(pca_features_unweighted, test.df[,.(Stub, Start, C, B, GA, FA)])
+
+test.df <- test.df[,":="(pca1.noweights = unweighted.pca[,1],
+                         pca2.noweights = unweighted.pca[,2],
+                         pca3.noweights = unweighted.pca[,3],
+                         pca4.noweights = unweighted.pca[,4],
+                         pca5.noweights = unweighted.pca[,5],
+                         pca6.noweights = unweighted.pca[,6])]
+
+draws <- as.data.table(best.model)
+
+test.df <- test.df[,idx.max:=.(apply(test.df[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+test.df <- test.df[,MPQC:=.(apply(test.df[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+top.preds <- test.df[,MPQC]
+
+#ordinal.fitted.1 <- fitted(best.model, test.df, scale='response')
+ordinal.fitted <- data.table(fitted(best.model, test.df, scale='linear'))
+ordinal.pred <- ordinal.fitted$Estimate
+remember(ordinal.fitted,'ordinal.fitted')
+
+quality.ordinal <- ordinal.pred
+quality.even6 <- apply(test.df[,.(Stub,Start,B,C,GA,FA)],1,function(r) r %*% c(0,1,2,3,4,5))
+quality.even5 <- apply(test.df[,.(Stub,Start,B,GA,FA)],1,function(r) r %*% c(1,2,3,4,5))
+
+test.df <- test.df[,quality.ordinal := quality.ordinal]
+test.df <- test.df[,quality.even6 := quality.even6]
+
+(spearcor <- cor(test.df$quality.ordinal, test.df$quality.even6, method='spearman'))
+remember(spearcor, 'spearman.corr')
+(pearsoncor <- cor(test.df$quality.ordinal, test.df$quality.even6, method='pearson'))
+remember(pearsoncor, 'pearson.corr')
+
+ordinal.preds <- data.table(predict(best.model, test.df, robust=T))
+#names(ordinal.preds) <- c("Stub","Start","C","B","A","GA","FA")
+names(ordinal.preds) <- c("Stub","Start","C","B","GA","FA")
+ordinal.preds <- ordinal.preds[,idx.max:=.(apply(ordinal.preds[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+#ordinal.preds <- ordinal.preds[,predicted:=.(apply(ordinal.preds[,.(idx.max)],1,function(idx) c("stub","start","c","b",'a',"ga","fa")[idx]))]
+ordinal.preds <- ordinal.preds[,predicted:=.(apply(ordinal.preds[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+pred.qe6 <- data.table(predict(model.qe6,test.df))
+names(pred.qe6) <- c("Stub","Start","C","B","GA","FA")
+pred.qe6 <- pred.qe6[,idx.max:=.(apply(pred.qe6[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+#pred.qe6 <- pred.qe6[,predicted:=.(apply(pred.qe6[,.(idx.max)],1,function(idx) c("stub","start","c","b",'a',"ga","fa")[idx]))]
+pred.qe6 <- pred.qe6[,predicted:=.(apply(pred.qe6[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+
+test.df <- test.df[,ordinal.pred := ordinal.preds$predicted]
+test.df <- test.df[,pred.qe6 := pred.qe6$predicted]
+test.df <- test.df[,idx.max:=.(apply(test.df[,.(Stub,Start,C,B,GA,FA)],1,which.max))]
+test.df <- test.df[,MPQC:=.(apply(test.df[,.(idx.max)],1,function(idx) c("stub","start","c","b","ga","fa")[idx]))]
+
+(top.pred.accuracy <- test.df[,mean(MPQC==wp10)])
+remember(top.pred.accuracy, "top.pred.accuracy")
+(ordinal.pred.accuracy <- test.df[,mean(ordinal.pred == wp10)])
+remember(ordinal.pred.accuracy, "ordinal.pred.accuracy")
+quality.even6 <- apply(df[,.(Stub,Start,B,C,GA,FA)],1,function(r) r %*% c(1,2,3,4,5,6))
+(pred.qe6.accuracy <- mean(test.df[,.(pred.qe6)] == test.df[,.(wp10)]))
+remember(pred.qe6.accuracy, "pred.qe6.accuracy")
+remember(best.model, "best.model")
+
+(accuracy.macro <- test.df[,.(top.pred.accuracy = mean(MPQC==wp10),
+                              ordinal.pred.accuracy = mean(ordinal.pred==wp10),
+                              pred.qe6.accuracy = mean(pred.qe6==wp10)),by=.(wp10)])
+
+accuracy.macro[,sapply(.SD,mean), .SDcols=c("top.pred.accuracy","ordinal.pred.accuracy","pred.qe6.accuracy")]
+    
+
+remember(test.df,'test.df')
+
+
+ordinal.preds[,wp10:=test.df$wp10]
+ordinal.preds[,weight:=test.df$article_weight]
+total.weight <- sum(ordinal.preds$weight)
+library(modi)
+calibration.stats.1 <- ordinal.preds[,.(prob.predicted=apply(.SD,2,function(c) weighted.mean(c,weight)),
+                                      var.predicted=apply(.SD,2,function(c) weighted.var(c,weight))),.SDcols=c("Stub","Start","C","B","GA","FA")]
+
+calibration.stats.1[,wp10:=c("stub","start","c","b","ga","fa")]
+calip.data = ordinal.preds[order(wp10),.(prob.data=sum(weight)/total.weight,
+                                         var.data=var(weight)/total.weight),by=.(wp10)]
+
+calibration.stats.1 <- calibration.stats.1[calip.data,on=.(wp10)]
+
+calibration.stats.1$weighttype <- 'Article weight'
+
+ordinal.preds[,weight:=test.df$revision_weight]
+total.weight <- sum(ordinal.preds$weight)
+
+calibration.stats.2 <- ordinal.preds[,.(prob.predicted=apply(.SD,2,function(c) weighted.mean(c,weight)),
+                                        var.predicted=apply(.SD,2,function(c) weighted.var(c,weight))),.SDcols=c("Stub","Start","C","B","GA","FA")]
+
+
+calibration.stats.2[,wp10:=c("stub","start","c","b","ga","fa")]
+calip.data = ordinal.preds[order(wp10),.(prob.data=sum(weight)/total.weight,
+                                         var.data=var(weight)/total.weight),by=.(wp10)]
+
+calibration.stats.2 <- calibration.stats.2[calip.data,on=.(wp10)]
+
+calibration.stats.2$weighttype <- 'Revision weight'
+
+
+ordinal.preds[,weight:=rep(1,nrow(ordinal.preds))]
+total.weight <- sum(ordinal.preds$weight)
+
+calibration.stats.3 <- ordinal.preds[,.(prob.predicted=apply(.SD,2,function(c) weighted.mean(c,weight)),
+                                        var.predicted=apply(.SD,2,function(c) weighted.var(c,weight))),.SDcols=c("Stub","Start","C","B","GA","FA")]
+
+
+calibration.stats.3[,wp10:=c("stub","start","c","b","ga","fa")]
+calip.data = ordinal.preds[order(wp10),.(prob.data=sum(weight)/total.weight,
+                                         var.data=var(weight)/total.weight),by=.(wp10)]
+
+calibration.stats.3 <- calibration.stats.3[calip.data,on=.(wp10)]
+
+calibration.stats.3$weighttype <- 'No weight'
+
+calibration.stats <- rbind(calibration.stats.1,calibration.stats.2,calibration.stats.3)
+
+calibration.stats[,'calibration':=prob.data - prob.predicted]
+
+remember(calibration.stats, "calibration.stats")
+
+## p <- ggplot(data.frame(quality.ordinal, quality.even6, quality.even5))
+## p <- p + geom_point(aes(x=quality.even6,y=quality.ordinal)) + geom_smooth(aes(x=quality.even6,y=quality.ordinal))
+
+## print(p)
+## dev.off()
+
+## post.pred <- posterior_predict(model.main)
+## preds <- as.character(predict(polrmodel))
+## polrmodel.accuracy <- weighted.mean(preds==df$wp10,df$weight)
diff --git a/load_data.R b/load_data.R
new file mode 100644 (file)
index 0000000..5216a9a
--- /dev/null
@@ -0,0 +1,31 @@
+library(MASS)
+library(brms)
+options(mc.cores=28)
+
+library(data.table)
+library(arrow)
+
+sample.params <- readRDS("remember_sample_quality_labels.RDS")
+
+df <- data.table(read_feather("data/scored_article_sample.feather"))
+wp10dict <- list('start','stub','c','b','a','ga','fa')
+df[,wp10:=unlist(wp10dict[wp10])]
+df <- df[,wp10:=factor(wp10,levels=c('stub','start','c','b','a','ga','fa'),ordered=TRUE)]
+## remove 'a' class articles for a fair comparison.
+df <- df[wp10!='a']
+df <- df[,datetime := as.POSIXct(timestamp,format="%Y%m%d%H%M%S")]
+df <- df[,datetime.numeric := as.numeric(datetime)]
+df <- df[,datetime.numeric := (datetime.numeric - min(datetime.numeric))]
+df <- df[,datetime.numeric := datetime.numeric/max(datetime.numeric)]
+
+data.counts <- data.table(sample.params$label_sample_counts)
+#data.counts <- data.counts[,wp10:=factor(wp10,levels=c('stub','start','c','b','a','ga','fa'),ordered=TRUE)]
+data.counts <- data.counts[,wp10:=factor(wp10,levels=c('stub','start','c','b','a','ga','fa'),ordered=TRUE)]
+sample.counts <- df[,.(.N),by=.(wp10)][order(wp10)]
+#sample.counts <- sample.counts[,wp10:=factor(wp10,levels=c('stub','start','c','b','a','ga','fa'),ordered=TRUE)]
+sample.counts <- sample.counts[,wp10:=factor(wp10,levels=c('stub','start','c','b','ga','fa'),ordered=TRUE)]
+weights <- data.counts[sample.counts,on=.(wp10)]
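+## Inverse-probability weights: a class's weight is its share of articles (or
+## revisions) in the population divided by its share in the stratified sample,
+## so that weighted statistics generalize back to the full wiki.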
+weights <- weights[,article_weight:=(n_articles/sum(weights$n_articles))/(N/sum(weights$N))]
+weights <- weights[,revision_weight:=(n_revisions/sum(weights$n_revisions))/(N/sum(weights$N))]
+df <- df[weights,on=.(wp10)]
+df[,quality.even6 := apply(df[,.(Stub,Start,B,C,GA,FA)],1,function(r) r %*% c(1,2,3,4,5,6))]
diff --git a/ordinal_quality_models.R b/ordinal_quality_models.R
new file mode 100644 (file)
index 0000000..99163bc
--- /dev/null
@@ -0,0 +1,202 @@
+source("RemembR/R/RemembeR.R")
+source("load_data.R")
+change.remember.file("ordinal.quality.model.RDS")
+
+test <- F
+
+remember(weights, "sample.weights")
+
+n.holdout <- 4000
+remember(n.holdout,"n.holdout")
+holdout <- df[sample(.N,n.holdout)]
+saveRDS(holdout,'data/holdout_quality_labels.RDS')
+df <- df[!(revid %in% holdout$revid)]
+saveRDS(df,'data/training_quality_labels.RDS')
+
+if(test == TRUE){
+    df <- df[sample(.N,2000)]
+}
+
+## It turns out that the 6 predictors we have are highly correlated, creating problems for sampling, so use a QR decomposition.
+df <- df[!is.na(wp10)]
+
+df[, start.p.stub := Start + Stub]
+
+saveRDS(df,"data/training_quality_labels.RDS")
+
+
+fam.cloglog <- sratio(link='cloglog', threshold='flexible')
+#formula.1 <- brmsformula(wp10 | weights(weight) ~  1,decomp='QR',center=TRUE)
+
+fam <- sratio(link='logit', threshold='flexible')
+fam.cumulative <- cumulative(link='logit', threshold='flexible')
+
+## It turns out that the matrix is singular if we include all the predictors.
+## C is the most correlated with the other variables, so for now let's remove it.
+
+## It also turns out that we don't need to do model selection at all, since we don't care about the coefficients.
+## We can just do the csv!
+x <- df[,.(Stub,Start,C,B,GA,FA)]
+
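+## Weighted PCA via eigendecomposition of the weighted covariance matrix of the
+## class-probability columns. We return the weighted means and eigenvector basis
+## along with the rotated data, so the identical transform can be applied to the
+## holdout set later (see wpca_transform in the analyze_quality_models*.R scripts).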
+wpca <- function(x, weight){
+    name <- names(x)
+    x <- as.matrix(x)
+    means <- unlist(lapply(1:dim(x)[2], function(i) weighted.mean(x[,i], weight)))
+    names(means) <- name
+    centered <- as.matrix(t(t(x) - means))
+    weightmat <- diag(weight)
+    covmat <- t(centered) %*% weightmat %*% centered / (sum(weight) - 1)
+
+    factors <- eigen(covmat)
+    basis <- factors$vectors
+    result <- centered %*% basis
+    # return a list with the info we need to do the transformation
+    return(list(means=means, basis=basis, x=result))
+}
+
+#unweighted.pca <- wpca(df[,.(Stub,Start,C,B,GA,FA)],rep(1,nrow(df)))
+upca <- prcomp(df[,.(Stub,Start,C,B,GA,FA)])
+unweighted.pca <- list(means = upca$center, basis=upca$rotation, x=upca$x)
+saveRDS(unweighted.pca,"data/ores_pca_features.noweights.RDS")
+
+weighted.pca <- wpca(df[,.(Stub,Start,C,B,GA,FA)],df$article_weight)
+saveRDS(weighted.pca, "data/ores_pca_features.RDS")
+
+revision.pca <- wpca(df[,.(Stub,Start,C,B,GA,FA)],df$revision_weight)
+saveRDS(revision.pca, "data/ores_pca_features_revisions.RDS")
+
+df <- df[,":="(pca1 = weighted.pca$x[,1],
+               pca2 = weighted.pca$x[,2],
+               pca3 = weighted.pca$x[,3],
+               pca4 = weighted.pca$x[,4],
+               pca5 = weighted.pca$x[,5],
+               pca6 = weighted.pca$x[,6])]
+
+df <- df[,":="(pca1.revision = revision.pca$x[,1],
+               pca2.revision = revision.pca$x[,2],
+               pca3.revision = revision.pca$x[,3],
+               pca4.revision = revision.pca$x[,4],
+               pca5.revision = revision.pca$x[,5],
+               pca6.revision = revision.pca$x[,6])]
+
+df <- df[,":="(pca1.noweights = unweighted.pca$x[,1],
+               pca2.noweights = unweighted.pca$x[,2],
+               pca3.noweights = unweighted.pca$x[,3],
+               pca4.noweights = unweighted.pca$x[,4],
+               pca5.noweights = unweighted.pca$x[,5],
+               pca6.noweights = unweighted.pca$x[,6])]
+
+formula.main.pca.cs <- brmsformula(wp10 | weights(article_weight) ~  cs(pca1) + cs(pca2) + cs(pca3) + cs(pca4) + cs(pca5))
+formula.main.pca.noweights.cs <- brmsformula(wp10  ~  cs(pca1.noweights) + cs(pca2.noweights) + cs(pca3.noweights) + cs(pca4.noweights) + cs(pca5.noweights))
+formula.revision.pca.cs <- brmsformula(wp10 | weights(revision_weight) ~  cs(pca1.revision) + cs(pca2.revision) + cs(pca3.revision) + cs(pca4.revision) + cs(pca5.revision))
+formula.qe6.cs <- brmsformula(wp10 | weights(article_weight) ~ cs(quality.even6))
+formula.qe6.revision.cs <- brmsformula(wp10 | weights(revision_weight) ~ cs(quality.even6))
+formula.qe6.noweights.cs <- brmsformula(wp10 ~ cs(quality.even6))
+
+formula.main.pca <- brmsformula(wp10 | weights(article_weight) ~  pca1 + pca2 + pca3 + pca4 + pca5)
+formula.main.pca.noweights <- brmsformula(wp10  ~  pca1.noweights + pca2.noweights + pca3.noweights + pca4.noweights + pca5.noweights)
+formula.revision.pca <- brmsformula(wp10 | weights(revision_weight) ~  pca1.revision + pca2.revision + pca3.revision + pca4.revision + pca5.revision)
+formula.qe6 <- brmsformula(wp10 | weights(article_weight) ~ quality.even6)
+formula.qe6.revision <- brmsformula(wp10 | weights(revision_weight) ~ quality.even6)
+formula.qe6.noweights <- brmsformula(wp10 ~ quality.even6)
+
+formula.scores.noweights <- brmsformula(wp10 ~ Start + Stub + GA + FA + B)
+                                        
+
+library(future)
+library(parallel)
+options(mc.cores = parallel::detectCores())
+
+plan(lapply(1:7,function(x) tweak(multisession, workers=4)))
+
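+## Nested future topology: the outer multisession level runs up to 4 model fits
+## at once, and with future=TRUE each fit's MCMC chains run as nested futures on
+## the next level down.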
+model.main.pca %<-% brm(formula=formula.main.pca, data=df, family=fam, control=list(max_treedepth=15), future=TRUE, save_pars=save_pars(all=TRUE))
+model.qe6 %<-% brm(formula.qe6, data=df, family=fam, control=list(max_treedepth=15),future=TRUE,save_pars=save_pars(all=TRUE))
+
+model.main.revision %<-% brm(formula=formula.revision.pca, data=df, family=fam, control=list(max_treedepth=15), future=TRUE, save_pars=save_pars(all=TRUE))
+
+model.qe6.revision %<-% brm(formula.qe6.revision, data=df, family=fam, control=list(max_treedepth=15),future=TRUE,save_pars=save_pars(all=TRUE))
+model.qe6.noweights %<-% brm(formula.qe6.noweights, data=df, family=fam, control=list(max_treedepth=15),future=TRUE,save_pars=save_pars(all=TRUE))
+model.main.pca.noweights %<-% brm(formula=formula.main.pca.noweights, data=df, family=fam, control=list(max_treedepth=15), future=TRUE,save_pars=save_pars(all=TRUE))
+
+## model.main.pca.cs %<-% brm(formula=formula.main.pca.cs, data=df, family=fam, control=list(max_treedepth=15), future=TRUE, save_pars=save_pars(all=TRUE))
+## model.qe6.cs %<-% brm(formula.qe6.cs, data=df, family=fam, control=list(max_treedepth=15),future=TRUE,save_pars=save_pars(all=TRUE))
+
+## model.main.revision.cs %<-% brm(formula=formula.revision.pca.cs, data=df, family=fam, control=list(max_treedepth=15), future=TRUE, save_pars=save_pars(all=TRUE))
+
+## model.qe6.revision.cs %<-% brm(formula.qe6.revision.cs, data=df, family=fam, control=list(max_treedepth=15),future=TRUE,save_pars=save_pars(all=TRUE))
+## model.qe6.noweights.cs %<-% brm(formula.qe6.noweights.cs, data=df, family=fam, control=list(max_treedepth=15),future=TRUE,save_pars=save_pars(all=TRUE))
+## model.main.pca.noweights.cs %<-% brm(formula=formula.main.pca.noweights.cs, data=df, family=fam, control=list(max_treedepth=15), future=TRUE,save_pars=save_pars(all=TRUE))
+
+model.main.pca.cumulative %<-% brm(formula=formula.main.pca, data=df, family=fam.cumulative, control=list(max_treedepth=15), future=TRUE, save_pars=save_pars(all=TRUE))
+model.qe6.cumulative %<-% brm(formula.qe6, data=df, family=fam.cumulative, control=list(max_treedepth=15),future=TRUE,save_pars=save_pars(all=TRUE))
+
+model.main.revision.cumulative %<-% brm(formula=formula.revision.pca, data=df, family=fam.cumulative, control=list(max_treedepth=15), future=TRUE, save_pars=save_pars(all=TRUE))
+
+model.qe6.revision.cumulative %<-% brm(formula.qe6.revision, data=df, family=fam.cumulative, control=list(max_treedepth=15),future=TRUE,save_pars=save_pars(all=TRUE))
+model.qe6.noweights.cumulative %<-% brm(formula.qe6.noweights, data=df, family=fam.cumulative, control=list(max_treedepth=15),future=TRUE,save_pars=save_pars(all=TRUE))
+model.main.pca.noweights.cumulative %<-% brm(formula=formula.main.pca.noweights, data=df, family=fam.cumulative, control=list(max_treedepth=15), future=TRUE,save_pars=save_pars(all=TRUE))
+
+
+#model.scores.noweights <- brm(formula=formula.scores.noweights, data=df, family=fam, control=list(max_treedepth=15), future=TRUE,save_pars=save_pars(all=TRUE))
+
+models <- resolve(globalenv(),result=F) 
+print("adding loo")
+
+model.main.revision <- add_criterion(model.main.revision,'loo',moment_match=T)
+model.main.pca <- add_criterion(model.main.pca,'loo',moment_match=T)
+model.qe6.revision <- add_criterion(model.qe6.revision,'loo')
+model.qe6 <- add_criterion(model.qe6,'loo')
+model.main.pca.noweights <- add_criterion(model.main.pca.noweights,'loo',moment_match=T)
+model.qe6.noweights <- add_criterion(model.qe6.noweights,'loo')
+
+model.main.revision.cumulative <- add_criterion(model.main.revision.cumulative,'loo',moment_match=T)
+model.main.pca.cumulative <- add_criterion(model.main.pca.cumulative,'loo',moment_match=T)
+model.qe6.revision.cumulative <- add_criterion(model.qe6.revision.cumulative,'loo')
+model.qe6.cumulative <- add_criterion(model.qe6.cumulative,'loo')
+model.main.pca.noweights.cumulative <- add_criterion(model.main.pca.noweights.cumulative,'loo',moment_match=T)
+model.qe6.noweights.cumulative <- add_criterion(model.qe6.noweights.cumulative,'loo')
+
+
+## model.main.revision.cs <- add_criterion(model.main.revision.cs,'loo',moment_match=T)
+## model.main.pca.cs <- add_criterion(model.main.pca.cs,'loo',moment_match=T)
+## model.qe6.revision.cs <- add_criterion(model.qe6.revision.cs,'loo')
+## model.qe6.cs <- add_criterion(model.qe6.cs,'loo')
+## model.main.pca.noweights.cs <- add_criterion(model.main.pca.noweights.cs,'loo',moment_match=T)
+## model.qe6.noweights.cs <- add_criterion(model.qe6.noweights.cs,'loo')
+
+saveRDS(model.qe6.revision,"models/ordinal_quality_qe6_revision.RDS")
+saveRDS(model.qe6,"models/ordinal_quality_qe6.RDS")
+saveRDS(model.main.pca.noweights,"models/ordinal_quality_pca.noweights.RDS")
+saveRDS(model.qe6.noweights,"models/ordinal_quality_qe6.noweights.RDS")
+saveRDS(model.main.pca,"models/ordinal_quality_pca.RDS")
+saveRDS(model.main.revision,"models/ordinal_quality_pca_revision.RDS")
+
+saveRDS(model.qe6.revision.cumulative,"models/ordinal_quality_qe6_revision.cumulative.RDS")
+saveRDS(model.qe6.cumulative,"models/ordinal_quality_qe6.cumulative.RDS")
+saveRDS(model.main.pca.noweights.cumulative,"models/ordinal_quality_pca.noweights.cumulative.RDS")
+saveRDS(model.qe6.noweights.cumulative,"models/ordinal_quality_qe6.noweights.cumulative.RDS")
+saveRDS(model.main.pca.cumulative,"models/ordinal_quality_pca.cumulative.RDS")
+saveRDS(model.main.revision.cumulative,"models/ordinal_quality_pca_revision.cumulative.RDS")
+
+## saveRDS(model.qe6.revision.cs,"models/ordinal_quality_qe6_revision.RDS")
+## saveRDS(model.qe6.cs,"models/ordinal_quality_qe6.RDS")
+## saveRDS(model.main.pca.noweights.cs,"models/ordinal_quality_pca.noweights.RDS")
+## saveRDS(model.qe6.noweights.cs,"models/ordinal_quality_qe6.noweights.RDS")
+## saveRDS(model.main.pca.cs,"models/ordinal_quality_pca.RDS")
+## saveRDS(model.main.revision.cs,"models/ordinal_quality_pca_revision.RDS")
+
+
+
+models <- resolve(globalenv(),result=F) 
+
+
+
+
+
+
+
+
diff --git a/ores_score_sample.sh b/ores_score_sample.sh
new file mode 100755 (executable)
index 0000000..0e05e54
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/bash
+python3 ores_scores_sample.py --sample_file="/data/nti9383home/production_functions/data/20200301_article_labelings_sample.feather" --output=/data/nti9383home/production_functions/data/scored_article_sample.feather
diff --git a/ores_scores_sample.py b/ores_scores_sample.py
new file mode 100644 (file)
index 0000000..b881e4c
--- /dev/null
@@ -0,0 +1,97 @@
+import mwapi
+from revscoring import Model
+import articlequality
+import pyarrow
+import pandas as pd
+import scoring_utils
+from itertools import chain, zip_longest
+from multiprocessing import Pool
+from functools import partial
+from pyRemembeR import Remember
+import fire
+from pathlib import Path
+import tqdm
+remember = Remember("score_sample_articles.RDS")
+
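+# Pipeline: batch the sampled revision ids into API-sized chunks, fetch each
+# revision's wikitext from the MediaWiki API, then score the text with the
+# enwiki article quality gradient boosting model, in parallel when requested.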
+def get_revision_text(revid_batch, api):
+    revid_batch = filter(lambda rid: rid is not None, revid_batch)
+    doc = api.get(action='query',
+                  prop='revisions',
+                  revids=revid_batch,
+                  rvprop=['ids','content'],
+                  rvslots=['main'])
+    pages = doc.get('query',{}).get('pages',{})
+    for pageid, doc in pages.items():
+        revisions = doc.get('revisions',[])
+        for revision in revisions:
+            text = revision.get('slots',{}).get('main',{}).get('*',{})
+            yield {'revid':revision.get('revid',{}), 'text':text}
+
+def grouper(n, iterable, fillvalue=None):
+    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
+    args = [iter(iterable)] * n
+    return zip_longest(fillvalue=fillvalue, *args)
+    
+def pull_revision_texts(revids, api, api_batch_size):
+    batches = grouper(api_batch_size,revids)
+    get_revision_text_2 = partial(get_revision_text,api=api)
+    revs = chain(* map(get_revision_text_2, batches))
+    yield from revs
+
+def score_revisions(revids, api, api_batch_size=50, parallel=True):
+
+    revs = pull_revision_texts(revids, api, api_batch_size)
+
+    scorer_model = Model.load(open('articlequality/models/enwiki.nettrom_wp10.gradient_boosting.model', 'rb'))
+    add_score = partial(scoring_utils.add_score, scorer_model=scorer_model)
+
+    if parallel:
+        ncores = 48
+        pool = Pool(ncores)
+
+        revs = pool.imap_unordered(add_score, revs, chunksize = api_batch_size*4)
+    else:
+        revs = map(add_score,revs)
+
+    to_pddict = partial(scoring_utils.to_pddict,kept_keys=['revid'])
+    revs = map(to_pddict, revs)
+    yield from revs
+
+#sample_file_parquet = "data/article_sample_set.parquet"; output_feather="data/scored_article_sample.feather";
+
+sample_file="/data/nti9383home/production_functions/data/20200301_article_labelings_sample.feather";output="/data/nti9383home/production_functions/data/scored_article_sample.feather"
+
+def score_sample(sample_file = "data/article_sample_set.feather", output="data/scored_article_sample.feather"):
+    
+    sample = pd.read_feather(sample_file)
+
+    revids = set(sample.revid)
+    user_agent = "Nate TeBlunthuis <nathante@uw.edu>. What's the relationship between contributors and article quality?"
+    api = mwapi.Session("https://en.wikipedia.org",user_agent=user_agent)
+
+    scores = tqdm.tqdm(score_revisions(revids, api, 50, True),total=len(revids),miniters=100,smoothing=0.2)
+
+    p = Path(output)
+    output_csv =  Path(str(p).replace("".join(p.suffixes), ".csv"))
+    output_json =  Path(str(p).replace("".join(p.suffixes), ".json"))
+    output_feather =  Path(str(p).replace("".join(p.suffixes), ".feather"))
+
+    saved_scores = list()
+    with open(output_json,'w') as of:
+        for score in scores:
+            of.write(str(score) + '\n')
+            saved_scores.append(score)
+
+
+    scored_revids = pd.DataFrame(saved_scores)
+    sample_1 = sample.merge(scored_revids,left_on="revid",right_on="revid")
+    remember(sample_1.shape[0],"sample_size_unscored")
+
+    remember(sample_1.shape[0],"sample_size_scored")
+    sample_1.to_feather(output_feather)
+    sample_1.to_csv(output_csv)
+
+if __name__ == "__main__":
+    fire.Fire(score_sample)
diff --git a/run_ordinal_quality.sh b/run_ordinal_quality.sh
new file mode 100755 (executable)
index 0000000..1c43760
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash
+Rscript ordinal_quality_models.R && \
+    Rscript analyze_quality_models.R && \
+    Rscript analyze_quality_models_unweighted.R && \
+    Rscript analyze_quality_models_revisions.R
diff --git a/run_wikiq.tar.gz b/run_wikiq.tar.gz
new file mode 100644 (file)
index 0000000..cdb1ea4
Binary files /dev/null and b/run_wikiq.tar.gz differ
diff --git a/sample_training_labels.py b/sample_training_labels.py
new file mode 100755 (executable)
index 0000000..bb3db8c
--- /dev/null
@@ -0,0 +1,348 @@
+#!/usr/bin/env python3
+
+'''
+Take a stratified sample of article quality labels. 
+
+For now we just stratify by label type. 
+Later we might add date. 
+Later we might stratify by wikiproject too.
+
+A key limitation of this approach is that we can only sample at the level of the page.
+We'd really like to be able to sample at the level of the edit session,
+but that isn't possible because of how article assessments work.
+'''
+from itertools import islice, chain
+from pathlib import Path
+import pandas as pd
+import numpy as np
+random = np.random.RandomState(1968)
+import json
+import pyarrow.feather as feather
+import fire
+from collections import Counter
+from pyRemembeR import Remember
+from enum import IntEnum, unique
+from datetime import datetime
+from dataclasses import dataclass, asdict
+from multiprocessing import Pool
+from urllib.parse import unquote
+from pyspark.sql import functions as f
+from pyspark.sql import SparkSession, Window
+from pyspark.sql.functions import udf
+from pyspark.sql.types import StringType
+from numpy import dtype
+import csv
+
+def wikiq_to_parquet():
+
+    path = Path("/gscratch/comdata/users/nathante/wikiqRunning/wikiq_output/")
+    outpath = Path("/gscratch/comdata/output/wikiq_enwiki_20200301_nathante_parquet/")
+    files = list(map(Path,path.glob("*.tsv")))
+    dumpfile = files[0]
+
+    def wikiq_tsv_to_parquet(dumpfile, outpath = Path("/gscratch/comdata/output/wikiq_enwiki_20200301_nathante.parquet/")):
+        outfile = outpath / (dumpfile.name + ".parquet")
+        outpath.mkdir(parents=True, exist_ok=True)
+        _wikiq_tsv_to_parquet(dumpfile,outfile)
+
+    dumpfile = Path("/gscratch/comdata/users/nathante/wikiqRunning/wikiq_output/enwiki-20200301-pages-meta-history12-p4980874p5038451.tsv")
+
+    def _wikiq_tsv_to_parquet(dumpfile, outfile):
+
+        dtypes = {'anon': dtype('O'), 'articleid': dtype('int64'), 'deleted': dtype('bool'), 'editor': dtype('O'), 'editor_id': dtype('float64'), 'minor': dtype('bool'), 'namespace': dtype('int64'), 'revert': dtype('O'), 'reverteds': dtype('O'), 'revid': dtype('int64'), 'sha1': dtype('O'), 'text_chars': dtype('float64'), 'title': dtype('O')}
+
+        print(dumpfile)
+        df = pd.read_csv(dumpfile,sep='\t',quoting=csv.QUOTE_NONE,error_bad_lines=False, warn_bad_lines=True,parse_dates=['date_time'],dtype=dtypes)
+
+        df.to_parquet(outfile)
+
+    with Pool(28) as pool:
+        jobs = pool.imap_unordered(wikiq_tsv_to_parquet, files)
+        list(jobs)
+
+    spark = SparkSession.builder.getOrCreate()
+
+    @udf(StringType())
+    def decode_strip_udf(val):
+        if val is None:
+            return ""
+        else:
+            return unquote(val).strip('\"')
+    df = spark.read.parquet('/gscratch/comdata/output/wikiq_enwiki_20200301_nathante.parquet')
+    df = df.withColumnRenamed("anon","anonRaw")
+    df = df.withColumn("anon",f.when(f.col("anonRaw")=="TRUE",True).otherwise(False))
+    df = df.drop("anonRaw")
+    df = df.withColumnRenamed("text_chars","text_chars_raw")
+    df = df.withColumn("text_chars",f.col("text_chars_raw").cast('int'))
+    df = df.drop("text_chars_raw")
+    df = df.withColumnRenamed("editor_id",'editor_id_raw')
+    df = df.withColumn("editor_id",f.col("editor_id_raw").cast("int"))
+    df = df.drop("editor_id_raw")
+    df = df.withColumnRenamed("revert","revert_raw")
+    df = df.withColumn("revert",f.when(f.col("revert_raw")=="TRUE",True).otherwise(False))
+    df = df.drop("revert_raw")
+    df = df.withColumnRenamed("title","title_raw")
+    df = df.withColumn("title", decode_strip_udf(f.col("title_raw")))
+    df = df.drop("title_raw")
+    df = df.withColumnRenamed("editor","editor_raw")
+    df = df.withColumn("editor", decode_strip_udf(f.col("editor_raw")))
+    df = df.drop("editor_raw")
+    df = df.repartition(400,'articleid')
+    df.write.parquet("/gscratch/comdata/output/wikiq_enwiki_20200301_nathante_partitioned.parquet",mode='overwrite')
+
+@unique
+class WP10(IntEnum):
+    start = 1
+    stub = 2
+    c = 3
+    b = 4
+    a = 5
+    ga = 6
+    fa = 7
+
+    @staticmethod
+    def from_string(s):
+        return {'start':WP10.start,
+                'stub':WP10.stub,
+                'c':WP10.c,
+                'b':WP10.b,
+                'a':WP10.a,
+                'ga':WP10.ga,
+                'fa':WP10.fa}.get(s,None)
+    
+    def to_string(self):
+        return {WP10.start:'start',
+                WP10.stub:'stub',
+                WP10.c:'c',
+                WP10.b:'b',
+                WP10.a:'a',
+                WP10.ga:'ga',
+                WP10.fa:'fa'}[self]
+
+
+@dataclass
+class PageLabel:
+    timestamp:datetime
+    wp10:WP10
+
+    @staticmethod
+    def from_json(obj):
+        timestamp = obj.get('timestamp',None) 
+        if timestamp is not None:
+            timestamp = datetime.strptime(obj['timestamp'],'%Y%m%d%H%M%S')
+        else:
+            timestamp = None
+
+        return PageLabel(timestamp=timestamp,
+                         wp10=WP10.from_string(obj.get('wp10')))
+
+    @staticmethod
+    def from_row(row):
+        return PageLabel(timestamp = row.timestamp,
+                         wp10 = WP10(row.wp10))
+
+    def to_json(self):
+        d = asdict(self)
+
+        if self.timestamp is not None:
+            d['timestamp'] =  self.timestamp.strftime('%Y%m%d%H%M%S')
+
+        if self.wp10 is not None:
+            d['wp10'] = self.wp10.to_string()
+
+        return json.dumps(d)
+
+@dataclass
+class TalkPageLabel(PageLabel):
+    dump_talk_page_title:str
+    talk_page_id:int
+    project:str
+
+    @staticmethod
+    def from_json(obj):
+        res = PageLabel.from_json(obj)  
+
+        return TalkPageLabel(dump_talk_page_title=obj.get('dump_talk_page_title',None),
+                             talk_page_id=obj.get('talk_page_id',None),
+                             project=obj.get("project",None),
+                             **asdict(res)
+                             )
+    @staticmethod
+    def from_row(row):
+        res = PageLabel.from_row(row)
+        return TalkPageLabel(dump_talk_page_title = row.dump_talk_page_title,
+                             talk_page_id = row.talk_page_id,
+                             project = row.project,
+                             **asdict(res))
+
+
+                         
+@dataclass
+class ArticlePageLabel(PageLabel):
+    '''class representing labels to a page'''
+    title: str
+    articleid: int
+    revid:int
+
+    @staticmethod
+    def from_json(obj):
+        res = PageLabel.from_json(obj)
+
+        return ArticlePageLabel(title=obj.get('title',None),
+                                articleid=obj.get('articleid',None),
+                                revid=obj.get('revid',None),
+                                **asdict(res)
+                                )
+
+    @staticmethod
+    def from_row(row):
+        res = PageLabel.from_row(row)
+        return ArticlePageLabel(title = row.title,
+                                articleid = row.articleid,
+                                revid = row.revid,
+                                **asdict(res))
+                         
+infiles="enwiki-20200301-pages-meta-history*.xml-p*.7z_article_labelings.json"; samplesize=5000*7
+
+def main(infiles="enwiki-20200301-pages-meta-history*.xml-p*.7z_article_labelings.json", samplesize=5000*7):
+    path = Path('data')
+    infiles = path.glob(infiles)
+
+    pool = Pool(28)
+
+    lines = chain(* map(lambda f: open(f,'r'), infiles))
+
+    parsed = pool.imap_unordered(json.loads, lines, chunksize=int(1e3))
+    formatted = pool.imap_unordered(TalkPageLabel.from_json, parsed, chunksize=int(1e3))
+    dicted = pool.imap_unordered(asdict,formatted, chunksize=int(1e3))
+
+    # data frame of the latest labels.
+    df = pd.DataFrame(dicted)
+
+    df = df.loc[df.timestamp <= datetime(2019,1,1)]
+
+    groups = df.groupby(["talk_page_id"])
+    max_labels = groups.wp10.max().reset_index()
+
+    df2 = pd.merge(df,max_labels,on=['talk_page_id','wp10'],how='right')
+    last_timestamp = df2.groupby(['talk_page_id']).timestamp.max().reset_index()
+    
+    df2 = pd.merge(df2, last_timestamp, on=['talk_page_id','timestamp'], how='right')
+    first_project = df2.groupby(['talk_page_id']).project.first().reset_index()
+    df2 = pd.merge(df2, first_project,on=['talk_page_id','project'], how='right')
+
+    tpid = df2
+
+    #.wp10.max().reset_index()
+    tpid = tpid.loc[~tpid.dump_talk_page_title.isna()]
+    # pick out just the samples we want.
+    spark = SparkSession.builder.getOrCreate()
+
+    sparkdf = spark.read.parquet("/gscratch/comdata/output/wikiq_enwiki_20200301_nathante_partitioned.parquet")
+    
+    tpid['timestamp'] = tpid['timestamp'].dt.tz_localize('utc')
+    labels = spark.createDataFrame(tpid)
+    talks = sparkdf.filter(sparkdf.namespace==1)
+    articles = sparkdf.filter(sparkdf.namespace==0)
+
+    # labels = labels.join(talks,on=[labels.talk_page_id == talks.articleid],how='left_outer')
+
+    talks = talks.join(labels,on=[labels.talk_page_id == talks.articleid])
+
+    #talks.filter(talks.wp10==7).select('talk_page_id').distinct().count()
+
+    talks = talks.withColumn('timediff', f.datediff(talks.timestamp, talks.date_time))
+
+    talks = talks.filter(talks.timediff <= 0)
+
+    win = Window.partitionBy("talk_page_id")
+    talks = talks.withColumn('best_timediff', f.max('timediff').over(win))
+    talks = talks.filter(talks.timediff == talks.best_timediff)
+
+    talks = talks.withColumn('article_title',f.substring_index(f.col("title"),':',-1))
+    talks = talks.select(['article_title','wp10',f.col('timestamp').alias('timestamp'),'talk_page_id']).distinct()
+
+    articles = articles.join(talks,on=[talks.article_title == articles.title])
+
+    articles = articles.withColumn('timediff', f.datediff(articles.timestamp, articles.date_time))
+    articles = articles.filter(articles.timediff <= 0)
+
+    win2 = Window.partitionBy("articleid")
+    articles = articles.filter(f.col("revert")==False)
+    articles = articles.withColumn('best_timediff', f.max('timediff').over(win2))
+    articles = articles.filter(articles.timediff == articles.best_timediff)
+    articles = articles.select(['revid','timestamp','wp10','articleid','title'])
+    
+    articles = articles.groupby(['timestamp','wp10','articleid','title']).agg(f.first(f.col("revid")).alias("revid"))
+
+    articles.write.parquet("data/article_quality_data.parquet",mode='overwrite')
+    
+    tpid = pd.read_parquet("data/article_quality_data.parquet")
+
+    # we want to sample /pages/ not /labels/,
+    # so we need to do a /full/ groupby over pages.
+    # this is why we have a lot of RAM!
+    # we need the number of pages with each label to stratify the sample.
+    label_counts = {}
+    sample_page_ids = {}
+    label_max_samplesize = int(samplesize / len(WP10))
+    sample_chunks = []
+    
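+    # stratified sample: each quality class contributes at most
+    # label_max_samplesize articles (sampled without replacement);
+    # smaller classes are taken whole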
+    for lab in WP10:
+        print(lab)
+        page_ids = tpid.loc[tpid.wp10==lab].articleid
+        label_counts[lab] = len(page_ids)
+        print(lab,label_counts)
+        if(label_counts[lab] <= label_max_samplesize):
+            sample_page_ids[lab] = page_ids
+        else:
+            sample_page_ids[lab] = random.choice(page_ids,label_max_samplesize,replace=False)
+           
+        # get the labels for each sampled article
+        sample_data_lab = tpid.loc[(tpid.articleid.isin(sample_page_ids[lab]))]
+
+        sample_chunks.append(sample_data_lab)
+
+    remember = Remember(f='remember_sample_quality_labels.RDS')
+
+    remember(label_max_samplesize, 'label_max_samplesize')
+    
+
+    # Note that different wikiprojects can have different labels
+    sample = pd.concat(sample_chunks,ignore_index=True)
+
+    revisions_per_article = sparkdf.filter(sparkdf.namespace==0).select(['revid','articleid','date_time','title'])
+    revisions_per_article = revisions_per_article.filter(f.col("date_time") >= datetime(2019,1,1))
+    revisions_per_article = revisions_per_article.filter(f.col("date_time") <= datetime(2019,12,31))
+    revisions_per_article = revisions_per_article.groupby(["articleid",'title']).count().toPandas()
+
+    revisions_per_article['title'] = revisions_per_article.title.apply(lambda s: unquote(s).strip('\"'))
+
+    revisions_per_article = pd.merge(revisions_per_article,tpid,left_on='articleid',right_on='articleid')
+    
+    revisions_per_class = revisions_per_article.groupby('wp10').agg({'count':'sum'}).reset_index()
+    revisions_per_class['wp10'] = revisions_per_class.wp10.apply(lambda s: WP10(s).to_string())
+    
+    label_counts = pd.DataFrame({'wp10':[lab.to_string() for lab in label_counts.keys()],'n_articles':list(label_counts.values())})
+    label_counts = pd.merge(label_counts,revisions_per_class,left_on='wp10',right_on='wp10')
+    label_counts = label_counts.rename(columns={'count':'n_revisions'})
+
+    remember(label_counts, 'label_sample_counts')
+    
+    sample.to_feather("data/20200301_article_labelings_sample.feather")
+
+    sample = pd.read_feather("data/20200301_article_labelings_sample.feather")
+    sample_counts = sample.articleid.groupby(sample.wp10).count().reset_index()
+    remember(sample_counts,'sample_counts')
+
+    sample_labels = sample.apply(ArticlePageLabel.from_row,axis=1)
+    sample_labels = map(PageLabel.to_json, sample_labels)
+
+    with open("data/20200301_article_labelings_sample.json",'w') as of:
+        of.writelines((l + '\n' for l in sample_labels))
+
+    pool.close()
+
+if __name__ == "__main__":
+    fire.Fire(main)
+
diff --git a/score_sample_articles.RDS b/score_sample_articles.RDS
new file mode 120000 (symlink)
index 0000000..b8c1293
--- /dev/null
@@ -0,0 +1 @@
+.git/annex/objects/J8/FP/SHA256E-s123--ca8ebf4d8748b52e9edeca96f14f4132042c8039a4d6376ffa87033adc36d8cb.RDS/SHA256E-s123--ca8ebf4d8748b52e9edeca96f14f4132042c8039a4d6376ffa87033adc36d8cb.RDS
\ No newline at end of file
diff --git a/wikiq b/wikiq
new file mode 100755 (executable)
index 0000000..0543a33
--- /dev/null
+++ b/wikiq
@@ -0,0 +1,573 @@
+#!/usr/bin/env python3
+
+# original wikiq headers are: title articleid revid date_time anon
+# editor editor_id minor text_size text_entropy text_md5 reversion
+# additions_size deletions_size
+
+import argparse
+import sys
+import os, os.path
+import re
+
+from subprocess import Popen, PIPE
+from collections import deque
+from hashlib import sha1
+
+from mwxml import Dump
+
+from deltas.tokenizers import wikitext_split
+import mwpersistence
+import mwreverts
+from urllib.parse import quote
+TO_ENCODE = ('title', 'editor')
+PERSISTENCE_RADIUS=7
+from deltas import SequenceMatcher
+from deltas import SegmentMatcher
+
+class PersistMethod:
+    none = 0
+    sequence = 1
+    segment = 2
+    legacy = 3
+
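+# summarize persistence of the tokens a revision added: total number of
+# subsequent revisions those tokens survived, and the count of tokens added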
+def calculate_persistence(tokens_added):
+    return(sum([(len(x.revisions)-1) for x in tokens_added]),
+           len(tokens_added))
+
+
+class WikiqIterator():
+    def __init__(self, fh, collapse_user=False):
+        self.fh = fh
+        self.collapse_user = collapse_user
+        self.mwiterator = Dump.from_file(self.fh)
+        self.namespace_map = { ns.id : ns.name for ns in
+                               self.mwiterator.site_info.namespaces }
+        self.__pages = self.load_pages()
+
+    def load_pages(self):
+        for page in self.mwiterator:
+            yield WikiqPage(page,
+                            namespace_map = self.namespace_map,
+                            collapse_user=self.collapse_user)
+
+    def __iter__(self):
+        return self.__pages
+
+    def __next__(self):
+        return next(self.__pages)
+
+class WikiqPage():
+    __slots__ = ('id', 'title', 'namespace', 'redirect',
+                 'restrictions', 'mwpage', '__revisions',
+                 'collapse_user')
+    
+    def __init__(self, page, namespace_map, collapse_user=False):
+        self.id = page.id
+        self.namespace = page.namespace
+        # following mwxml, we assume namespace 0 in cases where
+        # page.namespace is inconsistent with namespace_map
+        if page.namespace not in namespace_map:
+            self.title = page.title
+            page.namespace = 0
+        if page.namespace != 0:
+            self.title = ':'.join([namespace_map[page.namespace], page.title])
+        else:
+            self.title = page.title
+        self.restrictions = page.restrictions
+        self.collapse_user = collapse_user
+        self.mwpage = page
+        self.__revisions = self.rev_list()
+
+    def rev_list(self):
+        # Outline for how we want to handle collapse_user=True
+        # iteration   rev.user   prev_rev.user   add prev_rev?
+        #         0          A            None           Never
+        #         1          A               A           False
+        #         2          B               A            True
+        #         3          A               B            True
+        #         4          A               A           False
+        # Post-loop                          A          Always
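+        # e.g. with collapse_user=True and editors [A, A, B, A] we yield the
+        # second A (collapsed_revs=2), then B, then the final A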
+        for i, rev in enumerate(self.mwpage):
+            # never yield the first time
+            if i == 0:
+                if self.collapse_user: 
+                    collapsed_revs = 1
+                    rev.collapsed_revs = collapsed_revs
+
+            else:
+                if self.collapse_user:
+                    # yield if this is the last edit in a seq by a user and reset;
+                    # also yield if we don't know who the user is
+
+                    if rev.deleted.user or prev_rev.deleted.user:
+                        yield prev_rev
+                        collapsed_revs = 1
+                        rev.collapsed_revs = collapsed_revs
+
+                    elif not rev.user.text == prev_rev.user.text:
+                        yield prev_rev
+                        collapsed_revs = 1
+                        rev.collapsed_revs = collapsed_revs
+                    # otherwise, add one to the counter
+                    else:
+                        collapsed_revs += 1
+                        rev.collapsed_revs = collapsed_revs
+                # if collapse_user is false, we always yield
+                else:
+                    yield prev_rev
+
+            prev_rev = rev
+
+        # also yield the final time
+        yield prev_rev
+
+    def __iter__(self):
+        return self.__revisions
+
+    def __next__(self):
+        return next(self.__revisions)
+
+
+class RegexPair(object):
+    def __init__(self, pattern, label):
+        self.pattern = re.compile(pattern)
+        self.label = label
+        self.has_groups = bool(self.pattern.groupindex)
+        if self.has_groups:
+            self.capture_groups = list(self.pattern.groupindex.keys())
+            
+    def _make_key(self, cap_group):
+        return ("{}_{}".format(self.label, cap_group))
+
+    def matchmake(self, content, rev_data):
+        
+        temp_dict = {}
+        # if there are named capture groups in the regex
+        if self.has_groups:
+
+            # if there are matches of some sort in this revision content, fill the lists for each cap_group
+            if self.pattern.search(content) is not None:
+                m = self.pattern.finditer(content)
+                matchobjects = list(m)
+
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_list = []
+                    for match in matchobjects:
+                        # we only want to add the match for the capture group if the match is not None
+                        if match.group(cap_group) != None:
+                            temp_list.append(match.group(cap_group))
+
+                    # if temp_list of matches is empty just make that column None
+                    if len(temp_list)==0:
+                        temp_dict[key] = None
+                    # else we put in the list we made in the for-loop above
+                    else:
+                        temp_dict[key] = ', '.join(temp_list)
+
+            # there are no matches at all in this revision content, we default values to None
+            else:
+                for cap_group in self.capture_groups:
+                    key = self._make_key(cap_group)
+                    temp_dict[key] = None
+
+        # there are no capture groups, we just search for all the matches of the regex
+        else:
+            #given that there are matches to be made
+            if self.pattern.search(content) is not None:
+                m = self.pattern.findall(content)
+                temp_dict[self.label] = ', '.join(m)
+            else:
+                temp_dict[self.label] = None    
+        # update rev_data with our new columns
+        rev_data.update(temp_dict)
+        return rev_data
+
+        
+class WikiqParser():
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15):
+        """ 
+        Parameters:
+           persist : what persistence method to use. Takes a PersistMethod value
+        """
+        self.input_file = input_file
+        self.output_file = output_file
+        self.collapse_user = collapse_user
+        self.persist = persist
+        self.printed_header = False
+        self.namespaces = []
+        self.urlencode = urlencode
+        self.revert_radius = revert_radius
+
+        if namespaces is not None:
+            self.namespace_filter = set(namespaces)
+        else:
+            self.namespace_filter = None
+
+        self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
+        self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
+        
+
+    def make_matchmake_pairs(self, patterns, labels):
+        if (patterns is not None and labels is not None) and \
+           (len(patterns) == len(labels)):
+            return [RegexPair(pattern, label) for pattern, label in zip(patterns, labels)]
+        elif (patterns is None and labels is None):
+            return []
+        else:
+            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+
+    def matchmake(self, rev, rev_data):
+        rev_data = self.matchmake_revision(rev.text, rev_data)
+        rev_data = self.matchmake_comment(rev.comment, rev_data)
+        return rev_data
+
+    def matchmake_revision(self, text, rev_data):
+        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
+
+    def matchmake_comment(self, comment, rev_data):
+        return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)
+
+    def matchmake_pairs(self, text, rev_data, pairs):
+        for pair in pairs:
+            rev_data = pair.matchmake(text, rev_data)
+        return rev_data
+
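+    # map a page title to a namespace id by its prefix (e.g. "Talk:Foo" -> 1
+    # on a standard wiki); unprefixed titles get the default namespace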
+    def __get_namespace_from_title(self, title):
+        default_ns = None
+
+        for ns in self.namespaces:
+            # skip if the namespace is not defined
+            if ns == None:
+                default_ns = self.namespaces[ns]
+                continue
+
+            if title.startswith(ns + ":"):
+                return self.namespaces[ns]
+
+        # if we've made it this far with no matches, we return the default namespace
+        return default_ns
+
+
+    def process(self):
+
+        # create a regex that creates the output filename
+        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
+        #                         r'output/wikiq-\1-\2.tsv',
+        #                         input_filename)
+
+        # Construct dump file iterator
+        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
+
+        # extract the namespace name -> id mapping
+        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
+
+        page_count = 0
+        rev_count = 0
+
+
+        # Iterate through pages
+        for page in dump:
+            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
+
+            # skip namespaces not in the filter
+            if self.namespace_filter is not None:
+                if namespace not in self.namespace_filter:
+                    continue
+
+            rev_detector = mwreverts.Detector(radius = self.revert_radius)
+
+            if self.persist != PersistMethod.none:
+                window = deque(maxlen=PERSISTENCE_RADIUS)
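+                # revisions are buffered so that each one's persistence can be
+                # measured over the following PERSISTENCE_RADIUS revisions;
+                # rows are therefore printed up to PERSISTENCE_RADIUS revisions late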
+
+                if self.persist == PersistMethod.sequence:
+                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                elif self.persist == PersistMethod.segment:
+                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                # self.persist == PersistMethod.legacy
+                else:
+                    from mw.lib import persistence
+                    state = persistence.State()
+
+            # Iterate through a page's revisions
+            for rev in page:
+                
+                # initialize rev_data
+                rev_data = {
+                    'revid':rev.id,
+                    'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
+                    'articleid' : page.id,
+                    'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
+                    'title' : '"' + page.title + '"',
+                    'namespace' : namespace,
+                    'deleted' : "TRUE" if rev.deleted.text else "FALSE"
+                }
+
+                rev_data = self.matchmake(rev, rev_data)
+
+                # if revisions are deleted, /many/ things will be missing
+                if rev.deleted.text:
+                    rev_data['text_chars'] = ""
+                    rev_data['sha1'] = ""
+                    rev_data['revert'] = ""
+                    rev_data['reverteds'] = ""
+
+                else:
+                    # rev.text can be None if the page has no text
+                    if not rev.text:
+                        rev.text = ""
+                    # if text exists, we'll check for a sha1 and generate one otherwise
+
+                    if rev.sha1:
+                        text_sha1 = rev.sha1
+                    else:
+
+                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
+                    
+                    rev_data['sha1'] = text_sha1
+
+                    # TODO rev.bytes doesn't work.. looks like a bug
+                    rev_data['text_chars'] = len(rev.text)
+
+                    # generate revert data
+                    revert = rev_detector.process(text_sha1, rev.id)
+                    
+                    if revert:
+                        rev_data['revert'] = "TRUE"
+                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
+                    else:
+                        rev_data['revert'] = "FALSE"
+                        rev_data['reverteds'] = ""
+
+                # if the fact that the edit was minor can be hidden, this might be an issue
+                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
+
+                if not rev.deleted.user:
+                    # wrap user-defined editors in quotes for fread
+                    rev_data['editor'] = '"' + rev.user.text + '"'
+                    rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
+                    
+                else:
+                    rev_data['anon'] = ""
+                    rev_data['editor'] = ""
+
+                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
+                #    redirect = True
+                #else:
+                #    redirect = False
+                
+                #TODO missing: additions_size deletions_size
+                
+                # if collapse user was on, lets run that
+                if self.collapse_user:
+                    rev_data['collapsed_revs'] = rev.collapsed_revs
+
+                if self.persist != PersistMethod.none:
+                    if rev.deleted.text:
+                        # deleted text: no persistence data; emit the row now
+                        # (old_rev_data here could be unbound or stale)
+                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
+                            rev_data[k] = None
+                        self.print_rev_data(rev_data)
+                    else:
+
+                        if self.persist != PersistMethod.legacy:
+                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
+
+                        else:
+                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+                            
+                        window.append((rev.id, rev_data, tokens_added, tokens_removed))
+                        
+                        if len(window) == PERSISTENCE_RADIUS:
+                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]
+                            
+                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
+
+                            old_rev_data["token_revs"] = num_token_revs
+                            old_rev_data["tokens_added"] = num_tokens
+                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
+                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1
+
+                            self.print_rev_data(old_rev_data)
+
+                else:
+                    self.print_rev_data(rev_data)
+
+                rev_count += 1
+
+            if self.persist != PersistMethod.none:
+                # print out metadata for the last RADIUS revisions
+                for i, item in enumerate(window):
+                    # if the window was full, we've already printed item 0
+                    if len(window) == PERSISTENCE_RADIUS and i == 0:
+                        continue
+
+                    rev_id, rev_data, tokens_added, tokens_removed = item
+                    num_token_revs, num_tokens = calculate_persistence(tokens_added)
+
+                    rev_data["token_revs"] = num_token_revs
+                    rev_data["tokens_added"] = num_tokens
+                    rev_data["tokens_removed"] = len(tokens_removed)
+                    rev_data["tokens_window"] = len(window)-(i+1)
+                    
+                    self.print_rev_data(rev_data)
+
+            page_count += 1
+
+        print("Done: %s revisions and %s pages." % (rev_count, page_count),
+              file=sys.stderr)
+
+    def print_rev_data(self, rev_data):
+        # if it's the first time through, print the header
+        if self.urlencode:
+            for field in TO_ENCODE:
+                rev_data[field] = quote(str(rev_data[field]))
+
+        if not self.printed_header:
+            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
+            self.printed_header = True
+        
+        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
+
+
+def open_input_file(input_filename):
+    if re.match(r'.*\.7z$', input_filename):
+        cmd = ["7za", "x", "-so", input_filename, '*'] 
+    elif re.match(r'.*\.gz$', input_filename):
+        cmd = ["zcat", input_filename] 
+    elif re.match(r'.*\.bz2$', input_filename):
+        cmd = ["bzcat", "-dk", input_filename] 
+
+    try:
+        input_file = Popen(cmd, stdout=PIPE).stdout
+    except NameError:
+        # cmd is only bound for compressed inputs; plain files are read directly
+        input_file = open(input_filename, 'r')
+
+    return input_file
+
+def open_output_file(input_filename):
+    # create a regex that creates the output filename
+    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
+    output_filename = re.sub(r'\.xml', '', output_filename)
+    output_filename = output_filename + ".tsv"
+    output_file = open(output_filename, "w")
+
+    return output_file
+
+parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')
+
+# arguments for the input direction
+parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str, 
+                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
+
+parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
+                    help="Directory for output files.")
+
+parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
+                    help="Write output to standard out (do not create dump file)")
+
+parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
+                    help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
+
+parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
+                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow.  The defualt is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
+
+parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
+                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+
+parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                    help="Id number of namspace to include. Can be specified more than once.")
+
+parser.add_argument('-rr',
+                    '--revert-radius',
+                    dest="revert_radius",
+                    type=int,
+                    action='store',
+                    default=15,
+                    help="Number of edits to check when looking for reverts (default: 15)")
+
+parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
+                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
+                    help="The label for the outputted column based on matching the regex in revision text.")
+
+parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+                    help="The regular expression to search for in comments of revisions.")
+
+parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
+                    help="The label for the outputted column based on matching the regex in comments.")
+
+args = parser.parse_args()
+
+# set persistence method
+
+if args.persist is None:
+    persist = PersistMethod.none
+elif args.persist == "segment":
+    persist = PersistMethod.segment
+elif args.persist == "legacy":
+    persist = PersistMethod.legacy
+else:
+    persist = PersistMethod.sequence
+
+if args.namespace_filter is not None:
+    namespaces = args.namespace_filter
+else:
+    namespaces = None
+
+if len(args.dumpfiles) > 0:
+    for filename in args.dumpfiles:
+        input_file = open_input_file(filename)
+
+        # open directory for output
+        if args.output_dir:
+            output_dir = args.output_dir[0]
+        else:
+            output_dir = "."
+
+        print("Processing file: %s" % filename, file=sys.stderr)
+
+        if args.stdout:
+            output_file = sys.stdout
+        else:
+            filename = os.path.join(output_dir, os.path.basename(filename))
+            output_file = open_output_file(filename)
+
+        wikiq = WikiqParser(input_file,
+                            output_file,
+                            collapse_user=args.collapse_user,
+                            persist=persist,
+                            urlencode=args.urlencode,
+                            namespaces=namespaces,
+                            revert_radius=args.revert_radius,
+                            regex_match_revision = args.regex_match_revision,
+                            regex_revision_label = args.regex_revision_label,
+                            regex_match_comment = args.regex_match_comment,
+                            regex_comment_label = args.regex_comment_label)
+
+        wikiq.process()
+
+        # close things 
+        input_file.close()
+        output_file.close()
+else:
+    wikiq = WikiqParser(sys.stdin,
+                        sys.stdout,
+                        collapse_user=args.collapse_user,
+                        persist=persist,
+                        #persist_legacy=args.persist_legacy,
+                        urlencode=args.urlencode,
+                        namespaces=namespaces,
+                        revert_radius=args.revert_radius,
+                        regex_match_revision = args.regex_match_revision,
+                        regex_revision_label = args.regex_revision_label,
+                        regex_match_comment = args.regex_match_comment,
+                        regex_comment_label = args.regex_comment_label)
+
+    wikiq.process() 
+
+# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
+# stop_words = stop_words.split(",")
diff --git a/wikiq_to_parquet.py b/wikiq_to_parquet.py
new file mode 100644 (file)
index 0000000..f200870
--- /dev/null
@@ -0,0 +1,61 @@
+from pathlib import Path
+import pandas as pd
+from multiprocessing import Pool
+from numpy import dtype            # used in the dtypes mapping below
+from urllib.parse import unquote   # used by decode_strip_udf
+from pyspark.sql import functions as f
+from pyspark.sql import SparkSession, Window
+from pyspark.sql.functions import udf
+from pyspark.sql.types import StringType
+import csv
+
+path = Path("/gscratch/comdata/users/nathante/wikiqRunning/wikiq_output/")
+outpath = Path("/gscratch/comdata/output/wikiq_enwiki_20200301_nathante_parquet/")
+files = list(path.glob("*.tsv"))
+
+def wikiq_tsv_to_parquet(dumpfile, outpath = Path("/gscratch/comdata/output/wikiq_enwiki_20200301_nathante.parquet/")):
+    outfile = outpath / (dumpfile.name + ".parquet")
+    outpath.mkdir(parents=True, exist_ok=True)
+    _wikiq_tsv_to_parquet(dumpfile,outfile)
+
+def _wikiq_tsv_to_parquet(dumpfile, outfile):
+
+    dtypes = {'anon': dtype('O'), 'articleid': dtype('int64'), 'deleted': dtype('bool'), 'editor': dtype('O'), 'editor_id': dtype('float64'), 'minor': dtype('bool'), 'namespace': dtype('int64'), 'revert': dtype('O'), 'reverteds': dtype('O'), 'revid': dtype('int64'), 'sha1': dtype('O'), 'text_chars': dtype('float64'), 'title': dtype('O')}
+
+    print(dumpfile)
+    # note: error_bad_lines/warn_bad_lines were removed in pandas 2.0 (newer versions use on_bad_lines='warn')
+    df = pd.read_csv(dumpfile,sep='\t',quoting=csv.QUOTE_NONE,error_bad_lines=False, warn_bad_lines=True,parse_dates=['date_time'],dtype=dtypes)
+
+    df.to_parquet(outfile)
+
+with Pool(28) as pool:
+    jobs = pool.imap_unordered(wikiq_tsv_to_parquet, files)
+    list(jobs)
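+# draining the lazy imap_unordered iterator with list() ensures every TSV is
+# converted before the Spark stage below reads the parquet output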
+
+spark = SparkSession.builder.getOrCreate()
+
+@udf(StringType())
+def decode_strip_udf(val):
+    if val is None:
+        return ""
+    else:
+        return unquote(val).strip('\"')
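+# e.g. '%22Talk%3AFoo%22' -> unquote -> '"Talk:Foo"' -> strip '"' -> 'Talk:Foo'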
+df = spark.read.parquet('/gscratch/comdata/output/wikiq_enwiki_20200301_nathante.parquet')
+df = df.withColumnRenamed("anon","anonRaw")
+df = df.withColumn("anon",f.when(f.col("anonRaw")=="TRUE",True).otherwise(False))
+df = df.drop("anonRaw")
+df = df.withColumnRenamed("text_chars","text_chars_raw")
+df = df.withColumn("text_chars",f.col("text_chars_raw").cast('int'))
+df = df.drop("text_chars_raw")
+df = df.withColumnRenamed("editor_id",'editor_id_raw')
+df = df.withColumn("editor_id",f.col("editor_id_raw").cast("int"))
+df = df.drop("editor_id_raw")
+df = df.withColumnRenamed("revert","revert_raw")
+df = df.withColumn("revert",f.when(f.col("revert_raw")=="TRUE",True).otherwise(False))
+df = df.drop("revert_raw")
+df = df.withColumnRenamed("title","title_raw")
+df = df.withColumn("title", decode_strip_udf(f.col("title_raw")))
+df = df.drop("title_raw")
+df = df.withColumnRenamed("editor","editor_raw")
+df = df.withColumn("editor", decode_strip_udf(f.col("editor_raw")))
+df = df.drop("editor_raw")
+df = df.repartition(400,'articleid')
+df.write.parquet("/gscratch/comdata/output/wikiq_enwiki_20200301_nathante_partitioned.parquet",mode='overwrite')
