source("RemembR/R/RemembeR.R") library(arrow) library(data.table) library(ggplot2) library(filelock) library(argparser) parser <- arg_parser("Simulate data and fit corrected models.") parser <- add_argument(parser, "--infile", default="", help="name of the file to read.") parser <- add_argument(parser, "--name", default="", help="The name to safe the data to in the remember file.") args <- parse_args(parser) source("summarize_estimator.R") build_plot_dataset <- function(df){ x.true <- summarize.estimator(df, 'true','x') z.true <- summarize.estimator(df, 'true','z') x.naive <- summarize.estimator(df, 'naive','x') z.naive <- summarize.estimator(df, 'naive','z') x.loa0.feasible <- summarize.estimator(df, 'loa0.feasible','x') z.loa0.feasible <- summarize.estimator(df,'loa0.feasible','z') x.loa0.mle <- summarize.estimator(df, 'loa0.mle', 'x') z.loa0.mle <- summarize.estimator(df, 'loa0.mle', 'z') x.loco.feasible <- summarize.estimator(df, 'loco.feasible', 'x') z.loco.feasible <- summarize.estimator(df, 'loco.feasible', 'z') x.loco.mle <- summarize.estimator(df, 'loco.mle', 'x') z.loco.mle <- summarize.estimator(df, 'loco.mle', 'z') x.loco.mle <- summarize.estimator(df, 'loco.mle', 'x') z.loco.amelia <- summarize.estimator(df, 'amelia.full', 'z') x.loco.amelia <- summarize.estimator(df, 'amelia.full', 'x') z.loco.zhang <- summarize.estimator(df, 'zhang', 'z') x.loco.zhang <- summarize.estimator(df, 'zhang', 'x') z.loco.gmm <- summarize.estimator(df, 'gmm', 'z') x.loco.gmm <- summarize.estimator(df, 'gmm', 'x') ## x.mle <- summarize.estimator(df, 'mle', 'x') ## z.mle <- summarize.estimator(df, 'mle', 'z') accuracy <- df[,mean(accuracy)] plot.df <- rbindlist(list(x.true,z.true,x.loa0.feasible,z.loa0.feasible,x.loa0.mle,z.loa0.mle,x.loco.feasible, z.loco.feasible, x.loco.mle, z.loco.mle, x.loco.amelia, z.loco.amelia,x.loco.zhang, z.loco.zhang,x.loco.gmm, z.loco.gmm,x.naive,z.naive),use.names=T) plot.df[,accuracy := accuracy] plot.df <- plot.df[,":="(sd.est=sqrt(var.est)/N.sims)] return(plot.df) } sims.df <- read_feather(args$infile) print(unique(sims.df$N)) # df <- df[apply(df,1,function(x) !any(is.na(x)))] if(!('Bzx' %in% names(sims.df))) sims.df[,Bzx:=NA] if(!('accuracy_imbalance_difference' %in% names(sims.df))) sims.df[,accuracy_imbalance_difference:=NA] unique(sims.df[,'accuracy_imbalance_difference']) #plot.df <- build_plot_dataset(df[accuracy_imbalance_difference==0.1][N==700]) plot.df <- build_plot_dataset(sims.df) change.remember.file("remember_irr.RDS",clear=TRUE) remember(plot.df,args$name) set.remember.prefix(gsub("plot.df.","",args$name)) remember(median(sims.df$loco.accuracy),'med.loco.acc') #ggplot(df,aes(x=Bxy.est.mle)) + geom_histogram() + facet_grid(accuracy_imbalance_difference ~ Bzy) ## ## ## df[gmm.ER_pval<0.05] ## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T), ## N=factor(N), ## m=factor(m))] ## plot.df.test <- plot.df.test[(variable=='x') & (method!="Multiple imputation (Classifier features unobserved)")] ## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) ## p <- p + geom_hline(data=plot.df.test, mapping=aes(yintercept=0.1),linetype=2) ## p <- p + geom_pointrange() + facet_grid(N~m,as.table=F,scales='free') + scale_x_discrete(labels=label_wrap_gen(4)) ## print(p) ## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple 
imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T), ## N=factor(N), ## m=factor(m))] ## plot.df.test <- plot.df.test[(variable=='z') & (method!="Multiple imputation (Classifier features unobserved)")] ## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) ## p <- p + geom_hline(data=plot.df.test, mapping=aes(yintercept=-0.1),linetype=2) ## p <- p + geom_pointrange() + facet_grid(m~N,as.table=F,scales='free') + scale_x_discrete(labels=label_wrap_gen(4)) ## print(p) ## x.mle <- df[,.(N,m,Bxy.est.mle,Bxy.ci.lower.mle, Bxy.ci.upper.mle, y_explained_variance, Bzx, Bzy, accuracy_imbalance_difference)] ## x.mle.plot <- x.mle[,.(mean.est = mean(Bxy.est.mle), ## var.est = var(Bxy.est.mle), ## N.sims = .N, ## variable='z', ## method='Bespoke MLE' ## ), ## by=c("N","m",'y_explained_variance', 'Bzx', 'Bzy','accuracy_imbalance_difference')] ## z.mle <- df[,.(N,m,Bzy.est.mle,Bzy.ci.lower.mle, Bzy.ci.upper.mle, y_explained_variance, Bzx, Bzy, accuracy_imbalance_difference)] ## z.mle.plot <- z.mle[,.(mean.est = mean(Bzy.est.mle), ## var.est = var(Bzy.est.mle), ## N.sims = .N, ## variable='z', ## method='Bespoke MLE' ## ), ## by=c("N","m",'y_explained_variance','Bzx')] ## plot.df <- z.mle.plot ## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T), ## N=factor(N), ## m=factor(m))] ## plot.df.test <- plot.df.test[(variable=='z') & (m != 1000) & (m!=500) & (method!="Multiple imputation (Classifier features unobserved)")] ## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) ## p <- p + geom_hline(aes(yintercept=0.2),linetype=2) ## p <- p + geom_pointrange() + facet_grid(m~Bzx, Bzy,as.table=F) + scale_x_discrete(labels=label_wrap_gen(4)) ## print(p) ## ## ggplot(plot.df[variable=='x'], aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) + geom_pointrange() + facet_grid(-m~N) + scale_x_discrete(labels=label_wrap_gen(10)) ## ## ggplot(plot.df,aes(y=N,x=m,color=p.sign.correct)) + geom_point() + facet_grid(variable ~ method) + scale_color_viridis_c(option='D') + theme_minimal() + xlab("Number of gold standard labels") + ylab("Total sample size") ## ## ggplot(plot.df,aes(y=N,x=m,color=abs(mean.bias))) + geom_point() + facet_grid(variable ~ method) + scale_color_viridis_c(option='D') + theme_minimal() + xlab("Number of gold standard labels") + ylab("Total sample size")