## simulations/plot_dv_example.R
## Summarize simulation results into a dataset for plotting and store it with RemembR.
source("RemembR/R/RemembeR.R")
library(arrow)
library(data.table)
library(ggplot2)
library(filelock)
library(argparser)

parser <- arg_parser("Summarize simulation results and build a dataset for plotting.")
parser <- add_argument(parser, "--infile", default="", help="name of the file to read.")
parser <- add_argument(parser, "--name", default="", help="The name to save the data to in the remember file.")
args <- parse_args(parser)

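## Example invocation (the feather file and remember-file name below are
## hypothetical; --infile should point at a results file written by the
## corresponding simulation script):
## Rscript plot_dv_example.R --infile example_dv.feather --name "plot.df.dv.example"
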
## Summarize one estimator (identified by `suffix`) for one coefficient
## (`coefname`) across simulation runs: CI coverage of Bxy, bias, and the
## spread of the point estimates, grouped by simulation setting.
summarize.estimator <- function(df, suffix='naive', coefname='x'){

    part <- df[,c('N',
                  'm',
                  'Bxy',
                  paste0('B',coefname,'y.est.',suffix),
                  paste0('B',coefname,'y.ci.lower.',suffix),
                  paste0('B',coefname,'y.ci.upper.',suffix),
                  'y_explained_variance',
                  'Bzy',
                  'accuracy_imbalance_difference'
                  ),
               with=FALSE]

    true.in.ci <- as.integer((part$Bxy >= part[[paste0('B',coefname,'y.ci.lower.',suffix)]]) & (part$Bxy <= part[[paste0('B',coefname,'y.ci.upper.',suffix)]]))
    zero.in.ci <- as.integer((0 >= part[[paste0('B',coefname,'y.ci.lower.',suffix)]]) & (0 <= part[[paste0('B',coefname,'y.ci.upper.',suffix)]]))
    bias <- part$Bxy - part[[paste0('B',coefname,'y.est.',suffix)]]
    sign.correct <- as.integer(sign(part$Bxy) == sign(part[[paste0('B',coefname,'y.est.',suffix)]]))

    part <- part[,':='(true.in.ci = true.in.ci,
                       zero.in.ci = zero.in.ci,
                       bias = bias,
                       sign.correct = sign.correct)]

    part.plot <- part[, .(p.true.in.ci = mean(true.in.ci),
                          mean.bias = mean(bias),
                          mean.est = mean(.SD[[paste0('B',coefname,'y.est.',suffix)]]),
                          var.est = var(.SD[[paste0('B',coefname,'y.est.',suffix)]]),
                          est.upper.95 = quantile(.SD[[paste0('B',coefname,'y.est.',suffix)]],0.95),
                          est.lower.95 = quantile(.SD[[paste0('B',coefname,'y.est.',suffix)]],0.05),
                          N.sims = .N,
                          p.sign.correct = mean(as.integer(sign.correct & (! zero.in.ci))),
                          variable=coefname,
                          method=suffix
                          ),
                      by=c("N","m",'Bzy','accuracy_imbalance_difference','y_explained_variance')
                      ]

    return(part.plot)
}
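
## For example, a call like summarize.estimator(df, 'naive', 'x') reads the
## columns Bxy.est.naive, Bxy.ci.lower.naive, and Bxy.ci.upper.naive (per the
## paste0() pattern above) and returns, for each simulation setting, the CI
## coverage of Bxy (p.true.in.ci), the mean bias, and the 5th/95th
## percentiles of the point estimates.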


## Combine the per-estimator summaries for the x and z coefficients into one
## long-format data.table for plotting.
build_plot_dataset <- function(df){

    x.true <- summarize.estimator(df, 'true','x')
    z.true <- summarize.estimator(df, 'true','z')

    x.naive <- summarize.estimator(df, 'naive','x')
    z.naive <- summarize.estimator(df, 'naive','z')

    x.feasible <- summarize.estimator(df, 'feasible','x')
    z.feasible <- summarize.estimator(df, 'feasible','z')

    x.amelia.full <- summarize.estimator(df, 'amelia.full','x')
    z.amelia.full <- summarize.estimator(df, 'amelia.full','z')

    x.mle <- summarize.estimator(df, 'mle','x')
    z.mle <- summarize.estimator(df, 'mle','z')

    x.zhang <- summarize.estimator(df, 'zhang','x')
    z.zhang <- summarize.estimator(df, 'zhang','z')

    accuracy <- df[,mean(accuracy)]

    plot.df <- rbindlist(list(x.true, z.true, x.naive, z.naive, x.amelia.full, z.amelia.full, x.mle, z.mle, x.zhang, z.zhang, x.feasible, z.feasible), use.names=T)

    plot.df[,accuracy := accuracy]

    plot.df <- plot.df[,":="(sd.est=sqrt(var.est)/N.sims)]

    return(plot.df)
}
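
## The result is a long-format data.table: one row per simulation setting
## (N, m, Bzy, accuracy_imbalance_difference, y_explained_variance) for each
## estimator (method) and coefficient (variable), which suits faceted ggplot2
## figures like the commented-out examples at the end of this file.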


## Read the simulation results, build the plotting dataset, and store it
## under args$name in the remember file via RemembR.
df <- read_feather(args$infile)
plot.df <- build_plot_dataset(df)

remember(plot.df, args$name)


## Exploratory plotting code, kept commented out for reference:

## df[gmm.ER_pval<0.05]
## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T),
##                                    N=factor(N),
##                                    m=factor(m))]


## plot.df.test <- plot.df.test[(variable=='z') & (m != 1000) & (m!=500) & !is.na(p.true.in.ci) & (method!="Multiple imputation (Classifier features unobserved)")]
## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method))
## p <- p + geom_hline(aes(yintercept=-0.05),linetype=2)

## p <- p + geom_pointrange() + facet_grid(m~N,as.table=F) + scale_x_discrete(labels=label_wrap_gen(4))
## print(p)
## ggplot(plot.df[variable=='x'], aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) + geom_pointrange() + facet_grid(-m~N) + scale_x_discrete(labels=label_wrap_gen(10))

## ggplot(plot.df,aes(y=N,x=m,color=p.sign.correct)) + geom_point() + facet_grid(variable ~ method) + scale_color_viridis_c(option='D') + theme_minimal() + xlab("Number of gold standard labels") + ylab("Total sample size")

## ggplot(plot.df,aes(y=N,x=m,color=abs(mean.bias))) + geom_point() + facet_grid(variable ~ method) + scale_color_viridis_c(option='D') + theme_minimal() + xlab("Number of gold standard labels") + ylab("Total sample size")
