## simulations/plot_irr_example.R
source("RemembR/R/RemembeR.R")
library(arrow)
library(data.table)
library(ggplot2)
library(filelock)
library(argparser)

parser <- arg_parser("Summarize simulation results and build the dataset for the IRR example plot.")
parser <- add_argument(parser, "--infile", default="", help="Name of the simulation results file (feather format) to read.")
parser <- add_argument(parser, "--name", default="", help="The name to save the data under in the remember file.")
args <- parse_args(parser)
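## Hypothetical invocation (the infile and name below are illustrative, not taken from the repository):
##   Rscript plot_irr_example.R --infile example_irr_sims.feather --name plot.df.example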
source("summarize_estimator.R")
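## summarize_estimator.R is assumed to define summarize.estimator(df, estimator, variable),
## which aggregates the per-simulation coefficient estimates for one estimator/variable pair.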

## Summarize each estimator's coefficient estimates for x and z and stack them into one table for plotting.
build_plot_dataset <- function(df){

    x.true <- summarize.estimator(df, 'true', 'x')
    z.true <- summarize.estimator(df, 'true', 'z')

    x.naive <- summarize.estimator(df, 'naive', 'x')
    z.naive <- summarize.estimator(df, 'naive', 'z')

    x.loa0.feasible <- summarize.estimator(df, 'loa0.feasible', 'x')
    z.loa0.feasible <- summarize.estimator(df, 'loa0.feasible', 'z')

    x.loa0.mle <- summarize.estimator(df, 'loa0.mle', 'x')
    z.loa0.mle <- summarize.estimator(df, 'loa0.mle', 'z')

    x.loco.feasible <- summarize.estimator(df, 'loco.feasible', 'x')
    z.loco.feasible <- summarize.estimator(df, 'loco.feasible', 'z')

    x.loco.mle <- summarize.estimator(df, 'loco.mle', 'x')
    z.loco.mle <- summarize.estimator(df, 'loco.mle', 'z')

    z.loco.amelia <- summarize.estimator(df, 'amelia.full', 'z')
    x.loco.amelia <- summarize.estimator(df, 'amelia.full', 'x')

    z.loco.zhang <- summarize.estimator(df, 'zhang', 'z')
    x.loco.zhang <- summarize.estimator(df, 'zhang', 'x')

    z.loco.gmm <- summarize.estimator(df, 'gmm', 'z')
    x.loco.gmm <- summarize.estimator(df, 'gmm', 'x')

    ## x.mle <- summarize.estimator(df, 'mle', 'x')
    ## z.mle <- summarize.estimator(df, 'mle', 'z')

    ## mean of the accuracy column across all simulation runs, attached to every row below
    accuracy <- df[, mean(accuracy)]

    plot.df <- rbindlist(list(x.true, z.true,
                              x.loa0.feasible, z.loa0.feasible,
                              x.loa0.mle, z.loa0.mle,
                              x.loco.feasible, z.loco.feasible,
                              x.loco.mle, z.loco.mle,
                              x.loco.amelia, z.loco.amelia,
                              x.loco.zhang, z.loco.zhang,
                              x.loco.gmm, z.loco.gmm,
                              x.naive, z.naive),
                         use.names=TRUE)

    plot.df[, accuracy := accuracy]
    plot.df <- plot.df[, ":="(sd.est=sqrt(var.est)/N.sims)]
    return(plot.df)
}
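## The table returned by build_plot_dataset() is assumed to carry the mean.est, var.est and
## N.sims columns produced by summarize.estimator(), plus the sd.est and accuracy columns
## added above, with one row per estimator/variable combination and parameter setting.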

sims.df <- read_feather(args$infile)
print(unique(sims.df$N))

# df <- df[apply(df,1,function(x) !any(is.na(x)))]

## some simulation outputs lack these columns; add them as NA so downstream code can reference them
if(!('Bzx' %in% names(sims.df)))
    sims.df[,Bzx:=NA]

if(!('accuracy_imbalance_difference' %in% names(sims.df)))
    sims.df[,accuracy_imbalance_difference:=NA]

unique(sims.df[,'accuracy_imbalance_difference'])

#plot.df <- build_plot_dataset(df[accuracy_imbalance_difference==0.1][N==700])
plot.df <- build_plot_dataset(sims.df)
change.remember.file("remember_irr.RDS",clear=TRUE)
remember(plot.df,args$name)

set.remember.prefix(gsub("plot.df.","",args$name))
remember(median(sims.df$loco.accuracy),'med.loco.acc')
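## change.remember.file(), set.remember.prefix() and remember() come from RemembR (sourced above);
## they are assumed to persist the named objects into remember_irr.RDS so that downstream documents
## can load the plotting data and the median loco accuracy.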
#ggplot(df,aes(x=Bxy.est.mle)) + geom_histogram() + facet_grid(accuracy_imbalance_difference ~ Bzy)

## ## ## df[gmm.ER_pval<0.05]

## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T),
##                                    N=factor(N),
##                                    m=factor(m))]

## plot.df.test <- plot.df.test[(variable=='x') & (method!="Multiple imputation (Classifier features unobserved)")]
## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method))
## p <- p + geom_hline(data=plot.df.test, mapping=aes(yintercept=0.1),linetype=2)

## p <- p + geom_pointrange() + facet_grid(N~m,as.table=F,scales='free') + scale_x_discrete(labels=label_wrap_gen(4))
## print(p)

## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T),
##                                    N=factor(N),
##                                    m=factor(m))]

## plot.df.test <- plot.df.test[(variable=='z') & (method!="Multiple imputation (Classifier features unobserved)")]
## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method))
## p <- p + geom_hline(data=plot.df.test, mapping=aes(yintercept=-0.1),linetype=2)

## p <- p + geom_pointrange() + facet_grid(m~N,as.table=F,scales='free') + scale_x_discrete(labels=label_wrap_gen(4))
## print(p)

## x.mle <- df[,.(N,m,Bxy.est.mle,Bxy.ci.lower.mle, Bxy.ci.upper.mle, y_explained_variance, Bzx, Bzy, accuracy_imbalance_difference)]
## x.mle.plot <- x.mle[,.(mean.est = mean(Bxy.est.mle),
##                        var.est = var(Bxy.est.mle),
##                        N.sims = .N,
##                        variable='z',
##                        method='Bespoke MLE'
##                        ),
##                     by=c("N","m",'y_explained_variance', 'Bzx', 'Bzy','accuracy_imbalance_difference')]

## z.mle <- df[,.(N,m,Bzy.est.mle,Bzy.ci.lower.mle, Bzy.ci.upper.mle, y_explained_variance, Bzx, Bzy, accuracy_imbalance_difference)]

## z.mle.plot <- z.mle[,.(mean.est = mean(Bzy.est.mle),
##                        var.est = var(Bzy.est.mle),
##                        N.sims = .N,
##                        variable='z',
##                        method='Bespoke MLE'
##                        ),
##                     by=c("N","m",'y_explained_variance','Bzx')]

## plot.df <- z.mle.plot
## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T),
##                                    N=factor(N),
##                                    m=factor(m))]

## plot.df.test <- plot.df.test[(variable=='z') & (m != 1000) & (m!=500) & (method!="Multiple imputation (Classifier features unobserved)")]
## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method))
## p <- p + geom_hline(aes(yintercept=0.2),linetype=2)

## p <- p + geom_pointrange() + facet_grid(m~Bzx, Bzy,as.table=F) + scale_x_discrete(labels=label_wrap_gen(4))
## print(p)

## ## ggplot(plot.df[variable=='x'], aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) + geom_pointrange() + facet_grid(-m~N) + scale_x_discrete(labels=label_wrap_gen(10))

## ## ggplot(plot.df,aes(y=N,x=m,color=p.sign.correct)) + geom_point() + facet_grid(variable ~ method) + scale_color_viridis_c(option='D') + theme_minimal() + xlab("Number of gold standard labels") + ylab("Total sample size")

## ## ggplot(plot.df,aes(y=N,x=m,color=abs(mean.bias))) + geom_point() + facet_grid(variable ~ method) + scale_color_viridis_c(option='D') + theme_minimal() + xlab("Number of gold standard labels") + ylab("Total sample size")
