### EXAMPLE 2_b: demonstrates how measurement error can lead to a
### sign (type S) error in a covariate. This is the same as example 2, only
### instead of x->k we have k->x. Even when you have a good
### predictor, if it's biased against a covariate you can get the
### wrong sign, even when you include the proxy variable in the
### regression. But with some ground truth and multiple imputation,
### the correct sign can be recovered.
library(predictionError)
## these packages are used below; attach them here in case simulation_base.R does not
library(argparser)
library(data.table)
library(filelock)
## read_feather/write_feather are assumed to be provided by the arrow (or feather) package loaded in simulation_base.R
options(amelia.parallel="no", amelia.ncpus=1)
source("simulation_base.R")
### we want to estimate x -> y; x is MAR (missing at random)
### we have x -> k and k -> w; the model's prediction w is used as a proxy for x
### A realistic scenario is that we have an NLP model predicting something like "racial harassment" in social media comments
### The labels x are binary, but the model provides a continuous predictor
#### how much power do we get from the model in the first place? (sweeping N and m)
simulate_data <- function(N, m, B0=0, Bxy=0.2, Bzy=-0.2, Bzx=0.2, y_explained_variance=0.025, prediction_accuracy=0.73, seed=1){
    set.seed(seed)

    ## z: a binary covariate that affects both x (through Bzx) and y (through Bzy)
    z <- rbinom(N, 1, 0.5)
    # x.var.epsilon <- var(Bzx *z) * ((1-zx_explained_variance)/zx_explained_variance)
    xprime <- Bzx * z #+ x.var.epsilon
    x <- rbinom(N, 1, plogis(xprime))
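
    ## the noise variance below is chosen so that x and z together explain
    ## y_explained_variance of the total variance of y:
    ## var(signal) / (var(signal) + var(noise)) = y_explained_variance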
    y.var.epsilon <- (var(Bzy * z) + var(Bxy * x) + 2*cov(Bxy*x, Bzy*z)) * ((1-y_explained_variance)/y_explained_variance)
    y.epsilon <- rnorm(N, sd = sqrt(y.var.epsilon))
    y <- Bzy * z + Bxy * x + y.epsilon
    df <- data.table(x=x, y=y, z=z)

    ## only m of the N observations have ground-truth labels (x.obs)
    if(m < N){
        df <- df[sample(nrow(df), m), x.obs := x]
    } else {
        df <- df[, x.obs := x]
    }
    ## how can you make a model with a specific accuracy?
    w0 <- (1-x)**2 + (-1)**(1-x) * prediction_accuracy
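    ## w0 is the target probability that the classifier predicts 1:
    ## w0 = prediction_accuracy when x == 1 and 1 - prediction_accuracy when x == 0,
    ## so a prediction drawn with probability w0 agrees with x with probability prediction_accuracy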
    ## how can you make a model with a specific accuracy and a continuous latent variable?
    ## make the model equally wrong about every point (roughly) by
    ## adding mean-0 noise to the log odds.
    w.noisey.odds <- rlogis(N, qlogis(w0))
    df[, w := plogis(w.noisey.odds)]
    df[, w_pred := as.integer(w > 0.5)]
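    ## because the logistic noise is centered at qlogis(w0), P(w > 0.5) = P(noisy log odds > 0) = w0,
    ## so thresholding w at 0.5 reproduces the target accuracy in expectation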
    ## realized accuracy of the thresholded predictions
    print(mean(df$x == df$w_pred))

    return(df)
}
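
## quick interactive check (not part of the original pipeline): the realized accuracy
## of w_pred should come out close to the requested prediction_accuracy
if(FALSE){
    df.check <- simulate_data(N=10000, m=1000, seed=1)
    mean(df.check$x == df.check$w_pred)
}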
parser <- arg_parser("Simulate data and fit corrected models")
parser <- add_argument(parser, "--N", default=1000, help="number of observations of w")
parser <- add_argument(parser, "--m", default=200, help="m, the number of ground truth observations")
parser <- add_argument(parser, "--seed", default=57, help='seed for the rng')
parser <- add_argument(parser, "--outfile", help='output file', default='example_1.feather')
parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.05)
# parser <- add_argument(parser, "--zx_explained_variance", help='what proportion of the variance of x can be explained by z?', default=0.3)
parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.73)
parser <- add_argument(parser, "--Bzx", help='coefficient of z on x', default=1)
args <- parse_args(parser)
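
## example invocation (the script file name here is a placeholder; the flags match the parser above):
## Rscript example_2_b.R --N 5000 --m 500 --seed 57 --outfile example_1.feather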
## coefficient values mirror the defaults of simulate_data(); Bzx is taken from the CLI
B0 <- 0
Bxy <- 0.2
Bzy <- -0.2
Bzx <- args$Bzx

df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, Bzx, seed=args$seed + 500, y_explained_variance = args$y_explained_variance, prediction_accuracy=args$prediction_accuracy)

result <- list('N'=args$N, 'm'=args$m, 'B0'=B0, 'Bxy'=Bxy, 'Bzy'=Bzy, 'Bzx'=Bzx, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'error'="")
outline <- run_simulation(df, result)
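
## Illustration only: run_simulation() is defined in simulation_base.R, so the sketch below is
## an assumption about the kind of comparison it performs, not the actual implementation.
## It contrasts the naive proxy regression (y on w_pred and z) with a multiple-imputation
## estimate that uses the m ground-truth labels in x.obs: Amelia imputes the missing labels
## (treated as continuous here for simplicity) and the per-imputation estimates are pooled
## with Rubin's rules.
if(FALSE){
    library(Amelia)

    ## naive: substitute the classifier's prediction for the unobserved x
    naive.fit <- lm(y ~ w_pred + z, data=df)

    ## multiple imputation of the partially observed ground truth
    imp <- amelia(as.data.frame(df[, .(y, z, w, x.obs)]), m=5, p2s=0)
    fits <- lapply(imp$imputations, function(d) lm(y ~ x.obs + z, data=d))

    ## pool with Rubin's rules: mean estimate; within- plus between-imputation variance
    ests <- sapply(fits, function(f) coef(f)["x.obs"])
    vars <- sapply(fits, function(f) vcov(f)["x.obs", "x.obs"])
    pooled.est <- mean(ests)
    pooled.se <- sqrt(mean(vars) + (1 + 1/length(ests)) * var(ests))

    print(coef(summary(naive.fit)))
    print(c(x.obs=pooled.est, se=pooled.se))
}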
outfile_lock <- lock(paste0(args$outfile, '_lock'), exclusive=TRUE)
if(file.exists(args$outfile)){
    logdata <- read_feather(args$outfile)
    logdata <- rbind(logdata, as.data.table(outline), fill=TRUE)
} else {
    logdata <- as.data.table(outline)
}

write_feather(logdata, args$outfile)
unlock(outfile_lock)