### EXAMPLE 2_b: demonstrates how measurement error can lead to a sign (Type S) error in a covariate.
### This is the same as example 2, only instead of x -> k we have k -> x.
### Even when you have a good predictor, if it's biased against a covariate you can get the wrong sign,
### even when you include the proxy variable in the regression.
### But with some ground truth and multiple imputation, you can fix it.

library(argparser)
library(mecor)
library(ggplot2)
library(data.table)
library(filelock)
library(arrow)
library(Amelia)
library(Zelig)
library(predictionError)
options(amelia.parallel="no", amelia.ncpus=1)

source("simulation_base.R")

## SETUP:
### We want to estimate x -> y; x is MAR.
### We have x -> k and k -> w; w is used to predict x via the predictive model.
### A realistic scenario is that we have an NLP model predicting something like "racial harassment" in social media comments.
### The labels x are binary, but the model provides a continuous predictor.
### Simulation:
#### how much power do we get from the model in the first place? (sweeping N and m)

simulate_data <- function(N, m, B0=0, Bxy=0.2, Bgy=-0.2, Bgx=0.2,
                          y_explained_variance=0.025, gx_explained_variance=0.15,
                          prediction_accuracy=0.73, seed=1){
    set.seed(seed)

    ## g: a binary covariate (e.g., a group indicator)
    g <- rbinom(N, 1, 0.5)

    ## xprime: a latent continuous score in which g explains gx_explained_variance of the variance
    x.var.epsilon <- var(Bgx * g) * ((1 - gx_explained_variance) / gx_explained_variance)
    x.epsilon <- rnorm(N, sd=sqrt(x.var.epsilon))
    xprime <- Bgx * g + x.epsilon

    ## binarize the latent score; logistic() (inverse logit) is assumed to come from simulation_base.R
    x <- as.integer(logistic(scale(xprime)) > 0.5)

    ## scale the noise on y so that g and x jointly explain y_explained_variance of its variance
    y.var.epsilon <- (var(Bgy * g) + var(Bxy * x) + 2 * cov(Bxy * x, Bgy * g)) * ((1 - y_explained_variance) / y_explained_variance)
    y.epsilon <- rnorm(N, sd=sqrt(y.var.epsilon))
    y <- Bgy * g + Bxy * x + y.epsilon

    df <- data.table(x=x, xprime=xprime, y=y, g=g)

    ## only m of the N rows carry ground-truth labels (x.obs); the rest stay missing
    if(m < N){
        df <- df[sample(nrow(df), m), x.obs := x]
    } else {
        df <- df[, x.obs := x]
    }

    ## w_pred: a binary prediction of x; flip the label on (1 - prediction_accuracy) of the rows
    df <- df[, w_pred := x]
    df <- df[sample(1:N, round((1 - prediction_accuracy) * N)), w_pred := (w_pred - 1)**2]

    ## w: a continuous predicted probability, obtained by calibrating w_pred against x
    w <- predict(glm(x ~ w_pred, data=df, family=binomial(link='logit')), type='response')
    df <- df[, w := w]

    return(df)
}
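## A minimal interactive sketch of the generator (these parameter values are
## illustrative assumptions, not taken from the defaults above):
## df.example <- simulate_data(N=1000, m=200, Bxy=0.2, Bgy=-0.2, Bgx=0.4, seed=1111)
## df.example[, mean(w_pred == x)]   # agreement should be ~prediction_accuracy (0.73)
## df.example[, sum(!is.na(x.obs))]  # exactly m rows carry ground-truth labels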
parser <- arg_parser("Simulate data and fit corrected models")
parser <- add_argument(parser, "--N", default=500, help="number of observations of w")
parser <- add_argument(parser, "--m", default=100, help="m, the number of ground-truth observations")
parser <- add_argument(parser, "--seed", default=4321, help="seed for the rng")
parser <- add_argument(parser, "--outfile", help="output file", default="example_2_b.feather")
parser <- add_argument(parser, "--y_explained_variance", help="what proportion of the variance of y can be explained?", default=0.005)
parser <- add_argument(parser, "--gx_explained_variance", help="what proportion of the variance of x can be explained by g?", default=0.15)
parser <- add_argument(parser, "--prediction_accuracy", help="how accurate is the predictive model?", default=0.73)
args <- parse_args(parser)

B0 <- 0
Bxy <- 0.2
Bgy <- -0.2
Bgx <- 0.4

df <- simulate_data(args$N, args$m, B0, Bxy, Bgy, Bgx,
                    seed=args$seed,
                    y_explained_variance=args$y_explained_variance,
                    gx_explained_variance=args$gx_explained_variance,
                    prediction_accuracy=args$prediction_accuracy)

result <- list('N'=args$N, 'm'=args$m, 'B0'=B0, 'Bxy'=Bxy, 'Bgy'=Bgy, 'Bgx'=Bgx,
               'seed'=args$seed,
               'y_explained_variance'=args$y_explained_variance,
               'gx_explained_variance'=args$gx_explained_variance,
               'prediction_accuracy'=args$prediction_accuracy)

outline <- run_simulation(df, result)

## append this run's results to the shared output file under an exclusive lock,
## so concurrent runs don't clobber each other
outfile_lock <- lock(paste0(args$outfile, '_lock'), exclusive=TRUE)
if(file.exists(args$outfile)){
    logdata <- read_feather(args$outfile)
    logdata <- rbind(logdata, as.data.table(outline))
} else {
    logdata <- as.data.table(outline)
}

print(outline)
write_feather(logdata, args$outfile)
unlock(outfile_lock)
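## Example invocation (the script filename here is an assumption; use whatever
## this file is called in the repo):
## Rscript 02_example_2_b.R --N 1000 --m 200 --seed 4321 --outfile example_2_b.feather
## Repeated invocations append rows to the same feather file under the file lock,
## so a parameter sweep over N and m can fan out across processes and share one output.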