### EXAMPLE 2_b: demonstrates how measurement error can lead to a sign
### error in the coefficient of a covariate. This is the same as
### example 2, only instead of x->k we have k->x. Even when you have a
### good predictor, if it is biased with respect to a covariate you can
### get the wrong sign, even when you include the proxy variable in the
### regression. But with some ground truth and multiple imputation, you
### can fix it.

library(argparser)
library(mecor)
library(ggplot2)
library(data.table)
library(filelock)
library(arrow)
library(Amelia)
library(Zelig)

library(predictionError)
options(amelia.parallel="no", amelia.ncpus=1)

source("irr_simulation_base.R")

## SETUP:
### we want to estimate x -> y; x is missing at random (MAR)
### we have x -> k; k -> w; w is the model's prediction used as a proxy for x
### A realistic scenario is an NLP model predicting something like "racial harassment" in social media comments
### The labels x are binary, but the model provides a continuous predictor

### simulation:
#### how much power do we get from the model in the first place? (sweeping N and m)

simulate_data <- function(N, m, B0=0, Bxy=0.2, Bzy=-0.2, Bzx=0.2, y_explained_variance=0.025, prediction_accuracy=0.73, coder_accuracy=0.9, seed=1){
    set.seed(seed)
    z <- rbinom(N, 1, 0.5)
    ## x.var.epsilon <- var(Bzx * z) * ((1 - zx_explained_variance) / zx_explained_variance)
    xprime <- Bzx * z # + x.var.epsilon
    x <- rbinom(N,1,plogis(xprime))
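    ## x is binary; P(x = 1) = plogis(Bzx * z), so z shifts the log-odds of x by Bzx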

    y.var.epsilon <- (var(Bzy * z) + var(Bxy *x) + 2*cov(Bxy*x,Bzy*z)) * ((1-y_explained_variance)/y_explained_variance)
    y.epsilon <- rnorm(N, sd = sqrt(y.var.epsilon))
    y <- Bzy * z + Bxy * x + y.epsilon + B0
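    ## the noise variance above is calibrated so that the systematic part
    ## Bzy*z + Bxy*x explains approximately y_explained_variance of var(y),
    ## since var(y) = var(Bzy*z + Bxy*x) + y.var.epsilon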

    df <- data.table(x=x,y=y,z=z)

    if(m < N){
        df[sample(nrow(df), m), x.obs := x]
    } else {
        df[, x.obs := x]
    }
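    ## only the m sampled rows get a non-missing x.obs; the remaining rows are
    ## "unlabeled" and the correction has to rely on the proxy w for them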

    coder.0.correct <- rbinom(m, 1, coder_accuracy)
    coder.1.correct <- rbinom(m, 1, coder_accuracy)

    df[!is.na(x.obs),x.obs.0 := as.numeric((x.obs & coder.0.correct) | (!x.obs & !coder.0.correct))]
    df[!is.na(x.obs),x.obs.1 := as.numeric((x.obs & coder.1.correct) | (!x.obs & !coder.1.correct))]
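    ## x.obs.0 and x.obs.1 are two independent human codings of the labeled rows:
    ## each coder reproduces the true label with probability coder_accuracy and
    ## flips it otherwise, giving the two imperfect ratings used by the
    ## IRR-style correction in irr_simulation_base.R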


    ## construct a predictor with a specified accuracy:
    ## w0 is the probability the model predicts class 1, equal to
    ## prediction_accuracy when x == 1 and 1 - prediction_accuracy when x == 0,
    ## so each point is misclassified with probability 1 - prediction_accuracy
    w0 <- (1 - x)**2 + (-1)**(1 - x) * prediction_accuracy

    ## to obtain a continuous latent score with that expected accuracy,
    ## add mean-zero logistic noise to the log-odds of w0
    w.noisy.odds <- rlogis(N, qlogis(w0))
    df[, w := plogis(w.noisy.odds)]
    df[, w_pred := as.integer(w > 0.5)]

    ## print the empirical accuracy of the binary predictions as a sanity check
    print(mean(df$x == df$w_pred))
    return(df)
}
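
## A minimal sketch of how simulate_data() can be checked interactively (not run
## when this script is executed; the parameter values below are illustrative).
## Comparing the naive regression on the proxy w_pred against the oracle
## regression on the true x shows the bias the correction below is meant to fix.
if(FALSE){
    df.check <- simulate_data(N=10000, m=150, Bxy=-0.33, Bzy=0.27, Bzx=-0.3)
    print(coef(lm(y ~ w_pred + z, data=df.check)))   # naive: proxy in place of the true x
    print(coef(lm(y ~ x + z, data=df.check)))        # oracle: the true x
}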

parser <- arg_parser("Simulate data and fit corrected models")
parser <- add_argument(parser, "--N", default=1000, help="number of observations of w")
parser <- add_argument(parser, "--m", default=150, help="m the number of ground truth observations")
parser <- add_argument(parser, "--seed", default=1, help='seed for the rng')
parser <- add_argument(parser, "--outfile", help='output file', default='example_1.feather')
parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.1)
# parser <- add_argument(parser, "--zx_explained_variance", help='what proportion of the variance of x can be explained by z?', default=0.3)
parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.73)
parser <- add_argument(parser, "--coder_accuracy", help='how accurate are the human coders?', default=0.85)
parser <- add_argument(parser, "--outcome_formula", help='formula for the outcome variable', default="y~x+z")
parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~x")

# parser <- add_argument(parser, "--rater_formula", help='formula for the true variable', default="x.obs~x")
parser <- add_argument(parser, "--truth_formula", help='formula for the true variable', default="x~z")
parser <- add_argument(parser, "--Bzx", help='Effect of z on x', default=-0.3)
parser <- add_argument(parser, "--Bzy", help='Effect of z on y', default=0.27)
parser <- add_argument(parser, "--Bxy", help='Effect of x on y', default=-0.33)

args <- parse_args(parser)
B0 <- 0
Bxy <- args$Bxy
Bzy <- args$Bzy
Bzx <- args$Bzx

if (args$m < args$N){

    df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, Bzx, seed=args$seed, y_explained_variance = args$y_explained_variance,  prediction_accuracy=args$prediction_accuracy, coder_accuracy=args$coder_accuracy)

    result <- list('N'=args$N, 'm'=args$m, 'B0'=B0, 'Bxy'=Bxy, 'Bzx'=Bzx, 'Bzy'=Bzy,
                   'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance,
                   'prediction_accuracy'=args$prediction_accuracy,
                   'coder_accuracy'=args$coder_accuracy,
                   'outcome_formula'=args$outcome_formula,
                   'proxy_formula'=args$proxy_formula,
                   'truth_formula'=args$truth_formula,
                   'error'='')
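    ## run_simulation() is defined in irr_simulation_base.R; it is assumed here to
    ## fit the models given by outcome_formula, proxy_formula, and truth_formula
    ## (including the multiple-imputation correction) and to return the estimates
    ## as a named list that becomes one row of the results file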

    outline <- run_simulation(df, result, outcome_formula=as.formula(args$outcome_formula), proxy_formula=as.formula(args$proxy_formula), truth_formula=as.formula(args$truth_formula))
    
    outfile_lock <- lock(paste0(args$outfile, '_lock'),exclusive=TRUE)
    if(file.exists(args$outfile)){
        logdata <- as.data.table(read_feather(args$outfile))
        logdata <- rbind(logdata, as.data.table(outline), fill=TRUE)
    } else {
        logdata <- as.data.table(outline)
    }

    print(outline)
    write_feather(logdata, args$outfile)
    unlock(outfile_lock)
}