code.communitydata.science - ml_measurement_error_public.git/commitdiff
check in some old simulation updates and a dv example with real data
authorNathan TeBlunthuis <nathante@uw.edu>
Fri, 6 Jan 2023 20:22:41 +0000 (12:22 -0800)
committerNathan TeBlunthuis <nathante@uw.edu>
Fri, 6 Jan 2023 20:22:41 +0000 (12:22 -0800)
civil_comments/01_dv_example.R [new file with mode: 0644]
civil_comments/design_example.R
civil_comments/load_perspective_data.R [new file with mode: 0644]
simulations/02_indep_differential.R
simulations/Makefile
simulations/grid_sweep.py
simulations/measerr_methods.R
simulations/pl_methods.R
simulations/robustness_check_notes.md
simulations/run_job.sbatch [new file with mode: 0644]
simulations/simulation_base.R

diff --git a/civil_comments/01_dv_example.R b/civil_comments/01_dv_example.R
new file mode 100644 (file)
index 0000000..4092243
--- /dev/null
@@ -0,0 +1,54 @@
+source('load_perspective_data.R')
+source("../simulations/measerr_methods.R")
+source("../simulations/RemembR/R/RemembeR.R")
+
+change.remember.file("dv_perspective_example.RDS")
+
+# for reproducibility
+set.seed(1111)
+
+## another simple enough example: is P(toxic | funny and white) > P(toxic | funny nand white)? Or, are funny comments more toxic when people disclose that they are white?
+
+compare_dv_models <-function(pred_formula, outcome_formula, proxy_formula, df, sample.prop, remember_prefix){
+    pred_model <- glm(pred_formula, df, family=binomial(link='logit'))
+
+    remember(coef(pred_model), paste0(remember_prefix, "coef_pred_model"))
+    remember(diag(vcov((pred_model))), paste0(remember_prefix, "se_pred_model"))
+
+    coder_model <- glm(outcome_formula, df, family=binomial(link='logit'))
+    remember(coef(coder_model), paste0(remember_prefix, "coef_coder_model"))
+    remember(diag(vcov((coder_model))), paste0(remember_prefix, "se_coder_model"))
+
+    df_measerr_method <- copy(df)[sample(1:.N, sample.prop * .N), toxicity_coded_1 := toxicity_coded]
+    df_measerr_method <- df_measerr_method[,toxicity_coded := toxicity_coded_1]
+    sample_model <- glm(outcome_formula, df_measerr_method, family=binomial(link='logit'))
+    remember(coef(sample_model), paste0(remember_prefix, "coef_sample_model"))
+    remember(diag(vcov((sample_model))), paste0(remember_prefix, "se_sample_model"))
+
+    measerr_model <- measerr_mle_dv(df_measerr_method, outcome_formula, outcome_family=binomial(link='logit'), proxy_formula=proxy_formula, proxy_family=binomial(link='logit'))
+
+    inv_hessian = solve(measerr_model$hessian)
+    stderr = diag(inv_hessian)
+    remember(stderr, paste0(remember_prefix, "measerr_model_stderr"))
+    remember(measerr_model$par, paste0(remember_prefix, "measerr_model_par"))
+}
+
+print("running first example")
+
+compare_dv_models(pred_formula = toxicity_pred ~ funny*white,
+                  outcome_formula = toxicity_coded ~ funny*white,
+                  proxy_formula = toxicity_pred ~ toxicity_coded*funny*white,
+                  df=df,
+                  sample.prop=0.01,
+                  remember_prefix='cc_ex_tox.funny.white')
+
+
+print("running second example")
+
+compare_dv_models(pred_formula = toxicity_pred ~ likes+race_disclosed,
+                  outcome_formula = toxicity_coded ~ likes + race_disclosed,
+                  proxy_formula = toxicity_pred ~ toxicity_coded*likes*race_disclosed,
+                  df=df,
+                  sample.prop=0.01,
+                  remember_prefix='cc_ex_tox.funny.race_disclosed')
+
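A note on the quantities remembered above: `solve(measerr_model$hessian)` is the asymptotic variance-covariance matrix of the MLE, so standard errors are the square roots of its diagonal; likewise `sqrt(diag(vcov(model)))` gives standard errors for a `glm` fit. A minimal sketch of recovering standard errors and rough 95% intervals from a fit returned by `optim(..., hessian=TRUE)` (the `fit` object here is hypothetical):

    inv_hessian <- solve(fit$hessian)   # variance-covariance matrix at the MLE
    se <- sqrt(diag(inv_hessian))       # standard errors = sqrt of the variances
    ci_lower <- fit$par - 1.96 * se     # approximate 95% confidence bounds
    ci_upper <- fit$par + 1.96 * se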
diff --git a/civil_comments/design_example.R b/civil_comments/design_example.R
index 5991334c3cbbdc69649da5bdce4d835a8c95eced..1a83a81a791adf9f167723d885ded05dd3d1d0eb 100644 (file)
@@ -1,18 +1,5 @@
-library(data.table)
-library(MASS)
-
-scores <- fread("perspective_scores.csv")
-scores <- scores[,id:=as.character(id)]
-
-df <- fread("all_data.csv")
-
-# only use the data that has identity annotations
-df <- df[identity_annotator_count > 0]
-
-(df[!(df$id %in% scores$id)])
-
-df <- df[scores,on='id',nomatch=NULL]
-
+set.seed(1111)
+source('load_perspective_data.R')
 ## how accurate are the classifiers?
 
 ## the API claims that these scores are "probabilities"
@@ -27,21 +14,6 @@ F1 <- function(y, predictions){
     return (2 * precision * recall ) / (precision + recall)
 }
 
-df[, ":="(identity_attack_pred = identity_attack_prob >=0.5,
-          insult_pred = insult_prob >= 0.5,
-          profanity_pred = profanity_prob >= 0.5,
-          severe_toxicity_pred = severe_toxicity_prob >= 0.5,
-          threat_pred = threat_prob >= 0.5,
-          toxicity_pred = toxicity_prob >= 0.5,
-          identity_attack_coded = identity_attack >= 0.5,
-          insult_coded = insult >= 0.5,
-          profanity_coded = obscene >= 0.5,
-          severe_toxicity_coded = severe_toxicity >= 0.5,
-          threat_coded = threat >= 0.5,
-          toxicity_coded = toxicity >= 0.5
-          )]
-
-
 
 ## toxicity is about 93% accurate, with an f1 of 0.8
 ## identity_attack has high accuracy 97%, but an unfortunant f1 of 0.5.
 
@@ -88,6 +60,7 @@ df <- df[,":="(identity_error = identity_attack_coded - identity_attack_pred,
 
 ## what's correlated with toxicity_error ?
 df <- df[,approved := rating == "approved"]
+df <- df[,white := white > 0.5]
 
 cortab <- cor(df[,.(toxicity_error,
                     identity_error,
 
@@ -134,14 +107,62 @@ cortab['toxicity_coded',]
 cortab['identity_error',]
 cortab['white',]
 
-glm(white ~ toxicity_coded + psychiatric_or_mental_illness, data = df, family=binomial(link='logit'))
+cortab <- cor(df[,.(toxicity_error,
+                    identity_error,
+                    toxicity_coded,
+                    funny,
+                    approved,
+                    sad,
+                    wow,
+                    likes,
+                    disagree,
+                    gender_disclosed,
+                    sexuality_disclosed,
+                    religion_disclosed,
+                    race_disclosed,
+                    disability_disclosed)])
+
+
+## here's a simple example, is P(white | toxic and mentally ill) > P(white | toxic or mentally ill). Are people who discuss their mental illness in a toxic way more likely to be white compared to those who just talk about their mental illness or are toxic? 
+summary(glm(white ~ toxicity_coded*psychiatric_or_mental_illness, data = df, family=binomial(link='logit')))
+
+summary(glm(white ~ toxicity_pred*psychiatric_or_mental_illness, data = df, family=binomial(link='logit')))
+
+summary(glm(white ~ toxicity_coded*male, data = df, family=binomial(link='logit')))
+
+summary(glm(white ~ toxicity_pred*male, data = df, family=binomial(link='logit')))
 
 
-glm(white ~ toxicity_pred + psychiatric_or_mental_illness, data = df, family=binomial(link='logit'))
+summary(glm(toxicity_coded ~ white*psychiatric_or_mental_illness, data = df, family=binomial(link='logit')))
 
 
-m1 <- glm.nb(funny ~ (male + female + transgender + other_gender + heterosexual + bisexual + other_sexual_orientation + christian + jewish + hindu + buddhist + atheist + other_religion + asian + latino + other_race_or_ethnicity + physical_disability + intellectual_or_learning_disability + white + black + psychiatric_or_mental_illness)*toxicity_coded, data = df)
+summary(glm(toxicity_pred ~ white*psychiatric_or_mental_illness, data = df, family=binomial(link='logit')))
+
+
+## another simple enough example: is P(toxic | funny and white) > P(toxic | funny nand white)? Or, are funny comments more toxic when people disclose that they are white?
+
+summary(glm(toxicity_pred ~ funny*white, data=df, family=binomial(link='logit')))
+summary(glm(toxicity_coded ~ funny*white, data=df, family=binomial(link='logit')))
+
+source("../simulations/measerr_methods.R")
+                                                                                       
+saved_model_file <- "measerr_model_tox.eq.funny.cross.white.RDS"
+overwrite_model <- TRUE 
+
+# it works so far with a 20% and 15% sample. Smaller is better. let's try a 10% sample again. It didn't work out. We'll go forward with a 15% sample.
+df_measerr_method <- copy(df)[sample(1:.N, 0.05 * .N), toxicity_coded_1 := toxicity_coded]
+df_measerr_method <- df_measerr_method[,toxicity_coded := toxicity_coded_1]
+summary(glm(toxicity_coded ~ funny*white, data=df_measerr_method[!is.na(toxicity_coded)], family=binomial(link='logit')))
+
+if(!file.exists(saved_model_file) || (overwrite_model == TRUE)){
+    measerr_model <- measerr_mle_dv(df_measerr_method,toxicity_coded ~ funny*white,outcome_family=binomial(link='logit'), proxy_formula=toxicity_pred ~ toxicity_coded*funny*white)
+saveRDS(measerr_model, saved_model_file)
+} else {
+    measerr_model <- readRDS(saved_model_file)
+}
 
 
-m2 <- glm.nb(funny ~ (male + female + transgender + other_gender + heterosexual + bisexual + other_sexual_orientation + christian + jewish + hindu + buddhist + atheist + other_religion + asian + latino + other_race_or_ethnicity + physical_disability + intellectual_or_learning_disability + white + black + psychiatric_or_mental_illness)*toxicity_pred, data = df)
+inv_hessian <- solve(measerr_model$hessian)
+se <- diag(inv_hessian)
 
 
+lm2 <- glm.nb(funny ~ (male + female + transgender + other_gender + heterosexual + bisexual + other_sexual_orientation + christian + jewish + hindu + buddhist + atheist + other_religion + asian + latino + other_race_or_ethnicity + physical_disability + intellectual_or_learning_disability + white + black + psychiatric_or_mental_illness)*toxicity_pred, data = df)
 m3 <- glm.nb(funny ~ (male + female + transgender + other_gender + heterosexual + bisexual + other_sexual_orientation + christian + jewish + hindu + buddhist + atheist + other_religion + asian + latino + other_race_or_ethnicity + physical_disability + intellectual_or_learning_disability + white + black + psychiatric_or_mental_illness)*toxicity, data = df)
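The validation-sample construction added above (`copy(df)[sample(1:.N, 0.05 * .N), toxicity_coded_1 := toxicity_coded]` followed by `toxicity_coded := toxicity_coded_1`) relies on data.table assignment by reference: only the sampled rows get a value, so after the copy-back every unsampled row's `toxicity_coded` is `NA`, which is what `measerr_mle_dv` treats as unlabeled. A small self-contained sketch of the same pattern (toy column names, illustrative only):

    library(data.table)
    set.seed(1)
    toy <- data.table(y = rbinom(20, 1, 0.5))
    # label a 25% subsample; the remaining rows become NA ("unlabeled")
    toy <- copy(toy)[sample(1:.N, 0.25 * .N), y_coded_1 := y]
    toy[, y_coded := y_coded_1]
    toy[, .N, by = .(labeled = !is.na(y_coded))]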
 
 
diff --git a/civil_comments/load_perspective_data.R b/civil_comments/load_perspective_data.R
new file mode 100644 (file)
index 0000000..636c423
--- /dev/null
@@ -0,0 +1,41 @@
+library(data.table)
+library(MASS)
+
+set.seed(1111)
+
+scores <- fread("perspective_scores.csv")
+scores <- scores[,id:=as.character(id)]
+
+df <- fread("all_data.csv")
+
+# only use the data that has identity annotations
+df <- df[identity_annotator_count > 0]
+
+(df[!(df$id %in% scores$id)])
+
+df <- df[scores,on='id',nomatch=NULL]
+
+df[, ":="(identity_attack_pred = identity_attack_prob >=0.5,
+          insult_pred = insult_prob >= 0.5,
+          profanity_pred = profanity_prob >= 0.5,
+          severe_toxicity_pred = severe_toxicity_prob >= 0.5,
+          threat_pred = threat_prob >= 0.5,
+          toxicity_pred = toxicity_prob >= 0.5,
+          identity_attack_coded = identity_attack >= 0.5,
+          insult_coded = insult >= 0.5,
+          profanity_coded = obscene >= 0.5,
+          severe_toxicity_coded = severe_toxicity >= 0.5,
+          threat_coded = threat >= 0.5,
+          toxicity_coded = toxicity >= 0.5
+          )]
+
+gt.0.5 <- function(v) { v >= 0.5 }
+dt.apply.any <- function(fun, ...){apply(apply(cbind(...), 2, fun),1,any)}
+
+df <- df[,":="(gender_disclosed = dt.apply.any(gt.0.5, male, female, transgender, other_gender),
+               sexuality_disclosed = dt.apply.any(gt.0.5, heterosexual, bisexual, other_sexual_orientation),
+               religion_disclosed = dt.apply.any(gt.0.5, christian, jewish, hindu, buddhist, atheist, muslim, other_religion),
+               race_disclosed = dt.apply.any(gt.0.5, white, black, asian, latino, other_race_or_ethnicity), 
+               disability_disclosed = dt.apply.any(gt.0.5,physical_disability, intellectual_or_learning_disability, psychiatric_or_mental_illness, other_disability))]
+
+df <- df[,white:=gt.0.5(white)]
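The disclosure indicators above are built with `dt.apply.any`, which thresholds each identity-share column at 0.5 and flags a row if any of the supplied columns passes. A toy illustration of the helper (made-up values):

    library(data.table)
    gt.0.5 <- function(v) { v >= 0.5 }
    dt.apply.any <- function(fun, ...){apply(apply(cbind(...), 2, fun), 1, any)}
    toy <- data.table(male = c(0.9, 0.1, 0.0), female = c(0.0, 0.7, 0.1))
    toy[, gender_disclosed := dt.apply.any(gt.0.5, male, female)]
    toy$gender_disclosed   # TRUE TRUE FALSE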
diff --git a/simulations/02_indep_differential.R b/simulations/02_indep_differential.R
index 80e19be78f8badbad9eb3e557ea6de5a356da384..4e3a1324339856d0edf41d1a96c1a4ecb62fa3cb 100644 (file)
@@ -159,7 +159,7 @@ if(args$m < args$N){
     ## pc.df <- pc(suffStat=list(C=cor(df.pc),n=nrow(df.pc)),indepTest=gaussCItest,labels=names(df.pc),alpha=0.05)
     ## plot(pc.df)
 
-    result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy, 'Bzx'=args$Bzx, 'Bzy'=Bzy, 'Px'=Px, .'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'accuracy_imbalance_difference'=args$accuracy_imbalance_difference, 'y_bias'=args$y_bias,'outcome_formula'=args$outcome_formula, 'proxy_formula'=args$proxy_formula,truth_formula=args$truth_formula, error='')
+    result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy, 'Bzx'=args$Bzx, 'Bzy'=Bzy, 'Px'=Px, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'accuracy_imbalance_difference'=args$accuracy_imbalance_difference, 'y_bias'=args$y_bias,'outcome_formula'=args$outcome_formula, 'proxy_formula'=args$proxy_formula,truth_formula=args$truth_formula, error='')
 
     outline <- run_simulation(df, result, outcome_formula=as.formula(args$outcome_formula), proxy_formula=as.formula(args$proxy_formula), truth_formula=as.formula(args$truth_formula))
     
 
diff --git a/simulations/Makefile b/simulations/Makefile
index feeeaa54dbc1f311152a9f44c026e3fca49c54d7..1cab47320e5238ff9afa7b6938003a56136aa34f 100644 (file)
@@ -1,4 +1,4 @@
-
+.ONESHELL:
 SHELL=bash
 
 Ns=[1000, 5000, 10000]
@@ -6,8 +6,9 @@ ms=[100, 200, 400]
 seeds=[$(shell seq -s, 1 500)]
 explained_variances=[0.1]
 
-all:remembr.RDS remember_irr.RDS
-supplement: remember_robustness_misspec.RDS
+all:main supplement
+main:remembr.RDS 
+supplement:robustness_1.RDS robustness_1_dv.RDS robustness_2.RDS robustness_2_dv.RDS robustness_3.RDS robustness_3_dv.RDS robustness_4.RDS robustness_4_dv.RDS 
 
 srun=sbatch --wait --verbose run_job.sbatch
 
 
@@ -24,7 +25,7 @@ joblists:example_1_jobs example_2_jobs example_3_jobs
 
 
 example_1_jobs: 01_two_covariates.R simulation_base.R grid_sweep.py pl_methods.R
-       sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 01_two_covariates.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["example_1.feather"], "y_explained_variance":${explained_variances}, "Bzx":[1]}' --outfile example_1_jobs
+       ${srun} grid_sweep.py --command "Rscript 01_two_covariates.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["example_1.feather"], "y_explained_variance":${explained_variances}, "Bzx":[1]}' --outfile example_1_jobs
 
 example_1.feather: example_1_jobs 
        rm -f example_1.feather
 
@@ -124,7 +125,14 @@ robustness_1_jobs: 02_indep_differential.R simulation_base.R grid_sweep.py
        sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 02_indep_differential.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_1.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~1"]}' --outfile robustness_1_jobs
 
 
+
+START=0
+STEP=1000
+ONE=1
+
 robustness_1.feather: robustness_1_jobs
+       $(eval END_1!=cat robustness_1_jobs | wc -l)
+       $(eval ITEMS_1!=seq $(START) $(STEP) $(END_1))
        rm -f robustness_1.feather
        sbatch --wait --verbose --array=1-1000  run_simulation.sbatch 0 robustness_1_jobs
        sbatch --wait --verbose --array=1001-2000  run_simulation.sbatch 0 robustness_1_jobs
@@ -132,22 +140,25 @@ robustness_1.feather: robustness_1_jobs
        sbatch --wait --verbose --array=3001-4000  run_simulation.sbatch 0 robustness_1_jobs
        sbatch --wait --verbose --array=4001-$(shell cat robustness_1_jobs | wc -l)  run_simulation.sbatch 0 robustness_1_jobs
 
+       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_1_jobs;)
+
 robustness_1.RDS: robustness_1.feather
        rm -f robustness_1.RDS
        ${srun} Rscript plot_example.R --infile $< --name "robustness_1" --remember-file $@
 
 robustness_1_dv_jobs: simulation_base.R 04_depvar_differential.R grid_sweep.py
-       ${srun} bash -c "source ~/.bashrc && grid_sweep.py --command 'Rscript 04_depvar_differential.R' --arg_dict \"{'N':${Ns},'m':${ms}, 'seed':${seeds}, 'outfile':['robustness_1_dv.feather'], 'y_explained_variance':${explained_variances}, 'proxy_formula':['w_pred~y']}\" --outfile robustness_1_dv_jobs"
-
+       ${srun} grid_sweep.py --command 'Rscript 04_depvar_differential.R' --arg_dict '{"N":${Ns},"Bxy":[0.7],"Bzy":[-0.7],"m":${ms}, "seed":${seeds}, "outfile":["robustness_1_dv.feather"], "proxy_formula":["w_pred~y"],"z_bias":[0.5]}' --outfile robustness_1_dv_jobs
 
 robustness_1_dv.feather: robustness_1_dv_jobs
        rm -f robustness_1_dv.feather
-       sbatch --wait --verbose --array=1-$(shell cat example_3_jobs | wc -l)  run_simulation.sbatch 0 robustness_1_dv_jobs
+       $(eval END_1!=cat robustness_1_dv_jobs | wc -l)
+       $(eval ITEMS_1!=seq $(START) $(STEP) $(END_1))
+       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_1_dv_jobs;)
 
 
 robustness_1_dv.RDS: robustness_1_dv.feather
        rm -f $@
-       ${srun} Rscript plot_dv_example.R --infile $< --name "robustness_1_dv" --outfile $@
+       ${srun} Rscript plot_dv_example.R --infile $< --name "robustness_1_dv" --remember-file $@
 
 
 robustness_2_jobs_p1: grid_sweep.py 01_two_covariates.R simulation_base.R grid_sweep.py
@@ -166,59 +177,59 @@ robustness_2_jobs_p4: grid_sweep.py 01_two_covariates.R simulation_base.R grid_s
        rm -f $@
        ${srun} $< --command 'Rscript 01_two_covariates.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_2.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.90,0.95]}' --outfile $@
 
-START=0
-END_1=$(shell cat robustness_2_jobs_p1 | wc -l)
-END_2=$(shell cat robustness_2_jobs_p2 | wc -l)
-END_3=$(shell cat robustness_2_jobs_p3 | wc -l)
-END_4=$(shell cat robustness_2_jobs_p4 | wc -l)
-STEP=1000
-ONE=1
-ITEMS_1=$(shell seq $(START) $(STEP) $(END_1))
-ITEMS_2=$(shell seq $(START) $(STEP) $(END_2))
-ITEMS_3=$(shell seq $(START) $(STEP) $(END_3))
-ITEMS_4=$(shell seq $(START) $(STEP) $(END_4))
-
 robustness_2.feather: robustness_2_jobs_p1 robustness_2_jobs_p2 robustness_2_jobs_p3 robustness_2_jobs_p4
-       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_jobs_p1)
+       $(eval END_1!=cat robustness_2_jobs_p1 | wc -l)
+       $(eval ITEMS_1!=seq $(START) $(STEP) $(END_1))
+       $(eval END_2!=cat robustness_2_jobs_p2 | wc -l)
+       $(eval ITEMS_2!=seq $(START) $(STEP) $(END_2))
+       $(eval END_3!=cat robustness_2_jobs_p3 | wc -l)
+       $(eval ITEMS_3!=seq $(START) $(STEP) $(END_3))
+       $(eval END_4!=cat robustness_2_jobs_p4 | wc -l)
+       $(eval ITEMS_4!=seq $(START) $(STEP) $(END_4))
+
+       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_jobs_p1;)
        $(foreach item,$(ITEMS_2),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_jobs_p2;)
        $(foreach item,$(ITEMS_3),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_jobs_p3;)
        $(foreach item,$(ITEMS_4),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_jobs_p4;)
 
+robustness_2.RDS: plot_example.R robustness_2.feather 
+       rm -f $@
+       ${srun} Rscript $< --infile $(word 2, $^) --name "robustness_2" --remember-file $@
 
 robustness_2_dv_jobs_p1: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_2.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.60,0.65]}' --outfile $@
+       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_2_dv.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.60,0.65]}' --outfile $@
 
 robustness_2_dv_jobs_p2: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_2.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.70,0.75]}' --outfile $@
+       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_2_dv.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.70,0.75]}' --outfile $@
 
 robustness_2_dv_jobs_p3: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_2.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.80,0.85]}' --outfile $@
+       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_2_dv.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.80,0.85]}' --outfile $@
 
 robustness_2_dv_jobs_p4: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 01_two_covariates.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_2.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.90,0.95]}' --outfile $@
-
-START=0
-END_1=$(shell cat robustness_2_dv_jobs_p1 | wc -l)
-END_2=$(shell cat robustness_2_dv_jobs_p2 | wc -l)
-END_3=$(shell cat robustness_2_dv_jobs_p3 | wc -l)
-END_4=$(shell cat robustness_2_dv_jobs_p4 | wc -l)
-STEP=1000
-ONE=1
-ITEMS_1=$(shell seq $(START) $(STEP) $(END_1))
-ITEMS_2=$(shell seq $(START) $(STEP) $(END_2))
-ITEMS_3=$(shell seq $(START) $(STEP) $(END_3))
-ITEMS_4=$(shell seq $(START) $(STEP) $(END_4))
+       ${srun} $< --command 'Rscript 01_two_covariates.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_2_dv.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.90,0.95]}' --outfile $@
 
 robustness_2_dv.feather: robustness_2_dv_jobs_p1 robustness_2_dv_jobs_p2 robustness_2_dv_jobs_p3 robustness_2_dv_jobs_p4
 
-       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_dv_jobs_p1)
+       $(eval END_1!=cat robustness_2_dv_jobs_p1 | wc -l)
+       $(eval ITEMS_1!=seq $(START) $(STEP) $(END_1))
+       $(eval END_2!=cat robustness_2_dv_jobs_p2 | wc -l)
+       $(eval ITEMS_2!=seq $(START) $(STEP) $(END_2))
+       $(eval END_3!=cat robustness_2_dv_jobs_p3 | wc -l)
+       $(eval ITEMS_3!=seq $(START) $(STEP) $(END_3))
+       $(eval END_4!=cat robustness_2_dv_jobs_p4 | wc -l)
+       $(eval ITEMS_4!=seq $(START) $(STEP) $(END_4))
+
+       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_dv_jobs_p1;)
        $(foreach item,$(ITEMS_2),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_dv_jobs_p2;)
        $(foreach item,$(ITEMS_3),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_dv_jobs_p3;)
        $(foreach item,$(ITEMS_4),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_2_dv_jobs_p4;)
 
+robustness_2_dv.RDS: plot_example.R robustness_2_dv.feather 
+       rm -f $@
+       ${srun} Rscript $< --infile $(word 2, $^) --name "robustness_2_dv" --remember-file $@
 
 
 robustness_3_jobs_p1: grid_sweep.py 01_two_covariates.R simulation_base.R grid_sweep.py
@@ -233,125 +244,131 @@ robustness_3_jobs_p3: grid_sweep.py 01_two_covariates.R simulation_base.R grid_s
        rm -f $@
        ${srun} $< --command 'Rscript 01_two_covariates.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_3.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3],"Px":[0.9,0.95], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.85]}' --outfile $@
 
-START=0
-END_1=$(shell cat robustness_3_jobs_p1 | wc -l)
-END_2=$(shell cat robustness_3_jobs_p2 | wc -l)
-END_3=$(shell cat robustness_3_jobs_p3 | wc -l)
-
-STEP=1000
-ONE=1
-ITEMS_1=$(shell seq $(START) $(STEP) $(END_1))
-ITEMS_2=$(shell seq $(START) $(STEP) $(END_2))
-ITEMS_3=$(shell seq $(START) $(STEP) $(END_3))
-
 robustness_3.feather: robustness_3_jobs_p1 robustness_3_jobs_p2 robustness_3_jobs_p3
-       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_3_jobs_p1)
+       $(eval END_1!=cat robustness_3_jobs_p1 | wc -l)
+       $(eval ITEMS_1!=seq $(START) $(STEP) $(END_1))
+       $(eval END_2!=cat robustness_3_jobs_p2 | wc -l)
+       $(eval ITEMS_2!=seq $(START) $(STEP) $(END_2))
+       $(eval END_3!=cat robustness_3_jobs_p3 | wc -l)
+       $(eval ITEMS_3!=seq $(START) $(STEP) $(END_3))
+
+       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_3_jobs_p1;)
        $(foreach item,$(ITEMS_2),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_3_jobs_p2;)
        $(foreach item,$(ITEMS_3),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_3_jobs_p3;)
 
+robustness_3.RDS: plot_example.R robustness_3.feather 
+       rm -f $@
+       ${srun} Rscript $< --infile $(word 2, $^) --name "robustness_3" --remember-file $@
 
 robustness_3_dv_jobs_p1: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_3.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3],"B0":[0.5,0.6], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85]}' --outfile $@
+       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_3_dv.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3],"B0":[0.5,0.6], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85]}' --outfile $@
+
+
 
 robustness_3_dv_jobs_p2: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_3.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3],"B0":[0.7,0.8], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85]}' --outfile $@
+       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_3_dv.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3],"B0":[0.7,0.8], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85]}' --outfile $@
+
 
 robustness_3_dv_jobs_p3: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_3.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "B0":[0.9,0.95], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85]}' --outfile $@
-
-START=0
-END_1=$(shell cat robustness_3_dv_jobs_p1 | wc -l)
-END_2=$(shell cat robustness_3_dv_jobs_p2 | wc -l)
-END_3=$(shell cat robustness_3_dv_jobs_p3 | wc -l)
-
-STEP=1000
-ONE=1
-ITEMS_1=$(shell seq $(START) $(STEP) $(END_1))
-ITEMS_2=$(shell seq $(START) $(STEP) $(END_2))
-ITEMS_3=$(shell seq $(START) $(STEP) $(END_3))
+       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_3_dv.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "B0":[0.9,0.95], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85]}' --outfile $@
 
 robustness_3_dv.feather: robustness_3_dv_jobs_p1 robustness_3_dv_jobs_p2 robustness_3_dv_jobs_p3
-       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_3_dv_jobs_p1)
+       $(eval END_1!=cat robustness_3_dv_jobs_p1 | wc -l)
+       $(eval ITEMS_1!=seq $(START) $(STEP) $(END_1))
+       $(eval END_2!=cat robustness_3_dv_jobs_p2 | wc -l)
+       $(eval ITEMS_2!=seq $(START) $(STEP) $(END_2))
+       $(eval END_3!=cat robustness_3_dv_jobs_p3 | wc -l)
+       $(eval ITEMS_3!=seq $(START) $(STEP) $(END_3))
+
+       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_3_dv_jobs_p1;)
        $(foreach item,$(ITEMS_2),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_3_dv_jobs_p2;)
-       $(foreach item,$(ITEMS_3),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_3_dv_jobs_p3;)
+        $(foreach item,$(ITEMS_3),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_3_dv_jobs_p3;)
 
 
 
 
+robustness_3_dv.RDS: plot_dv_example.R robustness_3_dv.feather 
+       rm -f $@
+       ${srun} Rscript $< --infile $(word 2, $^) --name "robustness_3_dv" --remember-file $@
+
 
 robustness_4_jobs_p1: grid_sweep.py 02_indep_differential.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 02_indep_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.85],y_bias=[-1,-0.85]}' --outfile $@
+       ${srun} $< --command 'Rscript 02_indep_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.85],"y_bias":[-1,-0.85]}' --outfile $@
 
 robustness_4_jobs_p2: grid_sweep.py 02_indep_differential.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 02_indep_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.85], y_bias=[-0.70,-0.55]}' --outfile $@
+       ${srun} $< --command 'Rscript 02_indep_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.85], "y_bias":[-0.70,-0.55]}' --outfile $@
 
 robustness_4_jobs_p3: grid_sweep.py 02_indep_differential.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 02_indep_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.85],y_bias=[-0.4,-0.25]}' --outfile $@
+       ${srun} $< --command 'Rscript 02_indep_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.85],"y_bias":[-0.4,-0.25]}' --outfile $@
 
 robustness_4_jobs_p4: grid_sweep.py 02_indep_differential.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 02_indep_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.85],y_bias=[-0.1,0]}' --outfile $@
-
-START=0
-END_1=$(shell cat robustness_4_jobs_p1 | wc -l)
-END_2=$(shell cat robustness_4_jobs_p2 | wc -l)
-END_3=$(shell cat robustness_4_jobs_p3 | wc -l)
-END_4=$(shell cat robustness_4_jobs_p3 | wc -l)
-
-STEP=1000
-ONE=1
-ITEMS_1=$(shell seq $(START) $(STEP) $(END_1))
-ITEMS_2=$(shell seq $(START) $(STEP) $(END_2))
-ITEMS_3=$(shell seq $(START) $(STEP) $(END_3))
-ITEMS_4=$(shell seq $(START) $(STEP) $(END_4))
+       ${srun} $< --command 'Rscript 02_indep_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y+x"], "truth_formula":["x~z"], "prediction_accuracy":[0.85],"y_bias":[-0.1,0]}' --outfile $@
 
 robustness_4.feather: robustness_4_jobs_p1 robustness_4_jobs_p2 robustness_4_jobs_p3
-       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_4_jobs_p1)
+       $(eval END_1!=cat robustness_4_jobs_p1 | wc -l)
+       $(eval ITEMS_1!=seq $(START) $(STEP) $(END_1))
+       $(eval END_2!=cat robustness_4_jobs_p2 | wc -l)
+       $(eval ITEMS_2!=seq $(START) $(STEP) $(END_2))
+       $(eval END_3!=cat robustness_4_jobs_p3 | wc -l)
+       $(eval ITEMS_3!=seq $(START) $(STEP) $(END_3))
+
+       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_4_jobs_p1;)
        $(foreach item,$(ITEMS_2),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_4_jobs_p2;)
        $(foreach item,$(ITEMS_3),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_4_jobs_p3;)
 
-
-robustness_4_dv_jobs_p1: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
+robustness_4.RDS: plot_example.R robustness_4.feather 
        rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3],"B0":[0.5] "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85],z_bias=[0,0.1]}' --outfile $@
+       ${srun} Rscript $< --infile $(word 2, $^) --name "robustness_4" --remember-file $@
 
 
-robustness_4_dv_jobs_p2: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
-       rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3],"B0":[0.5] "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85],z_bias=[0.25,0.4]}' --outfile $@
 
 
-robustness_4_dv_jobs_p3: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
-       rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "B0":[0.5], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85],z_bias=[0.55,0.7]}' --outfile $@
-robustness_4_dv_jobs_p4: grid_sweep.py 03_depvar.R simulation_base.R grid_sweep.py
+# '{"N":${Ns},"Bxy":[0.7],"Bzy":[-0.7],"m":${ms}, "seed":${seeds}, "outfile":["example_4.feather"], "z_bias":[0.5]}' --outfile example_4_jobs
+
+robustness_4_dv_jobs_p1: grid_sweep.py 04_depvar_differential.R simulation_base.R grid_sweep.py
        rm -f $@
-       ${srun} $< --command 'Rscript 03_depvar.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "B0":[0.5], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85],z_bias=[0.85,1]}' --outfile $@
+       ${srun} $< --command 'Rscript 04_depvar_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4_dv.feather"], "Bzy":[-0.7],"Bxy":[0.7], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85],"z_bias":[0,0.1]}' --outfile $@
 
 
+robustness_4_dv_jobs_p2: grid_sweep.py 04_depvar_differential.R simulation_base.R grid_sweep.py
+       rm -f $@
+       ${srun} $< --command 'Rscript 04_depvar_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4_dv.feather"], "Bzy":[-0.7],"Bxy":[0.7], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85],"z_bias":[0.25,0.4]}' --outfile $@
 
 
-START=0
-END_1=$(shell cat robustness_4_dv_jobs_p1 | wc -l)
-END_2=$(shell cat robustness_4_dv_jobs_p2 | wc -l)
-END_3=$(shell cat robustness_4_dv_jobs_p3 | wc -l)
+robustness_4_dv_jobs_p3: grid_sweep.py 04_depvar_differential.R simulation_base.R grid_sweep.py
+       rm -f $@
+       ${srun} $< --command 'Rscript 04_depvar_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4_dv.feather"], "Bzy":[-0.7],"Bxy":[0.7],"outcome_formula":["y~x+z"], "prediction_accuracy":[0.85],"z_bias":[0.55,0.7]}' --outfile $@
 
 
-STEP=1000
-ONE=1
-ITEMS_1=$(shell seq $(START) $(STEP) $(END_1))
-ITEMS_2=$(shell seq $(START) $(STEP) $(END_2))
-ITEMS_3=$(shell seq $(START) $(STEP) $(END_3))
+robustness_4_dv_jobs_p4: grid_sweep.py 04_depvar_differential.R simulation_base.R grid_sweep.py
+       rm -f $@
+       ${srun} $< --command 'Rscript 04_depvar_differential.R'  --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["robustness_4_dv.feather"],"Bzy":[-0.7],"Bxy":[0.7], "outcome_formula":["y~x+z"], "prediction_accuracy":[0.85],"z_bias":[0.85,1]}' --outfile $@
 
 robustness_4_dv.feather: robustness_4_dv_jobs_p1 robustness_4_dv_jobs_p2 robustness_4_dv_jobs_p3
-       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_4_dv_jobs_p1)
+       $(eval END_1!=cat robustness_4_dv_jobs_p1 | wc -l)
+       $(eval ITEMS_1!=seq $(START) $(STEP) $(END_1))
+       $(eval END_2!=cat robustness_4_dv_p2 | wc -l)
+       $(eval ITEMS_2!=seq $(START) $(STEP) $(END_2))
+       $(eval END_3!=cat robustness_4_dv_p3 | wc -l)
+       $(eval ITEMS_3!=seq $(START) $(STEP) $(END_3))
+
+       $(foreach item,$(ITEMS_1),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_4_dv_jobs_p1;)
        $(foreach item,$(ITEMS_2),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_4_dv_jobs_p2;)
        $(foreach item,$(ITEMS_3),sbatch --wait --verbose --array=$(shell expr $(item) + $(ONE))-$(shell expr $(item) + $(STEP)) run_simulation.sbatch 0 robustness_4_dv_jobs_p3;)
 
+
+robustness_4_dv.RDS: plot_dv_example.R robustness_4_dv.feather 
+       rm -f $@
+       ${srun} Rscript $< --infile $(word 2, $^) --name "robustness_4" --remember-file $@
+
 #      
 clean:
        rm *.feather
        rm -f remembr.RDS
+       rm -f remembr*.RDS
+       rm -f robustness*.RDS
        rm -f example_*_jobs
+       rm -f robustness_*_jobs_*
 #      sbatch --wait --verbose --array=3001-6001 run_simulation.sbatch 0 example_2_B_jobs
 
 # example_2_B_mecor_jobs:
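The `$(eval ...)`/`$(foreach ...)` pattern used throughout the new Makefile rules replaces the hard-coded `--array=1-1000`, `1001-2000`, ... submissions: the job list is counted with `wc -l` when the recipe runs, and the list is submitted in blocks of `STEP`=1000 rows. The arithmetic is simple; an R sketch of the `--array` ranges produced for a hypothetical job count (note the final block can extend past the end of the list, just as in the Makefile):

    n_jobs <- 4321                        # stand-in for `cat robustness_1_jobs | wc -l`
    starts <- seq(0, n_jobs, by = 1000)   # what `seq $(START) $(STEP) $(END)` yields
    paste0(starts + 1, "-", starts + 1000)
    # "1-1000" "1001-2000" "2001-3000" "3001-4000" "4001-5000"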
diff --git a/simulations/grid_sweep.py b/simulations/grid_sweep.py
index 7db920d2099577b0d24ec2c20b4704a98d682a45..b4287299938bb4ecae82db12dc830bc1e340809c 100755 (executable)
@@ -5,6 +5,7 @@ from itertools import product
 import pyRemembeR
 
 def main(command, arg_dict, outfile, remember_file='remember_grid_sweep.RDS'):
+    print(remember_file)
     remember = pyRemembeR.remember.Remember()
     remember.set_file(remember_file)
     remember[outfile] = arg_dict
diff --git a/simulations/measerr_methods.R b/simulations/measerr_methods.R
index 63f8bc19df116b2f6d148c55cefbd89cf2dc7b85..fdc4978b72e32c7988634fb2265681c61a033b96 100644 (file)
@@ -19,14 +19,29 @@ library(bbmle)
 
 ## outcome_formula <- y ~ x + z; proxy_formula <- w_pred ~ y + x + z + x:z + x:y + z:y 
 measerr_mle_dv <- function(df, outcome_formula, outcome_family=binomial(link='logit'), proxy_formula, proxy_family=binomial(link='logit'),method='optim'){
+    df.obs <- model.frame(outcome_formula, df)
+    proxy.model.matrix <- model.matrix(proxy_formula, df)
+    proxy.variable <- all.vars(proxy_formula)[1]
+
+    df.proxy.obs <- model.frame(proxy_formula,df)
+    proxy.obs <- with(df.proxy.obs, eval(parse(text=proxy.variable)))
+
+    response.var <- all.vars(outcome_formula)[1]
+    y.obs <- with(df.obs,eval(parse(text=response.var)))
+    outcome.model.matrix <- model.matrix(outcome_formula, df.obs)
+
+    df.unobs <- df[is.na(df[[response.var]])]
+    df.unobs.y1 <- copy(df.unobs)
+    df.unobs.y1[[response.var]] <- 1
+    df.unobs.y0 <- copy(df.unobs)
+    df.unobs.y0[[response.var]] <- 0
+
+    outcome.model.matrix.y1 <- model.matrix(outcome_formula, df.unobs.y1)
+    proxy.model.matrix.y1 <- model.matrix(proxy_formula, df.unobs.y1)
+    proxy.model.matrix.y0 <- model.matrix(proxy_formula, df.unobs.y0)
+    proxy.unobs <- with(df.unobs, eval(parse(text=proxy.variable)))
 
     nll <- function(params){
 
     nll <- function(params){
-        df.obs <- model.frame(outcome_formula, df)
-        proxy.variable <- all.vars(proxy_formula)[1]
-        proxy.model.matrix <- model.matrix(proxy_formula, df)
-        response.var <- all.vars(outcome_formula)[1]
-        y.obs <- with(df.obs,eval(parse(text=response.var)))
-        outcome.model.matrix <- model.matrix(outcome_formula, df.obs)
 
         param.idx <- 1
         n.outcome.model.covars <- dim(outcome.model.matrix)[2]
 
         param.idx <- 1
         n.outcome.model.covars <- dim(outcome.model.matrix)[2]
@@ -39,12 +54,9 @@ measerr_mle_dv <- function(df, outcome_formula, outcome_family=binomial(link='lo
             ll.y.obs[y.obs==0] <- plogis(outcome.params %*% t(outcome.model.matrix[y.obs==0,]),log=TRUE,lower.tail=FALSE)
         }
 
             ll.y.obs[y.obs==0] <- plogis(outcome.params %*% t(outcome.model.matrix[y.obs==0,]),log=TRUE,lower.tail=FALSE)
         }
 
-        df.obs <- model.frame(proxy_formula,df)
         n.proxy.model.covars <- dim(proxy.model.matrix)[2]
         proxy.params <- params[param.idx:(n.proxy.model.covars+param.idx-1)]
         n.proxy.model.covars <- dim(proxy.model.matrix)[2]
         proxy.params <- params[param.idx:(n.proxy.model.covars+param.idx-1)]
-
         param.idx <- param.idx + n.proxy.model.covars
         param.idx <- param.idx + n.proxy.model.covars
-        proxy.obs <- with(df.obs, eval(parse(text=proxy.variable)))
 
         if( (proxy_family$family=="binomial") & (proxy_family$link=='logit')){
             ll.w.obs <- vector(mode='numeric',length=dim(proxy.model.matrix)[1])
 
         if( (proxy_family$family=="binomial") & (proxy_family$link=='logit')){
             ll.w.obs <- vector(mode='numeric',length=dim(proxy.model.matrix)[1])
@@ -53,15 +65,8 @@ measerr_mle_dv <- function(df, outcome_formula, outcome_family=binomial(link='lo
         }
 
         ll.obs <- sum(ll.y.obs + ll.w.obs)
         }
 
         ll.obs <- sum(ll.y.obs + ll.w.obs)
-
-        df.unobs <- df[is.na(df[[response.var]])]
-        df.unobs.y1 <- copy(df.unobs)
-        df.unobs.y1[[response.var]] <- 1
-        df.unobs.y0 <- copy(df.unobs)
-        df.unobs.y0[[response.var]] <- 0
         
         ## integrate out y
         
         ## integrate out y
-        outcome.model.matrix.y1 <- model.matrix(outcome_formula, df.unobs.y1)
 
         if((outcome_family$family == "binomial") & (outcome_family$link == 'logit')){
             ll.y.unobs.1 <- vector(mode='numeric', length=dim(outcome.model.matrix.y1)[1])
 
         if((outcome_family$family == "binomial") & (outcome_family$link == 'logit')){
             ll.y.unobs.1 <- vector(mode='numeric', length=dim(outcome.model.matrix.y1)[1])
@@ -70,10 +75,6 @@ measerr_mle_dv <- function(df, outcome_formula, outcome_family=binomial(link='lo
             ll.y.unobs.0 <- plogis(outcome.params %*% t(outcome.model.matrix.y1),log=TRUE,lower.tail=FALSE)
         }
 
             ll.y.unobs.0 <- plogis(outcome.params %*% t(outcome.model.matrix.y1),log=TRUE,lower.tail=FALSE)
         }
 
-        proxy.model.matrix.y1 <- model.matrix(proxy_formula, df.unobs.y1)
-        proxy.model.matrix.y0 <- model.matrix(proxy_formula, df.unobs.y0)
-        proxy.unobs <- with(df.unobs, eval(parse(text=proxy.variable)))
-
         if( (proxy_family$family=="binomial") & (proxy_family$link=='logit')){
             ll.w.unobs.1 <- vector(mode='numeric',length=dim(proxy.model.matrix.y1)[1])
             ll.w.unobs.0 <- vector(mode='numeric',length=dim(proxy.model.matrix.y0)[1])
@@ -431,7 +432,7 @@ measerr_irr_mle <- function(df, outcome_formula, outcome_family=gaussian(), code
 ## Experimental, and does not work.
 measerr_irr_mle_dv <- function(df, outcome_formula, outcome_family=binomial(link='logit'), coder_formulas=list(y.obs.0~y+w_pred+y.obs.1,y.obs.1~y+w_pred+y.obs.0), proxy_formula=w_pred~y, proxy_family=binomial(link='logit'),method='optim'){
     integrate.grid <- expand.grid(replicate(1 + length(coder_formulas), c(0,1), simplify=FALSE))
-    print(integrate.grid)
+#    print(integrate.grid)
 
 
     outcome.model.matrix <- model.matrix(outcome_formula, df)
@@ -527,8 +528,8 @@ measerr_irr_mle_dv <- function(df, outcome_formula, outcome_family=binomial(link
 
             ## likelihood of observed data 
             target <- -1 * sum(lls)
-            print(target)
-            print(params)
+#            print(target)
+#            print(params)
             return(target)
         }
     }
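The main change to `measerr_mle_dv` above is hoisting the model frames, model matrices, and the y=1/y=0 copies of the unlabeled rows out of `nll`, so they are computed once rather than on every objective evaluation inside the optimizer. The general shape of that refactor, as a minimal sketch (hypothetical logistic likelihood and a toy `df` with columns `y`, `x`, `z`; not the repository's full model):

    # precompute everything that depends only on the data ...
    X <- model.matrix(y ~ x + z, df)
    y <- df$y
    nll <- function(params){
        # ... so the objective only does per-iteration arithmetic
        eta <- as.vector(X %*% params)
        -sum(dbinom(y, 1, plogis(eta), log = TRUE))
    }
    fit <- optim(rep(0, ncol(X)), nll, method = "BFGS", hessian = TRUE)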
diff --git a/simulations/pl_methods.R b/simulations/pl_methods.R
index b3007d12579dde9d358fe850fb2b86def89ed7b2..f014eec1b8f00479417ad9181d6c6bc5127a6af0 100644 (file)
@@ -31,8 +31,8 @@ zhang.mle.dv <- function(df){
         (1-w_pred) * (log(1-fpr) - exp(log(1-fnr-fpr)+pi.y.1)))))
     
         ll <- ll + sum(lls)
-        print(paste0(B0,Bxy,Bzy))
-        print(ll)
+#        print(paste0(B0,Bxy,Bzy))
+#        print(ll)
         return(-ll)
     }    
     mlefit <- mle2(minuslogl = nll, control=list(maxit=1e6),method='L-BFGS-B',lower=c(B0=-Inf, Bxy=-Inf, Bzy=-Inf),
diff --git a/simulations/robustness_check_notes.md b/simulations/robustness_check_notes.md
index 1c786e994f47dd36f596942d1e2d1f5e2e8157f1..64a472d17bd54b14e426d943952f2122ba4cb4ea 100644 (file)
@@ -10,11 +10,11 @@ Like `robustness\_1.RDS` but with a less precise model for $w_pred$.  In the mai
 
 # robustness_2.RDS
 
-This is just example 1 with varying levels of classifier accuracy. 
+This is just example 1 with varying levels of classifier accuracy indicated by the `prediction_accuracy` variable.
 
 # robustness_2_dv.RDS
 
-Example 3 with varying levels of classifier accuracy
+Example 3 with varying levels of classifier accuracy indicated by the `prediction_accuracy` variable.
 
 # robustness_3.RDS
 
diff --git a/simulations/run_job.sbatch b/simulations/run_job.sbatch
new file mode 100644 (file)
index 0000000..3ff4f80
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/bash
+#SBATCH --job-name="simulate measurement error models"
+## Allocation Definition
+#SBATCH --account=comdata
+#SBATCH --partition=compute-bigmem,compute-hugemem
+## Resources
+#SBATCH --nodes=1    
+## Walltime (4 hours)
+#SBATCH --time=4:00:00
+## Memory per node
+#SBATCH --mem=4G
+#SBATCH --cpus-per-task=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --chdir /gscratch/comdata/users/nathante/ml_measurement_error_public/simulations
+#SBATCH --output=simulation_jobs/%A_%a.out
+#SBATCH --error=simulation_jobs/%A_%a.err
+"$@"
diff --git a/simulations/simulation_base.R b/simulations/simulation_base.R
index 08b11ec9595a49a34553b3b1eaaaa3fb1463e27f..e715edfaf61b9b1f423001808187ad66828a43b0 100644 (file)
@@ -180,26 +180,35 @@ run_simulation_depvar <- function(df, result, outcome_formula=y~x+z, proxy_formu
     
 
     # amelia says use normal distribution for binary variables.
-
-    amelia.out.k <- amelia(df, m=200, p2s=0, idvars=c('y','ystar','w'))
-    mod.amelia.k <- zelig(y.obs~x+z, model='ls', data=amelia.out.k$imputations, cite=FALSE)
-    (coefse <- combine_coef_se(mod.amelia.k, messages=FALSE))
-    est.x.mi <- coefse['x','Estimate']
-    est.x.se <- coefse['x','Std.Error']
-    result <- append(result,
-                     list(Bxy.est.amelia.full = est.x.mi,
+    amelia_result <- list(Bxy.est.amelia.full = NA,
+                          Bxy.ci.upper.amelia.full = NA,
+                          Bxy.ci.lower.amelia.full = NA,
+                          Bzy.est.amelia.full = NA,
+                          Bzy.ci.upper.amelia.full = NA,
+                          Bzy.ci.lower.amelia.full = NA
+                          )
+
+    tryCatch({
+        amelia.out.k <- amelia(df, m=200, p2s=0, idvars=c('y','ystar','w'))
+        mod.amelia.k <- zelig(y.obs~x+z, model='ls', data=amelia.out.k$imputations, cite=FALSE)
+        (coefse <- combine_coef_se(mod.amelia.k, messages=FALSE))
+        est.x.mi <- coefse['x','Estimate']
+        est.x.se <- coefse['x','Std.Error']
+
+        est.z.mi <- coefse['z','Estimate']
+        est.z.se <- coefse['z','Std.Error']
+        amelia_result <- list(Bxy.est.amelia.full = est.x.mi,
                           Bxy.ci.upper.amelia.full = est.x.mi + 1.96 * est.x.se,
-                          Bxy.ci.lower.amelia.full = est.x.mi - 1.96 * est.x.se
-                          ))
-
-    est.z.mi <- coefse['z','Estimate']
-    est.z.se <- coefse['z','Std.Error']
-
-    result <- append(result,
-                     list(Bzy.est.amelia.full = est.z.mi,
+                          Bxy.ci.lower.amelia.full = est.x.mi - 1.96 * est.x.se,
+                          Bzy.est.amelia.full = est.z.mi,
                           Bzy.ci.upper.amelia.full = est.z.mi + 1.96 * est.z.se,
                           Bzy.ci.lower.amelia.full = est.z.mi - 1.96 * est.z.se
-                          ))
+                          )
+    },
+    error = function(e){
+    result[['error']] <- e}
+    )
+    result <- append(result,amelia_result)
 
     return(result)
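One caveat on the new `tryCatch` wrapper around the Amelia block: `result[['error']] <- e` inside the `error` handler modifies a copy of `result` that is local to the handler function, so the enclosing `result` does not actually record the error; what survives a failure are the `NA` placeholders in `amelia_result`. A sketch of one way to capture the error in the enclosing list (illustrative, not the repository's code):

    result <- list()
    amelia_result <- list(Bxy.est.amelia.full = NA)     # NA placeholders, as above
    amelia_result <- tryCatch({
        stop("imputation failed")                       # stand-in for the amelia()/zelig() calls
        list(Bxy.est.amelia.full = 1)                   # returned on success
    },
    error = function(e){
        result[['error']] <<- conditionMessage(e)       # `<<-` reaches the enclosing `result`
        amelia_result                                   # keep the NA placeholders
    })
    result <- append(result, amelia_result)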
 
 