update real data examples code and rerun project.
index 636c423710ed3ba82e94a903dbd7c49cf4e8a344..d6ef4927ca460494e6f19e9063143b5c8aa2a834 100644 (file)
@@ -39,3 +39,98 @@ df <- df[,":="(gender_disclosed = dt.apply.any(gt.0.5, male, female, transgender
                disability_disclosed = dt.apply.any(gt.0.5,physical_disability, intellectual_or_learning_disability, psychiatric_or_mental_illness, other_disability))]
 
 df <- df[,white:=gt.0.5(white)]
+
+
+## F1 score for binary labels y and predictions coded as 0/1.
+F1 <- function(y, predictions){
+    tp <- sum( (predictions == y) & (predictions==1)) # true positives
+    fn <- sum( (predictions != y) & (predictions!=1)) # false negatives
+    fp <- sum( (predictions != y) & (predictions==1)) # false positives
+    precision <- tp / (tp + fp)
+    recall <- tp / (tp + fn)
+    return ((2 * precision * recall) / (precision + recall))
+}
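+
+## quick sanity check of F1 on toy data (an illustrative addition, not part of the original script):
+## one true positive, one false negative, and zero false positives give
+## precision = 1 and recall = 0.5, so F1 should be 2/3.
+stopifnot(isTRUE(all.equal(F1(y = c(1,1,0,0), predictions = c(1,0,0,0)), 2/3)))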
+
+
+## toxicity is about 93% accurate, with an f1 of 0.8.
+## identity_attack has high accuracy (97%), but an unfortunate f1 of 0.5.
+## threat has high accuracy (99%), but a really bad-looking f1 of 0.48.
+accuracies <- df[,.(identity_attack_acc = mean(identity_attack_pred == identity_attack_coded),
+                    insult_acc = mean(insult_pred == insult_coded),
+                    profanity_acc = mean(profanity_pred == profanity_coded),
+                    severe_toxicity_acc = mean(severe_toxicity_pred == severe_toxicity_coded),
+                    threat_acc = mean(threat_pred == threat_coded),
+                    toxicity_acc = mean(toxicity_pred == toxicity_coded))]
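+
+## for context on those accuracies: with labels this rare, always predicting the
+## negative class already scores very well. (an illustrative baseline, not in the
+## original script; it assumes the *_coded columns are 0/1.)
+majority_baselines <- df[,.(identity_attack_base = mean(identity_attack_coded == 0),
+                            threat_base = mean(threat_coded == 0),
+                            toxicity_base = mean(toxicity_coded == 0))]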
+
+f1s <- df[,.(identity_attack_f1 = F1(identity_attack_coded,identity_attack_pred),
+             insult_f1 = F1(insult_coded,insult_pred),
+             profanity_f1 = F1(profanity_coded,profanity_pred),
+             severe_toxicity_f1 = F1(severe_toxicity_coded,severe_toxicity_pred),
+             threat_f1 = F1(threat_coded,threat_pred),
+             toxicity_f1 = F1(toxicity_coded,toxicity_pred))]
+
+positive_cases <- df[,.(identity_attacks = sum(identity_attack_coded),
+                        insults = sum(insult_coded),
+                        profanities = sum(profanity_coded),
+                        severe_toxic_comments = sum(severe_toxicity_coded),
+                        threats = sum(threat_coded),
+                        toxic_comments = sum(toxicity_coded))]
+
+## there are 50,000 toxic comments, 13,000 identity attacks, 30,000 insults, 3,000 profanities, 8 severe toxic comments, and 1,000 threats.
+
+proportions_cases <- df[,.(prop_identity = mean(identity_attack_coded),
+                           prop_insults = mean(insult_coded),
+                           prop_profanity = mean(profanity_coded),
+                           prop_severe = mean(severe_toxicity_coded),
+                           prop_threats = mean(threat_coded),
+                           prop_toxic = mean(toxicity_coded))]
+
+## at 11% of comments, "toxicity" doesn't seem too badly skewed. Try toxicity first, and if that doesn't work out, try insults.
+
+## now look for an example where differential error affects an identity or a reaction.
+## error = coded - predicted, so +1 is a false negative, -1 is a false positive, and 0 is a correct prediction.
+df <- df[,":="(identity_error = identity_attack_coded - identity_attack_pred,
+               insult_error = insult_coded - insult_pred,
+               profanity_error = profanity_coded - profanity_pred,
+               severe_toxic_error = severe_toxicity_coded - severe_toxicity_pred,
+               threat_error = threat_coded - threat_pred,
+               toxicity_error = toxicity_coded - toxicity_pred)]
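+
+## tabulate toxicity_error to see the split between false negatives (+1) and
+## false positives (-1). (an illustrative check, not part of the original script.)
+print(df[, .N, keyby = toxicity_error])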
+
+## what's correlated with toxicity_error?
+df <- df[,approved := rating == "approved"]
+df <- df[,white := white > 0.5]
+
+cortab <- cor(df[,.(toxicity_error,
+                    identity_error,
+                    toxicity_coded,
+                    funny,
+                    approved,
+                    sad,
+                    wow,
+                    likes,
+                    disagree,
+                    male,
+                    female,
+                    transgender,
+                    other_gender,
+                    heterosexual,
+                    bisexual,
+                    other_sexual_orientation,
+                    christian,
+                    jewish,
+                    hindu,
+                    buddhist,
+                    atheist,
+                    other_religion,
+                    black,
+                    white,
+                    asian,
+                    latino,
+                    other_race_or_ethnicity,
+                    physical_disability,
+                    intellectual_or_learning_disability,
+                    psychiatric_or_mental_illness,
+                    other_disability,
+                    gender_disclosed,
+                    sexuality_disclosed,
+                    religion_disclosed,
+                    race_disclosed,
+                    disability_disclosed)])
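+
+## one way to read the table: rank the columns by the magnitude of their
+## correlation with toxicity_error. (an illustrative follow-up, not in the
+## original script.)
+print(sort(abs(cortab["toxicity_error",]), decreasing = TRUE))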
