diff --git a/R/tailor.R b/R/tailor.R
index 5186e33..7678acd 100644
--- a/R/tailor.R
+++ b/R/tailor.R
@@ -167,6 +167,9 @@ fit.tailor <- function(object, .data, outcome, estimate, probabilities = c(),
   if (any(c("probability", "everything") %in%
           purrr::map_chr(object$adjustments, purrr::pluck, "inputs"))) {
     check_selection(enquo(probabilities), columns$probabilities, "probabilities")
+    for (col in columns$probabilities) {
+      check_variable_type(.data[[col]], "probability", "probabilities")
+    }
   }
 
   .data <- .data[, names(.data) %in% unlist(columns)]
@@ -177,6 +180,9 @@ fit.tailor <- function(object, .data, outcome, estimate, probabilities = c(),
 
   object <- set_tailor_type(object, .data[[columns$outcome]])
 
+  check_variable_type(.data[[columns$outcome]], object$type, "outcome")
+  check_variable_type(.data[[columns$estimate]], object$type, "estimate")
+
   object <- new_tailor(
     object$type,
     adjustments = object$adjustments,
@@ -210,7 +216,7 @@ predict.tailor <- function(object, new_data, ...) {
 
 set_tailor_type <- function(object, y, call = caller_env()) {
   if (object$type != "unknown") {
-    check_outcome_type(y, object$type, call = call)
+    check_variable_type(y, object$type, "outcome", call = call)
     return(object)
   }
   if (is.factor(y)) {
diff --git a/R/utils.R b/R/utils.R
index 38ce547..378f30e 100644
--- a/R/utils.R
+++ b/R/utils.R
@@ -343,22 +343,26 @@ check_method <- function(method,
   method
 }
 
-# at `fit()` time, we check the type of the outcome vs the type
+# at `fit()` time, we check the type of inputted variables vs the type
 # supported by the applied adjustments. where this is called currently,
 # we know already that `type` is not "unknown"
-check_outcome_type <- function(outcome, type, call) {
-  outcome_is_compatible <-
+check_variable_type <- function(variable, type, description, call = caller_env()) {
+  if (identical(type, "unknown")) {
+    return()
+  }
+
+  is_compatible <-
     switch(
       type,
-      regression = is.numeric(outcome),
-      binary = , multiclass = is.factor(outcome),
+      probability = , regression = is.numeric(variable),
+      binary = , multiclass = is.factor(variable),
       FALSE
     )
 
-  if (!outcome_is_compatible) {
+  if (!is_compatible) {
     cli_abort(
       "Tailors with {type} adjustments are not compatible
-       with {.cls {class(outcome)}} outcomes.",
+       with a {.cls {class(variable)}} {.arg {description}}.",
       call = call
     )
   }
diff --git a/tests/testthat/_snaps/utils.md b/tests/testthat/_snaps/utils.md
index 7f78056..dd88c5c 100644
--- a/tests/testthat/_snaps/utils.md
+++ b/tests/testthat/_snaps/utils.md
@@ -45,7 +45,7 @@
         test_numeric), estimate = c(predicted), probabilities = c(Class1, Class2))
     Condition
       Error in `fit()`:
-      ! Tailors with binary adjustments are not compatible with <numeric> outcomes.
+      ! Tailors with binary adjustments are not compatible with a <numeric> `outcome`.
 
 ---
 
@@ -54,7 +54,7 @@
         outcome = c(truth), estimate = c(Class1))
     Condition
       Error in `fit()`:
-      ! Tailors with regression adjustments are not compatible with <factor> outcomes.
+      ! Tailors with regression adjustments are not compatible with a <factor> `outcome`.
 
 ---
 
@@ -63,7 +63,7 @@
         test_date), estimate = c(predicted), probabilities = c(Class1, Class2))
     Condition
       Error in `fit()`:
-      ! Tailors with binary adjustments are not compatible with <POSIXct/POSIXt> outcomes.
+      ! Tailors with binary adjustments are not compatible with a <POSIXct/POSIXt> `outcome`.
 
 ---
 
@@ -74,6 +74,51 @@
       Error in `fit()`:
       ! Only factor and numeric outcomes are currently supported.
 
+# fit.tailor() errors informatively with incompatible estimate
+
+    Code
+      fit(tailor() %>% adjust_probability_threshold(0.1), two_class_example, outcome = c(
+        predicted), estimate = c(test_numeric), probabilities = c(Class1, Class2))
+    Condition
+      Error in `fit()`:
+      ! Tailors with binary adjustments are not compatible with a <numeric> `estimate`.
+
+---
+
+    Code
+      fit(tailor() %>% adjust_numeric_range(lower_limit = 0.1), two_class_example,
+        outcome = c(Class1), estimate = c(truth))
+    Condition
+      Error in `fit()`:
+      ! Tailors with regression adjustments are not compatible with a <factor> `estimate`.
+
+---
+
+    Code
+      fit(tailor() %>% adjust_probability_threshold(0.1), two_class_example, outcome = c(
+        truth), estimate = c(test_date), probabilities = c(Class1, Class2))
+    Condition
+      Error in `fit()`:
+      ! Tailors with binary adjustments are not compatible with a <POSIXct/POSIXt> `estimate`.
+
+---
+
+    Code
+      fit(tailor() %>% adjust_predictions_custom(hey = "there"), two_class_example,
+        outcome = c(truth), estimate = c(test_date), probabilities = c(Class1))
+    Condition
+      Error in `fit()`:
+      ! Tailors with binary adjustments are not compatible with a <POSIXct/POSIXt> `estimate`.
+
+# fit.tailor() errors informatively with incompatible probability
+
+    Code
+      fit(tailor() %>% adjust_probability_threshold(0.1), two_class_example, outcome = c(
+        truth), estimate = c(predicted), probabilities = c(test_date))
+    Condition
+      Error in `fit()`:
+      ! Tailors with probability adjustments are not compatible with a <POSIXct/POSIXt> `probabilities`.
+
 # find_tune_id() works
 
     Code
diff --git a/tests/testthat/test-utils.R b/tests/testthat/test-utils.R
index f8a405c..49daa50 100644
--- a/tests/testthat/test-utils.R
+++ b/tests/testthat/test-utils.R
@@ -166,6 +166,80 @@ test_that("fit.tailor() errors informatively with incompatible outcome", {
   )
 })
 
+test_that("fit.tailor() errors informatively with incompatible estimate", {
+  skip_if_not_installed("modeldata")
+  library(modeldata)
+
+  two_class_example$test_numeric <- two_class_example$Class1 + 1
+  two_class_example$test_date <- as.POSIXct(two_class_example$Class1)
+
+  # supply a numeric estimate to a binary tailor
+  expect_snapshot(
+    error = TRUE,
+    fit(
+      tailor() %>% adjust_probability_threshold(.1),
+      two_class_example,
+      outcome = c(predicted),
+      estimate = c(test_numeric),
+      probabilities = c(Class1, Class2)
+    )
+  )
+
+  # supply a factor estimate to a regression tailor
+  expect_snapshot(
+    error = TRUE,
+    fit(
+      tailor() %>% adjust_numeric_range(lower_limit = .1),
+      two_class_example,
+      outcome = c(Class1),
+      estimate = c(truth)
+    )
+  )
+
+  # supply a totally wild estimate to a binary tailor
+  expect_snapshot(
+    error = TRUE,
+    fit(
+      tailor() %>% adjust_probability_threshold(.1),
+      two_class_example,
+      outcome = c(truth),
+      estimate = c(test_date),
+      probabilities = c(Class1, Class2)
+    )
+  )
+
+  # supply a totally wild estimate to an unknown tailor
+  expect_snapshot(
+    error = TRUE,
+    fit(
+      tailor() %>% adjust_predictions_custom(hey = "there"),
+      two_class_example,
+      outcome = c(truth),
+      estimate = c(test_date),
+      probabilities = c(Class1)
+    )
+  )
+})
+
+test_that("fit.tailor() errors informatively with incompatible probability", {
+  skip_if_not_installed("modeldata")
+  library(modeldata)
+
+  two_class_example$test_date <- as.POSIXct(two_class_example$Class1)
+
+  # supply a date probability to a binary tailor
+  expect_snapshot(
+    error = TRUE,
+    fit(
+      tailor() %>% adjust_probability_threshold(.1),
+      two_class_example,
+      outcome = c(truth),
+      estimate = c(predicted),
+      probabilities = c(test_date)
+    )
+  )
+})
+
test_that("find_tune_id() works", { # empty input expect_equal(find_tune_id(list()), NA_character_)