Function:
```r
#' Boilerplate Workflow
#'
#' @family Boiler_Plate
#' @family Earth
#'
#' @author Steven P. Sanderson II, MPH
#'
#' @details
#' This uses `parsnip::mars()` with the `engine` set to `earth`.
#'
#' @description This is a boilerplate function that automatically creates the following:
#' - recipe
#' - model specification
#' - workflow
#' - tuned model (grid etc.)
#'
#' @seealso \url{http://uc-r.github.io/mars}
#'
#' @param .data The data being passed to the function.
#' @param .rec_obj This is the recipe object you want to use. You can use
#' `hai_earth_data_prepper()` to generate a recipe object automatically.
#' @param .splits_obj NULL is the default; when NULL, one will be created.
#' @param .rsamp_obj NULL is the default; when NULL, one will be created. It
#' will default to creating an [rsample::mc_cv()] object.
#' @param .tune Default is TRUE; this will create a tuning grid and tuned workflow.
#' @param .grid_size Default is 10.
#' @param .num_cores Default is 1.
#' @param .best_metric Default is "f_meas". You can choose a metric depending on the
#' model_type used. If `regression`, see [healthyR.ai::hai_default_regression_metric_set()];
#' if `classification`, see [healthyR.ai::hai_default_classification_metric_set()].
#' @param .model_type Default is `classification`; can also be `regression`.
#'
#' @examples
#' \dontrun{
#' data <- iris
#'
#' rec_obj <- hai_earth_data_prepper(data, Species ~ .)
#'
#' auto_earth <- hai_auto_earth(
#'   .data = data,
#'   .rec_obj = rec_obj,
#'   .best_metric = "f_meas",
#'   .model_type = "classification"
#' )
#'
#' auto_earth$recipe_info
#' }
#'
#' @return
#' A list
#'
#' @export
#'

hai_auto_earth <- function(.data, .rec_obj, .splits_obj = NULL, .rsamp_obj = NULL,
                           .tune = TRUE, .grid_size = 10, .num_cores = 1,
                           .best_metric = "f_meas", .model_type = "classification") {

  # Tidyeval ----
  grid_size   <- as.numeric(.grid_size)
  num_cores   <- as.numeric(.num_cores)
  best_metric <- as.character(.best_metric)

  data_tbl   <- dplyr::as_tibble(.data)
  splits     <- .splits_obj
  rec_obj    <- .rec_obj
  rsamp_obj  <- .rsamp_obj
  model_type <- as.character(.model_type)

  # Checks ----
  if (!inherits(x = splits, what = "rsplit") && !is.null(splits)) {
    rlang::abort(
      message = "'.splits_obj' must either be NULL or an object of class 'rsplit'; use the rsample package.",
      use_cli_format = TRUE
    )
  }

  if (!inherits(x = rec_obj, what = "recipe")) {
    rlang::abort(
      message = "'.rec_obj' must have a class of 'recipe'."
    )
  }

  if (!model_type %in% c("regression", "classification")) {
    rlang::abort(
      message = paste0(
        "You chose a mode of: '", model_type,
        "'; this is unsupported. Choose either 'regression' or 'classification'."
      ),
      use_cli_format = TRUE
    )
  }

  if (!inherits(x = rsamp_obj, what = "rset") && !is.null(rsamp_obj)) {
    rlang::abort(
      message = "The '.rsamp_obj' argument must either be NULL or an object of class 'rset'.",
      use_cli_format = TRUE
    )
  }

  # Set default metric set ----
  if (model_type == "classification") {
    ms <- healthyR.ai::hai_default_classification_metric_set()
  } else {
    ms <- healthyR.ai::hai_default_regression_metric_set()
  }

  # Get splits; if NULL then create ----
  if (is.null(splits)) {
    splits <- rsample::initial_split(data = data_tbl)
  }

  # Tune/Spec ----
  if (.tune) {
    # Model Specification with tunable parameters
    model_spec <- parsnip::mars(
      num_terms    = tune::tune(),
      prod_degree  = tune::tune(),
      prune_method = "none"
    )
  } else {
    model_spec <- parsnip::mars()
  }

  # Model Specification ----
  model_spec <- model_spec %>%
    parsnip::set_mode(mode = model_type) %>%
    parsnip::set_engine(engine = "earth")

  # Workflow ----
  wflw <- workflows::workflow() %>%
    workflows::add_recipe(rec_obj) %>%
    workflows::add_model(model_spec)

  # Tuning Grid ----
  if (.tune) {
    # Make tuning grid
    tuning_grid_spec <- dials::grid_latin_hypercube(
      hardhat::extract_parameter_set_dials(model_spec),
      size = grid_size
    )

    # Cross validation object
    if (is.null(rsamp_obj)) {
      cv_obj <- rsample::mc_cv(
        data = rsample::training(splits)
      )
    } else {
      cv_obj <- rsamp_obj
    }

    # Tune the workflow
    # Start parallel backend
    modeltime::parallel_start(num_cores)

    tuned_results <- wflw %>%
      tune::tune_grid(
        resamples = cv_obj,
        grid      = tuning_grid_spec,
        metrics   = ms
      )

    modeltime::parallel_stop()

    # Get the best result set by a specified metric
    best_result_set <- tuned_results %>%
      tune::show_best(metric = best_metric, n = 1)

    # Plot results
    tune_results_plt <- tuned_results %>%
      tune::autoplot() +
      ggplot2::theme_minimal() +
      ggplot2::geom_smooth(se = FALSE) +
      ggplot2::theme(legend.position = "bottom")

    # Make final workflow
    wflw_fit <- wflw %>%
      tune::finalize_workflow(best_result_set) %>%
      parsnip::fit(rsample::training(splits))
  } else {
    wflw_fit <- wflw %>%
      parsnip::fit(rsample::training(splits))
  }

  # Return ----
  output <- list(
    recipe_info = rec_obj,
    model_info = list(
      model_spec  = model_spec,
      wflw        = wflw,
      fitted_wflw = wflw_fit,
      was_tuned   = ifelse(.tune, "tuned", "not_tuned")
    )
  )

  if (.tune) {
    output$tuned_info <- list(
      tuning_grid      = tuning_grid_spec,
      cv_obj           = cv_obj,
      tuned_results    = tuned_results,
      grid_size        = grid_size,
      best_metric      = best_metric,
      best_result_set  = best_result_set,
      tuning_grid_plot = tune_results_plt,
      plotly_grid_plot = plotly::ggplotly(tune_results_plt)
    )
  }

  attr(output, "function_type") <- "boilerplate"
  attr(output, ".grid_size")    <- .grid_size
  attr(output, ".tune")         <- .tune
  attr(output, ".best_metric")  <- .best_metric
  attr(output, ".model_type")   <- model_type
  attr(output, ".engine")       <- "earth"

  return(invisible(output))
}
```
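The roxygen block also documents a `regression` mode and an untuned path. As a quick illustration of that path (not part of the original issue), a minimal sketch might look like the following; `mtcars` and `mpg ~ .` are placeholder choices, and `"rmse"` is assumed to be part of `hai_default_regression_metric_set()`:

```r
# A minimal sketch of the regression path, assuming hai_earth_data_prepper()
# builds a regression-ready recipe from a formula. mtcars/mpg are illustrative.
library(healthyR.ai)

data <- mtcars

rec_obj <- hai_earth_data_prepper(data, mpg ~ .)

auto_earth_reg <- hai_auto_earth(
  .data        = data,
  .rec_obj     = rec_obj,
  .tune        = FALSE,       # skip the tuning grid; fit parsnip::mars() defaults
  .best_metric = "rmse",      # only stored as an attribute when .tune = FALSE
  .model_type  = "regression"
)

# The untuned path returns recipe_info and model_info, but no tuned_info element
auto_earth_reg$model_info$fitted_wflw
```

With `.tune = FALSE` the function bypasses the grid and resampling entirely and fits the default `parsnip::mars()` specification on `rsample::training(splits)`.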
Example:
```r
data <- iris

rec_obj <- hai_earth_data_prepper(data, Species ~ .)

auto_earth <- hai_auto_earth(
  .data = data,
  .rec_obj = rec_obj,
  .best_metric = "f_meas"
)
```

```
> auto_earth
$recipe_info
Recipe

Inputs:

      role #variables
   outcome          1
 predictor          4

Operations:

Factor variables from tidyselect::vars_select_helpers$where(is.character)
Novel factor level assignment for recipes::all_nominal_predictors()
Dummy variables from recipes::all_nominal_predictors()
Zero variance filter on recipes::all_predictors()

$model_info
$model_info$model_spec
MARS Model Specification (classification)

Main Arguments:
  num_terms = tune::tune()
  prod_degree = tune::tune()
  prune_method = none

Computational engine: earth 

$model_info$wflw
== Workflow ================================================================
Preprocessor: Recipe
Model: mars()

-- Preprocessor ------------------------------------------------------------
4 Recipe Steps

* step_string2factor()
* step_novel()
* step_dummy()
* step_zv()

-- Model -------------------------------------------------------------------
MARS Model Specification (classification)

Main Arguments:
  num_terms = tune::tune()
  prod_degree = tune::tune()
  prune_method = none

Computational engine: earth 

$model_info$fitted_wflw
== Workflow [trained] ======================================================
Preprocessor: Recipe
Model: mars()

-- Preprocessor ------------------------------------------------------------
4 Recipe Steps

* step_string2factor()
* step_novel()
* step_dummy()
* step_zv()

-- Model -------------------------------------------------------------------
GLM (family binomial, link logit):
           nulldev  df      dev  df devratio    AIC iters converged
setosa     142.113 111  59.9447 110   0.5780  63.94    22         1
versicolor 147.130 111 136.1428 110   0.0747 140.10     4         1
virginica  137.505 111  22.7716 110   0.8340  26.77     8         1

Earth selected 2 of 18 terms, and 1 of 4 predictors (pmethod="none") (nprune=2)
Termination condition: Reached nk 21
Importance: Petal.Length-unused, Sepal.Length-unused, Sepal.Width-unused, Petal.Width
Number of terms at each degree of interaction: 1 1 (additive model)

Earth
                  GCV       RSS       GRSq       RSq
setosa     0.16192779 17.020128 0.28104063 0.3130615
versicolor 0.22562605 23.715413 0.04502284 0.0875554
virginica  0.04988629  5.243517 0.76823192 0.7785543
All        0.35875853 37.708885 0.46986522 0.4934762

$model_info$was_tuned
[1] "tuned"

$tuned_info
$tuned_info$tuning_grid
# A tibble: 6 x 2
  num_terms prod_degree
      <int>       <int>
1         3           2
2         4           1
3         2           2
4         5           1
5         4           2
6         3           1

$tuned_info$cv_obj
# Monte Carlo cross-validation (0.75/0.25) with 25 resamples 
# A tibble: 25 x 2
   splits          id        
   <list>          <chr>     
 1 <split [84/28]> Resample01
 2 <split [84/28]> Resample02
 3 <split [84/28]> Resample03
 4 <split [84/28]> Resample04
 5 <split [84/28]> Resample05
 6 <split [84/28]> Resample06
 7 <split [84/28]> Resample07
 8 <split [84/28]> Resample08
 9 <split [84/28]> Resample09
10 <split [84/28]> Resample10
# ... with 15 more rows

$tuned_info$tuned_results
# Tuning results
# Monte Carlo cross-validation (0.75/0.25) with 25 resamples 
# A tibble: 25 x 4
   splits          id         .metrics          .notes          
   <list>          <chr>      <list>            <list>          
 1 <split [84/28]> Resample01 <tibble [66 x 6]> <tibble [5 x 3]>
 2 <split [84/28]> Resample02 <tibble [66 x 6]> <tibble [5 x 3]>
 3 <split [84/28]> Resample03 <tibble [66 x 6]> <tibble [5 x 3]>
 4 <split [84/28]> Resample04 <tibble [66 x 6]> <tibble [5 x 3]>
 5 <split [84/28]> Resample05 <tibble [66 x 6]> <tibble [5 x 3]>
 6 <split [84/28]> Resample06 <tibble [66 x 6]> <tibble [5 x 3]>
 7 <split [84/28]> Resample07 <tibble [66 x 6]> <tibble [5 x 3]>
 8 <split [84/28]> Resample08 <tibble [66 x 6]> <tibble [5 x 3]>
 9 <split [84/28]> Resample09 <tibble [66 x 6]> <tibble [5 x 3]>
10 <split [84/28]> Resample10 <tibble [66 x 6]> <tibble [5 x 3]>
# ... with 15 more rows

There were issues with some computations:

  - Warning(s) x73: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x1: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x1: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x25: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x2: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x6: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x1: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x1: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x3: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x8: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...
  - Warning(s) x4: glm.fit: algorithm did not converge, glm.fit: fitted probabilities numeric...

Use `collect_notes(object)` for more information.

$tuned_info$grid_size
[1] 10

$tuned_info$best_metric
[1] "f_meas"

$tuned_info$best_result_set
# A tibble: 1 x 8
  num_terms prod_degree .metric .estimator  mean     n std_err .config             
      <int>       <int> <chr>   <chr>      <dbl> <int>   <dbl> <chr>               
1         2           2 f_meas  macro      0.128    25  0.0128 Preprocessor1_Model4

$tuned_info$tuning_grid_plot
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

$tuned_info$plotly_grid_plot

attr(,"function_type")
[1] "boilerplate"
attr(,".grid_size")
[1] 10
attr(,".tune")
[1] TRUE
attr(,".best_metric")
[1] "f_meas"
attr(,".model_type")
[1] "classification"
attr(,".engine")
[1] "earth"
There were 50 or more warnings (use warnings() to see the first 50)
```
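Since `$model_info$fitted_wflw` is a regular trained `workflows` object, downstream prediction works the usual tidymodels way. A minimal sketch, not from the original issue, reusing `iris` purely as illustrative `new_data`:

```r
# Predict with the fitted workflow returned above; a trained workflow
# supports predict() like any other parsnip fit.
library(dplyr)

fitted_wflw <- auto_earth$model_info$fitted_wflw

# Hard class predictions
preds <- predict(fitted_wflw, new_data = iris)

# Class probabilities, bound back onto the original data
probs <- predict(fitted_wflw, new_data = iris, type = "prob")

head(bind_cols(iris, preds, probs))
```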