mlr-org / mlr3tuning

Hyperparameter optimization package of the mlr3 ecosystem
https://mlr3tuning.mlr-org.com/
GNU Lesser General Public License v3.0
53 stars · 5 forks · source link

retrain #297

Status: closed (by be-marc, 2 years ago)

be-marc commented 3 years ago
# Reproducible example for retrain support. Requires the @continue feature
# branch of mlr3tuning, which adds the `allow_retrain` flag used below.
library(mlr3tuning)   # @continue branch
library(mlr3learners) # provides lrn("classif.xgboost")
library(data.table)

# xgboost learner with a tune space over boosting rounds and learning rate.
learner <- lrn("classif.xgboost")
learner$param_set$values$nrounds <- to_tune(1, 16)
learner$param_set$values$eta <- to_tune(0.01, 0.1)

# Tuning instance on the pima task. Models are stored so that a model fitted
# with fewer boosting rounds can later be continued (retrained) instead of
# refitted from scratch.
instance <- TuningInstanceSingleCrit$new(
  task = tsk("pima"),
  learner = learner,
  resampling = rsmp("holdout"),
  measure = msr("classif.ce"),
  terminator = trm("none"),
  store_models = TRUE,
  allow_retrain = TRUE
)

# Evaluate each eta at nrounds = 1, 2, 4, 8, 16. Doubling nrounds within a
# fixed eta is exactly the case where retraining can reuse a previous model.
design <- data.table(
  nrounds = rep(2^(0:4), each = 2),
  eta = rep(c(0.1, 0.01), times = 5)
)
tuner <- tnr("design_points", design = design, batch_size = 2)

tuner$optimize(instance)
be-marc commented 2 years ago

Covered by #312