Don't prep the recipe before passing it to train, and use the original training set.
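For contrast, the pattern to avoid looks roughly like this (a sketch, not from the original answer, assuming the rec_obj and credit_train objects defined below): prepping and baking the recipe by hand and fitting on the already-processed data, which means train no longer re-estimates the preprocessing inside each resample and the stored model no longer lines up with the raw test columns at prediction time.

# Anti-pattern (sketch), don't do this:
# prepped   <- prep(rec_obj, training = credit_train)
# baked_tr  <- bake(prepped, new_data = credit_train)
# lrfit_bad <- train(Status ~ ., data = baked_tr, method = "glm",
#                    trControl = trainControl(method = "repeatedcv", repeats = 5))

A working version, passing the unprepped recipe and the raw training data straight to train: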
library(caret)
#> Loading required package: lattice
#> Loading required package: ggplot2
library(tidyverse)
library(recipes)
#>
#> Attaching package: 'recipes'
#> The following object is masked from 'package:stringr':
#>
#> fixed
#> The following object is masked from 'package:stats':
#>
#> step
library(rsample)
data("credit_data")
## Split the data into training (75%) and test sets (25%)
set.seed(100)
train_test_split <- initial_split(credit_data)
credit_train <- training(train_test_split)
credit_test <- testing(train_test_split)
# Create recipe for data pre-processing
rec_obj <-
  recipe(Status ~ ., data = credit_train) %>%
  step_knnimpute(all_predictors()) %>%
  # step_other(Home, Marital, threshold = .2, other = "other") %>%
  # step_other(Job, threshold = .2, other = "others") %>%
  step_dummy(Records) %>%
  step_center(all_numeric()) %>%
  step_scale(all_numeric())
set.seed(1055)
# the glm function models the second factor level.
lrfit <- train(rec_obj, data = credit_train,
               method = "glm",
               trControl = trainControl(method = "repeatedcv",
                                        repeats = 5))
lrfit
#> Generalized Linear Model
#>
#> 3341 samples
#> 13 predictor
#> 2 classes: 'bad', 'good'
#>
#> Recipe steps: knnimpute, dummy, center, scale
#> Resampling: Cross-Validated (10 fold, repeated 5 times)
#> Summary of sample sizes: 3006, 3008, 3007, 3007, 3007, 3007, ...
#> Resampling results:
#>
#> Accuracy Kappa
#> 0.7965349 0.4546223
Created on 2019-03-20 by the reprex package (v0.2.1)
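Because train received the recipe together with the raw training data, the fitted model stores the preprocessing and applies it to new data on its own. A minimal follow-up sketch (not part of the original reprex), assuming the credit_test split created above:

# Score the held-out test set; predict() applies the stored recipe steps
# (imputation, dummy coding, centering, scaling) to the new data first.
test_pred <- predict(lrfit, newdata = credit_test)
confusionMatrix(test_pred, credit_test$Status)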