Yes, you did everything right. In addition, you can set the verbose level to see the hyperparameters used in the last cross-validation run, e.g.

[CV] activation=tanh, alpha=1e+100, hidden_layer_sizes=(30, 10), score=-4.180054117738231, total= 2.7s

I chose GridSearchCV over RandomizedSearchCV (a randomized variant is sketched at the end of this answer) to find the best parameter set, and on my machine the search took about five minutes.
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import load_boston  # note: load_boston was removed in scikit-learn 1.2; on newer versions use e.g. fetch_openml(name='boston', version=1)
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import explained_variance_score
X, y = load_boston(return_X_y=True)
# Split data for final evaluation:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=42)
# Define base regressor:
base_reg = MLPRegressor(learning_rate='adaptive', max_iter=5000, random_state=42)
# Define search space:
params = {
    'activation': ['logistic', 'relu', 'tanh'],  # <-- added 'tanh' as third non-linear activation function
    'alpha': np.logspace(0.0001, 100, 10),  # note: np.logspace takes exponents, so this spans 10**0.0001 up to 10**100
    'hidden_layer_sizes': [
        (10, 10), (20, 10), (30, 10),
        (40, 10), (90, 10), (90, 30, 10)  # <-- added more neurons or layers
    ]
}
# Find best hyper params and then refit on all training data:
reg = GridSearchCV(estimator=base_reg, param_grid=params,
n_jobs=8, cv=3, refit=True, verbose=5) # <-- verbose=5
reg.fit(X_train, y_train)
print(reg.best_estimator_)
# MLPRegressor(activation='logistic', alpha=1.0002302850208247,
# batch_size='auto', beta_1=0.9, beta_2=0.999, early_stopping=False,
# epsilon=1e-08, hidden_layer_sizes=(30, 10),
# learning_rate='adaptive', learning_rate_init=0.001, max_iter=5000,
# momentum=0.9, n_iter_no_change=10, nesterovs_momentum=True,
# power_t=0.5, random_state=42, shuffle=True, solver='adam',
# tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
print(reg.best_params_)
# {'activation': 'logistic', 'alpha': 1.0002302850208247, 'hidden_layer_sizes': (30, 10)}
# Evaluate on the training data:
err = explained_variance_score(y_train, reg.predict(X_train))
print(err) # 0.8936815412058757
# Evaluate on unseen test data:
err = explained_variance_score(y_test, reg.predict(X_test))
print(err) # 0.801353064635174
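
If the full grid gets too expensive, the same search can be run with RandomizedSearchCV, which samples a fixed number of candidates from the space instead of trying all 3 * 10 * 6 = 180 combinations. A minimal sketch, reusing the base_reg and params objects from above; the n_iter=20 budget is an arbitrary choice, not something I tuned:

from sklearn.model_selection import RandomizedSearchCV

# Sample 20 random candidates from the same space instead of all
# 180 grid combinations; random_state makes the draw reproducible.
rand_search = RandomizedSearchCV(estimator=base_reg, param_distributions=params,
                                 n_iter=20, cv=3, n_jobs=8, refit=True,
                                 random_state=42, verbose=5)
rand_search.fit(X_train, y_train)
print(rand_search.best_params_)

Since every entry in params is a plain list rather than a distribution, the 20 settings are sampled from the grid without replacement.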