мой полный пример:
from sklearn.datasets import make_regression
import pandas as pd
import numpy as np
from scipy.optimize import minimize
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
from math import sqrt
# generate 2d regression dataset
X, y = make_regression(n_samples=1100,n_features=2, noise=0.7)
# split into train and test
n_train = 100
train_x, test_x = X[:n_train, :], X[n_train:, :]
train_y, test_y = y[:n_train], y[n_train:]
print(train_x.shape, test_x.shape)
### building the regressors
rgs = []
rfr = RandomForestRegressor(n_estimators=10, random_state=4141, n_jobs=-1)
rfr.fit(train_x, train_y)
print('RF RMSE {score}'.format(score= sqrt(mean_squared_error(test_y,
rfr.predict(test_x)))))
rgs.append(rfr)
RF = rfr.predict(test_x)
### AdaBoost
ADA = AdaBoostRegressor()
ADA.fit(train_x, train_y)
print('ADA RMSE {score}'.format(score= sqrt(mean_squared_error(test_y, ADA.predict(test_x)))))
rgs.append(ADA)
ADB= ADA.predict(test_x)
# generate random floating point values for w[i]
from numpy.random import seed
from numpy.random import rand
# seed random number generator
seed(511)
# generate random numbers between 0-1
w1 = rand(1000)
оптимизация только для одних данных:
def objective(b):
b0 = b[0]
b1 = b[1]
return sqrt((w1[0]**(b0+b1*ADB[0])*ADB[0]+(1-w1[0])**(b0+b1*ADB[0])*RF[0]-test_y[0])**2)
bi = [1,0]
sol= minimize(objective, bi, method='SLSQP')
sol
Я попытался сделать l oop, но это дает ValueError: Цель функция должен вернуть скаляр
loss_es = list()
def objective(b):
b0 = b[0]
b1 = b[1]
for i in range(1000):
loss = sqrt(((w1[i]**(b0+b1*ADB[i]))*ADB[i]+((1-w1[i])**(b0+b1*ADB[i]))*RF[i]-test_y[i])**2)
loss_es.append(loss)
return loss_es
bi = [1,0]
for loss in range(1000):
sol = minimize(objective, bi, method='SLSQP' )