Доброе утро, я построил нейронную сеть для предсказания физического квантового процесса. Я хочу запустить модель 10 раз, чтобы оценить стабильность модели. Как я могу создать DataFrame со всеми функциями потерь (как обучения, так и валидации), полученными за 10 попыток?
# =============================================================================
# Repeat the train/evaluate cycle N_RUNS times to assess model stability, and
# collect the per-epoch training and validation losses of EVERY run into a
# single DataFrame (one row per epoch, one column per run per loss type) —
# this answers the question asked above the code.
# =============================================================================
from keras import backend as K
# Alias the sklearn metric so it cannot shadow a Keras-compatible `r2_score`
# defined elsewhere in this file: in the original, the import inside the loop
# shadowed that name from the second iteration on, which would crash
# `model.compile` (sklearn's r2_score cannot be used as a Keras metric).
from sklearn.metrics import r2_score as sk_r2_score
from sklearn import metrics as sk_metrics

N_RUNS = 10      # number of repeated experiments
EPOCHS = 200
test_size = 0.2

# Read the data ONCE — the original re-read the CSV on every iteration.
dataset = pd.read_csv('CompleteDataSet_original_Clean_TP.csv', decimal=',', delimiter=";")
label = dataset.iloc[:, -1]
features = dataset[feat_labels]

y_max_pre_normalize = max(label)
y_min_pre_normalize = min(label)


def denormalize(y):
    """Map MinMax-normalized target values back to the original scale."""
    return y * (y_max_pre_normalize - y_min_pre_normalize) + y_min_pre_normalize


# Accumulators for the per-run evaluation metrics (the original appended to
# these without initializing them in view).
r2_test, r2_train = [], []
MSE_test, MSE_train = [], []
RMSE_test, RMSE_train = [], []
# column name -> per-epoch loss list; turned into a DataFrame after the loop.
loss_columns = {}

for i in range(N_RUNS):
    X_train1, X_test1, y_train1, y_test1 = train_test_split(
        features, label, test_size=test_size, shuffle=True)

    # BUG FIX: the original fitted *separate* MinMax scalers on the test set,
    # which leaks test-set statistics and makes the train/test (and
    # denormalize) scales inconsistent. Fit on the training split only and
    # transform both splits with the same scaler.
    scaler_X = preprocessing.MinMaxScaler()
    X_train = scaler_X.fit_transform(X_train1)
    X_test = scaler_X.transform(X_test1)

    scaler_y = preprocessing.MinMaxScaler()
    y_train = scaler_y.fit_transform(y_train1.to_frame())
    y_test = scaler_y.transform(y_test1.to_frame())

    # =========================================================================
    # Build the network (fresh weights each run, so runs are independent).
    # =========================================================================
    # NOTE(review): `lr` is deprecated in recent TF releases — switch to
    # `learning_rate=0.001` if your version rejects it.
    optimizer = tf.keras.optimizers.Adam(lr=0.001)
    model = Sequential()
    model.add(Dense(100, input_shape=(X_train.shape[1],), activation='relu',
                    kernel_initializer='glorot_uniform'))
    model.add(Dropout(0.2))
    model.add(Dense(100, activation='relu', kernel_initializer='glorot_uniform'))
    model.add(Dropout(0.2))
    model.add(Dense(100, activation='relu', kernel_initializer='glorot_uniform'))
    model.add(Dropout(0.2))
    model.add(Dense(100, activation='relu', kernel_initializer='glorot_uniform'))
    model.add(Dense(1, activation='linear', kernel_initializer='glorot_uniform'))
    # `r2_score` here must be a Keras-compatible metric defined earlier in
    # this file (not sklearn's) — TODO confirm it exists.
    model.compile(loss='mse', optimizer=optimizer, metrics=['mse', r2_score])

    history = model.fit(X_train, y_train, epochs=EPOCHS,
                        validation_split=0.1, shuffle=False, batch_size=250)

    # Collect this run's loss curves. Every run trains for EPOCHS epochs, so
    # all columns share the same length and a common epoch index.
    loss_columns['loss_run{}'.format(i)] = history.history['loss']
    loss_columns['val_loss_run{}'.format(i)] = history.history['val_loss']

    # Evaluate on the ORIGINAL (denormalized) target scale.
    y_train_pred = denormalize(model.predict(X_train))
    y_test_pred = denormalize(model.predict(X_test))

    r2_test.append(sk_r2_score(y_test_pred, y_test1))
    r2_train.append(sk_r2_score(y_train_pred, y_train1))
    # Measure MSE / RMSE error.
    MSE_test.append(sk_metrics.mean_squared_error(y_test_pred, y_test1))
    MSE_train.append(sk_metrics.mean_squared_error(y_train_pred, y_train1))
    RMSE_test.append(np.sqrt(sk_metrics.mean_squared_error(y_test_pred, y_test1)))
    RMSE_train.append(np.sqrt(sk_metrics.mean_squared_error(y_train_pred, y_train1)))

# The DataFrame the question asks for: rows = epochs, columns = the training
# and validation loss of each of the N_RUNS experiments.
losses_df = pd.DataFrame(loss_columns)
losses_df.index.name = 'epoch'