Я пытаюсь построить матрицу ошибок (confusion matrix) после обучения модели на данных, загруженных из pickle-файлов. Однако я получаю сообщение об ошибке «continuous is not supported»,
и я не понимаю, почему это происходит.
Здесь загружаются данные:
# For reproducibility of the random weight initialisation used later.
np.random.seed(1337)

def _load_pickle_pair(name):
    """Return the (X, Y) objects pickled as 'X_<name>' and 'Y_<name>'.

    Context managers guarantee the files are closed even if pickle.load
    raises.  NOTE(review): pickle is only safe for files you created
    yourself — never unpickle untrusted data.
    """
    with open('X_' + name, 'rb') as xfile, open('Y_' + name, 'rb') as yfile:
        return pickle.load(xfile), pickle.load(yfile)

# LOAD THE PICKLE DATA — training split
xData, yData = _load_pickle_pair('2f3stest1top')
# LOAD THE PICKLE DATA 1 — test split
xData1, yData1 = _load_pickle_pair('2f3stest2top')

# Convert the pickled list-like objects to numpy arrays.
train_X = np.array(xData)
train_Y = np.array(yData)
test_X = np.array(xData1)
test_Y = np.array(yData1)
Вот код для обучения модели LSTM:
# ---- LSTM configuration --------------------------------------------------
n_neurons = 30    # hidden units in the LSTM layer
n_batch = 5       # batch size; with stateful=True BOTH the train and test
                  # set sizes must be divisible by this value
n_epoch = 100     # upper bound on epochs (EarlyStopping may stop sooner)
n_features = 4    # features per time step
time_step = 5     # sequence length
warnings.filterwarnings("ignore")

# ---- build the model -----------------------------------------------------
# stateful=True requires a fixed batch_input_shape of
# (batch, time-steps, features); cell state is carried across batches.
model = Sequential()
model.add(LSTM(n_neurons, batch_input_shape=(n_batch, time_step, n_features), stateful=True))
model.add(Dense(1, activation='sigmoid'))
# NOTE(review): for a binary classifier 'binary_crossentropy' is normally
# preferred over MSE; kept as-is to preserve the original behaviour.
model.compile(loss="mean_squared_error", optimizer="rmsprop", metrics=['accuracy'])

start = time.time()

# ---- train ----------------------------------------------------------------
print('Training')
history = model.fit(
    train_X, train_Y,
    epochs=n_epoch,
    batch_size=n_batch,
    verbose=1,
    callbacks=[EarlyStopping(monitor='loss', patience=5)],
    validation_data=(test_X, test_Y),
    shuffle=False,  # stateful LSTMs depend on sample order between batches
)
model.reset_states()  # clear carried-over cell state before predicting

# ---- predict ---------------------------------------------------------------
print('Predicting')
predicted_stateful = model.predict(test_X, batch_size=n_batch)

end = time.time()
print("Time Took :{:3.2f} min".format( (end-start)/60 ))
Здесь я сохраняю значения точности и потерь, полученные при обучении:
# Extract the per-epoch metric curves from the Keras History object.
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
# Keras/TF 2.x renamed the accuracy keys from 'acc'/'val_acc' to
# 'accuracy'/'val_accuracy' — accept either spelling so the script
# works across versions instead of raising KeyError.
acc_values = history_dict.get('acc', history_dict.get('accuracy'))
val_acc_values = history_dict.get('val_acc', history_dict.get('val_accuracy'))
epochs = range(1, len(loss_values) + 1)
Вот оценка модели:
# Score the model on the held-out test split; evaluate() returns the
# metrics in compile order: (loss, accuracy).
evaluation = model.evaluate(test_X, test_Y, batch_size=n_batch)
test_loss, test_acc = evaluation
print('\ntest_acc:', test_acc)
А вот как далеко я продвинулась с построением матрицы ошибок (confusion matrix):
# ---- Confusion matrix ------------------------------------------------------
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report

# confusion_matrix() expects discrete CLASS LABELS, not continuous values.
# The original call passed the per-epoch accuracy floats
# (acc_values, val_acc_values), which is exactly what triggers the
# "continuous is not supported" error.  Instead, threshold the sigmoid
# outputs of the network at 0.5 to get 0/1 predictions and compare them
# with the true test labels.
y_pred = (model.predict(test_X, batch_size=n_batch) > 0.5).astype(int).ravel()
conf_mat = confusion_matrix(test_Y, y_pred)
print(conf_mat)
Любая помощь или совет будут высоко оценены