I usually use a stateful LSTM for prediction. When I train the LSTM, the reported training accuracy is quite high. However, when I then test the LSTM model on the same training set, the accuracy is low! This really confuses me; I thought the two should be the same (a small sanity check showing what I expected is at the end of this post). Here are my code and outputs. Does anyone know why this happens? Thanks!
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import LSTM, Dense

model = Sequential()
adam = keras.optimizers.Adam(lr=0.0001)
model.add(LSTM(512, batch_input_shape=(12, 1, 120), return_sequences=False, stateful=True))
model.add(Dense(8, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
print 'Train...'
for epoch in range(30):
    mean_tr_acc = []
    mean_tr_loss = []
    current_data, current_label, origin_label, is_shuffled = train_iter.next()
    for i in range(current_data.shape[1]):
        if i % 1000 == 0:
            print "current iter at {} with {} iteration".format(i, epoch)
        data_slice = current_data[:, i, :]
        # Data slice dim: [batch size = 12, time_step=1, feature_dim=120]
        data_slice = np.expand_dims(data_slice, axis=1)
        label_slice = current_label[:, i, :]
        one_hot_labels = keras.utils.to_categorical(label_slice, num_classes=8)
        last_element = one_hot_labels[:, -1, :]
        tr_loss, tr_acc = model.train_on_batch(np.array(data_slice), np.array(last_element))
        mean_tr_acc.append(tr_acc)
        mean_tr_loss.append(tr_loss)
    model.reset_states()

    print 'accuracy training = {}'.format(np.mean(mean_tr_acc))
    print 'loss training = {}'.format(np.mean(mean_tr_loss))
    print '___________________________________'
    # At this point, just evaluate the model on the same training data
    mean_te_acc = []
    mean_te_loss = []
    for i in range(current_data.shape[1]):
        if i % 1000 == 0:
            print "current val iter at {} with {} iteration".format(i, epoch)
        data_slice = current_data[:, i, :]
        data_slice = np.expand_dims(data_slice, axis=1)
        label_slice = current_label[:, i, :]
        one_hot_labels = keras.utils.to_categorical(label_slice, num_classes=8)
        last_element = one_hot_labels[:, -1, :]
        te_loss, te_acc = model.test_on_batch(np.array(data_slice), np.array(last_element))
        mean_te_acc.append(te_acc)
        mean_te_loss.append(te_loss)
    model.reset_states()

    print 'accuracy testing = {}'.format(np.mean(mean_te_acc))
    print 'loss testing = {}'.format(np.mean(mean_te_loss))
    print '___________________________________'
Here is the program output:
current iter at 0 with 13 iteration
current iter at 1000 with 13 iteration
accuracy training = 0.991784930229
loss training = 0.0320105217397
___________________________________
Batch shuffled
current val iter at 0 with 13 iteration
current val iter at 1000 with 13 iteration
accuracy testing = 0.927557885647
loss testing = 0.230829760432
___________________________________
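For reference, this is the kind of simplified check that shaped my expectation (a hypothetical toy model and random data, not my real pipeline): once an ordinary non-stateful model has converged on a fixed batch, I would expect train_on_batch and test_on_batch to report essentially the same accuracy on that batch, which is why the gap above surprises me.

import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense

# Hypothetical toy setup: memorize one fixed batch of random data
np.random.seed(0)
model = Sequential()
model.add(Dense(8, activation='softmax', input_dim=120))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

x = np.random.rand(12, 120)
y = keras.utils.to_categorical(np.random.randint(0, 8, size=(12,)), num_classes=8)

# Train repeatedly on the same batch, then evaluate on that exact batch
for _ in range(500):
    tr_loss, tr_acc = model.train_on_batch(x, y)
te_loss, te_acc = model.test_on_batch(x, y)
print 'train acc = {}, test acc = {}'.format(tr_acc, te_acc)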