После нескольких дней борьбы с той же проблемой вот что я обнаружил:
# Using multiple stacked LSTM layers for encoding is not a problem at all.
# Three are used here. Pay attention to the flags: the last layer does NOT
# return its sequence (return_sequences=False) because we want one fixed-size
# vector that summarizes the whole input, not a per-timestep sequence.
encoder_input = Input(shape=(None, num_allowed_chars), name='encoder_input')
encoder_lstm1 = LSTM(state_size, name='encoder_lstm1',
return_sequences=True, return_state=True)
encoder_lstm2 = LSTM(state_size, name='encoder_lstm2',
return_sequences=True, return_state=True)
encoder_lstm3 = LSTM(state_size, name='encoder_lstm3',
return_sequences=False, return_state=True)
# Chain the LSTM layers; intermediate states are discarded.
x = encoder_input
x, _, _ = encoder_lstm1(x)
x, _, _ = encoder_lstm2(x)
# Only the states of the last layer are of interest.
x, state_h, state_c = encoder_lstm3(x)
encoder_output = x  # The encoded fixed-size vector that seq2seq is all about.
encoder_states = [state_h, state_c]
Теперь давайте посмотрим на бит декодирования (и самый сложный):
# Something new here: every decoding layer needs an Input for both its
# hidden state (h) and its cell state (c). Two stacked decoding layers are
# used, hence the four inputs h1, c1, h2, c2.
decoder_initial_state_h1 = Input(shape=(state_size,),
name='decoder_initial_state_h1')
decoder_initial_state_c1 = Input(shape=(state_size,),
name='decoder_initial_state_c1')
decoder_initial_state_h2 = Input(shape=(state_size,),
name='decoder_initial_state_h2')
decoder_initial_state_c2 = Input(shape=(state_size,),
name='decoder_initial_state_c2')
decoder_input = Input(shape=(None, num_allowed_chars), name='decoder_input')
# Note the return_sequences and return_state flags: both decoder layers
# return full sequences AND their states.
decoder_lstm1 = LSTM(state_size, name='decoder_lstm1',
return_sequences=True, return_state=True)
decoder_lstm2 = LSTM(state_size, name='decoder_lstm2',
return_sequences=True, return_state=True)
decoder_dense = Dense(
num_allowed_chars, activation='softmax', name="decoder_output")
# Connect the decoder for training (initial state = encoder state).
# The encoder states are fed as the initial state to BOTH decoding layers.
x = decoder_input
x, h1, c1 = decoder_lstm1(x, initial_state=encoder_states)
# NOTE: passing [h1, c1] as the initial state of the second layer was
# tried, but it produced rubbish — the encoder states work.
x, _, _ = decoder_lstm2(x, initial_state=encoder_states)
decoder_output = decoder_dense(x)
model_train = Model(inputs=[encoder_input, decoder_input],
outputs=decoder_output)
model_encoder = Model(inputs=encoder_input,
outputs=encoder_states)
Это часть, где декодер подключается для вывода. Она немного отличается от настройки декодера для обучения.
# This decoder model setup is used for inference only.
# Important: every layer keeps its own states. This matters again in
# decode_sequence(), where all four states are fed back each timestep.
x = decoder_input
x, h1, c1 = decoder_lstm1(
x, initial_state=[decoder_initial_state_h1, decoder_initial_state_c1])
x, h2, c2 = decoder_lstm2(
x, initial_state=[decoder_initial_state_h2, decoder_initial_state_c2])
decoder_output = decoder_dense(x)
decoder_states = [h1, c1, h2, c2]
model_decoder = Model(
inputs=[decoder_input] + [decoder_initial_state_h1, decoder_initial_state_c1,
decoder_initial_state_h2, decoder_initial_state_c2],
outputs=[decoder_output] + decoder_states)  # The model outputs h1, c1, h2, c2!
model_train.summary()
model_train.compile(optimizer='rmsprop',
loss='categorical_crossentropy', metrics=["acc"])
plot_model(model_train, to_file=data_path_prefix +
'spellchecker/model_train.png')
plot_model(model_encoder, to_file=data_path_prefix +
'spellchecker/model_encode.png')
plot_model(model_decoder, to_file=data_path_prefix +
'spellchecker/model_decode.png')
Это часть декодирования. Сравнивая со своим кодом, обратите внимание, как я вычисляю вектор кодирования вне цикла и дублирую его, чтобы его можно было передать в model_decoder.predict() в качестве начального состояния для обоих LSTM-слоёв.
Второй хитрый момент — получить все четыре выходных состояния из .predict() и передать их обратно в прогноз на следующем временном шаге.
def decode_sequence(input_seq, maxlen_decoder_sequence,
                    start_char='a', stop_char='z'):
    """Greedily decode one input sequence with the inference models.

    Encodes ``input_seq`` once with ``model_encoder``, then generates one
    character per step with ``model_decoder``, feeding each predicted
    character and all four LSTM states back in for the next step.
    Assumes batch size 1.

    Parameters:
        input_seq: one-hot encoded input, shape (1, timesteps,
            num_allowed_chars) — presumably; confirm against caller.
        maxlen_decoder_sequence: hard cap on the generated length.
        start_char: character that seeds the decoder (default 'a',
            the training start token).
        stop_char: character that terminates decoding (default 'z',
            the training stop token; it IS included in the result).

    Returns:
        The decoded sentence as a string (including the stop character
        if one was produced before the length cap).
    """
    # Encode the input once; model_encoder returns [state_h, state_c].
    initial_state = model_encoder.predict(input_seq)
    # Both decoder layers were trained with the encoder states as their
    # initialization, so the states are simply repeated: layer 1 and
    # layer 2 each get a copy via model_decoder.predict().
    initial_state = initial_state + initial_state
    # One-hot target sequence of length 1, seeded with the start character.
    decoder_input_data = np.zeros((1, 1, num_allowed_chars))
    decoder_input_data[0, 0, char_to_int[start_char]] = 1.
    decoded_sentence = ''
    while True:
        # Catch all four returned states so they can be fed back in below.
        one_hot_char, h1, c1, h2, c2 = model_decoder.predict(
            [decoder_input_data] + initial_state)
        # Take the distribution of the last timestep and pick the argmax.
        char_as_int = np.argmax(one_hot_char[0][-1])
        char_as_char = int_to_char[char_as_int]
        decoded_sentence += char_as_char
        # Exit condition: stop character produced or max length reached.
        if (char_as_char == stop_char
                or len(decoded_sentence) >= maxlen_decoder_sequence):
            break
        # Feed the predicted char back in as the next decoder input.
        decoder_input_data = np.zeros((1, 1, num_allowed_chars))
        decoder_input_data[0, 0, char_as_int] = 1.
        # Feed the returned states back in as the next initial states.
        initial_state = [h1, c1, h2, c2]
    return decoded_sentence
Надеюсь, это поможет. Существуют миллионы простых однослойных примеров, но ни одного с бо́льшим числом слоёв. Очевидно, теперь его легко расширить до более чем двух слоёв декодирования.
Удачи! (Это мой первый ответ, так что :-)!)