I am using an LSTM-based encoder-decoder RNN for seq2seq prediction. What will the output of the dense layer be? I am confused about whether it is the embedding vectors of the output sequence or the token IDs of the output words directly.
Thanks.
Here is the model structure:
from keras.layers import Input, Embedding, LSTM, Dense, TimeDistributed
from keras.models import Model

# Shared embedding layer, initialized from a pre-trained embedding matrix and frozen.
shared_embedding_layer = Embedding(vocab_size,
                                   embedding_matrix.shape[1],
                                   weights=[embedding_matrix],
                                   trainable=False,
                                   name='embedding_layer')

# Encoder: consumes token IDs and returns only its final hidden and cell states.
encoder_inputs = Input(batch_shape=(my_batch_size, num_encoder_tokens), name='encoder_input')
encoder_embedding_layer = shared_embedding_layer(encoder_inputs)
encoder_lstm = LSTM(latent_dim,
                    return_sequences=False,
                    return_state=True,
                    name='encoder_lstm')
_, state_h, state_c = encoder_lstm(encoder_embedding_layer)
encoder_states = [state_h, state_c]

# Decoder: initialized with the encoder states, returns the full output sequence.
decoder_inputs = Input(batch_shape=(my_batch_size, num_decoder_tokens), name='decoder_input')
decoder_embedding_layer = shared_embedding_layer(decoder_inputs)
decoder_lstm = LSTM(latent_dim,
                    return_state=False,
                    return_sequences=True,
                    name='decoder_lstm')
decoder_lstm_output = decoder_lstm(decoder_embedding_layer, initial_state=encoder_states)

# Dense softmax layer applied to every decoder time step.
time_distributed_decoder_dense = TimeDistributed(Dense(num_decoder_tokens,
                                                       activation='softmax',
                                                       name='decoder_dense'))
decoder_outputs = time_distributed_decoder_dense(decoder_lstm_output)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
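To make the question concrete, this is a minimal sketch of how I am inspecting the shapes, assuming the placeholder variables above (vocab_size, latent_dim, my_batch_size, num_encoder_tokens, num_decoder_tokens, embedding_matrix) are already defined:

# Sketch: print every layer's output shape and the model's final symbolic output shape.
model.summary()
print('model.output_shape:', model.output_shape)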