It should be possible to combine any model with the TimeDistributed wrapper. So you can build a deconv model and apply it to the output of an LSTM (which is a sequence of vectors) by wrapping it in TimeDistributed.
Example: first, build the deconv network using Conv2DTranspose layers.
from keras.models import Model
from keras.layers import LSTM, Conv2DTranspose, Input, Activation, Dense, Reshape, TimeDistributed
# Hyperparameters
layer_filters = [32, 64]
lstm_dim = 64  # dimensionality of the LSTM output vectors; must match the LSTM's units below
# Deconv Model
# (adapted from https://github.com/keras-team/keras/blob/master/examples/mnist_denoising_autoencoder.py )
deconv_inputs = Input(shape=(lstm_dim,), name='deconv_input')
feature_map_shape = (None, 50, 50, 64) # deconvolve from [batch_size, 50,50,64] => [batch_size, 200,200,3]
x = Dense(feature_map_shape[1] * feature_map_shape[2] * feature_map_shape[3])(deconv_inputs)
x = Reshape((feature_map_shape[1], feature_map_shape[2],feature_map_shape[3]))(x)
for filters in layer_filters[::-1]:
    x = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, activation='relu', padding='same')(x)
x = Conv2DTranspose(filters=3, kernel_size=3, padding='same')(x) # last layer has 3 channels
deconv_output = Activation('sigmoid', name='deconv_output')(x)
deconv_model = Model(deconv_inputs, deconv_output, name='deconv_network')
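As an optional sanity check (not part of the original answer), you can push a random lstm_dim-dimensional vector through the deconv network on its own and verify that it produces a 200x200x3 image:
import numpy as np
# Optional check: one random vector of size lstm_dim in, one image out.
dummy_vector = np.random.random((1, lstm_dim)).astype('float32')
print(deconv_model.predict(dummy_vector).shape)  # expected: (1, 200, 200, 3)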
You can then apply this deconv model to the outputs of your LSTM using the TimeDistributed layer.
# LSTM
lstm_input = Input(shape=(None,16), name='lstm_input') # => [batch_size, timesteps, input_dim]
lstm_outputs = LSTM(units=64, return_sequences=True)(lstm_input) # => [batch_size, timesteps, output_dim]
predicted_images = TimeDistributed(deconv_model)(lstm_outputs)
model = Model(lstm_input, predicted_images, name='lstm_deconv')
model.summary()
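To confirm the end-to-end shapes, you can run a random batch through the combined model; the batch size, number of timesteps, optimizer and loss below are arbitrary placeholders chosen for illustration, not values prescribed by the example:
import numpy as np
# Arbitrary example batch: 2 sequences of 5 timesteps, each timestep a 16-dim vector.
dummy_sequences = np.random.random((2, 5, 16)).astype('float32')
predicted = model.predict(dummy_sequences)
print(predicted.shape)  # expected: (2, 5, 200, 200, 3) -- one image per timestep
# For training, compile as usual, e.g. with a pixel-wise reconstruction loss:
model.compile(optimizer='adam', loss='mse')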