Я хочу добавить слой умножения поверх автоэнкодера LSTM.
Слой умножения должен умножать тензор на постоянное (обучаемое) значение.
Я написал следующий код, который работает без слоя умножения.
Кто-нибудь знает, как настроить и заставить это работать?
import keras
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import Input, LSTM, RepeatVector, TimeDistributed
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.optimizers import SGD, RMSprop, Adam
from keras import objectives
from keras.engine.topology import Layer
import numpy as np
class LayerKMultiply(Layer):
    """Custom Keras layer that multiplies its input tensor by a single
    trainable scalar weight ``k`` (initialized to 1).

    ``output_dim`` is accepted for backward compatibility with existing
    callers but is not needed: scalar multiplication never changes the
    shape of the input.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        # BUG FIX: original read `self.k = Null` — `Null` is not a Python
        # name and raises NameError; the sentinel is `None`.
        self.k = None
        super(LayerKMultiply, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create the single scalar trainable weight for this layer.
        self.k = self.add_weight(
            name='k',
            shape=(),
            initializer='ones',
            dtype='float32',
            trainable=True,
        )
        super(LayerKMultiply, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        # Broadcast the scalar over the whole input tensor.
        return self.k * x

    def compute_output_shape(self, input_shape):
        # BUG FIX: the original declared (batch, output_dim), but `call`
        # returns a tensor with exactly the input's shape — multiplying by
        # a scalar is shape-preserving. Declaring the wrong shape makes
        # downstream layers build against a shape that never materializes.
        return input_shape
timesteps, input_dim, latent_dim = 10, 3, 32

# Encoder: compress each (timesteps, input_dim) sequence into one latent vector.
inputs = Input(shape=(timesteps, input_dim))
encoded = LSTM(latent_dim, return_sequences=False, activation='linear')(inputs)

# Decoder: repeat the latent vector across time and unroll it back
# into a sequence of the original dimensionality.
decoded = RepeatVector(timesteps)(encoded)
decoded = LSTM(input_dim, return_sequences=True, activation='linear')(decoded)
decoded = TimeDistributed(Dense(input_dim, activation='linear'))(decoded)
# decoded = LayerKMultiply(k = 20)(decoded)

sequence_autoencoder = Model(inputs, decoded)
encoder = Model(inputs, encoded)
autoencoder = Model(inputs, decoded)
autoencoder.compile(optimizer='adam', loss='mse')

# One sample: three identical channels of the values 1..10,
# reshaped to (batch=1, timesteps=10, input_dim=3).
X = np.array([[[1,2,3,4,5,6,7,8,9,10],
               [1,2,3,4,5,6,7,8,9,10],
               [1,2,3,4,5,6,7,8,9,10]]])
X = X.reshape(1, 10, 3)

p = autoencoder.predict(x=X, batch_size=1)
print(p)
Я получаю следующую ошибку:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-b2f9497bbf47> in <module>()
7 decoded = LSTM(input_dim, return_sequences=True, activation='linear')(decoded)
8 decoded = TimeDistributed(Dense(input_dim, activation='linear'))(decoded)
----> 9 decoded = LayerKMultiply(k = 20)(decoded)
10
11 sequence_autoencoder = Model(inputs, decoded)
TypeError: __init__() missing 1 required positional argument: 'output_dim'
EDIT
Чего я хочу добиться, так это архитектуры, описанной на следующих изображениях:
https://github.com/mg64ve/SMTDAE/blob/master/images/SMTDAE.png
https://github.com/mg64ve/SMTDAE/blob/master/images/REF.png
Таким образом, в этом смысле я считаю, что слой умножения должен стоять до слоев TimeDistributed и Dense.
Я изменил код следующим образом:
import keras
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import Input, LSTM, RepeatVector, TimeDistributed
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.optimizers import SGD, RMSprop, Adam
from keras import objectives
from keras.engine.topology import Layer
import numpy as np
class LayerKMultiply(Layer):
    """Custom Keras layer that multiplies its input tensor by a single
    trainable scalar weight ``k`` (initialized to 1).

    ``output_dim`` is kept in the signature for backward compatibility,
    but it does not influence the output: scalar multiplication is
    shape-preserving.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.k = None
        super(LayerKMultiply, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create the single scalar trainable weight for this layer.
        self.k = self.add_weight(
            name='k',
            shape=(),
            initializer='ones',
            dtype='float32',
            trainable=True,
        )
        super(LayerKMultiply, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return self.k * x

    def compute_output_shape(self, input_shape):
        # BUG FIX: the original returned (batch, timesteps, output_dim)
        # with output_dim=20, but `call` returns `k * x`, whose last axis
        # is still input_dim (31). Declaring a shape that disagrees with
        # the actual tensor is what leads to the reported
        # "Incompatible shapes" InvalidArgumentError at run time.
        return input_shape
timesteps, input_dim, latent_dim = 31, 31, 32

# Encoder: compress each (31, 31) sequence into one latent vector.
inputs = Input(shape=(timesteps, input_dim))
encoded = LSTM(latent_dim, return_sequences=False, activation='linear')(inputs)

# Decoder: repeat the latent vector, unroll it, scale it by the trainable
# scalar, then project back to input_dim per timestep.
decoded = RepeatVector(timesteps)(encoded)
decoded = LSTM(input_dim, return_sequences=True, activation='linear')(decoded)
decoded = LayerKMultiply(20)(decoded)
decoded = TimeDistributed(Dense(input_dim, activation='linear'))(decoded)

autoencoder = Model(inputs, decoded)
# BUG FIX: compile() must be called BEFORE fit() — the original called
# fit() first, so the model had no loss/optimizer when training started.
autoencoder.compile(optimizer='adam', loss='mse')

batch_size = 100
X = np.zeros([5000, 31, 31])
autoencoder.fit(X, X, batch_size=batch_size, epochs=3)
Но я все еще получаю следующую ошибку:
InvalidArgumentError: Incompatible shapes: [155,31,31] vs. [100,31,31]