При реализации LSTM в Keras входное one-hot-кодирование формы x.shape = (None, 78) преобразуется к форме (None, 1, 78), но я не уверен, почему это преобразование требуется в Keras.
Я смотрю на приведенный ниже пример кода для курса DL:
# Shared layer objects, created ONCE at module level so the exact same weights
# are reused at every time step of the unrolled loop inside djmodel.
# reshapor: (batch, 78) -> (batch, 1, 78) — re-inserts the time axis that an
# LSTM layer requires (LSTM input must be 3-D: batch, timesteps, features).
reshapor = Reshape((1, 78))
# LSTM cell with return_state=True: a call returns (output, hidden_state, cell_state).
# NOTE(review): n_a and n_values are assumed to be defined earlier in the
# notebook/module (per the string below: n_a = 64, n_values = 78) — confirm.
LSTM_cell = LSTM(n_a, return_state = True)
# Projects the hidden state to a softmax distribution over the n_values classes.
densor = Dense(n_values, activation='softmax')
"""
Tx: 30
n_a = 64
n_values = 78
"""
def djmodel(Tx, n_a, n_values):
    """
    Build a Keras model that unrolls one shared LSTM cell over Tx time steps.

    Arguments:
    Tx -- length of the sequence in a corpus
    n_a -- the number of activations (LSTM units) used in our model
    n_values -- number of unique values in the music data

    Returns:
    model -- a Keras Model mapping [X, a0, c0] to a list of Tx softmax outputs

    Read this for input shape and dim on Keras:
    https://stackoverflow.com/questions/44747343/keras-input-explanation-input-shape-units-batch-size-dim-etc
    """
    # Define the input of the model: one-hot sequences of shape (batch, Tx, n_values).
    X = Input(shape=(Tx, n_values))

    # Initial hidden (a0) and cell (c0) states for the LSTM.
    a0 = Input(shape=(n_a,), name='a0')
    c0 = Input(shape=(n_a,), name='c0')
    a = a0
    c = c0

    # Step 1: empty list to collect the per-step outputs.
    outputs = []

    # Step 2: loop over time, reusing the SAME shared layers at every step.
    for t in range(Tx):
        # Step 2.A: select the "t"-th time step vector from X.
        # BUG FIX: the lambda must slice its own input `z`, not close over the
        # outer tensor X (the original `lambda x: X[:, t, :]` ignored the layer
        # input entirely). Binding `t=t` as a default also avoids the classic
        # late-binding-closure pitfall should the function be evaluated later.
        x = Lambda(lambda z, t=t: z[:, t, :])(X)
        # Step 2.B: the slice above is 2-D (batch, n_values), but an LSTM layer
        # expects a 3-D tensor (batch, timesteps, features). Reshape restores
        # the dropped time axis as a single step: (batch, 1, n_values). This is
        # exactly why the (None, 78) -> (None, 1, 78) conversion is required.
        x = reshapor(x)
        # Step 2.C: one step of the shared LSTM cell; feed back a and c.
        a, _, c = LSTM_cell(x, initial_state=[a, c])
        # Step 2.D: apply densor to the hidden state output of LSTM_cell.
        out = densor(a)
        # Step 2.E: add the output to "outputs".
        outputs.append(out)

    # Step 3: create the model instance.
    model = Model(inputs=[X, a0, c0], outputs=outputs)
    return model
Я хотел бы понять, зачем в этом примере кода выполняется изменение формы (reshape) перед подачей вектора в LSTM.
Спасибо.