Я пытаюсь создать вход следующим образом:
# Spectrogram dimensions: Tx time steps, n_freq frequency bins per step.
Tx = 318
n_freq = 101
# Symbolic Keras input of shape (batch, n_freq, Tx); the batch dimension
# is left as None (see the TensorShape printed below).
input_anchor = Input(shape=(n_freq,Tx), name='input_anchor')
Когда я выполняю:
input_anchor.shape
Я получаю:
TensorShape([None, 101, 318])
Позже, когда я пытаюсь использовать этот вход в моей модели, я получаю следующую ошибку:
TypeError: Cannot iterate over a tensor with unknown first dimension.
В файле ops.py исходного кода TensorFlow я нашёл блок кода, в котором, скорее всего, и происходит сбой:
def __iter__(self):
    """Yield sub-tensors along axis 0; only permitted in eager mode."""
    # Symbolic (graph-mode) tensors cannot be iterated at all.
    if not context.executing_eagerly():
        raise TypeError(
            "Tensor objects are only iterable when eager execution is "
            "enabled. To iterate over this tensor use tf.map_fn.")
    dims = self._shape_tuple()
    if dims is None:
        raise TypeError("Cannot iterate over a tensor with unknown shape.")
    if not dims:
        raise TypeError("Cannot iterate over a scalar tensor.")
    # A leading dimension of None means the iteration count is unknown.
    if dims[0] is None:
        raise TypeError(
            "Cannot iterate over a tensor with unknown first dimension.")
    # Yield one slice per position along the leading dimension.
    for idx in xrange(dims[0]):
        yield self[idx]
Если вы хотите увидеть всю мою реализацию модели, вот она:
def base_model(input_shape):
    """Build the shared audio-embedding network.

    Args:
        input_shape: tuple ``(n_freq, Tx)`` — shape of one spectrogram,
            without the batch dimension.

    Returns:
        A Keras ``Model`` mapping a spectrogram to a 64-dim embedding.
    """
    X_input = Input(shape=input_shape)

    # Step 1: CONV layer.
    X = Conv1D(196, kernel_size=15, strides=4)(X_input)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Dropout(rate=0.2)(X)

    # Step 2: first recurrent layer (LSTM is used even though the original
    # comments said GRU).
    # BUG FIX: the original fed X_input here, silently discarding the
    # entire Conv1D block above; it must consume X instead.
    X = LSTM(units=128, return_sequences=True)(X)
    X = Dropout(rate=0.2)(X)
    X = BatchNormalization()(X)

    # Step 3: second recurrent layer.
    X = LSTM(units=128, return_sequences=True)(X)
    X = Dropout(rate=0.2)(X)
    X = BatchNormalization()(X)
    X = Dropout(rate=0.2)(X)

    # Step 4: third recurrent layer — no return_sequences, so the output
    # collapses to one vector per example.
    X = LSTM(units=128)(X)
    X = Dropout(rate=0.2)(X)
    X = BatchNormalization()(X)
    X = Dropout(rate=0.2)(X)

    # Final projection to the 64-dim embedding.
    X = Dense(64)(X)

    # Return directly instead of binding to a local that shadows the
    # function's own name.
    return Model(inputs=X_input, outputs=X)
def speech_model(input_shape, base_model):
    """Build the triplet network around a shared embedding model.

    Args:
        input_shape: shape of one spectrogram, without the batch dim.
        base_model: a Keras ``Model`` instance (NOT the factory function)
            mapping a spectrogram to an embedding vector; applying the
            same instance to all three inputs shares its weights.

    Returns:
        A Keras ``Model`` taking (anchor, positive, negative) inputs and
        returning the three embeddings concatenated on the last axis.
    """
    # One symbolic input per element of the triplet.
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_positive')
    input_negative = Input(shape=input_shape, name='input_negative')

    # Shared-weight embeddings of the three inputs.
    vec_anchor = base_model(input_anchor)
    vec_positive = base_model(input_positive)
    vec_negative = base_model(input_negative)

    # Concatenate the three embedding vectors along the feature axis.
    concat_layer = concatenate(
        [vec_anchor, vec_positive, vec_negative],
        axis=-1, name='concat_layer')

    return Model(inputs=[input_anchor, input_positive, input_negative],
                 outputs=concat_layer, name='speech_to_vec')
И вот строка, которая всё ломает и вызывает упомянутую выше ошибку:
speech_model = speech_model(input_shape = (n_freq, Tx), base_model = base_model)
Большое спасибо за чтение, любая помощь в решении этой проблемы очень ценится.