Can anyone recommend the best way to fix this type of error? I can't figure out what I did wrong with my dimensions. I have a pretrained embedding that comes from a gensim Word2Vec model, which I want to use to initialize a CNN. Sorry for the relatively basic question, but I'm very new to Keras and TensorFlow.
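For context, here is roughly how the vocabulary and embedding matrix are built (simplified sketch; the Word2Vec load path and word_index, which comes from my Keras Tokenizer, are stand-ins for my actual preprocessing):

import numpy as np
from gensim.models import Word2Vec

w2v = Word2Vec.load('w2v.model')   # placeholder path to the pretrained model
word_index = tokenizer.word_index  # tokenizer fitted on the training texts (assumed)
vocab_size = len(word_index)

# one row per vocabulary word; words missing from the Word2Vec model stay all-zero
embedding_matrix = np.zeros((vocab_size, w2v.vector_size))
for word, i in word_index.items():
    if word in w2v.wv:
        embedding_matrix[i - 1] = w2v.wv[word]  # word_index starts at 1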
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Embedding, Conv1D, MaxPooling1D,
                                     GlobalMaxPooling1D, Dropout, Dense)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import regularizers
# CNN architecture
num_classes = num_labels
# Training params
batch_size = 8
num_epochs = 25
# Model parameters
num_filters = 64
weight_decay = 1e-4
kernel_size = 7  # convolution window size; chosen to match the Word2Vec window size (unsure if that is needed)
print("training CNN ...")
model = Sequential()
#------------------------
FIXED_LENGTH = embedding_matrix.shape[1]
#------------------------
print('Vocab size:', vocab_size)
print('Output_Dim size:', w2v.vector_size)
print('Weights:', pd.Series([embedding_matrix]).shape)
print('Weights underlying shape:', embedding_matrix.shape)
print("Input Length:", FIXED_LENGTH)
#Model add word2vec embedding
model.add(Embedding(vocab_size+1,
                    output_dim=w2v.vector_size,
                    weights=[embedding_matrix],
                    input_length=FIXED_LENGTH,
                    trainable=False))
model.add(Conv1D(num_filters, kernel_size=kernel_size, activation='relu', padding='same'))
model.add(MaxPooling1D(2))
model.add(Conv1D(num_filters, 7, activation='relu', padding='same'))
model.add(GlobalMaxPooling1D())
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Dense(num_classes, activation='softmax')) #multi-label (k-hot encoding)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='sparse_categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
model.summary()
#define callbacks
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=4, verbose=1)
callbacks_list = [early_stopping]
print('Batch size:', batch_size)
print('Num of Epochs:', num_epochs)
print('X Train Size:', x_train_pad.shape)
print('Y Train Size:', y_train.shape)
hist = model.fit(x_train_pad,
                 y_train,
                 batch_size=batch_size,
                 epochs=num_epochs,
                 callbacks=callbacks_list,
                 validation_split=0.1,
                 shuffle=True,
                 verbose=2)
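For reference, x_train_pad is produced by padding the tokenized training sequences (again a simplified sketch; train_sequences stands in for my tokenizer output):

from tensorflow.keras.preprocessing.sequence import pad_sequences

# pad/truncate every tokenized document to the same fixed length
x_train_pad = pad_sequences(train_sequences, maxlen=FIXED_LENGTH, padding='post')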
Output:
training CNN ...
Vocab size: 32186
Output_Dim size: 100
Weights: (1,)
Weights underlying shape: (32186, 100)
Input Length: 100
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-326-36db7b551866> in <module>()
31 weights=[embedding_matrix],
32 input_length=FIXED_LENGTH,
---> 33 trainable=False))
34 model.add(Conv1D(num_filters, kernel_size=kernel_size, activation='relu', padding='same'))
35 model.add(MaxPooling1D(2))
c:\users\tt\anaconda3b\lib\site-packages\tensorflow_core\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
455 self._self_setattr_tracking = False # pylint: disable=protected-access
456 try:
--> 457 result = method(self, *args, **kwargs)
458 finally:
459 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
c:\users\tt\anaconda3b\lib\site-packages\tensorflow_core\python\keras\engine\sequential.py in add(self, layer)
176 # and create the node connecting the current layer
177 # to the input layer we just created.
--> 178 layer(x)
179 set_inputs = True
180
c:\users\tt\anaconda3b\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
815 # Build layer if applicable (if the `build` method has been
816 # overridden).
--> 817 self._maybe_build(inputs)
818 cast_inputs = self._maybe_cast_inputs(inputs)
819
c:\users\tt\anaconda3b\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py in _maybe_build(self, inputs)
2146 # Optionally load weight values specified at layer instantiation.
2147 if getattr(self, '_initial_weights', None) is not None:
-> 2148 self.set_weights(self._initial_weights)
2149 self._initial_weights = None
2150
c:\users\tt\anaconda3b\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py in set_weights(self, weights)
1334 raise ValueError('Layer weight shape ' + str(ref_shape) +
1335 ' not compatible with '
-> 1336 'provided weight shape ' + str(w.shape))
1337 weight_value_tuples.append((p, w))
1338 backend.batch_set_value(weight_value_tuples)
ValueError: Layer weight shape (32187, 100) not compatible with provided weight shape (32186, 100)