I'm trying to extract features from a dataset using an encoder, but I get the following error:
Traceback (most recent call last):
  File "extract_features.py", line 104, in <module>
    features = features.reshape((features.shape[0], 512 * 7 * 7))
ValueError: cannot reshape array of size 200704 into shape (32,25088)
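Reading the numbers in the error, the array coming out of the encoder is far smaller than what the reshape expects (a quick scratch check, not part of extract_features.py):

# Scratch check of the sizes in the ValueError above
print(32 * 25088)    # 802816 -- elements the target shape (32, 25088) needs
print(200704 // 32)  # 6272   -- elements the array actually holds per image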
The encoder looks like this:
class ConvNetEncoder(tf.keras.Model):
    def __init__(self):
        super(ConvNetEncoder, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', padding='same')
        self.maxp1 = tf.keras.layers.MaxPooling2D((2, 2), padding='same')
        self.conv2 = tf.keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')
        self.maxp2 = tf.keras.layers.MaxPooling2D((2, 2), padding='same')
        self.conv3 = tf.keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')
        self.encoded = tf.keras.layers.MaxPooling2D((2, 2), padding='same')

    def call(self, x):
        x = self.conv1(x)
        x = self.maxp1(x)
        x = self.conv2(x)
        x = self.maxp2(x)
        x = self.conv3(x)
        x = self.encoded(x)
        return x
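For reference, I can reproduce the per-image size by pushing a dummy batch through the encoder (a minimal scratch check, assuming 224x224 RGB inputs like the ones loaded below):

import numpy as np

# Scratch check: what shape does the encoder emit for a 224x224 RGB image?
model = ConvNetEncoder()
dummy = np.zeros((1, 224, 224, 3), dtype="float32")
print(model(dummy).shape)  # (1, 28, 28, 8) -> 28 * 28 * 8 = 6272 values per image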
The following is the relevant part of the feature-extraction code:
# loop over the images in batches
for i in np.arange(0, len(imagePaths), bs):
    # extract the batch of images and labels, then initialize the
    # list of actual images that will be passed through the network
    # for feature extraction
    batchPaths = imagePaths[i:i + bs]
    batchLabels = labels[i:i + bs]
    batchImages = []

    # loop over the images and labels in the current batch
    for (j, imagePath) in enumerate(batchPaths):
        # load the input image using the Keras helper utility
        # while ensuring the image is resized to 224x224 pixels
        image = load_img(imagePath, target_size=(224, 224))
        image = img_to_array(image)

        # preprocess the image by (1) expanding the dimensions and
        # (2) subtracting the mean RGB pixel intensity from the
        # ImageNet dataset
        image = np.expand_dims(image, axis=0)
        image = imagenet_utils.preprocess_input(image)

        # add the image to the batch
        batchImages.append(image)

    # pass the images through the network and use the outputs as
    # our actual features
    batchImages = np.vstack(batchImages)
    features = model.predict(batchImages, batch_size=bs)

    # reshape the features so that each image is represented by
    # a flattened feature vector of the `MaxPooling2D` outputs
    features = features.reshape((features.shape[0], 512 * 7 * 7))

    # add the features and labels to our HDF5 dataset
    dataset.add(features, batchLabels)
    pbar.update(i)
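For what it's worth, the shape arithmetic for the encoder above with 224x224 inputs works out like this (a scratch calculation, not part of extract_features.py):

# After three 2x2 poolings with padding='same': 224 -> 112 -> 56 -> 28,
# and the last block has 8 channels, so each image yields 28 * 28 * 8 values
print(28 * 28 * 8)    # 6272  -- what model.predict() actually returns per image
print(512 * 7 * 7)    # 25088 -- what the reshape above assumes
print(128 * 7 * 7)    # 6272  -- numerically equal to 28 * 28 * 8, see below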
I'm using TensorFlow 2.0 and Python 3.7.6 on Windows.
After changing the reshape to features = features.reshape((features.shape[0], 128 * 7 * 7)), I got the following error:
Traceback (most recent call last):#### | ETA: 0:00:24
  File "extract_features.py", line 108, in <module>
    dataset.add(features, batchLabels)
  File "D:\Clones\feature_extraction_try\pyimagesearch\io\hdf5datasetwriter.py", line 37, in add
    self.flush()
  File "D:\Clones\feature_extraction_try\pyimagesearch\io\hdf5datasetwriter.py", line 42, in flush
    self.data[self.idx:i] = self.buffer["data"]
  File "h5py\_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
  File "h5py\_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
  File "C:\Users\sancy\Anaconda3\envs\tensorflow2\lib\site-packages\h5py\_hl\dataset.py", line 707, in __setitem__
    for fspace in selection.broadcast(mshape):
  File "C:\Users\sancy\Anaconda3\envs\tensorflow2\lib\site-packages\h5py\_hl\selections.py", line 299, in broadcast
    raise TypeError("Can't broadcast %s -> %s" % (target_shape, self.mshape))
TypeError: Can't broadcast (1024, 6272) -> (1024, 25088)
The traceback points at the following part of my hdf5datasetwriter.py:
def add(self, rows, labels):
    # add the rows and labels to the buffer
    self.buffer["data"].extend(rows)
    self.buffer["labels"].extend(labels)

    # check to see if the buffer needs to be flushed to disk
    if len(self.buffer["data"]) >= self.bufSize:
        self.flush()

def flush(self):
    # write the buffers to disk then reset the buffer
    i = self.idx + len(self.buffer["data"])
    self.data[self.idx:i] = self.buffer["data"]
    self.labels[self.idx:i] = self.buffer["labels"]
    self.idx = i
    self.buffer = {"data": [], "labels": []}
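My reading of the broadcast error is that the HDF5 dataset on disk was created 25088 columns wide, so the 6272-wide feature rows no longer fit when flush() writes them. A minimal h5py sketch that reproduces the same TypeError (hypothetical file and dataset names, not my actual writer):

import h5py
import numpy as np

# Scratch reproduction: dataset created 25088 wide, rows written 6272 wide
with h5py.File("scratch.hdf5", "w") as f:
    data = f.create_dataset("features", shape=(1024, 25088), dtype="float32")
    rows = np.zeros((1024, 6272), dtype="float32")
    data[0:1024] = rows  # TypeError: Can't broadcast (1024, 6272) -> (1024, 25088)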
Any help would be appreciated.