I am working on a project that trains a model to classify plankton images. However, when I run my code, it fails saying it cannot identify one of the image files in my folder. At first I thought there was a problem with the file name, so I renamed every JPG to PNG, but nothing changed. Every image in that folder works fine.
I am not using PIL myself, so I am still trying to figure out what my problem is, but nothing has changed. The folder exists and the image itself works fine.
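For reference, here is a minimal diagnostic sketch (separate from my training script) that walks the training folder and flags any file that PIL cannot open. The data_dir below is just my train_data_path; this assumes the problem is a corrupt or truncated image file rather than the extension.

# Diagnostic sketch: report every file under data_dir that PIL cannot open.
import os
from PIL import Image

data_dir = '/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/data/train'

bad_files = []
for root, _, files in os.walk(data_dir):
    for name in files:
        path = os.path.join(root, name)
        try:
            with Image.open(path) as img:
                img.verify()  # raises if the file is truncated or not a real image
        except (IOError, OSError) as exc:
            bad_files.append(path)
            print('Cannot read:', path, '->', exc)

print('Unreadable files found:', len(bad_files))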
Here is my code:
import sys
import os
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense, Activation
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras import callbacks
train_data_path = '/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/data/train'
validation_data_path = '/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/data/validation'
"""
Parameters
"""
img_width, img_height = 128, 128
batch_size = 16
samples_per_epoch = 1000
validation_steps = 300
nb_filters1 = 32
nb_filters2 = 64
conv1_size = 3
conv2_size = 2
pool_size = 2
classes_num = 3  # change this to the number of plankton class folders you have
lr = 0.0004
epochs = 20
model = Sequential()
model.add(Convolution2D(nb_filters1, conv1_size, conv1_size, border_mode ="same", input_shape=(img_width, img_height, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Convolution2D(nb_filters2, conv2_size, conv2_size, border_mode ="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size), dim_ordering='th'))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(classes_num, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=lr),
metrics=['accuracy'])
print(model.summary())
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical')
"""
Tensorboard log
"""
log_dir = '/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/data/validation'
tb_cb = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
cbks = [tb_cb]
model.fit_generator(
train_generator,
steps_per_epoch=samples_per_epoch,
epochs=epochs,
validation_data=validation_generator,
callbacks=cbks,
validation_steps=validation_steps)
target_dir = '/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/models'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
model.save('/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/models/model.h5')
model.save_weights('/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/models/weights.h5')
My error traceback:
Using TensorFlow backend.
/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/train.py:77: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(32, (3, 3), input_shape=(128, 128,..., padding="same")`
model.add(Convolution2D(nb_filters1, conv1_size, conv1_size, border_mode ="same", input_shape=(img_width, img_height, 3)))
/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/train.py:81: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (2, 2), padding="same")`
model.add(Convolution2D(nb_filters2, conv2_size, conv2_size, border_mode ="same"))
/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/train.py:83: UserWarning: Update your `MaxPooling2D` call to the Keras 2 API: `MaxPooling2D(pool_size=(2, 2), data_format="channels_first")`
model.add(MaxPooling2D(pool_size=(pool_size, pool_size), dim_ordering='th'))
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 128, 128, 32) 896
_________________________________________________________________
activation_1 (Activation) (None, 128, 128, 32) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 64, 64, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 64, 64, 64) 8256
_________________________________________________________________
activation_2 (Activation) (None, 64, 64, 64) 0
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 64, 32, 32) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 65536) 0
_________________________________________________________________
dense_1 (Dense) (None, 256) 16777472
_________________________________________________________________
activation_3 (Activation) (None, 256) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 256) 0
_________________________________________________________________
dense_2 (Dense) (None, 3) 771
=================================================================
Total params: 16,787,395
Trainable params: 16,787,395
Non-trainable params: 0
_________________________________________________________________
None
Found 6309 images belonging to 9 classes.
Found 891 images belonging to 9 classes.
Epoch 1/20
Traceback (most recent call last):
File "/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/train.py", line 124, in <module>
validation_steps=validation_steps)
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras/engine/training.py", line 1418, in fit_generator
initial_epoch=initial_epoch)
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras/engine/training_generator.py", line 181, in fit_generator
generator_output = next(output_generator)
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras/utils/data_utils.py", line 709, in get
six.reraise(*sys.exc_info())
File "/home/sexybeam/.local/lib/python3.6/site-packages/six.py", line 693, in reraise
raise value
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras/utils/data_utils.py", line 685, in get
inputs = self.queue.get(block=True).get()
File "/usr/lib/python3.6/multiprocessing/pool.py", line 670, in get
raise self._value
File "/usr/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras/utils/data_utils.py", line 626, in next_sample
return six.next(_SHARED_SEQUENCES[uid])
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py", line 100, in __next__
return self.next(*args, **kwargs)
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py", line 112, in next
return self._get_batches_of_transformed_samples(index_array)
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py", line 226, in _get_batches_of_transformed_samples
interpolation=self.interpolation)
File "/home/sexybeam/.local/lib/python3.6/site-packages/keras_preprocessing/image/utils.py", line 104, in load_img
img = pil_image.open(path)
File "/home/sexybeam/.local/lib/python3.6/site-packages/PIL/Image.py", line 2687, in open
% (filename if filename else fp))
OSError: cannot identify image file '/media/sexybeam/Suzuya/Study/Group-Project/Python/train et test/data/train/amoeba5/IFCB1_2009_0_5167.jpg'