I am trying to train a 3D CNN on the CIFAR-10 dataset, but I get the following error:
Traceback (most recent call last):
  File "kI3DV2s.py", line 286, in <module>
    callbacks=[])
  File "C:\Users\sancy\Anaconda3\envs\tensorflow\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "C:\Users\sancy\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\training.py", line 1732, in fit_generator
    initial_epoch=initial_epoch)
  File "C:\Users\sancy\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\training_generator.py", line 150, in fit_generator
    val_x, val_y, val_sample_weight)
  File "C:\Users\sancy\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\training.py", line 579, in _standardize_user_data
    exception_prefix='input')
  File "C:\Users\sancy\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\training_utils.py", line 135, in standardize_input_data
    'with shape ' + str(data_shape))
ValueError: Error when checking input: expected input_1 to have 5 dimensions, but got array with shape (10000, 32, 3, 32)
I think the problem is related to the input shape and the dimensions of x_train.
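To show what I mean, here is a minimal standalone check of the shapes (assuming Keras's default channels_last image_data_format with the TensorFlow backend):

import numpy as np
from keras.datasets import cifar10

(x_check, _), (_, _) = cifar10.load_data()
print(x_check.shape)                              # (50000, 32, 32, 3) on this backend

# As far as I can tell, (0, 2, 3, 1) is the reorder for Theano-style
# (N, channels, rows, cols) arrays; applied to channels_last data it
# produces exactly the shape reported in the error:
print(np.transpose(x_check, (0, 2, 3, 1)).shape)  # (50000, 32, 3, 32)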
The relevant part of the code is as follows:
import numpy as np
from keras.datasets import cifar10
from keras.layers import Input
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils

nb_classes = 10

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# reorder dimensions for tensorflow
x_train = np.transpose(x_train.astype('float32') / 255., (0, 2, 3, 1))
x_test = np.transpose(x_test.astype('float32') / 255., (0, 2, 3, 1))
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

img_rows, img_cols, img_rc = 32, 32, 32
img_channels = 3
inputs = Input(shape=(img_rows, img_cols, img_rc, img_channels))

# 3d cnn model
...
...
...

model = Model(input=inputs, output=predictions)
model.summary()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

batch_size = 128
nb_epoch = 10
data_augmentation = True

# Model saving callback
#checkpointer = ModelCheckpoint(filepath='stochastic_depth_cifar10.hdf5', verbose=1, save_best_only=True)

if not data_augmentation:
    print('Not using data augmentation.')
    history = model.fit(x_train, y_train,
                        batch_size=batch_size, nb_epoch=nb_epoch, verbose=1,
                        validation_data=(x_test, y_test), shuffle=True,
                        callbacks=[])
else:
    print('Using real-time data augmentation.')
    # realtime data augmentation
    datagen_train = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0,
        width_shift_range=0.125,
        height_shift_range=0.125,
        horizontal_flip=True,
        vertical_flip=False)
    datagen_train.fit(x_train)

    # fit the model on the batches generated by datagen.flow()
    history = model.fit_generator(datagen_train.flow(x_train, y_train, batch_size=batch_size, shuffle=True),
                                  samples_per_epoch=x_train.shape[0],
                                  nb_epoch=nb_epoch, verbose=1,
                                  validation_data=(x_test, y_test),
                                  callbacks=[])
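For reference, here is a quick diagnostic I added after building the model (not part of the training script), comparing what the model expects with what I actually pass:

# The model was built from Input(shape=(32, 32, 32, 3)), so Keras expects
# 5-D batches: (batch, rows, cols, depth, channels).
print(model.input_shape)  # (None, 32, 32, 32, 3)

# The transposed CIFAR-10 array is only 4-D, which triggers the ValueError above.
print(x_train.shape)      # (50000, 32, 3, 32)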
Can someone tell me what I am doing wrong and how to define the dimensions correctly? Thanks.
Note:
x_train shape: (50000, 32, 3, 32)
50000 train samples
10000 test samples
Windows 10
Python 3.7.6
Tensorflow-gpu==1.14
Keras==2.3.1
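(Versions confirmed with the usual check:)

import tensorflow as tf
import keras
print(tf.__version__)     # 1.14.0
print(keras.__version__)  # 2.3.1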