I was trying to train a model for binary classification, but after 3 epochs the validation accuracy was still stuck at 0.5000.
The dataset consists of 1512 images per class, 3024 images in total. I used Keras to do transfer learning with the VGG16 model.
from keras import models
from keras import layers
from keras import optimizers
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
# Stop when we stop learning.
early_stopper = EarlyStopping(patience=10)
# tensorboard
tensorboard = TensorBoard(log_dir='./logs')
model_check_point = ModelCheckpoint('vgg16.h5', save_best_only=True)
train_dir = 'dataset\\training_set'
validation_dir = 'dataset\\validation_set'
image_size_x = 360
image_size_y = 180
#Load the VGG model
vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(image_size_x, image_size_y, 3))
# Freeze the layers except the last 4 layers
for layer in vgg_conv.layers[:-4]:
    layer.trainable = False
# Create the model
model = models.Sequential()
# Add the vgg convolutional base model
model.add(vgg_conv)
# Add new layers
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
# Change the batchsize according to your system RAM
train_batchsize = 4
val_batchsize = 4
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(image_size_x, image_size_y),
    batch_size=train_batchsize,
    class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(
    validation_dir,
    target_size=(image_size_x, image_size_y),
    batch_size=val_batchsize,
    class_mode='binary')
# Compile the model
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.Adam(lr=0.1),
              metrics=['acc'])
# Train the model
history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples/train_generator.batch_size,
    epochs=7,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples/validation_generator.batch_size,
    verbose=1,
    callbacks=[tensorboard, early_stopper, model_check_point])
The output was:
Using TensorFlow backend.
2019-02-03 14:46:06.520723: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
Found 2416 images belonging to 2 classes.
Found 608 images belonging to 2 classes.
Epoch 1/7
604/604 [==============================] - 2046s 3s/step - loss: 8.0208 - acc: 0.5008 - val_loss: 8.0590 - val_acc: 0.5000
Epoch 2/7
604/604 [==============================] - 1798s 3s/step - loss: 8.0055 - acc: 0.5033 - val_loss: 8.0590 - val_acc: 0.5000
Epoch 3/7
604/604 [==============================] - 2500s 4s/step - loss: 8.0054 - acc: 0.5033 - val_loss: 8.0590 - val_acc: 0.5000
I tried increasing the learning rate from 0.0001 to 0.01 and tried a different optimizer (RMSprop), but the validation accuracy still stays at 0.5000. Here is the model summary:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
inception_v3 (Model) (None, 9, 4, 2048) 21802784
=================================================================
Total params: 21,802,784
Trainable params: 0
Non-trainable params: 21,802,784
_________________________________________________________________
None
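For reference, one of the optimizer variants I tried looked roughly like this (a sketch; the exact learning-rate value differed between runs, anywhere from 0.0001 to 0.01):

# RMSprop variant, lower learning rate -- validation accuracy still stayed at 0.5000
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=0.0001),
              metrics=['acc'])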