I set up VGG16 to classify my own dataset (3 classes), and although training seemed to go well (high training and validation accuracy during training, and on the test set after training finished), both model.evaluate() and the confusion matrix show poor results.
Model file
-------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
# loading the directories
# importing the libraries
from keras.models import Model
from keras.layers import Flatten, Dense
from keras.applications import VGG16
#from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
num_classes=3
IMAGE_SIZE = [224, 224] # VGG16 was trained on 224x224 inputs, so we keep this size
# loading the weights of VGG16 without the top layer; these weights were trained on the ImageNet dataset
vgg = VGG16(input_shape = (224, 224, 3), weights = 'imagenet', include_top = False)
# exclude the pretrained layers from the training phase, as they have already been trained
for layer in vgg.layers:
    layer.trainable = False
x = Flatten()(vgg.output)
x = Dense(128, activation = 'relu')(x) # new fully connected layer (adds some execution time)
x = Dense(64, activation = 'relu')(x)
x = Dense(num_classes, activation = 'softmax')(x) # output layer with softmax, since this is a multi-class classification problem
model = Model(inputs = vgg.input, outputs = x)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
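# optional check (illustrative, not part of the original script): verify that the VGG base
# is frozen and only the new Flatten/Dense head will be trained
model.summary()
print('trainable layers:', [layer.name for layer in model.layers if layer.trainable])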
IMAGE_SHAPE = (224, 224, 3)
batch_size = 32
model_weights_file = 'vgg16-xfer-weights.h5'
train_data_dir = 'train'
val_data_dir = 'test'
# the three classes in this dataset
CLASS_NAMES = ['dog','cat','monkey']
image_generator = ImageDataGenerator(rescale=1/255)
# make the training dataset generator
train_data_gen = image_generator.flow_from_directory(train_data_dir, batch_size=batch_size,
                                                      classes=list(CLASS_NAMES),
                                                      target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]),
                                                      shuffle=True, class_mode='categorical')
# make the validation dataset generator
test_data_gen = image_generator.flow_from_directory(val_data_dir, batch_size=batch_size,
                                                     classes=list(CLASS_NAMES),
                                                     target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]),
                                                     shuffle=True, class_mode='categorical')
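# optional sanity check (illustrative, not in the original script): confirm that both
# generators map the folder names to the same label indices, in the order given above
print(train_data_gen.class_indices)  # expected: {'dog': 0, 'cat': 1, 'monkey': 2}
print(test_data_gen.class_indices)   # should be identical to the training mapping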
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
epochs = 3
model_name = "vgg16-xfer-weights"
tensorboard = TensorBoard(log_dir=f"logs/{model_name}")
checkpoint = ModelCheckpoint(f"results/{model_name}" + "-loss-{val_loss:.2f}-acc-{val_acc:.2f}.h5",
                             save_best_only=True,
                             verbose=1)
training_steps_per_epoch = np.ceil(train_data_gen.samples / batch_size)
validation_steps_per_epoch = np.ceil(test_data_gen.samples / batch_size)
model.fit_generator(train_data_gen, steps_per_epoch=training_steps_per_epoch,
                    validation_data=test_data_gen, validation_steps=validation_steps_per_epoch,
                    epochs=epochs, verbose=1)
print('Training Completed!')
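# the model.evaluate() run mentioned in the question is not shown here; a minimal sketch
# of how it could be done on the same test generator (illustrative assumption, tf.keras API):
test_loss, test_acc = model.evaluate(test_data_gen, steps=int(validation_steps_per_epoch))
print('test loss:', test_loss, 'test accuracy:', test_acc)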
Training result
-------------------
Epoch 1/3
323/323 [==============================] - 3992s 12s/step - loss: 0.6893 - accuracy: 0.7161 - val_loss: 0.4685 - val_accuracy: 0.8141
Epoch 2/3
323/323 [==============================] - 3589s 11s/step - loss: 0.4335 - accuracy: 0.8246 - val_loss: 0.4464 - val_accuracy: 0.8154
Epoch 3/3
323/323 [==============================] - 3441s 11s/step - loss: 0.3152 - accuracy: 0.8754 - val_loss: 0.2095 - val_accuracy: 0.9304
Training Completed!
Confusion matrix result
------------------------
Y_pred = model.predict(test_data_gen, test_data_gen.samples / batch_size)
val_preds = np.argmax(Y_pred, axis=1)
import sklearn.metrics as metrics
val_trues = test_data_gen.classes
cm = metrics.confusion_matrix(val_trues, val_preds)
cm
array([[824, 764, 528],
       [886, 766, 545],
       [508, 484, 344]], dtype=int64)
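For reference, the overall accuracy implied by this matrix can be read off its diagonal; a minimal sketch using the cm variable from above (not part of the original run):
import numpy as np
print(np.trace(cm) / cm.sum())  # roughly 0.34 for the matrix above, i.e. barely above chance for 3 classes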
Can someone please help me?