I have a problem with my CNN. I trained the model for 50 epochs (with BatchNorm and Dropout) and got 92% test accuracy. Then I retrained the same network for 100 epochs, with the same tuning and regularization techniques, and the test-set accuracy dropped to 79%. Because my dataset is small, I use data augmentation (horizontal and vertical flips). I can't explain this; can someone help?
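If the drop comes from overfitting somewhere past the best epoch, I suppose checkpointing on validation loss would at least make the two runs comparable. A minimal sketch of what I mean (assuming the tf.keras callback API; 'best_model.h5' is just a placeholder filename):

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

# Keep only the weights from the epoch with the lowest validation loss
checkpoint = ModelCheckpoint('best_model.h5', monitor='val_loss',
                             save_best_only=True, verbose=1)
# Stop once val_loss has not improved for 10 epochs, rolling back to the best weights
early_stop = EarlyStopping(monitor='val_loss', patience=10,
                           restore_best_weights=True)
# passed later via: model.fit(..., callbacks=[checkpoint, early_stop])

My full script: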
import os
import numpy as np
import tensorflow as tf
from numpy.random import seed

# Fix random seeds for (partial) reproducibility
seed(1)
tf.compat.v1.set_random_seed(2)

from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())

# One session is enough; GPUOptions/ConfigProto live under tf.compat.v1 in TF 2.x
gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options, log_device_placement=True)
session = tf.compat.v1.InteractiveSession(config=config)
# Import everything from tensorflow.keras (mixing the standalone keras package
# with tensorflow.keras leads to incompatible objects), and drop unused imports
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.layers import (Dense, Dropout, Activation, Flatten,
                                     BatchNormalization, Conv2D, MaxPooling2D)
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools

# Note: an initializer only takes effect when passed to a layer
# (e.g. Conv2D(..., kernel_initializer=glorot_init)); on its own it is a no-op
glorot_init = tf.keras.initializers.glorot_normal(seed=42)
train_path = 'C:/Users/Panagiotis Gkanos/Desktop/dataset/40X/train'
train_batches = ImageDataGenerator(rescale=1./255, horizontal_flip=True,
                                   vertical_flip=True).flow_from_directory(
    train_path,
    target_size=[400, 400],
    classes=['malignant', 'benign'],
    class_mode='categorical', batch_size=40)
valid_path = 'C:/Users/Panagiotis Gkanos/Desktop/dataset/40X/valid'
valid_batches = ImageDataGenerator(rescale=1./255).flow_from_directory(
    valid_path,
    target_size=[400, 400],
    classes=['malignant', 'benign'],
    class_mode='categorical', batch_size=20)

test_path = 'C:/Users/Panagiotis Gkanos/Desktop/dataset/40X/test'
test_batches = ImageDataGenerator(rescale=1./255).flow_from_directory(
    test_path,
    target_size=[400, 400],
    classes=['malignant', 'benign'],
    class_mode='categorical', batch_size=20)
model = Sequential()

# Block 1: 16 filters; the first conv downsamples with stride 2
model.add(Conv2D(16, (3, 3), strides=2, padding='same', input_shape=(400, 400, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(16, (3, 3), strides=1, padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(16, (3, 3), strides=1, padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))

# Block 2: 32 filters
model.add(Conv2D(32, (3, 3), strides=1, padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), strides=1, padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), strides=1, padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))

# Blocks 3 and 4: a single conv each, 64 then 128 filters
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))

# Classifier head: two dense layers with dropout, softmax over the 2 classes
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2, activation='softmax'))
model.summary()
# learn_control = ReduceLROnPlateau(monitor='val_accuracy', patience=5,
#                                   verbose=1, factor=0.2, min_lr=1e-7)
model.compile(optimizer=Adam(learning_rate=0.001),
              loss='categorical_crossentropy', metrics=['accuracy'])
# fit() accepts generators directly; fit_generator is deprecated
history = model.fit(train_batches, steps_per_epoch=20,
                    validation_data=valid_batches,
                    validation_steps=8, epochs=100)
# (add callbacks=[learn_control] to fit() to enable the schedule above)
model.evaluate(test_batches)
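# Side note on coverage (an assumption about the dataset size, not verified):
# with batch_size=40 and steps_per_epoch=20, only 800 augmented images are
# drawn per epoch. Deriving the step counts from the generators would cover
# the whole training set each epoch:
#
#     steps_per_epoch = len(train_batches)     # ceil(num_samples / batch_size)
#     validation_steps = len(valid_batches)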
def plot_loss(history):
    train_loss = history.history['loss']
    val_loss = history.history['val_loss']
    x = list(range(1, len(val_loss) + 1))
    plt.plot(x, val_loss, color='red', label='validation loss')
    plt.plot(x, train_loss, label='training loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Loss vs. Epoch')
    plt.legend()
    plt.show()
def plot_accuracy(history):
    # tensorflow.keras logs these under 'accuracy'/'val_accuracy', not 'acc'/'val_acc'
    train_acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    x = list(range(1, len(val_acc) + 1))
    plt.plot(x, val_acc, color='red', label='validation acc')
    plt.plot(x, train_acc, label='training acc')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Epoch')
    plt.legend()
    plt.show()
plot_loss(history)
plot_accuracy(history)
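By the way, confusion_matrix is imported above but never used; this is the per-class check I eventually want (a minimal sketch, assuming I add shuffle=False to the test generator so the labels line up with the predictions):

probs = model.predict(test_batches)   # softmax outputs, shape (N, 2)
preds = np.argmax(probs, axis=1)      # predicted class indices
# test_batches.classes holds the true labels in generator order;
# this only matches preds if the test generator does not shuffle
print(confusion_matrix(test_batches.classes, preds))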