Я использую свёрточную нейронную сеть для обучения моей модели. Когда я тренирую модель, значения точности (acc), потерь (loss), val_acc и val_loss остаются одинаковыми на каждой эпохе. Я искал решение этой проблемы на GitHub, но не нашёл его. Помогите мне, пожалуйста, решить эту проблему.
Я использую концепцию сиамской сети: я определил две архитектуры, и у меня есть два набора данных по 48000 изображений в каждом для обучения; для проверки у меня 6000 изображений. Моя цель — объединить признаки (features) обеих архитектур. Вот мой код:
def VGG_16(weights_path=None, include_top=False):
    """Build a two-branch VGG-style classifier over a pair of 224x224x3 images.

    Each branch is five blocks of (zero-pad, 4x4 conv) x 2 + 2x2 max-pool with
    widths 64-128-256-512-512; the two flattened branch outputs are
    concatenated and fed through two Dense(512) layers into a 3-way softmax.

    NOTE(review): the two branches do NOT share weights (each call to
    `_tower` creates fresh layers), so this is a pseudo-siamese network.
    For a true siamese network, build the layers once and apply them to
    both inputs.

    BUG FIX: the original wired BOTH towers to the first input (`visible`),
    leaving the second input unused — the model never saw the second image,
    which can keep loss/accuracy frozen. The second tower now consumes
    `visible1`.

    Args:
        weights_path: optional path to a weights file; loaded into the model
            when given (the original accepted but silently ignored it).
        include_top: accepted for API compatibility; currently unused.

    Returns:
        An uncompiled ``Model`` with inputs ``[visible, visible1]`` and a
        3-class softmax output.
    """
    def _tower(inp):
        # One VGG-style convolutional tower. Note kernel_size=4 (the
        # original's choice) rather than canonical VGG-16's 3x3 kernels.
        x = inp
        for filters in (64, 128, 256, 512, 512):
            x = ZeroPadding2D((1, 1))(x)
            x = Conv2D(filters, kernel_size=4, activation='relu')(x)
            x = ZeroPadding2D((1, 1))(x)
            x = Conv2D(filters, kernel_size=4, activation='relu')(x)
            x = MaxPooling2D((2, 2), strides=(2, 2))(x)
        return Flatten()(x)

    visible = Input(shape=(224, 224, 3))
    visible1 = Input(shape=(224, 224, 3))
    flat = _tower(visible)
    flat1 = _tower(visible1)  # was _tower(visible) in the original — bug

    # Fuse both towers' features, then classify.
    merged = concatenate([flat, flat1])
    x = Dropout(0.5)(merged)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    output = Dense(3, activation='softmax')(x)

    model = Model(inputs=[visible, visible1], outputs=output)
    if weights_path:
        model.load_weights(weights_path)
    return model
# Build the two-branch model and print its layer summary.
model = VGG_16()
model.summary()
# Directory roots for the two paired datasets (CPRI and DATA_PV),
# each with a training and a validation split.
train='/home/project_deepak/deepak/siamese_network/Dataset/CPRI/train/'
train1='/home/project_deepak/deepak/siamese_network/Dataset/DATA_PV/data_pv/Train/'
val='/home/project_deepak/deepak/siamese_network/Dataset/CPRI/Validation/'
val1='/home/project_deepak/deepak/siamese_network/Dataset/DATA_PV/data_pv/Validation_data_PV/'
# Training-time augmentation: rescale to [0, 1] plus light shear/zoom/rotation
# and horizontal flips.
train_datagen = image.ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
rotation_range=5.,
horizontal_flip = True)
# Validation images are only rescaled — no augmentation.
val_imgen = image.ImageDataGenerator(rescale = 1./255)
def generate_generator_multiple(generator, dir1, dir2, batch_size, img_height, img_width):
    """Yield synchronized image batches from two directory trees.

    Both directory iterators are created with ``shuffle=False`` so that the
    i-th image of ``dir1`` stays paired with the i-th image of ``dir2``
    across epochs. The label batch of the SECOND stream is used as the shared
    target — this assumes both datasets are ordered identically and carry the
    same class labels (TODO: confirm against the directory layouts).

    Args:
        generator: an ``ImageDataGenerator``-like object exposing
            ``flow_from_directory``.
        dir1: root directory of the first image stream.
        dir2: root directory of the second image stream.
        batch_size: number of images per yielded batch.
        img_height: target image height in pixels.
        img_width: target image width in pixels.

    Yields:
        ``([batch1_images, batch2_images], batch2_labels)`` tuples, forever.
    """
    genX1 = generator.flow_from_directory(dir1,
                                          target_size=(img_height, img_width),
                                          class_mode='categorical',
                                          batch_size=batch_size,
                                          shuffle=False,
                                          seed=7)
    genX2 = generator.flow_from_directory(dir2,
                                          target_size=(img_height, img_width),
                                          class_mode='categorical',
                                          batch_size=batch_size,
                                          shuffle=False,
                                          seed=7)
    while True:
        # Built-in next() instead of the Python-2-style .next() method.
        X1i = next(genX1)
        X2i = next(genX2)
        # Yield both images and their mutual label.
        yield [X1i[0], X2i[0]], X2i[1]
# Hyper-parameters. These names were used but never defined in the original
# script, which would raise NameError before training even started.
batch_size = 32
img_height = 224  # must match the model's Input shape (224, 224, 3)
img_width = 224
epochs = 10

# Paired training and validation generators over the two dataset roots.
# (The original passed img_height for img_width — harmless only because the
# images are square; fixed here for clarity.)
train_generator = generate_generator_multiple(generator=train_datagen,
                                              dir1=train,
                                              dir2=train1,
                                              batch_size=batch_size,
                                              img_height=img_height,
                                              img_width=img_width)
val_generator = generate_generator_multiple(val_imgen,
                                            dir1=val,
                                            dir2=val1,
                                            batch_size=batch_size,
                                            img_height=img_height,
                                            img_width=img_width)

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=1e-4),
              metrics=['acc'])

# NOTE(review): the original call was syntactically broken — its closing
# parenthesis was inside the commented-out `#verbose=1)` line. Also,
# `shuffle=True` is ignored when the training data comes from a generator,
# so it has been dropped.
history = model.fit_generator(train_generator,
                              steps_per_epoch=1500,
                              epochs=epochs,
                              validation_data=val_generator,
                              validation_steps=375,
                              verbose=1)
> After running this code, I am getting the same values in every epoch:
Epoch 2/10
1500/1500 [==============================] - 1480s 987ms/step - loss: 10.7454 - acc: 0.3333 - val_loss: 10.7310 - val_acc: 0.3342
Epoch 3/10
1500/1500 [==============================] - 1479s 986ms/step - loss: 10.7454 - acc: 0.3333 - val_loss: 10.7310 - val_acc: 0.3342
Epoch 4/10
1500/1500 [==============================] - 1487s 991ms/step - loss: 10.7454 - acc: 0.3333 - val_loss: 10.7310 - val_acc: 0.3342
Epoch 5/10
1500/1500 [==============================] - 1469s 979ms/step - loss: 10.7454 - acc: 0.3333 - val_loss: 10.7310 - val_acc: 0.3342
Epoch 6/10
1500/1500 [==============================] - 1475s 983ms/step - loss: 10.7454 - acc: 0.3333 - val_loss: 10.7310 - val_acc: 0.3342
Epoch 7/10
1500/1500 [==============================] - 1482s 988ms/step - loss: 10.7454 - acc: 0.3333 - val_loss: 10.7310 - val_acc: 0.3342
Epoch 8/10
1500/1500 [==============================] - 1472s 981ms/step - loss: 10.7454 - acc: 0.3333 - val_loss: 10.7310 - val_acc: 0.3342
Epoch 9/10
1500/1500 [==============================] - 1468s 979ms/step - loss: 10.7454 - acc: 0.3333 - val_loss: 10.7310 - val_acc: 0.3342
Epoch 10/10
1500/1500 [==============================] - 1475s 983ms/step - loss: 10.7454 - acc: 0.3333 - val_loss: 10.7310 - val_acc: 0.3342