Я строю UNet с EfficientNet в качестве кодера. Однако всякий раз, когда я начинаю обучение, я получаю ошибку вида: «Ошибка при проверке цели: ожидалось, что conv2d_30 будет иметь форму (224, 224, 1), но получен массив с формой (224, 224, 3)». При этом в коде последний слой имеет 1 выходной канал с сигмоидной (sigmoid) функцией активации. Я застрял на этом и буду признателен за помощь экспертов.
import efficientnet.keras as efn
from keras.layers import LeakyReLU, Add, Input, MaxPool2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout
from keras.layers.convolutional import Conv2D
from keras.models import Model
from keras.optimizers import Adam
# Pre-trained EfficientNet-B0 encoder: ImageNet weights, classifier head removed,
# fixed 224x224 RGB input (the decoder below upsamples back to 224x224).
base_model = efn.EfficientNetB0(weights='imagenet',include_top=False,input_shape=(224,224,3))
# NOTE(review): `input` shadows the Python builtin; kept as-is because the
# Model(...) construction further down reuses this exact name.
input = base_model.input
start_neurons = 8    # base channel width; each decoder stage uses a multiple of this
dropout_ratio = 0.1  # dropout applied right after every skip-connection concat
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    """Conv2D -> BatchNorm, optionally followed by LeakyReLU(alpha=0.1).

    Args:
        x: input 4-D tensor.
        filters: number of output channels of the convolution.
        size: kernel size, e.g. (3, 3).
        strides: convolution strides.
        padding: convolution padding mode.
        activation: when True, append a LeakyReLU after the BatchNorm.

    Returns:
        The transformed tensor.
    """
    # NOTE(review): the pasted original had this body at column 0, which is an
    # IndentationError in Python; re-indented, logic unchanged.
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    if activation:  # truthiness check instead of the non-idiomatic `== True`
        x = LeakyReLU(alpha=0.1)(x)
    return x
def residual_block(blockInput, num_filters=16):
    """Pre-activation residual block.

    Applies LeakyReLU + BatchNorm to the input, runs two 3x3
    convolution_blocks (the second without activation), and adds the
    batch-normalized input back as an identity skip connection.

    Args:
        blockInput: input 4-D tensor; its channel count must equal
            ``num_filters`` for the final Add to be shape-compatible.
        num_filters: channel count of both convolutions.

    Returns:
        Tensor of the same spatial shape with ``num_filters`` channels.
    """
    # NOTE(review): the pasted original had this body at column 0, which is an
    # IndentationError in Python; re-indented, logic unchanged.
    x = LeakyReLU(alpha=0.1)(blockInput)
    x = BatchNormalization()(x)
    blockInput = BatchNormalization()(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    return x
# Skip-connection taps pulled out of EfficientNet-B0 by layer name.
# (These names are B0-specific; other EfficientNet variants use different names.)
conv5 = base_model.get_layer('top_activation').output
conv4 = base_model.get_layer('block6a_expand_activation').output
conv3 = base_model.get_layer('block4a_expand_activation').output
conv2 = base_model.get_layer('block3a_expand_activation').output
conv1 = base_model.get_layer('block2a_expand_activation').output
# Bottleneck at 7x7 spatial resolution.
convm = Conv2D(start_neurons * 32, (3, 3), activation=None, padding="same",name='conv_middle')(conv5)
convm = residual_block(convm,start_neurons * 32)
convm = residual_block(convm,start_neurons * 32)
convm = LeakyReLU(alpha=0.1)(convm) #7*7
# Decoder stage: upsample to 14x14, concat encoder skip, conv + residual refine.
deconv4 = Conv2DTranspose(start_neurons*16,(3,3),strides=(2,2),padding="same")(convm)
uconv4 = concatenate([deconv4,conv4])
uconv4 = Dropout(dropout_ratio)(uconv4)
uconv4 = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(uconv4)
uconv4 = residual_block(uconv4,start_neurons * 16)
uconv4 = LeakyReLU(alpha=0.1)(uconv4)
# Decoder stage: 28x28.
deconv3 = Conv2DTranspose(start_neurons*8,(3,3),strides=(2,2),padding="same")(uconv4)
uconv3 = concatenate([deconv3,conv3])
uconv3 = Dropout(dropout_ratio)(uconv3)
uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = residual_block(uconv3,start_neurons * 8)
uconv3 = LeakyReLU(alpha=0.1)(uconv3)
# Decoder stage: 56x56.
deconv2 = Conv2DTranspose(start_neurons*4,(3,3),strides=(2,2),padding="same")(uconv3)
uconv2 = concatenate([deconv2,conv2])
uconv2 = Dropout(dropout_ratio)(uconv2)
uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = residual_block(uconv2,start_neurons * 4)
uconv2 = LeakyReLU(alpha=0.1)(uconv2)
# Decoder stage: 112x112.
deconv1 = Conv2DTranspose(start_neurons*2,(3,3),strides=(2,2),padding="same")(uconv2)
uconv1 = concatenate([deconv1,conv1])
uconv1 = Dropout(dropout_ratio)(uconv1)
uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = residual_block(uconv1,start_neurons * 2)
uconv1 = LeakyReLU(alpha=0.1)(uconv1)
# Final stage: 224x224 — no encoder skip available at full resolution.
deconv0 = Conv2DTranspose(start_neurons*1,(3,3),strides=(2,2),padding="same")(uconv1)
uconv0 = Dropout(dropout_ratio)(deconv0)
uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv0)
uconv0 = residual_block(uconv0,start_neurons * 1)
uconv0 = LeakyReLU(alpha=0.1)(uconv0)
# Single-channel sigmoid head: the model output really IS (224, 224, 1).
# NOTE(review): the reported error "expected conv2d_30 to have shape
# (224, 224, 1) but got array with shape (224, 224, 3)" is about the TARGET,
# not the model — the generator is feeding 3-channel (RGB) masks as y. Load
# the masks as single-channel grayscale instead; the model itself is fine.
output_layer = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv0)
model = Model(input,output_layer)
print(model.summary())
# Binary segmentation loss to match the 1-channel sigmoid output.
# Requires `from keras.optimizers import Adam` at the top of the file — the
# original script used Adam without importing it (NameError).
model.compile(optimizer=Adam(lr = 0.00001), loss="binary_crossentropy", metrics=["accuracy"])
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# Stop after 10 stale epochs; decay LR 10x after 4 stale epochs (floor 1e-6);
# checkpoint only the best weights (weights-only, so loading needs the model code).
callbacks = [EarlyStopping(patience=10, verbose=1),
ReduceLROnPlateau(factor=0.1, patience=4, min_lr=0.000001, verbose=1),
ModelCheckpoint('Efficient_original.h5', verbose=1, save_best_only=True, save_weights_only=True)]
# NOTE(review): train_generator/valid_generator are defined elsewhere. The
# target-shape error means they yield (224, 224, 3) masks — presumably masks
# read as RGB. Make them yield (224, 224, 1), e.g. color_mode='grayscale' in
# flow_from_directory, or np.expand_dims(mask[..., 0], -1) in a custom
# generator — TODO confirm against the generator code.
results = model.fit_generator(train_generator, validation_data=valid_generator, validation_steps=50, steps_per_epoch=100,
epochs=100, callbacks=callbacks)
Я хочу решить эту проблему. Пожалуйста, исправьте, где я сделал ошибку.