I am trying to preprocess the feature map coming out of an intermediate layer of a UNet. I get the error below when the model tries to concatenate. I tried reshaping the tensor, but it cannot be converted from (1, 352, 640, 64) to (None, 352, 640, 64), and the None dimension cannot be changed by reshape either. How can I solve this? I need to turn the 1 into None (or the other way around) so that the two shapes match.
ValueError: A Concatenate layer requires inputs with matching shapes except for the concat axis. Got inputs shapes: [(None, 352, 640, 64), (1, 352, 640, 64)]
import tensorflow as tf
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, UpSampling2D, concatenate
from keras.optimizers import Adam

def unet(pretrained_weights = None, input_size = (352,640,3)):
    inputs = Input(input_size)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    pool1 = RA_unit(x=pool1, h=pool1.shape[1].value, w=pool1.shape[2].value, n=16)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    # pool2 = RA_unit(x=pool2,n=16)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    # pool3 = RA_unit(x=pool3,n=16)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # pool4 = RA_unit(x=pool4,n=16)
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)
    up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
    merge6 = concatenate([drop4,up6], axis = 3)
    # merge6 = RA_unit(x=merge6,n=16)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
    up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
    merge7 = concatenate([conv3,up7], axis = 3)
    # merge7 = RA_unit(x=merge7,n=16)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
    up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
    merge8 = concatenate([conv2,up8], axis = 3)
    # merge8 = RA_unit(x=merge8,n=16)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
    up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
    merge9 = concatenate([conv1,up9], axis = 3)   # <- this is the concatenate that raises the error
    # merge9 = RA_unit(x=merge9,n=16)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv10 = Conv2D(3, 1, activation = 'sigmoid')(conv9)
    model = Model(inputs = inputs, outputs = conv10)
    model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])  # original 1e-4 | 2e-4 = 0.00020
    model.summary()
    if (pretrained_weights):
        model.load_weights(pretrained_weights)
    return model
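From the error I assume the shape with the fixed batch size of 1 comes out of the RA_unit call on pool1 (the function is defined below), because conv1 is the only skip connection created before that call and it still has batch dimension None. A quick check along these lines should confirm it (hypothetical snippet, same TF 1.x / standalone Keras setup as above):

x = Input((352, 640, 3))
c1 = Conv2D(64, 3, activation='relu', padding='same')(x)
p1 = MaxPooling2D(pool_size=(2, 2))(c1)
print(p1.shape)   # (?, 176, 320, 64)   -> batch dimension still None
r1 = RA_unit(x=p1, h=p1.shape[1].value, w=p1.shape[2].value, n=16)
print(r1.shape)   # (1, 176, 320, 1088) -> batch dimension collapsed to 1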
def RA_unit(x, h, w, n):
    x_1 = tf.nn.avg_pool(x, ksize=[1, h/n, 2, 1], strides=[1, h/n, 2, 1], padding="SAME")
    # NOTE: both the tf.zeros seed and the tf.slice size below hard-code a
    # batch size of 1, so every tensor concatenated with x afterwards ends up
    # with a static batch dimension of 1 instead of None.
    x_t = tf.zeros([1, h, w, 0], tf.float32)
    for k in range(n):
        x_t_1 = tf.slice(x_1, [0,k,0,0], [1,1,int(w/2),x.shape[3].value])
        x_t_2 = tf.image.resize_images(x_t_1, [h,w], 1)
        x_t_3 = tf.abs(x - x_t_2)
        x_t = tf.concat([x_t, x_t_3], axis=3)
    x_out = tf.concat([x, x_t], axis=3)
    return x_out
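For reference, this is the direction I am thinking of for a fix: rewrite RA_unit so that the batch dimension is never hard-coded to 1 (untested sketch, RA_unit_dynamic is just a placeholder name; the slice size -1 means "take everything along that axis"). Is this the right approach?

def RA_unit_dynamic(x, h, w, n):
    # Same computation as RA_unit, but the batch axis is left symbolic:
    # the per-slice differences are collected in a Python list instead of
    # being concatenated onto a tf.zeros seed with a fixed batch size of 1,
    # and tf.slice takes -1 (i.e. all samples) along the batch axis.
    c = x.shape[3].value
    x_1 = tf.nn.avg_pool(x, ksize=[1, h // n, 2, 1],
                         strides=[1, h // n, 2, 1], padding="SAME")
    diffs = []
    for k in range(n):
        x_t_1 = tf.slice(x_1, [0, k, 0, 0], [-1, 1, w // 2, c])
        x_t_2 = tf.image.resize_images(x_t_1, [h, w], 1)
        diffs.append(tf.abs(x - x_t_2))
    return tf.concat([x] + diffs, axis=3)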
def RA_unit_new(x, h, w, n):
    # Same as RA_unit, but with a trailing Conv2D that projects the widened
    # feature map back to the original channel count.
    x_1 = tf.nn.avg_pool(x, ksize=[1, h/n, 2, 1], strides=[1, h/n, 2, 1], padding="SAME")
    x_t = tf.zeros([1, h, w, 0], tf.float32)
    for k in range(n):
        x_t_1 = tf.slice(x_1, [0,k,0,0], [1,1,int(w/2),x.shape[3].value])
        x_t_2 = tf.image.resize_images(x_t_1, [h,w], 1)
        x_t_3 = tf.abs(x - x_t_2)
        x_t = tf.concat([x_t, x_t_3], axis=3)
    x_out = tf.concat([x, x_t], axis=3)
    conv = Conv2D(x.shape[3].value, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(x_out)
    return conv
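Alternatively, would it be cleaner to wrap the op in a Lambda layer, so that Keras tracks the output shape itself and keeps the batch dimension as None? Something along these lines (again a hypothetical, untested sketch built on RA_unit_dynamic above):

from keras.layers import Lambda

pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
h, w = pool1.shape[1].value, pool1.shape[2].value
pool1 = Lambda(lambda t: RA_unit_dynamic(t, h=h, w=w, n=16))(pool1)
# pool1 should now come out as (None, 176, 320, 1088) rather than (1, ...)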