I am coding a UNet for images of different sizes and I am using a data generator. The UNet looks like this:
import keras  # assumed import; the code uses the keras.* namespace (may also be `from tensorflow import keras`)

class UNet():
    def __init__(self):
        print('build UNet ...')

    def get_crop_shape(self, target, refer):
        # Compute how much to crop `target` so it matches `refer` spatially.
        # width, the 3rd dimension
        cw = (target.get_shape()[2] - refer.get_shape()[2]).value
        # height, the 2nd dimension
        ch = (target.get_shape()[1] - refer.get_shape()[1]).value
        if cw is None and ch is None:
            # spatial dimensions are not known statically
            print(cw, ch)
            return (0, 0), (0, 0)
        else:
            print(ch, cw)
            assert cw >= 0
            if cw % 2 != 0:
                cw1, cw2 = int(cw / 2), int(cw / 2) + 1
            else:
                cw1, cw2 = int(cw / 2), int(cw / 2)
            assert ch >= 0
            if ch % 2 != 0:
                ch1, ch2 = int(ch / 2), int(ch / 2) + 1
            else:
                ch1, ch2 = int(ch / 2), int(ch / 2)
            return (ch1, ch2), (cw1, cw2)

    def create_model(self, img_shape, num_class):
        concat_axis = 3

        inputs = keras.layers.Input(shape=(None, None, 1))
        # inputs = keras.layers.Input(shape=img_shape)

        # Encoder
        conv1 = keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same', name='conv1_1')(inputs)
        conv1 = keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

        # Decoder: upsample, crop the skip connection to match, then concatenate
        up_conv5 = keras.layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = keras.layers.Cropping2D(cropping=(ch, cw))(conv4)
        up6 = keras.layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
        conv6 = keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

        up_conv6 = keras.layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = keras.layers.Cropping2D(cropping=(ch, cw))(conv3)
        up7 = keras.layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
        conv7 = keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

        up_conv7 = keras.layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = keras.layers.Cropping2D(cropping=(ch, cw))(conv2)
        up8 = keras.layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
        conv8 = keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

        up_conv8 = keras.layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = keras.layers.Cropping2D(cropping=(ch, cw))(conv1)
        up9 = keras.layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
        conv9 = keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

        # Pad back to the input size and map to a single sigmoid output channel
        ch, cw = self.get_crop_shape(inputs, conv9)
        conv9 = keras.layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
        conv10 = keras.layers.Conv2D(1, (1, 1), activation='sigmoid')(conv9)

        model = keras.models.Model(inputs=inputs, outputs=conv10)
        return model
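The generator and training call are not shown here; roughly, the model is built and trained along these lines. The dummy generator, optimizer, loss and image size below are assumptions made only to keep the sketch self-contained (the batch size of 16 and the accuracy metric match what appears in the error further down):

import numpy as np

def dummy_gen(batch_size=16, size=208):
    # stand-in for the real data generator: random images and binary masks
    while True:
        x = np.random.rand(batch_size, size, size, 1).astype('float32')
        y = (np.random.rand(batch_size, size, size, 1) > 0.5).astype('float32')
        yield x, y

unet = UNet()
model = unet.create_model(img_shape=(None, None, 1), num_class=1)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit_generator(dummy_gen(), steps_per_epoch=10, epochs=1)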
When I run the code, I get the following error:

tensorflow.python.framework.errors_impl.InvalidArgumentError: 2 root error(s) found.
  (0) Invalid argument: ConcatOp : Dimensions of inputs should match: shape[0] = [16,512,30,52] vs. shape[1] = [16,256,30,53]
     [[{{node concatenate_1/concat}}]]
     [[metrics/acc/Mean_1/_345]]
  (1) Invalid argument: ConcatOp : Dimensions of inputs should match: shape[0] = [16,512,30,52] vs. shape[1] = [16,256,30,53]
     [[{{node concatenate_1/concat}}]]

which probably points to the cropping of conv4. Moreover, that is the only tensor with a dimension of 256.
However, if I specify the shape of the input image, i.e. run inputs = keras.layers.Input(shape = img_shape) instead of inputs = keras.layers.Input(shape = (None, None, 1)), the code works perfectly.
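In other words, the only change between the failing run and the working run is which of the two Input lines inside create_model is uncommented; a minimal sketch of that toggle (the concrete img_shape value (208, 208, 1) is only an assumed example):

# fails at training time with the ConcatOp error above:
inputs = keras.layers.Input(shape=(None, None, 1))
# works, e.g. when create_model is called with img_shape = (208, 208, 1):
# inputs = keras.layers.Input(shape=img_shape)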
Why does this happen?