Why does an error occur when running SegNet with 3 channels?
0 votes
23 March 2020

I am trying to build a SegNet model with multi-channel input.

My training data have 3 channels, and their width and height are both 128.

However, when I run the model below with

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=["categorical_accuracy"])

I get an error message like this:

"A target array with shape (516, 2) was passed for an output of shape (None, 16384, 2) while using as loss categorical_crossentropy. This loss expects targets to have the same shape as the output."

Please help!
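From the error it seems the model outputs one softmax vector per pixel, so the targets should apparently have shape (num_samples, 128*128, num_classes) rather than (num_samples, num_classes). Below is a minimal sketch of what I believe the label preparation would have to look like, assuming the ground truth is stored as integer masks of shape (516, 128, 128); the `masks` variable here is hypothetical:

import numpy as np
from tensorflow.keras.utils import to_categorical

# Assumed format: one integer segmentation mask per image,
# shape (516, 128, 128), pixel values in {0, 1}
masks = np.random.randint(0, 2, size=(516, 128, 128))

# One-hot encode every pixel: (516, 128, 128) -> (516, 128, 128, 2)
y = to_categorical(masks, num_classes=2)

# Flatten the spatial dimensions to match the model output:
# (516, 128, 128, 2) -> (516, 16384, 2)
y = y.reshape((masks.shape[0], 128 * 128, 2))
print(y.shape)  # (516, 16384, 2)

The model definition is below.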

from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization,
                                     Activation, MaxPooling2D, UpSampling2D,
                                     Reshape)
from tensorflow.keras.models import Model



def segnet_model(img_shape, num_classes, use_bias=False):

    input_img = Input(shape=img_shape)
    print("input_img", input_img)

    # encoder

    # encoder block 1
    conv_1 = Conv2D(64, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(input_img)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = Activation('relu')(conv_1)
    conv_1 = Conv2D(64, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(conv_1)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = Activation('relu')(conv_1)
    pool_1 = MaxPooling2D(pool_size=(2, 2))(conv_1)

    # encoder block 2
    conv_2 = Conv2D(128, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(pool_1)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = Activation('relu')(conv_2)
    conv_2 = Conv2D(128, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(conv_2)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = Activation('relu')(conv_2)
    pool_2 = MaxPooling2D(pool_size=(2, 2))(conv_2)

    # encoder block 3
    conv_3 = Conv2D(256, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(pool_2)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = Activation('relu')(conv_3)
    conv_3 = Conv2D(256, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(conv_3)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = Activation('relu')(conv_3)
    conv_3 = Conv2D(256, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(conv_3)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = Activation('relu')(conv_3)
    pool_3 = MaxPooling2D(pool_size=(2, 2))(conv_3)

    # encoder block 4
    conv_4 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(pool_3)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = Activation('relu')(conv_4)
    conv_4 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(conv_4)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = Activation('relu')(conv_4)
    conv_4 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(conv_4)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = Activation('relu')(conv_4)
    pool_4 = MaxPooling2D(pool_size=(2, 2))(conv_4)

    # encoder block 5
    conv_5 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(pool_4)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = Activation('relu')(conv_5)
    conv_5 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(conv_5)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = Activation('relu')(conv_5)
    conv_5 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(conv_5)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = Activation('relu')(conv_5)
    pool_5 = MaxPooling2D(pool_size=(2, 2))(conv_5)
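    # NOTE: after five 2x2 max-poolings, a 128x128 input is reduced to 4x4 here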

    # decoder

    # decoder block 1
    up_conv_1 = UpSampling2D(size=(2, 2))(pool_5)
    up_conv_1 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_1)
    up_conv_1 = BatchNormalization()(up_conv_1)
    up_conv_1 = Activation('relu')(up_conv_1)
    up_conv_1 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_1)
    up_conv_1 = BatchNormalization()(up_conv_1)
    up_conv_1 = Activation('relu')(up_conv_1)
    up_conv_1 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_1)
    up_conv_1 = BatchNormalization()(up_conv_1)
    up_conv_1 = Activation('relu')(up_conv_1)

    # decoder block 2
    up_conv_2 = UpSampling2D(size=(2, 2))(up_conv_1)
    up_conv_2 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_2)
    up_conv_2 = BatchNormalization()(up_conv_2)
    up_conv_2 = Activation('relu')(up_conv_2)
    up_conv_2 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_2)
    up_conv_2 = BatchNormalization()(up_conv_2)
    up_conv_2 = Activation('relu')(up_conv_2)
    up_conv_2 = Conv2D(512, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_2)
    up_conv_2 = BatchNormalization()(up_conv_2)
    up_conv_2 = Activation('relu')(up_conv_2)

    # decoder block 3
    up_conv_3 = UpSampling2D(size=(2, 2))(up_conv_2)
    up_conv_3 = Conv2D(256, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_3)
    up_conv_3 = BatchNormalization()(up_conv_3)
    up_conv_3 = Activation('relu')(up_conv_3)
    up_conv_3 = Conv2D(256, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_3)
    up_conv_3 = BatchNormalization()(up_conv_3)
    up_conv_3 = Activation('relu')(up_conv_3)
    up_conv_3 = Conv2D(256, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_3)
    up_conv_3 = BatchNormalization()(up_conv_3)
    up_conv_3 = Activation('relu')(up_conv_3)

    # decoder block 4
    up_conv_4 = UpSampling2D(size=(2, 2))(up_conv_3)
    up_conv_4 = Conv2D(128, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_4)
    up_conv_4 = BatchNormalization()(up_conv_4)
    up_conv_4 = Activation('relu')(up_conv_4)
    up_conv_4 = Conv2D(128, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_4)
    up_conv_4 = BatchNormalization()(up_conv_4)
    up_conv_4 = Activation('relu')(up_conv_4)

    # decoder block 5
    up_conv_5 = UpSampling2D(size=(2, 2))(up_conv_4)
    up_conv_5 = Conv2D(64, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_5)
    up_conv_5 = BatchNormalization()(up_conv_5)
    up_conv_5 = Activation('relu')(up_conv_5)
    up_conv_5 = Conv2D(64, (3, 3), strides=(1, 1), activation=None, use_bias=use_bias, padding='same')(up_conv_5)
    up_conv_5 = BatchNormalization()(up_conv_5)
    up_conv_5 = Activation('relu')(up_conv_5)
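    # NOTE: five UpSampling2D steps restore the 4x4 bottleneck back to 128x128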

    # predict: 1x1 convolution down to num_classes channels per pixel
    finalconv = Conv2D(num_classes, (1, 1), strides=(1, 1), activation=None, use_bias=use_bias, padding='valid')(up_conv_5)
    finalconv = BatchNormalization()(finalconv)
    # flatten the spatial grid: (128, 128, num_classes) -> (16384, num_classes);
    # this is where the (None, 16384, 2) in the error message comes from
    predict = Reshape((img_shape[0] * img_shape[1], num_classes))(finalconv)

    outputs = Activation('softmax')(predict)


    model = Model(inputs=input_img, outputs=outputs)
    model.summary()  
    return model
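This is how I build and compile it (a minimal usage sketch; the dummy arrays are only there to illustrate the shapes, my real data loading is different):

import numpy as np
from tensorflow.keras.utils import to_categorical

model = segnet_model(img_shape=(128, 128, 3), num_classes=2)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['categorical_accuracy'])

# Dummy inputs and per-pixel one-hot targets with the shapes the model expects
x_train = np.random.rand(516, 128, 128, 3).astype('float32')
masks = np.random.randint(0, 2, size=(516, 128, 128))
y_train = to_categorical(masks, num_classes=2).reshape((516, 128 * 128, 2))

model.fit(x_train, y_train, batch_size=8, epochs=1)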