I have two models (A and B) that are trained alternately. I would like a custom loss function for model B that depends on the values of intermediate layers in model A when the loss is computed each epoch.
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Conv2D, LeakyReLU, MaxPooling2D,
                                     Dropout, Flatten, Dense, Activation,
                                     BatchNormalization, Reshape, Conv2DTranspose)
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import binary_crossentropy

def model_A(in_shape=(48, 48, 1), n_classes=1):
    init = RandomNormal(mean=0.0, stddev=0.02)
    in_image = Input(shape=in_shape)
    model = Conv2D(32, (7, 7), padding='same', kernel_initializer=init)(in_image)
    model = LeakyReLU(alpha=0.1)(model)
    model = MaxPooling2D((2, 2), padding='same')(model)
    model = Dropout(0.25)(model)
    model = Flatten(name="feature_map")(model)  # I want to access values of this layer
    model = Dense(n_classes)(model)
    a_out_layer = Activation('softmax')(model)
    a_model = Model(in_image, a_out_layer)
    a_model.compile(loss='binary_crossentropy',
                    optimizer=Adam(learning_rate=0.0002, beta_1=0.5),
                    metrics=['accuracy'])
    return a_model
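Outside of training, I know I can read the "feature_map" activations for a concrete batch through a sub-model (feature_extractor below is just an illustrative name), but that does not help me inside the loss:

import numpy as np

a_model = model_A()
# Sub-model that maps A's input to the "feature_map" activations.
feature_extractor = Model(inputs=a_model.input,
                          outputs=a_model.get_layer("feature_map").output)

batch = np.random.rand(4, 48, 48, 1).astype('float32')   # dummy images
features = feature_extractor.predict(batch)               # shape (4, 18432) = 24*24*32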
def model_B(input_dim=(100,)):
    init = RandomNormal(mean=0.0, stddev=0.02)
    inp_vector = Input(shape=input_dim)
    n_nodes = 128 * 12 * 12
    gen = Dense(n_nodes, use_bias=False, kernel_initializer=init)(inp_vector)
    gen = BatchNormalization()(gen)
    gen = LeakyReLU(alpha=0.2)(gen)
    gen = Reshape((12, 12, 128))(gen)
    gen = Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same',
                          use_bias=False, kernel_initializer=init)(gen)
    gen = BatchNormalization()(gen)
    gen = LeakyReLU(alpha=0.2)(gen)
    b_model = Model(inp_vector, gen)
    return b_model
def combined_model(a_model, b_model):
    a_model.trainable = False

    # Keras losses must have the signature (y_true, y_pred), so a_model is
    # captured via closure. This is the part that does not work:
    # get_layer(...).output is a symbolic tensor, not the batch activations.
    def calculate_new_loss(y_true, y_pred):
        return a_model.get_layer("feature_map").output + binary_crossentropy(y_true, y_pred)

    combined_output = a_model(b_model.output)
    model = Model(b_model.input, combined_output)
    model.compile(loss=calculate_new_loss,
                  optimizer=Adam(learning_rate=0.0002, beta_1=0.5))
    return model
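One pattern I am considering is to expose the intermediate tensor through the same sub-model trick and attach the extra term with model.add_loss, so the compiled loss stays a plain crossentropy. A minimal sketch, assuming the extra term is simply the mean feature activation (a placeholder for whatever statistic the real loss needs); note that as written above, B's output shape (24, 24, 128) would not match A's expected input (48, 48, 1), so the full model B presumably has more layers:

from tensorflow.keras import backend as K

def combined_model_v2(a_model, b_model):
    a_model.trainable = False
    # Sub-model that maps A's input to the "feature_map" activations.
    feature_extractor = Model(a_model.input,
                              a_model.get_layer("feature_map").output)
    generated = b_model.output               # image produced by B
    combined_output = a_model(generated)     # A's prediction on that image

    model = Model(b_model.input, combined_output)
    # Extra loss term computed from A's intermediate layer on B's output;
    # replace K.mean(...) with the statistic the loss actually requires.
    model.add_loss(K.mean(feature_extractor(generated)))
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=0.0002, beta_1=0.5))
    return model

With add_loss the extra term is added to the compiled loss automatically, and the loss function itself keeps the standard (y_true, y_pred) signature. Is this the right approach, or is there a cleaner way to reference A's intermediate layers from B's loss?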