import tensorflow as tf

batch_size = 4
epochs = 50
myLearnRate1 = 1e-4
myLearnRate2 = 1e-4
X_train, X_test = train_data, val_data

for epoch in range(epochs):
    train_loss = []
    for i in range(len(X_train) // batch_size):
        # Take the current mini-batch.
        X = X_train[i * batch_size:min(len(X_train), (i + 1) * batch_size)]
        Image, Mask_p, Mask_n = create_mask(X)
        # Decay both learning rates with the epoch number.
        Lr1 = myLearnRate1 / (1 + (epoch / 37))
        Lr2 = myLearnRate2 / (1 + (epoch / 37))
        # A fresh pair of Adam optimizers is built on every batch.
        optimizer1 = tf.keras.optimizers.Adam(learning_rate=Lr1)
        optimizer2 = tf.keras.optimizers.Adam(learning_rate=Lr2)
        loss = train_on_batch(Image, Mask_p, Mask_n, optimizer1, optimizer2)
        train_loss.append(loss)
Basically, I pass an optimizer with a different learning rate to the training function on every iteration.

The training function:
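As an aside, since the learning rate here depends only on the epoch, an approximately equivalent setup is to build the two Adam optimizers once, before the loops, and let a tf.keras.optimizers.schedules.InverseTimeDecay schedule reproduce lr / (1 + epoch / 37). This is only a sketch of an alternative, not the original code; steps_per_epoch is an assumed name derived from the loop above.

import tensorflow as tf

steps_per_epoch = len(X_train) // batch_size  # X_train, batch_size as defined above
# learning_rate = 1e-4 / (1 + step / (37 * steps_per_epoch)) ~= 1e-4 / (1 + epoch / 37)
schedule1 = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=1e-4, decay_steps=37 * steps_per_epoch, decay_rate=1.0)
schedule2 = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=1e-4, decay_steps=37 * steps_per_epoch, decay_rate=1.0)
optimizer1 = tf.keras.optimizers.Adam(learning_rate=schedule1)
optimizer2 = tf.keras.optimizers.Adam(learning_rate=schedule2)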
def train_on_batch(X_original, X_p, X_n, optimizer1, optimizer2):
    # Persistent tape, because two separate gradients are taken from it.
    with tf.GradientTape(persistent=True) as tape:
        # Forward pass.
        recon, latent = autoencoder_model([X_original, X_p, X_n], training=True)
        # Loss values for this batch.
        loss_value1_, loss_value2 = assymetric_fun(X_original, X_p, X_n, recon)
        loss_value1 = -1.0 * loss_value1_
    # One gradient per loss, both with respect to the same weights.
    grads1 = tape.gradient(loss_value1, autoencoder_model.trainable_variables)
    grads2 = tape.gradient(loss_value2, autoencoder_model.trainable_variables)
    del tape  # release the resources held by the persistent tape
    # Update the weights twice, once per optimizer.
    optimizer1.apply_gradients(zip(grads1, autoencoder_model.trainable_variables))
    optimizer2.apply_gradients(zip(grads2, autoencoder_model.trainable_variables))
    return loss_value1_ + loss_value2
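For reference, here is a minimal, self-contained sketch of the same pattern: one persistent GradientTape feeding two optimizers that both update the same trainable variables. The toy model and the two stand-in losses are hypothetical placeholders, not the real autoencoder_model or assymetric_fun.

import tensorflow as tf

toy_model = tf.keras.Sequential([tf.keras.layers.Dense(4, activation="relu"),
                                 tf.keras.layers.Dense(8)])
opt_a = tf.keras.optimizers.Adam(1e-4)
opt_b = tf.keras.optimizers.Adam(1e-4)

x = tf.random.normal((4, 8))
with tf.GradientTape(persistent=True) as tape:
    y = toy_model(x, training=True)
    loss_a = tf.reduce_mean(tf.square(y - x))  # stands in for loss_value1
    loss_b = tf.reduce_mean(tf.abs(y - x))     # stands in for loss_value2
# Two gradients from the same tape, each applied by its own optimizer.
grads_a = tape.gradient(loss_a, toy_model.trainable_variables)
grads_b = tape.gradient(loss_b, toy_model.trainable_variables)
del tape  # free the persistent tape
opt_a.apply_gradients(zip(grads_a, toy_model.trainable_variables))
opt_b.apply_gradients(zip(grads_b, toy_model.trainable_variables))

Note that the second apply_gradients uses gradients computed at the weights as they were before the first update, which is exactly what the code above does.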