My question here is:
Will the following PyTorch code give the same results as the first, TensorFlow code? And if not, how can I fix it, please?
- I am working on an implementation of DFCN, as shown in the attached image,
- here is the TensorFlow code:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def BN_ELU_Conv(inputs, n_filters, filter_size=3, dropout_rate=0.2, is_training=tf.constant(False,dtype=tf.bool)):
initializer = tf.keras.initializers.VarianceScaling(scale=2.0,
mode='fan_in', distribution='truncated_normal', seed=None)
regularizer = tf.keras.regularizers.l2(1.0)
    # All of the arguments below were the tf.layers defaults; note that training
    # is hard-coded to False here instead of using is_training.
    l = tf.layers.batch_normalization(inputs, axis=-1, momentum=0.99,
                                      epsilon=0.001, training=False)
    # Conv2D is a Keras layer class (it has no `inputs` argument):
    # construct it, then call it on the tensor.
    l = tf.keras.layers.Conv2D(filters=n_filters,
                               kernel_size=[filter_size, filter_size],
                               padding="same",
                               activation=None,
                               kernel_initializer=initializer,
                               bias_initializer=tf.zeros_initializer(),
                               kernel_regularizer=regularizer)(l)
    l = tf.layers.dropout(l, rate=dropout_rate, training=is_training)
return l
def TransitionDown(inputs, n_filters, dropout_rate=0.2, is_training=tf.constant(False,dtype=tf.bool)):
l = BN_ELU_Conv(inputs, n_filters, filter_size=1, dropout_rate=dropout_rate,
is_training=is_training)
l = tf.layers.max_pooling2d(l, pool_size=[2, 2], strides=2)
return l
def ResidualTransitionUp(skip_connection, block_to_upsample, n_filters_keep, is_training=tf.constant(False,dtype=tf.bool)):
"""
    Performs upsampling of block_to_upsample by a factor of 2 and adds it to skip_connection.
"""
    # tf.contrib was removed in TF 2.x, so the Keras equivalents are used here too.
    initializer = tf.keras.initializers.VarianceScaling(scale=2.0,
        mode='fan_in', distribution='truncated_normal', seed=None)
    regularizer = tf.keras.regularizers.l2(1.0)
    # Same pattern as above: build the Keras layer, then apply it to the tensor.
    l = tf.keras.layers.Conv2DTranspose(filters=n_filters_keep,
                                        kernel_size=(3, 3),
                                        strides=(2, 2),
                                        padding='same',
                                        kernel_initializer=initializer,
                                        bias_initializer=tf.zeros_initializer(),
                                        kernel_regularizer=regularizer)(block_to_upsample)
skip_connection_shape = tf.shape(skip_connection)
l = tf.image.resize_image_with_crop_or_pad(l, skip_connection_shape[1], skip_connection_shape[2])
l = tf.add(l, skip_connection)
return l
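For reference, the initializer and regularizer above have close PyTorch counterparts. The following is only a rough sketch (the helper name is mine): nn.init.kaiming_normal_ with mode='fan_in' reproduces VarianceScaling(scale=2.0, mode='fan_in') except that PyTorch samples from an untruncated normal, and the L2 penalty is usually expressed as weight_decay on the optimizer rather than as a loss term.

import torch
import torch.nn as nn

def init_he_fan_in(module):
    # Approximates VarianceScaling(scale=2.0, mode='fan_in',
    # distribution='truncated_normal'): std = sqrt(2 / fan_in).
    if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
        nn.init.kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu')
        if module.bias is not None:
            nn.init.zeros_(module.bias)

# Usage (assuming some model): model.apply(init_he_fan_in)
# l2(1.0) roughly corresponds to weight_decay on the optimizer, up to the
# factor-of-2 convention between an L2 loss term and decoupled weight decay:
# optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, weight_decay=1.0)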
- I converted this code to PyTorch as follows:
import torch
import torch.nn as nn
import torchvision.transforms as transforms
def BN_ELU_Conv(inputs, n_filters, filter_size=3, dropout_rate=0.2):
    # Variance scaling (scale=2.0, fan_in) and L2 regularization are not
    # applied here; see the sketch after this listing.
    block = nn.Sequential(
        nn.Conv2d(in_channels=inputs.size(1), out_channels=n_filters,
                  kernel_size=filter_size, stride=1, padding=filter_size // 2),
        nn.Dropout2d(p=dropout_rate))
    return block(inputs)  # note: creates fresh, untrained weights on every call
#-------------- Transition_Down----------------------------------------------
def TransitionDown(inputs, n_filters, dropout_rate=0.2):
l = BN_ELU_Conv(inputs, n_filters, filter_size=1, dropout_rate=dropout_rate)
    l = nn.MaxPool2d(kernel_size=2, stride=2)(l)
return l
#-------------- Transition_UP -----------------------------------------------
def ResidualTransitionUp(skip_connection, block_to_upsample, n_filters_keep):
"""
    Performs upsampling of block_to_upsample by a factor of 2 and adds it to skip_connection.
"""
    # Variance scaling and L2 regularization are omitted here as well;
    # see the sketch after this listing.
    l = nn.ConvTranspose2d(in_channels=block_to_upsample.size(1),
                           out_channels=n_filters_keep, kernel_size=(3, 3),
                           stride=(2, 2), padding=1)(block_to_upsample)
    # PyTorch tensors are NCHW, so the spatial size is dims 2 and 3, not 1 and 2.
    # CenterCrop pads with zeros when the input is smaller than the target size,
    # matching tf.image.resize_image_with_crop_or_pad; RandomCrop would instead
    # crop at a random offset.
    skip_connection_shape = skip_connection.size()
    l = transforms.CenterCrop([skip_connection_shape[2], skip_connection_shape[3]])(l)
    l = torch.add(l, skip_connection)
    return l
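For comparison, here is a minimal nn.Module sketch (the class name is mine) of what a stateful translation of BN_ELU_Conv might look like. Unlike the functions above, it registers its layers once so the weights persist across calls, and it includes the batch normalization that the TensorFlow version applies; like the TF code, it applies no ELU despite the function's name. Dropout and BatchNorm then follow model.train()/model.eval(), which plays the role of is_training.

import torch.nn as nn

class BNConvBlock(nn.Module):
    # Hypothetical module mirroring BN_ELU_Conv: BN -> Conv (no activation) -> Dropout.
    def __init__(self, in_channels, n_filters, filter_size=3, dropout_rate=0.2):
        super().__init__()
        # TF momentum=0.99 corresponds to PyTorch momentum=0.01: PyTorch's
        # momentum is the update fraction for the running statistics.
        self.bn = nn.BatchNorm2d(in_channels, eps=0.001, momentum=0.01)
        self.conv = nn.Conv2d(in_channels, n_filters, kernel_size=filter_size,
                              padding=filter_size // 2)
        self.drop = nn.Dropout2d(p=dropout_rate)
        nn.init.kaiming_normal_(self.conv.weight, mode='fan_in', nonlinearity='relu')
        nn.init.zeros_(self.conv.bias)

    def forward(self, x):
        return self.drop(self.conv(self.bn(x)))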