In the file that defines tf.keras.layers.BatchNormalization:
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm."""
  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        self.gamma,
        self.beta,
        epsilon=self.epsilon)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        self.gamma,
        self.beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False)

  output, mean, variance = tf_utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)
  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = tf_utils.constant_value(training)
  if training_value is None:
    momentum = tf_utils.smart_cond(training,
                                   lambda: self.momentum,
                                   lambda: 1.0)
  else:
    momentum = ops.convert_to_tensor(self.momentum)
  if training_value or training_value is None:
    mean_update = self._assign_moving_average(self.moving_mean, mean,
                                              momentum)
    variance_update = self._assign_moving_average(self.moving_variance,
                                                  variance, momentum)
    self.add_update(mean_update, inputs=True)
    self.add_update(variance_update, inputs=True)

  return output
The method calls training_value = tf_utils.constant_value(training) and then branches on if training_value is None; here tf_utils.constant_value is presumably tf.contrib.util.constant_value.
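
For reference, a minimal sketch (assuming TF 1.x, where tf.contrib is still available) of the behaviour that the if training_value is None branch relies on: constant_value returns a concrete Python/NumPy value when the predicate can be resolved at graph-construction time, and None when it is only known at run time (for example, a learning-phase placeholder).

    import tensorflow as tf  # TF 1.x assumed

    # Statically known predicate: constant_value resolves it at graph-construction time.
    static_pred = tf.constant(True)
    print(tf.contrib.util.constant_value(static_pred))   # True

    # Predicate known only at run time (e.g. a learning-phase placeholder):
    # constant_value returns None, so BatchNormalization falls back to smart_cond
    # and has to pick the momentum inside the graph rather than in Python.
    dynamic_pred = tf.placeholder(tf.bool, shape=())
    print(tf.contrib.util.constant_value(dynamic_pred))  # None

In the None case the momentum itself is chosen by smart_cond: self.momentum on the training branch and 1.0 on the inference branch, which (with the usual moving-average update formula) effectively turns the moving-mean/variance assignment into a no-op at inference time.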