The following code fragment was used to define a CNN architecture in Keras with the TensorFlow backend:
# Imports assumed for this fragment (standalone Keras 2.x with the TensorFlow backend)
from keras.models import Model
from keras.layers import (Input, Lambda, Conv2D, DepthwiseConv2D, Conv2DTranspose,
                          MaxPooling2D, Activation, BatchNormalization,
                          Dropout, SpatialDropout2D, Add)

class DownBlock(object):

    def __init__(self, prev_layer, num_chann = 16, depthwise_initializer = 'glorot_uniform', kernel_initializer = 'glorot_uniform', bias_initializer = 'zeros', drop_rate = None, spdrop_rate = None, activation = 'relu', pool = True):
        self.prev_layer = prev_layer
        if pool == True:
            self.prev_layer = MaxPooling2D((2, 2)) (self.prev_layer)
            self.prev_layer = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.prev_layer)
        self.convo = Activation(activation) (self.prev_layer)
        self.convo = BatchNormalization() (self.convo)
        if not spdrop_rate == None:
            self.convo = SpatialDropout2D(spdrop_rate) (self.convo)
        if not drop_rate == None:
            self.convo = Dropout(drop_rate) (self.convo)
        self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
        self.convo = DepthwiseConv2D((3, 3), depthwise_initializer = depthwise_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
        self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
        self.convo = Activation(activation) (self.convo)
        self.convo = BatchNormalization() (self.convo)
        if not spdrop_rate == None:
            self.convo = SpatialDropout2D(spdrop_rate) (self.convo)
        if not drop_rate == None:
            self.convo = Dropout(drop_rate) (self.convo)
        self.convo = DepthwiseConv2D((3, 3), depthwise_initializer = depthwise_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
        self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
        self.convo = Add([self.prev_layer, self.convo])

    def get(self):
        return self.convo
class UpBlock(object):

    def __init__(self, prev_layer, bridge_layer, num_chann = 16, depthwise_initializer = 'glorot_uniform', kernel_initializer = 'glorot_uniform', bias_initializer = 'zeros', drop_rate = None, spdrop_rate = None, activation = 'relu', up = True):
        self.prev_layer = prev_layer
        self.bridge_layer = bridge_layer
        self.convo = Activation(activation) (self.prev_layer)
        self.convo = BatchNormalization() (self.convo)
        if not spdrop_rate == None:
            self.convo = SpatialDropout2D(spdrop_rate) (self.convo)
        if not drop_rate == None:
            self.convo = Dropout(drop_rate) (self.convo)
        self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
        self.convo = DepthwiseConv2D((3, 3), depthwise_initializer = depthwise_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
        self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
        self.convo = Activation(activation) (self.convo)
        self.convo = BatchNormalization() (self.convo)
        if not spdrop_rate == None:
            self.convo = SpatialDropout2D(spdrop_rate) (self.convo)
        if not drop_rate == None:
            self.convo = Dropout(drop_rate) (self.convo)
        self.convo = DepthwiseConv2D((3, 3), depthwise_initializer = depthwise_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
        self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
        self.convo = Add([self.prev_layer, self.convo])
        if up == True:
            self.convo = Conv2D(num_chann/2, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
            self.convo = Conv2DTranspose(num_chann/2, (2, 2), strides = (2, 2), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
            self.convo = Add([self.bridge_layer, self.convo])

    def get(self):
        return self.convo
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = Lambda(lambda x: x / 255) (inputs)
s = Conv2D(8, (1, 1)) (s)
d1 = DownBlock(s, num_chann = 16, drop_rate = 0.1)
d2 = DownBlock(d1.get(), num_chann = 32, drop_rate = 0.1)
d3 = DownBlock(d2.get(), num_chann = 64, drop_rate = 0.1)
d4 = DownBlock(d3.get(), num_chann = 128, drop_rate = 0.1)
d5 = DownBlock(d4.get(), num_chann = 256, drop_rate = 0.1)
m = DownBlock(d5.get(), num_chann = 512, drop_rate = 0.1)
u5 = UpBlock(m.get(), d4.get(), num_chann = 256, drop_rate = 0.1)
u4 = UpBlock(u5.get(), d3.get(), num_chann = 128, drop_rate = 0.1)
u3 = UpBlock(u4.get(), d2.get(), num_chann = 64, drop_rate = 0.1)
u2 = UpBlock(u3.get(), d1.get(), num_chann = 32, drop_rate = 0.1)
u1 = UpBlock(u2.get(), s, num_chann = 16, drop_rate = 0.1)
final = Conv2D(1, (1, 1)) (u1.get())
# final = SpatialDropout2D(0.1) (final)
final = Dropout(0.1) (final)
final = BatchNormalization() (final)
outputs = Activation("sigmoid") (final)
model = Model(inputs = [inputs], outputs = [outputs])
When this is run inside a Jupyter notebook, the following stack trace is produced:
TypeError                                 Traceback (most recent call last)
<ipython-input-31-f23b70d0be6d> in <module>()
     79 s = Conv2D(8, (1, 1)) (s)
     80 
---> 81 d1 = DownBlock(s, num_chann = 16, drop_rate = 0.1)
     82 
     83 d2 = DownBlock(d1.get(), num_chann = 32, drop_rate = 0.1)

<ipython-input-31-f23b70d0be6d> in __init__(self, prev_layer, num_chann, depthwise_initializer, kernel_initializer, bias_initializer, drop_rate, spdrop_rate, activation, pool)
     29         self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
     30 
---> 31         self.convo = Add([self.prev_layer, self.convo])
     32 
     33     def get(self):

TypeError: __init__() takes 1 positional argument but 2 were given
The last line of the traceback ...
TypeError: __init__() takes 1 positional argument but 2 were given
... says that two positional arguments were passed to the first DownBlock() call, whereas I explicitly passed only one:
d1 = DownBlock(s, num_chann = 16, drop_rate = 0.1)
Where does the second positional argument come from, and why am I getting this error if I am not passing one?
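For what it's worth, the same error seems to be reproducible with nothing but the Add call in isolation. This is just a minimal sketch assuming keras.layers.Add and two dummy input tensors (the names a and b are made up here, they are not part of the model above):

from keras.layers import Input, Add

a = Input((4,))
b = Input((4,))
c = Add([a, b])   # on my setup this already raises: TypeError: __init__() takes 1 positional argument but 2 were given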