I created a custom layer in Keras with the following code:
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.layers import Layer


class drawLines(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(drawLines, self).__init__(**kwargs)

    def build(self, input_shape):
        super(drawLines, self).build(input_shape)

    def call(self, input):
        # Run the numpy/OpenCV drawing code inside the graph
        xout = tf.py_function(image_tensor_func, [input], 'float32')
        # py_function drops the static shape, so set it explicitly
        xout.set_shape([input.shape[0], input.shape[1], input.shape[2], 1])
        return xout

    def compute_output_shape(self, input_shape):
        # Same height/width as the input, but a single channel
        output_shape = list(input_shape)
        c_axis, h_axis, w_axis = 3, 1, 2
        output_shape[c_axis] = 1
        output_shape[h_axis] = input_shape[h_axis]
        output_shape[w_axis] = input_shape[w_axis]
        return tuple(output_shape)
def get_keypointCoordinates(hm):
    # Find the (x, y) position of the maximum value in a single heatmap
    kp_coor = []
    value = np.amax(hm)
    coord = []
    for width in range(hm.shape[0]):
        for height in range(hm.shape[1]):
            if tf.keras.backend.get_value(hm[width][height]) == value:
                coord = [height, width]
                break
    kp_coor.append([coord, value])
    return kp_coor
# Pairs of keypoint indices that are connected by a line of the skeleton
kps_lines = [(0, 1), (1, 2), (2, 6), (7, 12), (12, 11), (11, 10), (5, 4), (4, 3),
             (3, 6), (7, 13), (13, 14), (14, 15), (6, 7), (7, 8), (8, 9)]


def create_skelton(hms):
    # Take one keypoint per heatmap channel and draw the skeleton on a white mask
    kps = []
    width, height = hms.shape[0], hms.shape[1]
    for hm in range(hms.shape[2]):
        kps.append(get_keypointCoordinates(hms[:, :, hm]))
    kp_mask = np.zeros([width, height], dtype=np.uint8)
    kp_mask.fill(255)  # white background
    for l in range(len(kps_lines)):
        i1 = kps_lines[l][0]
        i2 = kps_lines[l][1]
        p1 = int(kps[i1][0][0][0]), int(kps[i1][0][0][1])
        p2 = int(kps[i2][0][0][0]), int(kps[i2][0][0][1])
        cv2.line(kp_mask, p1, p2, color=(0, 0, 0), thickness=8, lineType=cv2.LINE_AA)
        cv2.circle(kp_mask, p1, radius=3, color=(0, 0, 0), thickness=8, lineType=cv2.LINE_AA)
        cv2.circle(kp_mask, p2, radius=3, color=(0, 0, 0), thickness=8, lineType=cv2.LINE_AA)
    return kp_mask
def image_tensor_func(img4d):
    # Apply create_skelton to every image in the batch and add a channel axis
    results = []
    for img3d in img4d:
        rimg3d = create_skelton(img3d)
        results.append(rimg3d[:, :, np.newaxis])
    return results
And I build the model as follows:
from tensorflow.keras.layers import Input, Conv2D
from tensorflow.keras.models import Model

input_img = Input(shape=(64, 64, 3))
mod = Conv2D(16, kernel_size=1, activation='relu')(input_img)
mod = drawLines(output_dim=(64, 64, 1))(mod)
model = Model(input_img, mod)
model.compile('adadelta', loss='mse', metrics=['accuracy'])
model.summary()
Everything works fine up to this point, and I get the model summary:
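The summary looks roughly like this (shapes and parameter counts follow from the layers above; the exact layer names depend on the Keras version):

_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
input_1 (InputLayer)         (None, 64, 64, 3)         0
_________________________________________________________________
conv2d (Conv2D)              (None, 64, 64, 16)        64
_________________________________________________________________
draw_lines (drawLines)       (None, 64, 64, 1)         0
=================================================================
Total params: 64
Trainable params: 64
Non-trainable params: 0
_________________________________________________________________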
But when I run:
model.fit(model_inp, model_out, batch_size=3, epochs=1, validation_data=(val_inp, val_out))
model_inp and val_inp have shape (7, 64, 64, 3), and model_out and val_out have shape (7, 64, 64, 1).
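These arrays come from my dataset; for a self-contained reproduction, random arrays with the same shapes can stand in for them (dummy values only, not my real data):

import numpy as np

# Dummy stand-ins with the shapes listed above
model_inp = np.random.rand(7, 64, 64, 3).astype('float32')
model_out = np.random.rand(7, 64, 64, 1).astype('float32')
val_inp = np.random.rand(7, 64, 64, 3).astype('float32')
val_out = np.random.rand(7, 64, 64, 1).astype('float32')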
When it runs, I get the following error:
tensorflow.python.framework. ... /ReluGrad-0-TransposeNHWCToNCHW-LayoutOptimizer}}]]