I have two data inputs of the same shape for my U-Net model: 2 (or more) images of the same object, but taken with different camera/sensor types, e.g. night vision, thermal, RGB, etc. In that case I figured I would simply stack all the channels on top of each other and treat them as "one image".
I tried:
X_train = tf.keras.layers.concatenate([X_train, X_train2])
X_train has shape (288, 288, 3).
X_train2 has shape (288, 288, 3).
Concatenating them gives a combined image of shape (288, 288, 6), which is why I set the model's input shape to (288, 288, 6).
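To double-check the stacking itself, here is a minimal sketch with dummy NumPy arrays (stand-in names, not my real loading code):

import numpy as np
rgb = np.zeros((288, 288, 3), dtype=np.uint8)      # stand-in for the RGB image
thermal = np.zeros((288, 288, 3), dtype=np.uint8)  # stand-in for the thermal image
stacked = np.concatenate([rgb, thermal], axis=-1)  # channel-wise stack
print(stacked.shape)  # (288, 288, 6)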
My code is as follows:
import os
import numpy as np
import cv2
import tensorflow as tf
import keras
from keras import backend as K
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tqdm import tqdm
from skimage.io import imread, imshow
from skimage.transform import resize
import matplotlib.pyplot as plt

from google.colab import drive
drive.mount('/content/drive')
!ls /content/drive/
X_train = np.zeros((1, 288, 288, 3), dtype=np.uint8)
Y_train = np.zeros((1, 288, 288, 12), dtype=np.uint8)  # segmentation masks we are trying to predict
X_val = np.zeros((1, 288, 288, 3), dtype=np.uint8)
Y_val = np.zeros((1, 288, 288, 12), dtype=np.uint8)
# Stack the two modalities channel-wise: (1, 288, 288, 3) -> (1, 288, 288, 6).
# (The same dummy array stands in for both image types here.)
X_train = np.concatenate([X_train, X_train], axis=-1)
inputs = tf.keras.layers.Input((288, 288, 6))
print('input tensor shape:', inputs.shape)
smooth = 1.
s = tf.keras.layers.Lambda(lambda x: x / 255)(inputs)
# Encoder (contracting path)
c1 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(s)
c1 = tf.keras.layers.Dropout(0.1)(c1)
c1 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
p1 = tf.keras.layers.MaxPool2D((2, 2))(c1)

c2 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
c2 = tf.keras.layers.Dropout(0.1)(c2)
c2 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
p2 = tf.keras.layers.MaxPool2D((2, 2))(c2)

c3 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
c3 = tf.keras.layers.Dropout(0.1)(c3)
c3 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
p3 = tf.keras.layers.MaxPool2D((2, 2))(c3)

c4 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
c4 = tf.keras.layers.Dropout(0.1)(c4)
c4 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
p4 = tf.keras.layers.MaxPool2D((2, 2))(c4)

c5 = tf.keras.layers.Conv2D(1024, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
c5 = tf.keras.layers.Dropout(0.1)(c5)
c5 = tf.keras.layers.Conv2D(1024, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
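
# Decoder (expanding path): upsample and concatenate with the matching encoder feature map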
u6 = tf.keras.layers.Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = tf.keras.layers.concatenate([u6, c4])
c6 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
c6 = tf.keras.layers.Dropout(0.2)(c6)
c6 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
u7 = tf.keras.layers.Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(c6)
u7 = tf.keras.layers.concatenate([u7, c3])
c7 = tf.keras.layers.Conv2D(256, (2, 2), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
c7 = tf.keras.layers.Dropout(0.2)(c7)
c7 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
u8 = tf.keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c7)
u8 = tf.keras.layers.concatenate([u8, c2])
c8 = tf.keras.layers.Conv2D(128, (2, 2), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
c8 = tf.keras.layers.Dropout(0.1)(c8)
c8 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
u9 = tf.keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c8)
u9 = tf.keras.layers.concatenate([u9, c1], axis=3)
c9 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
c9 = tf.keras.layers.Dropout(0.1)(c9)
c9 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
outputs = tf.keras.layers.Conv2D(12, (1, 1), activation='softmax')(c9)  # per-pixel 12-class softmax
model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
cc = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999, amsgrad=False, decay=1e-6)
opt = tf.keras.optimizers.RMSprop(learning_rate=0.001, decay=1e-6)
opt2 = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9, nesterov=False)
de = tf.keras.optimizers.Adadelta(learning_rate=0.1, rho=0.95)
def focal_loss(target, output, gamma=2):
    # Renormalize and clip predictions, then weight the cross-entropy by (1 - p)^gamma.
    output /= K.sum(output, axis=-1, keepdims=True)
    eps = K.epsilon()
    output = K.clip(output, eps, 1. - eps)
    return -K.sum(K.pow(1. - output, gamma) * target * tf.math.log(output), axis=-1)
model.compile(optimizer=cc, loss=focal_loss, metrics=['categorical_accuracy'])
model.summary()
print(X_train.shape)
checkpointer = tf.keras.callbacks.ModelCheckpoint('model350epk.h5', verbose=1, save_best_only=True)
history = model.fit(X_train, Y_train, validation_data=(X_val, Y_val), batch_size=5, epochs=2, shuffle=True)  # checkpointer is defined but not passed in via callbacks yet
I get this error:
ValueError: Input 0 of layer conv2d is incompatible with the layer: expected axis -1 of input shape to have value 6 but received input with shape [5, 288, 288, 3]
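If it helps, this is a quick diagnostic sketch I can run right before fit(), using the variables above, to see which array still has 3 channels:

print('model expects:', model.input_shape)  # (None, 288, 288, 6)
print('X_train:', X_train.shape)            # (1, 288, 288, 6) after the concatenate
print('X_val:', X_val.shape)                # (1, 288, 288, 3), which does not match the 6-channel input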