Я использую собственный генератор данных Keras, код выглядит следующим образом:
class DataGen (keras.utils.Sequence):
def __init__(self, combined_ids, mask_ids, combined_path, mask_path, batch_size = 1, image_size = 256, PET_only = False, CT_only = False):
self.combined_ids = combined_ids
self.mask_ids = mask_ids
self.combined_path = combined_path
self.mask_path = mask_path
self.image_size = image_size
self.batch_size = batch_size
self.PET_only = PET_only
self.CT_only = CT_only
self.on_epoch_end()
def __loadcombined__ (self, id_name_combined):
#Path of the image
combined_path = os.path.join(self.combined_path, id_name_combined)
#Reading the combined image
combined_image = np.load(combined_path)
#Only PET images
if self.PET_only == True:
combined_image = combined_image[:,:,0]
combined_image = whitening(combined_image)
combined_image = crop_image(combined_image, 256, 256)
#combined_image = cv2.resize(combined_image, dsize=(self.image_size,self.image_size), interpolation = cv2.INTER_CUBIC)
#Only CT images
elif self.CT_only == True:
combined_image = combined_image[:,:,1]
combined_image = whitening(combined_image)
combined_image = crop_image(combined_image, 256, 256)
#combined_image = cv2.resize(combined_image, dsize=(self.image_size,self.image_size), interpolation = cv2.INTER_CUBIC)
#Full combined images
else:
combined_image = combined_image.astype("float32")
combined_image[:,:,0] = whitening(combined_image[:,:,0])
combined_image[:,:,1] = whitening(combined_image[:,:,1])
combined_image = crop_image_3d(combined_image, 256, 256)
#combined_image = cv2.resize(combined_image, dsize=(self.image_size,self.image_size, 2), interpolation = cv2.INTER_CUBIC)
return combined_image
def __loadmask__(self, id_name_mask):
#Path of the image
mask_path = os.path.join(self.mask_path, id_name_mask)
#Reading the mask image
mask_image = np.load(mask_path) #No normalisation needed
#Cropping the mask image to remove some useless information
mask_image = crop_image(mask_image, 256, 256)
#mask_image = cv2.resize(mask_image, dsize=(self.image_size,self.image_size), interpolation=cv2.INTER_NEAREST)
#Converting the masks to boolean data type
mask_image = mask_image.astype(bool)
return mask_image
def __getitem__(self, index):
#Generate one batch of data.
if(index+1)*self.batch_size > len(self.combined_ids):
self.batch_size = len(self.combined_ids) - index*self.batch_size
files_batch_combined = self.combined_ids[index*self.batch_size : (index+1)*self.batch_size]
files_batch_mask = self.mask_ids[index*self.batch_size : (index+1)*self.batch_size]
combined = []
mask = []
for id_name_mask in files_batch_mask:
_mask = self.__loadmask__(id_name_mask)
mask.append(_mask)
for id_name_combined in files_batch_combined:
_combined = self.__loadcombined__(id_name_combined)
combined.append(_combined)
combined = np.array(combined)
if self.PET_only == True:
combined = np.expand_dims(combined, -1)
if self.CT_only == True:
combined = np.expand_dims(combined, -1)
mask = np.array(mask)
mask = np.expand_dims(mask, -1)
return combined, mask
def on_epoch_end(self):
pass
def __len__(self):
#Denotes the number of batches per epoch
return int(np.ceil(len(self.combined_ids)/float(self.batch_size)))
Это будет генерировать 4-D тензор с 2-канальными изображениями в качестве обучающих данных (изображение ПЭТ / КТ) и бинарная маска в качестве основной сегментации истинности.
Однако, когда я пытаюсь обучить стандартный U- Net с помощью этого генератора, он будет go через, возможно, 10 пакетов обучающих данных, прежде чем выдаст мне эту ошибку:
Неверный аргумент: Ошибка типа: generator
вернул элемент, который не удалось преобразовать в ожидаемый тип. Ожидаемый тип был float32, но полученный элемент был [[array ([[- 0.480255, -0.48197123, -0.48379683], [-0.4811588, -0.48264366, -0.48423332]
Это совершенно сбивает с толку для меня, поскольку в моем генераторе я закодировал его для преобразования моих обучающих многоканальных данных ПЭТ / КТ в Float32, тип данных, который он якобы ожидает, но не получает.
* 1014 .fit и model.fit_generator. Код ниже
train_gen = DataGen(train_ids_combined, train_ids_mask, train_path_combined, train_path_mask, batch_size = batch_size, image_size = image_size, PET_only=True)
valid_gen = DataGen(valid_ids_combined, valid_ids_mask, train_path_combined, train_path_mask, batch_size = batch_size, image_size = image_size, PET_only=True)
test_gen = DataGen(test_ids_combined, test_ids_mask, train_path_combined, train_path_mask, batch_size = 1, image_size = image_size, PET_only=True)
train_steps = len(train_ids_combined)//batch_size
valid_steps = len(valid_ids_combined)//batch_size
test_steps = len(test_ids_combined)
history = model.fit(train_gen, validation_data=valid_gen, steps_per_epoch=train_steps, validation_steps=valid_steps,
epochs=epochs, callbacks=[model_checkpoint, early_stopping])
Если у кого-то есть какие-либо предложения и / или он сталкивался с этой проблемой раньше, я был бы очень признателен за помощь. Я реализую это в Google Colab (стандартная версия ).