I wrote this code to train my CNN. In this version I added several preprocessing steps, and now it crashes:
def estimate_radius(img):
    mx = img[img.shape[0] // 2, :, :].sum(1)
    rx = (mx > mx.mean() / 10).sum() / 2
    my = img[:, img.shape[1] // 2, :].sum(1)
    ry = (my > my.mean() / 10).sum() / 2
    return (ry, rx)

def crop_img(img, h, w):
    h_margin = (img.shape[0] - h) // 2 if img.shape[0] > h else 0
    w_margin = (img.shape[1] - w) // 2 if img.shape[1] > w else 0
    crop_img = img[h_margin:h + h_margin, w_margin:w + w_margin, :]
    return crop_img

def subtract_gaussian_blur(img):
    return cv2.addWeighted(img, 4, cv2.GaussianBlur(img, (0, 0), 5), -4, 128)

def remove_outer_circle(a, p, r):
    b = np.zeros(a.shape, dtype=np.uint8)
    cv2.circle(b, (a.shape[1] // 2, a.shape[0] // 2), int(r * p), (1, 1, 1), -1, 8, 0)
    return a * b + 128 * (1 - b)

def place_in_square(img, r, h, w):
    new_img = np.zeros((2 * r, 2 * r, 3), dtype=np.uint8)
    new_img += 128
    new_img[r - h // 2:r - h // 2 + img.shape[0], r - w // 2:r - w // 2 + img.shape[1]] = img
    return new_img

def ReadImages(Path):
    LabelList = list()
    ImageCV = list()
    classes = ["nonPdr", "pdr"]
    scale = 224

    # Get all subdirectories
    FolderList = [f for f in os.listdir(Path) if not f.startswith('.')]

    # Loop over each directory
    for File in FolderList:
        for index, Image in enumerate(os.listdir(os.path.join(Path, File))):
            # Convert the path into a file
            ImageCV.append(cv2.resize(cv2.imread(os.path.join(Path, File) + os.path.sep + Image), (224, 224)))
            #ImageCV[index] = np.array(ImageCV[index]) / 255.0
            LabelList.append(classes.index(os.path.splitext(File)[0]))

            ry, rx = estimate_radius(ImageCV[index])
            img_crop = crop_img(ImageCV[index].copy(), 224, 224)
            img_gbs = subtract_gaussian_blur(img_crop.copy())
            img_remove_outer = remove_outer_circle(img_gbs.copy(), 0.9, scale)
            ImageCV[index] = place_in_square(img_remove_outer.copy(), scale, 224, 224)

    return ImageCV, LabelList
data, labels = ReadImages(TRAIN_DIR)
valid, vlabels = ReadImages(TEST_DIR)
vgg16_model = VGG16(weights="imagenet", include_top=True)
# (2) remove the top layer
base_model = Model(input=vgg16_model.input,
                   output=vgg16_model.get_layer("block5_pool").output)
# (3) attach a new top layer
base_out = base_model.output
base_out = Reshape((25088,))(base_out)
top_fc1 = Dense(4096, activation="relu")(base_out)
top_fc1 = Dropout(0.5)(base_out)
top_fc1 = Dense(64, activation="relu")(base_out)
top_fc1 = Dropout(0.5)(base_out)
# output layer: (None, 1)
top_preds = Dense(1, activation="sigmoid")(top_fc1)
# (4) freeze weights until the last but one convolution layer (block4_pool)
for layer in base_model.layers[0:14]:
    layer.trainable = False
# (5) create new hybrid model
model = Model(input=base_model.input, output=top_preds)
# (6) compile and train the model
sgd = SGD(lr=1e-4, momentum=0.9)
model.compile(optimizer=sgd, loss="binary_crossentropy", metrics=["accuracy"])
data = np.asarray(data)
valid = np.asarray(valid)
data = data.astype('float32')
valid = valid.astype('float32')
data /= 255
valid /= 255
labels = np.array(labels)
datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(data)
mean = datagen.mean
std = datagen.std
print(mean, "mean")
print(std, "std")
es = EarlyStopping(monitor='val_loss', verbose=1)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(data, np.array(labels), batch_size=32),
                    steps_per_epoch=len(data) / 32, epochs=10,
                    validation_data=(valid, np.array(vlabels)),
                    nb_val_samples=72, callbacks=[es])
model.save('model.h5')
But when I run it, it throws this error:

data = data.astype('float32')
ValueError: setting an array element with a sequence.
In my last version (without the blur and the crop) this code worked fine. What could be causing this?
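In case it helps, here is a small check I could run right before converting data, to see whether the preprocessed images end up with different shapes (my assumption being that np.asarray falls back to an object array when the list is ragged, and astype then fails on it):

# sanity check (my own addition, not part of the script above):
# do all images in the list share a single shape?
shapes = set(img.shape for img in data)
print(shapes)  # more than one entry, e.g. (224, 224, 3) and (448, 448, 3), would explain the failure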
I appreciate any help.