Я пытаюсь обучить сеть на моём наборе данных с использованием моделей DenseNet (доступны здесь) и системы Nvidia DIGITS. Я уже прочитал все похожие вопросы и внёс некоторые изменения в свою сеть, но это выдаёт мне следующую ошибку:
conv2_1/x2/bn needs backward computation.
conv2_1/x1 needs backward computation.
relu2_1/x1 needs backward computation.
conv2_1/x1/scale needs backward computation.
conv2_1/x1/bn needs backward computation.
pool1_pool1_0_split needs backward computation.
pool1 needs backward computation.
relu1 needs backward computation.
conv1/scale needs backward computation.
conv1/bn needs backward computation.
conv1 needs backward computation.
label_val-data_1_split does not need backward computation.
val-data does not need backward computation.
This network produces output accuracy
This network produces output loss
Network initialization done.
Solver scaffolding done.
Finetuning from /home/ubuntu/models/DenseNet-Caffe/densenet201.caffemodel
Ignoring source layer input
Check failed: target_blobs.size() == source_layer.blobs_size() (5 vs. 3) Incompatible number of blobs for layer conv1/bn
Вот моя сеть. Я использовал оригинальный prototxt и внёс некоторые изменения, как показано ниже:
# Training input: DIGITS injects the LMDB source/mean at run time.
# 224x224 random crops match the input size DenseNet was pretrained with.
layer {
name: "train-data"
type: "Data"
top: "data"
top: "label"
include {
stage: "train"
}
transform_param {
crop_size: 224
}
data_param {
batch_size: 126
}
}
# Validation input: same 224x224 crop as training, smaller batch (64 vs 126).
layer {
name: "val-data"
type: "Data"
top: "data"
top: "label"
include {
stage: "val"
}
transform_param {
crop_size: 224
}
data_param {
batch_size: 64
}
}
# Stem convolution of DenseNet: 7x7, stride 2, 64 outputs.
# bias_term is false because the following BatchNorm/Scale pair supplies the shift.
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
num_output: 64
bias_term: false
pad: 3
kernel_size: 7
stride: 2
}
}
# Batch normalization after the stem convolution.
# NOTE(review): this is the layer named in the failure above —
# "Incompatible number of blobs for layer conv1/bn (5 vs. 3)".
# The pretrained densenet201.caffemodel stores 3 blobs per BatchNorm
# (BVLC Caffe layout: mean, variance, scale factor), while the Caffe fork
# used by DIGITS here apparently expects 5 — presumably NVCaffe's fused
# BatchNorm; verify which Caffe build DIGITS is running. Finetune with a
# BVLC-compatible build, or rename the BN layers so the pretrained
# weights for them are skipped.
layer {
name: "conv1/bn"
type: "BatchNorm"
bottom: "conv1"
top: "conv1/bn"
batch_norm_param {
eps: 1e-5
}
}
# Learned affine transform (gamma/beta) paired with conv1/bn;
# operates in place on the "conv1/bn" blob.
layer {
name: "conv1/scale"
type: "Scale"
bottom: "conv1/bn"
top: "conv1/bn"
scale_param {
bias_term: true
}
}
# In-place ReLU on the normalized stem output.
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1/bn"
top: "conv1/bn"
}
.
.
.
.
# Classifier head, retargeted from the original 1000 classes to 35.
# Renamed ("fc6new") so Caffe does not try to copy the original
# classifier weights from the pretrained .caffemodel during finetuning.
layer {
name: "fc6new"
type: "Convolution"
bottom: "pool5"
top: "fc6new"
convolution_param {
num_output: 35
kernel_size: 1
}
}
# Softmax cross-entropy training loss; excluded from the deploy network,
# which uses the plain Softmax layer below instead.
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc6new"
bottom: "label"
top: "loss"
exclude {
stage: "deploy"
}
}
# Top-1 accuracy reported during validation only.
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc6new"
bottom: "label"
top: "accuracy"
include {
stage: "val"
}
}
# Top-5 accuracy reported during training only.
layer {
name: "accuracy_train"
type: "Accuracy"
bottom: "fc6new"
bottom: "label"
top: "accuracy_train"
include {
stage: "train"
}
accuracy_param {
top_k: 5
}
}
# Class-probability output for inference; only present in the deploy network.
layer {
name: "softmax"
type: "Softmax"
bottom: "fc6new"
top: "softmax"
include {
stage: "deploy"
}
}