I trained YOLOv3 on my own data, but the predictions look as if it was never trained
0 votes
/ October 4, 2019

I am trying to build a YOLOv3 model that detects traffic lights and classifies them by color.

So I started training YOLOv3 on my own traffic-light data.

I set six classes:

red light, green light, yellow light, turn left, go straight and turn left, turn right. (I will make these 4 classes.)

After training, I ran the YOLOv3 detector with my own weights and cfg file.

But the prediction showed a completely unexpected result!

The predicted classes had nothing to do with the ones I set: car, airplane, motorbike, and so on.

What is wrong here?

I am using Ubuntu 16.04 LTS and a GeForce RTX 2080, with OpenCV, CUDA, and cuDNN. The darknet Makefile was therefore configured as follows:

    GPU=1
    CUDNN=1
    OPENCV=1
    OPENMP=0
    DEBUG=0
    ...
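
After editing these flags I rebuilt the binary, roughly like this (the standard darknet build workflow, run from the repository root):

    # rebuild darknet so the GPU/cuDNN/OpenCV settings take effect
    make clean
    make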

I used the pre-trained darknet19_448.conv.23 weights; I heard they are necessary.
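
For reference, I start training with the usual darknet command (the .data and .cfg names below are placeholders, not my exact file names):

    # standard darknet training invocation (placeholder file names)
    ./darknet detector train cfg/traffic.data cfg/yolov3-traffic.cfg darknet19_448.conv.23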

Here is the cfg file I set up for training:

    [net]
    # Testing
    #batch=1
    #subdivisions=1
    # Training
    batch=24
    subdivisions=8
    width=416
    height=416
    channels=3
    momentum=0.9
    decay=0.0005
    angle=0
    saturation = 1.5
    exposure = 1.5
    hue=.1

    learning_rate=0.001
    burn_in=1000
    max_batches = 20000
    policy=steps
    steps=400000,450000
    scales=.1,.1

    [convolutional]
    batch_normalize=1
    filters=32
    size=3
    stride=1
    pad=1
    activation=leaky

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=64
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=32
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=64
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=64
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=64
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    ######################

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky

    [convolutional]
    size=1
    stride=1
    pad=1
    filters=33
    activation=linear


    [yolo]
    mask = 6,7,8
    anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    classes=6
    num=9
    jitter=.3
    ignore_thresh = .7
    truth_thresh = 1
    random=1


    [route]
    layers = -4

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [upsample]
    stride=2

    [route]
    layers = -1, 61



    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=512
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=512
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=512
    activation=leaky

    [convolutional]
    size=1
    stride=1
    pad=1
    filters=33
    activation=linear


    [yolo]
    mask = 3,4,5
    anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    classes=6
    num=9
    jitter=.3
    ignore_thresh = .7
    truth_thresh = 1
    random=1



    [route]
    layers = -4

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [upsample]
    stride=2

    [route]
    layers = -1, 36



    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=256
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=256
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=256
    activation=leaky

    [convolutional]
    size=1
    stride=1
    pad=1
    filters=33
    activation=linear


    [yolo]
    mask = 0,1,2
    anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    classes=6
    num=9
    jitter=.3
    ignore_thresh = .7
    truth_thresh = 1
    random=1
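
As a sanity check on the head dimensions: with classes=6, each convolutional layer directly before a [yolo] layer should, as far as I understand the darknet convention, have

    # filters = (classes + 5) * <anchors per head>
    #         = (6 + 5) * 3
    #         = 33    -> matches filters=33 above

so the filter counts themselves look consistent. (I do notice that steps=400000,450000 can never trigger within max_batches=20000; the usual advice is 80% and 90% of max_batches, but I doubt that explains the wrong class names.)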

I would really appreciate an answer.

1 Answer

0 votes
/ October 4, 2019

Which repository are you using?

There should also be .data and .names files that you have to modify. Take a look at this page, for example. You need to change the number of filters, the number of classes, and the class names before training your model. The model you are running is probably still one pre-trained on some other dataset for vehicle detection.
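
As a rough sketch of what those files look like (all names and paths below are examples, not your actual files): the .names file lists one label per line, in the same order as the class IDs in your annotations:

    red light
    green light
    yellow light
    turn left
    go straight and turn left
    turn right

and the .data file ties the dataset together:

    classes = 6
    train   = data/traffic_train.txt
    valid   = data/traffic_valid.txt
    names   = data/traffic.names
    backup  = backup/

One likely explanation for your symptom: in pjreddie's darknet, `./darknet detect <cfg> <weights> <img>` is shorthand for `./darknet detector test cfg/coco.data <cfg> <weights> <img>`, so if you test without passing your own .data file you get COCO labels such as car, motorbike, and aeroplane. Running `./darknet detector test data/traffic.data your.cfg your.weights image.jpg` should print your own labels instead. Also note that darknet19_448.conv.23 is the YOLOv2 backbone; YOLOv3 is normally initialized from darknet53.conv.74.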
