Python [Errno 32] Broken pipe - PullRequest
0 votes
/ 08 April 2020

There are several existing questions about this error, but none of them solve my problem. I am trying to build a vehicle counting system using deep learning methods. I did not upload the hw.mp4 video because the same error occurs with any other video. I have included the json file, the EfficientDet model file, and the object_tracking module it uses, so the error can be reproduced with minimal code.

import json
import numpy as np
from model import efficientdet
from utils import preprocess_image, postprocess_boxes
from object_tracking.centroidtracker import CentroidTracker
from object_tracking.trackableobject import TrackableObject
from imutils.video import FPS
import multiprocessing
import dlib
import cv2
import os
import imutils
import time

def start_tracker(box, label, rgb, inputQueue, outputQueue):
    # construct a dlib rectangle object from the bounding box
    # coordinates and then start the correlation tracker
    t = dlib.correlation_tracker()
    rect = dlib.rectangle(int(box[0]), int(box[1]), int(box[2]), int(box[3]))
    t.start_track(rgb, rect)
    # loop indefinitely -- this function will be called as a daemon
    # process so we don't need to worry about joining it
    while True:
        # attempt to grab the next frame from the input queue
        rgb = inputQueue.get()
        # if there was an entry in our queue, process it
        if rgb is not None:
            # update the tracker and grab the position of the tracked
            # object
            t.update(rgb)
            pos = t.get_position()
            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())    
            # add the label + bounding box coordinates to the output
            # queue
            outputQueue.put((label, (startX, startY, endX, endY)))
def run():
    # initialize our list of queues -- both input queue and output queue
    # for *every* object that we will be tracking
    inputQueues = []
    outputQueues = []

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # if there were a second GPU, 0 would become 1
    phi = 0  # resolution index; phi must match the resolution the model was trained with
    weighted_bifpn = True
    model_path = 'efficientdet-d0.h5'  # path to the model weights
    image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
    image_size = image_sizes[phi]
    # classes from the COCO dataset (this dataset covers 90 distinct object categories)
    classes = {value['id'] - 1: value['name'] for value in json.load(open('coco_90.json', 'r')).values()}
    num_classes = 90  # total number of classes
    score_threshold = 0.1  # threshold applied to the scores the model produces
    _, model = efficientdet(phi=phi,
                            weighted_bifpn=weighted_bifpn,
                            num_classes=num_classes,
                            score_threshold=score_threshold)
    model.load_weights(model_path, by_name=True)  # load the weights into the model
    vs = cv2.VideoCapture('i.mp4')  # video file
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)  # initialize the centroid tracker
    trackers = []  # list for storing each dlib correlation tracker
    trackableObjects = {}  # dictionary mapping each vehicle to its own ID

    totalFrames = 0  # number of processed frames, kept for the FPS calculation
    totalDown = 0
    totalUp = 0

    # start the FPS measurement
    # everything above takes roughly 50 ms
    fps = FPS().start()
    # loop over all frames one by one
    while True:
        (grabbed, frame) = vs.read()
        if frame is None:
            break
        else:
            frame = imutils.resize(frame, width=600)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if len(inputQueues) == 0:
                (H, W) = frame.shape[:2]
                # keep the network input separate so `rgb` still refers to the
                # full-size frame handed to the trackers later
                model_input, scale = preprocess_image(rgb, image_size=image_size)
                # run the detector on this frame
                boxes, scores, labels = model.predict_on_batch([np.expand_dims(model_input, axis=0)])
                boxes, scores, labels = np.squeeze(boxes), np.squeeze(scores), np.squeeze(labels)
                boxes.setflags(write=1)
                boxes = postprocess_boxes(boxes=boxes, scale=scale, height=H, width=W)

                # keep detections above the score threshold whose class is a
                # vehicle (COCO ids 2, 3, 5, 7); note that `labels == 2|3|5|7`
                # would compare against the single value 7 and also discard the
                # score filter, so both conditions are combined here
                indices = np.where((scores > score_threshold) &
                                   np.isin(labels, [2, 3, 5, 7]))[0]
                for i in indices:
                    # scale the box coordinates (pay attention here: writing
                    # through `boxes[indices][i]` would modify a copy, not `boxes`)
                    boxes[i] = np.array([W, H, W, H]) * boxes[i]
                    # grab the bounding box coordinates
                    (startX, startY, endX, endY) = boxes[i].astype("int")
                    bb = (startX, startY, endX, endY)
                    iq = multiprocessing.Queue()
                    oq = multiprocessing.Queue()
                    inputQueues.append(iq)
                    outputQueues.append(oq)
                    # spawn a daemon process for a new object tracker; pass the
                    # per-detection label and the full-size RGB frame
                    p = multiprocessing.Process(
                        target=start_tracker,
                        args=(bb, labels[i], rgb, iq, oq))
                    p.daemon = True
                    p.start()

                    # grab the corresponding class label for the detection
                    # and draw the bounding box
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 255, 0), 2)
                    # cv2.putText(frame, labels, (startX, startY - 15),
                    #             cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)

            else:
                # loop over each of our input queues and add the input RGB
                # frame to it, enabling us to update each of the respective
                # object trackers running in separate processes
                for iq in inputQueues:
                    iq.put(rgb)

                # loop over each of the output queues
                for oq in outputQueues:
                    # grab the updated bounding box coordinates for the
                    # object -- the .get method is a blocking operation so
                    # this will pause our execution until the respective
                    # process finishes the tracking update
                    (label, (startX, startY, endX, endY)) = oq.get()

                    # draw the bounding box from the correlation object
                    # tracker
                    # cv2.rectangle(frame, (startX, startY), (endX, endY),
                    #               (0, 255, 0), 2)
                    # cv2.putText(frame, label, (startX, startY - 15),
                    #             cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)

            # show the output frame (the boxes were drawn on `frame`, not `rgb`)
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
            totalFrames += 1
            fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print(totalUp)
    print(totalDown)
    print('vehicles detected')
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
    vs.release()

if __name__ == '__main__':
    run()

1 Answer

0 votes
/ 12 April 2020

You should add

if __name__ == "__main__":

after start_tracker, so that the code that spawns the tracker processes runs only in the main process.
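
For context, a minimal sketch of the pattern the answer describes (the worker function and queue names are illustrative, not from the question): on Windows, multiprocessing starts each child process by re-importing the module in a fresh interpreter, so any top-level code that creates queues or spawns processes runs again in the child, and writes to the resulting half-open pipe fail with [Errno 32].

import multiprocessing

def worker(q):
    # runs in the child process; pull items until the sentinel arrives
    while True:
        item = q.get()
        if item is None:
            break
        print("processing", item)

if __name__ == "__main__":
    # everything that creates queues and starts processes stays under this
    # guard, so the child's re-import of the module does not re-execute it
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,), daemon=True)
    p.start()
    q.put("frame-1")
    q.put(None)  # sentinel telling the worker to stop
    p.join()

The same structure applies to the question's code: every multiprocessing.Queue() and multiprocessing.Process() call belongs inside run(), which is itself called only under the guard.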
