How to get accurate output using reprojectImageTo3D()?
0 votes / 01 April 2019

I am trying to build a 3D point cloud with the reprojectImageTo3D() function. I have implemented the following steps: stereoCalibrate() -> stereoRectify() -> disparity map computation -> reprojectImageTo3D(). The output of reprojectImageTo3D() comes out as a cone in the point cloud, which is not what I expect. I am also having trouble getting an accurate disparity map. What is the step-by-step procedure for obtaining a correct disparity map? Please help me find where I am going wrong.
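
For reference, this is how I understand the final disparity-to-point-cloud step should look (a condensed sketch; rectL/rectR stand for already rectified grayscale images and Q comes from stereoRectify()):

    sgbm = cv2.StereoSGBM_create(minDisparity=0, numDisparities=64, blockSize=5)
    raw = sgbm.compute(rectL, rectR)        # int16 output, true disparity * 16
    disp = raw.astype(np.float32) / 16.0    # disparity in pixels
    xyz = cv2.reprojectImageTo3D(disp, Q)   # HxWx3 array of 3D points
    mask = disp > disp.min()                # drop invalid / minimum-disparity pixels
    points = xyz[mask]

My full code is below: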

    import numpy as np
    import cv2
    import glob
    from matplotlib import pyplot as plt
    import sys

    #get all frames from calibration video
    calibcap = cv2.VideoCapture('testcut1.avi')

    calib_files = []
    if calibcap.isOpened() == False:
        print('Error file not found!')

    while calibcap.isOpened():
        ret,frame = calibcap.read()
        if ret == True:
            #time.sleep(1/20)
            cv2.imshow('frame',frame)
            calib_files.append(frame)
            if cv2.waitKey(10) & 0xFF == 27:
                break
        else:
            break

    calibcap.release()

    #divide a frame into 2 images (left cam img and right cam img)
    cam_left = []
    cam_right = []

    for fname in calib_files:
        cam_left.append(fname[288:576,:360])
        cam_right.append(fname[:288,360:720])

    #print(len(cam_left))
    #print(len(cam_right))

    height, width, depth = cam_left[0].shape
    print(cam_left[0].shape)
    #cv2.imshow("leftcam", cam_left[0])
    #cv2.imshow("righcam", cam_right[0])

    #Declaring Object points array
    objpleft = np.zeros((6*6,3),np.float32)
    objpleft[:,:2] = np.mgrid[0:6,0:6].T.reshape(-1,2)

    objpright = np.zeros((6*6,3),np.float32)
    objpright[:,:2] = np.mgrid[0:6,0:6].T.reshape(-1,2)

    #Termination Criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    #Declaring variables for objectpoints and imagepoints
    objectpoints = []
    objectpointsleft = []
    objectpointsright = []
    imgpointsleft = []
    imgpointsright = []
    imagepointsl = []

    #Finding Imagepoints and ObjectPoints of left camera
    for fname in cam_left:
        img = fname
        gray = cv2.cvtColor(fname,cv2.COLOR_BGR2GRAY)

        ret, corners = cv2.findChessboardCorners(gray, (6,6),None)

        if ret == True:
            objectpointsleft.append(objpleft)

            corners1 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
            imgpointsleft.append(corners1)

            #Draw and Display Corners
            img = cv2.drawChessboardCorners(img, (6,6), corners1, ret)
            cv2.imshow("calibleft", img)
            cv2.waitKey(500)

    ret, mtxl, distl, rvecl, tvecl = cv2.calibrateCamera(objectpointsleft,imgpointsleft,(width,height),None,None)
    print(mtxl)
    print(distl)
    objectpoints.append(objectpointsleft[0])
    imagepointsl.append(imgpointsleft[0])
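
    # NOTE (sketch, not part of my current code): as far as I understand, stereoCalibrate()
    # expects the object/image point lists to have the same length, with entry i of the left
    # and right lists detected in the same stereo frame. Keeping only frames where *both*
    # views find the board would look roughly like this:
    #
    #   objpoints_pair, imgpoints_pair_l, imgpoints_pair_r = [], [], []
    #   for imgL, imgR in zip(cam_left, cam_right):
    #       grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    #       grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
    #       retL, cornL = cv2.findChessboardCorners(grayL, (6,6), None)
    #       retR, cornR = cv2.findChessboardCorners(grayR, (6,6), None)
    #       if retL and retR:
    #           objpoints_pair.append(objpleft)
    #           imgpoints_pair_l.append(cv2.cornerSubPix(grayL, cornL, (11,11), (-1,-1), criteria))
    #           imgpoints_pair_r.append(cv2.cornerSubPix(grayR, cornR, (11,11), (-1,-1), criteria))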

    #Finding Image points and Object points for right Camera
    for fnamee in cam_right:
        img1 = fnamee
        gray1 = cv2.cvtColor(fnamee,cv2.COLOR_BGR2GRAY)

        ret1, corners2 = cv2.findChessboardCorners(gray1, (6,6),None)

        if ret1 == True:
            objectpointsright.append(objpright)

            corners3 = cv2.cornerSubPix(gray1, corners2, (11,11), (-1,-1), criteria)
            imgpointsright.append(corners3)

            #Draw and Display Corners
            img1 = cv2.drawChessboardCorners(img1, (6,6), corners3, ret1)
            cv2.imshow("calibright", img1)
            cv2.waitKey(500)

    #ret, mtxr, distr, rvecr, tvecr = cv2.calibrateCamera(objectpointsright,imgpointsright,gray1.shape[::-1],None,None)
    #print(mtxr)
    #print(distr)

    print("Now doing stereoCalib")


    cameraMatrix1 = None
    distCoeffs1 = None
    cameraMatrix2 = None
    distCoeffs2 = None
    R = None
    T = None
    E = None
    F = None

    stereo_criteria = (cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_EPS, 1000, 1e-6)

    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objectpoints, imagepointsl, imgpointsright, None, None, None, None, (width,height), flags=0)
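
    # NOTE (sketch): stereo_criteria above is currently unused, and the intrinsics are
    # re-estimated from scratch here. If the commented-out calibrateCamera() call for the
    # right camera is re-enabled (so mtxr/distr exist), both sets of intrinsics could be
    # passed in and kept fixed, letting stereoCalibrate() solve only for R and T:
    #
    #   retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
    #       cv2.stereoCalibrate(objectpoints, imagepointsl, imgpointsright,
    #                           mtxl, distl, mtxr, distr, (width, height),
    #                           criteria=stereo_criteria, flags=cv2.CALIB_FIX_INTRINSIC)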

    print(cameraMatrix1)
    print(distCoeffs1)

    print("Stereo Calibration Sucessful\n")

    #Implementing Rectification
    R1 = np.zeros(shape=(3,3))
    R2 = np.zeros(shape=(3,3))
    P1 = np.zeros(shape=(3,4))
    P2 = np.zeros(shape=(3,4))

    print("\nStereo Rectification started")

    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (width, height), R, T, alpha=0)
    print(Q)

    leftMapX, leftMapY = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, (width, height), cv2.CV_16SC2)
    rightMapX, rightMapY = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, (width, height), cv2.CV_16SC2)

    distCoeffs1[0][4] = 0.0
    distCoeffs2[0][4] = 0.0

    newCamsL, roiL = cv2.getOptimalNewCameraMatrix(cameraMatrix = cameraMatrix1, distCoeffs = distCoeffs1, imageSize = (width,height), alpha = 0)
    newCamsR, roiR = cv2.getOptimalNewCameraMatrix(cameraMatrix = cameraMatrix2, distCoeffs = distCoeffs2, imageSize = (width,height), alpha = 0)

    print("Stereo Rectification Successful\n")

    print(roi1)
    print('\n///////////////////////////////////\n')
    print(roiL)

    #In the above code we have done the Calibration technique and found the Q matrix, which can be used for reprojectImageTo3D

    #//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    #Now we will calculate the Disparity Map for a pair of Stereo Images and find their 3D co-ordinates using reprojectImageTo3D/triangulatePoints

    dispcap = cv2.VideoCapture('stereo4.avi')

    if dispcap.isOpened() == False:
        print("Error in reading Video File")

    ret, frame = dispcap.read()

    if ret == True:
        cv2.imshow("frame", frame)
        temp = frame

    imageLeft = temp[288:576,:360]
    imageRight = temp[:288,360:720]

    #imageLeft = cv2.imread('imgL.jpg')
    #imageRight = cv2.imread('imgR.jpg')

    cv2.imshow('Left Image', imageLeft)
    cv2.imshow('Right Image', imageRight)

    height, width, depth = imageLeft.shape

    #rectImageLeft = cv2.remap(imageLeft, leftMapX, leftMapY, cv2.INTER_LINEAR)
    #rectImageRight = cv2.remap(imageRight, rightMapX, rightMapY, cv2.INTER_LINEAR)

    #cv2.imshow('Rectified Left', rectImageLeft)
    #cv2.imshow('Rectified right', rectImageRight)

    # note: the 4th positional argument of cv2.undistort() is dst, so the new camera
    # matrix has to be passed explicitly as the 5th argument
    rectFramesL = cv2.undistort(imageLeft, cameraMatrix1, distCoeffs1, None, newCamsL)
    rectFramesR = cv2.undistort(imageRight, cameraMatrix2, distCoeffs2, None, newCamsR)
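
    # NOTE (sketch): cv2.undistort() only removes lens distortion; it does not apply the
    # rectifying rotations R1/R2, so the epipolar lines are not guaranteed to be horizontal
    # and the disparity / Q-based reprojection can be off. The maps already built with
    # initUndistortRectifyMap() above encode both steps:
    #
    #   rectFramesL = cv2.remap(imageLeft, leftMapX, leftMapY, cv2.INTER_LINEAR)
    #   rectFramesR = cv2.remap(imageRight, rightMapX, rightMapY, cv2.INTER_LINEAR)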

    cv2.imshow('Rectified Left 1', rectFramesL)
    cv2.imshow('Rectified right 1', rectFramesR)

    rectFramesLgray = cv2.cvtColor(rectFramesL, cv2.COLOR_BGR2GRAY)
    rectFramesRgray = cv2.cvtColor(rectFramesR, cv2.COLOR_BGR2GRAY)

    window_size = 3
    min_disp = 0
    max_disp = 16
    num_disp = max_disp - min_disp
    stereo = cv2.StereoSGBM_create(
        minDisparity = min_disp,
        numDisparities=num_disp,
        blockSize=16,
        P1=8 * 1 * window_size ** 2,
        P2=32 * 1 * window_size ** 2,
        disp12MaxDiff=1,
        uniquenessRatio=10,
        speckleWindowSize=0,
        speckleRange=0,
    )
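
    # NOTE (sketch): typical StereoSGBM constraints that may matter here: numDisparities must
    # be a positive multiple of 16 (16 alone is usually too small a search range), blockSize
    # should be odd (roughly 3..11), and P1/P2 are normally 8*cn*blockSize**2 and
    # 32*cn*blockSize**2 where cn is the number of input channels, e.g.:
    #
    #   stereo = cv2.StereoSGBM_create(minDisparity=0, numDisparities=96, blockSize=5,
    #                                  P1=8 * 1 * 5 ** 2, P2=32 * 1 * 5 ** 2,
    #                                  disp12MaxDiff=1, uniquenessRatio=10,
    #                                  speckleWindowSize=100, speckleRange=2)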

    # compute disparity on the rectified grayscale pair prepared above
    output = stereo.compute(rectFramesLgray, rectFramesRgray)

    # StereoSGBM returns 16-bit fixed-point disparities scaled by 16;
    # convert to float for reprojection and normalise a copy only for display
    disparity = output.astype(np.float32) / 16.0

    disp_vis = cv2.normalize(disparity, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    cv2.imshow('Disparity Map', disp_vis)

    point_cloud = cv2.reprojectImageTo3D(disparity, Q)
    cv2.imshow('pointcloud', point_cloud)
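
    # NOTE (sketch): imshow() of the HxWx3 float XYZ array is not a meaningful way to inspect
    # the cloud; dumping the valid points to a PLY file and opening it in a viewer such as
    # MeshLab shows the actual geometry. Something along these lines (colours from the left image):
    #
    #   mask = disparity > disparity.min()
    #   pts = point_cloud[mask]
    #   cols = cv2.cvtColor(rectFramesL, cv2.COLOR_BGR2RGB)[mask]
    #   with open('cloud.ply', 'w') as f:
    #       f.write('ply\nformat ascii 1.0\nelement vertex %d\n' % len(pts))
    #       f.write('property float x\nproperty float y\nproperty float z\n')
    #       f.write('property uchar red\nproperty uchar green\nproperty uchar blue\nend_header\n')
    #       for (x, y, z), (r, g, b) in zip(pts, cols):
    #           f.write('%f %f %f %d %d %d\n' % (x, y, z, r, g, b))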

    cv2.waitKey(0)

    cv2.destroyAllWindows()