OpenCV triangulatePoints does not give distances in world units
1 vote / 16 May 2019

I want to estimate 3D coordinates in world units. I am using a stereo 3D reconstruction technique: I select the corresponding points manually, by clicking in the left and right stereo images. Both images are taken with the same camera mounted on a moving vehicle. I use OpenCV to find the essential matrix and the pose between the two camera positions, and then OpenCV's triangulatePoints function to get the 3D coordinates of the object in world units. I do get coordinates, but I think they are only a scaled version and not in real-world units. How can I get the coordinates in world units relative to the left camera? Also, triangulatePoints does not work when I select fewer than 6 correspondences. How can I use the triangulation function with fewer than 6 correspondences?
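
From what I can tell, the translation t returned by cv2.recoverPose has unit norm, so the reconstruction is only defined up to scale. Below is a minimal sketch of how I imagine the result could be brought into metres if the true baseline were known; baseline_m (the distance the vehicle moved between the two shots, in metres) is a hypothetical value I do not actually have, and R, t, camera_matrix, pts_l, pts_r refer to the variables in the script further down:

import numpy as np
import cv2

baseline_m = 1.0   # hypothetical: the real camera displacement in metres (unknown to me)

# t from cv2.recoverPose() has |t| == 1; scaling it by the true baseline
# should make the triangulated coordinates come out in metres.
t_metric = baseline_m * t
M_l = np.hstack((np.eye(3), np.zeros((3, 1))))   # left camera at the origin
M_r = np.hstack((R, t_metric))                   # right camera pose in metres
P_l = camera_matrix @ M_l
P_r = camera_matrix @ M_r

pts4d = cv2.triangulatePoints(P_l, P_r,
                              np.float32(pts_l).reshape(-1, 2).T,   # 2xN, as in the documentation
                              np.float32(pts_r).reshape(-1, 2).T)
pts3d_m = (pts4d[:3] / pts4d[3]).T   # Nx3 points in metres, relative to the left camera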

Thanks in advance!


'''
pts_l - set of n 2D points in the left image, nx2 numpy float array
pts_r - set of n 2D points in the right image, nx2 numpy float array

camera_matrix - 3x3 intrinsic matrix (both images are taken with the same camera)
'''

import numpy as np
from numpy.linalg import inv, pinv,det
import cv2
import sys
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
lc=[]
rc=[]

fx = 1761.320022138   # focal length in pixels
fy = 1761.320022138
principal_pt = [969.85441709, 565.51099872]   # principal point (cx, cy)
lens_distortion = [0.061628499, 0.129355593, -0.073783067, -0.000252309, -0.000738866]

camera_matrix = np.array([[fx,  0.0, principal_pt[0]],
                          [0.0, fy,  principal_pt[1]],
                          [0.0, 0.0, 1.0]])

dist_coeffs = np.array(lens_distortion, dtype=np.float32)

# Alternative intrinsics (not used):
#camera_matrix = np.array([[1.3e+03, 0., 6.0e+02], [0., 1.3e+03, 4.8e+02], [0., 0., 1.]], dtype=np.float32)
#dist_coeffs = np.array([-2.4e-01, 9.5e-02, -4.0e-04, 8.9e-05, 0.], dtype=np.float32)


def click_Left(eventL, x, y, flags, param):
    # record the clicked point in the left image and mark it with a red circle
    global refPtL, cropping

    if eventL == cv2.EVENT_LBUTTONDOWN:
        refPtL = [[x, y]]
        cropping = True
        cv2.circle(imageL, (refPtL[0][0], refPtL[0][1]), 5, (0, 0, 255), -1)
        cv2.imshow("Left image", imageL)

    # when the button is released, store the point
    elif eventL == cv2.EVENT_LBUTTONUP:
        lc.append([x, y])
        print("lc in loop", lc)
    return 0


def click_Right(eventR, a, b, flags, param):
    # record the clicked point in the right image and mark it with a blue circle
    if eventR == cv2.EVENT_LBUTTONDOWN:
        refPtR = [[a, b]]
        cv2.circle(imageR, (refPtR[0][0], refPtR[0][1]), 5, (255, 0, 0), -1)
        cv2.imshow("Right image", imageR)

    # when the button is released, store the point
    elif eventR == cv2.EVENT_LBUTTONUP:
        rc.append([a, b])
        print("rc in loop", rc)
    return 0



# load the images, keep clean copies, and set up the mouse callbacks
imageL = cv2.imread("testL.jpg")
cloneL = imageL.copy()
cv2.namedWindow("Left image", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Left image", click_Left)

imageR = cv2.imread("testR.jpg")
cloneR = imageR.copy()
cv2.namedWindow("Right image", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Right image", click_Right)

#print("refPtL",click_Left)
while True:
    # display the image and wait for a keypress
    cv2.imshow("Left image", imageL)
    cv2.imshow("Right image", imageR)
    key = cv2.waitKey(1) & 0xFF

    # if the 'r' key is pressed, restore the clean images (removes the drawn markers)
    if key == ord("r"):
        imageL = cloneL.copy()
        imageR = cloneR.copy()

    # if the 'd' key is pressed, we are done selecting points
    elif key == ord("d"):
        break
cv2.destroyAllWindows()


# Undistort and normalise the clicked points for the essential-matrix calculation.
# All inputs must be float arrays, see
# https://stackoverflow.com/questions/11017984/how-to-format-xy-points-for-undistortpoints-with-the-python-cv2-api
pts_l = np.expand_dims(lc, axis=1)
pts_r = np.expand_dims(rc, axis=1)

pts_l_norm = cv2.undistortPoints(np.float32(pts_l), cameraMatrix=camera_matrix, distCoeffs=dist_coeffs)
pts_r_norm = cv2.undistortPoints(np.float32(pts_r), cameraMatrix=camera_matrix, distCoeffs=dist_coeffs)

# Estimate the essential matrix from the normalised points (focal=1, pp=(0, 0));
# the RANSAC threshold is in the same units as the points, hence the division by fx.
E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=1.0, pp=(0., 0.),
                               method=cv2.RANSAC, prob=0.999, threshold=3.0 / fx)
print("Essential Matrix \n", E)

# Recover the relative pose; t is returned with unit norm, so the scale is lost.
points, R, t, mask = cv2.recoverPose(E, pts_l_norm, pts_r_norm)
print("Rotation \n", R)

# Projection matrices: left camera at the origin, right camera at [R|t].
M_r = np.hstack((R, t))
M_l = np.hstack((np.eye(3, 3), np.zeros((3, 1))))

P_l = np.dot(camera_matrix, M_l)
P_r = np.dot(camera_matrix, M_r)

point_4d_hom = cv2.triangulatePoints(P_l, P_r, np.float32(pts_l), np.float32(pts_r))
point_4d = point_4d_hom / np.tile(point_4d_hom[-1, :], (4, 1))   # divide by the homogeneous coordinate
point_3d = point_4d[:3, :].T
print("point_4d_hom \n", point_4d_hom)
print("point_4d \n", point_4d)
print("Points in left image \n", pts_l)
print("Triangulated 3D points (relative to the left camera) \n", point_3d)

def CheckCoherentRotation(R):
    # a proper rotation matrix has determinant +1
    if abs(det(R) - 1.0) > 1e-07:
        print("Not a valid rotation matrix")
    else:
        print("Valid rotation matrix")


CheckCoherentRotation(R)
# plot with matplotlib: camera Z (depth) on the plot's x-axis,
# camera X on the y-axis, camera Y on the z-axis
Xs = point_3d[:, 2]   # depth (camera Z)
Ys = point_3d[:, 0]   # camera X
Zs = point_3d[:, 1]   # camera Y

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(Xs, Ys, Zs, c='r', marker='o')
ax.set_xlabel('Z (depth)')
ax.set_ylabel('X')
ax.set_zlabel('Y')
plt.title('3D point cloud: use the pan axes button below to inspect')
plt.show()
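
As a quick sanity check on the triangulation (independent of the scale question), the 3D points could be reprojected into the left image and compared with the clicked pixels. A minimal sketch, using point_3d, camera_matrix, dist_coeffs and lc from the script above:

# The left camera is the reference frame, so rvec = tvec = 0.
rvec = np.zeros(3)
tvec = np.zeros(3)
reproj, _ = cv2.projectPoints(point_3d, rvec, tvec, camera_matrix, dist_coeffs)
errors = np.linalg.norm(reproj.reshape(-1, 2) - np.float32(lc), axis=1)
print("Per-point reprojection error in pixels:", errors)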

...