I want to compute the 3D coordinates, in world units, of every pixel in a 2D image. I am using the stereo triangulation technique and take two images of the same scene. The corresponding left and right images are available here: https://drive.google.com/open?id=1Ep6Hgx5pvwVrwBHl0H-Rsat7gHgrEEQN
Please go through the code and let me know where I am going wrong.
Thanks in advance!
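For reference, the projection model behind the code below (first camera at the origin):

    x1 ~ K [I | 0] X
    x2 ~ K [R | t] X

where X is a homogeneous world point, K is the intrinsic matrix and (R, t) is the pose of the second camera; triangulation recovers X from the matched pixels x1 and x2. As far as I understand, the translation t recovered from the essential matrix is only defined up to scale, so true world units would additionally require the real baseline length.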
Code:
import numpy as np
import cv2
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
fx = 1761.320022138                       # focal length in pixels (x)
fy = 1761.320022138                       # focal length in pixels (y)
prince_pt = [969.85441709, 565.51099872]  # principal point offset (cx, cy)
lense_distortion = [0.061628499, 0.129355593, -0.073783067, -0.000252309, -0.000738866]  # lens distortion coefficients
K = np.array([[fx,  0.0, prince_pt[0]],
              [0.0, fy,  prince_pt[1]],
              [0.0, 0.0, 1.0]])           # camera intrinsic matrix
def flann_match(kp1, des1, kp2, des2, ratio=0.8, returnKP=True):
    '''Return cross-checked matches and, optionally, the matched pixel coordinates.'''
    # Lowe's ratio test in both directions
    matches12 = flann.knnMatch(des1, des2, k=2)
    good12 = [m for (m, n) in matches12 if m.distance < ratio * n.distance]
    matches21 = flann.knnMatch(des2, des1, k=2)
    good21 = [m for (m, n) in matches21 if m.distance < ratio * n.distance]
    # keep only matches that survive the mutual cross-check (1->2 and 2->1 agree)
    good = [m12 for m12 in good12 for m21 in good21 if
            (kp1[m12.queryIdx] == kp1[m21.trainIdx] and
             kp2[m12.trainIdx] == kp2[m21.queryIdx])]
    if not returnKP:
        return good
    kps1 = [kp1[m.queryIdx].pt for m in good]
    kps2 = [kp2[m.trainIdx].pt for m in good]
    pts1 = np.int32(kps1)  # note: the cast to int drops sub-pixel precision
    pts2 = np.int32(kps2)
    return good, pts1, pts2
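# An alternative mutual check I considered (sketch only, not used below):
# OpenCV's brute-force matcher can do the cross-check itself; for SIFT
# descriptors the L2 norm is the appropriate distance.
# bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
# matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)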
def show3d(X, Y, Z):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')  # fig.gca(projection=...) is removed in newer Matplotlib
    ax.plot(X.flatten(), Y.flatten(), Z.flatten(), 'k.')
    plt.show()
#######################################################################
imgl = cv2.imread('testL.jpg', cv2.IMREAD_GRAYSCALE)
imgr = cv2.imread('testR.jpg', cv2.IMREAD_GRAYSCALE)
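# The distortion coefficients defined above are never applied; a sketch of undistorting
# the images first (this assumes lense_distortion is already in OpenCV's
# (k1, k2, p1, p2, k3) order -- that ordering is an assumption on my side):
# dist = np.array(lense_distortion)
# imgl = cv2.undistort(imgl, K, dist)
# imgr = cv2.undistort(imgr, K, dist)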
sift = cv2.xfeatures2d.SIFT_create()  # cv2.SIFT_create() on OpenCV >= 4.4
kp1, des1 = sift.detectAndCompute(imgl, None)
kp2, des2 = sift.detectAndCompute(imgr, None)
# ########################################### step 1 : Find the matching points
good, pts1, pts2 = flann_match(kp1,des1,kp2,des2)
# ########################################### step 2: estimate the fundamental matrix F, then build the essential matrix E from the known K
F, mask = cv2.findFundamentalMat(pts1,pts2)
mask = mask.astype(bool).flatten()
E = K.T.dot(F.dot(K)) #Essential Matrix E = K' * F * K
_,R,t,_ = cv2.recoverPose(E,pts1[mask],pts2[mask],K)
P1 = np.hstack((R, t))  # projection matrix of the second camera, [R | t]
P0 = np.array([[1., 0., 0., 0.],
               [0., 1., 0., 0.],
               [0., 0., 1., 0.]])  # projection matrix of the first camera at the origin, [I | 0]
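# Sanity check (sketch): since K is known, E can also be estimated directly from the
# pixel matches; the recovered pose should be close to R, t above (t only up to scale).
E_direct, maskE = cv2.findEssentialMat(np.float32(pts1), np.float32(pts2), K,
                                       method=cv2.RANSAC, prob=0.999, threshold=1.0)
_, R_check, t_check, _ = cv2.recoverPose(E_direct, np.float32(pts1), np.float32(pts2), K)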
#print("pts1",pts1)
img1ptsHom = cv2.convertPointsToHomogeneous(pts1)[:,0,:]
print("\n img1ptsHom",img1ptsHom )
img2ptsHom = cv2.convertPointsToHomogeneous(pts2)[:,0,:]
img1ptsNorm = (np.linalg.inv(K).dot(img1ptsHom.T)).T
img2ptsNorm = (np.linalg.inv(K).dot(img2ptsHom.T)).T
print("\n img1ptsNorm",img1ptsNorm)
img1ptsNorm = cv2.convertPointsFromHomogeneous(img1ptsNorm)[:,0,:]
img2ptsNorm = cv2.convertPointsFromHomogeneous(img2ptsNorm)[:,0,:]
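# Equivalent normalization sketch using cv2.undistortPoints, which would also remove
# lens distortion (again assuming lense_distortion is in OpenCV's (k1, k2, p1, p2, k3)
# order -- an assumption on my side):
# dist = np.array(lense_distortion)
# img1ptsNorm = cv2.undistortPoints(np.float32(pts1).reshape(-1, 1, 2), K, dist)[:, 0, :]
# img2ptsNorm = cv2.undistortPoints(np.float32(pts2).reshape(-1, 1, 2), K, dist)[:, 0, :]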
pts4d = cv2.triangulatePoints(P0,P1,img1ptsNorm.T,img2ptsNorm.T)
pts3d = cv2.convertPointsFromHomogeneous(pts4d.T)[:,0,:]
print("points in 3d \n",pts3d)