ORB-SLAM2 Study Notes 4 (Initializer)

    2023-09-17

    The class that performs map initialization for the monocular case.

    Initializer.h

    /**
     * This file is part of ORB-SLAM2.
     *
     * Copyright (C) 2014-2016 Raúl Mur-Artal <raulmur at unizar dot es> (University of Zaragoza)
     * For more information see <https://github.com/raulmur/ORB_SLAM2>
     *
     * ORB-SLAM2 is free software: you can redistribute it and/or modify
     * it under the terms of the GNU General Public License as published by
     * the Free Software Foundation, either version 3 of the License, or
     * (at your option) any later version.
     *
     * ORB-SLAM2 is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
     * GNU General Public License for more details.
     *
     * You should have received a copy of the GNU General Public License
     * along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
     */

    #ifndef INITIALIZER_H
    #define INITIALIZER_H

    #include<opencv2/opencv.hpp>
    #include "Frame.h"

    namespace ORB_SLAM2
    {

    // THIS IS THE INITIALIZER FOR MONOCULAR SLAM. NOT USED IN THE STEREO OR RGBD CASE.
    // Monocular initializer
    class Initializer
    {
        typedef pair<int,int> Match;

    public:

        // Fix the reference frame
        Initializer(const Frame &ReferenceFrame, float sigma = 1.0, int iterations = 200);

        // Computes in parallel a fundamental matrix and a homography
        // Selects a model and tries to recover the motion and the structure from motion
        bool Initialize(const Frame &CurrentFrame, const vector<int> &vMatches12,
                        cv::Mat &R21, cv::Mat &t21, vector<cv::Point3f> &vP3D, vector<bool> &vbTriangulated);

    private:

        void FindHomography(vector<bool> &vbMatchesInliers, float &score, cv::Mat &H21);
        void FindFundamental(vector<bool> &vbInliers, float &score, cv::Mat &F21);

        cv::Mat ComputeH21(const vector<cv::Point2f> &vP1, const vector<cv::Point2f> &vP2);
        cv::Mat ComputeF21(const vector<cv::Point2f> &vP1, const vector<cv::Point2f> &vP2);

        float CheckHomography(const cv::Mat &H21, const cv::Mat &H12, vector<bool> &vbMatchesInliers, float sigma);
        float CheckFundamental(const cv::Mat &F21, vector<bool> &vbMatchesInliers, float sigma);

        bool ReconstructF(vector<bool> &vbMatchesInliers, cv::Mat &F21, cv::Mat &K,
                          cv::Mat &R21, cv::Mat &t21, vector<cv::Point3f> &vP3D, vector<bool> &vbTriangulated,
                          float minParallax, int minTriangulated);

        bool ReconstructH(vector<bool> &vbMatchesInliers, cv::Mat &H21, cv::Mat &K,
                          cv::Mat &R21, cv::Mat &t21, vector<cv::Point3f> &vP3D, vector<bool> &vbTriangulated,
                          float minParallax, int minTriangulated);

        void Triangulate(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Mat &P1, const cv::Mat &P2, cv::Mat &x3D);

        void Normalize(const vector<cv::KeyPoint> &vKeys, vector<cv::Point2f> &vNormalizedPoints, cv::Mat &T);

        int CheckRT(const cv::Mat &R, const cv::Mat &t, const vector<cv::KeyPoint> &vKeys1, const vector<cv::KeyPoint> &vKeys2,
                    const vector<Match> &vMatches12, vector<bool> &vbInliers,
                    const cv::Mat &K, vector<cv::Point3f> &vP3D, float th2, vector<bool> &vbGood, float &parallax);

        void DecomposeE(const cv::Mat &E, cv::Mat &R1, cv::Mat &R2, cv::Mat &t);

        // Keypoints from Reference Frame (Frame 1)
        vector<cv::KeyPoint> mvKeys1;

        // Keypoints from Current Frame (Frame 2)
        vector<cv::KeyPoint> mvKeys2;

        // Current Matches from Reference to Current
        vector<Match> mvMatches12;
        vector<bool> mvbMatched1;

        // Calibration
        cv::Mat mK;

        // Standard Deviation and Variance
        float mSigma, mSigma2;

        // Ransac max iterations
        int mMaxIterations;

        // Ransac sets
        vector<vector<size_t> > mvSets;
    };

    } //namespace ORB_SLAM

    #endif // INITIALIZER_H
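
    For orientation, the sketch below shows the match convention that Initialize() expects in its vMatches12 argument: the index is the reference-frame keypoint id and the value is the matched current-frame keypoint id, or -1 if there is no match (in ORB-SLAM2 this vector comes from ORBmatcher::SearchForInitialization). The data in the snippet is made up; it only mirrors the filtering done at the start of Initialize().

    // Minimal, self-contained illustration of the vMatches12 convention used by
    // Initialize(). The values here are invented test data, not ORB-SLAM2 output.
    #include <iostream>
    #include <utility>
    #include <vector>

    int main()
    {
        std::vector<int> vMatches12 = {3, -1, 0, -1, 2};   // 5 reference keypoints

        // Same filtering as the first loop of Initializer::Initialize
        std::vector<std::pair<int,int> > vPairs;           // [ref id, current id]
        std::vector<bool> vbMatched1(vMatches12.size());
        for(size_t i = 0; i < vMatches12.size(); i++)
        {
            if(vMatches12[i] >= 0)
            {
                vPairs.push_back(std::make_pair((int)i, vMatches12[i]));
                vbMatched1[i] = true;
            }
            else
                vbMatched1[i] = false;                     // -1 means not matched
        }

        for(size_t k = 0; k < vPairs.size(); k++)
            std::cout << "ref " << vPairs[k].first << " -> cur " << vPairs[k].second << std::endl;
        // prints: ref 0 -> cur 3, ref 2 -> cur 0, ref 4 -> cur 2
        return 0;
    }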

    Initializer.cc

    /**
     * This file is part of ORB-SLAM2.
     *
     * Copyright (C) 2014-2016 Raúl Mur-Artal <raulmur at unizar dot es> (University of Zaragoza)
     * For more information see <https://github.com/raulmur/ORB_SLAM2>
     *
     * ORB-SLAM2 is free software: you can redistribute it and/or modify
     * it under the terms of the GNU General Public License as published by
     * the Free Software Foundation, either version 3 of the License, or
     * (at your option) any later version.
     *
     * ORB-SLAM2 is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
     * GNU General Public License for more details.
     *
     * You should have received a copy of the GNU General Public License
     * along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
     */

    #include "Initializer.h"

    #include "Thirdparty/DBoW2/DUtils/Random.h"

    #include "Optimizer.h"
    #include "ORBmatcher.h"

    #include<thread>

    namespace ORB_SLAM2
    {

    Initializer::Initializer(const Frame &ReferenceFrame, float sigma, int iterations)
    {
        mK = ReferenceFrame.mK.clone();

        mvKeys1 = ReferenceFrame.mvKeysUn;

        mSigma = sigma;
        mSigma2 = sigma*sigma;
        mMaxIterations = iterations;
    }

    /**
     * Initialization:
     * 1. RANSAC: randomly draw 200 sample sets of 8 matched feature points each
     * 2. Compute H and F in parallel and keep the model that scores better
     */
    bool Initializer::Initialize(const Frame &CurrentFrame, const vector<int> &vMatches12, cv::Mat &R21, cv::Mat &t21,
                                 vector<cv::Point3f> &vP3D, vector<bool> &vbTriangulated)
    {
        // Fill structures with current keypoints and matches with reference frame
        // Reference Frame: 1, Current Frame: 2
        mvKeys2 = CurrentFrame.mvKeysUn;

        mvMatches12.clear();
        mvMatches12.reserve(mvKeys2.size());
        mvbMatched1.resize(mvKeys1.size());

        // Copy the match relations from vMatches12 into mvMatches12
        for(size_t i=0, iend=vMatches12.size();i<iend; i++)
        {
            if(vMatches12[i]>=0)
            {
                // pair [ref frame keypoint id, current frame keypoint id]
                mvMatches12.push_back(make_pair(i,vMatches12[i]));
                mvbMatched1[i]=true;
            }
            else    /// -1 means not matched
                mvbMatched1[i]=false;
        }

        /// matched key points size
        /// number of matched feature points
        const int N = mvMatches12.size();

        // Indices for minimum set selection
        vector<size_t> vAllIndices;
        vAllIndices.reserve(N);
        vector<size_t> vAvailableIndices;

        // index values
        for(int i=0; i<N; i++)
        {
            vAllIndices.push_back(i);
        }

        // Generate sets of 8 points for each RANSAC iteration
        /// 200 * vector<size_t>
        // pick 8 points for every RANSAC iteration
        mvSets = vector< vector<size_t> >(mMaxIterations,vector<size_t>(8,0));

        DUtils::Random::SeedRandOnce(0);

        /// 200 iterations: generate the sample data for all iterations
        for(int it=0; it<mMaxIterations; it++)
        {
            vAvailableIndices = vAllIndices;

            // Select a minimum set
            for(size_t j=0; j<8; j++)
            {
                /// get a random int number between [0, available_index]
                int randi = DUtils::Random::RandomInt(0,vAvailableIndices.size()-1);
                /// get key point index according to the random int number
                int idx = vAvailableIndices[randi];

                // fill the j-th sample of the it-th iteration
                mvSets[it][j] = idx;

                /// move the last element to replace randi index, and delete the last element
                vAvailableIndices[randi] = vAvailableIndices.back();
                vAvailableIndices.pop_back();
            }
        }

        // Launch threads to compute in parallel a fundamental matrix and a homography
        // two threads compute H and F concurrently
        vector<bool> vbMatchesInliersH, vbMatchesInliersF;
        float SH, SF;
        cv::Mat H, F;

        thread threadH(&Initializer::FindHomography,this,ref(vbMatchesInliersH), ref(SH), ref(H));
        thread threadF(&Initializer::FindFundamental,this,ref(vbMatchesInliersF), ref(SF), ref(F));

        // Wait until both threads have finished
        threadH.join();
        threadF.join();

        // Compute ratio of scores
        float RH = SH/(SH+SF);

        // Try to reconstruct from homography or fundamental depending on the ratio (0.40-0.45)
        if(RH>0.40)
            return ReconstructH(vbMatchesInliersH,H,mK,R21,t21,vP3D,vbTriangulated,1.0,50);
        else //if(pF_HF>0.6)
            return ReconstructF(vbMatchesInliersF,F,mK,R21,t21,vP3D,vbTriangulated,1.0,50);

        return false;
    }

    /**
     * Find the best homography matrix
     */
    void Initializer::FindHomography(vector<bool> &vbMatchesInliers, float &score, cv::Mat &H21)
    {
        // Number of putative matches
        const int N = mvMatches12.size();

        // Normalize coordinates
        vector<cv::Point2f> vPn1, vPn2;
        cv::Mat T1, T2;
        Normalize(mvKeys1,vPn1, T1);
        Normalize(mvKeys2,vPn2, T2);
        cv::Mat T2inv = T2.inv();

        // Best Results variables
        score = 0.0;
        vbMatchesInliers = vector<bool>(N,false);

        // Iteration variables
        vector<cv::Point2f> vPn1i(8);
        vector<cv::Point2f> vPn2i(8);
        cv::Mat H21i, H12i;
        vector<bool> vbCurrentInliers(N,false);
        float currentScore;

        // Perform all RANSAC iterations and save the solution with highest score
        for(int it=0; it<mMaxIterations; it++)
        {
            // Select a minimum set
            for(size_t j=0; j<8; j++)
            {
                int idx = mvSets[it][j];

                /// key point in frame 1
                vPn1i[j] = vPn1[mvMatches12[idx].first];
                /// key point in frame 2
                vPn2i[j] = vPn2[mvMatches12[idx].second];
            }

            /**
             * The homography computed here relates the normalized points, while we need H21 for the
             * original (un-normalized) points. Let P1, P2 be matched points in F1 and F2 (not normalized).
             * Hn maps the normalized points:   T2*P2 = Hn*T1*P1
             * H21 maps the raw points:         P2 = H21*P1
             * Therefore T2*H21*P1 = Hn*T1*P1  ==>  H21 = T2^-1 * Hn * T1
             */
            cv::Mat Hn = ComputeH21(vPn1i,vPn2i);
            H21i = T2inv*Hn*T1;
            H12i = H21i.inv();

            // score this homography under the noise parameter mSigma
            currentScore = CheckHomography(H21i, H12i, vbCurrentInliers, mSigma);

            if(currentScore>score)
            {
                H21 = H21i.clone();
                vbMatchesInliers = vbCurrentInliers;
                score = currentScore;
            }
        }
    }

    /** Compute the fundamental matrix */
    void Initializer::FindFundamental(vector<bool> &vbMatchesInliers, float &score, cv::Mat &F21)
    {
        // Number of putative matches
        const int N = vbMatchesInliers.size();

        // Normalize coordinates
        vector<cv::Point2f> vPn1, vPn2;
        cv::Mat T1, T2;
        Normalize(mvKeys1,vPn1, T1);
        Normalize(mvKeys2,vPn2, T2);
        cv::Mat T2t = T2.t();

        // Best Results variables
        score = 0.0;
        vbMatchesInliers = vector<bool>(N,false);

        // Iteration variables
        vector<cv::Point2f> vPn1i(8);
        vector<cv::Point2f> vPn2i(8);
        cv::Mat F21i;
        vector<bool> vbCurrentInliers(N,false);
        float currentScore;

        // Perform all RANSAC iterations and save the solution with highest score
        for(int it=0; it<mMaxIterations; it++)
        {
            // Select a minimum set
            for(int j=0; j<8; j++)
            {
                int idx = mvSets[it][j];

                vPn1i[j] = vPn1[mvMatches12[idx].first];
                vPn2i[j] = vPn2[mvMatches12[idx].second];
            }

            cv::Mat Fn = ComputeF21(vPn1i,vPn2i);

            F21i = T2t*Fn*T1;

            currentScore = CheckFundamental(F21i, vbCurrentInliers, mSigma);

            if(currentScore>score)
            {
                F21 = F21i.clone();
                vbMatchesInliers = vbCurrentInliers;
                score = currentScore;
            }
        }
    }

    cv::Mat Initializer::ComputeH21(const vector<cv::Point2f> &vP1, const vector<cv::Point2f> &vP2)
    {
        const int N = vP1.size();

        cv::Mat A(2*N,9,CV_32F);

        for(int i=0; i<N; i++)
        {
            const float u1 = vP1[i].x;
            const float v1 = vP1[i].y;
            const float u2 = vP2[i].x;
            const float v2 = vP2[i].y;

            A.at<float>(2*i,0) = 0.0;
            A.at<float>(2*i,1) = 0.0;
            A.at<float>(2*i,2) = 0.0;
            A.at<float>(2*i,3) = -u1;
            A.at<float>(2*i,4) = -v1;
            A.at<float>(2*i,5) = -1;
            A.at<float>(2*i,6) = v2*u1;
            A.at<float>(2*i,7) = v2*v1;
            A.at<float>(2*i,8) = v2;

            A.at<float>(2*i+1,0) = u1;
            A.at<float>(2*i+1,1) = v1;
            A.at<float>(2*i+1,2) = 1;
            A.at<float>(2*i+1,3) = 0.0;
            A.at<float>(2*i+1,4) = 0.0;
            A.at<float>(2*i+1,5) = 0.0;
            A.at<float>(2*i+1,6) = -u2*u1;
            A.at<float>(2*i+1,7) = -u2*v1;
            A.at<float>(2*i+1,8) = -u2;
        }

        cv::Mat u,w,vt;

        cv::SVDecomp(A,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV);

        return vt.row(8).reshape(0, 3);
    }

    cv::Mat Initializer::ComputeF21(const vector<cv::Point2f> &vP1,const vector<cv::Point2f> &vP2)
    {
        const int N = vP1.size();

        cv::Mat A(N,9,CV_32F);

        for(int i=0; i<N; i++)
        {
            const float u1 = vP1[i].x;
            const float v1 = vP1[i].y;
            const float u2 = vP2[i].x;
            const float v2 = vP2[i].y;

            A.at<float>(i,0) = u2*u1;
            A.at<float>(i,1) = u2*v1;
            A.at<float>(i,2) = u2;
            A.at<float>(i,3) = v2*u1;
            A.at<float>(i,4) = v2*v1;
            A.at<float>(i,5) = v2;
            A.at<float>(i,6) = u1;
            A.at<float>(i,7) = v1;
            A.at<float>(i,8) = 1;
        }

        cv::Mat u,w,vt;

        cv::SVDecomp(A,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV);

        cv::Mat Fpre = vt.row(8).reshape(0, 3);

        cv::SVDecomp(Fpre,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV);

        w.at<float>(2)=0;

        return u*cv::Mat::diag(w)*vt;
    }

    float Initializer::CheckHomography(const cv::Mat &H21, const cv::Mat &H12, vector<bool> &vbMatchesInliers, float sigma)
    {
        const int N = mvMatches12.size();

        const float h11 = H21.at<float>(0,0);
        const float h12 = H21.at<float>(0,1);
        const float h13 = H21.at<float>(0,2);
        const float h21 = H21.at<float>(1,0);
        const float h22 = H21.at<float>(1,1);
        const float h23 = H21.at<float>(1,2);
        const float h31 = H21.at<float>(2,0);
        const float h32 = H21.at<float>(2,1);
        const float h33 = H21.at<float>(2,2);

        const float h11inv = H12.at<float>(0,0);
        const float h12inv = H12.at<float>(0,1);
        const float h13inv = H12.at<float>(0,2);
        const float h21inv = H12.at<float>(1,0);
        const float h22inv = H12.at<float>(1,1);
        const float h23inv = H12.at<float>(1,2);
        const float h31inv = H12.at<float>(2,0);
        const float h32inv = H12.at<float>(2,1);
        const float h33inv = H12.at<float>(2,2);

        vbMatchesInliers.resize(N);

        float score = 0;

        // threshold from the chi-square distribution (2 DoF), 95% confidence
        const float th = 5.991;

        // inverse of the measurement variance
        const float invSigmaSquare = 1.0/(sigma*sigma);

        for(int i=0; i<N; i++)
        {
            bool bIn = true;

            const cv::KeyPoint &kp1 = mvKeys1[mvMatches12[i].first];
            const cv::KeyPoint &kp2 = mvKeys2[mvMatches12[i].second];

            const float u1 = kp1.pt.x;
            const float v1 = kp1.pt.y;
            const float u2 = kp2.pt.x;
            const float v2 = kp2.pt.y;

            // Reprojection error in first image
            // x2in1 = H12*x2
            const float w2in1inv = 1.0/(h31inv*u2+h32inv*v2+h33inv);
            const float u2in1 = (h11inv*u2+h12inv*v2+h13inv)*w2in1inv;
            const float v2in1 = (h21inv*u2+h22inv*v2+h23inv)*w2in1inv;

            // squared distance between (u1,v1) and (u2,v2) projected into F1
            const float squareDist1 = (u1-u2in1)*(u1-u2in1)+(v1-v2in1)*(v1-v2in1);

            // chi-square value
            // ref https://en.wikipedia.org/wiki/Chi-square_distribution [Occurrence and applications]
            const float chiSquare1 = squareDist1*invSigmaSquare;

            if(chiSquare1>th)
                bIn = false;
            else
                score += th - chiSquare1;

            // Reprojection error in second image
            // x1in2 = H21*x1
            const float w1in2inv = 1.0/(h31*u1+h32*v1+h33);
            const float u1in2 = (h11*u1+h12*v1+h13)*w1in2inv;
            const float v1in2 = (h21*u1+h22*v1+h23)*w1in2inv;

            const float squareDist2 = (u2-u1in2)*(u2-u1in2)+(v2-v1in2)*(v2-v1in2);

            const float chiSquare2 = squareDist2*invSigmaSquare;

            // accumulate inliers and score
            if(chiSquare2>th)
                bIn = false;
            else
                score += th - chiSquare2;

            if(bIn)
                vbMatchesInliers[i]=true;
            else
                vbMatchesInliers[i]=false;
        }

        return score;
    }

    float Initializer::CheckFundamental(const cv::Mat &F21, vector<bool> &vbMatchesInliers, float sigma)
    {
        const int N = mvMatches12.size();

        const float f11 = F21.at<float>(0,0);
        const float f12 = F21.at<float>(0,1);
        const float f13 = F21.at<float>(0,2);
        const float f21 = F21.at<float>(1,0);
        const float f22 = F21.at<float>(1,1);
        const float f23 = F21.at<float>(1,2);
        const float f31 = F21.at<float>(2,0);
        const float f32 = F21.at<float>(2,1);
        const float f33 = F21.at<float>(2,2);

        vbMatchesInliers.resize(N);

        float score = 0;

        // chi-square threshold, 1 DoF, 95% confidence
        const float th = 3.841;
        // chi-square threshold, 2 DoF, 95% confidence
        const float thScore = 5.991;

        const float invSigmaSquare = 1.0/(sigma*sigma);

        for(int i=0; i<N; i++)
        {
            bool bIn = true;

            const cv::KeyPoint &kp1 = mvKeys1[mvMatches12[i].first];
            const cv::KeyPoint &kp2 = mvKeys2[mvMatches12[i].second];

            const float u1 = kp1.pt.x;
            const float v1 = kp1.pt.y;
            const float u2 = kp2.pt.x;
            const float v2 = kp2.pt.y;

            // Reprojection error in second image
            // l2 = F21*x1 = (a2,b2,c2)
            const float a2 = f11*u1+f12*v1+f13;
            const float b2 = f21*u1+f22*v1+f23;
            const float c2 = f31*u1+f32*v1+f33;

            const float num2 = a2*u2+b2*v2+c2;

            const float squareDist1 = num2*num2/(a2*a2+b2*b2);

            const float chiSquare1 = squareDist1*invSigmaSquare;

            if(chiSquare1>th)
                bIn = false;
            else
                score += thScore - chiSquare1;

            // Reprojection error in first image
            // l1 = x2'*F21 = (a1,b1,c1)
            // the fundamental matrix maps a point to an epipolar line
            const float a1 = f11*u2+f21*v2+f31;
            const float b1 = f12*u2+f22*v2+f32;
            const float c1 = f13*u2+f23*v2+f33;

            const float num1 = a1*u1+b1*v1+c1;

            // squared point-to-line distance
            const float squareDist2 = num1*num1/(a1*a1+b1*b1);

            const float chiSquare2 = squareDist2*invSigmaSquare;

            if(chiSquare2>th)
                bIn = false;
            else
                score += thScore - chiSquare2;

            if(bIn)
                vbMatchesInliers[i]=true;
            else
                vbMatchesInliers[i]=false;
        }

        return score;
    }

    bool Initializer::ReconstructF(vector<bool> &vbMatchesInliers, cv::Mat &F21, cv::Mat &K,
                                   cv::Mat &R21, cv::Mat &t21, vector<cv::Point3f> &vP3D, vector<bool> &vbTriangulated,
                                   float minParallax, int minTriangulated)
    {
        int N=0;
        for(size_t i=0, iend = vbMatchesInliers.size() ; i<iend; i++)
            if(vbMatchesInliers[i])
                N++;

        // Compute Essential Matrix from Fundamental Matrix
        cv::Mat E21 = K.t()*F21*K;

        cv::Mat R1, R2, t;

        // Recover the 4 motion hypotheses
        DecomposeE(E21,R1,R2,t);

        cv::Mat t1=t;
        cv::Mat t2=-t;

        // Reconstruct with the 4 hypotheses and check
        // evaluate each hypothesis by counting successfully triangulated points
        vector<cv::Point3f> vP3D1, vP3D2, vP3D3, vP3D4;
        vector<bool> vbTriangulated1,vbTriangulated2,vbTriangulated3, vbTriangulated4;
        float parallax1,parallax2, parallax3, parallax4;

        int nGood1 = CheckRT(R1,t1,mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K, vP3D1, 4.0*mSigma2, vbTriangulated1, parallax1);
        int nGood2 = CheckRT(R2,t1,mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K, vP3D2, 4.0*mSigma2, vbTriangulated2, parallax2);
        int nGood3 = CheckRT(R1,t2,mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K, vP3D3, 4.0*mSigma2, vbTriangulated3, parallax3);
        int nGood4 = CheckRT(R2,t2,mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K, vP3D4, 4.0*mSigma2, vbTriangulated4, parallax4);

        int maxGood = max(nGood1,max(nGood2,max(nGood3,nGood4)));

        R21 = cv::Mat();
        t21 = cv::Mat();

        int nMinGood = max(static_cast<int>(0.9*N),minTriangulated);

        int nsimilar = 0;
        if(nGood1>0.7*maxGood)
            nsimilar++;
        if(nGood2>0.7*maxGood)
            nsimilar++;
        if(nGood3>0.7*maxGood)
            nsimilar++;
        if(nGood4>0.7*maxGood)
            nsimilar++;

        // If there is not a clear winner or not enough triangulated points reject initialization
        // maxGood<nMinGood: too few points were triangulated successfully
        // nsimilar>1: no hypothesis is clearly better than the others
        if(maxGood<nMinGood || nsimilar>1)
        {
            return false;
        }

        // If best reconstruction has enough parallax initialize
        if(maxGood==nGood1)
        {
            if(parallax1>minParallax)
            {
                vP3D = vP3D1;
                vbTriangulated = vbTriangulated1;

                R1.copyTo(R21);
                t1.copyTo(t21);
                return true;
            }
        }else if(maxGood==nGood2)
        {
            if(parallax2>minParallax)
            {
                vP3D = vP3D2;
                vbTriangulated = vbTriangulated2;

                R2.copyTo(R21);
                t1.copyTo(t21);
                return true;
            }
        }else if(maxGood==nGood3)
        {
            if(parallax3>minParallax)
            {
                vP3D = vP3D3;
                vbTriangulated = vbTriangulated3;

                R1.copyTo(R21);
                t2.copyTo(t21);
                return true;
            }
        }else if(maxGood==nGood4)
        {
            if(parallax4>minParallax)
            {
                vP3D = vP3D4;
                vbTriangulated = vbTriangulated4;

                R2.copyTo(R21);
                t2.copyTo(t21);
                return true;
            }
        }

        return false;
    }

    /**
     * Decompose the homography H into R,t and triangulate the points
     */
    bool Initializer::ReconstructH(vector<bool> &vbMatchesInliers, cv::Mat &H21, cv::Mat &K,
                                   cv::Mat &R21, cv::Mat &t21, vector<cv::Point3f> &vP3D, vector<bool> &vbTriangulated,
                                   float minParallax, int minTriangulated)
    {
        int N=0;
        for(size_t i=0, iend = vbMatchesInliers.size() ; i<iend; i++)
            if(vbMatchesInliers[i])
                N++;

        // We recover 8 motion hypotheses using the method of Faugeras et al.
        // Motion and structure from motion in a piecewise planar environment.
        // International Journal of Pattern Recognition and Artificial Intelligence, 1988

        cv::Mat invK = K.inv();
        cv::Mat A = invK*H21*K;

        cv::Mat U,w,Vt,V;
        cv::SVD::compute(A,w,U,Vt,cv::SVD::FULL_UV);
        V=Vt.t();

        float s = cv::determinant(U)*cv::determinant(Vt);

        float d1 = w.at<float>(0);
        float d2 = w.at<float>(1);
        float d3 = w.at<float>(2);

        if(d1/d2<1.00001 || d2/d3<1.00001)
        {
            return false;
        }

        vector<cv::Mat> vR, vt, vn;
        vR.reserve(8);
        vt.reserve(8);
        vn.reserve(8);

        // n'=[x1 0 x3] 4 possibilities: e1=e3=1, e1=1 e3=-1, e1=-1 e3=1, e1=e3=-1
        float aux1 = sqrt((d1*d1-d2*d2)/(d1*d1-d3*d3));
        float aux3 = sqrt((d2*d2-d3*d3)/(d1*d1-d3*d3));
        float x1[] = {aux1,aux1,-aux1,-aux1};
        float x3[] = {aux3,-aux3,aux3,-aux3};

        // case d'=d2
        float aux_stheta = sqrt((d1*d1-d2*d2)*(d2*d2-d3*d3))/((d1+d3)*d2);

        float ctheta = (d2*d2+d1*d3)/((d1+d3)*d2);
        float stheta[] = {aux_stheta, -aux_stheta, -aux_stheta, aux_stheta};

        for(int i=0; i<4; i++)
        {
            cv::Mat Rp=cv::Mat::eye(3,3,CV_32F);
            Rp.at<float>(0,0)=ctheta;
            Rp.at<float>(0,2)=-stheta[i];
            Rp.at<float>(2,0)=stheta[i];
            Rp.at<float>(2,2)=ctheta;

            cv::Mat R = s*U*Rp*Vt;
            vR.push_back(R);

            cv::Mat tp(3,1,CV_32F);
            tp.at<float>(0)=x1[i];
            tp.at<float>(1)=0;
            tp.at<float>(2)=-x3[i];
            tp*=d1-d3;

            cv::Mat t = U*tp;
            vt.push_back(t/cv::norm(t));

            cv::Mat np(3,1,CV_32F);
            np.at<float>(0)=x1[i];
            np.at<float>(1)=0;
            np.at<float>(2)=x3[i];

            cv::Mat n = V*np;
            if(n.at<float>(2)<0)
                n=-n;
            vn.push_back(n);
        }

        // case d'=-d2
        float aux_sphi = sqrt((d1*d1-d2*d2)*(d2*d2-d3*d3))/((d1-d3)*d2);

        float cphi = (d1*d3-d2*d2)/((d1-d3)*d2);
        float sphi[] = {aux_sphi, -aux_sphi, -aux_sphi, aux_sphi};

        for(int i=0; i<4; i++)
        {
            cv::Mat Rp=cv::Mat::eye(3,3,CV_32F);
            Rp.at<float>(0,0)=cphi;
            Rp.at<float>(0,2)=sphi[i];
            Rp.at<float>(1,1)=-1;
            Rp.at<float>(2,0)=sphi[i];
            Rp.at<float>(2,2)=-cphi;

            cv::Mat R = s*U*Rp*Vt;
            vR.push_back(R);

            cv::Mat tp(3,1,CV_32F);
            tp.at<float>(0)=x1[i];
            tp.at<float>(1)=0;
            tp.at<float>(2)=x3[i];
            tp*=d1+d3;

            cv::Mat t = U*tp;
            vt.push_back(t/cv::norm(t));

            cv::Mat np(3,1,CV_32F);
            np.at<float>(0)=x1[i];
            np.at<float>(1)=0;
            np.at<float>(2)=x3[i];

            cv::Mat n = V*np;
            if(n.at<float>(2)<0)
                n=-n;
            vn.push_back(n);
        }

        int bestGood = 0;
        int secondBestGood = 0;
        int bestSolutionIdx = -1;
        float bestParallax = -1;
        vector<cv::Point3f> bestP3D;
        vector<bool> bestTriangulated;

        // Instead of applying the visibility constraints proposed in the Faugeras' paper (which could fail for points seen with low parallax)
        // We reconstruct all hypotheses and check in terms of triangulated points and parallax
        // For each of the 8 (R,t) hypotheses, count the matches that triangulate successfully and keep the best and second-best models
        for(size_t i=0; i<8; i++)
        {
            float parallaxi;
            vector<cv::Point3f> vP3Di;
            vector<bool> vbTriangulatedi;
            int nGood = CheckRT(vR[i],vt[i],mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K,vP3Di, 4.0*mSigma2, vbTriangulatedi, parallaxi);

            if(nGood>bestGood)
            {
                secondBestGood = bestGood;
                bestGood = nGood;
                bestSolutionIdx = i;
                bestParallax = parallaxi;
                bestP3D = vP3Di;
                bestTriangulated = vbTriangulatedi;
            }
            else if(nGood>secondBestGood)
            {
                secondBestGood = nGood;
            }
        }

        /**
         * Accept the solution only if:
         * 1. the best model is clearly better than the second-best one;
         * 2. the parallax of the best model is >= the minimum parallax;
         * 3. the number of successfully triangulated points of the best model exceeds the threshold;
         * 4. the triangulated points of the best model cover more than 90% of the H inlier matches
         */
        if(secondBestGood<0.75*bestGood && bestParallax>=minParallax && bestGood>minTriangulated && bestGood>0.9*N)
        {
            vR[bestSolutionIdx].copyTo(R21);
            vt[bestSolutionIdx].copyTo(t21);
            vP3D = bestP3D;
            vbTriangulated = bestTriangulated;

            return true;
        }

        return false;
    }

    /// <Multiple View Geometry in Computer Vision, 2nd Edition>, 12.2 Linear triangulation methods
    void Initializer::Triangulate(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Mat &P1, const cv::Mat &P2, cv::Mat &x3D)
    {
        cv::Mat A(4,4,CV_32F);

        A.row(0) = kp1.pt.x*P1.row(2)-P1.row(0);
        A.row(1) = kp1.pt.y*P1.row(2)-P1.row(1);
        A.row(2) = kp2.pt.x*P2.row(2)-P2.row(0);
        A.row(3) = kp2.pt.y*P2.row(2)-P2.row(1);

        cv::Mat u,w,vt;
        cv::SVD::compute(A,w,u,vt,cv::SVD::MODIFY_A| cv::SVD::FULL_UV);
        x3D = vt.row(3).t();
        x3D = x3D.rowRange(0,3)/x3D.at<float>(3);
    }

    /// vNormalizedPoints = T*vKeys
    ///
    /// T = [ sX  0  -meanX*sX;
    ///       0  sY  -meanY*sY;
    ///       0   0   1        ]
    ///
    /// Normalize the pixel coordinates of the keypoints
    void Initializer::Normalize(const vector<cv::KeyPoint> &vKeys, vector<cv::Point2f> &vNormalizedPoints, cv::Mat &T)
    {
        float meanX = 0;
        float meanY = 0;
        const int N = vKeys.size();

        vNormalizedPoints.resize(N);

        /// 1. get meanX and meanY
        for(int i=0; i<N; i++)
        {
            meanX += vKeys[i].pt.x;
            meanY += vKeys[i].pt.y;
        }

        // 1. compute the centroid
        meanX = meanX/N;
        meanY = meanY/N;

        float meanDevX = 0;
        float meanDevY = 0;

        // 2. subtract the centroid
        for(int i=0; i<N; i++)
        {
            /// remove the center of mass
            vNormalizedPoints[i].x = vKeys[i].pt.x - meanX;
            vNormalizedPoints[i].y = vKeys[i].pt.y - meanY;

            meanDevX += fabs(vNormalizedPoints[i].x);
            meanDevY += fabs(vNormalizedPoints[i].y);
        }

        /// mean abs dis to center of mass
        // 3. mean absolute distance of the points to the centroid
        meanDevX = meanDevX/N;
        meanDevY = meanDevY/N;

        /// 4. compute the scale factors sX, sY
        float sX = 1.0/meanDevX;
        float sY = 1.0/meanDevY;

        /// 5. scale the points with the scale factors
        for(int i=0; i<N; i++)
        {
            vNormalizedPoints[i].x = vNormalizedPoints[i].x * sX;
            vNormalizedPoints[i].y = vNormalizedPoints[i].y * sY;
        }

        // 6. store the transform T, used later to undo the normalization
        T = cv::Mat::eye(3,3,CV_32F);
        T.at<float>(0,0) = sX;
        T.at<float>(1,1) = sY;
        T.at<float>(0,2) = -meanX*sX;
        T.at<float>(1,2) = -meanY*sY;
    }

    /**
     * @brief Check a candidate R,t
     * R: input rotation matrix
     * t: input translation vector
     * vKeys1: keypoints of the first frame
     * vKeys2: keypoints of the second frame
     * vMatches12: input matches
     * vbMatchesInliers: inlier flags of the matches
     * K: camera intrinsics
     * vP3D: output 3D point coordinates
     * th2: squared reprojection error threshold
     * vbGood: output flags of the good points
     * parallax: output parallax
     */
    int Initializer::CheckRT(const cv::Mat &R, const cv::Mat &t, const vector<cv::KeyPoint> &vKeys1, const vector<cv::KeyPoint> &vKeys2,
                             const vector<Match> &vMatches12, vector<bool> &vbMatchesInliers,
                             const cv::Mat &K, vector<cv::Point3f> &vP3D, float th2, vector<bool> &vbGood, float &parallax)
    {
        // Calibration parameters
        const float fx = K.at<float>(0,0);
        const float fy = K.at<float>(1,1);
        const float cx = K.at<float>(0,2);
        const float cy = K.at<float>(1,2);

        vbGood = vector<bool>(vKeys1.size(),false);
        vP3D.resize(vKeys1.size());

        vector<float> vCosParallax;
        vCosParallax.reserve(vKeys1.size());

        // Camera 1 Projection Matrix K[I|0]
        // the frame of camera 1 is taken as the world frame
        cv::Mat P1(3,4,CV_32F,cv::Scalar(0));
        K.copyTo(P1.rowRange(0,3).colRange(0,3));

        // the optical centre of camera 1 is the world origin (0,0,0)
        cv::Mat O1 = cv::Mat::zeros(3,1,CV_32F);

        // Camera 2 Projection Matrix K[R|t]
        cv::Mat P2(3,4,CV_32F);
        R.copyTo(P2.rowRange(0,3).colRange(0,3));
        t.copyTo(P2.rowRange(0,3).col(3));
        P2 = K*P2;

        // optical centre of camera 2 expressed in the world (camera 1) frame
        // X2 = R*X1 + t  ==>  O2 = -R^T * t
        cv::Mat O2 = -R.t()*t;

        int nGood=0;

        // iterate over all matched feature points
        for(size_t i=0, iend=vMatches12.size();i<iend;i++)
        {
            // skip matches that are not inliers
            if(!vbMatchesInliers[i])
                continue;

            const cv::KeyPoint &kp1 = vKeys1[vMatches12[i].first];
            const cv::KeyPoint &kp2 = vKeys2[vMatches12[i].second];
            cv::Mat p3dC1;

            // triangulate the matched pair
            Triangulate(kp1,kp2,P1,P2,p3dC1);

            // discard the point if any coordinate is not finite
            if(!isfinite(p3dC1.at<float>(0)) || !isfinite(p3dC1.at<float>(1)) || !isfinite(p3dC1.at<float>(2)))
            {
                vbGood[vMatches12[i].first]=false;
                continue;
            }

            // Check parallax
            cv::Mat normal1 = p3dC1 - O1;
            float dist1 = cv::norm(normal1);

            cv::Mat normal2 = p3dC1 - O2;
            float dist2 = cv::norm(normal2);

            // cosine of the angle subtended at the 3D point by the two optical centres
            float cosParallax = normal1.dot(normal2)/(dist1*dist2);

            // Check depth in front of first camera (only if enough parallax, as "infinite" points can easily go to negative depth)
            // reject if the depth is negative while the parallax is sufficient
            if(p3dC1.at<float>(2)<=0 && cosParallax<0.99998)
                continue;

            // Check depth in front of second camera (only if enough parallax, as "infinite" points can easily go to negative depth)
            // reject if the depth is negative while the parallax is sufficient
            cv::Mat p3dC2 = R*p3dC1+t;

            if(p3dC2.at<float>(2)<=0 && cosParallax<0.99998)
                continue;

            // Check reprojection error in first image
            // reproject the triangulated 3D point into the first image and compute the squared error
            float im1x, im1y;
            float invZ1 = 1.0/p3dC1.at<float>(2);
            im1x = fx*p3dC1.at<float>(0)*invZ1+cx;
            im1y = fy*p3dC1.at<float>(1)*invZ1+cy;

            float squareError1 = (im1x-kp1.pt.x)*(im1x-kp1.pt.x)+(im1y-kp1.pt.y)*(im1y-kp1.pt.y);

            if(squareError1>th2)
                continue;

            // Check reprojection error in second image
            // reproject the triangulated 3D point into the second image and compute the squared error
            float im2x, im2y;
            float invZ2 = 1.0/p3dC2.at<float>(2);
            im2x = fx*p3dC2.at<float>(0)*invZ2+cx;
            im2y = fy*p3dC2.at<float>(1)*invZ2+cy;

            float squareError2 = (im2x-kp2.pt.x)*(im2x-kp2.pt.x)+(im2y-kp2.pt.y)*(im2y-kp2.pt.y);

            // reject if the reprojection error is too large
            if(squareError2>th2)
                continue;

            // store the parallax cosine and the 3D point
            vCosParallax.push_back(cosParallax);
            vP3D[vMatches12[i].first] = cv::Point3f(p3dC1.at<float>(0),p3dC1.at<float>(1),p3dC1.at<float>(2));
            nGood++;

            // the point counts as good only if the parallax is large enough
            if(cosParallax<0.99998)
                vbGood[vMatches12[i].first]=true;
        } //for

        if(nGood>0)
        {
            // sort the parallax cosines in ascending order (largest parallax angle first)
            sort(vCosParallax.begin(),vCosParallax.end());

            // take the 50th element, or the last one if there are fewer (i.e. the 50th-largest parallax angle)
            size_t idx = min(50,int(vCosParallax.size()-1));
            // convert it to a parallax angle in degrees
            parallax = acos(vCosParallax[idx])*180/CV_PI;
        }
        else
            parallax=0;

        return nGood;
    }

    /**
     * Decompose the essential matrix into two candidate rotations and a translation (up to sign)
     */
    void Initializer::DecomposeE(const cv::Mat &E, cv::Mat &R1, cv::Mat &R2, cv::Mat &t)
    {
        cv::Mat u,w,vt;
        cv::SVD::compute(E,w,u,vt);

        u.col(2).copyTo(t);
        t=t/cv::norm(t);

        cv::Mat W(3,3,CV_32F,cv::Scalar(0));
        W.at<float>(0,1)=-1;
        W.at<float>(1,0)=1;
        W.at<float>(2,2)=1;

        R1 = u*W*vt;
        if(cv::determinant(R1)<0)
            R1=-R1;

        R2 = u*W.t()*vt;
        if(cv::determinant(R2)<0)
            R2=-R2;
    }

    } //namespace ORB_SLAM
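
    As a worked example of the linear triangulation used above, the following stand-alone test program (my own sketch, not part of ORB-SLAM2; the intrinsics and the 0.1 m baseline are made-up values) builds the same 4x4 DLT system as Initializer::Triangulate for a synthetic point and recovers it with cv::SVD.

    // Stand-alone check of the DLT triangulation from Initializer::Triangulate.
    // The cameras and the 3D point are synthetic; only the A-matrix construction
    // and the SVD step mirror the ORB-SLAM2 code.
    #include <opencv2/opencv.hpp>
    #include <iostream>

    int main()
    {
        // P = K[R|t]: identity pose for camera 1, a 0.1 m translation along x for camera 2.
        cv::Mat K = (cv::Mat_<float>(3,3) << 500, 0, 320,  0, 500, 240,  0, 0, 1);
        cv::Mat P1 = cv::Mat::zeros(3,4,CV_32F);
        K.copyTo(P1.colRange(0,3));
        cv::Mat P2 = cv::Mat::zeros(3,4,CV_32F);
        K.copyTo(P2.colRange(0,3));
        P2.at<float>(0,3) = -50.0f;                        // K*t with t = (-0.1, 0, 0)

        // Synthetic 3D point and its two (noise-free) projections.
        cv::Mat X = (cv::Mat_<float>(4,1) << 0.3f, -0.2f, 2.0f, 1.0f);
        cv::Mat x1 = P1*X, x2 = P2*X;
        cv::Point2f p1(x1.at<float>(0)/x1.at<float>(2), x1.at<float>(1)/x1.at<float>(2));
        cv::Point2f p2(x2.at<float>(0)/x2.at<float>(2), x2.at<float>(1)/x2.at<float>(2));

        // Same 4x4 system as Initializer::Triangulate: each view contributes
        // the rows x*P.row(2)-P.row(0) and y*P.row(2)-P.row(1).
        cv::Mat A(4,4,CV_32F);
        A.row(0) = p1.x*P1.row(2) - P1.row(0);
        A.row(1) = p1.y*P1.row(2) - P1.row(1);
        A.row(2) = p2.x*P2.row(2) - P2.row(0);
        A.row(3) = p2.y*P2.row(2) - P2.row(1);

        cv::Mat w, u, vt;
        cv::SVD::compute(A, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV);
        cv::Mat Xh = vt.row(3).t();                        // null vector = homogeneous point
        Xh = Xh.rowRange(0,3) / Xh.at<float>(3);           // dehomogenize

        std::cout << "recovered point:" << std::endl << Xh << std::endl;   // ~ (0.3, -0.2, 2.0)
        return 0;
    }

    Because the two projections are noise-free, A has an exact null space and the recovered point equals (0.3, -0.2, 2.0) up to floating-point error; with real, noisy matches the SVD returns a least-squares solution instead, which is why CheckRT afterwards re-checks depth, parallax and reprojection error for every triangulated point.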