/*
使用两张图片进行三维重建
流程:
1.求相机内外参数
a):2D-2D:可通过提取特征点、计算基本(或本质)矩阵、分解基本(或本质)矩阵得到R、t(K已知)
b):也可通过相机标定的方式,直接获得R、t、K
2.三维重建
a):可通过三角法进行重建
b):也可通过RGB-D相机获得深度z,再求得x,y进行重建
注意事项:
1.任意两张图片之间的K略有不同,不影响重建效果,但要尽量选择相邻的图片,否则匹配的特征点较少
2.可由本质矩阵E通过SVD分解可得到4个解,只有选择正确的R、T才能完成重建,否则归一化的坐标之后无法正常显示;
使用recoverPose则能直接得到正确的R、t
3.在使用triangulatePoints三维重建时,有两种归一化方式
4.由于E本身具有尺度等价性,它分解得到的t具有一个尺度,即在分解过程中,对t乘以任意非零常数,分解都是成立的。因此常把t进行归一化,让其长度等于1。
对t长度的归一化,导致重建的尺度不确定性。因此如何1:1三维重建?
*/
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>
using namespace std;
using namespace cv;
using namespace pcl;
using namespace cv::xfeatures2d;
// ratio & symmetry test
void ratioTest(vector<vector<DMatch>> &matches, vector<DMatch> &goodMatches);
void symmetryTest(const vector<DMatch>& matches1, const vector<DMatch>& matches2, vector<DMatch>& symMatches);
// 从匹配对中提取特征点
void fillPoints(vector<DMatch> goodMatches, vector<KeyPoint> keypoints1, vector<KeyPoint> keypoints2, vector<Point2f>& points1, vector<Point2f>& points2);
// 三维重建
void reconstruct(Mat& K, Mat& fundamentalMatrix, vector<Point2f>& points1, vector<Point2f>& points2, vector<Point3f>& points3D);
// 获取关键点RGB
void getPointColor(vector<Point2f> points1, Mat BaseImageLeft, vector<Vec3b>& colors);
int main(int argc, char* argv[])
{
// PCL可视化
PointCloud<PointXYZRGB>::Ptr cloud(new PointCloud<PointXYZRGB>);
boost::shared_ptr<visualization::PCLVisualizer> viewer(new visualization::PCLVisualizer("3D viewer")); // 实例化PCLVisualizer对象,窗口命名为3D viewer
Mat K; // 内参数矩阵
Matx34d P, P1; // 两图片的相机坐标
// 1.读入图片
Mat BaseImageLeft = imread(".\\images\\003.png", -1);
Mat BaseImageRight = imread(".\\images\\004.png", -1);
if (BaseImageLeft.empty() || BaseImageRight.empty())
{
cout << "ERROR! NO IMAGE LOADED!" << endl;
return -1;
}
cout << "processing..." << endl;
// 2.SIFT提取特征点
Ptr<Feature2D> detector = xfeatures2d::SIFT::create(0, 3, 0.04, 10);
vector<KeyPoint> keypoints_1, keypoints_2; // 关键点
Mat descriptors_1, descriptors_2; // 描述符
detector->detectAndCompute(BaseImageLeft, noArray(), keypoints_1, descriptors_1);
detector->detectAndCompute(BaseImageRight, noArray(), keypoints_2, descriptors_2);
// 3.Flann匹配特征点
vector<vector<DMatch>> matches1, matches2;
vector<DMatch> goodMatches1, goodMatches2, goodMatches, outMatches;
FlannBasedMatcher matcher;
// 1个descriptors_1[i]对应1个matches[i],1个matches[i]对应2个DMatch(2 nearest)
matcher.knnMatch(descriptors_1, descriptors_2, matches1, 2);// find 2 nearest neighbours, match.size() = query.rowsize()
matcher.knnMatch(descriptors_2, descriptors_1, matches2, 2);
// 4.使用Ratio Test和Symmetry Test消除误匹配点,提高重建精度
ratioTest(matches1, goodMatches1);
ratioTest(matches2, goodMatches2);
symmetryTest(goodMatches1, goodMatches2, goodMatches);// Symmetry Test
// 5.计算基本矩阵F
Mat fundamentalMatx;
vector<Point2f> points1, points2;
if (goodMatches.size() < 30)
{
cerr << "ERROR: NOT ENOUGH MATCHES" << endl;
}
else
{
// 使用RANSAC消除误匹配(保留inliers值为1的特征点)
fillPoints(goodMatches, keypoints_1, keypoints_2, points1, points2);
vector<uchar> inliers(points1.size(), 0); // 内点为1,外点为0
fundamentalMatx = findFundamentalMat(points1, points2, inliers, CV_FM_RANSAC, 3.0, 0.99);
vector<DMatch>::const_iterator itM = goodMatches.begin();
for (vector<uchar>::const_iterator itIn = inliers.begin(); itIn != inliers.end(); ++itIn, ++itM)
{
if (*itIn) // 若为内点
outMatches.push_back(*itM); // 用于计算F的最终匹配对
}
points1.clear();
points2.clear();
// 计算基本矩阵F
fillPoints(outMatches, keypoints_1, keypoints_2, points1, points2);
fundamentalMatx = findFundamentalMat(points1, points2, CV_FM_8POINT);
}
// 6.三角化三维重建
K = (Mat_<double>(3, 3) << 2759.48, 0, 1520.69, 0, 2764.16, 1006.81, 0, 0, 1); // Fountain的内参数矩阵
vector<Point3f> points3D;
reconstruct(K, fundamentalMatx, points1, points2, points3D);
cout << "\t3D Reconstruction was Finished." << endl;
// 7.可视化
vector<Vec3b> points_Colors;
getPointColor(points1, BaseImageLeft, points_Colors);
for (size_t i = 0; i < points3D.size(); i++)
{
PointXYZRGB p;
p.x = points3D[i].x;
p.y = points3D[i].y;
p.z = points3D[i].z;
p.b = points_Colors[i][0];
p.g = points_Colors[i][1];
p.r = points_Colors[i][2];
cloud->points.push_back(p);
}
viewer->setBackgroundColor(0, 0, 0); // 设置背景颜色
//viewer->addCoordinateSystem(0.1, 0, 0, 0);
viewer->addPointCloud<PointXYZRGB>(cloud, "sample cloud"); // 将点云数据添加到视窗中
viewer->setPointCloudRenderingProperties(visualization::PCL_VISUALIZER_OPACITY, 2, "sample cloud");// 设置点云显示属性,
while (!viewer->wasStopped()) { // 保持窗口一直打开
viewer->spinOnce(100);
boost::this_thread::sleep(boost::posix_time::microseconds(1000));
}
waitKey();
return 0;
}
// 比率检测
// Lowe's ratio test: keep a match only when its best candidate is clearly
// closer than the second-best (distance ratio below 0.8).
void ratioTest(vector<vector<DMatch>> &matches, vector<DMatch> &goodMatches)
{
    for (const vector<DMatch>& candidates : matches)
    {
        // Need both nearest neighbours to form a ratio.
        if (candidates.size() > 1 && candidates[0].distance < candidates[1].distance * 0.8)
            goodMatches.push_back(candidates[0]);
    }
}
// 对称性检测
void symmetryTest(const vector<DMatch>& matches1, const vector<DMatch>& matches2, vector<DMatch>& symMatches)
{
symMatches.clear();
for (vector<DMatch>::const_iterator matchIterator1 = matches1.begin(); matchIterator1 != matches1.end(); ++matchIterator1)
for (vector<DMatch>::const_iterator matchIterator2 = matches2.begin(); matchIterator2 != matches2.end(); ++matchIterator2)
if ((*matchIterator1).queryIdx == (*matchIterator2).trainIdx && (*matchIterator1).trainIdx == (*matchIterator2).queryIdx)
symMatches.push_back(*matchIterator1);
//symMatches.push_back(DMatch((*matchIterator1).queryIdx, (*matchIterator1).trainIdx, (*matchIterator1).distance));
}
// 由匹配对提取特征点对
// Expand a list of matches into the two aligned 2D coordinate vectors
// (points1[i] in the left image corresponds to points2[i] in the right).
// Note: parameters stay by-value to match the forward declaration above.
void fillPoints(vector<DMatch> goodMatches, vector<KeyPoint> keypoints1, vector<KeyPoint> keypoints2, vector<Point2f>& points1, vector<Point2f>& points2)
{
    for (const DMatch& match : goodMatches)
    {
        points1.push_back(keypoints1[match.queryIdx].pt); // left-image keypoint position
        points2.push_back(keypoints2[match.trainIdx].pt); // right-image keypoint position
    }
}
void reconstruct(Mat& K, Mat& fundamentalMatrix, vector<Point2f>& points1, vector<Point2f>& points2, vector<Point3f>& points3D)
{