//Repair the depth image, register the depth and color images, and draw the point cloud in OpenGL
//In this program, for MapColorFrameToDepthSpace, black regions indicate failed mappings
//For MapDepthFrameToColorSpace, white regions and duplicated regions indicate failed mappings
//Acquire and display the point cloud
#include <iostream>
#include <vector>
#include <limits>
#include <Windows.h>
#include <Kinect.h>
#include <GL/glut.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
using namespace std;
HRESULT hResult;
GLubyte rgb[3] = { 0 }; //RGB color of the current point (three bytes, indexed 0..2 below)
//static int day = 0; //day ran from 0 to 17 (leftover from the GLUT animation sample)
unsigned short colorValue = 0;
int idx = 0;
int thresh = 10;
int flag = 0; //flags whether data has been acquired for the first time
float cameraX = 0.0;
float cameraY = 0.0;
float cameraZ = 0.0;
Mat CoordinateMapperMat(1080, 1920, CV_8UC4);
Mat original(1080, 1920, CV_8UC4); //buffer for the Color frame (BGRA)
Mat temp(424, 512, CV_16UC1); //16-bit depth image matrix
Mat img(424, 512, CV_8UC1);
Mat defaultDepth(424, 512, CV_16UC1); //default (reference) depth image
Mat img2(424, 512, CV_8UC1);
Mat copyColorToDepth(424, 512, CV_8UC1);
UINT16 ptrDepthBuffer[217088] = { 0 };
UINT16 defaultDepthBuffer[217088] = { 0 };
UINT16 copydepthBuffer[217088] = { 0 };
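//each UINT16 buffer above holds one value per depth pixel: 217088 = 512 * 424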
UINT16 diff = 0;
UINT16 notNull = 0;
IDepthFrameReader * myDepthReader = nullptr;
IDepthFrame * myDepthFrame = nullptr;
IColorFrameReader * myColorReader = nullptr;
IColorFrame * myColorFrame = nullptr;
ICoordinateMapper * myMapper = nullptr; //coordinate mapper
void repairDepth(UINT16 * depthBuffer, Mat temp); //repair the depth image (definition not shown in this excerpt)
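//repairDepth() itself is not shown in this excerpt. The helper below is only a minimal
//sketch of one possible hole-filling strategy (hypothetical name repairDepthSketch, not
//the implementation this program actually uses): invalid pixels, which Kinect reports as
//depth 0, are replaced by the value of the same pixel in a reference frame such as the
//global defaultDepth.
void repairDepthSketch(Mat &depth, const Mat &reference)
{
for (int y = 0; y < depth.rows; y++)
{
for (int x = 0; x < depth.cols; x++)
{
UINT16 &d = depth.at<UINT16>(y, x);
if (d == 0) //0 means the sensor produced no measurement for this pixel
d = reference.at<UINT16>(y, x);
}
}
}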
void display(void)
{
//All drawing currently happens in myIdle(); the body below is left commented out.
/*
glClear(GL_COLOR_BUFFER_BIT);//clear all pixels
//(leftover comment from the GLUT sample that drew a white rectangle)
glColor3f(1.0, 1.0, 1.0);//set the draw color to white
glBegin(GL_POLYGON);//calls between glBegin() and glEnd() define the vertices of the primitives to draw
// emit the point
glColor3ubv(rgb);
glVertex3f(cameraX, -cameraY, cameraZ);
glEnd();
glFlush();//force buffered drawing commands to execute instead of waiting in the queue
*/
}
void init(void)
{
glClearColor(0.0, 0.0, 0.0, 0.0);//set the clear color to black
glMatrixMode(GL_PROJECTION);//select the projection matrix stack for the calls that follow
glLoadIdentity();//reset the projection matrix to the identity
glOrtho(0.0, 1.0, 0.0, 1.0, -1.0, 1.0);//orthographic projection: size does not change with distance from the viewer
}
void myIdle()
{
//printf("begin :No.");
while (myColorReader->AcquireLatestFrame(&myColorFrame) != S_OK); //spin until a new color frame is ready
myColorFrame->CopyConvertedFrameDataToArray(1080 * 1920 * 4, original.data, ColorImageFormat_Bgra);
Mat copy = original.clone(); //keep a copy of the color image for the lookup below
while (myDepthReader->AcquireLatestFrame(&myDepthFrame) != S_OK); //spin until a new depth frame is ready
UINT depthBufferSize = 0;
UINT16 * depthBuffer = nullptr;
myDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, &depthBuffer);
myDepthFrame->CopyFrameDataToArray(424 * 512, (UINT16 *)temp.data); //copy the raw data into the 16-bit image matrix
//temp.convertTo(img, CV_8UC1, 255.0 / 4500); //then convert 16-bit to 8-bit
/*
//start color-to-depth registration
vector<DepthSpacePoint> depthSpacePoints(1920 * 1080);
hResult = myMapper->MapColorFrameToDepthSpace(512 * 424, reinterpret_cast<UINT16*>(temp.data), 1920 * 1080, &depthSpacePoints[0]);
if (SUCCEEDED(hResult))
{
CoordinateMapperMat = Scalar(0, 0, 0, 0); // defined as Mat(colorHeight, colorWidth, CV_8UC4)
// iterate over every pixel of the color image
for (int y = 0; y < 1080; y++)
{
for (int x = 0; x < 1920; x++)
{
// index into the depth-space point array using the color pixel coordinates
unsigned int index = y * 1920 + x;
DepthSpacePoint p = depthSpacePoints[index];
// if the mapped depth coordinates are in range, copy the corresponding color pixel into CoordinateMapperMat
if (p.X != -std::numeric_limits<float>::infinity() && p.Y != -std::numeric_limits<float>::infinity())
{
int depthX = static_cast<int>(p.X + 0.5f);
int depthY = static_cast<int>(p.Y + 0.5f);
if ((depthX >= 0) && (depthX < 512) && (depthY >= 0) && (depthY < 424))
{
CoordinateMapperMat.at<Vec4b>(y, x) = original.at<Vec4b>(y, x);
}
}
}
}
}
*/
//repair the depth image and show the result
repairDepth(depthBuffer, temp);
temp.convertTo(img, CV_8UC1, 255.0 / 4500); //convert the 16-bit depth image to 8-bit for display
imshow("afterdepthimg", img); //repaired depth image
//start depth-to-color registration
vector<ColorSpacePoint> m_pColorCoordinates(512 * 424);
vector<CameraSpacePoint> m_pCameraCoordinates(512 * 424);
hResult = myMapper->MapDepthFrameToColorSpace(512 * 424, (UINT16 *)temp.data, 512 * 424, &m_pColorCoordinates[0]);
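//MapDepthFrameToColorSpace fills one ColorSpacePoint per depth pixel, i.e. depth index
//i = depthY * 512 + depthX maps to color coordinates (p.X, p.Y); pixels that cannot be
//mapped come back as -infinity and are skipped in the loop below.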
Mat i_depthToRgb(424, 512, CV_8UC4);
if (SUCCEEDED(hResult))
{
for (int i = 0; i < 424 * 512; i++)
{
ColorSpacePoint p = m_pColorCoordinates[i];
if (p.X != -std::numeric_limits<float>::infinity() && p.Y != -std::numeric_limits<float>::infinity())
{
int colorX = static_cast<int>(p.X + 0.5f);
int colorY = static_cast<int>(p.Y + 0.5f);
if ((colorX >= 0 && colorX < 1920) && (colorY >= 0 && colorY < 1080))
{
i_depthToRgb.data[i * 4] = copy.data[(colorY * 1920 + colorX) * 4];
i_depthToRgb.data[i * 4 + 1] = copy.data[(colorY * 1920 + colorX) * 4 + 1];
i_depthToRgb.data[i * 4 + 2] = copy.data[(colorY * 1920 + colorX) * 4 + 2];
i_depthToRgb.data[i * 4 + 3] = copy.data[(colorY * 1920 + colorX) * 4 + 3];
}
}
}
}
//build and draw the point cloud
glClear(GL_COLOR_BUFFER_BIT);//clear all pixels
glLoadIdentity();
glPointSize(10);//point size must be set outside a glBegin()/glEnd() pair
glColor3f(1.0, 1.0, 1.0);//default draw color: white (overridden per point below)
glBegin(GL_POINTS);//vertices emitted between glBegin() and glEnd() define the points to draw
hResult = myMapper->MapDepthFrameToCameraSpace(512 * 424, (UINT16 *)temp.data, 512 * 424, &m_pCameraCoordinates[0]); //relatively cheap call
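//MapDepthFrameToCameraSpace yields one CameraSpacePoint per depth pixel, with X/Y/Z in
//meters in the depth camera's coordinate frame; unmappable pixels come back as -infinity.
//Z is divided by 10 below so the points stay inside the orthographic volume set in init().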
if (SUCCEEDED(hResult))
{
for (int i = 0; i < 512 * 424; i++)
{
CameraSpacePoint p = m_pCameraCoordinates[i];
if (p.X != -std::numeric_limits<float>::infinity() && p.Y != -std::numeric_limits<float>::infinity() && p.Z != -std::numeric_limits<float>::infinity())
{
cameraX = static_cast<float>(p.X);
cameraY = static_cast<float>(p.Y);
cameraZ = static_cast<float>(p.Z);
//cout << "x: " << cameraX << "y: " << cameraY << "z: " << cameraZ << endl;
rgb[2] = i_depthToRgb.data[i * 4 + 0];//B
rgb[1] = i_depthToRgb.data[i * 4 + 1];//G
rgb[0] = i_depthToRgb.data[i * 4 + 2];//R (swap the BGRA pixel to RGB for glColor3ubv)
// emit the point
glColor3ubv(rgb);
glVertex3f(cameraX, cameraY, cameraZ / 10);//scale Z down so the cloud fits the orthographic view volume
glVertex3f(0, 0, 0);//also emit a point at the origin
}
}
}
glEnd();
glFlush();//force buffered drawing commands to execute instead of waiting in the queue
//Mat coloImg;
//resize(CoordinateMapperMat, coloImg, Size(512, 424), 0, 0, 3);
// CoordinateMapperMat holds the registration result to display
//imshow("color", copy); //color-to-depth registration result
circle(i_depthToRgb, Point(256, 212), 4, Scalar(0, 255, 255), 4); //mark the center of the depth image
imshow("i_depthToRgb", i_depthToRgb); //depth-to-color registration result
//imshow("depthimg", temp); //raw depth image
Sleep(100);
myColorFrame->Release();
myDepthFrame->Release();
//waitKey();
//leftover from the GLUT sample: an idle callback that advanced the date by one day and redrew to animate
/*
++day;
if (day >= 17)
day = 0;
printf("%d end\n", day);
//Sleep(500);
*/
display(); //currently a no-op: all drawing was done above
}
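//The GLUT wiring for display(), init() and myIdle() happens in code not shown in this
//excerpt. The helper below is only a sketch of how these callbacks are typically
//registered (hypothetical name setupGlutSketch; window title and size are assumptions):
void setupGlutSketch(int argc, char * argv[])
{
glutInit(&argc, argv); //initialize GLUT
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB); //single-buffered RGB window
glutInitWindowSize(512, 424); //match the depth resolution
glutCreateWindow("point cloud");
init(); //set the clear color and orthographic projection
glutDisplayFunc(display); //redraw callback
glutIdleFunc(myIdle); //called whenever GLUT is idle: grabs Kinect frames and draws the cloud
glutMainLoop(); //never returns
}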
int main(int argc, char* argv[])
{
IKinectSensor * mySensor = nullptr;
GetDefaultKinectSensor(&mySensor);
mySensor->Open();
IFrameDescription * myDescription = nullptr;
int depthHeight = 0, depthWidth = 0;
IDepthFrameSource * myDepthSource = nullptr;
mySensor->get_DepthFrameSource(&myDepthSource);
myDepthSource->get_FrameDescription(&myDescription);
myDescription->get_Height(&depthHeight);
myDescription->get_Width(&depthWidth);
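//for the Kinect v2 depth stream this description reports 512 x 424, matching the sizes hard-coded above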
myDepthSource->OpenReader(&myDepthReader); //prepare the Depth frames: open the Reader directly
IBodyIndexFrameSource * myBodyIndexSource = nullptr;
IBodyIndexFrameReader * myBodyIndexReader = nullptr;
IBodyIndexFrame * myBodyIndexFrame = nullptr;
mySensor->get_BodyIndexFrameSource(&myBodyIndexSource);
myBodyIndexSource->OpenReader(&myBodyIndexReader); //prepare the BodyIndex frames: open the Reader directly