【ROS2】Depth camera topics and messages in detail: CameraInfo, Image, PointCloud2


【ROS】郭老二's blog series: ROS index

1、Topic list

1) Image and depth topics

ros2 topic list -t
/camera/color/camera_info [sensor_msgs/msg/CameraInfo]
/camera/color/image_raw [sensor_msgs/msg/Image]
/camera/depth/camera_info [sensor_msgs/msg/CameraInfo]
/camera/depth/image_raw [sensor_msgs/msg/Image]
/camera/depth/points [sensor_msgs/msg/PointCloud2]
/camera/depth_filter_status [std_msgs/msg/String]
/camera/ir/camera_info [sensor_msgs/msg/CameraInfo]
/camera/ir/image_raw [sensor_msgs/msg/Image]
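
The three message types above are the ones named in the title: sensor_msgs/msg/CameraInfo carries the calibration (the 3x3 intrinsic matrix k plus the distortion model and coefficients), sensor_msgs/msg/Image carries the raw color, depth and IR frames, and sensor_msgs/msg/PointCloud2 carries the depth point cloud. Below is a minimal rclcpp sketch that subscribes to three of the topics listed above and prints a few fields from each message; the node name, QoS settings and log output are illustrative assumptions, not anything published by the driver.

#include <cmath>
#include <cstddef>
#include <memory>

#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/camera_info.hpp>
#include <sensor_msgs/msg/image.hpp>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <sensor_msgs/point_cloud2_iterator.hpp>

class CameraTopicListener : public rclcpp::Node
{
public:
  CameraTopicListener() : Node("camera_topic_listener")
  {
    // Intrinsics: k is the 3x3 camera matrix in row-major order,
    // so fx = k[0], fy = k[4], cx = k[2], cy = k[5].
    info_sub_ = create_subscription<sensor_msgs::msg::CameraInfo>(
      "/camera/color/camera_info", 10,
      [this](const sensor_msgs::msg::CameraInfo::SharedPtr msg) {
        RCLCPP_INFO(get_logger(), "intrinsics: fx=%.1f fy=%.1f cx=%.1f cy=%.1f",
                    msg->k[0], msg->k[4], msg->k[2], msg->k[5]);
      });

    // Raw color frames: width/height in pixels, encoding such as "rgb8".
    image_sub_ = create_subscription<sensor_msgs::msg::Image>(
      "/camera/color/image_raw", 10,
      [this](const sensor_msgs::msg::Image::SharedPtr msg) {
        RCLCPP_INFO(get_logger(), "image: %ux%u encoding=%s",
                    msg->width, msg->height, msg->encoding.c_str());
      });

    // Depth point cloud: iterate the float "z" field and count finite depths.
    points_sub_ = create_subscription<sensor_msgs::msg::PointCloud2>(
      "/camera/depth/points", rclcpp::SensorDataQoS(),
      [this](const sensor_msgs::msg::PointCloud2::SharedPtr msg) {
        sensor_msgs::PointCloud2ConstIterator<float> it_z(*msg, "z");
        std::size_t valid = 0;
        for (; it_z != it_z.end(); ++it_z) {
          if (std::isfinite(*it_z)) { ++valid; }
        }
        RCLCPP_INFO(get_logger(), "cloud: %u x %u, %zu finite depth values",
                    msg->width, msg->height, valid);
      });
  }

private:
  rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr info_sub_;
  rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr image_sub_;
  rclcpp::Subscription<sensor_msgs::msg::PointCloud2>::SharedPtr points_sub_;
};

int main(int argc, char ** argv)
{
  rclcpp::init(argc, argv);
  rclcpp::spin(std::make_shared<CameraTopicListener>());
  rclcpp::shutdown();
  return 0;
}

Built into a package that depends on rclcpp and sensor_msgs, a listener like this is usually enough to confirm that the driver, resolution and intrinsics match expectations before doing any further processing.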

2) IMU topics
If the depth camera has an integrated IMU, the related topics are as follows (using an Orbbec depth camera as an example):

/camera/gyro_accel/sample [sensor_msgs/msg/Imu]
/camera/gyro/im
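
A minimal sketch for consuming the IMU stream is shown below. It assumes the driver publishes sensor_msgs/msg/Imu on /camera/gyro_accel/sample as in the list above; the exact IMU topic names differ between driver versions, so verify them with ros2 topic list before relying on this.

#include <memory>

#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/imu.hpp>

class ImuListener : public rclcpp::Node
{
public:
  ImuListener() : Node("imu_listener")
  {
    // sensor_msgs/msg/Imu carries orientation, angular_velocity (rad/s) and
    // linear_acceleration (m/s^2); a field the driver does not estimate is
    // flagged by setting element 0 of its covariance matrix to -1.
    sub_ = create_subscription<sensor_msgs::msg::Imu>(
      "/camera/gyro_accel/sample", rclcpp::SensorDataQoS(),
      [this](const sensor_msgs::msg::Imu::SharedPtr msg) {
        RCLCPP_INFO(get_logger(),
                    "gyro [%.3f %.3f %.3f] rad/s  accel [%.3f %.3f %.3f] m/s^2",
                    msg->angular_velocity.x, msg->angular_velocity.y,
                    msg->angular_velocity.z,
                    msg->linear_acceleration.x, msg->linear_acceleration.y,
                    msg->linear_acceleration.z);
      });
  }

private:
  rclcpp::Subscription<sensor_msgs::msg::Imu>::SharedPtr sub_;
};

int main(int argc, char ** argv)
{
  rclcpp::init(argc, argv);
  rclcpp::spin(std::make_shared<ImuListener>());
  rclcpp::shutdown();
  return 0;
}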