Using an Azure Kinect to capture a colored 3D point cloud: run object detection on the 2D color image, then use the resulting box region to look up the corresponding points in the cloud.

cpp
#include <iostream>
#include <fstream>
#include <chrono>
#include <string>
#include <vector>
#include <cmath>
#include <sstream>
#include <set>
// OpenCV
#include <opencv2/opencv.hpp>
// Kinect DK
#include <k4a/k4a.hpp>
// PCL
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/ply_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/filters/crop_box.h>

// Point cloud type aliases
typedef pcl::PointXYZRGB PointT;
typedef pcl::PointCloud<PointT> PointCloud;

using namespace cv;
using namespace std;


// Collect the flattened pixel indices (i = y * cols + x, row-major) covered by
// the ROI; the same convention is used when indexing the point-cloud buffer.
std::set<int> getPixelIndicesInROI(const cv::Mat& image, const cv::Rect& roi) {
    std::set<int> indices;
    for (int x = roi.x; x < roi.x + roi.width; ++x) {
        for (int y = roi.y; y < roi.y + roi.height; ++y) {
            // Keep the index only if it lies inside the image bounds
            if (x >= 0 && y >= 0 && x < image.cols && y < image.rows) {
                indices.insert(x + image.cols * y);
            }
        }
    }
    return indices;
}


// Reusable point buffers (point_center is declared but not used below)
PointT point;
PointT point_center;


int main(int argc, char* argv[]) {
    /*
        Find and open an Azure Kinect device
    */
    // Count the connected devices

    const uint32_t device_count = k4a::device::get_installed_count();


    if (0 == device_count) {
        std::cout << "Error: no K4A devices found. " << std::endl;
        return -1;
    }
    else {
        std::cout << "Found " << device_count << " connected devices. " << std::endl;
        if (1 != device_count)// more than one device is also an error for this sample
        {
            std::cout << "Error: more than one K4A devices found. " << std::endl;
            return -1;
        }
        else// this sample only operates on a single device
        {
            std::cout << "Done: found 1 K4A device. " << std::endl;
        }
    }

    // Open the (default) device
    k4a::device device = k4a::device::open(K4A_DEVICE_DEFAULT);
    std::cout << "Done: open device. " << std::endl;

    /*
        Retrieve and save Azure Kinect image data
    */
    // Configure and start the device
    k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
    config.camera_fps = K4A_FRAMES_PER_SECOND_30;
    //config.camera_fps = K4A_FRAMES_PER_SECOND_15;
    config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
    config.color_resolution = K4A_COLOR_RESOLUTION_720P;
    config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
    //config.depth_mode = K4A_DEPTH_MODE_WFOV_2X2BINNED;
    config.synchronized_images_only = true;// ensures that depth and color images are both available in the capture
    device.start_cameras(&config);
    std::cout << "Done: start camera." << std::endl;

    // Text output streams for the frame lists
    ofstream rgb_out;
    ofstream d_out;

    rgb_out.open("./rgb.txt");
    d_out.open("./depth.txt");

    rgb_out << "#  color images" << endl;
    rgb_out << "#  file: rgbd_dataset" << endl;
    rgb_out << "#  timestamp" << "    " << "filename" << endl;

    d_out << "#  depth images" << endl;
    d_out << "#  file: rgbd_dataset" << endl;
    d_out << "#  timestamp" << "    " << "filename" << endl;

    rgb_out << flush;
    d_out << flush;
    // Stabilization
    k4a::capture capture;
    int iAuto = 0;// counts successful captures while auto-exposure settles
    int iAutoError = 0;// counts timed-out captures
    while (true) {
        if (device.get_capture(&capture)) {
            std::cout << iAuto << ". Capture several frames to give auto-exposure" << std::endl;

            // Skip the first n successful captures so the camera can stabilize
            if (iAuto != 30) {
                iAuto++;
                continue;
            }
            else {
                std::cout << "Done: auto-exposure" << std::endl;
                break;// the camera is stable; leave the loop
            }

        }
        else {
            std::cout << iAutoError << ". K4A_WAIT_RESULT_TIMEOUT." << std::endl;
            if (iAutoError != 30) {
                iAutoError++;
                continue;
            }
            else {
                std::cout << "Error: failed to give auto-exposure. " << std::endl;
                return -1;
            }
        }
    }
    std::cout << "-----------------------------------" << std::endl;
    std::cout << "----- Have Started Kinect DK. -----" << std::endl;
    std::cout << "-----------------------------------" << std::endl;

    // Hard-code a target box (x, y, w, h) used to cut out the point cloud;
    // in practice this would come from the 2D detector.
    cv::Rect object1(451, 123, 210, 290);
    // Center of the box
    float center_x = object1.x + object1.width / 2.0;
    float center_y = object1.y + object1.height / 2.0;
    int size_x = object1.width;
    int size_y = object1.height;

    // A bounding box alone is coarse; a segmentation algorithm that
    // extracts the person's pixel region would be more precise.
   
    

    // Grab images from the device
    k4a::image rgbImage;
    k4a::image depthImage;
    //k4a::image irImage;
    k4a::image transformed_depthImage;


    float box_z = 0; // depth (m) at the center of the target box
    

    int index = 0;
    while (index < 1) { // process a single capture
        if (device.get_capture(&capture)) {
            // rgb
            // * Each pixel of BGRA32 data is four bytes. The first three bytes represent Blue, Green,
            // * and Red data. The fourth byte is the alpha channel and is unused in the Azure Kinect APIs.
            rgbImage = capture.get_color_image();

            std::cout << "[rgb] " << "\n"
            << "format: " << rgbImage.get_format() << "\n"
            << "device_timestamp: " << rgbImage.get_device_timestamp().count() << "\n"
            << "system_timestamp: " << rgbImage.get_system_timestamp().count() << "\n"
            << "height*width: " << rgbImage.get_height_pixels() << ", " << rgbImage.get_width_pixels()
            << std::endl;

            uint8_t* color_buffer = rgbImage.get_buffer(); // pointer to the raw BGRA pixel data
            int width = rgbImage.get_width_pixels();
            int height = rgbImage.get_height_pixels();

            for (int i = 0; i < 10; i++) // print only the first 10 pixels
            {
                int px = i * 4; // 4 bytes per pixel (B, G, R, A)
                std::cout << "Pixel[" << i << "]: ("
                    << (int)color_buffer[px] << ", "  // B
                    << (int)color_buffer[px + 1] << ", " // G
                    << (int)color_buffer[px + 2] << ") " // R
                    << std::endl;
            }
            cv::Mat cv_rgbImage_with_alpha = cv::Mat(height, width, CV_8UC4, color_buffer, cv::Mat::AUTO_STEP);
            cv::Mat cv_image_no_alpha;
            cv::cvtColor(cv_rgbImage_with_alpha, cv_image_no_alpha, cv::COLOR_BGRA2BGR);

            std::set<int> roi_indices = getPixelIndicesInROI(cv_image_no_alpha, object1);
            cv::imwrite("a.jpg", cv_image_no_alpha);
            cv::imshow("color", cv_image_no_alpha);
            cv::waitKey(1);
            // depth
            // * Each pixel of DEPTH16 data is two bytes of little endian unsigned depth data. The unit of the data is in
            // * millimeters from the origin of the camera.
            depthImage = capture.get_depth_image();

            std::cout << "[depth] " << "\n"
                << "format: " << depthImage.get_format() << "\n"
                << "device_timestamp: " << depthImage.get_device_timestamp().count() << "\n"
                << "system_timestamp: " << depthImage.get_system_timestamp().count() << "\n"
                << "height*width: " << depthImage.get_height_pixels() << ", " << depthImage.get_width_pixels()
                << std::endl;

            // Build the colored point cloud
            k4a::calibration k4aCalibration = device.get_calibration(config.depth_mode, config.color_resolution);
            float fx = k4aCalibration.color_camera_calibration.intrinsics.parameters.param.fx;
            float fy = k4aCalibration.color_camera_calibration.intrinsics.parameters.param.fy;
            std::cout << "fx: " << fx << "fy: " << fy << std::endl;
            k4a::transformation k4aTransformation = k4a::transformation(k4aCalibration);

            //PointCloud::Ptr cloud(new PointCloud);
            int color_image_width_pixels = rgbImage.get_width_pixels();
            int color_image_height_pixels = rgbImage.get_height_pixels();
            transformed_depthImage = k4a::image::create(K4A_IMAGE_FORMAT_DEPTH16,
                color_image_width_pixels,
                color_image_height_pixels,
                color_image_width_pixels * (int)sizeof(uint16_t));
            k4a::image point_cloud_image = k4a::image::create(K4A_IMAGE_FORMAT_CUSTOM,
                color_image_width_pixels,
                color_image_height_pixels,
                color_image_width_pixels * 3 * (int)sizeof(int16_t));

            // If depth already matches the color resolution, copy it through;
            // otherwise register the depth image into the color camera's geometry.
            if (depthImage.get_width_pixels() == rgbImage.get_width_pixels() && depthImage.get_height_pixels() == rgbImage.get_height_pixels()) {
                std::copy(depthImage.get_buffer(), depthImage.get_buffer() + depthImage.get_height_pixels() * depthImage.get_width_pixels() * (int)sizeof(uint16_t), transformed_depthImage.get_buffer());
            }
            else {
                k4aTransformation.depth_image_to_color_camera(depthImage, &transformed_depthImage);
            }
            k4aTransformation.depth_image_to_point_cloud(transformed_depthImage, K4A_CALIBRATION_TYPE_COLOR, &point_cloud_image);

            pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
            cloud->width = color_image_width_pixels;
            cloud->height = color_image_height_pixels;
            cloud->is_dense = false;
            cloud->resize(static_cast<size_t>(color_image_width_pixels) * color_image_height_pixels);


            const int16_t* point_cloud_image_data = reinterpret_cast<const int16_t*>(point_cloud_image.get_buffer());
            const uint8_t* color_image_data = rgbImage.get_buffer();
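            // Per the k4a SDK, the xyz image packs three little-endian int16
            // values (X, Y, Z) in millimeters for every color pixel, and the
            // BGRA buffer packs four bytes per pixel; both use the same
            // flattened index i = y * width + x as getPixelIndicesInROI.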




            // std::set keeps the coordinates sorted, so the ROI extent can be
            // read off later via begin() (min) and rbegin() (max).
            std::set<float> object_pointx_list;
            std::set<float> object_pointy_list;
            std::set<float> object_pointz_list;

            for (int i = 0; i < color_image_width_pixels * color_image_height_pixels; i++) {
                if (roi_indices.count(i) > 0) {
                    object_pointx_list.insert(point_cloud_image_data[3 * i + 0] / 1000.0f);
                    object_pointy_list.insert(point_cloud_image_data[3 * i + 1] / 1000.0f);
                    object_pointz_list.insert(point_cloud_image_data[3 * i + 2] / 1000.0f);
                    if (i == int(center_x + center_y * width)) {
                        box_z = point_cloud_image_data[3 * i + 2] / 1000.0f;
                        std::cout << "depth at box center (m): " << box_z << std::endl;
                    }
                }

                point.x = point_cloud_image_data[3 * i + 0] / 1000.0f; // mm -> m
                point.y = point_cloud_image_data[3 * i + 1] / 1000.0f;
                point.z = point_cloud_image_data[3 * i + 2] / 1000.0f;

                point.b = color_image_data[4 * i + 0];
                point.g = color_image_data[4 * i + 1];
                point.r = color_image_data[4 * i + 2];
                uint8_t alpha = color_image_data[4 * i + 3];
                // Skip empty measurements (zero coordinates and zero alpha)
                if (point.x == 0 && point.y == 0 && point.z == 0 && alpha == 0)
                    continue;
                cloud->points[i] = point;
            }

            pcl::io::savePLYFile("4.ply", *cloud);   // save the full cloud as a PLY file

            // Create a PCLVisualizer
            pcl::visualization::PCLVisualizer viewer("3D Viewer");
            // Model coefficients describing the box
            pcl::ModelCoefficients coefficients;
            coefficients.values.resize(6); // x_min, x_max, y_min, y_max, z_min, z_max

            // Box extent: min/max of the ROI point coordinate sets
            coefficients.values[0] = *object_pointx_list.begin();  // x_min
            coefficients.values[1] = *object_pointx_list.rbegin();  // x_max
            coefficients.values[2] = *object_pointy_list.begin();  // y_min
            coefficients.values[3] = *object_pointy_list.rbegin();  // y_max
            // Temporary fixed z extent around the depth at the box center
            coefficients.values[4] = box_z - 0.2;  // z_min
            coefficients.values[5] = box_z + 0.1;  // z_max
            std::cout << "x_min: " << coefficients.values[0] << "x_max: " << coefficients.values[1] << std::endl;

            std::cout << "y_min: " << coefficients.values[2] << "y_max: " << coefficients.values[3] << std::endl;
            std::cout << "z_min: " << coefficients.values[4] << "z_max: " << coefficients.values[5] << std::endl;

            pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> rgb(cloud);

            viewer.addPointCloud<pcl::PointXYZRGB>(cloud, rgb, "sample cloud");

            // Add the bounding box to the viewer
            viewer.addCube(coefficients.values[0], coefficients.values[1],
                coefficients.values[2], coefficients.values[3],
                coefficients.values[4], coefficients.values[5],
                1.0, 0.0, 0.0, "cube"); // 使用红色绘制框
            viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_REPRESENTATION, pcl::visualization::PCL_VISUALIZER_REPRESENTATION_WIREFRAME, "cube");
            viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 2.0, "cube"); // line width
            // Spin until the viewer window is closed
            while (!viewer.wasStopped()) {
                viewer.spinOnce(100);
            }


        }
        else {
            std::cout << "false: K4A_WAIT_RESULT_TIMEOUT." << std::endl;
        }
        index++;
    }
    cv::destroyAllWindows();
    rgb_out << flush;
    d_out << flush;
    rgb_out.close();
    d_out.close();

    // Release images and close the device
    rgbImage.reset();
    depthImage.reset();
    capture.reset();
    device.close();

    return 0;
}
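
The listing pulls in pcl/filters/crop_box.h but, in the end, only draws the box in the viewer. As a minimal sketch (the helper name cropROI is ours, not PCL's), the ROI points could also be extracted into their own cloud with pcl::CropBox, using the min/max extents computed above:

cpp
#include <pcl/point_types.h>
#include <pcl/filters/crop_box.h>

// Extract the points inside the axis-aligned box [min, max] from the full
// cloud; the six floats correspond to coefficients.values[0..5] above.
pcl::PointCloud<pcl::PointXYZRGB>::Ptr cropROI(
    const pcl::PointCloud<pcl::PointXYZRGB>::Ptr& cloud,
    float x_min, float x_max, float y_min, float y_max, float z_min, float z_max)
{
    pcl::CropBox<pcl::PointXYZRGB> crop;
    crop.setInputCloud(cloud);
    crop.setMin(Eigen::Vector4f(x_min, y_min, z_min, 1.0f));
    crop.setMax(Eigen::Vector4f(x_max, y_max, z_max, 1.0f));
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cropped(new pcl::PointCloud<pcl::PointXYZRGB>);
    crop.filter(*cropped);
    return cropped;
}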

Steps to synthesize the colored point-cloud data (a minimal sketch follows the list):

1. Convert the 2D depth image into 3D points in the depth sensor's frame.

2. Obtain the 3D points in the color sensor's frame and project them onto the color image.

3. Save each corresponding point's xyz coordinates together with its rgb values.

4. Traverse every depth pixel and exclude invalid measurements; the resulting point set is the colored point cloud.
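
A minimal sketch of these four steps using the same k4a and PCL calls as the listing (the helper name makeColoredCloud is ours; it assumes a k4a::transformation built from the device calibration, plus a capture's depth and color images):

cpp
#include <k4a/k4a.hpp>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>

// Steps 1-4: register depth into the color camera, lift it to 3D, then pair
// each pixel's xyz with its BGRA color, dropping empty measurements.
pcl::PointCloud<pcl::PointXYZRGB>::Ptr makeColoredCloud(
    k4a::transformation& T, const k4a::image& depth, const k4a::image& color)
{
    const int w = color.get_width_pixels();
    const int h = color.get_height_pixels();

    // Steps 1-2: depth -> color camera geometry, then 2D depth -> 3D points (mm).
    k4a::image regDepth = k4a::image::create(K4A_IMAGE_FORMAT_DEPTH16, w, h,
                                             w * (int)sizeof(uint16_t));
    T.depth_image_to_color_camera(depth, &regDepth);
    k4a::image xyz = k4a::image::create(K4A_IMAGE_FORMAT_CUSTOM, w, h,
                                        w * 3 * (int)sizeof(int16_t));
    T.depth_image_to_point_cloud(regDepth, K4A_CALIBRATION_TYPE_COLOR, &xyz);

    // Steps 3-4: walk every pixel, keeping valid points with their rgb values.
    const int16_t* p = reinterpret_cast<const int16_t*>(xyz.get_buffer());
    const uint8_t* c = color.get_buffer();
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
    for (int i = 0; i < w * h; ++i) {
        if (p[3 * i + 2] == 0) continue; // depth 0 => no measurement
        pcl::PointXYZRGB pt;
        pt.x = p[3 * i + 0] / 1000.0f;   // mm -> m
        pt.y = p[3 * i + 1] / 1000.0f;
        pt.z = p[3 * i + 2] / 1000.0f;
        pt.b = c[4 * i + 0];
        pt.g = c[4 * i + 1];
        pt.r = c[4 * i + 2];
        cloud->push_back(pt);
    }
    return cloud;
}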

References:

利用Azure Kinect 生成点云(C++)_azure kinect dk获取点云-CSDN博客

基于Kinect Azure的多相机数据采集(一)_kinect dk 两个相机融合-CSDN博客
