Qt Development Techniques [Loading an ONNX Model with OpenCV and Running DNN Inference]

1. Export the ONNX model

yolo export model=xx\xx\best.pt format=onnx
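
The exported best.onnx is written next to the .pt file. Before wiring it into Qt, it can help to confirm that the file actually loads in OpenCV's DNN module and to check the shape of its output tensor, because the post-processing in the next section depends on that layout. Below is a minimal standalone sketch (it assumes best.onnx sits in the current working directory; adjust the path to your setup):

cpp
 #include <opencv2/dnn.hpp>
 #include <opencv2/core.hpp>
 #include <iostream>

 int main()
 {
     // Load the exported model; readNetFromONNX throws on a missing or unsupported file
     cv::dnn::Net net;
     try {
         net = cv::dnn::readNetFromONNX("best.onnx");   // assumed path
     }
     catch (const cv::Exception& e) {
         std::cerr << "Failed to load best.onnx: " << e.what() << std::endl;
         return 1;
     }

     // Push a dummy 640x640 frame through the network to discover the output shape
     cv::Mat dummy(640, 640, CV_8UC3, cv::Scalar::all(0));
     cv::Mat blob = cv::dnn::blobFromImage(dummy, 1.0 / 255.0, cv::Size(640, 640),
                                           cv::Scalar(), true, false, CV_32F);
     net.setInput(blob);

     std::vector<cv::Mat> outs;
     net.forward(outs, net.getUnconnectedOutLayersNames());

     for (size_t i = 0; i < outs.size(); ++i) {
         std::cout << "output " << i << " shape:";
         for (int d = 0; d < outs[i].dims; ++d)
             std::cout << " " << outs[i].size[d];   // a typical Ultralytics export prints: 1 84 8400
         std::cout << std::endl;
     }
     return 0;
 }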

2. Load the ONNX model in Qt, run inference, and display the result

cpp
 // Requires: #include <opencv2/opencv.hpp>, <QCoreApplication>, <QImage>, <QPixmap>, <iostream>
 std::string fileName = QCoreApplication::applicationDirPath().toStdString() + "/Model/best.onnx";

 // readNetFromONNX throws a cv::Exception on a missing or unsupported file,
 // so wrap the call in try/catch instead of relying on net.empty() alone
 cv::dnn::Net net;
 try {
     net = cv::dnn::readNetFromONNX(fileName);
 }
 catch (const cv::Exception& e) {
     std::cerr << "Failed to load ONNX model: " << e.what() << std::endl
         << "Check:" << std::endl
         << "1. File path: " << fileName << std::endl
         << "2. OpenCV version (requires >= 4.5)" << std::endl
         << "3. ONNX opset compatibility" << std::endl;
     return;
 }

 cv::Mat image = cv::imread(QCoreApplication::applicationDirPath().toStdString() + "/Data/test3.jpg");
 if (image.empty()) {
     std::cerr << "Failed to load image" << std::endl;
     return;
 }

 // Preprocessing: resize to 640x640 (plain resize, no letterbox padding),
 // scale pixel values to [0,1], and swap BGR -> RGB
 cv::Mat blob;
 try {
     bool swapRB = true;  // OpenCV loads images as BGR; YOLO expects RGB
     bool crop = false;
     cv::Scalar mean = cv::Scalar(0, 0, 0);
     double scale = 1.0 / 255.0;

     blob = cv::dnn::blobFromImage(image,
         scale,
         cv::Size(640, 640),
         mean,
         swapRB,
         crop,
         CV_32F);
 }
 catch (...) {
     std::cerr << "Blob creation failed" << std::endl;
     return;
 }

 // Select the compute backend. DNN_BACKEND_CUDA / DNN_TARGET_CUDA require an OpenCV build
 // with CUDA support; on a plain build fall back to DNN_BACKEND_OPENCV / DNN_TARGET_CPU.
 net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
 net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);

 std::vector<std::string> outLayerNames = net.getUnconnectedOutLayersNames();
 std::vector<cv::Mat> predictions;

 try {
     net.setInput(blob);
     net.forward(predictions, outLayerNames);
 }
 catch (const cv::Exception& e) {
     std::cerr << "Forward pass failed: " << e.what() << std::endl;
     return;
 }


 // Post-processing.
 // A model exported with the Ultralytics CLI (yolo export) has one output of shape
 // [1, 4+nc, 8400]: 4 box values (cx, cy, w, h, in 640x640 input pixels) followed by
 // nc class scores per candidate. Reshape to 2D and transpose so each row is one candidate.
 std::vector<int> classIds;
 std::vector<float> confidences;
 std::vector<cv::Rect> boxes;
 float x_factor = image.cols / 640.0f;
 float y_factor = image.rows / 640.0f;
 // YOLO detection thresholds
 float confThreshold = 0.25f;
 float nmsThreshold = 0.45f;

 for (const auto& out : predictions) {
     cv::Mat pred = out.reshape(1, out.size[1]);  // [4+nc, 8400]
     cv::transpose(pred, pred);                   // [8400, 4+nc]
     for (int i = 0; i < pred.rows; ++i) {
         // Class scores start at column 4 (v8/v11 exports have no separate objectness column;
         // an older YOLOv5-style output would start at column 5 instead)
         cv::Mat scores = pred.row(i).colRange(4, pred.cols);
         cv::Point classIdPoint;
         double confidence;
         cv::minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
         if (confidence > confThreshold) {
             // Map the box from 640x640 input coordinates back to the original image
             int centerX = static_cast<int>(pred.at<float>(i, 0) * x_factor);
             int centerY = static_cast<int>(pred.at<float>(i, 1) * y_factor);
             int width = static_cast<int>(pred.at<float>(i, 2) * x_factor);
             int height = static_cast<int>(pred.at<float>(i, 3) * y_factor);
             int left = centerX - width / 2;
             int top = centerY - height / 2;

             classIds.push_back(classIdPoint.x);
             confidences.push_back(static_cast<float>(confidence));
             boxes.push_back(cv::Rect(left, top, width, height));
         }
     }
 }

 // Non-maximum suppression (class-agnostic, across all kept candidates)
 std::vector<int> indices;
 cv::dnn::NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

 // Draw bounding boxes and labels (class id + confidence) on the image
 for (int idx : indices) {
     cv::Rect box = boxes[idx];
     cv::rectangle(image, box, cv::Scalar(0, 255, 0), 2);
     std::string label = cv::format("id:%d %.2f", classIds[idx], confidences[idx]);
     cv::putText(image, label, cv::Point(box.x, box.y - 10), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 2);
 }

 // Display the result in ui->label_Map
 // (QPixmap::fromImage makes a deep copy, so wrapping image.data in a temporary QImage is safe here)
 cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
 QPixmap pixmap = QPixmap::fromImage(QImage(image.data, image.cols, image.rows, static_cast<int>(image.step), QImage::Format_RGB888));
 ui->label_Map->setPixmap(pixmap);
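
If the Mat-to-QPixmap conversion is needed in more than one slot, it can be pulled into a small helper. The sketch below is just one way to do it (the name cvMatToQPixmap is illustrative, not from any library); it deep-copies the pixels so the returned pixmap never references the local cv::Mat buffer:

cpp
 #include <opencv2/imgproc.hpp>
 #include <QImage>
 #include <QPixmap>

 // Convert an 8-bit, 3-channel BGR cv::Mat to a QPixmap (deep copy)
 static QPixmap cvMatToQPixmap(const cv::Mat& bgr)
 {
     cv::Mat rgb;
     cv::cvtColor(bgr, rgb, cv::COLOR_BGR2RGB);
     QImage img(rgb.data, rgb.cols, rgb.rows,
                static_cast<int>(rgb.step), QImage::Format_RGB888);
     // copy() gives the QImage its own pixel buffer, independent of the local Mat
     return QPixmap::fromImage(img.copy());
 }

With such a helper, the display step becomes a one-liner, optionally scaled to the label:

cpp
 ui->label_Map->setPixmap(cvMatToQPixmap(image).scaled(
     ui->label_Map->size(), Qt::KeepAspectRatio, Qt::SmoothTransformation));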