QT开发技术 [opencv加载onnx模型,dnn推理]

一、导出onnx 模型

yolo export model=xx\xx\best.pt format=onnx

二、qt加载onnx模型,推理显示

cpp
 std::string fileName = QCoreApplication::applicationDirPath().toStdString() + "/Model/best.onnx";

 // NOTE: on OpenCV 4.x, readNetFromONNX throws cv::Exception on failure
 // instead of returning an empty net, so the load must be guarded with
 // try/catch — a bare net.empty() check alone never fires.
 cv::dnn::Net net;
 try {
     net = cv::dnn::readNetFromONNX(fileName);
 }
 catch (const cv::Exception& e) {
     std::cerr << "Failed to load ONNX model: " << e.what() << std::endl;
     return;
 }
 if (net.empty()) {
     std::cerr << "Failed to load ONNX model. Check: " << std::endl
         << "1. File path: " << fileName << std::endl
         << "2. OpenCV version (require >= 4.5)" << std::endl
         << "3. ONNX opset compatibility" << std::endl;
     return;
 }

 cv::Mat image = cv::imread(QCoreApplication::applicationDirPath().toStdString() + "/Data/test3.jpg");
 if (image.empty()) {
     std::cerr << "Failed to load image" << std::endl;
     return;
 }

 // Preprocessing: letterbox-free resize to the network input size.
 cv::Mat blob;
 try {
     bool swapRB = true;  // OpenCV loads BGR; YOLO models expect RGB
     bool crop = false;
     cv::Scalar mean = cv::Scalar(0, 0, 0);
     double scale = 1.0 / 255.0;  // normalize pixel values to [0, 1]

     blob = cv::dnn::blobFromImage(image,
         scale,
         cv::Size(640, 640),
         mean,
         swapRB,
         crop,
         CV_32F);
 }
 catch (...) {
     std::cerr << "Blob creation failed" << std::endl;
     return;
 }

 // Compute backend (adjust to the build environment). If OpenCV was built
 // without CUDA, forward() falls back to CPU or throws — handled below.
 net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
 net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);

 std::vector<std::string> outLayerNames = net.getUnconnectedOutLayersNames();
 std::vector<cv::Mat> predictions;

 try {
     net.setInput(blob);
     net.forward(predictions, outLayerNames);
 }
 catch (const cv::Exception& e) {
     std::cerr << "Forward pass failed: " << e.what() << std::endl;
     return;
 }


 // ---- Post-processing ----
 // Ultralytics YOLOv8/v11 (`yolo export`) ONNX output is a 3-D tensor
 // [1, 4 + numClasses, 8400]: channels-first, NO objectness column.
 // Each candidate column is (cx, cy, w, h, class scores...). We reshape
 // and transpose to [8400, 4 + numClasses] so each ROW is one detection,
 // then read class scores starting at column 4.
 std::vector<int> classIds;
 std::vector<float> confidences;
 std::vector<cv::Rect> boxes;
 // Map coordinates from the 640x640 network input back to the source image.
 float x_factor = image.cols / 640.0f;
 float y_factor = image.rows / 640.0f;
 float confThreshold = 0.25f;  // minimum class score to keep a detection
 float nmsThreshold = 0.45f;   // IoU threshold for non-maximum suppression

 for (cv::Mat pred : predictions) {
     if (pred.dims == 3) {
         pred = pred.reshape(1, pred.size[1]);  // [C, N] as a 2-D Mat
         cv::transpose(pred, pred);             // [N, C]: one row per box
     }
     const int scoreStart = 4;  // v8 layout: scores follow cx,cy,w,h
     for (int i = 0; i < pred.rows; ++i) {
         cv::Mat scores = pred.row(i).colRange(scoreStart, pred.cols);
         cv::Point classIdPoint;
         double confidence;
         cv::minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
         if (confidence > confThreshold) {
             // Box is (center x, center y, width, height) in input-size units.
             int centerX = static_cast<int>(pred.at<float>(i, 0) * x_factor);
             int centerY = static_cast<int>(pred.at<float>(i, 1) * y_factor);
             int width = static_cast<int>(pred.at<float>(i, 2) * x_factor);
             int height = static_cast<int>(pred.at<float>(i, 3) * y_factor);
             int left = centerX - width / 2;
             int top = centerY - height / 2;

             classIds.push_back(classIdPoint.x);
             confidences.push_back(static_cast<float>(confidence));
             boxes.push_back(cv::Rect(left, top, width, height));
         }
     }
 }

 // Non-maximum suppression: drop overlapping lower-confidence boxes.
 std::vector<int> indices;
 cv::dnn::NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

 // Draw the surviving boxes and confidence labels on the image.
 for (int idx : indices) {
     cv::Rect box = boxes[idx];
     cv::rectangle(image, box, cv::Scalar(0, 255, 0), 2);
     std::string label = cv::format("%.2f", confidences[idx]);
     cv::putText(image, label, cv::Point(box.x, box.y - 10), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 2);
 }

 // Display the result in ui->label_Map. QPixmap::fromImage deep-copies,
 // so the QImage wrapping image.data going out of scope is safe.
 cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
 QPixmap pixmap = QPixmap::fromImage(QImage(image.data, image.cols, image.rows, image.step, QImage::Format_RGB888));
 ui->label_Map->setPixmap(pixmap);
相关推荐
小草cys4 小时前
树莓派4 yolo 11l.pt性能优化后的版本
opencv·计算机视觉·目标跟踪
charlie1145141915 小时前
逐步理解Qt信号与槽机制
数据库·qt
Blossom.1187 小时前
探索边缘计算:赋能物联网的未来
开发语言·人工智能·深度学习·opencv·物联网·机器学习·边缘计算
(ღ星辰ღ)10 小时前
js应用opencv
开发语言·javascript·opencv
yaso_zhang10 小时前
当生产了~/qt-arm/bin/qmake,可以单独编译其他-源码的某个模块,如下,编译/qtmultimedia
qt
code bean11 小时前
【Qt/C++】深入理解 Lambda 表达式与 `mutable` 关键字的使用
开发语言·c++·qt
多巴胺与内啡肽.1 天前
Opencv进阶操作:图像拼接
人工智能·opencv·计算机视觉
爱看书的小沐1 天前
【小沐学GIS】基于C++绘制二维瓦片地图2D Map(QT、OpenGL、GIS)
c++·qt·gis·opengl·glfw·glut·二维地图
白熊1881 天前
【计算机视觉】OpenCV实战项目:ETcTI_smart_parking智能停车系统深度解析
人工智能·opencv·计算机视觉
HockerF1 天前
交叉编译 opencv-4.10
人工智能·opencv·计算机视觉