Linux C++ ONNX Runtime YOLOv8 Video Detection Demo

Contents

Project Directory

Results

CMakeLists.txt

Code

Download


Project Directory
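
The original screenshot of the project layout is not reproduced here. Based on the paths referenced in CMakeLists.txt and main.cpp, the directory tree looks roughly like this (the exact file names under src/ and include/onnxruntime are assumptions):

yolov8_demo/
├── CMakeLists.txt
├── include/
│   ├── YoloV8.hpp
│   └── onnxruntime/              # ONNX Runtime headers
├── lib/
│   └── onnxruntime/              # libonnxruntime.so and related shared libraries
├── src/
│   └── *.cpp                     # main.cpp and the YoloV8 implementation
├── models/
│   ├── yolov8n.onnx
│   └── lable.txt                 # class label file (file name as used in the code)
└── test/
    └── test_car_person_1080P.mp4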

Results

./yolov8_demo --help

./yolov8_demo -c=2 -p=true

./yolov8_demo -c=1 -s=true

The options map to the cv::CommandLineParser keys defined in main.cpp: -c sets the number of detection threads, -s shows the detection window, -g enables GPU inference, and -p prints per-step timing information.

CMakeLists.txt

# cmake needs this line
cmake_minimum_required(VERSION 3.0)

# Define project name
project(yolov8_demo)

# Compiler flags for Release builds
# SET(CMAKE_BUILD_TYPE "Release")
# set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -s")
# set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -std=c++17 -pthread -Wall")

# Compiler flags for Debug builds
SET(CMAKE_BUILD_TYPE "Debug")
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -std=c++17 -pthread")

set(OpenCV_LIBS opencv_videoio opencv_imgcodecs opencv_imgproc opencv_core opencv_dnn opencv_highgui)

include_directories(
    /usr/local/include/opencv4
    ${PROJECT_SOURCE_DIR}/include
    ${PROJECT_SOURCE_DIR}/include/onnxruntime
)

link_directories(
    ${PROJECT_SOURCE_DIR}/lib/onnxruntime   # third-party shared libraries
    /usr/local/lib/
)

# Recursively collect all source files
file(GLOB_RECURSE SRCS ${PROJECT_SOURCE_DIR}/src/*.cpp)

# Declare the executable target built from your sources
add_executable(yolov8_demo ${SRCS})

# Link the application against ONNX Runtime and the OpenCV libraries
target_link_libraries(yolov8_demo
    onnxruntime
    ${OpenCV_LIBS}
)
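
The article does not spell out the build steps, so here is a minimal sketch of a typical out-of-source build, assuming OpenCV is installed under /usr/local and the ONNX Runtime headers and shared libraries have been placed in include/onnxruntime and lib/onnxruntime as referenced above:

# configure and build (run from the project root)
mkdir -p build && cd build
cmake ..
make -j$(nproc)
# the demo resolves ./models and ./test relative to the working directory,
# so run it from a directory that contains those folders, e.g.:
cp yolov8_demo .. && cd .. && ./yolov8_demo --help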

Code

main.cpp

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>
#include <iostream>
#include <string>
#include <vector>
#include <YoloV8.hpp>
#include <unistd.h>
#include <sys/syscall.h>
#include <thread>

int VideoDet(int index, bool showDet, bool useGPU, bool printPerStepInfo)
{
    size_t threadId = static_cast<size_t>(syscall(SYS_gettid));
    // std::cout << "index:" << index << " thread id: " << threadId << std::endl;

    cv::VideoCapture capture("./test/test_car_person_1080P.mp4");

    // Check that the video opened successfully
    if (!capture.isOpened())
    {
        std::cout << "Failed to open the video file" << std::endl;
        return -1;
    }

    int frameCount = capture.get(cv::CAP_PROP_FRAME_COUNT); // total number of frames
    double fps = capture.get(cv::CAP_PROP_FPS);             // frame rate
    int delay = int(1000 / fps);                            // inter-frame delay in milliseconds, derived from the frame rate
    // delay=1;

    std::string model_path = "./models/yolov8n.onnx";
    std::string lable_path = "./models/lable.txt";

    int GPUCount = 2;   // assumed number of available GPUs; threads are assigned to devices round-robin
    int device_id = 0;
    if (index >= GPUCount)
    {
        device_id = index % GPUCount;
    }
    else
    {
        device_id = index;
    }

    // device_id=0;

    YoloV8 yoloV8(model_path, lable_path, useGPU, device_id);

    yoloV8.index = index;
    yoloV8.threadId = threadId;
    yoloV8.videoFps = fps;
    yoloV8.frameCount = frameCount;

    // std::cout << "device_id:" << yoloV8.device_id << std::endl;

    // vector<DetectionResult> detectionResult;
    // Mat frame=cv::imread("../test/dog.jpg");
    // yoloV8.Detect(frame, detectionResult);
    // std::cout << "detectionResult size:" << detectionResult.size() << std::endl;

    string winname = "detectionResult-" + std::to_string(index);

    while (true)
    {

        double start = (double)cv::getTickCount();
        delay = int(1000 / fps);

        Mat frame;
        bool success = capture.read(frame); // read one frame

        // Check whether a frame was actually read
        if (!success)
        {
            std::cout << "index:" << index << ", finished reading the video" << std::endl;
            yoloV8.PrintAvgCostTime();
            break;
        }

        vector<DetectionResult> detectionResult;

        yoloV8.Detect(frame, detectionResult);

        // std::cout <<"index:"<<index<< " thread id: " << threadId << " detectionResult size: " << detectionResult.size() << std::endl;
        yoloV8.detectionResultSize = detectionResult.size();

        if (printPerStepInfo)
        {
            yoloV8.PrintCostTime();
            yoloV8.PrintAvgCostTime();
        }

        if (showDet)
        {
            yoloV8.Draw(frame, detectionResult);

            imshow(winname, frame);

            double costTime = ((double)getTickCount() - start) / getTickFrequency(); // elapsed time in seconds
            delay = delay - int(costTime * 1000);                                    // convert to milliseconds before subtracting
            if (delay <= 0)
            {
                delay = 1;
            }

            if (waitKey(delay) == 27) // press ESC to exit the loop
            {
                break;
            }
        }
    }

    capture.release(); // release the video capture

    if (showDet)
    {
        cv::destroyWindow(winname);
    }

    return 0;
}

int main(int argc, char *const argv[])
{
    int threadCount = 1;
    bool showDet = false;
    bool useGPU = false;
    bool printPerStepInfo = true;

    const char *keys ="{h help                || print this message}"
        "{c threadCount         | 1     | number of detection threads}"
        "{s showDet             | false | show detection result}"
        "{g useGPU              | false | use GPU}"
        "{p printPerStepInfo    | false | print per-step timing info}";

    cv::CommandLineParser parser(argc, argv, keys);

    if(parser.has("help"))
    {
        parser.about("YoloV8 demo v1.0.0");
        parser.printMessage();
        return 0;
    }

    threadCount=parser.get<int>("threadCount");
    showDet=parser.get<bool>("showDet");
    useGPU=parser.get<bool>("useGPU");
    printPerStepInfo=parser.get<bool>("printPerStepInfo");

    std::cout << std::boolalpha;
    std::cout << "threadCount:" << threadCount << ",showDet:" << showDet<< ",useGPU:" << useGPU << ",printPerStepInfo:" << printPerStepInfo << std::endl;

    for (int i = 0; i < threadCount; i++)
    {
        std::thread thread(VideoDet, i, showDet, useGPU, printPerStepInfo);
        thread.detach();
    }

    // Keep the main process alive; the worker threads were detached above
    while (true)
    {
        sleep(100);
    }

    return 0;
}
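
main.cpp only shows how the YoloV8 class is used; the class itself lives in include/YoloV8.hpp, which is not reproduced in the article. Based on the calls above, its public interface looks roughly like the following sketch (member names and types are inferred from usage and may differ from the actual header; note that main.cpp uses unqualified names such as string, vector and Mat, so the real header presumably also pulls in using namespace std and using namespace cv):

// Sketch of the interface that main.cpp assumes from include/YoloV8.hpp.
// Inferred from usage only; the real header may differ.
#include <opencv2/core.hpp>
#include <string>
#include <vector>

struct DetectionResult
{
    cv::Rect box;       // assumed: bounding box of the detected object
    int classId;        // assumed: index into the label file
    float confidence;   // assumed: detection score
};

class YoloV8
{
public:
    YoloV8(const std::string &modelPath, const std::string &labelPath,
           bool useGPU, int deviceId);

    // Run inference on one frame and append the detections to `results`.
    void Detect(const cv::Mat &frame, std::vector<DetectionResult> &results);

    // Draw the detections onto the frame.
    void Draw(cv::Mat &frame, const std::vector<DetectionResult> &results);

    // Timing helpers used by the demo for per-frame and averaged statistics.
    void PrintCostTime();
    void PrintAvgCostTime();

    // Fields that main.cpp fills in for logging purposes.
    int index = 0;
    size_t threadId = 0;
    double videoFps = 0.0;
    int frameCount = 0;
    size_t detectionResultSize = 0;
};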

Download

Source code download
