OpenCV Optical Flow Algorithms in Practice

0. Foreword

The theory is covered in the companion post: 图像处理算法--光流法-原理 (Image Processing Algorithms: Optical Flow, Principles) on the CSDN blog.

2. MainWindow Code (mainwindow.cpp)
```cpp
#include "mainwindow.h"
#include "ui_mainwindow.h"


#include <QFileDialog>
#include <QLabel>
#include <QDebug>

MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    // Timer that will drive frame reading
    showTimer = new QTimer(this);
    connect(showTimer, SIGNAL(timeout()), this, SLOT(ReadFrame()));

    // Initialize the status bar
    QLabel *labelFile = new QLabel("No file loaded", this);
    labelFile->setMinimumWidth(300);

    // Add the label to the status bar at the bottom of the window
    ui->statusBar->addWidget(labelFile);

    ui->centralWidget->setMouseTracking(true);
    this->setMouseTracking(true);

}

MainWindow::~MainWindow()
{
    delete ui;
}
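
// The member variables used below (gray, gray_prev, points[2], initial, features,
// maxCount, qLevel, minDest, status, err, window_name, showTimer, capture, frame)
// are declared in mainwindow.h, which is not shown in this post. A rough sketch of
// what those declarations might look like (types inferred from usage; the numeric
// values and the window title are assumptions, not the author's actual settings):
//
//     QTimer *showTimer;                        // drives ReadFrame()
//     cv::VideoCapture capture;                 // video source
//     cv::Mat frame, gray, gray_prev;           // current frame, current / previous gray images
//     std::vector<cv::Point2f> points[2];       // [0] input points, [1] tracked points
//     std::vector<cv::Point2f> initial;         // start positions, used to draw the tracks
//     std::vector<cv::Point2f> features;        // corners from goodFeaturesToTrack
//     std::vector<uchar> status;                // per-point status from calcOpticalFlowPyrLK
//     std::vector<float> err;                   // per-point tracking error
//     int maxCount = 500;                       // max corners to detect (assumed value)
//     double qLevel = 0.01;                     // corner quality level (assumed value)
//     double minDest = 10.0;                    // minimum corner distance (assumed value)
//     std::string window_name = "optical flow"; // imshow window title (assumed)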

void MainWindow::tracking(Mat &frame, Mat &output)
{
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    frame.copyTo(output);

    // Add new feature points when too few are left
    if (addNewPoints())
    {
        goodFeaturesToTrack(gray, features, maxCount, qLevel, minDest);
        points[0].insert(points[0].end(), features.begin(), features.end());
        initial.insert(initial.end(), features.begin(), features.end());
    }
    // On the very first frame there is no previous image yet
    if (gray_prev.empty())
    {
        gray.copyTo(gray_prev);
    }
    // Lucas-Kanade optical flow motion estimation
    calcOpticalFlowPyrLK(gray_prev, gray, points[0], points[1], status, err);
    // Drop the points that were not tracked well
    int k = 0;
    for (size_t i = 0; i < points[1].size(); i++)
    {
        if (acceptTrackedPoint(i))
        {
            initial[k] = initial[i];
            points[1][k++] = points[1][i];
        }
    }
    points[1].resize(k);
    initial.resize(k);
    // Draw the feature points and their motion tracks
    for (size_t i = 0; i < points[1].size(); i++)
    {
        line(output, initial[i], points[1][i], Scalar(0, 0, 255));
        circle(output, points[1][i], 3, Scalar(0, 255, 0), -1);
    }

    // Use the current tracking result as the reference for the next frame
    swap(points[1], points[0]);
    swap(gray_prev, gray);

    imshow(window_name, output);
}

bool MainWindow::addNewPoints()
{
    return points[0].size() <= 10;   // re-detect corners once 10 or fewer points are being tracked
}

bool MainWindow::acceptTrackedPoint(int i)
{
    // Keep a point only if LK reports success and it moved more than 2 pixels (Manhattan distance)
    return status[i] && ((abs(points[0][i].x - points[1][i].x) + abs(points[0][i].y - points[1][i].y)) > 2);
}


// Mouse move event: show the cursor position relative to the display label
void MainWindow::mouseMoveEvent(QMouseEvent *event)
{
    //if(event->buttons() & Qt::LeftButton)
    {
        QPoint sPoint1=event->globalPos();
        QPoint widgetPoint = ui->ShowLabel->mapFromGlobal(sPoint1);
        ui->TXTLabel_x->setNum((widgetPoint.x()));
        ui->TXTLabel_y->setNum((widgetPoint.y()));
    }
}
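
// moveCheck(): simple motion detection by frame differencing. The pipeline is:
// grayscale both frames -> absdiff -> adaptive threshold -> erode (remove small
// noise) -> dilate (merge nearby blobs) -> findContours -> draw a bounding box
// around each moving region on a copy of the current frame.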

Mat MainWindow::moveCheck(Mat &forntFrame, Mat &afterFrame)
{
    Mat frontGray, afterGray, diff;

    Mat resFrame = afterFrame.clone();
    // Convert both frames to grayscale
    cvtColor(forntFrame, frontGray, COLOR_BGR2GRAY);
    cvtColor(afterFrame, afterGray, COLOR_BGR2GRAY);

    // Frame differencing: find the differences caused by moving objects between the two frames
    absdiff(frontGray, afterGray, diff);
    //imshow("diff",diff);

    // Binarization
    //threshold(diff,diff,15,255,THRESH_BINARY);
    adaptiveThreshold(diff, diff, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 5, 5);
    imshow("threshold", diff);
    waitKey(25);
    // Erosion: remove small noise
    Mat element = cv::getStructuringElement(MORPH_RECT, Size(3, 3));
    erode(diff, diff, element);
    //imshow("erode",diff);
    // Dilation: merge the remaining blobs
    Mat element2 = cv::getStructuringElement(MORPH_RECT, Size(20, 20));
    dilate(diff, diff, element2);
    //imshow("dilate",diff);

    // Mark the moving objects
    vector<vector<Point>> contours;   // contour points
    findContours(diff, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));

    // Approximate each contour and compute its bounding rectangle
    vector<vector<Point>> contour_poly(contours.size());
    vector<Rect> boundRect(contours.size());

    int x, y, w, h;
    int num = contours.size();

    for (int i = 0; i < num; i++)
    {
        approxPolyDP(Mat(contours[i]), contour_poly[i], 3, true);
        boundRect[i] = boundingRect(Mat(contour_poly[i]));

        x = boundRect[i].x;
        y = boundRect[i].y;
        w = boundRect[i].width;
        h = boundRect[i].height;

        // Draw the bounding rectangle on the result frame
        rectangle(resFrame, Point(x, y), Point(x + w, y + h), Scalar(0, 255, 0), 2);
    }

    return resFrame;
}



void MainWindow::on_pBtn_OpenFile_clicked()
{
    // Open a file dialog and choose a video (or image) file
    QString filename = QFileDialog::getOpenFileName(this, tr("Open File"), QDir::homePath(), tr("Videos (*.avi *.mp4 *.h264 *.mkv);;Images (*.jpg *.bmp *.png)"));

    capture.open(filename.toStdString()); //.toStdString()

    if(!capture.isOpened())
    {
        ui->statusBar->showMessage(tr("Open Video Failed!"));
    }
    else
    {
        ui->statusBar->showMessage(tr("Open Video Success!"));
    }


    Mat frame;
    Mat temp;
    Mat res;
    int count = 0;
    while (capture.read(frame))
    {
        //frame = frame(cv::Rect(440, 260, 200, 200));

        if (count == 0)
        {
            // First frame: there is no previous frame yet, so diff the frame against itself
            res = moveCheck(frame, frame);
        }
        else
        {
            res = moveCheck(temp, frame);
        }
        count++;
        temp = frame.clone();
        imshow("frame", frame);
        imshow("res", res);
        waitKey(25);
    }





#if 0
    Mat frame, gray;
    vector<Point2f> features;       // detected corners
    vector<Point2f> inPoints;       // initial positions, kept for drawing the flow lines
    vector<Point2f> fpts[2];        // [0] input feature points, [1] output feature points
    Mat pre_frame, pre_gray;
    vector<uchar> status;           // per-point tracking status
    vector<float> err;              // per-point tracking error

    // [2] Read the video frame by frame
    while (capture.read(frame))
    {
        // [3] Convert the frame to grayscale (corner detection expects a single-channel image)
        cvtColor(frame, gray, COLOR_BGR2GRAY);
        cv::Mat imageROI = gray(cv::Rect(440, 260, 200, 200));
        cv::imshow("ROI", imageROI);

        // [4] If fewer than 25 feature points remain, run corner detection again
        if (fpts[0].size() < 25)
        {
            goodFeaturesToTrack(imageROI, features, 1000, 0.01, 10, Mat(), 3, false, 0.04);
            // [5] Put the detected corners into fpts[0] as the input feature points for tracking
            fpts[0].insert(fpts[0].begin(), features.begin(), features.end());
            inPoints.insert(inPoints.end(), features.begin(), features.end());
            qDebug() << "Corner detection finished, number of corners:" << features.size();
        } else {
            qDebug() << "Tracking...";
        }
        // [6] On the first frame the previous image is empty, so initialize it with the current one
        if (pre_gray.empty()) {
            imageROI.copyTo(pre_gray);
        }

        qDebug() << "Starting optical flow tracking";
        // [7] Run LK optical flow and put the tracked points into fpts[1]
        calcOpticalFlowPyrLK(pre_gray, imageROI, fpts[0], fpts[1], status, err);
        qDebug() << "Optical flow tracking finished";
        // [8] Walk through the tracked points and keep only those whose status and
        //     displacement look reasonable; compact them back into fpts[1]
        int k = 0;
        for (size_t i = 0; i < fpts[1].size(); i++)
        {
            double dist = abs(fpts[0][i].x - fpts[1][i].x) + abs(fpts[0][i].y - fpts[1][i].y);   // displacement of the point
            if (dist > 1 && status[i])
            {
                // keep the point if it moved more than 1 pixel and was tracked successfully
                inPoints[k] = inPoints[i];
                fpts[1][k++] = fpts[1][i];
            }
        }
        // [9] Shrink the containers so that only the accepted points remain
        inPoints.resize(k);
        fpts[1].resize(k);
        // [10] Draw the flow lines (optional)
        if (true) {
            for (size_t i = 0; i < fpts[1].size(); i++) {
                line(imageROI, inPoints[i], fpts[1][i], Scalar(0, 255, 0), 1, 8, 0);
                circle(imageROI, fpts[1][i], 2, Scalar(0, 0, 255), 2, 8, 0);
            }
        }

        qDebug() << "Swapping the input and output feature points";
        // [11] Swap input and output so the next iteration continues from the current result;
        //      the number of points shrinks over time until re-detection kicks in
        std::swap(fpts[1], fpts[0]);

        // [12] Draw the points that will be tracked in the next iteration
        for (size_t i = 0; i < fpts[0].size(); i++) {
            circle(imageROI, fpts[0][i], 2, Scalar(0, 0, 255), 2, 8, 0);
        }

        // [13] Refresh the previous frame (every iteration)
        imageROI.copyTo(pre_gray);
        imageROI.copyTo(pre_frame);
        // [14] Show the result
        imshow("imageROI", imageROI);
        int keyValue = waitKey(100);
        if (keyValue == 27) {   // quit when the user presses Esc
            break;
        }
    }
#endif
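
// Note on the block above: gray(cv::Rect(440, 260, 200, 200)) does not copy pixels;
// it creates a header that points into `gray`, so drawing on imageROI also modifies
// that region of `gray`, and the tracked coordinates are relative to the ROI's
// top-left corner, not to the full frame.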


#if 0
    // Read the first frame and use it for initialization
    Mat pre_image;
    capture.read(pre_image);
    cvtColor(pre_image, pre_image, COLOR_BGR2GRAY);
    // Optical flow needs floating-point coordinates
    vector<Point2f> prevPts;        // sparse feature points of the previous frame
    vector<Point2f> initpoint;      // retained points of the previous frame, used to draw the tracks
    vector<Point2f> features;       // corners detected in the image
    goodFeaturesToTrack(pre_image, features, 100, 0.3, 10, Mat(), 3, false);    // corners of the first frame
    // insert(position, first, last)
    initpoint.insert(initpoint.end(), features.begin(), features.end());        // initialize the track start points
    prevPts.insert(prevPts.end(), features.begin(), features.end());            // initialize the previous-frame points

    //Mat frame;
    while (capture.read(frame))
    {
        Mat next_image;
        flip(frame, frame, 1);
        cvtColor(frame, next_image, COLOR_BGR2GRAY);
        vector<Point2f> nextPts;    // corresponding points found in the next frame
        vector<uchar> status;       // status vector: 1 if the flow for the point was found, 0 otherwise
        vector<float> err;          // error vector: tracking error of each point
        calcOpticalFlowPyrLK(pre_image, next_image, prevPts, nextPts, status, err, Size(31, 31));

        RNG rng;
        int k = 0;
        for (int i = 0; i < nextPts.size(); i++)    // walk through the points of the next frame
        {
            // Manhattan distance (dx + dy) between the two corresponding points
            double dist = abs(double(prevPts[i].x) - double(nextPts[i].x)) + abs(double(prevPts[i].y) - double(nextPts[i].y));
            if (status[i] && dist > 2)      // keep the point if flow was found and it moved more than 2 pixels, i.e. it is not static
            {
                // keep the moving points that have valid flow
                prevPts[k] = prevPts[i];
                nextPts[k] = nextPts[i];
                initpoint[k] = initpoint[i];
                k++;
                // draw the kept point in a random color
                int b = rng.uniform(0, 256);
                int g = rng.uniform(0, 256);
                int r = rng.uniform(0, 256);
                circle(frame, nextPts[i], 3, Scalar(b, g, r), -1, 8, 0);
            }
        }
        // Shrink the containers to the number of points actually kept
        prevPts.resize(k);
        nextPts.resize(k);
        initpoint.resize(k);

        // Draw the whole path each point has travelled so far
        for (int j = 0; j < initpoint.size(); j++)
        {
            int b = rng.uniform(0, 256);
            int g = rng.uniform(0, 256);
            int r = rng.uniform(0, 256);
            line(frame, initpoint[j], nextPts[j], Scalar(b, g, r), 1, 8, 0);
        }
        imshow("frame", frame);
        // swap() exchanges the contents of two variables
        swap(nextPts, prevPts);          // the next-frame points become the previous-frame points
        swap(pre_image, next_image);     // the next frame becomes the previous frame

        // When filtering has reduced the number of points below the threshold, detect new corners
        // in the (already swapped) previous frame
        if (initpoint.size() < 10)
        {
            goodFeaturesToTrack(pre_image, features, 100, 0.01, 10, Mat(), 3, false);
            initpoint.insert(initpoint.end(), features.begin(), features.end());
            prevPts.insert(prevPts.end(), features.begin(), features.end());
        }

    }
#endif
#if 0
    Mat prevgray, gray, rgb, frame;
    Mat flow, flow_uv[2];
    Mat flow_Farneback;
    Mat flow_uv_Farneback[2];

    Mat mag, ang;
    Mat mag_Farneback, ang_Farneback;
    Mat hsv_split[3], hsv;
    Mat hsv_split_Farneback[3], hsv_Farneback;
    Mat rgb_Farneback;


    Ptr<DenseOpticalFlow> algorithm = DISOpticalFlow::create(DISOpticalFlow::PRESET_MEDIUM);

    int idx = 0;
    while(true)
    {
       capture >> frame;
       if (frame.empty())
           break;
       cv::resize(frame,frame,cv::Size(0.8*frame.cols,0.8*frame.rows),0,0,cv::INTER_LINEAR);
       idx++;
       cvtColor(frame, gray, COLOR_BGR2GRAY);
       cv::imshow("orig", frame);

       if (!prevgray.empty())
       {
           /*DISOpticalFlow*/
           /*main function of DISOpticalFlow*/
           algorithm->calc(prevgray, gray, flow);
           split(flow, flow_uv);
           multiply(flow_uv[1], -1, flow_uv[1]);
           cartToPolar(flow_uv[0], flow_uv[1], mag, ang, true);
           normalize(mag, mag, 0, 1, NORM_MINMAX);
           hsv_split[0] = ang;
           hsv_split[1] = mag;
           hsv_split[2] = Mat::ones(ang.size(), ang.type());
           merge(hsv_split, 3, hsv);
           cvtColor(hsv, rgb, COLOR_HSV2BGR);
           cv::Mat rgbU;
           rgb.convertTo(rgbU, CV_8UC3,  255, 0);
           cv::imshow("DISOpticalFlow", rgbU);
           Mat rgbU_b = rgbU.clone();
           Mat split_dis[3];
           split(rgbU_b, split_dis);

           split_dis[2] = prevgray;
           Mat merge_dis;
           merge(split_dis, 3, merge_dis);
           cv::imshow("DISOpticalFlow_mask", merge_dis);
           /*Farneback*/
           cv::calcOpticalFlowFarneback(prevgray, gray, flow_Farneback, 0.5, 3,15, 3, 5, 1.2, 0);

           split(flow_Farneback, flow_uv_Farneback);
           multiply(flow_uv_Farneback[1], -1, flow_uv_Farneback[1]);
           cartToPolar(flow_uv_Farneback[0], flow_uv_Farneback[1], mag_Farneback, ang_Farneback, true);
           normalize(mag_Farneback, mag_Farneback, 0, 1, NORM_MINMAX);
           hsv_split_Farneback[0] = ang_Farneback;
           hsv_split_Farneback[1] = mag_Farneback;
           hsv_split_Farneback[2] = Mat::ones(ang_Farneback.size(), ang_Farneback.type());
           merge(hsv_split_Farneback, 3, hsv_Farneback);
           cvtColor(hsv_Farneback, rgb_Farneback, COLOR_HSV2BGR);
           cv::Mat rgbU_Farneback;
           rgb_Farneback.convertTo(rgbU_Farneback, CV_8UC3,  255, 0);
           cv::imshow("FlowFarneback", rgbU_Farneback);
           Mat rgbU_Farneback_b = rgbU_Farneback.clone();
           Mat split_Fb[3];
           split(rgbU_Farneback_b, split_Fb);
           split_Fb[2] = prevgray;
           Mat merge_Fb;
           merge(split_Fb, 3, merge_Fb);
           cv::imshow("FlowFarneback_mask", merge_Fb);
           cv::waitKey(1);
       }

       std::swap(prevgray, gray);
    }
#endif
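
// Note on the block above: both dense-flow results are visualized the same way.
// The flow field is split into its x/y components, converted to polar form with
// cartToPolar, and packed into an HSV image (hue = flow direction, saturation =
// normalized magnitude, value = 1) before converting to BGR for display. DIS and
// Farneback are run on the same frame pair so their outputs can be compared
// side by side.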


    //showTimer->start(25);

#if 0
    // TODO: later improvement - distinguish internally between opening an image and opening a video
    QImage image = QImage(filename);

    if(!image.isNull())
    {
        ui->statusBar->showMessage(tr("Open image Success!"));
    }
    else
    {
        ui->statusBar->showMessage(tr("Open image Failed!"));
    }
#endif



}

void MainWindow::ReadFrame()
{
    // Slot driven by showTimer; frame reading is not implemented yet
}



void MainWindow::on_pBtn_CloseFile_clicked()
{
    // Stop the display timer and release the video resources
    showTimer->stop();

    capture.release();
    frame.release();
}
```
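
For readers who want to try the Lucas-Kanade tracking loop outside of Qt, the sketch below boils the same idea down to a console program using only the OpenCV calls that appear above (goodFeaturesToTrack, calcOpticalFlowPyrLK). The video path and the numeric parameters are placeholders, not values from the original project.

```cpp
// Minimal sparse LK optical flow demo (console only, no Qt). A sketch:
// the video path and the parameter values below are placeholders.
#include <opencv2/opencv.hpp>
#include <cmath>
#include <vector>

int main()
{
    cv::VideoCapture cap("test.avi");              // placeholder path
    if (!cap.isOpened()) return -1;

    cv::Mat frame, gray, prevGray;
    std::vector<cv::Point2f> prevPts, nextPts;
    std::vector<uchar> status;
    std::vector<float> err;

    while (cap.read(frame))
    {
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);

        // Re-detect corners whenever too few points are left to track.
        if (prevPts.size() < 10)
            cv::goodFeaturesToTrack(gray, prevPts, 500, 0.01, 10);

        if (!prevGray.empty() && !prevPts.empty())
        {
            cv::calcOpticalFlowPyrLK(prevGray, gray, prevPts, nextPts, status, err);

            // Keep only points that were tracked successfully and actually moved.
            size_t k = 0;
            for (size_t i = 0; i < nextPts.size(); ++i)
            {
                double dist = std::abs(prevPts[i].x - nextPts[i].x) +
                              std::abs(prevPts[i].y - nextPts[i].y);
                if (status[i] && dist > 2.0)
                {
                    nextPts[k++] = nextPts[i];
                    cv::circle(frame, nextPts[i], 3, cv::Scalar(0, 255, 0), -1);
                }
            }
            nextPts.resize(k);
            prevPts = nextPts;                     // tracked points become next iteration's input
        }

        cv::imshow("LK demo", frame);
        if (cv::waitKey(30) == 27) break;          // Esc quits
        gray.copyTo(prevGray);
    }
    return 0;
}
```

Build it against OpenCV (e.g. `g++ lk_demo.cpp $(pkg-config --cflags --libs opencv4)`) and point it at any short video clip.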