Processing logic for pulling an HTTP-FLV video stream:
1. A worker thread pulls the stream from the streaming server, decodes it with FFmpeg, converts each frame to a QImage, and sends it to the main thread.
2. The main thread receives the QImage and displays it in the UI.
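Step 1 boils down to one FFmpeg cycle per packet: demux, decode, color-convert. A minimal sketch of that cycle (a simplified view of what `run()` below implements, with error handling trimmed):

```cpp
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}

// One decode iteration: demux -> decode -> convert to RGB32.
// All contexts are assumed to be opened already, as open() below does.
static bool decode_one(AVFormatContext *fmt, AVCodecContext *dec,
                       SwsContext *sws, AVPacket *pkt, AVFrame *yuv, AVFrame *rgb)
{
    if (av_read_frame(fmt, pkt) < 0)            // pull the next packet
        return false;                           // EOF or read error
    int ret = avcodec_send_packet(dec, pkt);    // feed compressed data in
    av_packet_unref(pkt);
    if (ret < 0)
        return false;
    if (avcodec_receive_frame(dec, yuv) < 0)    // EAGAIN: needs more packets
        return true;                            // not fatal; try the next one
    sws_scale(sws, yuv->data, yuv->linesize, 0, // YUV -> RGB32
              dec->height, rgb->data, rgb->linesize);
    return true;
}
```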
The .pro file:
```qmake
QT += core gui
greaterThan(QT_MAJOR_VERSION, 4): QT += widgets
CONFIG += c++17
# You can make your code fail to compile if it uses deprecated APIs.
# In order to do so, uncomment the following line.
#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000 # disables all the APIs deprecated before Qt 6.0.0
SOURCES += \
cc_video_thread.cpp \
main.cpp \
httpflvmainwindow.cpp
HEADERS += \
cc_video_thread.h \
httpflvmainwindow.h
FORMS += \
httpflvmainwindow.ui
# Default rules for deployment.
qnx: target.path = /tmp/$${TARGET}/bin
else: unix:!android: target.path = /opt/$${TARGET}/bin
!isEmpty(target.path): INSTALLS += target
INCLUDEPATH += $$PWD/lib/ffmpeg/include
LIBS += -L$$PWD/lib/ffmpeg/lib -lavcodec -lavdevice -lavfilter -lavformat -lavutil -lpostproc -lswresample -lswscale
```
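If the paths above are right, a throwaway sanity check (a hypothetical helper, not part of the project) confirms the FFmpeg libraries actually link:

```cpp
// Prints the linked FFmpeg version; if this compiles and links,
// INCLUDEPATH and LIBS are set up correctly.
extern "C" {
#include <libavutil/avutil.h>
}
#include <QDebug>

static void check_ffmpeg_linkage()
{
    qDebug() << "FFmpeg version:" << av_version_info();
}
```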
Worker thread:
cc_video_thread.h
```cpp
#ifndef CC_VIDEO_THREAD_H
#define CC_VIDEO_THREAD_H
#include <QThread>
#include <QAtomicInt>
#include <QImage>
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
#include <libavutil/dict.h>
#include <libavutil/imgutils.h>
#ifdef __cplusplus
}
#endif
class Cc_Video_thread : public QThread
{
    Q_OBJECT
public:
    Cc_Video_thread();
    ~Cc_Video_thread();
    void run() override;
    /**
     * @brief Open the stream and prepare the decoder.
     * @param addr stream address
     * @return 1 on success, a negative value on failure
     */
    int open(const QString &addr);
    /**
     * @brief Ask the decode loop to stop.
     */
    void exit();
    void packer_to_qimage(); // declared but not implemented in this example
    /**
     * @brief custom_interrupt_callback
     * @return non-zero aborts the blocking FFmpeg call, 0 lets it continue
     */
    static int custom_interrupt_callback(void *){
        // LOG(ERROR) <<"[ERROR]:OUT TIME..."<<std::endl;
        return 0; // never abort; see the timeout-aware sketch below
    };
signals:
    void sendimage(QImage img);
private:
    QAtomicInt exit_state_ = 1;  // the loop keeps running while this is 1
    QAtomicInt save_state_ = 1;  // (reserved, unused in this example)
    QAtomicInt open_state_ = -1; // becomes 1 once open() has succeeded
    // Initialize every FFmpeg pointer so the destructor can free them
    // safely even if open() failed early.
    AVCodec* codec_ = nullptr;
    AVPacket* packet_ = nullptr;
    AVStream* video_st = nullptr;
    AVFrame* yuv_frame_ = nullptr;
    AVFrame* pFrameRGB = nullptr;
    AVCodecContext* codecContext = nullptr;
    AVFormatContext* format_context_ = nullptr;
    AVCodecParameters* codecParam = nullptr;
    SwsContext* y2r_sws_context_ = nullptr;
    int video_stream_index_ = 0;
    int audio_stream_index_ = 0;
    int video_frame_size = 0;
    int audio_frame_size = 0;
    int video_frame_count = 0;
    int audio_frame_count = 0;
    uint8_t* m_OutBuffer = nullptr;
};
#endif // CC_VIDEO_THREAD_H
```
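As written, custom_interrupt_callback always returns 0, so a stalled network read can block av_read_frame forever. A hedged sketch of a timeout-aware variant (the IoDeadline type and the 5-second budget are illustrative, not part of the original code):

```cpp
#include <QElapsedTimer>

// FFmpeg invokes the interrupt callback periodically during blocking I/O;
// returning non-zero makes the blocked call fail with AVERROR_EXIT.
struct IoDeadline {
    QElapsedTimer timer;     // restart this right before each blocking call
    qint64 budget_ms = 5000; // abort reads that stall longer than this
};

static int interrupt_with_timeout(void *opaque)
{
    auto *d = static_cast<IoDeadline *>(opaque);
    return d->timer.elapsed() > d->budget_ms ? 1 : 0;
}
```

To use it, point format_context_->interrupt_callback.opaque at an IoDeadline instead of this, and restart the timer before avformat_open_input and each av_read_frame.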
cc_video_thread.cpp
```cpp
#include "cc_video_thread.h"
#include <string>
#include <QPixmap>
#include <QDebug>
Cc_Video_thread::Cc_Video_thread()
{
QString m_url=改成自己的url;
start();
open(m_url);
}
Cc_Video_thread::~Cc_Video_thread()
{
exit_state_ = 0;
av_packet_free(&packet_);
av_frame_free(&pFrameRGB);
av_frame_free(&yuv_frame_);
avformat_close_input(&format_context_);
}
int Cc_Video_thread::open(const QString &addr)
{
    if(open_state_ == 1){
        return -1;
    }
    if(addr.isEmpty())
    {
        //qDebug() << "[ERROR] addr is empty... ";
        return -1;
    }
    // init ffmpeg and open the stream
    //qDebug() << avcodec_configuration();
    format_context_ = avformat_alloc_context();
    format_context_->interrupt_callback.callback = custom_interrupt_callback;
    format_context_->interrupt_callback.opaque = this;
    packet_ = av_packet_alloc();
    yuv_frame_ = av_frame_alloc();
    pFrameRGB = av_frame_alloc();
    avformat_network_init(); // init networking
    AVDictionary *options = nullptr;
    // Note: rtsp_transport/stimeout are RTSP demuxer options; they are
    // ignored for an HTTP-FLV input and only matter for RTSP sources.
    av_dict_set(&options, "rtsp_transport", "tcp", 0);
    av_dict_set(&options, "stimeout", "10000", 0);
    // set the "buffer_size" read buffer
    av_dict_set(&options, "buffer_size", "1024000", 0);
    int ret = avformat_open_input(&format_context_, addr.toStdString().c_str(), NULL, &options);
    if(ret < 0)
    {
        qDebug() << "[ERROR]:avformat_open_input FAIL...";
        return ret;
    }
    // read a few packets to probe the stream info
    if(avformat_find_stream_info(format_context_, nullptr) < 0)
    {
        //qDebug() << "[ERROR]:avformat_find_stream_info FAIL...";
        return -1;
    }
    // dump the detected format
    av_dump_format(format_context_, 0, addr.toStdString().c_str(), 0);
    for(unsigned int i = 0; i < format_context_->nb_streams; i++)
    {
        video_st = format_context_->streams[i];
        // pick out the video and audio streams
        if(video_st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
            video_stream_index_ = i;
        }
        if(video_st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO){
            audio_stream_index_ = i;
        }
    }
    codecParam = format_context_->streams[video_stream_index_]->codecpar; // codec parameters
    codec_ = const_cast<AVCodec*>(avcodec_find_decoder(codecParam->codec_id)); // find the decoder
    if(NULL == codec_){
        qDebug() << "avcodec_find_decoder fail";
        return -1;
    }
    codecContext = avcodec_alloc_context3(codec_); // allocate the codec context (only once)
    avcodec_parameters_to_context(codecContext, codecParam);
    if(avcodec_open2(codecContext, codec_, nullptr) != 0)
    {
        avcodec_free_context(&codecContext);
        qDebug() << "Error: can`t open codec";
        return -1;
    }
    // build a pixel-format conversion context (decoded format -> RGB32)
    y2r_sws_context_ = sws_getContext(codecParam->width, codecParam->height, (AVPixelFormat)codecParam->format,
                                      codecParam->width, codecParam->height,
                                      AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
    int bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB32, codecContext->width, codecContext->height, 4);
    m_OutBuffer = (uint8_t *)av_malloc(bytes * sizeof(uint8_t));
    // bind pFrameRGB's data pointers to m_OutBuffer
    // (av_image_fill_arrays replaces the long-removed avpicture_fill)
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, m_OutBuffer,
                         AV_PIX_FMT_RGB32, codecContext->width, codecContext->height, 4);
    open_state_ = 1;
    return 1;
}
void Cc_Video_thread::run()
{
    while(true)
    {
        if(exit_state_ != 1)
        {
            break;
        }
        if(open_state_ == 1)
        {
            // read the next packet from the stream
            int ret = av_read_frame(format_context_, packet_);
            if(ret < 0)
            {
                msleep(10); // EOF or read error: back off and retry
                continue;
            }
            //qDebug() << "ret:" << ret;
            char output[1024];
            if(packet_->stream_index == video_stream_index_)
            {
                video_frame_size += packet_->size;
                snprintf(output, sizeof(output), "recv %5d video frame %5d-%5d\n",
                         ++video_frame_count, packet_->size, video_frame_size);
                qDebug() << output;
                // hand the compressed data (e.g. H.264) to the decoder
                ret = avcodec_send_packet(codecContext, packet_);
                av_packet_unref(packet_);
                if(ret != 0)
                {
                    qDebug() << "send packet error code is " << ret;
                    continue;
                }
                // fetch the decoded YUV frame
                ret = avcodec_receive_frame(codecContext, yuv_frame_);
                if(ret == AVERROR(EAGAIN))
                    continue; // decoder needs more packets first; not an error
                else if(ret < 0)
                {
                    qDebug() << "Error during decoding";
                    break;
                }
                // convert YUV -> RGB32 into m_OutBuffer
                ret = sws_scale(y2r_sws_context_, yuv_frame_->data, yuv_frame_->linesize, 0,
                                codecParam->height, pFrameRGB->data, pFrameRGB->linesize);
                if(ret <= 0){
                    qDebug() << "ERROR to rgb....";
                }
                // wrap the buffer in a QImage; copy() detaches the image from
                // m_OutBuffer, which the next sws_scale call will overwrite
                QImage tmmImage((uchar *)m_OutBuffer, codecContext->width, codecContext->height, QImage::Format_RGB32);
                emit sendimage(tmmImage.copy());
            }
            else
            {
                if(packet_->stream_index == audio_stream_index_)
                {
                    audio_frame_size += packet_->size;
                    snprintf(output, sizeof(output), "recv %5d audio frame %5d-%5d\n",
                             ++audio_frame_count, packet_->size, audio_frame_size);
                    qDebug() << output;
                }
                av_packet_unref(packet_); // audio/other packets are not decoded here
            }
        }
        else
        {
            qDebug() << "thread id is:" << currentThreadId() << "open_state_:" << open_state_ << " run...";
            msleep(100);
        }
    }
}
void Cc_Video_thread::exit()
{
    // Note: this hides QThread::exit(int); it only flags the loop to stop.
    open_state_ = -1;
    exit_state_ = 0;
}
```
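A single avcodec_send_packet can yield zero or several frames, so a more robust decode step drains avcodec_receive_frame until it reports EAGAIN. A hedged sketch of that pattern, assuming the same members as run() above:

```cpp
// Drain every frame the decoder has ready for this packet.
int ret = avcodec_send_packet(codecContext, packet_);
av_packet_unref(packet_);
while (ret >= 0) {
    ret = avcodec_receive_frame(codecContext, yuv_frame_);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break; // decoder wants more input, or the stream has ended
    if (ret < 0)
        break; // genuine decode error
    sws_scale(y2r_sws_context_, yuv_frame_->data, yuv_frame_->linesize, 0,
              codecParam->height, pFrameRGB->data, pFrameRGB->linesize);
    QImage img((uchar *)m_OutBuffer, codecContext->width,
               codecContext->height, QImage::Format_RGB32);
    emit sendimage(img.copy());
}
```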
Main thread:
httpflvmainwindow.h
```cpp
#ifndef HTTPFLVMAINWINDOW_H
#define HTTPFLVMAINWINDOW_H
#include <QMainWindow>
#include <QPixmap>
#include <QImage>
#include "cc_video_thread.h"
QT_BEGIN_NAMESPACE
namespace Ui {
class HttpFlvMainWindow;
}
QT_END_NAMESPACE
class HttpFlvMainWindow : public QMainWindow
{
    Q_OBJECT
public:
    HttpFlvMainWindow(QWidget *parent = nullptr);
    ~HttpFlvMainWindow();
private slots:
    void recvImage(QImage img);
private:
    Cc_Video_thread *videoThread = nullptr;
    Ui::HttpFlvMainWindow *ui;
};
#endif // HTTPFLVMAINWINDOW_H
```
httpflvmainwindow.cpp
```cpp
#include "httpflvmainwindow.h"
#include "ui_httpflvmainwindow.h"
#include <QDebug>
HttpFlvMainWindow::HttpFlvMainWindow(QWidget *parent)
: QMainWindow(parent)
, ui(new Ui::HttpFlvMainWindow)
{
ui->setupUi(this);
videoThread=new Cc_Video_thread();
connect(videoThread,SIGNAL(sendimage(QImage)),this,SLOT(recvImage(QImage)));
}
HttpFlvMainWindow::~HttpFlvMainWindow()
{
delete ui;
}
void HttpFlvMainWindow::recvImage(QImage img)
{
//qDebug()<<"into recv"<<endl;
QPixmap pixmap = QPixmap::fromImage(img);
QPixmap fitpixmap = pixmap.scaled(ui->m_pLblVideo->width(), ui->m_pLblVideo->height(),
Qt::IgnoreAspectRatio, Qt::SmoothTransformation); // 饱满填充
ui->m_pLblVideo->setPixmap(fitpixmap);
}
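main.cpp is listed in SOURCES but not shown above; a minimal sketch of what it presumably contains:

```cpp
// Hypothetical main.cpp: create the window and enter the Qt event loop.
#include "httpflvmainwindow.h"
#include <QApplication>

int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    HttpFlvMainWindow w;
    w.show();
    return a.exec();
}
```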
Finally, ui->m_pLblVideo->setPixmap(fitpixmap) draws the frame on the label.
Test results: compared side by side with WebRTC, the latency matches WebRTC, the picture quality is good, and corrupted (mosaic) frames almost never appear.
The printed timestamps come from the system clock, which on this machine runs about 300 ms behind standard Beijing time; after accounting for that offset, the total end-to-end latency is roughly 100 ms.
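For timing that does not depend on the system clock being accurate, a monotonic timer can measure the in-app decode-to-display path directly. A hedged sketch (where to start and report the timer is illustrative):

```cpp
#include <QElapsedTimer>
#include <QDebug>

// QElapsedTimer is monotonic, so measurements are immune to
// wall-clock offsets like the 300 ms drift mentioned above.
struct FrameStamp {
    QElapsedTimer t;
    void startAtDecode()   { t.start(); } // call right before emit sendimage(...)
    void reportAtDisplay() {              // call right after setPixmap(...)
        qDebug() << "decode->display:" << t.elapsed() << "ms";
    }
};
```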