Qt + FFmpeg: Playing a UDP Multicast Video Stream in Real Time

Starting from the reference linked below, this post improves the original code to play a UDP multicast video stream in real time.

https://blog.csdn.net/u012532263/article/details/102736700

The source builds successfully on Windows (qt-opensource-windows-x86-5.12.9.exe), on Ubuntu 20.04.6 x64 (qt-opensource-linux-x64-5.12.12.run), and in a cross-compilation environment targeting arm64 Ubuntu 20.04.6 (Qt 5.12.8 and 5.15.13). The resulting executable runs on Windows, Ubuntu x64, and arm64.

The full project code is available at:

https://download.csdn.net/download/daqinzl/90315016
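
The implementation relies on a VideoPlayer class declared in videoplayer.h, which the original post does not show. A minimal sketch of a header consistent with how videoplayer.cpp uses the class (the exact layout is an assumption, not the project's actual header):

// videoplayer.h -- hypothetical sketch, reconstructed from videoplayer.cpp below.
#ifndef VIDEOPLAYER_H
#define VIDEOPLAYER_H

#include <QThread>
#include <QImage>

class VideoPlayer : public QThread
{
    Q_OBJECT
public:
    VideoPlayer();
    ~VideoPlayer();
    void startPlay();                        // starts the decode thread

signals:
    void sig_GetOneFrame(QImage image);      // one decoded frame, converted to RGB32

protected:
    void run();                              // QThread entry point: demux + decode loop
};

#endif // VIDEOPLAYER_H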

Main code:
videoplayer.cpp

#include "videoplayer.h"

#include <QDebug>

extern "C"

{

#include "libavcodec/avcodec.h"

#include "libavformat/avformat.h"

#include "libavutil/pixfmt.h"

#include "libswscale/swscale.h"

}

#include <stdio.h>

#include<iostream>

using namespace std;

VideoPlayer::VideoPlayer()

{

}

VideoPlayer::~VideoPlayer()

{

}

void VideoPlayer::startPlay()

{

///调用 QThread 的start函数 将会自动执行下面的run函数 run函数是一个新的线程

this->start();

}

void VideoPlayer::run()

{

    /* Legacy (pre-FFmpeg 3.x) implementation, kept for reference:

    AVFormatContext *pFormatCtx;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameRGB;
    AVPacket *packet;
    uint8_t *out_buffer;
    static struct SwsContext *img_convert_ctx;
    int videoStream, i, numBytes;
    int ret, got_picture;

    avformat_network_init();
    av_register_all();

    // Allocate an AVFormatContext.
    pFormatCtx = avformat_alloc_context();

    // Options that help when av_read_frame() blocks on RTSP streams.
    AVDictionary* avdic = NULL;
    // rtsp
    //av_dict_set(&avdic, "buffer_size", "102400", 0);   // buffer size; raise it for 1080p
    //av_dict_set(&avdic, "rtsp_transport", "udp", 0);   // open over UDP; replace "udp" with "tcp" for TCP
    // rtmp
    //av_dict_set(&avdic, "buffer_size", "8192000", 0);  // buffer size; raise it for 1080p
    //av_dict_set(&avdic, "rtsp_transport", "tcp", 0);
    // udp
    av_dict_set(&avdic, "buffer_size", "8192000", 0);    // buffer size; raise it for 1080p
    av_dict_set(&avdic, "rtsp_transport", "udp", 0);     // open over UDP; replace "udp" with "tcp" for TCP
    //av_dict_set(&avdic, "fflags", "nobuffer", 0);      // real-time option
    //av_dict_set(&avdic, "flags", "low_delay", 0);      // low-latency option
    av_dict_set(&avdic, "max_interleave_delta", "40000", 0);
    //av_dict_set(&avdic, "stimeout", "2000000", 0);     // disconnect timeout, in microseconds
    //av_dict_set(&avdic, "max_delay", "500000", 0);     // maximum delay

    // Stream URL; adjust to your setup.
    //char url[]="rtsp://admin:admin@192.168.1.18:554/h264/ch1/main/av_stream";
    //char url[]="rtsp://192.168.17.112/test2.264";
    //char url[]="rtsp://admin:admin@192.168.43.1/stream/main";
    //char url[]="rtmp://mobliestream.c3tv.com:554/live/goodtv.sdp";
    char url[]="udp://224.1.1.1:5001";
    //char url[]="rtmp://192.168.1.100:1935/live/desktop";

    if (avformat_open_input(&pFormatCtx, url, NULL, &avdic) != 0) {
        qDebug("can't open the file.\n");
        return;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        qDebug("Couldn't find stream information.\n");
        return;
    }

    videoStream = -1;
    // Walk the streams in the container and remember the index of the video
    // stream; audio streams are ignored here.
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
        }
    }

    // videoStream == -1 means no video stream was found.
    if (videoStream == -1) {
        qDebug("Didn't find a video stream.\n");
        return;
    }

    // Look up the decoder.
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

    //pCodecCtx->bit_rate = 0;        // initialize to 0
    //pCodecCtx->time_base.num = 1;   // these two lines set frames per second
    //pCodecCtx->time_base.den = 10;
    //pCodecCtx->frame_number = 1;    // one video frame per packet

    if (pCodec == NULL) {
        qDebug("Codec not found.\n");
        return;
    }

    // Open the decoder.
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        qDebug("Could not open codec.\n");
        return;
    }

    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();

    // Convert the decoded YUV data to RGB32.
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
            AV_PIX_FMT_RGB32, SWS_FAST_BILINEAR, NULL, NULL, NULL);

    numBytes = avpicture_get_size(AV_PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height);
    out_buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *) pFrameRGB, out_buffer, AV_PIX_FMT_RGB32,
            pCodecCtx->width, pCodecCtx->height);

    int y_size = pCodecCtx->width * pCodecCtx->height;
    packet = (AVPacket *) malloc(sizeof(AVPacket)); // allocate a packet
    av_new_packet(packet, y_size);                  // allocate the packet's data
    //*/

    //bool CanRun = true;

    // FFmpeg initialization: register the codecs/formats and init networking.
    av_register_all();
    avcodec_register_all();
    avformat_network_init();

    qDebug() << "FFmpeg version info: {" << av_version_info() << "}";

    // Multicast source URL; adjust to your setup.
    char url[]="udp://224.1.1.1:5001";

    // Allocate the demuxer (format) context.
    AVFormatContext *pFormatCtx = avformat_alloc_context();

    AVDictionary* avdic = NULL;
    av_dict_set(&avdic, "buffer_size", "8192000", 0);        // socket buffer size; raise it for 1080p
    av_dict_set(&avdic, "max_interleave_delta", "40000", 0);
    av_dict_set(&avdic, "analyzeduration", "100000000", 0);  // probe longer so the stream parameters are found
    av_dict_set(&avdic, "probesize", "100000000", 0);

    int ret;

    // Open the stream.
    ret = avformat_open_input(&pFormatCtx, url, NULL, &avdic);
    if (ret != 0) { qDebug("can't open the url.\n"); return; }

    // Read the stream information (returns >= 0 on success).
    ret = avformat_find_stream_info(pFormatCtx, NULL);
    if (ret < 0) { qDebug("Couldn't find stream information.\n"); return; }

    // Dump the container metadata, purely for diagnostics.
    AVDictionaryEntry* tag = NULL;
    while ((tag = av_dict_get(pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)) != NULL)
    {
        qDebug() << tag->key << ":" << tag->value;
    }

    // Find the video stream index in the format context.
    AVStream* pStream = NULL;
    //AVStream* aStream = NULL;
    int videoStream = -1;
    for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
    {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            pStream = pFormatCtx->streams[i];
            videoStream = i;
        }
        //else if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        //{
        //    aStream = pFormatCtx->streams[i];
        //}
    }
    if (pStream == NULL) { qDebug("Didn't find a video stream.\n"); return; }

    // Copy the stream's codec context.
    AVCodecContext codecContext = *pStream->codec;
    qDebug() << "codec name: {" << avcodec_get_name(codecContext.codec_id) << "}";

    // Image width, height and pixel format.
    int width = codecContext.width;
    int height = codecContext.height;
    AVPixelFormat sourcePixFmt = codecContext.pix_fmt;

    // Codec ID.
    AVCodecID codecId = codecContext.codec_id;

    // Target pixel format.
    AVPixelFormat destinationPixFmt = AV_PIX_FMT_RGB32;

    // For some H.264 streams, codecContext.pix_fmt comes back as
    // AV_PIX_FMT_NONE; treat those uniformly as YUV420P.
    if (sourcePixFmt == AV_PIX_FMT_NONE && codecId == AV_CODEC_ID_MPEG2TS)
    {
        sourcePixFmt = AV_PIX_FMT_YUV420P;
    }
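    // Note: reading pStream->codec and copying the AVCodecContext by value relies
    // on an API deprecated since FFmpeg 3.1. A sketch of the modern route, using
    // the codec-parameters API (an alternative, not what this project ships):
    //   AVCodecContext *ctx = avcodec_alloc_context3(NULL);
    //   avcodec_parameters_to_context(ctx, pStream->codecpar);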

    // SwsContext handles image scaling and pixel-format conversion.
    static struct SwsContext *img_convert_ctx;
    img_convert_ctx = sws_getContext(width, height, sourcePixFmt,
            width, height, destinationPixFmt,
            SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (img_convert_ctx == NULL) { qDebug() << "Could not initialize the conversion context."; return; }

    //AVFrame* pConvertedFrame = av_frame_alloc();

    // Bytes needed for one frame in the target format.
    int numBytes = avpicture_get_size(destinationPixFmt, width, height);
    // Allocate the buffer that will hold the converted RGB frame.
    uint8_t * out_buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

    AVFrame *pFrameRGB = av_frame_alloc();
    // Attach out_buffer to pFrameRGB's data/linesize fields.
    avpicture_fill((AVPicture *) pFrameRGB, out_buffer, destinationPixFmt, width, height);
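    // Note: avpicture_get_size()/avpicture_fill() above are deprecated in newer
    // FFmpeg releases. A sketch of the replacement calls (assuming FFmpeg >= 3.1;
    // both are declared in libavutil/imgutils.h):
    //   int numBytes = av_image_get_buffer_size(destinationPixFmt, width, height, 1);
    //   av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, out_buffer,
    //                        destinationPixFmt, width, height, 1);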

    // Find a decoder for the codec ID.
    AVCodec* pCodec = avcodec_find_decoder(codecId);
    if (pCodec == NULL) { qDebug() << "Unsupported codec."; return; }

    AVCodecContext* pCodecCtx = &codecContext;
    if ((pCodec->capabilities & AV_CODEC_CAP_TRUNCATED) == AV_CODEC_CAP_TRUNCATED)
        pCodecCtx->flags |= AV_CODEC_FLAG_TRUNCATED;

    // Open the decoder context.
    ret = avcodec_open2(pCodecCtx, pCodec, NULL);
    if (ret < 0) { qDebug() << "avcodec_open2 failed:" << ret; return; }

    // Frame that will receive decoded pictures.
    AVFrame* pFrame = av_frame_alloc();

    // Packet that will receive demuxed data.
    AVPacket* packet = new AVPacket();
    av_init_packet(packet);
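    // Note: new AVPacket() + av_init_packet() is the legacy pattern; newer FFmpeg
    // deprecates av_init_packet(). A sketch of the modern allocation (assumption,
    // not what this project ships):
    //   AVPacket *packet = av_packet_alloc();
    //   ...
    //   av_packet_free(&packet);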

    while (1)
    {
        /* Legacy decode path (avcodec_decode_video2), kept for reference:

        if (av_read_frame(pFormatCtx, packet) < 0)
        {
            qDebug() << "av_read_frame < 0";
            break; // treat this as end of stream
        }
        if (packet->stream_index == videoStream) {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0) {
                qDebug("decode error.\n");
                return;
            }
            if (got_picture) {
                sws_scale(img_convert_ctx,
                        (uint8_t const * const *) pFrame->data,
                        pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
                        pFrameRGB->linesize);

                // Wrap the RGB data in a QImage.
                QImage tmpImg((uchar *)out_buffer, pCodecCtx->width, pCodecCtx->height, QImage::Format_RGB32);
                QImage image = tmpImg.copy(); // deep-copy the frame and hand it to the UI
                emit sig_GetOneFrame(image);  // notify the UI
                //emit sig_GetOneFrame(tmpImg);

                // Keep only the R channel of the image:
                //for(int i=0;i<pCodecCtx->width;i++)
                //{
                //    for(int j=0;j<pCodecCtx->height;j++)
                //    {
                //        QRgb rgb=image.pixel(i,j);
                //        int r=qRed(rgb);
                //        image.setPixel(i,j,qRgb(r,0,0));
                //    }
                //}
                //emit sig_GetRFrame(image);
            } else qDebug() << "got_picture < 0";
        } else qDebug() << "packet->stream_index not video stream";

        av_free_packet(packet); // release the packet, or memory keeps growing
        //msleep(0.02); // slow the loop down, otherwise playback runs too fast
        //*/

        // Pull packets until the decoder yields one video frame.
        // (The FFmpeg C API does not throw, so no try/catch is needed here.)
        int gotFrame = 0;
        do
        {
            // Read one demuxed, still-encoded packet.
            ret = av_read_frame(pFormatCtx, packet);
            if (ret < 0) break; // AVERROR_EOF or a read error
            if (packet->stream_index != videoStream)
            {
                av_packet_unref(packet); // not the video stream: recycle the packet
                continue;
            }

            // Feed the packet to the decoder...
            ret = avcodec_send_packet(pCodecCtx, packet);
            av_packet_unref(packet); // the decoder keeps its own reference
            if (ret < 0) { qDebug() << "avcodec_send_packet failed:" << ret; break; }

            // ...and try to fetch a decoded frame; AVERROR(EAGAIN) means the
            // decoder needs more input before it can output a frame.
            ret = avcodec_receive_frame(pCodecCtx, pFrame);
            if (ret == 0) gotFrame = 1;
        } while (!gotFrame && (ret == 0 || ret == AVERROR(EAGAIN)));

        if (ret == AVERROR_EOF) break;
        if (!gotFrame) { qDebug() << "decode loop stopped, error:" << ret; return; }

        // YUV -> RGB conversion.
        sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

        // Wrap the RGB data in a QImage.
        QImage tmpImg((uchar *)out_buffer, pCodecCtx->width, pCodecCtx->height, QImage::Format_RGB32);
        QImage image = tmpImg.copy(); // deep-copy the frame and hand it to the UI
        emit sig_GetOneFrame(image);  // notify the UI
        //emit sig_GetOneFrame(tmpImg);

        av_frame_unref(pFrame); // drop the frame's data reference
    }

    av_free(out_buffer);
    av_frame_free(&pFrame);
    av_frame_free(&pFrameRGB);
    delete packet;
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
}
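
On the UI side, a widget only needs to receive sig_GetOneFrame and repaint. A minimal consumer sketch (the widget class and the wiring code are assumptions for illustration, not part of the original project):

// FrameWidget -- hypothetical sketch; names are illustrative.
#include <QWidget>
#include <QPainter>
#include <QImage>

class FrameWidget : public QWidget
{
    Q_OBJECT
public slots:
    void onFrame(QImage image) { m_frame = image; update(); } // store the frame, schedule a repaint

protected:
    void paintEvent(QPaintEvent *) override
    {
        QPainter p(this);
        if (!m_frame.isNull())
            p.drawImage(rect(), m_frame); // scale the frame to the widget
    }

private:
    QImage m_frame;
};

// Wiring (e.g. in main or the main window constructor). Because VideoPlayer
// emits from its worker thread, the default connection type is queued, so
// onFrame runs safely on the UI thread:
//   VideoPlayer *player = new VideoPlayer;
//   QObject::connect(player, &VideoPlayer::sig_GetOneFrame,
//                    widget, &FrameWidget::onFrame);
//   player->startPlay();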
