FFmpeg 大名鼎鼎,就不多介绍了
1:环境
win11_amd64
ffmpeg download:https://git.ffmpeg.org/ffmpeg.git ffmpeg
msys2 download:https://www.msys2.org/
vs2022 (c++ 写demo用) 用别的也行
usb2.0 摄像头(有点老)
opencv 看上传的流 download:https://github.com/opencv/opencv/tags
cmake (没用到)
2:msys2 更新下载
1> 在开始菜单打开MSYS2 MSYS,使用命令pacman -Syu升级所有库
pacman -S --needed base-devel mingw-w64-x86_64-toolchain mingw-w64-x86_64-cmake mingw-w64-x86_64-nasm mingw-w64-x86_64-yasm
2> msys64\etc\pacman.d 目录下 有多个文件 这里以mirrorlist.mingw64 为例配置国内源
Server = https://mirrors.tuna.tsinghua.edu.cn/msys2/mingw/x86_64/
Server = https://mirrors.aliyun.com/msys2/mingw/x86_64/
3>再安装
pacman -S mingw-w64-x86_64-x264 mingw-w64-x86_64-x265 mingw-w64-x86_64-fdk-aac
2:ffpmeg 配置编译选项
1> 打开MSYS2 mingw64 注意环境是 amd64位 所以用这个
cd ffmpeg 目录
/usr/path eg(D盘下目录): /d/ffmpeg/FFmpegn4 注意是 linux 格式的正斜杠 / ,不是 windows 的反斜杠 \
bash
./configure --prefix=/usr/path --enable-shared --disable-static --enable-gpl --enable-libx264 --enable-libx265 --enable-libfdk-aac --enable-nonfree --disable-postproc
建议在 ffmpeg 同级创建 build ,install 假如 ffmpegcode(下载的代码) 在 d:/ffmpeg/ffmpegcode
那么就有 d:/ffmpeg/build d:/ffmpeg/install 2个目录
cd 到 build (执行 configure 也有点慢,慢慢等几分钟)
bash
../ffmpegcode/configure --prefix=/d/ffmpeg/install --enable-shared --disable-static --enable-gpl --enable-libx264 --enable-libx265 --enable-libfdk-aac --enable-nonfree --disable-postproc
make -jN (N为电脑的CPU核心数)直接make 也可以,就是慢点
make install 会安装到 /d/ffmpeg/install
``
opencv 自行安装 网上太多了,这里就不介绍了
3 demo 测试
1》先把上章的 MediaServer 开起来,这个我装到ubuntu22上了
2》上代码
cpp
#include <iostream>
#include <memory>
#include <thread>
#include <mutex>
#include <atomic>
#include <queue>
#include <condition_variable>
#include <chrono>
#include <string>
#include <cstdio>

#include <opencv2/opencv.hpp>

extern "C" {
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
}
// Link OpenCV libraries (MSVC-only pragmas; "4110d" = OpenCV 4.11.0 debug builds).
// FIX: the imgcodecs library was listed twice; the duplicate is removed.
#pragma comment(lib, "opencv_core4110d.lib")
#pragma comment(lib, "opencv_imgcodecs4110d.lib")
#pragma comment(lib, "opencv_highgui4110d.lib")
// Link FFmpeg import libraries.
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "swresample.lib")
// Custom smart-pointer deleters for FFmpeg C objects.
//
// AVFormatContextDeleter: closes the AVIO handle first (unless the muxer is
// AVFMT_NOFILE, i.e. manages its own I/O), then releases the context.
struct AVFormatContextDeleter {
    void operator()(AVFormatContext* ctx) const {
        if (!ctx) {
            return;
        }
        const bool ownsIo = !(ctx->oformat && (ctx->oformat->flags & AVFMT_NOFILE));
        if (ownsIo) {
            avio_closep(&ctx->pb);
        }
        avformat_free_context(ctx);
    }
};
// Smart-pointer deleter: releases an AVCodecContext via avcodec_free_context().
struct AVCodecContextDeleter {
    void operator()(AVCodecContext* ctx) const {
        if (!ctx) return;
        avcodec_free_context(&ctx);
    }
};
// Smart-pointer deleter: releases an AVFrame (and its buffers) via av_frame_free().
struct AVFrameDeleter {
    void operator()(AVFrame* frame) const {
        if (!frame) return;
        av_frame_free(&frame);
    }
};
// Smart-pointer deleter: releases an AVPacket via av_packet_free().
struct AVPacketDeleter {
    void operator()(AVPacket* packet) const {
        if (!packet) return;
        av_packet_free(&packet);
    }
};
// Smart-pointer deleter: releases a software-scaler context via sws_freeContext().
struct SwsContextDeleter {
    void operator()(SwsContext* swsCtx) const {
        if (!swsCtx) return;
        sws_freeContext(swsCtx);
    }
};
// Smart-pointer deleter: releases a filter graph — and every filter attached
// to it — via avfilter_graph_free().
struct AVFilterGraphDeleter {
    void operator()(AVFilterGraph* graph) const {
        if (!graph) return;
        avfilter_graph_free(&graph);
    }
};
// Smart-pointer deleter: frees a single filter via avfilter_free().
// NOTE(review): filters created with avfilter_graph_alloc_filter() are owned
// by their AVFilterGraph and are freed again by avfilter_graph_free(); using
// this deleter on such a graph-attached filter causes a double free. Only use
// it for filters that are NOT part of a graph — confirm at each call site.
struct AVFilterContextDeleter {
void operator()(AVFilterContext* ctx) const {
if (ctx) {
avfilter_free(ctx);
}
}
};
// ---- State shared between the display (producer) and upload (consumer) threads ----
std::queue<std::shared_ptr<AVFrame>> frameQueue; // decoded frames pending upload; guarded by frameQueueMutex
std::condition_variable frameQueueCond;          // signaled on push and on shutdown request
std::mutex frameQueueMutex;                      // protects frameQueue
const size_t MAX_QUEUE_SIZE = 20;                // bounded buffer: frames beyond this are dropped
// BUGFIX: both threads read/write this flag, sometimes without holding the
// mutex — a plain bool is a data race (UB). std::atomic<bool> keeps the same
// usage syntax but makes the accesses well-defined.
std::atomic<bool> stopFlag{false};
// 上传线程函数
void uploadThread(const std::shared_ptr<AVFormatContext>& inputFormatContext, int videoStreamIndex,
const std::shared_ptr<AVCodecContext>& decoderContext, const std::string& outputUrl) {
// 输出格式上下文
AVFormatContext* outputFormatContextRaw = nullptr;
avformat_alloc_output_context2(&outputFormatContextRaw, nullptr, "rtsp", outputUrl.c_str());
if (!outputFormatContextRaw) {
std::cerr << "无法分配输出格式上下文。" << std::endl;
return;
}
std::shared_ptr<AVFormatContext> outputFormatContext(outputFormatContextRaw, AVFormatContextDeleter());
// 查找编码器
AVCodec* videoEncoder = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!videoEncoder) {
std::cerr << "未找到 H.264 编码器。" << std::endl;
return;
}
// 创建视频输出流
AVStream* videoOutputStream = avformat_new_stream(outputFormatContext.get(), videoEncoder);
if (!videoOutputStream) {
std::cerr << "无法创建视频输出流。" << std::endl;
return;
}
// 分配视频编码器上下文
AVCodecContext* videoEncoderContextRaw = avcodec_alloc_context3(videoEncoder);
if (!videoEncoderContextRaw) {
std::cerr << "无法分配视频编码器上下文。" << std::endl;
return;
}
std::shared_ptr<AVCodecContext> videoEncoderContext(videoEncoderContextRaw, AVCodecContextDeleter());
videoEncoderContext->codec_id = AV_CODEC_ID_H264;
videoEncoderContext->codec_type = AVMEDIA_TYPE_VIDEO;
videoEncoderContext->pix_fmt = AV_PIX_FMT_YUV420P;
videoEncoderContext->width = decoderContext->width;
videoEncoderContext->height = decoderContext->height;
videoEncoderContext->time_base = { 1, 25 };
videoEncoderContext->framerate = { 25, 1 };
// 优化编码器参数,降低比特率
videoEncoderContext->bit_rate = 2000000; // 降低比特率到 2Mbps
videoEncoderContext->gop_size = 25;
videoEncoderContext->max_b_frames = 1;
// 打开视频编码器
if (avcodec_open2(videoEncoderContext.get(), videoEncoder, nullptr) < 0) {
std::cerr << "无法打开视频编码器。" << std::endl;
return;
}
// 复制视频编码器参数到输出流
if (avcodec_parameters_from_context(videoOutputStream->codecpar, videoEncoderContext.get()) < 0) {
std::cerr << "无法复制视频编码器参数到输出流。" << std::endl;
return;
}
// 打开输出 URL
if (!(outputFormatContext->oformat->flags & AVFMT_NOFILE)) {
if (avio_open(&outputFormatContext->pb, outputUrl.c_str(), AVIO_FLAG_WRITE) < 0) {
std::cerr << "无法打开输出 URL。" << std::endl;
return;
}
}
// 写入文件头
if (avformat_write_header(outputFormatContext.get(), nullptr) < 0) {
std::cerr << "无法写入输出文件头。" << std::endl;
return;
}
// 视频比特流过滤器
AVBSFContext* videoBsfContextRaw = nullptr;
const AVBitStreamFilter* videoBsf = av_bsf_get_by_name("h264_mp4toannexb");
if (!videoBsf) {
std::cerr << "未找到 h264_mp4toannexb 比特流过滤器。" << std::endl;
return;
}
if (av_bsf_alloc(videoBsf, &videoBsfContextRaw) < 0) {
std::cerr << "无法分配视频比特流过滤器上下文。" << std::endl;
return;
}
std::shared_ptr<AVBSFContext> videoBsfContext(videoBsfContextRaw);
if (avcodec_parameters_copy(videoBsfContext->par_in, videoOutputStream->codecpar) < 0) {
std::cerr << "无法复制视频参数到比特流过滤器。" << std::endl;
return;
}
if (av_bsf_init(videoBsfContext.get()) < 0) {
std::cerr << "无法初始化视频比特流过滤器。" << std::endl;
return;
}
std::shared_ptr<AVFrame> uploadFrame(av_frame_alloc(), AVFrameDeleter());
uploadFrame->format = AV_PIX_FMT_YUV420P;
uploadFrame->width = decoderContext->width;
uploadFrame->height = decoderContext->height;
if (av_frame_get_buffer(uploadFrame.get(), 0) < 0) {
std::cerr << "无法分配上传帧缓冲区。" << std::endl;
return;
}
std::shared_ptr<AVPacket> uploadPacket(av_packet_alloc(), AVPacketDeleter());
std::shared_ptr<SwsContext> swsContext(sws_getContext(decoderContext->width, decoderContext->height,
decoderContext->pix_fmt, decoderContext->width, decoderContext->height,
AV_PIX_FMT_YUV420P, SWS_BILINEAR, nullptr, nullptr, nullptr),
SwsContextDeleter());
if (!swsContext) {
std::cerr << "无法初始化 SwsContext。" << std::endl;
return;
}
std::shared_ptr<AVFilterGraph> filterGraph(avfilter_graph_alloc(), AVFilterGraphDeleter());
if (!filterGraph) {
std::cerr << "无法分配过滤器图。" << std::endl;
return;
}
const AVFilter* buffersrc = avfilter_get_by_name("buffer");
const AVFilter* buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut* outputs = avfilter_inout_alloc();
AVFilterInOut* inputs = avfilter_inout_alloc();
AVRational time_base = decoderContext->time_base;
if (time_base.num == 0 || time_base.den == 0) {
time_base = { 1, 25 }; // 设置默认值
}
AVRational pixel_aspect = decoderContext->sample_aspect_ratio;
if (pixel_aspect.num == 0 || pixel_aspect.den == 0) {
pixel_aspect = { 1, 1 }; // 设置默认值
}
char args[512];
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
decoderContext->width, decoderContext->height, decoderContext->pix_fmt,
time_base.num, time_base.den,
pixel_aspect.num, pixel_aspect.den);
std::shared_ptr<AVFilterContext> buffersrc_ctx(avfilter_graph_alloc_filter(filterGraph.get(), buffersrc, "in"), AVFilterContextDeleter());
if (!buffersrc_ctx) {
std::cerr << "无法分配缓冲源过滤器上下文。" << std::endl;
return;
}
int ret = avfilter_init_str(buffersrc_ctx.get(), args);
if (ret < 0) {
std::cerr << "无法初始化缓冲源过滤器,错误码: " << ret << std::endl;
// 打印参数以调试
std::cerr << "传递的参数: " << args << std::endl;
return;
}
std::shared_ptr<AVFilterContext> buffersink_ctx(avfilter_graph_alloc_filter(filterGraph.get(), buffersink, "out"), AVFilterContextDeleter());
if (!buffersink_ctx) {
std::cerr << "无法分配缓冲宿过滤器上下文。" << std::endl;
return;
}
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
if (av_opt_set_int_list(buffersink_ctx.get(), "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN) < 0) {
std::cerr << "无法设置缓冲宿过滤器像素格式。" << std::endl;
return;
}
if (avfilter_init_str(buffersink_ctx.get(), nullptr) < 0) {
std::cerr << "无法初始化缓冲宿过滤器。" << std::endl;
return;
}
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx.get();
outputs->pad_idx = 0;
outputs->next = nullptr;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx.get();
inputs->pad_idx = 0;
inputs->next = nullptr;
if (avfilter_graph_parse_ptr(filterGraph.get(), "null", &inputs, &outputs, nullptr) < 0) {
std::cerr << "无法解析过滤器图。" << std::endl;
return;
}
if (avfilter_graph_config(filterGraph.get(), nullptr) < 0) {
std::cerr << "无法配置过滤器图。" << std::endl;
return;
}
int64_t nextPts = 0;
while (true) {
std::unique_lock<std::mutex> lock(frameQueueMutex);
if (frameQueueCond.wait_for(lock, std::chrono::milliseconds(100), [&] { return!frameQueue.empty() || stopFlag; })) {
if (stopFlag && frameQueue.empty()) {
break;
}
if (!frameQueue.empty()) {
auto frame = frameQueue.front();
frameQueue.pop();
lock.unlock();
if (av_buffersrc_add_frame_flags(buffersrc_ctx.get(), frame.get(), AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
std::cerr << "无法将帧添加到过滤器图。" << std::endl;
continue;
}
while (av_buffersink_get_frame(buffersink_ctx.get(), uploadFrame.get()) == 0) {
uploadFrame->pts = nextPts++;
// 发送帧到视频编码器
if (avcodec_send_frame(videoEncoderContext.get(), uploadFrame.get()) < 0) {
std::cerr << "发送帧到视频编码器失败。" << std::endl;
continue;
}
// 从视频编码器接收数据包
while (avcodec_receive_packet(videoEncoderContext.get(), uploadPacket.get()) == 0) {
// 通过视频比特流过滤器
if (av_bsf_send_packet(videoBsfContext.get(), uploadPacket.get()) < 0) {
std::cerr << "发送数据包到视频比特流过滤器失败。" << std::endl;
continue;
}
while (av_bsf_receive_packet(videoBsfContext.get(), uploadPacket.get()) == 0) {
// 调整时间戳
av_packet_rescale_ts(uploadPacket.get(), videoEncoderContext->time_base, videoOutputStream->time_base);
uploadPacket->stream_index = videoOutputStream->index;
// 写入数据包
int writeResult = av_interleaved_write_frame(outputFormatContext.get(), uploadPacket.get());
if (writeResult < 0) {
char errbuf[AV_ERROR_MAX_STRING_SIZE];
av_strerror(writeResult, errbuf, AV_ERROR_MAX_STRING_SIZE);
std::cerr << "写入视频数据包失败,错误码: " << errbuf << std::endl;
break;
}
}
}
}
}
}
}
// 写入文件尾
av_write_trailer(outputFormatContext.get());
}
// 显示线程函数
void displayThread(const std::shared_ptr<AVFormatContext>& inputFormatContext, int videoStreamIndex,
const std::shared_ptr<AVCodecContext>& decoderContext) {
std::shared_ptr<AVFrame> frame(av_frame_alloc(), AVFrameDeleter());
std::shared_ptr<AVPacket> packet(av_packet_alloc(), AVPacketDeleter());
cv::namedWindow("Camera Feed", cv::WINDOW_NORMAL);
while (av_read_frame(inputFormatContext.get(), packet.get()) >= 0) {
if (packet->stream_index == videoStreamIndex) {
// 发送数据包到解码器
if (avcodec_send_packet(decoderContext.get(), packet.get()) < 0) {
std::cerr << "发送数据包到解码器失败。" << std::endl;
continue;
}
// 从解码器接收帧
while (avcodec_receive_frame(decoderContext.get(), frame.get()) == 0) {
// 分配 OpenCV 图像
cv::Mat cvFrame(frame->height, frame->width, CV_8UC3);
// 转换帧格式
uint8_t* dstData[1] = { cvFrame.data };
int dstLinesize[1] = { cvFrame.step };
SwsContext* swsCtx = sws_getContext(decoderContext->width, decoderContext->height,
decoderContext->pix_fmt, decoderContext->width, decoderContext->height,
AV_PIX_FMT_BGR24, SWS_BILINEAR, nullptr, nullptr, nullptr);
if (swsCtx) {
sws_scale(swsCtx, frame->data, frame->linesize, 0, frame->height, dstData, dstLinesize);
sws_freeContext(swsCtx);
}
// 显示帧
cv::imshow("Camera Feed", cvFrame);
// 按 'q' 键退出
if (cv::waitKey(1) == 'q') {
stopFlag = true;
frameQueueCond.notify_one();
return;
}
// 将帧加入队列
{
std::unique_lock<std::mutex> lock(frameQueueMutex);
if (frameQueue.size() < MAX_QUEUE_SIZE) {
auto newFrame = std::shared_ptr<AVFrame>(av_frame_clone(frame.get()), AVFrameDeleter());
frameQueue.push(newFrame);
frameQueueCond.notify_one();
}
}
}
}
av_packet_unref(packet.get());
}
stopFlag = true;
frameQueueCond.notify_one();
}
int main() {
// 初始化 FFmpeg 库
avformat_network_init();
avdevice_register_all();
// 查找 dshow 输入格式
const AVInputFormat* iformat = av_find_input_format("dshow");
if (!iformat) {
std::cerr << "未找到 dshow 输入格式。" << std::endl;
return -1;
}
// 分配格式上下文
AVFormatContext* inputFormatContextRaw = nullptr;
const char* deviceName = "video=USB2.0 PC CAMERA";// "video=@device_pnp_\\?\\usb#vid_1908&pid_2310&mi_00#7&2892be33&0&0000#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\\global";
AVDictionary* options = nullptr;
// 设置 rtbufsize 参数,增加缓冲区大小
av_dict_set(&options, "rtbufsize", "100000000", 0); // 设置为 100MB
if (avformat_open_input(&inputFormatContextRaw, deviceName, const_cast<AVInputFormat*>(iformat), &options) != 0) {
std::cerr << "无法打开输入设备。" << std::endl;
return -1;
}
std::shared_ptr<AVFormatContext> inputFormatContext(inputFormatContextRaw, AVFormatContextDeleter());
// 查找流信息
if (avformat_find_stream_info(inputFormatContext.get(), nullptr) < 0) {
std::cerr << "无法找到流信息。" << std::endl;
return -1;
}
// 查找视频流
int videoStreamIndex = -1;
for (unsigned int i = 0; i < inputFormatContext->nb_streams; i++) {
if (inputFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamIndex = i;
break;
}
}
if (videoStreamIndex == -1) {
std::cerr << "未找到视频流。" << std::endl;
return -1;
}
// 查找解码器
AVCodecParameters* codecParameters = inputFormatContext->streams[videoStreamIndex]->codecpar;
AVCodec* decoder = avcodec_find_decoder(codecParameters->codec_id);
if (!decoder) {
std::cerr << "未找到合适的解码器。" << std::endl;
return -1;
}
// 分配解码器上下文
AVCodecContext* decoderContextRaw = avcodec_alloc_context3(decoder);
if (!decoderContextRaw) {
std::cerr << "无法分配解码器上下文。" << std::endl;
return -1;
}
std::shared_ptr<AVCodecContext> decoderContext(decoderContextRaw, AVCodecContextDeleter());
// 将参数复制到解码器上下文
if (avcodec_parameters_to_context(decoderContext.get(), codecParameters) < 0) {
std::cerr << "无法复制编解码器参数到上下文。" << std::endl;
return -1;
}
// 打开解码器
if (avcodec_open2(decoderContext.get(), decoder, nullptr) < 0) {
std::cerr << "无法打开解码器。" << std::endl;
return -1;
}
const std::string outputUrl = "rtsp://192.168.1.100:554/live/test1";
std::thread uploader(uploadThread, inputFormatContext, videoStreamIndex, decoderContext, outputUrl);
std::thread displayer(displayThread, inputFormatContext, videoStreamIndex, decoderContext);
displayer.join();
uploader.join();
return 0;
}
配置好 ffmpeg opencv 目录
把相关的 DLL 放在运行目录 在 ffmpeg opencv 目录 还有到 msys64\mingw64\bin 目录下找
运行测试 左边的opencv 窗口 右边的 是VLC media player 拉流播放的窗口
4:如果对你有帮助,麻烦点个赞,加个关注
用的 usb 摄像头不支持 H.265 ,
下章 做下转化 把 H.264 转 H.265 上传 再拉流下来播放