[FFmpeg Learning] Slow-Motion Video Processing

Producing a slow-motion version of a video is a fairly common task. It can be handled at playback time, but here we consider rewriting the file itself in slow motion. With the ffmpeg command line it can be done like this:

ffmpeg -i test.mp4 -vf "setpts=5*PTS" -an test_slow3.mp4

This slows the video down by a factor of 5: setpts=5*PTS stretches every presentation timestamp to five times its original value, and -an drops the audio track (which would otherwise go out of sync, since it is not being stretched). The resulting file is also several times larger.

How can the same thing be done in code? For reference, here is the code suggested by GPT:

cpp
#include <iostream>
#include <string>
#include <cstdlib>
#include <cstring>
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
}

// Slow-motion speed factor
const double SLOWDOWN_FACTOR = 2.0;

int main(int argc, char* argv[]) {
    if (argc < 3) {
        std::cout << "Usage: " << argv[0] << " input_file output_file" << std::endl;
        return 1;
    }

    const std::string inputFileName = argv[1];
    const std::string outputFileName = argv[2];

    av_register_all();  // deprecated since FFmpeg 4.0 and removed in 5.0; unnecessary on modern builds

    AVFormatContext* inputFormatContext = nullptr;
    if (avformat_open_input(&inputFormatContext, inputFileName.c_str(), nullptr, nullptr) != 0) {
        std::cerr << "Failed to open input file: " << inputFileName << std::endl;
        return 1;
    }

    if (avformat_find_stream_info(inputFormatContext, nullptr) < 0) {
        std::cerr << "Failed to retrieve input stream information" << std::endl;
        avformat_close_input(&inputFormatContext);
        return 1;
    }

    AVFormatContext* outputFormatContext = nullptr;
    if (avformat_alloc_output_context2(&outputFormatContext, nullptr, nullptr, outputFileName.c_str()) < 0) {
        std::cerr << "Failed to allocate output format context" << std::endl;
        avformat_close_input(&inputFormatContext);
        return 1;
    }

    for (unsigned int i = 0; i < inputFormatContext->nb_streams; ++i) {
        AVStream* inputStream = inputFormatContext->streams[i];
        AVStream* outputStream = avformat_new_stream(outputFormatContext, nullptr);
        if (!outputStream) {
            std::cerr << "Failed to allocate output stream" << std::endl;
            avformat_close_input(&inputFormatContext);
            avformat_free_context(outputFormatContext);
            return 1;
        }

        if (avcodec_parameters_copy(outputStream->codecpar, inputStream->codecpar) < 0) {
            std::cerr << "Failed to copy codec parameters" << std::endl;
            avformat_close_input(&inputFormatContext);
            avformat_free_context(outputFormatContext);
            return 1;
        }

        outputStream->time_base = inputStream->time_base;
    }

    if (!(outputFormatContext->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&outputFormatContext->pb, outputFileName.c_str(), AVIO_FLAG_WRITE) < 0) {
            std::cerr << "Failed to open output file: " << outputFileName << std::endl;
            avformat_close_input(&inputFormatContext);
            avformat_free_context(outputFormatContext);
            return 1;
        }
    }

    if (avformat_write_header(outputFormatContext, nullptr) < 0) {
        std::cerr << "Failed to write output file header" << std::endl;
        avformat_close_input(&inputFormatContext);
        avformat_free_context(outputFormatContext);
        return 1;
    }

    AVPacket packet;
    while (av_read_frame(inputFormatContext, &packet) >= 0) {
        AVStream* outputStream = outputFormatContext->streams[packet.stream_index];

        // Adjust timestamps for slow playback (note: these are still in the input stream's time base)
        packet.pts *= static_cast<int64_t>(SLOWDOWN_FACTOR);
        packet.dts *= static_cast<int64_t>(SLOWDOWN_FACTOR);

        packet.duration = static_cast<int64_t>(packet.duration * SLOWDOWN_FACTOR);

        av_interleaved_write_frame(outputFormatContext, &packet);
        av_packet_unref(&packet);
    }

    av_write_trailer(outputFormatContext);

    avformat_close_input(&inputFormatContext);
    avformat_free_context(outputFormatContext);

    return 0;
}

Running this code did not actually slow the video down. After studying it and making some changes, the slow-motion effect works, as shown below. The main differences in the working version are that it remuxes only the video stream, rescales each packet's timestamps from the input stream's time base into the output stream's time base (av_rescale_q_rnd / av_rescale_q) before applying the slow-down factor, and clears codec_tag on the output stream:

cpp
int slow() {
    std::string filename = "test.mp4";     // 输入MP4文件名
    std::string outputFilename = "test_slow.mp4";  // 输出图片文件名

    AVFormatContext* ofmt_ctx = nullptr;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, outputFilename.c_str());
    const AVOutputFormat* ofmt = ofmt_ctx->oformat;

    AVFormatContext* formatContext = nullptr;
    if (avformat_open_input(&formatContext, filename.c_str(), nullptr, nullptr) != 0) {
        std::cerr << "Error opening input file" << std::endl;
        return -1;
    }

    if (avformat_find_stream_info(formatContext, nullptr) < 0) {
        std::cerr << "Error finding stream information" << std::endl;
        avformat_close_input(&formatContext);
        return -1;
    }

    const AVCodec* codec = nullptr;
    int videoStreamIndex = -1;

    for (unsigned int i = 0; i < formatContext->nb_streams; i++) {
        if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStreamIndex = i;
            codec = avcodec_find_decoder(formatContext->streams[i]->codecpar->codec_id);

            // Create a matching video stream in the output file and copy its codec parameters
            AVStream* out_stream = avformat_new_stream(ofmt_ctx, NULL);
            avcodec_parameters_copy(out_stream->codecpar, formatContext->streams[i]->codecpar);
            out_stream->codecpar->codec_tag = 0;

            break;
        }
    }

    avio_open(&ofmt_ctx->pb, outputFilename.c_str(), AVIO_FLAG_WRITE);
    avformat_write_header(ofmt_ctx, NULL);

 

    AVPacket packet;
    av_init_packet(&packet);  // deprecated in recent FFmpeg versions, kept here for simplicity

    int count = 0;
    while (av_read_frame(formatContext, &packet) >= 0) {
        if (packet.stream_index == videoStreamIndex) {
            AVStream* inputStream = formatContext->streams[packet.stream_index];
            AVStream* outputStream = ofmt_ctx->streams[packet.stream_index];

            // Rescale timestamps from the input time base into the output (muxer) time base
            std::cout << "1, pts=" << packet.pts << std::endl;
            packet.pts = av_rescale_q_rnd(packet.pts, inputStream->time_base, outputStream->time_base, AV_ROUND_NEAR_INF);
            packet.dts = av_rescale_q_rnd(packet.dts, inputStream->time_base, outputStream->time_base, AV_ROUND_NEAR_INF);
            packet.duration = av_rescale_q(packet.duration, inputStream->time_base, outputStream->time_base);
            packet.pos = -1;
            std::cout << "2, pts=" << packet.pts << std::endl;

            // Stretch the timeline by 5x for slow motion
            packet.pts *= 5;
            packet.dts *= 5;
            packet.duration *= 5;
  //          av_log(NULL, AV_LOG_INFO, "...av_write_frame(ofmt_ctx, packet);\n");
            int ret = av_write_frame(ofmt_ctx, &packet);
            if (ret < 0) {
                std::cerr << "Error av_write_frame" << std::endl;
            }
            count++;

        }

        av_packet_unref(&packet);
    }

    av_write_trailer(ofmt_ctx);

    // Clean up
    avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    avformat_close_input(&formatContext);

    return 0;
}

In short, the slow-motion effect is achieved purely by modifying the packet timestamps: the frames are stream-copied without re-encoding, and only their pts/dts/duration are stretched.
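
The per-packet logic can also be pulled out into a small helper. The sketch below is a minimal, untested refactoring of the same idea (the function name slow_down_packet and its factor parameter are my own, not from the original code); it rescales a packet into the muxer's time base and then multiplies the timestamps, exactly as the loop in slow() does inline. AV_ROUND_PASS_MINMAX is added here so that packets with AV_NOPTS_VALUE pass through unchanged.

cpp
extern "C" {
#include <libavformat/avformat.h>
}

// Hypothetical helper: rescale a packet from the demuxer's time base into the
// muxer's time base, then stretch its timestamps by `factor` for slow motion.
static void slow_down_packet(AVPacket* pkt,
                             const AVStream* in_stream,
                             const AVStream* out_stream,
                             int64_t factor)
{
    // Convert pts/dts/duration into the output stream's time base,
    // passing AV_NOPTS_VALUE through unchanged.
    pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base, out_stream->time_base,
                                static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base,
                                static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
    pkt->pos = -1;

    // Stretch the timeline: later presentation times, longer frame durations.
    pkt->pts *= factor;
    pkt->dts *= factor;
    pkt->duration *= factor;
}

With such a helper, the body of the read loop reduces to slow_down_packet(&packet, inputStream, outputStream, 5) followed by av_write_frame(ofmt_ctx, &packet).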
