Series articles:
音视频编解码全流程之用Extractor后Muxer生成MP4
From the two earlier articles in this series, 音视频编解码全流程之Extractor and 音视频编解码全流程之复用器Muxer, we already know:
how to initialize and run the flow that extracts packets from a media file;
how to initialize and run the flow that writes packets into a target file.
Building on that, this article implements "extracting packets from a media file with an Extractor, then writing them into a target media file with a Muxer to generate an MP4 file".
一. Extracting packets from a media file and writing them to a target file with FFmpeg:
1. Cross-compiling FFmpeg:
The first step is cross-compiling FFmpeg. The whole cross-compilation process was covered in an earlier blog post; if you are interested, see that post:
2. The full flow of extracting data from a media file and writing it to a target file:
The details of this flow were already broken down in 音视频编解码全流程之复用器Muxer, so they are not repeated here.
The code lives in the CopyMeidaFile.cpp class of the codecTraningLib module in my GitHub repository: https://github.com/wangyongyao1989/FFmpegPractices
```cpp
//
// Created by wangyao on 2025/8/17.
//
#include "includes/CopyMeidaFile.h"
CopyMeidaFile::CopyMeidaFile() {
}
CopyMeidaFile::~CopyMeidaFile() {
}
string CopyMeidaFile::copyMediaFile(const char *srcPath, const char *destPath) {
// Open the source media file
int ret = avformat_open_input(&in_fmt_ctx, srcPath, nullptr, nullptr);
if (ret < 0) {
LOGE("Can't open file %s.\n", srcPath);
copyInfo = "Can't open file :" + string(srcPath);
return copyInfo;
}
LOGI("Success open input_file %s.\n", srcPath);
copyInfo = copyInfo + "\nSuccess open input_file :" + string(srcPath);
// Probe the stream information in the file
ret = avformat_find_stream_info(in_fmt_ctx, nullptr);
if (ret < 0) {
LOGE("Can't find stream information.\n");
copyInfo = "\nCan't find stream information. ";
return copyInfo;
}
AVStream *src_video = nullptr;
// Find the index of the video stream
int video_index = av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
if (video_index >= 0) {
src_video = in_fmt_ctx->streams[video_index];
}
AVStream *src_audio = nullptr;
// Find the index of the audio stream
int audio_index = av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
if (audio_index >= 0) {
src_audio = in_fmt_ctx->streams[audio_index];
}
AVFormatContext *out_fmt_ctx; // Muxer context of the output file
// Allocate the muxer context for the output file
ret = avformat_alloc_output_context2(&out_fmt_ctx, nullptr, nullptr, destPath);
if (ret < 0) {
LOGE("Can't alloc output_file %s.\n", destPath);
copyInfo = "\nCan't alloc output_file : " + string(destPath);
return copyInfo;
}
// Open the output I/O stream
ret = avio_open(&out_fmt_ctx->pb, destPath, AVIO_FLAG_READ_WRITE);
if (ret < 0) {
LOGE("Can't open output_file %s.\n", destPath);
copyInfo = "\nCan't open output_file: " + string(destPath);
return copyInfo;
}
LOGI("Success open output_file %s.\n", destPath);
copyInfo = copyInfo + "\nSuccess open output_file :" + string(destPath);
if (video_index >= 0) { // The source has a video stream, so create one in the output file
AVStream *dest_video = avformat_new_stream(out_fmt_ctx, nullptr); // Create the output stream
// Copy the video parameters from the source as-is
avcodec_parameters_copy(dest_video->codecpar, src_video->codecpar);
dest_video->time_base = src_video->time_base;
dest_video->codecpar->codec_tag = 0;
}
if (audio_index >= 0) { // The source has an audio stream, so create one in the output file
AVStream *dest_audio = avformat_new_stream(out_fmt_ctx, nullptr); // Create the output stream
// Copy the audio parameters from the source as-is
avcodec_parameters_copy(dest_audio->codecpar, src_audio->codecpar);
dest_audio->codecpar->codec_tag = 0;
}
ret = avformat_write_header(out_fmt_ctx, nullptr); // Write the file header
if (ret < 0) {
LOGE("write file_header occur error %d.\n", ret);
copyInfo = "\n write file_header occur error: " + to_string(ret);
return copyInfo;
}
LOGI("Success write file_header.\n");
copyInfo = copyInfo + "\nSuccess write file_header.";
AVPacket *packet = av_packet_alloc(); // Allocate a packet
while (av_read_frame(in_fmt_ctx, packet) >= 0) { // Loop over the packets
// In some files the video stream is not the first stream; remap it to index 0, because the output video stream was created first
if (packet->stream_index == video_index) { // Video packet
packet->stream_index = 0;
ret = av_write_frame(out_fmt_ctx, packet); // Write one packet to the output file
} else { // Audio packet
packet->stream_index = 1;
ret = av_write_frame(out_fmt_ctx, packet); // Write one packet to the output file
}
if (ret < 0) {
LOGE("write frame occur error %d.\n", ret);
copyInfo = copyInfo + "\n write frame occur error: " + to_string(ret);
break;
}
av_packet_unref(packet); // Release the packet's data reference
}
av_write_trailer(out_fmt_ctx); // Write the file trailer
LOGI("Success copy file.\n");
copyInfo = copyInfo + "\n Success copy file.";
av_packet_free(&packet); // Free the packet
avio_close(out_fmt_ctx->pb); // Close the output I/O stream
avformat_free_context(out_fmt_ctx); // Free the muxer context
avformat_close_input(&in_fmt_ctx); // Close the input file
return copyInfo;
}
```
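One caveat worth noting about the copy loop above: packets are written with the timestamps they were read with. avformat_write_header() may adjust the output stream's time_base (the MP4 muxer in particular often does), and in that case pts/dts/duration should be rescaled from the input stream's time base before writing, otherwise the generated file can report a wrong duration or drift out of sync. Below is a minimal sketch of such a loop, assuming the same in_fmt_ctx/out_fmt_ctx and the same stream-index mapping (video first, audio second) as above; it is an illustration, not the code from the repository.

```cpp
// Sketch: copy loop with timestamp rescaling (assumes in_fmt_ctx/out_fmt_ctx,
// video_index and the "video = 0, audio = 1" output mapping used above).
AVPacket *pkt = av_packet_alloc();
while (av_read_frame(in_fmt_ctx, pkt) >= 0) {
    AVStream *in_stream = in_fmt_ctx->streams[pkt->stream_index];
    int out_index = (pkt->stream_index == video_index) ? 0 : 1;
    AVStream *out_stream = out_fmt_ctx->streams[out_index];
    pkt->stream_index = out_index;
    // Convert pts/dts/duration from the input stream's time base to the output stream's.
    av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base);
    pkt->pos = -1; // the byte position in the source file is meaningless in the new file
    if (av_interleaved_write_frame(out_fmt_ctx, pkt) < 0) {
        break;
    }
    av_packet_unref(pkt);
}
av_packet_free(&pkt);
```

av_interleaved_write_frame() lets libavformat interleave the video and audio packets in the output; av_write_frame(), as used above, simply relies on the input already being interleaved.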
二. Extracting packets from a media file and writing them to a target file with the NDK:
Here the NDK's NdkMediaExtractor and NdkMediaMuxer APIs are used to implement functionality similar to the FFmpeg version in section 一.
1. Initialize the extractor (Extractor):
Create the extractor with AMediaExtractor_new() ------> set the extractor's fd with AMediaExtractor_setDataSourceFd(extractor, input_fd, 0, fileSize).
The code of this function is as follows:
```cpp
// Initialize the extractor
bool MediaTransMuxer::initExtractor() {
extractor = AMediaExtractor_new();
if (!extractor) {
LOGE("Failed to create media extractor ");
callbackInfo =
"Failed to create media extractor \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGE("inputPath:%c", sSrcPath.c_str());
FILE *inputFp = fopen(sSrcPath.c_str(), "rb");
if (!inputFp) {
LOGE("Unable to open output file :%s", sSrcPath.c_str());
callbackInfo =
"Unable to open output file :" + sSrcPath + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
struct stat buf;
stat(sSrcPath.c_str(), &buf);
size_t fileSize = buf.st_size;
int32_t input_fd = fileno(inputFp);
LOGE("input_fd:%d", input_fd);
media_status_t status = AMediaExtractor_setDataSourceFd(extractor, input_fd, 0, fileSize);
if (status != AMEDIA_OK) {
LOGE("Failed to set data source: %d", status);
callbackInfo =
"Failed to set data source :" + to_string(status) + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGI("Extractor initialized successfully");
return true;
}
```
2. Select the tracks in the extractor:
------> AMediaExtractor_getTrackCount() gets the number of tracks in the extractor
------> AMediaExtractor_getTrackFormat(extractor, i) is called while iterating over the tracks to get each track's AMediaFormat
------> Filter out the audio and video tracks, recording the audio track's audioTrackIndex and the video track's videoTrackIndex.
The code of this function is as follows:
```cpp
// Select the tracks
bool MediaTransMuxer::selectTracks() {
size_t trackCount = AMediaExtractor_getTrackCount(extractor);
LOGI("Total tracks: %zu", trackCount);
callbackInfo =
"Total tracks:" + to_string(trackCount) + "\n";
PostStatusMessage(callbackInfo.c_str());
for (size_t i = 0; i < trackCount; i++) {
AMediaFormat *format = AMediaExtractor_getTrackFormat(extractor, i);
if (!format) continue;
const char *mime;
if (AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mime)) {
LOGI("Track %zu: MIME=%s", i, mime);
if (strncmp(mime, "video/", 6) == 0 && videoTrackIndex == -1) {
videoTrackIndex = i;
hasVideo = true;
LOGI("Selected video track: %d", videoTrackIndex);
callbackInfo =
"Selected video track:" + to_string(videoTrackIndex) + "\n";
PostStatusMessage(callbackInfo.c_str());
} else if (strncmp(mime, "audio/", 6) == 0 && audioTrackIndex == -1) {
audioTrackIndex = i;
hasAudio = true;
LOGI("Selected audio track: %d", audioTrackIndex);
callbackInfo =
"Selected audio track:" + to_string(audioTrackIndex) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
}
AMediaFormat_delete(format);
}
return hasVideo || hasAudio;
}
```
3. Initialize the muxer (Muxer):
------> Create the muxer with AMediaMuxer_new(output_fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4)
------> Switch the extractor to the video track with AMediaExtractor_selectTrack(extractor, videoTrackIndex), then add the track to the muxer with AMediaMuxer_addTrack(muxer, videoFormat)
------> Switch the extractor to the audio track with AMediaExtractor_selectTrack(extractor, audioTrackIndex), then add the track to the muxer with AMediaMuxer_addTrack(muxer, audioFormat)
------> Start the muxer with AMediaMuxer_start(muxer).
The code of this function is as follows:
```cpp
// Initialize the muxer
bool MediaTransMuxer::initMuxer() {
LOGE("outputPath:%c", sOutPath.c_str());
FILE *outputFp = fopen(sOutPath.c_str(), "w+b");
if (!outputFp) {
LOGE("Unable to open output file :%s", sOutPath.c_str());
callbackInfo =
"Unable to open output file :" + sOutPath + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
int32_t output_fd = fileno(outputFp);
muxer = AMediaMuxer_new(output_fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4);
if (!muxer) {
LOGE("Failed to create media muxer");
callbackInfo =
"Failed to create media muxer \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
// Add the video track
if (hasVideo) {
AMediaExtractor_selectTrack(extractor, videoTrackIndex);
AMediaFormat *videoFormat = AMediaExtractor_getTrackFormat(extractor, videoTrackIndex);
muxerVideoTrackIndex = AMediaMuxer_addTrack(muxer, videoFormat);
AMediaFormat_delete(videoFormat);
if (muxerVideoTrackIndex < 0) {
LOGE("Failed to add video track to muxer");
callbackInfo =
"Failed to add video track to muxer \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGI("Muxer video track index: %d", muxerVideoTrackIndex);
callbackInfo =
"Muxer video track index:" + to_string(muxerVideoTrackIndex) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
// Add the audio track
if (hasAudio) {
AMediaExtractor_selectTrack(extractor, audioTrackIndex);
AMediaFormat *audioFormat = AMediaExtractor_getTrackFormat(extractor, audioTrackIndex);
muxerAudioTrackIndex = AMediaMuxer_addTrack(muxer, audioFormat);
AMediaFormat_delete(audioFormat);
if (muxerAudioTrackIndex < 0) {
LOGE("Failed to add audio track to muxer");
callbackInfo =
"Failed to add audio track to muxer \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGI("Muxer audio track index: %d", muxerAudioTrackIndex);
callbackInfo =
"Muxer audio track index:" + to_string(muxerAudioTrackIndex) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
// Start the muxer
media_status_t status = AMediaMuxer_start(muxer);
if (status != AMEDIA_OK) {
LOGE("Failed to start muxer: %d", status);
callbackInfo =
"Failed to start muxer:" + to_string(status) + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGI("Muxer initialized successfully");
callbackInfo =
"Muxer initialized successfully \n";
PostStatusMessage(callbackInfo.c_str());
return true;
}
```
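A small resource-management note on initExtractor()/initMuxer(): fopen() is only used to obtain a file descriptor via fileno(), and the FILE* handles are never stored or closed, so each run leaks two open file handles. Below is a minimal sketch of one way to tidy this up, assuming hypothetical members mInputFp and mOutputFp (not present in the original class) that hold the handles opened in initExtractor()/initMuxer() and are closed in release():

```cpp
// Sketch only, not the repository code: close the FILE* handles (assumed members
// mInputFp / mOutputFp set in initExtractor()/initMuxer()) when releasing everything.
void MediaTransMuxer::release() {
    if (muxer) {
        AMediaMuxer_stop(muxer);   // finalizes the MP4 output
        AMediaMuxer_delete(muxer);
        muxer = nullptr;
    }
    if (extractor) {
        AMediaExtractor_delete(extractor);
        extractor = nullptr;
    }
    if (mOutputFp) { fclose(mOutputFp); mOutputFp = nullptr; } // assumed member
    if (mInputFp)  { fclose(mInputFp);  mInputFp  = nullptr; } // assumed member
}
```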
4. Perform the remux (transmux):
------> Re-select all tracks to reset the read position: AMediaExtractor_selectTrack(extractor, videoTrackIndex) and AMediaExtractor_selectTrack(extractor, audioTrackIndex);
------> Seek back to the beginning: AMediaExtractor_seekTo(extractor, 0, AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC);
------> In the loop, get the track the current sample belongs to: AMediaExtractor_getSampleTrackIndex(extractor);
------> In the loop, fill in the four members of the AMediaCodecBufferInfo struct:
------> sample size: info.size = AMediaExtractor_getSampleSize(extractor);
------> sample offset: info.offset = 0;
------> sample flags: info.flags = AMediaExtractor_getSampleFlags(extractor);
------> sample presentation time: info.presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
------> In the loop, read the sample data: AMediaExtractor_readSampleData(extractor, sampleData, info.size);
------> In the loop, write the video or audio sample data: AMediaMuxer_writeSampleData(muxer, muxerVideoTrackIndex, sampleData, &info) or AMediaMuxer_writeSampleData(muxer, muxerAudioTrackIndex, sampleData, &info);
------> In the loop, advance to the next sample: AMediaExtractor_advance(extractor);
The code of this function is as follows:
```cpp
// Perform the remux
bool MediaTransMuxer::transmux() {
if (!extractor || !muxer) {
LOGE("Extractor or muxer not initialized");
callbackInfo =
"Extractor or muxer not initialized \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
AMediaCodecBufferInfo info;
bool sawEOS = false;
int64_t lastVideoPts = -1;
int64_t lastAudioPts = -1;
// Re-select all tracks to reset the read position
if (hasVideo) AMediaExtractor_selectTrack(extractor, videoTrackIndex);
if (hasAudio) AMediaExtractor_selectTrack(extractor, audioTrackIndex);
// Seek back to the beginning
AMediaExtractor_seekTo(extractor, 0, AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC);
while (!sawEOS) {
ssize_t trackIndex = AMediaExtractor_getSampleTrackIndex(extractor);
if (trackIndex < 0) {
sawEOS = true;
break;
}
// Fill in the sample info
info.size = AMediaExtractor_getSampleSize(extractor);
info.offset = 0;
info.flags = AMediaExtractor_getSampleFlags(extractor);
info.presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
// Read the sample data
uint8_t *sampleData = new uint8_t[info.size];
ssize_t bytesRead = AMediaExtractor_readSampleData(extractor, sampleData, info.size);
if (bytesRead < 0) {
LOGE("Error reading sample data: %zd", bytesRead);
callbackInfo =
"Error reading sample data:" + to_string(bytesRead) + "\n";
PostStatusMessage(callbackInfo.c_str());
delete[] sampleData;
break;
}
// Write to the muxer
if (trackIndex == videoTrackIndex && hasVideo) {
// Check that the timestamp is valid (skip duplicate or backward timestamps)
if (info.presentationTimeUs > lastVideoPts) {
media_status_t status = AMediaMuxer_writeSampleData(muxer,
muxerVideoTrackIndex,
sampleData,
&info);
if (status != AMEDIA_OK) {
LOGE("Failed to write video sample: %d", status);
callbackInfo =
"Failed to write video sample:" + to_string(status) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
callbackInfo =
"AMediaMuxer_writeSampleData video size:" + to_string(info.size) + "\n";
PostStatusMessage(callbackInfo.c_str());
lastVideoPts = info.presentationTimeUs;
}
} else if (trackIndex == audioTrackIndex && hasAudio) {
// Check that the timestamp is valid
if (info.presentationTimeUs > lastAudioPts) {
media_status_t status = AMediaMuxer_writeSampleData(muxer,
muxerAudioTrackIndex,
sampleData,
&info);
if (status != AMEDIA_OK) {
LOGE("Failed to write audio sample : %d", status);
callbackInfo =
"Failed to write audio sample:" + to_string(status) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
callbackInfo =
"AMediaMuxer_writeSampleData audio size:" + to_string(info.size) + "\n";
PostStatusMessage(callbackInfo.c_str());
lastAudioPts = info.presentationTimeUs;
}
}
delete[] sampleData;
// Advance to the next sample
if (!AMediaExtractor_advance(extractor)) {
sawEOS = true;
}
}
LOGI("Transmuxing completed");
callbackInfo =
"Transmuxing completed \n";
PostStatusMessage(callbackInfo.c_str());
return true;
}
```
5. The complete code:
The code above lives in the MediaTransMuxer.cpp class of the hwCodecLib module in my GitHub project: https://github.com/wangyongyao1989/FFmpegPractices
```cpp
// Author : wangyongyao https://github.com/wangyongyao1989
// Created by MMM on 2025/9/29.
//
#include <sys/stat.h>
#include "includes/MediaTransMuxer.h"
MediaTransMuxer::MediaTransMuxer(JNIEnv *env, jobject thiz) {
mEnv = env;
env->GetJavaVM(&mJavaVm);
mJavaObj = env->NewGlobalRef(thiz);
}
MediaTransMuxer::~MediaTransMuxer() {
mEnv->DeleteGlobalRef(mJavaObj);
if (mEnv) {
mEnv = nullptr;
}
if (mJavaVm) {
mJavaVm = nullptr;
}
if (mJavaObj) {
mJavaObj = nullptr;
}
release();
}
void MediaTransMuxer::startMediaTransMuxer(const char *inputPath, const char *outputPath) {
sSrcPath = inputPath;
sOutPath = outputPath;
LOGI("sSrcPath :%s \n sOutPath: %s ", sSrcPath.c_str(), sOutPath.c_str());
callbackInfo =
"sSrcPath:" + sSrcPath + "\n";
PostStatusMessage(callbackInfo.c_str());
// 1. Initialize the extractor
if (!initExtractor()) {
LOGE("Failed to initialize extractor");
callbackInfo =
"Failed to initialize extractor \n";
PostStatusMessage(callbackInfo.c_str());
return;
}
// 2. Select the tracks
if (!selectTracks()) {
LOGE("No valid tracks found");
callbackInfo =
"No valid tracks found \n";
PostStatusMessage(callbackInfo.c_str());
return;
}
// 3. Initialize the muxer
if (!initMuxer()) {
LOGE("Failed to initialize muxer");
callbackInfo =
"Failed to initialize muxer \n";
PostStatusMessage(callbackInfo.c_str());
return;
}
// 4. Perform the remux
if (!transmux()) {
LOGE("Transmuxing failed");
callbackInfo =
"Transmuxing failed \n";
PostStatusMessage(callbackInfo.c_str());
return;
}
// Release the resources
release();
}
// Initialize the extractor
bool MediaTransMuxer::initExtractor() {
extractor = AMediaExtractor_new();
if (!extractor) {
LOGE("Failed to create media extractor ");
callbackInfo =
"Failed to create media extractor \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGE("inputPath:%c", sSrcPath.c_str());
FILE *inputFp = fopen(sSrcPath.c_str(), "rb");
if (!inputFp) {
LOGE("Unable to open output file :%s", sSrcPath.c_str());
callbackInfo =
"Unable to open output file :" + sSrcPath + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
struct stat buf;
stat(sSrcPath.c_str(), &buf);
size_t fileSize = buf.st_size;
int32_t input_fd = fileno(inputFp);
LOGE("input_fd:%d", input_fd);
media_status_t status = AMediaExtractor_setDataSourceFd(extractor, input_fd, 0, fileSize);
if (status != AMEDIA_OK) {
LOGE("Failed to set data source: %d", status);
callbackInfo =
"Failed to set data source :" + to_string(status) + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGI("Extractor initialized successfully");
return true;
}
// Select the tracks
bool MediaTransMuxer::selectTracks() {
size_t trackCount = AMediaExtractor_getTrackCount(extractor);
LOGI("Total tracks: %zu", trackCount);
callbackInfo =
"Total tracks:" + to_string(trackCount) + "\n";
PostStatusMessage(callbackInfo.c_str());
for (size_t i = 0; i < trackCount; i++) {
AMediaFormat *format = AMediaExtractor_getTrackFormat(extractor, i);
if (!format) continue;
const char *mime;
if (AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mime)) {
LOGI("Track %zu: MIME=%s", i, mime);
if (strncmp(mime, "video/", 6) == 0 && videoTrackIndex == -1) {
videoTrackIndex = i;
hasVideo = true;
LOGI("Selected video track: %d", videoTrackIndex);
callbackInfo =
"Selected video track:" + to_string(videoTrackIndex) + "\n";
PostStatusMessage(callbackInfo.c_str());
} else if (strncmp(mime, "audio/", 6) == 0 && audioTrackIndex == -1) {
audioTrackIndex = i;
hasAudio = true;
LOGI("Selected audio track: %d", audioTrackIndex);
callbackInfo =
"Selected audio track:" + to_string(audioTrackIndex) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
}
AMediaFormat_delete(format);
}
return hasVideo || hasAudio;
}
// Initialize the muxer
bool MediaTransMuxer::initMuxer() {
LOGE("outputPath:%c", sOutPath.c_str());
FILE *outputFp = fopen(sOutPath.c_str(), "w+b");
if (!outputFp) {
LOGE("Unable to open output file :%s", sOutPath.c_str());
callbackInfo =
"Unable to open output file :" + sOutPath + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
int32_t output_fd = fileno(outputFp);
muxer = AMediaMuxer_new(output_fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4);
if (!muxer) {
LOGE("Failed to create media muxer");
callbackInfo =
"Failed to create media muxer \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
// Add the video track
if (hasVideo) {
AMediaExtractor_selectTrack(extractor, videoTrackIndex);
AMediaFormat *videoFormat = AMediaExtractor_getTrackFormat(extractor, videoTrackIndex);
muxerVideoTrackIndex = AMediaMuxer_addTrack(muxer, videoFormat);
AMediaFormat_delete(videoFormat);
if (muxerVideoTrackIndex < 0) {
LOGE("Failed to add video track to muxer");
callbackInfo =
"Failed to add video track to muxer \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGI("Muxer video track index: %d", muxerVideoTrackIndex);
callbackInfo =
"Muxer video track index:" + to_string(muxerVideoTrackIndex) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
// Add the audio track
if (hasAudio) {
AMediaExtractor_selectTrack(extractor, audioTrackIndex);
AMediaFormat *audioFormat = AMediaExtractor_getTrackFormat(extractor, audioTrackIndex);
muxerAudioTrackIndex = AMediaMuxer_addTrack(muxer, audioFormat);
AMediaFormat_delete(audioFormat);
if (muxerAudioTrackIndex < 0) {
LOGE("Failed to add audio track to muxer");
callbackInfo =
"Failed to add audio track to muxer \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGI("Muxer audio track index: %d", muxerAudioTrackIndex);
callbackInfo =
"Muxer audio track index:" + to_string(muxerAudioTrackIndex) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
// Start the muxer
media_status_t status = AMediaMuxer_start(muxer);
if (status != AMEDIA_OK) {
LOGE("Failed to start muxer: %d", status);
callbackInfo =
"Failed to start muxer:" + to_string(status) + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGI("Muxer initialized successfully");
callbackInfo =
"Muxer initialized successfully \n";
PostStatusMessage(callbackInfo.c_str());
return true;
}
// Perform the remux
bool MediaTransMuxer::transmux() {
if (!extractor || !muxer) {
LOGE("Extractor or muxer not initialized");
callbackInfo =
"Extractor or muxer not initialized \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
AMediaCodecBufferInfo info;
bool sawEOS = false;
int64_t lastVideoPts = -1;
int64_t lastAudioPts = -1;
// Re-select all tracks to reset the read position
if (hasVideo) AMediaExtractor_selectTrack(extractor, videoTrackIndex);
if (hasAudio) AMediaExtractor_selectTrack(extractor, audioTrackIndex);
// Seek back to the beginning
AMediaExtractor_seekTo(extractor, 0, AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC);
while (!sawEOS) {
ssize_t trackIndex = AMediaExtractor_getSampleTrackIndex(extractor);
if (trackIndex < 0) {
sawEOS = true;
break;
}
// Fill in the sample info
info.size = AMediaExtractor_getSampleSize(extractor);
info.offset = 0;
info.flags = AMediaExtractor_getSampleFlags(extractor);
info.presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
// Read the sample data
uint8_t *sampleData = new uint8_t[info.size];
ssize_t bytesRead = AMediaExtractor_readSampleData(extractor, sampleData, info.size);
if (bytesRead < 0) {
LOGE("Error reading sample data: %zd", bytesRead);
callbackInfo =
"Error reading sample data:" + to_string(bytesRead) + "\n";
PostStatusMessage(callbackInfo.c_str());
delete[] sampleData;
break;
}
// Write to the muxer
if (trackIndex == videoTrackIndex && hasVideo) {
// Check that the timestamp is valid (skip duplicate or backward timestamps)
if (info.presentationTimeUs > lastVideoPts) {
media_status_t status = AMediaMuxer_writeSampleData(muxer,
muxerVideoTrackIndex,
sampleData,
&info);
if (status != AMEDIA_OK) {
LOGE("Failed to write video sample: %d", status);
callbackInfo =
"Failed to write video sample:" + to_string(status) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
callbackInfo =
"AMediaMuxer_writeSampleData video size:" + to_string(info.size) + "\n";
PostStatusMessage(callbackInfo.c_str());
lastVideoPts = info.presentationTimeUs;
}
} else if (trackIndex == audioTrackIndex && hasAudio) {
// Check that the timestamp is valid
if (info.presentationTimeUs > lastAudioPts) {
media_status_t status = AMediaMuxer_writeSampleData(muxer,
muxerAudioTrackIndex,
sampleData,
&info);
if (status != AMEDIA_OK) {
LOGE("Failed to write audio sample : %d", status);
callbackInfo =
"Failed to write audio sample:" + to_string(status) + "\n";
PostStatusMessage(callbackInfo.c_str());
}
callbackInfo =
"AMediaMuxer_writeSampleData audio size:" + to_string(info.size) + "\n";
PostStatusMessage(callbackInfo.c_str());
lastAudioPts = info.presentationTimeUs;
}
}
delete[] sampleData;
// Advance to the next sample
if (!AMediaExtractor_advance(extractor)) {
sawEOS = true;
}
}
LOGI("Transmuxing completed");
callbackInfo =
"Transmuxing completed \n";
PostStatusMessage(callbackInfo.c_str());
return true;
}
// Release the resources
void MediaTransMuxer::release() {
if (muxer) {
AMediaMuxer_stop(muxer);
AMediaMuxer_delete(muxer);
muxer = nullptr;
}
if (extractor) {
AMediaExtractor_delete(extractor);
extractor = nullptr;
}
LOGI("Resources released");
}
JNIEnv *MediaTransMuxer::GetJNIEnv(bool *isAttach) {
JNIEnv *env;
int status;
if (nullptr == mJavaVm) {
LOGD("GetJNIEnv mJavaVm == nullptr");
return nullptr;
}
*isAttach = false;
status = mJavaVm->GetEnv((void **) &env, JNI_VERSION_1_6);
if (status != JNI_OK) {
status = mJavaVm->AttachCurrentThread(&env, nullptr);
if (status != JNI_OK) {
LOGD("GetJNIEnv failed to attach current thread");
return nullptr;
}
*isAttach = true;
}
return env;
}
void MediaTransMuxer::PostStatusMessage(const char *msg) {
bool isAttach = false;
JNIEnv *pEnv = GetJNIEnv(&isAttach);
if (pEnv == nullptr) {
return;
}
jobject javaObj = mJavaObj;
jmethodID mid = pEnv->GetMethodID(pEnv->GetObjectClass(javaObj), "CppStatusCallback",
"(Ljava/lang/String;)V");
jstring pJstring = pEnv->NewStringUTF(msg);
pEnv->CallVoidMethod(javaObj, mid, pJstring);
if (isAttach) {
JavaVM *pJavaVm = mJavaVm;
pJavaVm->DetachCurrentThread();
}
}
```
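The repository's JNI glue is not reproduced here, so below is only a rough sketch of how the class might be driven from a native method; the Java package, class and method names are placeholders, not the ones actually used in the project. The Java object passed in is expected to implement the CppStatusCallback(String) method that PostStatusMessage() calls.

```cpp
// Sketch only: a possible JNI entry point for MediaTransMuxer.
// All Java-side names below are placeholders.
extern "C"
JNIEXPORT void JNICALL
Java_com_example_hwcodec_TransMuxerBridge_nativeTransMux(JNIEnv *env, jobject thiz,
                                                         jstring srcPath, jstring dstPath) {
    const char *src = env->GetStringUTFChars(srcPath, nullptr);
    const char *dst = env->GetStringUTFChars(dstPath, nullptr);
    auto *transMuxer = new MediaTransMuxer(env, thiz); // posts progress via CppStatusCallback
    transMuxer->startMediaTransMuxer(src, dst);        // extractor -> muxer -> release
    delete transMuxer;
    env->ReleaseStringUTFChars(srcPath, src);
    env->ReleaseStringUTFChars(dstPath, dst);
}
```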
三. Comparison of the FFmpeg and NDK ways of writing packets into a target file:
FFmpeg:
------> Open the input file and get its AVFormatContext: avformat_open_input(&in_fmt_ctx, srcPath, nullptr, nullptr);
------> Probe the stream information in the file: avformat_find_stream_info(in_fmt_ctx, nullptr);
------> Find the video and audio stream indexes: av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0) / av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
------> **Look up the AVStream instances:** src_video = in_fmt_ctx->streams[video_index] / src_audio = in_fmt_ctx->streams[audio_index];
------> Allocate the output file's AVFormatContext: avformat_alloc_output_context2(&out_fmt_ctx, nullptr, nullptr, destPath);
------> Open the output I/O stream: avio_open(&out_fmt_ctx->pb, destPath, AVIO_FLAG_READ_WRITE);
------> Create the output streams and copy the source's audio/video parameters as-is:
------> AVStream *dest_video = avformat_new_stream(out_fmt_ctx, nullptr)
------> avcodec_parameters_copy(dest_video->codecpar, src_video->codecpar)
------> **Write the file header:** avformat_write_header(out_fmt_ctx, nullptr)
------> Allocate a packet / loop over the packets / write each packet to the file:
------> AVPacket *packet = av_packet_alloc()
------> while (av_read_frame(in_fmt_ctx, packet) >= 0) {}
------> av_write_frame(out_fmt_ctx, packet);
------> Write the file trailer: av_write_trailer(out_fmt_ctx);
NDK:
------> Initialize the extractor:
------> Create the extractor: AMediaExtractor_new()
------> Set the extractor's fd: AMediaExtractor_setDataSourceFd(extractor, input_fd, 0, fileSize)
------> Select the tracks in the extractor:
------> AMediaExtractor_getTrackCount() gets the number of tracks in the extractor
------> AMediaExtractor_getTrackFormat(extractor, i) is called while iterating over the tracks to get each track's AMediaFormat
------> Filter out the audio and video tracks, recording the audio track's audioTrackIndex and the video track's videoTrackIndex.
------> Initialize the muxer:
------> Create the muxer: AMediaMuxer_new(output_fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4)
------> Switch the extractor to the video track with AMediaExtractor_selectTrack(extractor, videoTrackIndex), then add the track to the muxer with AMediaMuxer_addTrack(muxer, videoFormat)
------> Switch the extractor to the audio track with AMediaExtractor_selectTrack(extractor, audioTrackIndex), then add the track to the muxer with AMediaMuxer_addTrack(muxer, audioFormat)
------> Start the muxer: AMediaMuxer_start(muxer).
------> Perform the remux:
------> Re-select all tracks to reset the read position: AMediaExtractor_selectTrack(extractor, videoTrackIndex) and AMediaExtractor_selectTrack(extractor, audioTrackIndex);
------> Seek back to the beginning: AMediaExtractor_seekTo(extractor, 0, AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC);
------> In the loop, get the track the current sample belongs to: AMediaExtractor_getSampleTrackIndex(extractor);
------> In the loop, fill in the four members of the AMediaCodecBufferInfo struct:
------> sample size: info.size = AMediaExtractor_getSampleSize(extractor);
------> sample offset: info.offset = 0;
------> sample flags: info.flags = AMediaExtractor_getSampleFlags(extractor);
------> sample presentation time: info.presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
------> In the loop, read the sample data: AMediaExtractor_readSampleData(extractor, sampleData, info.size);
------> In the loop, write the video or audio sample data: AMediaMuxer_writeSampleData(muxer, muxerVideoTrackIndex, sampleData, &info) or AMediaMuxer_writeSampleData(muxer, muxerAudioTrackIndex, sampleData, &info);
------> In the loop, advance to the next sample: AMediaExtractor_advance(extractor);
Demo:
The code above lives in my GitHub project: https://github.com/wangyongyao1989/FFmpegPractices