系列文章:
音视频编解码全流程之用Extractor后Muxer生成MP4
根据前面叙述的 音视频编解码全流程之Extractor可知:
媒体文件提取数据包的初始化及提取流程;
以此为基础,在本篇文章中实现"从媒体文件中用Extractor提取数据包后,对提取出的数据进行解码的操作"。
一. FFmpeg中提取媒体文件数据包后,对数据包进行解码:
1.FFmpeg交叉编译:
首先的是FFmpeg交叉编译,在之前的博客中有介绍交叉编译的全过程,感兴趣的可以查看博客:
2.提取媒体文件数据包:
具体的流程细节分解在 音视频编解码全流程之复用器Muxer 已经进行了描述,这里不再赘述。
3.查找解码器AVCodec和分配解码器实例AVCodecContext:
------> 这里是从媒体文件中封装实例AVFormatContext,查找音视频文件中的流信息 avformat_find_stream_info(in_fmt_ctx, nullptr)
------> 从封装实例中查找视频流的索引video_index = av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
------> 查找视频流AVStream,src_video = in_fmt_ctx->streams[video_index]。
------> 根据编解码器ID,查找解码器AVCodec *video_codec = (AVCodec *) avcodec_find_decoder(video_codec_id);
以下是查找解码器AVCodec的函数代码:
cpp
// Open the input file.
// Opens the media file at src_name, probes its streams, locates the video
// stream and creates + opens a decoder instance for it (video_decode_ctx),
// and records the audio stream index/stream if one exists.
// Returns 0 on success, -1 on failure; every branch also reports a status
// string to the Java layer via PostRecodecStatusMessage().
int RecodecVideo::open_input_file(const char *src_name) {
    // Open the media file (allocates and fills in_fmt_ctx).
    int ret = avformat_open_input(&in_fmt_ctx, src_name, nullptr, nullptr);
    if (ret < 0) {
        LOGE("Can't open file %s.\n", src_name);
        recodecInfo = "\n Can't open file :" + string(src_name);
        PostRecodecStatusMessage(recodecInfo.c_str());
        return -1;
    }
    LOGI("Success open input_file %s.\n", src_name);
    recodecInfo = "\n Success open input_file:" + string(src_name);
    PostRecodecStatusMessage(recodecInfo.c_str());
    // Probe the container for stream information.
    ret = avformat_find_stream_info(in_fmt_ctx, nullptr);
    if (ret < 0) {
        LOGE("Can't find stream information.\n");
        av_strerror(ret, errbuf, sizeof(errbuf)); // convert the error code to a string
        recodecInfo = "Can't find stream information:" + to_string(ret) + "\n error msg:" +
                      string(errbuf) + "\n";
        PostRecodecStatusMessage(recodecInfo.c_str());
        return -1;
    }
    // Find the index of the best video stream.
    video_index = av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
    if (video_index >= 0) {
        src_video = in_fmt_ctx->streams[video_index];
        enum AVCodecID video_codec_id = src_video->codecpar->codec_id;
        // Look up a decoder matching the stream's codec id.
        AVCodec *video_codec = (AVCodec *) avcodec_find_decoder(video_codec_id);
        if (!video_codec) {
            LOGE("video_codec not found\n");
            recodecInfo = "\n video_codec not found. ";
            PostRecodecStatusMessage(recodecInfo.c_str());
            return -1;
        }
        video_decode_ctx = avcodec_alloc_context3(video_codec); // allocate the decoder instance
        if (!video_decode_ctx) {
            LOGE("video_decode_ctx is nullptr\n");
            recodecInfo = "\n video_decode_ctx is nullptr ";
            PostRecodecStatusMessage(recodecInfo.c_str());
            return -1;
        }
        // Copy the stream's codec parameters into the decoder instance.
        // NOTE(review): return value ignored here — worth checking.
        avcodec_parameters_to_context(video_decode_ctx, src_video->codecpar);
        ret = avcodec_open2(video_decode_ctx, video_codec, nullptr); // open the decoder instance
        if (ret < 0) {
            LOGE("Can't open video_decode_ctx.\n");
            recodecInfo = "Can't open video_decode_ctx\n";
            PostRecodecStatusMessage(recodecInfo.c_str());
            return -1;
        }
    } else {
        LOGE("Can't find video stream.\n");
        recodecInfo = "\n Can't find video stream.";
        PostRecodecStatusMessage(recodecInfo.c_str());
        return -1;
    }
    // Find the index of the best audio stream (optional — no error if absent).
    audio_index = av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
    if (audio_index >= 0) {
        src_audio = in_fmt_ctx->streams[audio_index];
    }
    return 0;
}
4.对数据包进行解码:
由上述提取过程从文件中提取出AVPacket,在FFmpeg中接收AVPacket后将其解码出原始的数据帧AVFrame。
------> 把未解压的数据包发给解码器实例avcodec_send_packet(video_decode_ctx, packet);
------> 从解码器实例获取还原后的数据帧avcodec_receive_frame(video_decode_ctx, frame);
以下是把AVPacket 发送给解码器解码后得到AVFrame的函数代码:
cpp
// Re-encode a video frame.
// Sends one compressed packet to the decoder, then drains every decoded
// frame and hands each one to output_video() for re-encoding.
// Returns 0 when the decoder needs more input (EAGAIN), 1 on decoder EOF,
// or a negative AVERROR code on failure.
int RecodecVideo::recode_video(AVPacket *packet, AVFrame *frame) {
    // Send the compressed packet to the decoder instance.
    int ret = avcodec_send_packet(video_decode_ctx, packet);
    if (ret < 0) {
        LOGE("send packet occur error %d.\n", ret);
        av_strerror(ret, errbuf, sizeof(errbuf)); // convert the error code to a string
        recodecInfo = "send packet occur error:" + to_string(ret) + "\n error msg:" +
                      string(errbuf) + "\n";
        PostRecodecStatusMessage(recodecInfo.c_str());
        return ret;
    }
    while (1) {
        // Fetch the next decoded frame from the decoder instance.
        ret = avcodec_receive_frame(video_decode_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return (ret == AVERROR(EAGAIN)) ? 0 : 1;
        } else if (ret < 0) {
            LOGE("decode frame occur error %d.\n", ret);
            recodecInfo = "\n decode frame occur error :" + to_string(ret);
            PostRecodecStatusMessage(recodecInfo.c_str());
            break;
        }
        if (frame->pts == AV_NOPTS_VALUE) { // special-case raw H.264 streams with no pts
            // Synthesize a monotonically increasing pts from the frame
            // counter, the stream frame rate and the stream time base.
            double interval = 1.0 / av_q2d(src_video->r_frame_rate);
            frame->pts = count * interval / av_q2d(src_video->time_base);
            count++;
        }
        output_video(frame); // encode the frame and write the compressed packet
    }
    return ret;
}
5.完整的编解码的代码:
以上的代码放在本人的GitHub项目中:https://github.com/wangyongyao1989/FFmpegPractices
中的RecodecVideo.cpp:
cpp
//
// Created by wangyao on 2025/8/17.
//
#include "includes/RecodecVideo.h"
// Caches the creating thread's JNIEnv, resolves the owning JavaVM, and
// pins the Java peer object with a global reference.
RecodecVideo::RecodecVideo(JNIEnv *env, jobject thiz) : mEnv(env) {
    mEnv->GetJavaVM(&mJavaVm);
    mJavaObj = mEnv->NewGlobalRef(thiz);
}
// Destructor.
// The FFmpeg contexts are freed at the end of recodecVideo(); here we only
// release the JNI references, clean up the worker-thread object, and clear
// the (now non-owning) raw pointers.
RecodecVideo::~RecodecVideo() {
    if (in_fmt_ctx) {
        in_fmt_ctx = nullptr;
    }
    if (video_encode_ctx) {
        video_encode_ctx = nullptr;
    }
    video_index = -1;
    audio_index = -1;
    if (src_video) {
        src_video = nullptr;
    }
    if (src_audio) {
        src_audio = nullptr;
    }
    if (dest_video) {
        dest_video = nullptr;
    }
    if (out_fmt_ctx) {
        out_fmt_ctx = nullptr;
    }
    // NOTE(review): mEnv is the JNIEnv of the constructing thread; using it
    // here assumes the destructor runs on that same thread — verify.
    mEnv->DeleteGlobalRef(mJavaObj);
    if (mEnv) {
        mEnv = nullptr;
    }
    if (mJavaVm) {
        mJavaVm = nullptr;
    }
    if (mJavaObj) {
        mJavaObj = nullptr;
    }
    if (codecThread != nullptr) {
        // startRecodecThread() detaches the worker, and calling join() on a
        // non-joinable thread throws std::system_error — guard the join.
        if (codecThread->joinable()) {
            codecThread->join();
        }
        delete codecThread;
        codecThread = nullptr;
    }
    mSrcPath = nullptr;
    mDestPath = nullptr;
}
// Opens the input and output files, then spawns (and detaches) the worker
// thread that performs the re-encoding. Does nothing if a worker already
// exists.
// NOTE(review): the raw path pointers are stored without copying — assumes
// the caller keeps the strings alive for the duration of the recode; verify.
void RecodecVideo::startRecodecThread(const char *srcPath, const char *destPath) {
    mSrcPath = srcPath;
    mDestPath = destPath;
    if (open_input_file(mSrcPath) < 0) { // open the input file
        return;
    }
    if (open_output_file(mDestPath) < 0) { // open the output file
        return;
    }
    if (codecThread == nullptr) {
        codecThread = new thread(DoRecoding, this);
        // Detached: the thread object is no longer joinable after this call.
        codecThread->detach();
    }
}
// Static thread entry point: forwards to the instance worker method.
void RecodecVideo::DoRecoding(RecodecVideo *recodecVideo) {
    recodecVideo->recodecVideo();
}
void RecodecVideo::recodecVideo() {
int ret = -1;
AVPacket *packet = av_packet_alloc(); // 分配一个数据包
AVFrame *frame = av_frame_alloc(); // 分配一个数据帧
while (av_read_frame(in_fmt_ctx, packet) >= 0) { // 轮询数据包
if (packet->stream_index == video_index) { // 视频包需要重新编码
packet->stream_index = 0;
if (packet->buf->size < 600) {
recodecInfo =
"读出视频包的大小:" + to_string(packet->buf->size) + ",并重新编码写入...\n";
PostRecodecStatusMessage(recodecInfo.c_str());
}
LOGD("%s.\n", recodecInfo.c_str());
recode_video(packet, frame); // 对视频帧重新编码
} else { // 音频包暂不重新编码,直接写入目标文件
packet->stream_index = 1;
ret = av_write_frame(out_fmt_ctx, packet); // 往文件写入一个数据包
if (ret < 0) {
LOGE("write frame occur error %d.\n", ret);
av_strerror(ret, errbuf, sizeof(errbuf)); // 将错误码转换为字符串
recodecInfo = "write frame occur error:" + to_string(ret) + "\n error msg:" +
string(errbuf) + "\n";
recodecInfo = "\n write frame occur error:" + to_string(ret);
break;
}
}
av_packet_unref(packet); // 清除数据包
}
packet->data = nullptr; // 传入一个空包,冲走解码缓存
packet->size = 0;
recode_video(packet, frame); // 对视频帧重新编码
output_video(nullptr); // 传入一个空帧,冲走编码缓存
av_write_trailer(out_fmt_ctx); // 写文件尾
LOGI("Success recode file.\n");
recodecInfo = "Success recode file!!!!!!\n\n";
PostRecodecStatusMessage(recodecInfo.c_str());
av_frame_free(&frame); // 释放数据帧资源
av_packet_free(&packet); // 释放数据包资源
avio_close(out_fmt_ctx->pb); // 关闭输出流
avcodec_close(video_decode_ctx); // 关闭视频解码器的实例
avcodec_free_context(&video_decode_ctx); // 释放视频解码器的实例
avcodec_close(video_encode_ctx); // 关闭视频编码器的实例
avcodec_free_context(&video_encode_ctx); // 释放视频编码器的实例
avformat_free_context(out_fmt_ctx); // 释放封装器的实例
avformat_close_input(&in_fmt_ctx); // 关闭音视频文件
}
// Open the input file.
// Opens the media file at src_name, probes its streams, locates the video
// stream and creates + opens a decoder instance for it (video_decode_ctx),
// and records the audio stream index/stream if one exists.
// Returns 0 on success, -1 on failure; every branch also reports a status
// string to the Java layer via PostRecodecStatusMessage().
int RecodecVideo::open_input_file(const char *src_name) {
    // Open the media file (allocates and fills in_fmt_ctx).
    int ret = avformat_open_input(&in_fmt_ctx, src_name, nullptr, nullptr);
    if (ret < 0) {
        LOGE("Can't open file %s.\n", src_name);
        recodecInfo = "\n Can't open file :" + string(src_name);
        PostRecodecStatusMessage(recodecInfo.c_str());
        return -1;
    }
    LOGI("Success open input_file %s.\n", src_name);
    recodecInfo = "\n Success open input_file:" + string(src_name);
    PostRecodecStatusMessage(recodecInfo.c_str());
    // Probe the container for stream information.
    ret = avformat_find_stream_info(in_fmt_ctx, nullptr);
    if (ret < 0) {
        LOGE("Can't find stream information.\n");
        av_strerror(ret, errbuf, sizeof(errbuf)); // convert the error code to a string
        recodecInfo = "Can't find stream information:" + to_string(ret) + "\n error msg:" +
                      string(errbuf) + "\n";
        PostRecodecStatusMessage(recodecInfo.c_str());
        return -1;
    }
    // Find the index of the best video stream.
    video_index = av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
    if (video_index >= 0) {
        src_video = in_fmt_ctx->streams[video_index];
        enum AVCodecID video_codec_id = src_video->codecpar->codec_id;
        // Look up a decoder matching the stream's codec id.
        AVCodec *video_codec = (AVCodec *) avcodec_find_decoder(video_codec_id);
        if (!video_codec) {
            LOGE("video_codec not found\n");
            recodecInfo = "\n video_codec not found. ";
            PostRecodecStatusMessage(recodecInfo.c_str());
            return -1;
        }
        video_decode_ctx = avcodec_alloc_context3(video_codec); // allocate the decoder instance
        if (!video_decode_ctx) {
            LOGE("video_decode_ctx is nullptr\n");
            recodecInfo = "\n video_decode_ctx is nullptr ";
            PostRecodecStatusMessage(recodecInfo.c_str());
            return -1;
        }
        // Copy the stream's codec parameters into the decoder instance.
        // NOTE(review): return value ignored here — worth checking.
        avcodec_parameters_to_context(video_decode_ctx, src_video->codecpar);
        ret = avcodec_open2(video_decode_ctx, video_codec, nullptr); // open the decoder instance
        if (ret < 0) {
            LOGE("Can't open video_decode_ctx.\n");
            recodecInfo = "Can't open video_decode_ctx\n";
            PostRecodecStatusMessage(recodecInfo.c_str());
            return -1;
        }
    } else {
        LOGE("Can't find video stream.\n");
        recodecInfo = "\n Can't find video stream.";
        PostRecodecStatusMessage(recodecInfo.c_str());
        return -1;
    }
    // Find the index of the best audio stream (optional — no error if absent).
    audio_index = av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
    if (audio_index >= 0) {
        src_audio = in_fmt_ctx->streams[audio_index];
    }
    return 0;
}
// Encode a video frame and write the resulting compressed packet(s).
// A null frame flushes the encoder. Returns 0 when the encoder wants more
// input (EAGAIN), 1 on encoder EOF, or a negative AVERROR code on failure.
int RecodecVideo::output_video(AVFrame *frame) {
    // Send the raw frame to the encoder instance.
    int ret = avcodec_send_frame(video_encode_ctx, frame);
    if (ret < 0) {
        LOGE("send frame occur error %d.\n", ret);
        av_strerror(ret, errbuf, sizeof(errbuf)); // convert the error code to a string
        recodecInfo = "send frame occur error :" + to_string(ret) + "\n error msg:" +
                      string(errbuf) + "\n";
        PostRecodecStatusMessage(recodecInfo.c_str());
        return ret;
    }
    // Allocate one packet and reuse it for every drained packet. The old
    // code allocated a fresh AVPacket each iteration and never freed any of
    // them, leaking a packet per encoded frame.
    AVPacket *packet = av_packet_alloc();
    while (1) {
        // Fetch the next compressed packet from the encoder instance.
        ret = avcodec_receive_packet(video_encode_ctx, packet);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_packet_free(&packet);
            return (ret == AVERROR(EAGAIN)) ? 0 : 1;
        } else if (ret < 0) {
            LOGE("encode frame occur error %d.\n", ret);
            av_strerror(ret, errbuf, sizeof(errbuf)); // convert the error code to a string
            recodecInfo = "encode frame occur error :" + to_string(ret) + "\n error msg:" +
                          string(errbuf) + "\n";
            PostRecodecStatusMessage(recodecInfo.c_str());
            break;
        }
        // Rescale the packet timestamps from the source time base to the
        // destination stream's time base.
        av_packet_rescale_ts(packet, src_video->time_base, dest_video->time_base);
        // LOGI( "pts=%ld, dts=%ld.\n", packet->pts, packet->dts);
        packet->stream_index = 0;
        ret = av_write_frame(out_fmt_ctx, packet); // write one packet to the file
        if (ret < 0) {
            LOGE("write frame occur error %d.\n", ret);
            av_strerror(ret, errbuf, sizeof(errbuf)); // convert the error code to a string
            recodecInfo = "write frame occur error:" + to_string(ret) + "\n error msg:" +
                          string(errbuf) + "\n";
            PostRecodecStatusMessage(recodecInfo.c_str());
            break;
        }
        av_packet_unref(packet); // reset the packet for reuse
    }
    av_packet_free(&packet);
    return ret;
}
// Re-encode a video frame.
// Sends one compressed packet to the decoder, then drains every decoded
// frame and hands each one to output_video() for re-encoding.
// Returns 0 when the decoder needs more input (EAGAIN), 1 on decoder EOF,
// or a negative AVERROR code on failure.
int RecodecVideo::recode_video(AVPacket *packet, AVFrame *frame) {
    // Send the compressed packet to the decoder instance.
    int ret = avcodec_send_packet(video_decode_ctx, packet);
    if (ret < 0) {
        LOGE("send packet occur error %d.\n", ret);
        av_strerror(ret, errbuf, sizeof(errbuf)); // convert the error code to a string
        recodecInfo = "send packet occur error:" + to_string(ret) + "\n error msg:" +
                      string(errbuf) + "\n";
        PostRecodecStatusMessage(recodecInfo.c_str());
        return ret;
    }
    while (1) {
        // Fetch the next decoded frame from the decoder instance.
        ret = avcodec_receive_frame(video_decode_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return (ret == AVERROR(EAGAIN)) ? 0 : 1;
        } else if (ret < 0) {
            LOGE("decode frame occur error %d.\n", ret);
            recodecInfo = "\n decode frame occur error :" + to_string(ret);
            PostRecodecStatusMessage(recodecInfo.c_str());
            break;
        }
        if (frame->pts == AV_NOPTS_VALUE) { // special-case raw H.264 streams with no pts
            // Synthesize a monotonically increasing pts from the frame
            // counter, the stream frame rate and the stream time base.
            double interval = 1.0 / av_q2d(src_video->r_frame_rate);
            frame->pts = count * interval / av_q2d(src_video->time_base);
            count++;
        }
        output_video(frame); // encode the frame and write the compressed packet
    }
    return ret;
}
int RecodecVideo::open_output_file(const char *dest_name) {
// 分配音视频文件的封装实例
int ret = avformat_alloc_output_context2(&out_fmt_ctx, nullptr, nullptr, dest_name);
if (ret < 0) {
LOGE("Can't alloc output_file %s.\n", dest_name);
recodecInfo = "\n Can't alloc output_file :" + string(dest_name);
PostRecodecStatusMessage(recodecInfo.c_str());
return -1;
}
// 打开输出流
ret = avio_open(&out_fmt_ctx->pb, dest_name, AVIO_FLAG_READ_WRITE);
if (ret < 0) {
LOGE("Can't open output_file %s.\n", dest_name);
recodecInfo = "\n Can't open output_file:" + string(dest_name);
PostRecodecStatusMessage(recodecInfo.c_str());
return -1;
}
LOGI("Success open output_file %s.\n", dest_name);
recodecInfo = "\n Success open output_file :" + string(dest_name);
PostRecodecStatusMessage(recodecInfo.c_str());
if (video_index >= 0) { // 创建编码器实例和新的视频流
enum AVCodecID video_codec_id = src_video->codecpar->codec_id;
// 查找视频编码器
// AVCodec *video_codec = (AVCodec *) avcodec_find_encoder(video_codec_id);
//使用libx264的编码器
AVCodec *video_codec = (AVCodec *) avcodec_find_encoder_by_name("libx264");
if (!video_codec) {
LOGE("video_codec not found\n");
recodecInfo = "\n video_codec not found .";
PostRecodecStatusMessage(recodecInfo.c_str());
return -1;
}
video_encode_ctx = avcodec_alloc_context3(video_codec); // 分配编码器的实例
if (!video_encode_ctx) {
LOGE("video_encode_ctx is null\n");
recodecInfo = "\n video_encode_ctx is null";
PostRecodecStatusMessage(recodecInfo.c_str());
return -1;
}
// 把源视频流中的编解码参数复制给编码器的实例
avcodec_parameters_to_context(video_encode_ctx, src_video->codecpar);
// 注意:帧率和时间基要单独赋值,因为avcodec_parameters_to_context没复制这两个参数
video_encode_ctx->framerate = src_video->r_frame_rate;
// framerate.num值过大,会导致视频头一秒变灰色
if (video_encode_ctx->framerate.num > 60) {
video_encode_ctx->framerate = (AVRational) {25, 1}; // 帧率
}
video_encode_ctx->time_base = src_video->time_base;
video_encode_ctx->gop_size = 12; // 关键帧的间隔距离
//video_encode_ctx->max_b_frames = 0; // 0表示不要B帧
// AV_CODEC_FLAG_GLOBAL_HEADER标志允许操作系统显示该视频的缩略图
if (out_fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
video_encode_ctx->flags = AV_CODEC_FLAG_GLOBAL_HEADER;
}
ret = avcodec_open2(video_encode_ctx, video_codec, nullptr); // 打开编码器的实例
if (ret < 0) {
LOGE("Can't open video_encode_ctx.\n");
av_strerror(ret, errbuf, sizeof(errbuf)); // 将错误码转换为字符串
LOGE("avcodec_open2失败:%s\n", errbuf);
recodecInfo = "\n avcodec_open2失败:" + string(errbuf);
PostRecodecStatusMessage(recodecInfo.c_str());
return -1;
}
dest_video = avformat_new_stream(out_fmt_ctx, nullptr); // 创建数据流
// 把编码器实例的参数复制给目标视频流
avcodec_parameters_from_context(dest_video->codecpar, video_encode_ctx);
// 如果后面有对视频帧转换时间基,这里就无需复制时间基
//dest_video->time_base = src_video->time_base;
dest_video->codecpar->codec_tag = 0;
}
if (audio_index >= 0) { // 源文件有音频流,就给目标文件创建音频流
AVStream *dest_audio = avformat_new_stream(out_fmt_ctx, nullptr); // 创建数据流
// 把源文件的音频参数原样复制过来
avcodec_parameters_copy(dest_audio->codecpar, src_audio->codecpar);
dest_audio->codecpar->codec_tag = 0;
}
ret = avformat_write_header(out_fmt_ctx, nullptr); // 写文件头
if (ret < 0) {
LOGE("write file_header occur error %d.\n", ret);
recodecInfo = "\n write file_header occur error :" + to_string(ret);
av_strerror(ret, errbuf, sizeof(errbuf)); // 将错误码转换为字符串
recodecInfo = "\n avformat_write_header 失败:" + string(errbuf);
PostRecodecStatusMessage(recodecInfo.c_str());
return -1;
}
LOGI("Success write file_header.\n");
recodecInfo = "Success write file_header.\n";
PostRecodecStatusMessage(recodecInfo.c_str());
return 0;
}
// Returns a JNIEnv usable on the calling thread, attaching the thread to
// the VM when it is not already attached. *isAttach is set to true only
// when this call performed the attach (the caller must then detach).
// Returns nullptr when no VM is cached or the attach fails.
JNIEnv *RecodecVideo::GetJNIEnv(bool *isAttach) {
    if (mJavaVm == nullptr) {
        LOGD("RecodecVideo::GetJNIEnv mJavaVm == nullptr");
        return nullptr;
    }
    *isAttach = false;
    JNIEnv *env = nullptr;
    if (mJavaVm->GetEnv((void **) &env, JNI_VERSION_1_6) == JNI_OK) {
        return env; // thread already attached
    }
    if (mJavaVm->AttachCurrentThread(&env, nullptr) != JNI_OK) {
        LOGD("RecodecVideo::GetJNIEnv failed to attach current thread");
        return nullptr;
    }
    *isAttach = true;
    return env;
}
void RecodecVideo::PostRecodecStatusMessage(const char *msg) {
bool isAttach = false;
JNIEnv *pEnv = GetJNIEnv(&isAttach);
if (pEnv == nullptr) {
return;
}
jobject javaObj = mJavaObj;
jmethodID mid = pEnv->GetMethodID(pEnv->GetObjectClass(javaObj), "CppStatusCallback",
"(Ljava/lang/String;)V");
jstring pJstring = pEnv->NewStringUTF(msg);
pEnv->CallVoidMethod(javaObj, mid, pJstring);
if (isAttach) {
JavaVM *pJavaVm = mJavaVm;
pJavaVm->DetachCurrentThread();
}
}
二. MediaCodec中提取媒体文件数据包后,对数据包进行解码:
硬件编解码在Android中必须用到MediaCodec提供的下层编解码芯片的接口,在NdkMediaCodec 中AMediaCodec_createDecoderByType方法来获取解码器。
1.初始化Extractor:
------> 创建 AMediaExtractor_new()
------> 设置Extractor的fd AMediaExtractor_setDataSourceFd(extractor, input_fd, 0, fileSize)
cpp
bool MediaExtratorDecodec::initExtractor() {
extractor = AMediaExtractor_new();
if (!extractor) {
LOGE("Failed to create media extractor ");
callbackInfo =
"Failed to create media extractor \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGE("inputPath:%s", sSrcPath.c_str());
FILE *inputFp = fopen(sSrcPath.c_str(), "rb");
if (!inputFp) {
LOGE("Unable to open output file :%s", sSrcPath.c_str());
callbackInfo =
"Unable to open output file :" + sSrcPath + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
struct stat buf;
stat(sSrcPath.c_str(), &buf);
size_t fileSize = buf.st_size;
int32_t input_fd = fileno(inputFp);
LOGE("input_fd:%d", input_fd);
media_status_t status = AMediaExtractor_setDataSourceFd(extractor, input_fd, 0, fileSize);
if (status != AMEDIA_OK) {
LOGE("Failed to set data source: %d", status);
callbackInfo =
"Failed to set data source :" + to_string(status) + "\n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
LOGI("Extractor initialized successfully");
return true;
}
2.选择轨道获取AMediaFormat:
------> AMediaExtractor_getTrackCount()获取提取器Extractor中的轨道数
------> AMediaExtractor_getTrackFormat(extractor, i) 遍历轨道从中获取每个轨道中的AMediaFormat
------> 筛选出音频轨道和视频轨道,分别获取音频轨道的audioTrackIndex值和视频轨道videoTrackIndex值。
cpp
// Select tracks and read their AMediaFormat.
// Iterates every track in the extractor, picks the first video and first
// audio track by MIME prefix, caches their basic parameters (dimensions,
// duration, sample rate, channel count) in members, and reports them to the
// Java layer. Returns true if at least one track was found.
bool MediaExtratorDecodec::selectTracksAndGetFormat() {
    LOGI("selectTracksAndGetFormat===========");
    size_t trackCount = AMediaExtractor_getTrackCount(extractor);
    LOGI("Total tracks: %zu", trackCount);
    callbackInfo =
            "Total tracks:" + to_string(trackCount) + "\n";
    PostStatusMessage(callbackInfo.c_str());
    for (size_t i = 0; i < trackCount; i++) {
        AMediaFormat *format = AMediaExtractor_getTrackFormat(extractor, i);
        if (!format) continue;
        const char *mime;
        if (AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mime)) {
            LOGI("Track %zu: MIME=%s", i, mime);
            // Only the FIRST matching track of each kind is selected.
            if (strncmp(mime, "video/", 6) == 0 && videoTrackIndex == -1) {
                videoTrackIndex = i;
                hasVideo = true;
                // Cache the video format parameters.
                AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_WIDTH, &videoWidth);
                AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_HEIGHT, &videoHeight);
                AMediaFormat_getInt64(format, AMEDIAFORMAT_KEY_DURATION, &videoDuration);
                LOGI("Selected video track: %d", videoTrackIndex);
                LOGI("Video track: %dx%d, duration: %lld us",
                     videoWidth, videoHeight, videoDuration);
                callbackInfo =
                        "Selected video track:" + to_string(videoTrackIndex) + "\n";
                callbackInfo = callbackInfo + ",videoWidth:" + to_string(videoWidth)
                               + ",videoHeight:" + to_string(videoHeight) + ",videoDuration:"
                               + to_string(videoDuration) + "\n";
                PostStatusMessage(callbackInfo.c_str());
            } else if (strncmp(mime, "audio/", 6) == 0 && audioTrackIndex == -1) {
                audioTrackIndex = i;
                hasAudio = true;
                // Cache the audio format parameters.
                AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &audioSampleRate);
                AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &audioChannelCount);
                LOGI("Audio track: sampleRate=%d, channels=%d",
                     audioSampleRate, audioChannelCount);
                LOGI("Selected audio track: %d", audioTrackIndex);
                callbackInfo =
                        "Selected audio track:" + to_string(audioTrackIndex) + "\n";
                callbackInfo = callbackInfo + ",audioSampleRate:" + to_string(audioSampleRate)
                               + ",audioChannelCount:" + to_string(audioChannelCount) + "\n";
                PostStatusMessage(callbackInfo.c_str());
            }
        }
        AMediaFormat_delete(format);
    }
    return hasVideo || hasAudio;
}
3.初始化解码器:
------> 切换Extractor音视频的轨道,AMediaExtractor_getTrackFormat(extractor, videoTrackIndex) 方法中获取AMediaFormat。
------> 通过AMediaCodec_createDecoderByType 创建AMediaCodec
,并设置编解码AMediaCodec_configure(codec, format, nullptr, nullptr, isEncoder)。
以下是初始化编解码相关代码:
cpp
// Initialize the decoders.
// For each present track: selects it on the extractor, reads its
// AMediaFormat and MIME, creates + configures a decoder via
// createMediaCodec(), optionally installs async callbacks with a pooled
// handler thread, and starts the codec. Returns false when a codec could
// not be created, true otherwise.
bool MediaExtratorDecodec::initDecodec(bool asyncMode) {
    // Video track.
    if (hasVideo) {
        AMediaExtractor_selectTrack(extractor, videoTrackIndex);
        mVideoFormat = AMediaExtractor_getTrackFormat(extractor, videoTrackIndex);
        AMediaFormat_getString(mVideoFormat, AMEDIAFORMAT_KEY_MIME, &video_mime);
        LOGI("video_mime: %s", video_mime);
        callbackInfo =
                "video_mime:" + string(video_mime) + "\n";
        PostStatusMessage(callbackInfo.c_str());
        // Empty codec name -> create by MIME; false -> decoder.
        mVideoCodec = createMediaCodec(mVideoFormat, video_mime, "", false /*isEncoder*/);
        if (!mVideoCodec) {
            LOGE("Failed to create video codec");
            callbackInfo =
                    "Failed to create video codec \n";
            PostStatusMessage(callbackInfo.c_str());
            return false;
        }
        if (asyncMode) {
            // Async mode: register codec callbacks and run the handler loop
            // on a worker thread from the shared pool.
            AMediaCodecOnAsyncNotifyCallback aCB = {OnInputAvailableCB, OnOutputAvailableCB,
                                                    OnFormatChangedCB, OnErrorCB};
            AMediaCodec_setAsyncNotifyCallback(mVideoCodec, aCB, this);
            ThreadTask task = []() {
                CallBackHandle();
            };
            g_threadManager->submitTask("video-decode-Thread", task, PRIORITY_NORMAL);
        }
        LOGI("create video codec success");
        callbackInfo =
                "create video codec success: \n";
        PostStatusMessage(callbackInfo.c_str());
        AMediaCodec_start(mVideoCodec);
    }
    // Audio track (mirrors the video path).
    if (hasAudio) {
        AMediaExtractor_selectTrack(extractor, audioTrackIndex);
        mAudioFormat = AMediaExtractor_getTrackFormat(extractor, audioTrackIndex);
        AMediaFormat_getString(mAudioFormat, AMEDIAFORMAT_KEY_MIME, &audio_mime);
        LOGI("audio_mime: %s", audio_mime);
        callbackInfo =
                "audio_mime:" + string(audio_mime) + "\n";
        PostStatusMessage(callbackInfo.c_str());
        mAudioCodec = createMediaCodec(mAudioFormat, audio_mime, "", false /*isEncoder*/);
        if (!mAudioCodec) {
            LOGE("Failed to create audio codec");
            callbackInfo =
                    "Failed to create audio codec \n";
            PostStatusMessage(callbackInfo.c_str());
            return false;
        }
        if (asyncMode) {
            AMediaCodecOnAsyncNotifyCallback aCB = {OnInputAvailableCB, OnOutputAvailableCB,
                                                    OnFormatChangedCB, OnErrorCB};
            AMediaCodec_setAsyncNotifyCallback(mAudioCodec, aCB, this);
            ThreadTask task = []() {
                CallBackHandle();
            };
            g_threadManager->submitTask("audio-decode-Thread", task, PRIORITY_NORMAL);
        }
        LOGI("create audio codec success");
        callbackInfo =
                "create audio codec success: \n";
        PostStatusMessage(callbackInfo.c_str());
        AMediaCodec_start(mAudioCodec);
    }
    LOGI("initDecodec initialized successfully");
    callbackInfo =
            "initDecodec initialized successfully \n";
    PostStatusMessage(callbackInfo.c_str());
    return true;
}
// Creates and configures an AMediaCodec, either by explicit component name
// (codecName non-empty) or by MIME type (encoder or decoder as requested).
// Returns the configured codec, or nullptr on any failure.
AMediaCodec *createMediaCodec(AMediaFormat *format, const char *mime, string codecName,
                              bool isEncoder) {
    ALOGV("In %s", __func__);
    if (!mime) {
        ALOGE("Please specify a mime type to create codec");
        return nullptr;
    }
    AMediaCodec *codec;
    if (!codecName.empty()) {
        codec = AMediaCodec_createCodecByName(codecName.c_str());
        if (!codec) {
            ALOGE("Unable to create codec by name: %s", codecName.c_str());
            return nullptr;
        }
        ALOGV("create codec by name: %s", codecName.c_str());
    } else {
        if (isEncoder) {
            codec = AMediaCodec_createEncoderByType(mime);
        } else {
            codec = AMediaCodec_createDecoderByType(mime);
        }
        if (!codec) {
            ALOGE("Unable to create codec by mime: %s", mime);
            return nullptr;
        }
        char *out_name = nullptr;
        AMediaCodec_getName(codec, &out_name);
        ALOGV("create codec by mime: %s", out_name);
        // AMediaCodec_getName allocates the name; it was previously leaked.
        AMediaCodec_releaseName(codec, out_name);
    }
    /* Configure codec with the given format*/
    const char *s = AMediaFormat_toString(format);
    ALOGI("Input format: %s\n", s);
    media_status_t status = AMediaCodec_configure(codec, format, nullptr, nullptr, isEncoder);
    if (status != AMEDIA_OK) {
        ALOGE("AMediaCodec_configure failed %d", status);
        AMediaCodec_delete(codec); // was leaked on configure failure
        return nullptr;
    }
    return codec;
}
4.解码过程的操作:
解码过程解析:
------> 重新选择所有轨道以重置读取位置 AMediaExtractor_selectTrack(extractor, videoTrackIndex) 和 AMediaExtractor_selectTrack(extractor, audioTrackIndex);
------> 设置读取位置到开始 AMediaExtractor_seekTo(extractor, 0, AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC);
------> 遍历 获取下一个可用输入缓冲区的索引
ssize_t inIdx = AMediaCodec_dequeueInputBuffer(mVideoCodec,
kQueueDequeueTimeoutUs)
根据inIdx的状态值得到可用的输入:
onInputAvailable(mVideoCodec, inIdx);
------> 从提取器Extractor中获取到该样本的大小:
size_t bufSize = AMediaExtractor_getSampleSize(extractor);
------> 获取输入缓冲区,以备输入的样本数据的填充:
uint8_t *buf = AMediaCodec_getInputBuffer(mVideoCodec, bufIdx, &bufSize);
------> 从提取器读取数据包,填充至输入缓冲区中:
ssize_t bytesRead = AMediaExtractor_readSampleData(extractor, buf, bufSize);
------> 把缓冲区的buf送入解码器:
media_status_t status = AMediaCodec_queueInputBuffer(mVideoCodec, bufIdx, 0 /* offset */,
bytesRead, presentationTimeUs, flag);
------> 遍历 获取下一个可用已处理数据缓冲区的索引:
ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(mVideoCodec, &info,
kQueueDequeueTimeoutUs);
根据outIdx的状态值得到可用的输出:
onOutputAvailable(mVideoCodec, outIdx, &info);
------> 获取输出缓冲区的数据:
uint8_t *buf = AMediaCodec_getOutputBuffer(mVideoCodec, bufIdx, &bufSize);
decodec():
选择对应的音视频轨道,从提取器中提取数据包进行解码的操作。
以下是解码的代码:
cpp
// 执行解码
bool MediaExtratorDecodec::decodec() {
LOGI("decodec===========");
bool asyncMode = false;
AMediaCodecBufferInfo info;
bool sawEOS = false;
int64_t lastVideoPts = -1;
int64_t lastAudioPts = -1;
// 重新选择所有轨道以重置读取位置
if (hasVideo) AMediaExtractor_selectTrack(extractor, videoTrackIndex);
if (hasAudio) AMediaExtractor_selectTrack(extractor, audioTrackIndex);
// 设置读取位置到开始
AMediaExtractor_seekTo(extractor, 0, AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC);
while (!sawEOS) {
ssize_t trackIndex = AMediaExtractor_getSampleTrackIndex(extractor);
if (trackIndex < 0) {
sawEOS = true;
break;
}
if (trackIndex == videoTrackIndex && hasVideo) {
// 检查时间戳是否有效(避免重复或倒退的时间戳)
if (AMediaExtractor_getSampleTime(extractor) > lastVideoPts) {
if (!asyncMode) {
while (!mSawOutputEOS && !mSignalledError) {
/* Queue input data */
if (!mSawInputEOS) {
ssize_t inIdx = AMediaCodec_dequeueInputBuffer(mVideoCodec,
kQueueDequeueTimeoutUs);
if (inIdx < 0 && inIdx != AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
LOGE("AMediaCodec_dequeueInputBuffer returned invalid index %zd\n",
inIdx);
mErrorCode = (media_status_t) inIdx;
return mErrorCode;
} else if (inIdx >= 0) {
onInputAvailable(mVideoCodec, inIdx);
}
}
/* Dequeue output data */
AMediaCodecBufferInfo info;
ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(mVideoCodec, &info,
kQueueDequeueTimeoutUs);
if (outIdx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
mVideoFormat = AMediaCodec_getOutputFormat(mVideoCodec);
const char *s = AMediaFormat_toString(mVideoFormat);
LOGI("Output format: %s\n", s);
} else if (outIdx >= 0) {
onOutputAvailable(mVideoCodec, outIdx, &info);
} else if (!(outIdx == AMEDIACODEC_INFO_TRY_AGAIN_LATER ||
outIdx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED)) {
LOGE("AMediaCodec_dequeueOutputBuffer returned invalid index %zd\n",
outIdx);
mErrorCode = (media_status_t) outIdx;
return mErrorCode;
}
}
} else {
unique_lock<mutex> lock(mMutex);
mDecoderDoneCondition.wait(lock, [this]() {
return (mSawOutputEOS || mSignalledError);
});
}
if (mSignalledError) {
LOGE("Received Error while Decoding");
return mErrorCode;
}
lastVideoPts = info.presentationTimeUs;
}
} else if (trackIndex == audioTrackIndex && hasAudio) { //音频轨道的解码
// 检查时间戳是否有效
if (info.presentationTimeUs > lastAudioPts) {
// 检查时间戳是否有效(避免重复或倒退的时间戳)
if (!asyncMode) {
while (!mSawOutputEOS && !mSignalledError) {
/* Queue input data */
if (!mSawInputEOS) {
ssize_t inIdx = AMediaCodec_dequeueInputBuffer(mAudioCodec,
kQueueDequeueTimeoutUs);
if (inIdx < 0 && inIdx != AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
LOGE("AMediaCodec_dequeueInputBuffer returned invalid index %zd\n",
inIdx);
mErrorCode = (media_status_t) inIdx;
return mErrorCode;
} else if (inIdx >= 0) {
onInputAvailable(mAudioCodec, inIdx);
}
}
/* Dequeue output data */
AMediaCodecBufferInfo info;
ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(mAudioCodec, &info,
kQueueDequeueTimeoutUs);
if (outIdx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
mAudioFormat = AMediaCodec_getOutputFormat(mAudioCodec);
const char *s = AMediaFormat_toString(mAudioFormat);
LOGI("Output format: %s\n", s);
} else if (outIdx >= 0) {
onOutputAvailable(mVideoCodec, outIdx, &info);
} else if (!(outIdx == AMEDIACODEC_INFO_TRY_AGAIN_LATER ||
outIdx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED)) {
LOGE("AMediaCodec_dequeueOutputBuffer returned invalid index %zd\n",
outIdx);
mErrorCode = (media_status_t) outIdx;
return mErrorCode;
}
}
} else {
unique_lock<mutex> lock(mMutex);
mDecoderDoneCondition.wait(lock, [this]() {
return (mSawOutputEOS || mSignalledError);
});
}
if (mSignalledError) {
ALOGE("Received Error while Decoding");
return mErrorCode;
}
lastAudioPts = info.presentationTimeUs;
}
}
// 短暂休眠以避免过度占用CPU
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
LOGI("media decodec completed");
callbackInfo =
"media decodec completed \n";
PostStatusMessage(callbackInfo.c_str());
return true;
}
onInputAvailable():
可用输入时,对数据的操作:
cpp
// Handles one available input buffer for the given codec (video or audio):
// reads the current sample from the extractor into the codec's input buffer,
// queues it with its timestamp/flags, and advances the extractor. A negative
// read queues an EOS buffer instead.
// NOTE(review): mSawInputEOS / mSignalledError appear shared between the
// video and audio paths — verify that is intended when both tracks decode.
void MediaExtratorDecodec::onInputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx) {
    LOGD("onInputAvailable %s", __func__);
    if (mediaCodec == mVideoCodec && mediaCodec) {
        if (mSawInputEOS || bufIdx < 0) return;
        if (mSignalledError) {
            CallBackHandle::mSawError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        // Size of the current sample in the extractor.
        // NOTE(review): bufSize is size_t, so `<= 0` only catches 0 — a
        // failed getSampleSize (negative) wraps to a huge value; verify.
        size_t bufSize = AMediaExtractor_getSampleSize(extractor);
        if (bufSize <= 0) {
            LOGE("AMediaExtractor_getSampleSize====");
            return;
        }
        // Get the codec's input buffer to fill with sample data.
        uint8_t *buf = AMediaCodec_getInputBuffer(mVideoCodec, bufIdx, &bufSize);
        if (!buf) {
            mErrorCode = AMEDIA_ERROR_IO;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        // Read the sample data from the extractor into the buffer.
        ssize_t bytesRead = AMediaExtractor_readSampleData(extractor, buf, bufSize);
        if (bytesRead < 0) {
            LOGI("reading video sample data: %zd", bytesRead);
            // End of input: queue an empty EOS buffer.
            AMediaCodec_queueInputBuffer(mVideoCodec, bufIdx, 0, 0, 0,
                                         AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
            LOGI("从提取器读取数据到结束尾");
            callbackInfo =
                    "视频轨道从提取器读取数据到结束尾 reading sample data:" + to_string(bytesRead) +
                    "\n";
            PostStatusMessage(callbackInfo.c_str());
            return;
        }
        uint32_t flag = AMediaExtractor_getSampleFlags(extractor);
        int64_t presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
        // NOTE(review): comparing sample *flags* against the media_status_t
        // constant AMEDIA_ERROR_MALFORMED looks suspect — verify intent.
        if (flag == AMEDIA_ERROR_MALFORMED) {
            mErrorCode = (media_status_t) flag;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        if (flag == AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) mSawInputEOS = true;
        LOGD("video - %s bytesRead : %zd presentationTimeUs : %" PRId64 " mSawInputEOS : %s",
             __FUNCTION__,
             bytesRead, presentationTimeUs, mSawInputEOS ? "TRUE" : "FALSE");
        // Queue the filled buffer into the decoder.
        media_status_t status = AMediaCodec_queueInputBuffer(mVideoCodec, bufIdx, 0 /* offset */,
                                                             bytesRead, presentationTimeUs, flag);
        if (AMEDIA_OK != status) {
            mErrorCode = status;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        // Move the extractor to the next sample.
        if (!AMediaExtractor_advance(extractor)) {
            return;
        }
    } else if (mediaCodec == mAudioCodec && mediaCodec) {
        // Audio path: mirrors the video path above.
        if (mSawInputEOS || bufIdx < 0) return;
        if (mSignalledError) {
            CallBackHandle::mSawError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        size_t bufSize = AMediaExtractor_getSampleSize(extractor);
        if (bufSize <= 0) {
            LOGE("AMediaExtractor_getSampleSize====");
            return;
        }
        // Get the codec's input buffer to fill with sample data.
        uint8_t *buf = AMediaCodec_getInputBuffer(mAudioCodec, bufIdx, &bufSize);
        if (!buf) {
            mErrorCode = AMEDIA_ERROR_IO;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        // Read the sample data from the extractor into the buffer.
        ssize_t bytesRead = AMediaExtractor_readSampleData(extractor, buf, bufSize);
        if (bytesRead < 0) {
            LOGI("reading audio sample data: %zd", bytesRead);
            // End of input: queue an empty EOS buffer.
            AMediaCodec_queueInputBuffer(mAudioCodec, bufIdx, 0, 0, 0,
                                         AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
            LOGI("从提取器读取音频轨道数据到结束尾");
            callbackInfo =
                    "音频轨道从提取器读取数据到结束尾 reading sample data:" + to_string(bytesRead) +
                    "\n";
            PostStatusMessage(callbackInfo.c_str());
            return;
        }
        uint32_t flag = AMediaExtractor_getSampleFlags(extractor);
        int64_t presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
        if (flag == AMEDIA_ERROR_MALFORMED) {
            mErrorCode = (media_status_t) flag;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        if (flag == AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) mSawInputEOS = true;
        LOGD("audio - %s bytesRead : %zd presentationTimeUs : %" PRId64 " mSawInputEOS : %s",
             __FUNCTION__,
             bytesRead, presentationTimeUs, mSawInputEOS ? "TRUE" : "FALSE");
        // Queue the filled buffer into the decoder.
        media_status_t status = AMediaCodec_queueInputBuffer(mAudioCodec, bufIdx, 0 /* offset */,
                                                             bytesRead, presentationTimeUs, flag);
        if (AMEDIA_OK != status) {
            mErrorCode = status;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        // Move the extractor to the next sample.
        if (!AMediaExtractor_advance(extractor)) {
            return;
        }
    }
}
onOutputAvailable():
当有可用的输出,对数据的操作:
cpp
// Called when a decoded output buffer is available.
// Writes the decoded payload to the dump file (if one is open), releases the
// output buffer back to the codec, and tracks output end-of-stream state.
// @param mediaCodec  the codec that produced the buffer (video or audio)
// @param bufIdx      index of the available output buffer (< 0 is ignored)
// @param bufferInfo  metadata (offset/size/flags/pts) for the buffer
void MediaExtratorDecodec::onOutputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx,
                                             AMediaCodecBufferInfo *bufferInfo) {
    LOGD("In %s", __func__);
    if (mediaCodec == mVideoCodec && mediaCodec) {
        if (mSawOutputEOS || bufIdx < 0) return;
        if (mSignalledError) {
            CallBackHandle::mSawError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        if (mOutFp != nullptr) {
            size_t bufSize;
            uint8_t *buf = AMediaCodec_getOutputBuffer(mVideoCodec, bufIdx, &bufSize);
            if (buf) {
                // BUGFIX: honor bufferInfo->offset — the valid payload does not
                // always start at the beginning of the output buffer.
                fwrite(buf + bufferInfo->offset, sizeof(char), bufferInfo->size, mOutFp);
                LOGD("bytes written into file %d\n", bufferInfo->size);
            }
        }
        AMediaCodec_releaseOutputBuffer(mVideoCodec, bufIdx, false);
        mSawOutputEOS = (0 != (bufferInfo->flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM));
        mNumOutputVideoFrame++;
        LOGD("video - %s index : %d mSawOutputEOS : %s count : %u", __FUNCTION__, bufIdx,
             mSawOutputEOS ? "TRUE" : "FALSE", mNumOutputVideoFrame);
        if (mSawOutputEOS) {
            CallBackHandle::mIsDone = true;
            mDecoderDoneCondition.notify_one();
        }
    } else if (mediaCodec == mAudioCodec && mediaCodec) {
        if (mSawOutputEOS || bufIdx < 0) return;
        if (mSignalledError) {
            CallBackHandle::mSawError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        if (mOutFp != nullptr) {
            size_t bufSize;
            uint8_t *buf = AMediaCodec_getOutputBuffer(mAudioCodec, bufIdx, &bufSize);
            if (buf) {
                // BUGFIX: honor bufferInfo->offset (see video branch).
                fwrite(buf + bufferInfo->offset, sizeof(char), bufferInfo->size, mOutFp);
                LOGD("bytes written into file %d\n", bufferInfo->size);
            }
        }
        AMediaCodec_releaseOutputBuffer(mAudioCodec, bufIdx, false);
        mSawOutputEOS = (0 != (bufferInfo->flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM));
        mNumOutputAudioFrame++;
        // BUGFIX: this is the audio branch — the log tag said "video".
        LOGD("audio - %s index : %d mSawOutputEOS : %s count : %u", __FUNCTION__, bufIdx,
             mSawOutputEOS ? "TRUE" : "FALSE", mNumOutputAudioFrame);
        if (mSawOutputEOS) {
            CallBackHandle::mIsDone = true;
            mDecoderDoneCondition.notify_one();
        }
    }
}
5.完整的编解码的代码:
以上的代码放在本人的GitHub项目中:https://github.com/wangyongyao1989/FFmpegPractices
中的MediaExtratorDecodec.cpp:
cpp
// Author : wangyongyao https://github.com/wangyongyao1989
// Created by MMM on 2025/9/29.
//
#include <sys/stat.h>
#include "includes/MediaExtratorDecodec.h"
// Constructor: captures the JNI environment/VM, pins the Java peer with a
// global reference, and spins up the worker thread pool used for async
// decode callbacks.
MediaExtratorDecodec::MediaExtratorDecodec(JNIEnv *env, jobject thiz) {
    mEnv = env;
    env->GetJavaVM(&mJavaVm);
    mJavaObj = env->NewGlobalRef(thiz);
    // Build the pool configuration first, then create and initialize the pool.
    ThreadPoolConfig poolConfig;
    poolConfig.minThreads = 2;
    poolConfig.maxThreads = 4;
    poolConfig.idleTimeoutMs = 30000;
    poolConfig.queueSize = 50;
    g_threadManager = std::make_unique<AndroidThreadManager>();
    g_threadManager->initThreadPool(poolConfig);
}
// Destructor: releases media resources, then drops the JNI references and
// tears down the thread pool.
MediaExtratorDecodec::~MediaExtratorDecodec() {
    // Release extractor/codecs first; release() touches no JNI state.
    release();
    // BUGFIX: the original dereferenced mEnv unconditionally before its null
    // check, and deleted the global ref before releasing resources.
    // NOTE(review): mEnv was captured on the constructor thread; JNIEnv* is
    // thread-local, so this is only valid if the destructor runs on the same
    // thread — confirm, or re-fetch the env via mJavaVm here.
    if (mEnv && mJavaObj) {
        mEnv->DeleteGlobalRef(mJavaObj);
    }
    mJavaObj = nullptr;
    mEnv = nullptr;
    mJavaVm = nullptr;
    g_threadManager.reset();
}
// Entry point: extracts samples from inputPath and decodes them.
// Pipeline: init extractor -> select tracks -> create/start decoders ->
// synchronous decode loop -> release resources.
// @param inputPath  path of the media file to decode
void
MediaExtratorDecodec::startMediaExtratorDecodec(const char *inputPath) {
    sSrcPath = inputPath;
    LOGI("sSrcPath :%s \n ", sSrcPath.c_str());
    callbackInfo =
            "sSrcPath:" + sSrcPath + "\n";
    PostStatusMessage(callbackInfo.c_str());
    // 1. Initialize the extractor.
    if (!initExtractor()) {
        LOGE("Failed to initialize extractor");
        callbackInfo =
                "Failed to initialize extractor \n";
        PostStatusMessage(callbackInfo.c_str());
        release();  // BUGFIX: free anything already allocated on failure
        return;
    }
    // 2. Select the audio/video tracks.
    if (!selectTracksAndGetFormat()) {
        LOGE("No valid tracks found");
        callbackInfo =
                "No valid tracks found \n";
        PostStatusMessage(callbackInfo.c_str());
        release();  // BUGFIX: the extractor leaked on this path
        return;
    }
    // 3. Create and start the decoders (synchronous mode).
    if (!initDecodec(false)) {
        LOGE("Failed to initialize Decodec");
        callbackInfo =
                "Failed to initialize Decodec \n";
        PostStatusMessage(callbackInfo.c_str());
        release();  // BUGFIX: extractor/codecs leaked on this path
        return;
    }
    // 4. Run the decode loop.
    if (!decodec()) {
        LOGE("Decodec failed");
        callbackInfo =
                "Decodec failed \n";
        PostStatusMessage(callbackInfo.c_str());
        release();  // BUGFIX: extractor/codecs leaked on this path
        return;
    }
    // Release resources on the success path.
    release();
}
// Initializes the AMediaExtractor from sSrcPath via a file descriptor.
// @return true on success; false if the file can't be opened/stat'ed or the
//         extractor rejects the data source.
bool MediaExtratorDecodec::initExtractor() {
    extractor = AMediaExtractor_new();
    if (!extractor) {
        LOGE("Failed to create media extractor ");
        callbackInfo =
                "Failed to create media extractor \n";
        PostStatusMessage(callbackInfo.c_str());
        return false;
    }
    LOGE("inputPath:%s", sSrcPath.c_str());
    FILE *inputFp = fopen(sSrcPath.c_str(), "rb");
    if (!inputFp) {
        // BUGFIX: message said "output file" but this is the input file.
        LOGE("Unable to open input file :%s", sSrcPath.c_str());
        callbackInfo =
                "Unable to open input file :" + sSrcPath + "\n";
        PostStatusMessage(callbackInfo.c_str());
        return false;
    }
    // BUGFIX: the stat() result was unchecked; a failure left fileSize garbage.
    struct stat buf;
    if (stat(sSrcPath.c_str(), &buf) != 0) {
        LOGE("Failed to stat input file :%s", sSrcPath.c_str());
        fclose(inputFp);
        return false;
    }
    size_t fileSize = buf.st_size;
    int32_t input_fd = fileno(inputFp);
    LOGE("input_fd:%d", input_fd);
    media_status_t status = AMediaExtractor_setDataSourceFd(extractor, input_fd, 0, fileSize);
    // BUGFIX: inputFp was never closed (fd leak). Per the NDK docs the caller
    // keeps ownership of the fd and may close it as soon as
    // AMediaExtractor_setDataSourceFd returns.
    fclose(inputFp);
    if (status != AMEDIA_OK) {
        LOGE("Failed to set data source: %d", status);
        callbackInfo =
                "Failed to set data source :" + to_string(status) + "\n";
        PostStatusMessage(callbackInfo.c_str());
        return false;
    }
    LOGI("Extractor initialized successfully");
    return true;
}
// Scans all extractor tracks, picks the first video and first audio track,
// and caches their basic format parameters (size/duration, sample rate/channels).
// @return true if at least one audio or video track was found.
bool MediaExtratorDecodec::selectTracksAndGetFormat() {
    LOGI("selectTracksAndGetFormat===========");
    size_t trackCount = AMediaExtractor_getTrackCount(extractor);
    LOGI("Total tracks: %zu", trackCount);
    callbackInfo =
            "Total tracks:" + to_string(trackCount) + "\n";
    PostStatusMessage(callbackInfo.c_str());
    for (size_t i = 0; i < trackCount; i++) {
        AMediaFormat *format = AMediaExtractor_getTrackFormat(extractor, i);
        if (!format) continue;
        const char *mime;
        if (AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mime)) {
            LOGI("Track %zu: MIME=%s", i, mime);
            if (strncmp(mime, "video/", 6) == 0 && videoTrackIndex == -1) {
                videoTrackIndex = i;
                hasVideo = true;
                // Cache video parameters for logging / later configuration.
                AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_WIDTH, &videoWidth);
                AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_HEIGHT, &videoHeight);
                AMediaFormat_getInt64(format, AMEDIAFORMAT_KEY_DURATION, &videoDuration);
                LOGI("Selected video track: %d", videoTrackIndex);
                // BUGFIX: int64_t must be printed with PRId64, not "%lld"
                // (the file already uses PRId64 elsewhere).
                LOGI("Video track: %dx%d, duration: %" PRId64 " us",
                     videoWidth, videoHeight, videoDuration);
                callbackInfo =
                        "Selected video track:" + to_string(videoTrackIndex) + "\n";
                callbackInfo = callbackInfo + ",videoWidth:" + to_string(videoWidth)
                               + ",videoHeight:" + to_string(videoHeight) + ",videoDuration:"
                               + to_string(videoDuration) + "\n";
                PostStatusMessage(callbackInfo.c_str());
            } else if (strncmp(mime, "audio/", 6) == 0 && audioTrackIndex == -1) {
                audioTrackIndex = i;
                hasAudio = true;
                // Cache audio parameters for logging / later configuration.
                AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &audioSampleRate);
                AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &audioChannelCount);
                LOGI("Audio track: sampleRate=%d, channels=%d",
                     audioSampleRate, audioChannelCount);
                LOGI("Selected audio track: %d", audioTrackIndex);
                callbackInfo =
                        "Selected audio track:" + to_string(audioTrackIndex) + "\n";
                callbackInfo = callbackInfo + ",audioSampleRate:" + to_string(audioSampleRate)
                               + ",audioChannelCount:" + to_string(audioChannelCount) + "\n";
                PostStatusMessage(callbackInfo.c_str());
            }
        }
        AMediaFormat_delete(format);
    }
    return hasVideo || hasAudio;
}
// Initializes the decoders (the original comment said "muxer", but this
// selects the extractor tracks, creates a decoder AMediaCodec per track, and
// starts it; asyncMode additionally installs async callbacks and submits a
// callback-handler task to the thread pool).
// @param asyncMode  true to use the async AMediaCodec callback API
// @return true when every present track got a started decoder
bool MediaExtratorDecodec::initDecodec(bool asyncMode) {
// Set up the video track's decoder.
if (hasVideo) {
AMediaExtractor_selectTrack(extractor, videoTrackIndex);
mVideoFormat = AMediaExtractor_getTrackFormat(extractor, videoTrackIndex);
AMediaFormat_getString(mVideoFormat, AMEDIAFORMAT_KEY_MIME, &video_mime);
LOGI("video_mime: %s", video_mime);
callbackInfo =
"video_mime:" + string(video_mime) + "\n";
PostStatusMessage(callbackInfo.c_str());
mVideoCodec = createMediaCodec(mVideoFormat, video_mime, "", false /*isEncoder*/);
if (!mVideoCodec) {
LOGE("Failed to create video codec");
callbackInfo =
"Failed to create video codec \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
if (asyncMode) {
// Route codec events (input/output available, format change, error) to
// the static CB trampolines with `this` as userdata.
AMediaCodecOnAsyncNotifyCallback aCB = {OnInputAvailableCB, OnOutputAvailableCB,
OnFormatChangedCB, OnErrorCB};
AMediaCodec_setAsyncNotifyCallback(mVideoCodec, aCB, this);
// NOTE(review): this lambda constructs and immediately destroys a
// temporary CallBackHandle — likely intended to run a handler loop;
// confirm the intended behavior.
ThreadTask task = []() {
CallBackHandle();
};
g_threadManager->submitTask("video-decode-Thread", task, PRIORITY_NORMAL);
}
LOGI("create video codec success");
callbackInfo =
"create video codec success: \n";
PostStatusMessage(callbackInfo.c_str());
AMediaCodec_start(mVideoCodec);
}
// Set up the audio track's decoder (mirrors the video path above).
if (hasAudio) {
AMediaExtractor_selectTrack(extractor, audioTrackIndex);
mAudioFormat = AMediaExtractor_getTrackFormat(extractor, audioTrackIndex);
AMediaFormat_getString(mAudioFormat, AMEDIAFORMAT_KEY_MIME, &audio_mime);
LOGI("audio_mime: %s", audio_mime);
callbackInfo =
"audio_mime:" + string(audio_mime) + "\n";
PostStatusMessage(callbackInfo.c_str());
mAudioCodec = createMediaCodec(mAudioFormat, audio_mime, "", false /*isEncoder*/);
if (!mAudioCodec) {
LOGE("Failed to create audio codec");
callbackInfo =
"Failed to create audio codec \n";
PostStatusMessage(callbackInfo.c_str());
return false;
}
if (asyncMode) {
AMediaCodecOnAsyncNotifyCallback aCB = {OnInputAvailableCB, OnOutputAvailableCB,
OnFormatChangedCB, OnErrorCB};
AMediaCodec_setAsyncNotifyCallback(mAudioCodec, aCB, this);
ThreadTask task = []() {
CallBackHandle();
};
g_threadManager->submitTask("audio-decode-Thread", task, PRIORITY_NORMAL);
}
LOGI("create audio codec success");
callbackInfo =
"create audio codec success: \n";
PostStatusMessage(callbackInfo.c_str());
AMediaCodec_start(mAudioCodec);
}
LOGI("initDecodec initialized successfully");
callbackInfo =
"initDecodec initialized successfully \n";
PostStatusMessage(callbackInfo.c_str());
return true;
}
// 执行解码
bool MediaExtratorDecodec::decodec() {
LOGI("decodec===========");
bool asyncMode = false;
AMediaCodecBufferInfo info;
bool sawEOS = false;
int64_t lastVideoPts = -1;
int64_t lastAudioPts = -1;
// 重新选择所有轨道以重置读取位置
if (hasVideo) AMediaExtractor_selectTrack(extractor, videoTrackIndex);
if (hasAudio) AMediaExtractor_selectTrack(extractor, audioTrackIndex);
// 设置读取位置到开始
AMediaExtractor_seekTo(extractor, 0, AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC);
while (!sawEOS) {
ssize_t trackIndex = AMediaExtractor_getSampleTrackIndex(extractor);
if (trackIndex < 0) {
sawEOS = true;
break;
}
if (trackIndex == videoTrackIndex && hasVideo) {
// 检查时间戳是否有效(避免重复或倒退的时间戳)
if (AMediaExtractor_getSampleTime(extractor) > lastVideoPts) {
if (!asyncMode) {
while (!mSawOutputEOS && !mSignalledError) {
/* Queue input data */
if (!mSawInputEOS) {
ssize_t inIdx = AMediaCodec_dequeueInputBuffer(mVideoCodec,
kQueueDequeueTimeoutUs);
if (inIdx < 0 && inIdx != AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
LOGE("AMediaCodec_dequeueInputBuffer returned invalid index %zd\n",
inIdx);
mErrorCode = (media_status_t) inIdx;
return mErrorCode;
} else if (inIdx >= 0) {
onInputAvailable(mVideoCodec, inIdx);
}
}
/* Dequeue output data */
AMediaCodecBufferInfo info;
ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(mVideoCodec, &info,
kQueueDequeueTimeoutUs);
if (outIdx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
mVideoFormat = AMediaCodec_getOutputFormat(mVideoCodec);
const char *s = AMediaFormat_toString(mVideoFormat);
LOGI("Output format: %s\n", s);
} else if (outIdx >= 0) {
onOutputAvailable(mVideoCodec, outIdx, &info);
} else if (!(outIdx == AMEDIACODEC_INFO_TRY_AGAIN_LATER ||
outIdx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED)) {
LOGE("AMediaCodec_dequeueOutputBuffer returned invalid index %zd\n",
outIdx);
mErrorCode = (media_status_t) outIdx;
return mErrorCode;
}
}
} else {
unique_lock<mutex> lock(mMutex);
mDecoderDoneCondition.wait(lock, [this]() {
return (mSawOutputEOS || mSignalledError);
});
}
if (mSignalledError) {
LOGE("Received Error while Decoding");
return mErrorCode;
}
lastVideoPts = info.presentationTimeUs;
}
} else if (trackIndex == audioTrackIndex && hasAudio) { //音频轨道的解码
// 检查时间戳是否有效
if (info.presentationTimeUs > lastAudioPts) {
// 检查时间戳是否有效(避免重复或倒退的时间戳)
if (!asyncMode) {
while (!mSawOutputEOS && !mSignalledError) {
/* Queue input data */
if (!mSawInputEOS) {
ssize_t inIdx = AMediaCodec_dequeueInputBuffer(mAudioCodec,
kQueueDequeueTimeoutUs);
if (inIdx < 0 && inIdx != AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
LOGE("AMediaCodec_dequeueInputBuffer returned invalid index %zd\n",
inIdx);
mErrorCode = (media_status_t) inIdx;
return mErrorCode;
} else if (inIdx >= 0) {
onInputAvailable(mAudioCodec, inIdx);
}
}
/* Dequeue output data */
AMediaCodecBufferInfo info;
ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(mAudioCodec, &info,
kQueueDequeueTimeoutUs);
if (outIdx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
mAudioFormat = AMediaCodec_getOutputFormat(mAudioCodec);
const char *s = AMediaFormat_toString(mAudioFormat);
LOGI("Output format: %s\n", s);
} else if (outIdx >= 0) {
onOutputAvailable(mVideoCodec, outIdx, &info);
} else if (!(outIdx == AMEDIACODEC_INFO_TRY_AGAIN_LATER ||
outIdx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED)) {
LOGE("AMediaCodec_dequeueOutputBuffer returned invalid index %zd\n",
outIdx);
mErrorCode = (media_status_t) outIdx;
return mErrorCode;
}
}
} else {
unique_lock<mutex> lock(mMutex);
mDecoderDoneCondition.wait(lock, [this]() {
return (mSawOutputEOS || mSignalledError);
});
}
if (mSignalledError) {
ALOGE("Received Error while Decoding");
return mErrorCode;
}
lastAudioPts = info.presentationTimeUs;
}
}
// 短暂休眠以避免过度占用CPU
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
LOGI("media decodec completed");
callbackInfo =
"media decodec completed \n";
PostStatusMessage(callbackInfo.c_str());
return true;
}
// Releases the extractor, codecs, and cached track formats.
// Safe to call more than once: every handle is nulled after it is freed.
void MediaExtratorDecodec::release() {
    if (extractor) {
        AMediaExtractor_delete(extractor);
        extractor = nullptr;
    }
    if (mVideoFormat) {
        AMediaFormat_delete(mVideoFormat);
        mVideoFormat = nullptr;
    }
    if (mVideoCodec) {
        AMediaCodec_stop(mVideoCodec);
        AMediaCodec_delete(mVideoCodec);
        // BUGFIX: release() is called from both startMediaExtratorDecodec()
        // and the destructor; without nulling the handle the second call
        // stopped/deleted an already-freed codec.
        mVideoCodec = nullptr;
    }
    if (mAudioCodec) {
        AMediaCodec_stop(mAudioCodec);
        AMediaCodec_delete(mAudioCodec);
        mAudioCodec = nullptr;  // BUGFIX: see above
    }
    if (mAudioFormat) {
        AMediaFormat_delete(mAudioFormat);
        mAudioFormat = nullptr;
    }
    LOGI("Resources released");
}
// Called when an input buffer is available on a codec.
// Reads the next sample from the extractor into the input buffer, queues it
// to the decoder, and advances the extractor. Signals EOS to the codec when
// the extractor has no more data.
// @param mediaCodec  the codec requesting input (video or audio)
// @param bufIdx      index of the free input buffer (< 0 is ignored)
void MediaExtratorDecodec::onInputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx) {
    LOGD("onInputAvailable %s", __func__);
    if (mediaCodec == mVideoCodec && mediaCodec) {
        if (mSawInputEOS || bufIdx < 0) return;
        if (mSignalledError) {
            CallBackHandle::mSawError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        // BUGFIX: AMediaExtractor_getSampleSize() returns ssize_t (-1 when no
        // sample is available); storing it in a size_t made the `<= 0` check
        // unreachable because -1 wrapped to a huge unsigned value.
        ssize_t sampleSize = AMediaExtractor_getSampleSize(extractor);
        if (sampleSize <= 0) {
            LOGE("AMediaExtractor_getSampleSize====");
            return;
        }
        // Get the input buffer; bufSize is overwritten with its real capacity.
        size_t bufSize = (size_t) sampleSize;
        uint8_t *buf = AMediaCodec_getInputBuffer(mVideoCodec, bufIdx, &bufSize);
        if (!buf) {
            mErrorCode = AMEDIA_ERROR_IO;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        // Copy the sample payload from the extractor.
        ssize_t bytesRead = AMediaExtractor_readSampleData(extractor, buf, bufSize);
        if (bytesRead < 0) {
            LOGI("reading video sample data: %zd", bytesRead);
            // No more data: queue an empty EOS buffer to flush the decoder.
            AMediaCodec_queueInputBuffer(mVideoCodec, bufIdx, 0, 0, 0,
                                         AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
            LOGI("从提取器读取数据到结束尾");
            callbackInfo =
                    "视频轨道从提取器读取数据到结束尾 reading sample data:" + to_string(bytesRead) +
                    "\n";
            PostStatusMessage(callbackInfo.c_str());
            return;
        }
        uint32_t flag = AMediaExtractor_getSampleFlags(extractor);
        int64_t presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
        // NOTE(review): comparing extractor sample flags against
        // AMEDIA_ERROR_MALFORMED / AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM looks
        // suspect — getSampleFlags() reports SYNC/ENCRYPTED bits, not error or
        // codec buffer flags. Kept as-is; confirm against the NDK docs.
        if (flag == AMEDIA_ERROR_MALFORMED) {
            mErrorCode = (media_status_t) flag;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        if (flag == AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) mSawInputEOS = true;
        LOGD("video - %s bytesRead : %zd presentationTimeUs : %" PRId64 " mSawInputEOS : %s",
             __FUNCTION__,
             bytesRead, presentationTimeUs, mSawInputEOS ? "TRUE" : "FALSE");
        // Submit the sample to the decoder.
        media_status_t status = AMediaCodec_queueInputBuffer(mVideoCodec, bufIdx, 0 /* offset */,
                                                             bytesRead, presentationTimeUs, flag);
        if (AMEDIA_OK != status) {
            mErrorCode = status;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        // Advance only after the sample was queued successfully.
        if (!AMediaExtractor_advance(extractor)) {
            return;
        }
    } else if (mediaCodec == mAudioCodec && mediaCodec) {
        if (mSawInputEOS || bufIdx < 0) return;
        if (mSignalledError) {
            CallBackHandle::mSawError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        // BUGFIX: same signed/unsigned fix as the video branch above.
        ssize_t sampleSize = AMediaExtractor_getSampleSize(extractor);
        if (sampleSize <= 0) {
            LOGE("AMediaExtractor_getSampleSize====");
            return;
        }
        size_t bufSize = (size_t) sampleSize;
        uint8_t *buf = AMediaCodec_getInputBuffer(mAudioCodec, bufIdx, &bufSize);
        if (!buf) {
            mErrorCode = AMEDIA_ERROR_IO;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        ssize_t bytesRead = AMediaExtractor_readSampleData(extractor, buf, bufSize);
        if (bytesRead < 0) {
            LOGI("reading audio sample data: %zd", bytesRead);
            // No more data: queue an empty EOS buffer to flush the decoder.
            AMediaCodec_queueInputBuffer(mAudioCodec, bufIdx, 0, 0, 0,
                                         AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
            LOGI("从提取器读取音频轨道数据到结束尾");
            callbackInfo =
                    "音频轨道从提取器读取数据到结束尾 reading sample data:" + to_string(bytesRead) +
                    "\n";
            PostStatusMessage(callbackInfo.c_str());
            return;
        }
        uint32_t flag = AMediaExtractor_getSampleFlags(extractor);
        int64_t presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
        // NOTE(review): same questionable flag comparisons as the video branch.
        if (flag == AMEDIA_ERROR_MALFORMED) {
            mErrorCode = (media_status_t) flag;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        if (flag == AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) mSawInputEOS = true;
        LOGD("audio - %s bytesRead : %zd presentationTimeUs : %" PRId64 " mSawInputEOS : %s",
             __FUNCTION__,
             bytesRead, presentationTimeUs, mSawInputEOS ? "TRUE" : "FALSE");
        media_status_t status = AMediaCodec_queueInputBuffer(mAudioCodec, bufIdx, 0 /* offset */,
                                                             bytesRead, presentationTimeUs, flag);
        if (AMEDIA_OK != status) {
            mErrorCode = status;
            mSignalledError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        if (!AMediaExtractor_advance(extractor)) {
            return;
        }
    }
}
// Called when a decoded output buffer is available.
// Writes the decoded payload to the dump file (if one is open), releases the
// output buffer back to the codec, and tracks output end-of-stream state.
// @param mediaCodec  the codec that produced the buffer (video or audio)
// @param bufIdx      index of the available output buffer (< 0 is ignored)
// @param bufferInfo  metadata (offset/size/flags/pts) for the buffer
void MediaExtratorDecodec::onOutputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx,
                                             AMediaCodecBufferInfo *bufferInfo) {
    LOGD("In %s", __func__);
    if (mediaCodec == mVideoCodec && mediaCodec) {
        if (mSawOutputEOS || bufIdx < 0) return;
        if (mSignalledError) {
            CallBackHandle::mSawError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        if (mOutFp != nullptr) {
            size_t bufSize;
            uint8_t *buf = AMediaCodec_getOutputBuffer(mVideoCodec, bufIdx, &bufSize);
            if (buf) {
                // BUGFIX: honor bufferInfo->offset — the valid payload does not
                // always start at the beginning of the output buffer.
                fwrite(buf + bufferInfo->offset, sizeof(char), bufferInfo->size, mOutFp);
                LOGD("bytes written into file %d\n", bufferInfo->size);
            }
        }
        AMediaCodec_releaseOutputBuffer(mVideoCodec, bufIdx, false);
        mSawOutputEOS = (0 != (bufferInfo->flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM));
        mNumOutputVideoFrame++;
        LOGD("video - %s index : %d mSawOutputEOS : %s count : %u", __FUNCTION__, bufIdx,
             mSawOutputEOS ? "TRUE" : "FALSE", mNumOutputVideoFrame);
        if (mSawOutputEOS) {
            CallBackHandle::mIsDone = true;
            mDecoderDoneCondition.notify_one();
        }
    } else if (mediaCodec == mAudioCodec && mediaCodec) {
        if (mSawOutputEOS || bufIdx < 0) return;
        if (mSignalledError) {
            CallBackHandle::mSawError = true;
            mDecoderDoneCondition.notify_one();
            return;
        }
        if (mOutFp != nullptr) {
            size_t bufSize;
            uint8_t *buf = AMediaCodec_getOutputBuffer(mAudioCodec, bufIdx, &bufSize);
            if (buf) {
                // BUGFIX: honor bufferInfo->offset (see video branch).
                fwrite(buf + bufferInfo->offset, sizeof(char), bufferInfo->size, mOutFp);
                LOGD("bytes written into file %d\n", bufferInfo->size);
            }
        }
        AMediaCodec_releaseOutputBuffer(mAudioCodec, bufIdx, false);
        mSawOutputEOS = (0 != (bufferInfo->flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM));
        mNumOutputAudioFrame++;
        // BUGFIX: this is the audio branch — the log tag said "video".
        LOGD("audio - %s index : %d mSawOutputEOS : %s count : %u", __FUNCTION__, bufIdx,
             mSawOutputEOS ? "TRUE" : "FALSE", mNumOutputAudioFrame);
        if (mSawOutputEOS) {
            CallBackHandle::mIsDone = true;
            mDecoderDoneCondition.notify_one();
        }
    }
}
// Async callback: the codec reports a new output format; cache it.
void MediaExtratorDecodec::onFormatChanged(AMediaCodec *mediaCodec, AMediaFormat *format) {
LOGD("In %s", __func__);
if (mediaCodec == mVideoCodec && mediaCodec) {
LOGD("%s { %s }", __FUNCTION__, AMediaFormat_toString(format));
// NOTE(review): the previous mVideoFormat is overwritten without
// AMediaFormat_delete — possible leak. Ownership of `format` in this
// callback is not established here; confirm against the AMediaCodec
// async-callback docs before freeing either pointer.
mVideoFormat = format;
}
if (mediaCodec == mAudioCodec && mediaCodec) {
LOGD("%s { %s }", __FUNCTION__, AMediaFormat_toString(format));
// NOTE(review): same potential leak as the video branch above.
mAudioFormat = format;
}
}
// Async callback: a codec reported a fatal error.
// Records the error, raises the error flag, and wakes any waiter on
// mDecoderDoneCondition.
void MediaExtratorDecodec::onError(AMediaCodec *mediaCodec, media_status_t err) {
    LOGD("In %s", __func__);
    // BUGFIX: errors from the audio codec were silently ignored, which could
    // leave decodec() waiting on the condition variable forever; handle both
    // codecs identically. (Also: was ALOGE, which this file does not use.)
    if ((mediaCodec == mVideoCodec || mediaCodec == mAudioCodec) && mediaCodec) {
        LOGE("Received Error %d", err);
        mErrorCode = err;
        mSignalledError = true;
        mDecoderDoneCondition.notify_one();
    }
}
// Returns a JNIEnv valid for the calling thread, attaching the thread to the
// VM when needed.
// @param isAttach  out: set true when this call attached the thread (the
//                  caller must later DetachCurrentThread)
// @return the thread's JNIEnv, or nullptr when no VM is available or the
//         attach fails
JNIEnv *MediaExtratorDecodec::GetJNIEnv(bool *isAttach) {
    if (mJavaVm == nullptr) {
        LOGD("GetJNIEnv mJavaVm == nullptr");
        return nullptr;
    }
    *isAttach = false;
    JNIEnv *jniEnv = nullptr;
    // Already attached? GetEnv succeeds and we are done.
    if (mJavaVm->GetEnv((void **) &jniEnv, JNI_VERSION_1_6) != JNI_OK) {
        // Not attached yet: attach this thread and remember to detach later.
        if (mJavaVm->AttachCurrentThread(&jniEnv, nullptr) != JNI_OK) {
            LOGD("GetJNIEnv failed to attach current thread");
            return nullptr;
        }
        *isAttach = true;
    }
    return jniEnv;
}
void MediaExtratorDecodec::PostStatusMessage(const char *msg) {
bool isAttach = false;
JNIEnv *pEnv = GetJNIEnv(&isAttach);
if (pEnv == nullptr) {
return;
}
jobject javaObj = mJavaObj;
jmethodID mid = pEnv->GetMethodID(pEnv->GetObjectClass(javaObj), "CppStatusCallback",
"(Ljava/lang/String;)V");
jstring pJstring = pEnv->NewStringUTF(msg);
pEnv->CallVoidMethod(javaObj, mid, pJstring);
if (isAttach) {
JavaVM *pJavaVm = mJavaVm;
pJavaVm->DetachCurrentThread();
}
}
三.FFmpeg/MediaCodec解码过程对比:
FFmpeg:
------> 打开输入文件获取输入文件的封装实例AVFormatContext :avformat_open_input(&in_fmt_ctx, srcPath, nullptr, nullptr);
------> 查找音视频文件中的流信息:avformat_find_stream_info(in_fmt_ctx, nullptr);
------> 分别查找音频和视频的索引:av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0) / av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0) ;
------> 分别查找流封装的实例AVStream:src_video = in_fmt_ctx->streams[video_index]
/ src_audio = in_fmt_ctx->streams[audio_index] ;
------> 这里是从媒体文件中封装实例AVFormatContext,查找音视频文件中的流信息 avformat_find_stream_info(in_fmt_ctx, nullptr)
------> 从封装实例中查找视频流的索引video_index = av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
------> 查找视频流AVStream,src_video = in_fmt_ctx->streams[video_index]。
------> 根据编解码器ID,查找解码器AVCodec *video_codec = (AVCodec *) avcodec_find_decoder(video_codec_id);
------> 把未解压的数据包发给解码器实例avcodec_send_packet(video_decode_ctx, packet);
------> 从解码器实例获取还原后的数据帧avcodec_receive_frame(video_decode_ctx, frame);
MediaCodec:
------> 创建 AMediaExtractor_new()
------> 设置Extractor的fd AMediaExtractor_setDataSourceFd(extractor, input_fd, 0, fileSize)
------> AMediaExtractor_getTrackCount()获取提取器Extractor中的轨道数
------> AMediaExtractor_getTrackFormat(extractor, i) 遍历轨道从中获取每个轨道中的AMediaFormat
------> 筛选出音频轨道和视频轨道,分别获取音频轨道的audioTrackIndex值和视频轨道videoTrackIndex值。
------> 切换Extractor音视频的轨道,AMediaExtractor_getTrackFormat(extractor, videoTrackIndex) 方法中获取AMediaFormat。
------> 通过AMediaCodec_createDecoderByType 创建AMediaCodec
,并设置编解码AMediaCodec_configure(codec, format, nullptr, nullptr, isEncoder)。
------> 重新选择所有轨道以重置读取位置 AMediaExtractor_selectTrack(extractor, videoTrackIndex) 和 AMediaExtractor_selectTrack(extractor, audioTrackIndex);
------> 设置读取位置到开始 AMediaExtractor_seekTo(extractor, 0, AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC);
------>遍历可用的输入和可用的输出
四.效果展示:

以上的代码放在本人的GitHub项目:https://github.com/wangyongyao1989/FFmpegPractices