If you haven't read the prerequisite articles yet, you can start from the link below:
音视频ijkplayer源码解析系列4--ijkplayer里面如何使用SDL渲染

In the previous articles we analyzed ijkplayer's decoding, rendering, and initialization flows. Two types play a key role throughout that flow: `ff_ffpipeline#IJKFF_Pipeline` and `ff_ffpipenode#IJKFF_Pipenode`.
The main questions this article answers are: what do the pipeline and pipenode actually do, and why were they designed in the first place?
Let's start by analyzing the logic around `IJKFF_Pipeline`.
1. IJKFF_Pipeline analysis
1.1 Type definition
```cpp
struct IJKFF_Pipeline {
    SDL_Class             *opaque_class;
    IJKFF_Pipeline_Opaque *opaque;

    void            (*func_destroy)             (IJKFF_Pipeline *pipeline);
    IJKFF_Pipenode *(*func_open_video_decoder)  (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
    SDL_Aout       *(*func_open_audio_output)   (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
    IJKFF_Pipenode *(*func_init_video_decoder)  (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
    int             (*func_config_video_decoder)(IJKFF_Pipeline *pipeline, FFPlayer *ffp);
};
```
From this type definition we can see what each member is for (a sketch of how these hooks are dispatched follows the list):

- `SDL_Class *opaque_class`: holds the definition (class metadata) of the concrete pipeline
- `IJKFF_Pipeline_Opaque *opaque`: mainly stores the following:

```cpp
typedef struct IJKFF_Pipeline_Opaque {
    FFPlayer      *ffp;
    SDL_mutex     *surface_mutex;
    jobject        jsurface;
    volatile bool  is_surface_need_reconfigure;

    bool         (*mediacodec_select_callback)(void *opaque, ijkmp_mediacodecinfo_context *mcc);
    void          *mediacodec_select_callback_opaque;

    SDL_Vout      *weak_vout;

    float          left_volume;
    float          right_volume;
} IJKFF_Pipeline_Opaque;
```

- Video decoder hooks:
  - `IJKFF_Pipenode *(*func_init_video_decoder)`
  - `int (*func_config_video_decoder)`
  - `IJKFF_Pipenode *(*func_open_video_decoder)`
- Audio output hook:
  - `SDL_Aout *(*func_open_audio_output)`
- `void (*func_destroy)`: tears down the pipeline's resources
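This is classic C-style polymorphism: callers never touch the function pointers directly but go through thin dispatch wrappers. A minimal sketch of that pattern, simplified from the wrappers in ff_ffpipeline.c (argument checks omitted):

```cpp
/* Simplified sketch of the dispatch wrappers in ff_ffpipeline.c:
 * the caller holds an IJKFF_Pipeline* and never cares whether the
 * Android (MediaCodec) or ffplay implementation sits behind it. */
IJKFF_Pipenode *ffpipeline_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    return pipeline->func_open_video_decoder(pipeline, ffp);
}

SDL_Aout *ffpipeline_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    return pipeline->func_open_audio_output(pipeline, ffp);
}
```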
ijkplayer has two ways of creating a pipeline, corresponding to its two decoding paths:
- the Android pipeline factory, at ijkmedia/ijkplayer/android/pipeline/ffpipeline_android#ffpipeline_create_from_android
- the ffplay pipeline factory, at ijkmedia/ijkplayer/pipeline/ffpipeline_ffplay#ffpipeline_create_from_ffplay

Since we mainly care about the technical implementation on Android, we'll focus on the first one.
With that said, let's dig into the Android pipeline:
1.2 The Android pipeline
1.2.1 Instantiating the decoding pipeline
Note: the core hardware-decoding code all lives in ijkmedia/ijkplayer/android/pipeline/ffpipeline_android.c.
As analyzed in 音视频ijkplayer源码解析系列4--ijkplayer里面如何使用SDL渲染, when the C layer calls `ijkmp_android_create` to create the `IjkMediaPlayer` object, it runs `mp->ffplayer->pipeline = ffpipeline_create_from_android(mp->ffplayer);` — that is where the decoding pipeline gets instantiated. Let's look at the source of `ffpipeline_create_from_android`:
```cpp
IJKFF_Pipeline *ffpipeline_create_from_android(FFPlayer *ffp)
{
    ALOGD("ffpipeline_create_from_android()\n");
    IJKFF_Pipeline *pipeline = ffpipeline_alloc(&g_pipeline_class, sizeof(IJKFF_Pipeline_Opaque));
    if (!pipeline)
        return pipeline;

    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    opaque->ffp           = ffp;
    opaque->surface_mutex = SDL_CreateMutex();
    opaque->left_volume   = 1.0f;
    opaque->right_volume  = 1.0f;
    if (!opaque->surface_mutex) {
        ALOGE("ffpipeline-android:create SDL_CreateMutex failed\n");
        goto fail;
    }

    pipeline->func_destroy              = func_destroy;
    pipeline->func_open_video_decoder   = func_open_video_decoder;
    pipeline->func_open_audio_output    = func_open_audio_output;
    pipeline->func_init_video_decoder   = func_init_video_decoder;
    pipeline->func_config_video_decoder = func_config_video_decoder;

    return pipeline;
fail:
    ffpipeline_free_p(&pipeline);
    return NULL;
}
```
First, it calls `ffpipeline_alloc` to create the `IJKFF_Pipeline` object. The main steps are:
- call `calloc` to allocate the `IJKFF_Pipeline` object
- save the `opaque_class` member of `IJKFF_Pipeline`
- call `calloc` to allocate the `opaque` member, of type `IJKFF_Pipeline_Opaque`

Here is the code:
```cpp
IJKFF_Pipeline *ffpipeline_alloc(SDL_Class *opaque_class, size_t opaque_size)
{
    IJKFF_Pipeline *pipeline = (IJKFF_Pipeline*) calloc(1, sizeof(IJKFF_Pipeline));
    if (!pipeline)
        return NULL;

    pipeline->opaque_class = opaque_class;
    pipeline->opaque       = calloc(1, opaque_size);
    if (!pipeline->opaque) {
        free(pipeline);
        return NULL;
    }

    return pipeline;
}
```
Then, it initializes the `opaque` member of `IJKFF_Pipeline`. The main steps are:
- save the `FFPlayer` object into `opaque->ffp`
- call `SDL_CreateMutex` to create the lock `opaque->surface_mutex`
- set the left/right channel volumes (`opaque->left_volume` and `opaque->right_volume`) to the default of 1.0f
Finally, it wires up the pipeline's core implementation methods: `func_destroy`, `func_init_video_decoder`, `func_config_video_decoder`, `func_open_video_decoder`, and `func_open_audio_output`. All of these are implemented in the same file, ffpipeline_android.c; we'll break them down one by one in call order (the teardown hook is sketched right below).
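Before the decoder hooks, here is roughly what the matching teardown looks like. This is a simplified sketch based on my reading of ffpipeline_android.c, not a verbatim copy: it releases the surface mutex and drops the global reference to the Java Surface.

```cpp
/* Simplified sketch of the Android pipeline's func_destroy
 * (based on ffpipeline_android.c; error paths trimmed). */
static void func_destroy(IJKFF_Pipeline *pipeline)
{
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    JNIEnv *env = NULL;

    SDL_DestroyMutexP(&opaque->surface_mutex);               /* free the surface lock */

    if (JNI_OK == SDL_JNI_SetupThreadEnv(&env))
        SDL_JNI_DeleteGlobalRefP(env, &opaque->jsurface);    /* drop the Surface global ref */
}
```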
1.2.2 Initializing the decoder: func_init_video_decoder
The hardware-decoding initialization function is ffpipeline_android#func_init_video_decoder. Its source:
```cpp
static IJKFF_Pipenode *func_init_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    IJKFF_Pipenode        *node   = NULL;

    if (ffp->mediacodec_all_videos || ffp->mediacodec_avc || ffp->mediacodec_hevc || ffp->mediacodec_mpeg2)
        node = ffpipenode_init_decoder_from_android_mediacodec(ffp, pipeline, opaque->weak_vout);

    return node;
}
```
From this code we can see that it simply checks whether any of the player's MediaCodec flags is set (`mediacodec_all_videos`, `mediacodec_avc`, `mediacodec_hevc`, or `mediacodec_mpeg2`), and if so calls `ffpipenode_init_decoder_from_android_mediacodec` to create the `IJKFF_Pipenode`. Let's analyze that function's source next:
```cpp
IJKFF_Pipenode *ffpipenode_init_decoder_from_android_mediacodec(FFPlayer *ffp, IJKFF_Pipeline *pipeline, SDL_Vout *vout)
{
    if (SDL_Android_GetApiLevel() < IJK_API_16_JELLY_BEAN)
        return NULL;

    if (!ffp || !ffp->is)
        return NULL;

    IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
    if (!node)
        return node;

    VideoState            *is     = ffp->is;
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    JNIEnv                *env    = NULL;

    node->func_destroy = func_destroy;
    if (ffp->mediacodec_sync) {
        node->func_run_sync = func_run_sync_loop;
    } else {
        node->func_run_sync = func_run_sync;
    }
    node->func_flush = func_flush;

    opaque->pipeline  = pipeline;
    opaque->ffp       = ffp;
    opaque->decoder   = &is->viddec;
    opaque->weak_vout = vout;

    opaque->acodec_mutex                      = SDL_CreateMutex();
    opaque->acodec_cond                       = SDL_CreateCond();
    opaque->acodec_first_dequeue_output_mutex = SDL_CreateMutex();
    opaque->acodec_first_dequeue_output_cond  = SDL_CreateCond();
    opaque->any_input_mutex                   = SDL_CreateMutex();
    opaque->any_input_cond                    = SDL_CreateCond();

    /* NOTE: acodec_cond is tested twice and acodec_mutex not at all --
       an apparent typo carried over from the upstream source. */
    if (!opaque->acodec_cond || !opaque->acodec_cond || !opaque->acodec_first_dequeue_output_mutex || !opaque->acodec_first_dequeue_output_cond) {
        ALOGE("%s:open_video_decoder: SDL_CreateCond() failed\n", __func__);
        goto fail;
    }

    opaque->codecpar = avcodec_parameters_alloc();
    if (!opaque->codecpar)
        goto fail;

    if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) {
        ALOGE("%s:create: SetupThreadEnv failed\n", __func__);
        goto fail;
    }

    ALOGI("%s:use default mediacodec name: %s\n", __func__, ffp->mediacodec_default_name);
    strcpy(opaque->mcc.codec_name, ffp->mediacodec_default_name);

    opaque->acodec = SDL_AMediaCodecJava_createByCodecName(env, ffp->mediacodec_default_name);
    if (!opaque->acodec) {
        goto fail;
    }

    return node;
fail:
    ALOGW("%s: init fail\n", __func__);
    ffpipenode_free_p(&node);
    return NULL;
}
```
From the code above, the main flow is:

- Use `ffpipenode_alloc` to create the `IJKFF_Pipenode` node object. This mirrors the "`ffpipeline_alloc` creates `IJKFF_Pipeline`" flow we just saw, allocating the node and its opaque in the same way; if you're curious, go read the source directly, we won't repeat it here.
- Set the node's destroy function: `node->func_destroy = func_destroy`
- Set the node's decode function:

```cpp
if (ffp->mediacodec_sync) {
    node->func_run_sync = func_run_sync_loop;
} else {
    node->func_run_sync = func_run_sync;
}
```

- Set the node's buffer-flush function: `node->func_flush = func_flush;`
- Initialize the node's `IJKFF_Pipenode_Opaque *opaque` member; here is that code:

```cpp
opaque->pipeline  = pipeline;
opaque->ffp       = ffp;
opaque->decoder   = &is->viddec;
opaque->weak_vout = vout;

opaque->acodec_mutex                      = SDL_CreateMutex();
opaque->acodec_cond                       = SDL_CreateCond();
opaque->acodec_first_dequeue_output_mutex = SDL_CreateMutex();
opaque->acodec_first_dequeue_output_cond  = SDL_CreateCond();
opaque->any_input_mutex                   = SDL_CreateMutex();
opaque->any_input_cond                    = SDL_CreateCond();
```

  The key point is that it stores references to the pipeline and the player object `ffp`.
- Create the codec-parameter object: `opaque->codecpar = avcodec_parameters_alloc();`
- Create the SDL decoder object: `opaque->acodec = SDL_AMediaCodecJava_createByCodecName(env, ffp->mediacodec_default_name);` This decoder is backed by the Java-layer `MediaCodec` hardware-decoding class. That part is fairly involved, so we'll set it aside for now and unpack it step by step in later chapters.

Once these steps complete, the `IJKFF_Pipenode` object has been created, and the caller stores it into `ffp->node_vdec`, i.e., on the `FFPlayer` object.
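How does this node get used afterwards? Based on my reading of ff_ffpipenode.c and ff_ffplay.c, a hedged sketch (not verbatim source): the video decode thread calls the node's `func_run_sync` through a wrapper, so the same call site drives either MediaCodec or ffmpeg decoding.

```cpp
/* Hedged sketch: the dispatch wrapper in ff_ffpipenode.c... */
int ffpipenode_run_sync(IJKFF_Pipenode *node)
{
    return node->func_run_sync(node);
}

/* ...and the video decode thread in ff_ffplay.c drives it, roughly: */
static int video_thread(void *arg)
{
    FFPlayer *ffp = (FFPlayer *)arg;
    return ffpipenode_run_sync(ffp->node_vdec);
}
```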
1.2.3 Configuring the decoder: func_config_video_decoder
The hardware-decoding configuration function is ffpipeline_android#func_config_video_decoder. Its source:
```cpp
static int func_config_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    int ret = NULL;  /* upstream initializes an int with NULL; effectively 0 */

    if (ffp->node_vdec) {
        ret = ffpipenode_config_from_android_mediacodec(ffp, pipeline, opaque->weak_vout, ffp->node_vdec);
    }

    return ret;
}
```
From this code, it is meant to be called only after the node object has already been created; it then runs `ffpipenode_config_from_android_mediacodec`. Here is that code:
```cpp
int ffpipenode_config_from_android_mediacodec(FFPlayer *ffp, IJKFF_Pipeline *pipeline, SDL_Vout *vout, IJKFF_Pipenode *node) {
    int ret = 0;
    VideoState            *is       = ffp->is;
    IJKFF_Pipenode_Opaque *opaque   = node->opaque;
    JNIEnv                *env      = NULL;
    jobject                jsurface = NULL;
    opaque->decoder = &is->viddec;

    if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) {
        ALOGE("%s:create: SetupThreadEnv failed\n", __func__);
        goto fail;
    }

    ret = avcodec_parameters_from_context(opaque->codecpar, opaque->decoder->avctx);
    if (ret)
        goto fail;

    switch (opaque->codecpar->codec_id) {
    case AV_CODEC_ID_H264:
        if (!ffp->mediacodec_avc && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec: AVC/H264 is disabled. codec_id:%d \n", __func__, opaque->codecpar->codec_id);
            goto fail;
        }
        ...
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_AVC);
        opaque->mcc.profile = opaque->codecpar->profile;
        opaque->mcc.level   = opaque->codecpar->level;
        break;
    case AV_CODEC_ID_HEVC:
        if (!ffp->mediacodec_hevc && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec/HEVC is disabled. codec_id:%d \n", __func__, opaque->codecpar->codec_id);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_HEVC);
        opaque->mcc.profile = opaque->codecpar->profile;
        opaque->mcc.level   = opaque->codecpar->level;
        break;
    case AV_CODEC_ID_MPEG2VIDEO:
        if (!ffp->mediacodec_mpeg2 && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec/MPEG2VIDEO is disabled. codec_id:%d \n", __func__, opaque->codecpar->codec_id);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_MPEG2VIDEO);
        opaque->mcc.profile = opaque->codecpar->profile;
        opaque->mcc.level   = opaque->codecpar->level;
        break;
    case AV_CODEC_ID_MPEG4:
        if (!ffp->mediacodec_mpeg4 && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec/MPEG4 is disabled. codec_id:%d \n", __func__, opaque->codecpar->codec_id);
            goto fail;
        }
        if ((opaque->codecpar->codec_tag & 0x0000FFFF) == 0x00005844) {
            ALOGE("%s: divx is not supported \n", __func__);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_MPEG4);
        opaque->mcc.profile = opaque->codecpar->profile >= 0 ? opaque->codecpar->profile : 0;
        opaque->mcc.level   = opaque->codecpar->level   >= 0 ? opaque->codecpar->level   : 1;
        break;
    default:
        ALOGE("%s:create: not H264 or H265/HEVC, codec_id:%d \n", __func__, opaque->codecpar->codec_id);
        goto fail;
    }

    if (strcmp(opaque->mcc.mime_type, ffp->video_mime_type)) {
        ALOGW("amc: video_mime_type error opaque->mcc.mime_type = %s\n", opaque->mcc.mime_type);
        goto fail;
    }

    ret = recreate_format_l(env, node);
    if (ret) {
        ALOGE("amc: recreate_format_l failed\n");
        goto fail;
    }

    jsurface = ffpipeline_get_surface_as_global_ref(env, pipeline);
    ret = configure_codec_l(env, node, jsurface);
    J4A_DeleteGlobalRef__p(env, &jsurface);
    if (ret != 0)
        goto fail;

    ffp_set_video_codec_info(ffp, MEDIACODEC_MODULE_NAME, opaque->mcc.codec_name);

    opaque->off_buf_out = 0;
    if (opaque->n_buf_out) {
        int i;
        opaque->amc_buf_out = calloc(opaque->n_buf_out, sizeof(*opaque->amc_buf_out));
        assert(opaque->amc_buf_out != NULL);
        for (i = 0; i < opaque->n_buf_out; i++)
            opaque->amc_buf_out[i].pts = AV_NOPTS_VALUE;
    }

    SDL_SpeedSamplerReset(&opaque->sampler);
    ffp->stat.vdec_type = FFP_PROPV_DECODER_MEDIACODEC;
    return 0;
fail:
    ret = -1;
    ffpipenode_free_p(&node);
    return ret;
}
```
From this source we can see that it first calls `avcodec_parameters_from_context(opaque->codecpar, opaque->decoder->avctx);`, which copies the stream parameters out of the decoder context `AVCodecContext *avctx` into `AVCodecParameters *codecpar` (note the direction: context to parameters, not the other way around). It then switches on `codecpar->codec_id` to fill in the MediaCodec MIME type, profile, and level in `opaque->mcc`; rebuilds the media format via `recreate_format_l`; grabs the Java Surface as a global reference; and finally calls `configure_codec_l` to configure the codec against that Surface.
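Since the direction of that copy is easy to get backwards, here is a minimal, self-contained illustration of the FFmpeg API itself (not ijkplayer code):

```cpp
#include <libavcodec/avcodec.h>

/* Minimal illustration: avcodec_parameters_from_context() copies
 * FROM the codec context INTO the parameters struct; the inverse,
 * avcodec_parameters_to_context(), goes the other way. */
static AVCodecParameters *snapshot_params(const AVCodecContext *avctx)
{
    AVCodecParameters *par = avcodec_parameters_alloc();
    if (!par)
        return NULL;
    if (avcodec_parameters_from_context(par, avctx) < 0) {  /* avctx -> par */
        avcodec_parameters_free(&par);
        return NULL;
    }
    return par;
}
```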
1.2.4 Opening the decoder: func_open_video_decoder
The function that opens the hardware decoder is ffpipeline_android#func_open_video_decoder. Its source:
```cpp
static IJKFF_Pipenode *func_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    IJKFF_Pipenode        *node   = NULL;

    if (ffp->mediacodec_all_videos || ffp->mediacodec_avc || ffp->mediacodec_hevc || ffp->mediacodec_mpeg2)
        node = ffpipenode_create_video_decoder_from_android_mediacodec(ffp, pipeline, opaque->weak_vout);
    if (!node) {
        node = ffpipenode_create_video_decoder_from_ffplay(ffp);
    }

    return node;
}
```
From this code, it checks whether the video is eligible for hardware decoding:
- if it is, it calls `ffpipenode_create_video_decoder_from_android_mediacodec(ffp, pipeline, opaque->weak_vout)` to create the Android `MediaCodec` hardware-decoding node
- if not (or if that creation fails), it falls back to `ffpipenode_create_video_decoder_from_ffplay` to create the ffmpeg software-decoding node

Next, let's analyze these two kinds of node.
2. IJKFF_Pipenode analysis
Following on from the pipeline's two node-creation paths, this is exactly what decides whether the player uses hardware or software decoding.
2.1 What decides hardware vs. software decoding
As the code we just quoted shows, the deciding condition is: `ffp->mediacodec_all_videos || ffp->mediacodec_avc || ffp->mediacodec_hevc || ffp->mediacodec_mpeg2`
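These flags come from player options set by the application. A hedged sketch of how they would typically be switched on from native code — the option names follow ijkplayer's player-option table, but treat the exact names and category constants as assumptions to verify against ff_ffplay_options.h:

```cpp
/* Hedged sketch: enabling MediaCodec decoding via player options. */
ffp_set_option(ffp, FFP_OPT_CATEGORY_PLAYER, "mediacodec",            "1"); /* -> ffp->mediacodec_avc (H.264) */
ffp_set_option(ffp, FFP_OPT_CATEGORY_PLAYER, "mediacodec-hevc",       "1"); /* -> ffp->mediacodec_hevc        */
ffp_set_option(ffp, FFP_OPT_CATEGORY_PLAYER, "mediacodec-mpeg2",      "1"); /* -> ffp->mediacodec_mpeg2       */
ffp_set_option(ffp, FFP_OPT_CATEGORY_PLAYER, "mediacodec-all-videos", "1"); /* -> ffp->mediacodec_all_videos  */
```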
2.2 The software-decoding node
From the analysis above, the key function for software decoding (i.e., ffmpeg) lives in ffpipenode_ffplay_vdec.c:
```cpp
IJKFF_Pipenode *ffpipenode_create_video_decoder_from_ffplay(FFPlayer *ffp)
{
    IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
    if (!node)
        return node;

    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    opaque->ffp = ffp;

    node->func_destroy  = func_destroy;
    node->func_run_sync = func_run_sync;

    ffp_set_video_codec_info(ffp, AVCODEC_MODULE_NAME, avcodec_get_name(ffp->is->viddec.avctx->codec_id));
    ffp->stat.vdec_type = FFP_PROPV_DECODER_AVCODEC;
    return node;
}
```
We can see that it first calls `ffpipenode_alloc` to create the `IJKFF_Pipenode` object, then saves the current player `FFPlayer` into the newly created node object, next wires up the software node's two key functions, `func_destroy` and `func_run_sync`, and finally calls `ffp_set_video_codec_info` to record the codec info and set the decoder type to `FFP_PROPV_DECODER_AVCODEC`.

We just mentioned the key function `func_run_sync`: it is the core function for demuxing and decoding that we covered in 音视频ijkplayer源码解析系列3--解码流程; if that logic is fuzzy, go back and review it. Once video frames are decoded, they get rendered to the window as described in 音视频ijkplayer源码解析系列4--ijkplayer里面如何使用SDL渲染. (A sketch of this node's `func_run_sync` follows below.)
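For reference, the software node's `func_run_sync` is essentially a thin adapter onto the classic ffplay decode loop. This is a simplified sketch based on my reading of ffpipenode_ffplay_vdec.c; treat `ffp_video_thread` as the assumed entry point:

```cpp
/* Simplified sketch (ffpipenode_ffplay_vdec.c): the node's run-sync hook
 * just hands control to the ffplay software video decode loop. */
static int func_run_sync(IJKFF_Pipenode *node)
{
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    return ffp_video_thread(opaque->ffp);
}
```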
2.3 The hardware-decoding node
We also mentioned the node that uses Android's `MediaCodec` for hardware decoding. Since the function is very long, I've stripped out the logging-related code; the source is:
```cpp
IJKFF_Pipenode *ffpipenode_create_video_decoder_from_android_mediacodec(FFPlayer *ffp, IJKFF_Pipeline *pipeline, SDL_Vout *vout)
{
    if (SDL_Android_GetApiLevel() < IJK_API_16_JELLY_BEAN)
        return NULL;

    if (!ffp || !ffp->is)
        return NULL;

    IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
    if (!node)
        return node;

    VideoState            *is       = ffp->is;
    IJKFF_Pipenode_Opaque *opaque   = node->opaque;
    JNIEnv                *env      = NULL;
    int                    ret      = 0;
    jobject                jsurface = NULL;

    node->func_destroy = func_destroy;
    if (ffp->mediacodec_sync) {
        node->func_run_sync = func_run_sync_loop;
    } else {
        node->func_run_sync = func_run_sync;
    }
    node->func_flush = func_flush;

    opaque->pipeline  = pipeline;
    opaque->ffp       = ffp;
    opaque->decoder   = &is->viddec;
    opaque->weak_vout = vout;

    opaque->codecpar = avcodec_parameters_alloc();
    if (!opaque->codecpar)
        goto fail;

    ret = avcodec_parameters_from_context(opaque->codecpar, opaque->decoder->avctx);
    if (ret)
        goto fail;

    switch (opaque->codecpar->codec_id) {
    case AV_CODEC_ID_H264:
        if (!ffp->mediacodec_avc && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec: AVC/H264 is disabled. codec_id:%d \n", __func__, opaque->codecpar->codec_id);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_AVC);
        opaque->mcc.profile = opaque->codecpar->profile;
        opaque->mcc.level   = opaque->codecpar->level;
        break;
    case AV_CODEC_ID_HEVC:
        if (!ffp->mediacodec_hevc && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec/HEVC is disabled. codec_id:%d \n", __func__, opaque->codecpar->codec_id);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_HEVC);
        opaque->mcc.profile = opaque->codecpar->profile;
        opaque->mcc.level   = opaque->codecpar->level;
        break;
    case AV_CODEC_ID_MPEG2VIDEO:
        if (!ffp->mediacodec_mpeg2 && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec/MPEG2VIDEO is disabled. codec_id:%d \n", __func__, opaque->codecpar->codec_id);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_MPEG2VIDEO);
        opaque->mcc.profile = opaque->codecpar->profile;
        opaque->mcc.level   = opaque->codecpar->level;
        break;
    case AV_CODEC_ID_MPEG4:
        if (!ffp->mediacodec_mpeg4 && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec/MPEG4 is disabled. codec_id:%d \n", __func__, opaque->codecpar->codec_id);
            goto fail;
        }
        if ((opaque->codecpar->codec_tag & 0x0000FFFF) == 0x00005844) {
            ALOGE("%s: divx is not supported \n", __func__);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_MPEG4);
        opaque->mcc.profile = opaque->codecpar->profile >= 0 ? opaque->codecpar->profile : 0;
        opaque->mcc.level   = opaque->codecpar->level   >= 0 ? opaque->codecpar->level   : 1;
        break;
    default:
        goto fail;
    }

    if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) {
        goto fail;
    }

    opaque->acodec_mutex                      = SDL_CreateMutex();
    opaque->acodec_cond                       = SDL_CreateCond();
    opaque->acodec_first_dequeue_output_mutex = SDL_CreateMutex();
    opaque->acodec_first_dequeue_output_cond  = SDL_CreateCond();
    opaque->any_input_mutex                   = SDL_CreateMutex();
    opaque->any_input_cond                    = SDL_CreateCond();

    /* same doubled acodec_cond check (and unchecked acodec_mutex) as noted in 1.2.2 */
    if (!opaque->acodec_cond || !opaque->acodec_cond || !opaque->acodec_first_dequeue_output_mutex || !opaque->acodec_first_dequeue_output_cond) {
        goto fail;
    }

    ret = recreate_format_l(env, node);
    if (ret) {
        goto fail;
    }

    if (!ffpipeline_select_mediacodec_l(pipeline, &opaque->mcc) || !opaque->mcc.codec_name[0]) {
        goto fail;
    }

    jsurface = ffpipeline_get_surface_as_global_ref(env, pipeline);
    ret = reconfigure_codec_l(env, node, jsurface);
    J4A_DeleteGlobalRef__p(env, &jsurface);
    if (ret != 0)
        goto fail;

    ffp_set_video_codec_info(ffp, MEDIACODEC_MODULE_NAME, opaque->mcc.codec_name);

    opaque->off_buf_out = 0;
    if (opaque->n_buf_out) {
        int i;
        opaque->amc_buf_out = calloc(opaque->n_buf_out, sizeof(*opaque->amc_buf_out));
        assert(opaque->amc_buf_out != NULL);
        for (i = 0; i < opaque->n_buf_out; i++)
            opaque->amc_buf_out[i].pts = AV_NOPTS_VALUE;
    }

    SDL_SpeedSamplerReset(&opaque->sampler);
    ffp->stat.vdec_type = FFP_PROPV_DECODER_MEDIACODEC;
    return node;
fail:
    ffpipenode_free_p(&node);
    return NULL;
}
```
Comparing this source with "1.2.2 Initializing the decoder: func_init_video_decoder" above, the core flow is, if not identical, at least broadly the same, so this time we'll only hit the key points (a recap of the whole flow follows after the list):

- As usual, call `ffpipenode_alloc` to create the `IJKFF_Pipenode` object
- Wire up the hardware node's decode logic:

```cpp
if (ffp->mediacodec_sync) {
    node->func_run_sync = func_run_sync_loop;
} else {
    node->func_run_sync = func_run_sync;
}
```

- Create the `AVCodecParameters` codec-parameter context: `opaque->codecpar = avcodec_parameters_alloc();` — we'll use this during decoding
- Create the `SDL_AMediaCodec` decoder object. Unlike 1.2.2, there is no direct `SDL_AMediaCodecJava_createByCodecName` call here; instead `ffpipeline_select_mediacodec_l` picks the codec name, and the actual codec is set up when `reconfigure_codec_l` runs against the Surface.
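To wrap up, here is a hedged recap of how the pieces covered in this article fit together on Android. The wrapper names (`ffpipeline_init_video_decoder`, etc.) are the ff_ffpipeline.c dispatchers assumed from section 1.1, and the exact call sites and ordering in ff_ffplay.c may differ:

```cpp
/* Hedged recap (not verbatim ijkplayer code; call sites simplified). */
ffp->pipeline  = ffpipeline_create_from_android(ffp);                 /* 1.2.1: player creation           */
ffp->node_vdec = ffpipeline_init_video_decoder(ffp->pipeline, ffp);   /* 1.2.2: NULL if MediaCodec is off */
ffpipeline_config_video_decoder(ffp->pipeline, ffp);                  /* 1.2.3: codec params + Surface    */
ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);   /* 1.2.4: hard node, else ffplay    */
ffpipenode_run_sync(ffp->node_vdec);                                  /* 2.x: decode loop on video thread */
```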