Android Camera2 HAL3 & device.start Flow Analysis (Part 2)

This analysis is based on Android P.

channel->start

After all channels have been initialized, the very next step is to start each channel:

cpp
QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
LOGH("Start Processing Channel mask=%d",
     channel->getStreamTypeMask());
rc = channel->start();
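
For context, this snippet sits inside QCamera3HardwareInterface::processCaptureRequest(): on the first capture request it walks the mStreamInfo list and starts every channel. A simplified sketch of that surrounding loop (error handling trimmed; not a verbatim copy of QCamera3HWI.cpp):

cpp
// Simplified sketch of the loop that contains the snippet above;
// mStreamInfo holds one stream_info_t per configured stream.
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
        it != mStreamInfo.end(); it++) {
    QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
    if (channel == NULL) {
        continue;               // stream without a backing channel
    }
    rc = channel->start();      // QCamera3Channel::start(), shown next
    if (rc < 0) {
        LOGE("channel start failed");
        break;
    }
}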

This calls QCamera3Channel::start():

cpp
int32_t QCamera3Channel::start()
{
    ATRACE_CALL();
    int32_t rc = NO_ERROR;
​
    // A QCamera3Channel is expected to own exactly one QCamera3Stream
    if (m_numStreams > 1) {
        LOGW("bundle not supported");
    } else if (m_numStreams == 0) {
        return NO_INIT;
    }
​
    if(m_bIsActive) {
        LOGW("Attempt to start active channel");
        return rc;
    }
​
    for (uint32_t i = 0; i < m_numStreams; i++) {
        if (mStreams[i] != NULL) {
            mStreams[i]->start();
        }
    }
​
    m_bIsActive = true;
​
    return rc;
}

This in turn calls QCamera3Stream::start():

cpp
int32_t QCamera3Stream::start()
{
    int32_t rc = 0;
​
    mDataQ.init();
    mTimeoutFrameQ.clear();
    if (mBatchSize)
        mFreeBatchBufQ.init();
    rc = mProcTh.launch(dataProcRoutine, this);
    return rc;
}

This launches the stream's data-processing thread, dataProcRoutine. For the preview stream the thread is named CAM_PREVIEW; the metadata stream works the same way, except its thread is named CAM_METADATA:

cpp
void *QCamera3Stream::dataProcRoutine(void *data)
{
    int running = 1;
    int ret;
    QCamera3Stream *pme = (QCamera3Stream *)data;
    QCameraCmdThread *cmdThread = &pme->mProcTh;
​
    cmdThread->setName(mStreamNames[pme->mStreamInfo->stream_type]);
​
    LOGD("E");
    do {
        do {
            ret = cam_sem_wait(&cmdThread->cmd_sem);
            if (ret != 0 && errno != EINVAL) {
                LOGE("cam_sem_wait error (%s)",
                       strerror(errno));
                return NULL;
            }
        } while (ret != 0);
​
        // we got notified about new cmd avail in cmd queue
        camera_cmd_type_t cmd = cmdThread->getCmd();
        switch (cmd) {
        case CAMERA_CMD_TYPE_TIMEOUT:
            {
                int32_t bufIdx = (int32_t)(pme->mTimeoutFrameQ.dequeue());
                pme->cancelBuffer(bufIdx);
                break;
            }
        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
            {
                LOGD("Do next job");
                mm_camera_super_buf_t *frame =
                    (mm_camera_super_buf_t *)pme->mDataQ.dequeue();
                if (NULL != frame) {
                    if (UNLIKELY(frame->bufs[0]->buf_type ==
                            CAM_STREAM_BUF_TYPE_USERPTR)) {
                        pme->handleBatchBuffer(frame);
                    } else if (pme->mDataCB != NULL) {
                        pme->mDataCB(frame, pme, pme->mUserData);
                    } else {
                        // no data cb routine, return buf here
                        pme->bufDone(frame->bufs[0]->buf_idx);
                    }
                }
            }
            break;
        case CAMERA_CMD_TYPE_EXIT:
            LOGH("Exit");
            /* flush data buf queue */
            pme->mDataQ.flush();
            pme->mTimeoutFrameQ.flush();
            pme->flushFreeBatchBufQ();
            running = 0;
            break;
        default:
            break;
        }
    } while (running);
    LOGD("X");
    return NULL;
}

As can be seen in the CAMERA_CMD_TYPE_DO_NEXT_JOB case, a mm_camera_super_buf_t is dequeued from the stream's data queue and handed upward through mDataCB. The mDataCB here is the QCamera3Channel streamCbRoutine that was passed in when the stream was initialized, which means the data is thrown up to the channel layer via this callback.
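
The shape of that callback can be read off the call site above (pme->mDataCB(frame, pme, pme->mUserData)); below is a sketch of the corresponding function-pointer type, using an illustrative typedef name rather than the one actually used in QCamera3Stream.h:

cpp
// Illustrative typedef only; the real alias in QCamera3Stream.h may differ.
// QCamera3Channel's streamCbRoutine is registered through a pointer of this
// shape, together with the channel pointer as userdata, when the stream is
// initialized.
class QCamera3Stream;                               // forward declaration
typedef struct mm_camera_super_buf mm_camera_super_buf_t;

typedef void (*stream_data_cb_t)(mm_camera_super_buf_t *super_frame,
                                 QCamera3Stream *stream,
                                 void *userdata);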

Inside streamCbRoutine, the channel hands the data to its own worker thread for processing, i.e. it executes m_postprocessor.processData(frame, ppInfo->output, resultFrameNumber). processData() enqueues the buffer and leaves it for the channel's thread to handle; the body of that thread is dataProcessRoutine.

dataProcessRoutine is exactly where this connects back to the final piece of the HAL config flow analyzed earlier.
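
Both dataProcRoutine above and the postprocessor's dataProcessRoutine follow the same enqueue-and-signal hand-off. The standalone sketch below shows just that pattern in standard C++; it is not the QCamera3PostProcessor implementation, and every name in it is made up:

cpp
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// Producer/consumer hand-off: processData() (producer) enqueues a frame and
// signals the worker; the worker loop (consumer) dequeues and processes it.
struct Frame { int frameNumber; };

class FrameWorker {
public:
    FrameWorker() : mRunning(true), mThread(&FrameWorker::processLoop, this) {}
    ~FrameWorker() {
        {
            std::lock_guard<std::mutex> lock(mLock);
            mRunning = false;
        }
        mCond.notify_one();
        mThread.join();          // drains the queue, then exits
    }
    // Analogue of processData(): enqueue and wake the worker thread.
    void processData(const Frame &frame) {
        {
            std::lock_guard<std::mutex> lock(mLock);
            mQueue.push(frame);
        }
        mCond.notify_one();
    }
private:
    // Analogue of dataProcessRoutine(): wait, dequeue, handle.
    void processLoop() {
        std::unique_lock<std::mutex> lock(mLock);
        while (mRunning || !mQueue.empty()) {
            mCond.wait(lock, [this] { return !mQueue.empty() || !mRunning; });
            while (!mQueue.empty()) {
                Frame f = mQueue.front();
                mQueue.pop();
                lock.unlock();   // process outside the lock
                std::cout << "processing frame " << f.frameNumber << std::endl;
                lock.lock();
            }
        }
    }
    bool mRunning;
    std::mutex mLock;
    std::condition_variable mCond;
    std::queue<Frame> mQueue;
    std::thread mThread;
};

int main() {
    FrameWorker worker;
    worker.processData({1});
    worker.processData({2});
    return 0;   // destructor drains the queue and joins the worker
}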

Request Channel

This part of the flow has actually been analyzed already.

/hardware/qcom/camera/QCamera2/HAL3/QCamera3HWI.cpp

cpp
int QCamera3HardwareInterface::processCaptureRequest(
                    camera3_capture_request_t *request,
                    List<InternalRequest> &internallyRequestedStreams)
{
                    ...
                    for (size_t i = 0; i < request->num_output_buffers; i++) {
                        const camera3_stream_buffer_t& output = request->output_buffers[i];
                        QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
                        ...
                        rc = channel->request(output.buffer, frameNumber,
                                NULL, mParameters, indexUsed);
                    ...
}

In this function, channel->request() is called to register the output buffer into HAL3.

/hardware/qcom/camera/msm8998/QCamera2/HAL3/QCamera3Channel.cpp

cpp
int32_t QCamera3ProcessingChannel::request(buffer_handle_t *buffer,
        uint32_t frameNumber,
        camera3_stream_buffer_t* pInputBuffer,
        metadata_buffer_t* metadata,
        int &indexUsed,
        __unused bool internalRequest = false,
        __unused bool meteringOnly = false)
{
    int32_t rc = NO_ERROR;
    int index;
​
    if (NULL == buffer || NULL == metadata) {
        LOGE("Invalid buffer/metadata in channel request");
        return BAD_VALUE;
    }
​
    if (pInputBuffer) {
        //need to send to reprocessing
        LOGD("Got a request with input buffer, output streamType = %d", mStreamType);
        reprocess_config_t reproc_cfg;
        cam_dimension_t dim;
        memset(&reproc_cfg, 0, sizeof(reprocess_config_t));
        memset(&dim, 0, sizeof(dim));
        setReprocConfig(reproc_cfg, pInputBuffer, metadata, mStreamFormat, dim);
        startPostProc(reproc_cfg);
​
        qcamera_fwk_input_pp_data_t *src_frame = NULL;
        src_frame = (qcamera_fwk_input_pp_data_t *)calloc(1,
                sizeof(qcamera_fwk_input_pp_data_t));
        if (src_frame == NULL) {
            LOGE("No memory for src frame");
            return NO_MEMORY;
        }
        rc = setFwkInputPPData(src_frame, pInputBuffer, &reproc_cfg, metadata, buffer, frameNumber);
        if (NO_ERROR != rc) {
            LOGE("Error %d while setting framework input PP data", rc);
            free(src_frame);
            return rc;
        }
        LOGH("Post-process started");
        m_postprocessor.processData(src_frame);
    } else {
        index = mMemory.getMatchBufIndex((void*)buffer);
        if(index < 0) {
            rc = registerBuffer(buffer, mIsType);
            if (NO_ERROR != rc) {
                LOGE("On-the-fly buffer registration failed %d",
                         rc);
                return rc;
            }
​
            index = mMemory.getMatchBufIndex((void*)buffer);
            if (index < 0) {
                LOGE("Could not find object among registered buffers");
                return DEAD_OBJECT;
            }
        }
        rc = mMemory.markFrameNumber(index, frameNumber);
        if(rc != NO_ERROR) {
            LOGE("Error marking frame number:%d for index %d", frameNumber,
                index);
            return rc;
        }
        if (m_bIsActive) {
            rc = mStreams[0]->bufDone(index);
            if(rc != NO_ERROR) {
                LOGE("Failed to Q new buffer to stream");
                mMemory.markFrameNumber(index, -1);
                return rc;
            }
        }
        indexUsed = index;
    }
    return rc;
}

The main logic here is rc = registerBuffer(buffer, mIsType):

cpp
int32_t QCamera3ProcessingChannel::registerBuffer(buffer_handle_t *buffer,
        cam_is_type_t isType)
{
    ATRACE_CAMSCOPE_CALL(CAMSCOPE_HAL3_PROC_CH_REG_BUF);
    int rc = 0;
    mIsType = isType;
    cam_stream_type_t streamType;
​
    if ((uint32_t)mMemory.getCnt() > (mNumBufs - 1)) {
        LOGE("Trying to register more buffers than initially requested");
        return BAD_VALUE;
    }
​
    if (0 == m_numStreams) {
        rc = initialize(mIsType);
        if (rc != NO_ERROR) {
            LOGE("Couldn't initialize camera stream %d", rc);
            return rc;
        }
    }
​
    streamType = mStreams[0]->getMyType();
    rc = mMemory.registerBuffer(buffer, streamType);
    if (ALREADY_EXISTS == rc) {
        return NO_ERROR;
    } else if (NO_ERROR != rc) {
        LOGE("Buffer %p couldn't be registered %d", buffer, rc);
        return rc;
    }
​
    return rc;
}

Let's focus on the mMemory.registerBuffer() logic.

First, the definitions of mMemory and mGrallocMem:

/hardware/qcom/camera/msm8998/QCamera2/HAL3/QCamera3Channel.h

cpp
    ..................
    QCamera3StreamMem mMemory; //output buffer allocated by fwk
    ..................
private:
    camera3_stream_t *mStream;
    QCamera3GrallocMemory mGrallocMem;
};

cpp
int QCamera3StreamMem::registerBuffer(buffer_handle_t *buffer,
        cam_stream_type_t type)
{
    Mutex::Autolock lock(mLock);
    return mGrallocMem.registerBuffer(buffer, type);
}

mGrallocMem.registerBuffer:

cpp
int QCamera3GrallocMemory::registerBuffer(buffer_handle_t *buffer,
        __unused cam_stream_type_t type)
{
    status_t ret = NO_ERROR;
    struct ion_fd_data ion_info_fd;
    int32_t colorSpace = ITU_R_601_FR;
    int32_t idx = -1;
​
    LOGD("E");
​
    memset(&ion_info_fd, 0, sizeof(ion_info_fd));
​
    if (0 <= getMatchBufIndex((void *) buffer)) {
        LOGL("Buffer already registered");
        return ALREADY_EXISTS;
    }
​
    Mutex::Autolock lock(mLock);
    if (mBufferCount >= (MM_CAMERA_MAX_NUM_FRAMES - 1 - mStartIdx)) {
        LOGE("Number of buffers %d greater than what's supported %d",
                 mBufferCount, MM_CAMERA_MAX_NUM_FRAMES - mStartIdx);
        return BAD_INDEX;
    }
​
    idx = getFreeIndexLocked();
    if (0 > idx) {
        LOGE("No available memory slots");
        return BAD_INDEX;
    }
​
    mBufferHandle[idx] = buffer;
    mPrivateHandle[idx] = (struct private_handle_t *)(*mBufferHandle[idx]);
​
    setMetaData(mPrivateHandle[idx], UPDATE_COLOR_SPACE, &colorSpace);
​
    if (main_ion_fd < 0) {
        LOGE("failed: could not open ion device");
        ret = NO_MEMORY;
        goto end;
    } else {
        ion_info_fd.fd = mPrivateHandle[idx]->fd;
        if (ioctl(main_ion_fd,
                  ION_IOC_IMPORT, &ion_info_fd) < 0) {
            LOGE("ION import failed\n");
            ret = NO_MEMORY;
            goto end;
        }
    }
    LOGD("idx = %d, fd = %d, size = %d, offset = %d",
             idx, mPrivateHandle[idx]->fd,
            mPrivateHandle[idx]->size,
            mPrivateHandle[idx]->offset);
    mMemInfo[idx].fd = mPrivateHandle[idx]->fd;
    mMemInfo[idx].size =
            ( /* FIXME: Should update ION interface */ size_t)
            mPrivateHandle[idx]->size;
    mMemInfo[idx].handle = ion_info_fd.handle;
​
    mBufferCount++;
​
end:
    LOGD("X ");
    return ret;
}

At this point the window buffers (the buffers backing the Surface) have been registered into HAL3, with mMemInfo holding the per-buffer information.
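
Both request() and registerBuffer() above call getMatchBufIndex() to check whether a framework buffer is already registered, but that helper is not quoted here. Conceptually it scans the registered buffer_handle_t pointers for a match and returns the slot index; a minimal sketch of that assumed behavior follows (hypothetical free-function form, not the actual member implementation):

cpp
// Assumed behavior of getMatchBufIndex(): linear scan of the registered
// handles; returns the slot index, or -1 when the buffer is unknown.
// buffer_handle_t comes from <system/window.h>; parameter names are made up.
static int findRegisteredBufferIndex(buffer_handle_t *bufferHandles[],
        uint32_t startIdx, uint32_t numSlots, void *object)
{
    buffer_handle_t *key = (buffer_handle_t *)object;
    if (key == NULL) {
        return -1;
    }
    for (uint32_t i = startIdx; i < numSlots; i++) {
        if (bufferHandles[i] == key) {
            return (int)i;       // already registered in this slot
        }
    }
    return -1;                   // not registered yet
}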

Definition of mMemInfo:

cpp
QCamera3Memory::QCamera3Memory()
{
    mBufferCount = 0;
    for (int i = 0; i < MM_CAMERA_MAX_NUM_FRAMES; i++) {
        mMemInfo[i].fd = -1;
        mMemInfo[i].handle = 0;
        mMemInfo[i].size = 0;
        mCurrentFrameNumbers[i] = -1;
    }
    main_ion_fd = open("/dev/ion", O_RDONLY);
}

/hardware/qcom/camera/msm8998/QCamera2/stack/common/mm_camera_interface.h

c
#define MM_CAMERA_MAX_NUM_FRAMES CAM_MAX_NUM_BUFS_PER_STREAM

/hardware/qcom/camera/msm8998/QCamera2/stack/common/cam_types.h

c
#define CAM_MAX_NUM_BUFS_PER_STREAM 64

/hardware/qcom/camera/msm8998/QCamera2/HAL3/QCamera3Mem.h

cpp
struct QCamera3MemInfo mMemInfo[MM_CAMERA_MAX_NUM_FRAMES];
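
For reference, the only QCamera3MemInfo fields the registration path above actually touches are the fd, the size, and the ION handle. A reduced sketch of the struct, limited to those members (the real definition in QCamera3Mem.h may contain more):

cpp
// Reduced, illustrative view of QCamera3MemInfo -- only the members that the
// registerBuffer() path above fills in; not the complete definition.
struct QCamera3MemInfoSketch {
    int fd;          // fd taken over from the gralloc private_handle_t
    size_t size;     // buffer size reported by the private handle
    int handle;      // ion_user_handle_t returned by ION_IOC_IMPORT
};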

mCameraHandle->ops->start_channel

After the channel request() calls have completed, mCameraHandle->ops->start_channel(mCameraHandle->camera_handle, mChannelHandle) is executed next:

c
int32_t mm_camera_start_channel(mm_camera_obj_t *my_obj, uint32_t ch_id)
{
    int32_t rc = -1;
    mm_channel_t * ch_obj =
        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
​
    if (NULL != ch_obj) {
        pthread_mutex_lock(&ch_obj->ch_lock);
        pthread_mutex_unlock(&my_obj->cam_lock);
​
        rc = mm_channel_fsm_fn(ch_obj,
                               MM_CHANNEL_EVT_START,
                               NULL,
                               NULL);
    } else {
        pthread_mutex_unlock(&my_obj->cam_lock);
    }
​
    return rc;
}

This enters the mm_channel state machine. Since the earlier add_stream did not change the channel's state, the case taken this time is still MM_CHANNEL_STATE_STOPPED:

c
int32_t mm_channel_fsm_fn(mm_channel_t *my_obj,
                          mm_channel_evt_type_t evt,
                          void * in_val,
                          void * out_val)
{
    int32_t rc = -1;
​
    LOGD("E state = %d", my_obj->state);
    switch (my_obj->state) {
    ........................
    case MM_CHANNEL_STATE_STOPPED:
        rc = mm_channel_fsm_fn_stopped(my_obj, evt, in_val, out_val);
        break;
    ........................
    default:
        LOGD("Not a valid state (%d)", my_obj->state);
        break;
    }
​
    /* unlock ch_lock */
    pthread_mutex_unlock(&my_obj->ch_lock);
    LOGD("X rc = %d", rc);
    return rc;
}

Inside mm_channel_fsm_fn_stopped, the case is MM_CHANNEL_EVT_START:

c
case MM_CHANNEL_EVT_START:
{
    rc = mm_channel_start(my_obj);
    /* first stream started in stopped state
             * move to active state */
    if (0 == rc) {
        my_obj->state = MM_CHANNEL_STATE_ACTIVE;
    }
}
break;

This calls mm_channel_start() and, on success, changes the channel object's state to MM_CHANNEL_STATE_ACTIVE:

c
int32_t mm_channel_start(mm_channel_t *my_obj)
{
    int32_t rc = 0;
    int i = 0, j = 0;
    mm_stream_t *s_objs[MAX_STREAM_NUM_IN_BUNDLE] = {NULL};
    uint8_t num_streams_to_start = 0;
    uint8_t num_streams_in_bundle_queue = 0;
    mm_stream_t *s_obj = NULL;
    int meta_stream_idx = 0;
    cam_stream_type_t stream_type = CAM_STREAM_TYPE_DEFAULT;
​
    for (i = 0; i < MAX_STREAM_NUM_IN_BUNDLE; i++) {
        if (my_obj->streams[i].my_hdl > 0) {
            s_obj = mm_channel_util_get_stream_by_handler(my_obj,
                                                          my_obj->streams[i].my_hdl);
            if (NULL != s_obj) {
                stream_type = s_obj->stream_info->stream_type;
                /* remember meta data stream index */
                if ((stream_type == CAM_STREAM_TYPE_METADATA) &&
                        (s_obj->ch_obj == my_obj)) {
                    meta_stream_idx = num_streams_to_start;
                }
                s_objs[num_streams_to_start++] = s_obj;
​
                if (!s_obj->stream_info->noFrameExpected) {
                    num_streams_in_bundle_queue++;
                }
            }
        }
    }
​
    if (meta_stream_idx > 0 ) {
        /* always start meta data stream first, so switch the stream object with the first one */
        s_obj = s_objs[0];
        s_objs[0] = s_objs[meta_stream_idx];
        s_objs[meta_stream_idx] = s_obj;
    }
​
    if (NULL != my_obj->bundle.super_buf_notify_cb) {
        /* need to send up cb, therefore launch thread */
        /* init superbuf queue */
        mm_channel_superbuf_queue_init(&my_obj->bundle.superbuf_queue);
        my_obj->bundle.superbuf_queue.num_streams = num_streams_in_bundle_queue;
        my_obj->bundle.superbuf_queue.expected_frame_id =
                my_obj->bundle.superbuf_queue.attr.user_expected_frame_id;
        my_obj->bundle.superbuf_queue.expected_frame_id_without_led = 0;
        my_obj->bundle.superbuf_queue.led_off_start_frame_id = 0;
        my_obj->bundle.superbuf_queue.led_on_start_frame_id = 0;
        my_obj->bundle.superbuf_queue.led_on_num_frames = 0;
        my_obj->bundle.superbuf_queue.good_frame_id = 0;
​
        for (i = 0; i < num_streams_to_start; i++) {
            /* Only bundle streams that belong to the channel */
            if(!(s_objs[i]->stream_info->noFrameExpected)) {
                if (s_objs[i]->ch_obj == my_obj) {
                    /* set bundled flag to streams */
                    s_objs[i]->is_bundled = 1;
                }
                my_obj->bundle.superbuf_queue.bundled_streams[j++] = s_objs[i]->my_hdl;
            }
        }
​
        /* launch cb thread for dispatching super buf through cb */
        snprintf(my_obj->cb_thread.threadName, THREAD_NAME_SIZE, "CAM_SuperBuf");
        mm_camera_cmd_thread_launch(&my_obj->cb_thread,
                                    mm_channel_dispatch_super_buf,
                                    (void*)my_obj);
​
        /* launch cmd thread for super buf dataCB */
        snprintf(my_obj->cmd_thread.threadName, THREAD_NAME_SIZE, "CAM_SuperBufCB");
        mm_camera_cmd_thread_launch(&my_obj->cmd_thread,
                                    mm_channel_process_stream_buf,
                                    (void*)my_obj);
​
        /* set flag to TRUE */
        my_obj->bundle.is_active = TRUE;
    }
​
    /* link any streams first before starting the rest of the streams */
    for (i = 0; i < num_streams_to_start; i++) {
        if (s_objs[i]->ch_obj != my_obj) {
            pthread_mutex_lock(&s_objs[i]->linked_stream->buf_lock);
            s_objs[i]->linked_stream->linked_obj = my_obj;
            s_objs[i]->linked_stream->is_linked = 1;
            pthread_mutex_unlock(&s_objs[i]->linked_stream->buf_lock);
            continue;
        }
    }
​
    for (i = 0; i < num_streams_to_start; i++) {
        if (s_objs[i]->ch_obj != my_obj) {
            continue;
        }
        /* all streams within a channel should be started at the same time */
        if (s_objs[i]->state == MM_STREAM_STATE_ACTIVE) {
            LOGE("stream already started idx(%d)", i);
            rc = -1;
            break;
        }
​
        /* allocate buf */
        rc = mm_stream_fsm_fn(s_objs[i],
                              MM_STREAM_EVT_GET_BUF,
                              NULL,
                              NULL);
        if (0 != rc) {
            LOGE("get buf failed at idx(%d)", i);
            break;
        }
​
        /* reg buf */
        rc = mm_stream_fsm_fn(s_objs[i],
                              MM_STREAM_EVT_REG_BUF,
                              NULL,
                              NULL);
        if (0 != rc) {
            LOGE("reg buf failed at idx(%d)", i);
            break;
        }
​
        /* start stream */
        rc = mm_stream_fsm_fn(s_objs[i],
                              MM_STREAM_EVT_START,
                              NULL,
                              NULL);
        if (0 != rc) {
            LOGE("start stream failed at idx(%d)", i);
            break;
        }
    }
​
    /* error handling */
    if (0 != rc) {
        /* unlink the streams first */
        for (j = 0; j < num_streams_to_start; j++) {
            if (s_objs[j]->ch_obj != my_obj) {
                pthread_mutex_lock(&s_objs[j]->linked_stream->buf_lock);
                s_objs[j]->linked_stream->is_linked = 0;
                s_objs[j]->linked_stream->linked_obj = NULL;
                pthread_mutex_unlock(&s_objs[j]->linked_stream->buf_lock);
​
                if (TRUE == my_obj->bundle.is_active) {
                    mm_channel_flush_super_buf_queue(my_obj, 0,
                            s_objs[i]->stream_info->stream_type);
                }
                memset(s_objs[j], 0, sizeof(mm_stream_t));
                continue;
            }
        }
​
        for (j = 0; j <= i; j++) {
            if ((NULL == s_objs[j]) || (s_objs[j]->ch_obj != my_obj)) {
                continue;
            }
            /* stop streams*/
            mm_stream_fsm_fn(s_objs[j],
                             MM_STREAM_EVT_STOP,
                             NULL,
                             NULL);
​
            /* unreg buf */
            mm_stream_fsm_fn(s_objs[j],
                             MM_STREAM_EVT_UNREG_BUF,
                             NULL,
                             NULL);
​
            /* put buf back */
            mm_stream_fsm_fn(s_objs[j],
                             MM_STREAM_EVT_PUT_BUF,
                             NULL,
                             NULL);
        }
​
        /* destroy super buf cmd thread */
        if (TRUE == my_obj->bundle.is_active) {
            /* first stop bundle thread */
            mm_camera_cmd_thread_release(&my_obj->cmd_thread);
            mm_camera_cmd_thread_release(&my_obj->cb_thread);
​
            /* deinit superbuf queue */
            mm_channel_superbuf_queue_deinit(&my_obj->bundle.superbuf_queue);
​
            /* memset super buffer queue info */
            my_obj->bundle.is_active = 0;
            memset(&my_obj->bundle.superbuf_queue, 0, sizeof(mm_channel_queue_t));
        }
    }
    my_obj->bWaitForPrepSnapshotDone = 0;
    if (my_obj->bundle.superbuf_queue.attr.enable_frame_sync) {
        LOGH("registering Channel obj %p", my_obj);
        mm_frame_sync_register_channel(my_obj);
    }
    return rc;
}

This function invokes mm_stream_fsm_fn() with MM_STREAM_EVT_GET_BUF as its second argument. Because stream_obj->state was already changed to MM_STREAM_STATE_CFG during config_stream, the state machine dispatches to mm_stream_fsm_cfg():

c
int32_t mm_stream_fsm_cfg(mm_stream_t * my_obj,
                          mm_stream_evt_type_t evt,
                          void * in_val,
                          void * out_val)
{
    int32_t rc = 0;
    LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
          my_obj->my_hdl, my_obj->fd, my_obj->state);
    switch(evt) {
    ........................
    case MM_STREAM_EVT_GET_BUF:
        rc = mm_stream_init_bufs(my_obj);
        /* change state to buff allocated */
        if(0 == rc) {
            my_obj->state = MM_STREAM_STATE_BUFFED;
        }
        break;
    ........................
    default:
        LOGE("invalid state (%d) for evt (%d), in(%p), out(%p)",
                    my_obj->state, evt, in_val, out_val);
    }
    LOGD("X rc = %d", rc);
    return rc;
}

In the MM_STREAM_EVT_GET_BUF branch, mm_stream_init_bufs() is called, which lines up with the earlier analysis.

In other words, after the buffers have been bound in the HAL, the HAL calls start_channel, which through this chain of calls ends up in mm_stream_init_bufs():

/hardware/qcom/camera/msm8998/QCamera2/stack/mm-camera-interface/src/mm_camera_stream.c

c
/*===========================================================================
 * FUNCTION   : mm_stream_init_bufs
 *
 * DESCRIPTION: initialize stream buffers needed. This function will request
 *              buffers needed from upper layer through the mem ops table passed
 *              during configuration stage.
 *
 * PARAMETERS :
 *   @my_obj  : stream object
 *
 * RETURN     : int32_t type of status
 *              0  -- success
 *              -1 -- failure
 *==========================================================================*/
int32_t mm_stream_init_bufs(mm_stream_t * my_obj)
{
    int32_t i, rc = 0;
    uint8_t *reg_flags = NULL;
    LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
          my_obj->my_hdl, my_obj->fd, my_obj->state);
​
    /* deinit buf if it's not NULL*/
    if (NULL != my_obj->buf) {
        mm_stream_deinit_bufs(my_obj);
    }
​
    if (!my_obj->is_res_shared) {
        rc = my_obj->mem_vtbl.get_bufs(&my_obj->frame_offset,
                &my_obj->total_buf_cnt, &reg_flags, &my_obj->buf,
                &my_obj->map_ops, my_obj->mem_vtbl.user_data);
        if (rc == 0) {
            for (i = 0; i < my_obj->total_buf_cnt; i++) {
                my_obj->buf_status[i].initial_reg_flag = reg_flags[i];
            }
        }
    } else {
        rc = mm_camera_muxer_get_stream_bufs(my_obj);
    }
​
    if (0 != rc) {
        LOGE("Error get buf, rc = %d\n", rc);
        return rc;
    }
​
    LOGH("Buffer count = %d buf id = %d",my_obj->buf_num, my_obj->buf_idx);
    for (i = my_obj->buf_idx; i < (my_obj->buf_idx + my_obj->buf_num); i++) {
        my_obj->buf[i].stream_id = my_obj->my_hdl;
        my_obj->buf[i].stream_type = my_obj->stream_info->stream_type;
​
        if (my_obj->buf[i].buf_type == CAM_STREAM_BUF_TYPE_USERPTR) {
            my_obj->buf[i].user_buf.bufs_used =
                    (int8_t)my_obj->stream_info->user_buf_info.frame_buf_cnt;
            if (reg_flags) {
                my_obj->buf[i].user_buf.buf_in_use = reg_flags[i];
            }
        }
    }
​
    if (my_obj->stream_info->streaming_mode == CAM_STREAMING_MODE_BATCH) {
        my_obj->plane_buf = my_obj->buf[0].user_buf.plane_buf;
        if (my_obj->plane_buf != NULL) {
            my_obj->plane_buf_num =
                    my_obj->buf_num *
                    my_obj->stream_info->user_buf_info.frame_buf_cnt;
            for (i = 0; i < my_obj->plane_buf_num; i++) {
                my_obj->plane_buf[i].stream_id = my_obj->my_hdl;
                my_obj->plane_buf[i].stream_type = my_obj->stream_info->stream_type;
            }
        }
        my_obj->cur_bufs_staged = 0;
        my_obj->cur_buf_idx = -1;
    }
​
    free(reg_flags);
    reg_flags = NULL;
​
    /* update in stream info about number of stream buffers */
    my_obj->stream_info->num_bufs = my_obj->total_buf_cnt;
​
    return rc;
}

This function calls my_obj->mem_vtbl.get_bufs:

/hardware/qcom/camera/msm8998/QCamera2/HAL3/QCamera3Stream.cpp

cpp
int32_t QCamera3Stream::get_bufs(
                     cam_frame_len_offset_t *offset,
                     uint8_t *num_bufs,
                     uint8_t **initial_reg_flag,
                     mm_camera_buf_def_t **bufs,
                     mm_camera_map_unmap_ops_tbl_t *ops_tbl,
                     void *user_data)
{
    int32_t rc = NO_ERROR;
    QCamera3Stream *stream = reinterpret_cast<QCamera3Stream *>(user_data);
    if (!stream) {
        LOGE("getBufs invalid stream pointer");
        return NO_MEMORY;
    }
    rc = stream->getBufs(offset, num_bufs, initial_reg_flag, bufs, ops_tbl);
    if (NO_ERROR != rc) {
        LOGE("stream->getBufs failed");
        return NO_MEMORY;
    }
    if (stream->mBatchSize) {
        //Allocate batch buffers if mBatchSize is non-zero. All the output
        //arguments correspond to batch containers and not image buffers
        rc = stream->getBatchBufs(num_bufs, initial_reg_flag,
                bufs, ops_tbl);
    }
    return rc;
}
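
The reason this static get_bufs() can recover its QCamera3Stream is the user_data slot of the memory vtable: during stream setup the stream stores its static trampolines plus this in a mm_camera_stream_mem_vtbl_t, which is what mm_stream_init_bufs() later calls through. A hedged sketch of that wiring, showing only the two fields visible above (variable name hypothetical, not verbatim QCamera3Stream code):

cpp
// Hypothetical sketch of how the vtable is assumed to be populated during
// stream setup; only get_bufs and user_data (the fields used in
// mm_stream_init_bufs above) are shown -- the real mm_camera_stream_mem_vtbl_t
// carries further callbacks.
mm_camera_stream_mem_vtbl_t mem_vtbl;
memset(&mem_vtbl, 0, sizeof(mem_vtbl));
mem_vtbl.get_bufs  = QCamera3Stream::get_bufs;  // static trampoline shown above
mem_vtbl.user_data = this;                      // recovered in get_bufs() via
                                                // reinterpret_cast<QCamera3Stream *>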

This calls stream->getBufs(), where stream is a QCamera3Stream:

cpp
/*===========================================================================
 * FUNCTION   : getBufs
 *
 * DESCRIPTION: allocate stream buffers
 *
 * PARAMETERS :
 *   @offset     : offset info of stream buffers
 *   @num_bufs   : number of buffers allocated
 *   @initial_reg_flag: flag to indicate if buffer needs to be registered
 *                      at kernel initially
 *   @bufs       : output of allocated buffers
 *   @ops_tbl    : ptr to buf mapping/unmapping ops
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3Stream::getBufs(cam_frame_len_offset_t *offset,
                     uint8_t *num_bufs,
                     uint8_t **initial_reg_flag,
                     mm_camera_buf_def_t **bufs,
                     mm_camera_map_unmap_ops_tbl_t *ops_tbl)
{
    int rc = NO_ERROR;
    uint8_t *regFlags;
    Mutex::Autolock lock(mLock);
​
    if (!ops_tbl) {
        LOGE("ops_tbl is NULL");
        return INVALID_OPERATION;
    }
​
    mFrameLenOffset = *offset;
    mMemOps = ops_tbl;
​
    if (mStreamBufs != NULL) {
       LOGE("Failed getBufs being called twice in a row without a putBufs call");
       return INVALID_OPERATION;
    }
    // Get the stream buffers from the channel; internally this calls
    // allocateAll(len) to allocate memory of the requested length
    mStreamBufs = mChannel->getStreamBufs(mFrameLenOffset.frame_len);
    if (!mStreamBufs) {
        LOGE("Failed to allocate stream buffers");
        return NO_MEMORY;
    }
​
    for (uint32_t i = 0; i < mNumBufs; i++) {
        if (mStreamBufs->valid(i)) {
            // Get the buffer size (returns mMemInfo[index].size)
            ssize_t bufSize = mStreamBufs->getSize(i);
            if (BAD_INDEX != bufSize) {
                void* buffer = (mMapStreamBuffers ?
                        mStreamBufs->getPtr(i) : NULL);
                // Map the buffer into the camera daemon process
                rc = ops_tbl->map_ops(i, -1, mStreamBufs->getFd(i),
                        (size_t)bufSize, buffer,
                        CAM_MAPPING_BUF_TYPE_STREAM_BUF,
                        ops_tbl->userdata);
                if (rc < 0) {
                    LOGE("map_stream_buf failed: %d", rc);
                    for (uint32_t j = 0; j < i; j++) {
                        if (mStreamBufs->valid(j)) {
                            ops_tbl->unmap_ops(j, -1,
                                    CAM_MAPPING_BUF_TYPE_STREAM_BUF,
                                    ops_tbl->userdata);
                        }
                    }
                    return INVALID_OPERATION;
                }
            } else {
                LOGE("Failed to retrieve buffer size (bad index)");
                return INVALID_OPERATION;
            }
        }
    }
​
    //regFlags array is allocated by us, but consumed and freed by mm-camera-interface
    regFlags = (uint8_t *)malloc(sizeof(uint8_t) * mNumBufs);
    if (!regFlags) {
        LOGE("Out of memory");
        for (uint32_t i = 0; i < mNumBufs; i++) {
            if (mStreamBufs->valid(i)) {
                ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
                        ops_tbl->userdata);
            }
        }
        return NO_MEMORY;
    }
    memset(regFlags, 0, sizeof(uint8_t) * mNumBufs);
​
    // Allocate the buffer definition array
    mBufDefs = (mm_camera_buf_def_t *)malloc(mNumBufs * sizeof(mm_camera_buf_def_t));
    if (mBufDefs == NULL) {
        LOGE("Failed to allocate mm_camera_buf_def_t %d", rc);
        for (uint32_t i = 0; i < mNumBufs; i++) {
            if (mStreamBufs->valid(i)) {
                ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
                        ops_tbl->userdata);
            }
        }
        free(regFlags);
        regFlags = NULL;
        return INVALID_OPERATION;
    }
    memset(mBufDefs, 0, mNumBufs * sizeof(mm_camera_buf_def_t));
    for (uint32_t i = 0; i < mNumBufs; i++) {
        if (mStreamBufs->valid(i)) {
            // Fill in each buffer definition
            mStreamBufs->getBufDef(mFrameLenOffset, mBufDefs[i], i, mMapStreamBuffers);
        }
    }
​
    rc = mStreamBufs->getRegFlags(regFlags);
    if (rc < 0) {
        LOGE("getRegFlags failed %d", rc);
        for (uint32_t i = 0; i < mNumBufs; i++) {
            if (mStreamBufs->valid(i)) {
                ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
                        ops_tbl->userdata);
            }
        }
        free(mBufDefs);
        mBufDefs = NULL;
        free(regFlags);
        regFlags = NULL;
        return INVALID_OPERATION;
    }
​
    *num_bufs = mNumBufs;
    *initial_reg_flag = regFlags;
    *bufs = mBufDefs;
    return NO_ERROR;
}

After mChannel->getStreamBufs() has provided the stream buffers for mStreamBufs, the next step is to fill in the corresponding mm_camera_buf_def_t entries (mBufDefs) from them via getBufDef():

cpp
int32_t QCamera3Memory::getBufDef(const cam_frame_len_offset_t &offset,
        mm_camera_buf_def_t &bufDef, uint32_t index, bool virtualAddr)
{
    Mutex::Autolock lock(mLock);
​
    if (!mBufferCount) {
        LOGE("Memory not allocated");
        return NO_INIT;
    }
​
    // Buffer file descriptor
    bufDef.fd = mMemInfo[index].fd;
    bufDef.frame_len = mMemInfo[index].size;
    bufDef.mem_info = (void *)this;
    bufDef.buffer = virtualAddr ? getPtrLocked(index) : nullptr;
    bufDef.planes_buf.num_planes = (int8_t)offset.num_planes;
    bufDef.buf_idx = (uint8_t)index;
​
    /* Plane 0 needs to be set separately. Set other planes in a loop */
    bufDef.planes_buf.planes[0].length = offset.mp[0].len;
    bufDef.planes_buf.planes[0].m.userptr = (long unsigned int)mMemInfo[index].fd;
    bufDef.planes_buf.planes[0].data_offset = offset.mp[0].offset;
    bufDef.planes_buf.planes[0].reserved[0] = 0;
    for (int i = 1; i < bufDef.planes_buf.num_planes; i++) {
         bufDef.planes_buf.planes[i].length = offset.mp[i].len;
         bufDef.planes_buf.planes[i].m.userptr = (long unsigned int)mMemInfo[i].fd;
         bufDef.planes_buf.planes[i].data_offset = offset.mp[i].offset;
         bufDef.planes_buf.planes[i].reserved[0] =
                 bufDef.planes_buf.planes[i-1].reserved[0] +
                 bufDef.planes_buf.planes[i-1].length;
    }
​
    return NO_ERROR;
}

While populating bufDef, getPtrLocked() is called:

cpp
/*===========================================================================
 * FUNCTION   : getPtrLocked
 *
 * DESCRIPTION: Return buffer pointer. Please note 'mLock' must be acquired
 *              before calling this method.
 *
 * PARAMETERS :
 *   @index   : index of the buffer
 *
 * RETURN     : buffer ptr
 *==========================================================================*/
void *QCamera3GrallocMemory::getPtrLocked(uint32_t index)
{
    if (MM_CAMERA_MAX_NUM_FRAMES <= index) {
        LOGE("index %d out of bound [0, %d)",
                 index, MM_CAMERA_MAX_NUM_FRAMES);
        return NULL;
    }
    if (index < mStartIdx) {
        LOGE("buffer index %d less than starting index %d",
                 index, mStartIdx);
        return NULL;
    }
​
​
    if (0 == mMemInfo[index].handle) {
        LOGE("Buffer at %d not registered", index);
        return NULL;
    }
​
    if (mPtr[index] == nullptr) {
        void *vaddr = NULL;
        vaddr = mmap(NULL,
                mMemInfo[index].size,
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                mMemInfo[index].fd, 0);
​
        if (vaddr == MAP_FAILED) {
            LOGE("mmap failed for buffer index %d, size %d: %s(%d)",
                    index, mMemInfo[index].size, strerror(errno), errno);
            return NULL;
        } else {
            mPtr[index] = vaddr;
        }
    }
​
    return mPtr[index];
}

Here, mmap() is used to map the buffer into this process via mMemInfo[index].fd, and the mapped address stored in mPtr[index] is returned.
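
As a standalone illustration of that mapping step (not HAL code; fd and size stand in for mMemInfo[index].fd and mMemInfo[index].size), mapping a shared buffer by its file descriptor looks like this:

cpp
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

// Map a buffer shared by file descriptor into this process, use it, unmap it.
// `fd` and `size` are assumed to come from an already registered buffer.
int mapAndTouchBuffer(int fd, size_t size)
{
    void *vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (vaddr == MAP_FAILED) {
        fprintf(stderr, "mmap failed: %s (%d)\n", strerror(errno), errno);
        return -1;
    }

    memset(vaddr, 0, size);     // the mapping is now usable in this process

    munmap(vaddr, size);        // drop the mapping when done
    return 0;
}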

Once QCamera3Memory::getBufDef() has run, the bufDef argument is fully populated. Tracing back up the call chain to mm_stream_init_bufs(), it follows that my_obj->buf ends up fully populated as well.

android·opencv