Android Camera2 HAL3 & device.start Flow Analysis (Part 1)

This analysis is based on Android P.

The previous section described createCaptureSession and the corresponding channel and stream configuration in the HAL layer. With that in place, the application layer can now create requests and send them down to the HAL.

On the application side, this breaks down into two steps (see the sketch after the list):

  • createCaptureRequest;
  • setRepeatingRequest;
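To make these two steps concrete, below is a hedged sketch using the equivalent NDK camera2 API (error handling elided; the device, window, and session parameters are assumptions for illustration, not from the source):

#include <camera/NdkCameraDevice.h>
#include <camera/NdkCameraCaptureSession.h>
#include <camera/NdkCaptureRequest.h>

// Sketch: start a repeating preview, assuming an opened device, a configured
// session, and a preview ANativeWindow.
void startPreview(ACameraDevice* device, ANativeWindow* window,
                  ACameraCaptureSession* session) {
    // Step 1: build a request from the PREVIEW template; on the service side
    // this ends up in Camera3Device::createDefaultRequest.
    ACaptureRequest* request = nullptr;
    ACameraDevice_createCaptureRequest(device, TEMPLATE_PREVIEW, &request);

    // Attach the preview surface as an output target.
    ACameraOutputTarget* target = nullptr;
    ACameraOutputTarget_create(window, &target);
    ACaptureRequest_addTarget(request, target);

    // Step 2: submit it as a repeating request; this drives the
    // RequestThread::threadLoop analyzed later in this article.
    ACameraCaptureSession_setRepeatingRequest(session, /*callbacks*/ nullptr,
            /*numRequests*/ 1, &request, /*captureSequenceId*/ nullptr);
}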

createCaptureRequest

From the earlier analysis we know this method flows into Camera3Device::createDefaultRequest:

status_t Camera3Device::createDefaultRequest(int templateId,
        CameraMetadata *request) {
    ATRACE_CALL();
    ALOGV("%s: for template %d", __FUNCTION__, templateId);
​
    if (templateId <= 0 || templateId >= CAMERA3_TEMPLATE_COUNT) {
        android_errorWriteWithInfoLog(CameraService::SN_EVENT_LOG_ID, "26866110",
                IPCThreadState::self()->getCallingUid(), nullptr, 0);
        return BAD_VALUE;
    }
​
    Mutex::Autolock il(mInterfaceLock);
​
    {
        Mutex::Autolock l(mLock);
        switch (mStatus) {
            case STATUS_ERROR:
                CLOGE("Device has encountered a serious error");
                return INVALID_OPERATION;
            case STATUS_UNINITIALIZED:
                CLOGE("Device is not initialized!");
                return INVALID_OPERATION;
            case STATUS_UNCONFIGURED:
            case STATUS_CONFIGURED:
            case STATUS_ACTIVE:
                // OK
                break;
            default:
                SET_ERR_L("Unexpected status: %d", mStatus);
                return INVALID_OPERATION;
        }
​
        if (!mRequestTemplateCache[templateId].isEmpty()) {
            *request = mRequestTemplateCache[templateId];
            mLastTemplateId = templateId;
            return OK;
        }
    }
​
    camera_metadata_t *rawRequest;
    status_t res = mInterface->constructDefaultRequestSettings(
            (camera3_request_template_t) templateId, &rawRequest);
​
    {
        Mutex::Autolock l(mLock);
        if (res == BAD_VALUE) {
            ALOGI("%s: template %d is not supported on this camera device",
                  __FUNCTION__, templateId);
            return res;
        } else if (res != OK) {
            CLOGE("Unable to construct request template %d: %s (%d)",
                    templateId, strerror(-res), res);
            return res;
        }
​
        set_camera_metadata_vendor_id(rawRequest, mVendorTagId);
        // Cache the freshly constructed rawRequest in mRequestTemplateCache[templateId],
        // then copy it out to *request; later calls for the same template hit the cache above.
        mRequestTemplateCache[templateId].acquire(rawRequest);
​
        *request = mRequestTemplateCache[templateId];
        mLastTemplateId = templateId;
    }
    return OK;
}

Creating a CameraMetadataNative object internally creates a CameraMetadata object (CameraMetadata.cpp); operations on CameraMetadataNative are, at bottom, operations on that CameraMetadata, i.e. on its underlying camera_metadata_t *metadata.

At this point we have created the CameraMetadata object; it manages a buffer holding the default metadata for a given use case such as preview, video recording, or still capture.
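As a hedged illustration (the helper below is invented for this article, not from the source), a holder of the returned CameraMetadata can query individual defaults through find(), which reads that underlying buffer:

// Sketch only: assumes an initialized Camera3Device.
void dumpDefaultAfMode(const android::sp<android::Camera3Device>& device) {
    android::CameraMetadata request;
    if (device->createDefaultRequest(CAMERA3_TEMPLATE_PREVIEW, &request) == android::OK) {
        camera_metadata_entry_t entry = request.find(ANDROID_CONTROL_AF_MODE);
        if (entry.count > 0) {
            // For the preview template this is typically CONTINUOUS_PICTURE,
            // as set by translateCapabilityToMetadata further below.
            ALOGD("Default preview AF mode: %u", entry.data.u8[0]);
        }
    }
}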

createDefaultRequest calls mInterface->constructDefaultRequestSettings. Since mInterface is of type HalInterface, let's look at HalInterface::constructDefaultRequestSettings:

status_t Camera3Device::HalInterface::constructDefaultRequestSettings(
        camera3_request_template_t templateId,
        /*out*/ camera_metadata_t **requestTemplate) {
    ATRACE_NAME("CameraHal::constructDefaultRequestSettings");
    if (!valid()) return INVALID_OPERATION;
    status_t res = OK;
​
    common::V1_0::Status status;
​
    auto requestCallback = [&status, &requestTemplate]
            (common::V1_0::Status s, const device::V3_2::CameraMetadata& request) {
            status = s;
            if (status == common::V1_0::Status::OK) {
                const camera_metadata *r =
                        reinterpret_cast<const camera_metadata_t*>(request.data());
                size_t expectedSize = request.size();
                int ret = validate_camera_metadata_structure(r, &expectedSize);
                if (ret == OK || ret == CAMERA_METADATA_VALIDATION_SHIFTED) {
                    *requestTemplate = clone_camera_metadata(r);
                    if (*requestTemplate == nullptr) {
                        ALOGE("%s: Unable to clone camera metadata received from HAL",
                                __FUNCTION__);
                        status = common::V1_0::Status::INTERNAL_ERROR;
                    }
                } else {
                    ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
                    status = common::V1_0::Status::INTERNAL_ERROR;
                }
            }
        };
    hardware::Return<void> err;
    RequestTemplate id;
    switch (templateId) {
        case CAMERA3_TEMPLATE_PREVIEW:
            id = RequestTemplate::PREVIEW;
            break;
        case CAMERA3_TEMPLATE_STILL_CAPTURE:
            id = RequestTemplate::STILL_CAPTURE;
            break;
        case CAMERA3_TEMPLATE_VIDEO_RECORD:
            id = RequestTemplate::VIDEO_RECORD;
            break;
        case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
            id = RequestTemplate::VIDEO_SNAPSHOT;
            break;
        case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
            id = RequestTemplate::ZERO_SHUTTER_LAG;
            break;
        case CAMERA3_TEMPLATE_MANUAL:
            id = RequestTemplate::MANUAL;
            break;
        default:
            // Unknown template ID, or this HAL is too old to support it
            return BAD_VALUE;
    }
    err = mHidlSession->constructDefaultRequestSettings(id, requestCallback);
​
    if (!err.isOk()) {
        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
        res = DEAD_OBJECT;
    } else {
        res = CameraProviderManager::mapToStatusT(status);
    }
​
    return res;
}

First, the function's input parameters:

Parameter        Description
templateId       Passed down through the calling chain unchanged; it is the value the application supplied to createCaptureRequest().
requestTemplate  Out parameter: receives the request template (default settings) used to describe the request.

In this function the templateId value is mapped to id, whose type is RequestTemplate.

Once id is determined, mHidlSession->constructDefaultRequestSettings is called. As we saw in the previous section, mHidlSession is of type CameraDeviceSession:

// Methods from ::android::hardware::camera::device::V3_2::ICameraDeviceSession follow.
Return<void> CameraDeviceSession::constructDefaultRequestSettings(
        RequestTemplate type, ICameraDeviceSession::constructDefaultRequestSettings_cb _hidl_cb)  {
    CameraMetadata outMetadata;
    Status status = constructDefaultRequestSettingsRaw( (int) type, &outMetadata);
    _hidl_cb(status, outMetadata);
    return Void();
}
​
Status CameraDeviceSession::constructDefaultRequestSettingsRaw(int type, CameraMetadata *outMetadata) {
    Status status = initStatus();
    const camera_metadata_t *rawRequest;
    if (status == Status::OK) {
        ATRACE_BEGIN("camera3->construct_default_request_settings");
        rawRequest = mDevice->ops->construct_default_request_settings(mDevice, (int) type);
        ATRACE_END();
        if (rawRequest == nullptr) {
            ALOGI("%s: template %d is not supported on this camera device",
                  __FUNCTION__, type);
            status = Status::ILLEGAL_ARGUMENT;
        } else {
            mOverridenRequest.clear();
            mOverridenRequest.append(rawRequest);
            // Derive some new keys for backward compatibility
            if (mDerivePostRawSensKey && !mOverridenRequest.exists(
                    ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST)) {
                int32_t defaultBoost[1] = {100};
                mOverridenRequest.update(
                        ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST,
                        defaultBoost, 1);
            }
            const camera_metadata_t *metaBuffer =
                    mOverridenRequest.getAndLock();
            convertToHidl(metaBuffer, outMetadata);
            mOverridenRequest.unlock(metaBuffer);
        }
    }
    return status;
}

Next, mDevice->ops->construct_default_request_settings is called; in this Qualcomm HAL it resolves to:

const camera_metadata_t* QCamera3HardwareInterface::
    construct_default_request_settings(const struct camera3_device *device,
                                        int type)
{
​
    LOGD("E");
    camera_metadata_t* fwk_metadata = NULL;
    QCamera3HardwareInterface *hw =
        reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
    if (!hw) {
        LOGE("NULL camera device");
        return NULL;
    }
​
    fwk_metadata = hw->translateCapabilityToMetadata(type);
​
    LOGD("X");
    return fwk_metadata;
}

This function builds the default capture settings for the various camera use cases; the actual work is done in translateCapabilityToMetadata:

camera_metadata_t* QCamera3HardwareInterface::translateCapabilityToMetadata(int type)
{
    if (mDefaultMetadata[type] != NULL) {
        return mDefaultMetadata[type];
    }
    //first time we are handling this request
    //fill up the metadata structure using the wrapper class
    CameraMetadata settings;
    //translate from cam_capability_t to camera_metadata_tag_t
    static const uint8_t requestType = ANDROID_REQUEST_TYPE_CAPTURE;
    settings.update(ANDROID_REQUEST_TYPE, &requestType, 1);
    int32_t defaultRequestID = 0;
    settings.update(ANDROID_REQUEST_ID, &defaultRequestID, 1);
​
    /* OIS disable */
    char ois_prop[PROPERTY_VALUE_MAX];
    memset(ois_prop, 0, sizeof(ois_prop));
    property_get("persist.camera.ois.disable", ois_prop, "0");
    uint8_t ois_disable = (uint8_t)atoi(ois_prop);
​
    /* Force video to use OIS */
    char videoOisProp[PROPERTY_VALUE_MAX];
    memset(videoOisProp, 0, sizeof(videoOisProp));
    property_get("persist.camera.ois.video", videoOisProp, "1");
    uint8_t forceVideoOis = (uint8_t)atoi(videoOisProp);
​
    // Hybrid AE enable/disable
    char hybrid_ae_prop[PROPERTY_VALUE_MAX];
    memset(hybrid_ae_prop, 0, sizeof(hybrid_ae_prop));
    property_get("persist.camera.hybrid_ae.enable", hybrid_ae_prop, "0");
    uint8_t hybrid_ae = (uint8_t)atoi(hybrid_ae_prop);
​
    uint8_t controlIntent = 0;
    uint8_t focusMode;
    uint8_t vsMode;
    uint8_t optStabMode;
    uint8_t cacMode;
    uint8_t edge_mode;
    uint8_t noise_red_mode;
    uint8_t shading_mode;
    uint8_t hot_pixel_mode;
    uint8_t tonemap_mode;
    bool highQualityModeEntryAvailable = FALSE;
    bool fastModeEntryAvailable = FALSE;
    uint8_t histogramEnable = false;
    vsMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
    optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
    uint8_t shadingmap_mode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
    uint8_t trackingAfTrigger = NEXUS_EXPERIMENTAL_2017_TRACKING_AF_TRIGGER_IDLE;
    uint8_t enableZsl = ANDROID_CONTROL_ENABLE_ZSL_FALSE;
​
    switch (type) {
      case CAMERA3_TEMPLATE_PREVIEW:
        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
        edge_mode = ANDROID_EDGE_MODE_FAST;
        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
        shading_mode = ANDROID_SHADING_MODE_FAST;
        hot_pixel_mode = ANDROID_HOT_PIXEL_MODE_FAST;
        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
        break;
      case CAMERA3_TEMPLATE_STILL_CAPTURE:
        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
        edge_mode = ANDROID_EDGE_MODE_HIGH_QUALITY;
        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
        shading_mode = ANDROID_SHADING_MODE_HIGH_QUALITY;
        hot_pixel_mode = ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY;
        tonemap_mode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
        // Order of priority for default CAC is HIGH Quality -> FAST -> OFF
        for (size_t i = 0; i < gCamCapability[mCameraId]->aberration_modes_count; i++) {
            if (gCamCapability[mCameraId]->aberration_modes[i] ==
                    CAM_COLOR_CORRECTION_ABERRATION_HIGH_QUALITY) {
                highQualityModeEntryAvailable = TRUE;
            } else if (gCamCapability[mCameraId]->aberration_modes[i] ==
                    CAM_COLOR_CORRECTION_ABERRATION_FAST) {
                fastModeEntryAvailable = TRUE;
            }
        }
        if (highQualityModeEntryAvailable) {
            cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY;
        } else if (fastModeEntryAvailable) {
            cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
        }
        if (CAM_SENSOR_RAW == gCamCapability[mCameraId]->sensor_type.sens_type) {
            shadingmap_mode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON;
        }
        enableZsl = ANDROID_CONTROL_ENABLE_ZSL_TRUE;
        break;
      case CAMERA3_TEMPLATE_VIDEO_RECORD:
        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
        edge_mode = ANDROID_EDGE_MODE_FAST;
        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
        shading_mode = ANDROID_SHADING_MODE_FAST;
        hot_pixel_mode = ANDROID_HOT_PIXEL_MODE_FAST;
        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
        if (forceVideoOis)
            optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
        break;
      case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
        edge_mode = ANDROID_EDGE_MODE_FAST;
        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
        shading_mode = ANDROID_SHADING_MODE_FAST;
        hot_pixel_mode = ANDROID_HOT_PIXEL_MODE_FAST;
        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
        if (forceVideoOis)
            optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
        break;
      case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
        edge_mode = ANDROID_EDGE_MODE_ZERO_SHUTTER_LAG;
        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG;
        shading_mode = ANDROID_SHADING_MODE_FAST;
        hot_pixel_mode = ANDROID_HOT_PIXEL_MODE_FAST;
        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
        break;
      case CAMERA3_TEMPLATE_MANUAL:
        edge_mode = ANDROID_EDGE_MODE_FAST;
        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
        shading_mode = ANDROID_SHADING_MODE_FAST;
        hot_pixel_mode = ANDROID_HOT_PIXEL_MODE_FAST;
        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
        focusMode = ANDROID_CONTROL_AF_MODE_OFF;
        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
        break;
      default:
        edge_mode = ANDROID_EDGE_MODE_FAST;
        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
        shading_mode = ANDROID_SHADING_MODE_FAST;
        hot_pixel_mode = ANDROID_HOT_PIXEL_MODE_FAST;
        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
        break;
    }
    // Set CAC to OFF if underlying device doesn't support
    if (gCamCapability[mCameraId]->aberration_modes_count == 0) {
        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
    }
    settings.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, &cacMode, 1);
    settings.update(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
    settings.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vsMode, 1);
    if (gCamCapability[mCameraId]->supported_focus_modes_cnt == 1) {
        focusMode = ANDROID_CONTROL_AF_MODE_OFF;
    }
    settings.update(ANDROID_CONTROL_AF_MODE, &focusMode, 1);
    settings.update(NEXUS_EXPERIMENTAL_2017_HISTOGRAM_ENABLE, &histogramEnable, 1);
    settings.update(NEXUS_EXPERIMENTAL_2017_TRACKING_AF_TRIGGER, &trackingAfTrigger, 1);
​
    if (gCamCapability[mCameraId]->optical_stab_modes_count == 1 &&
            gCamCapability[mCameraId]->optical_stab_modes[0] == CAM_OPT_STAB_ON)
        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
    else if ((gCamCapability[mCameraId]->optical_stab_modes_count == 1 &&
            gCamCapability[mCameraId]->optical_stab_modes[0] == CAM_OPT_STAB_OFF)
            || ois_disable)
        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
    settings.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE, &optStabMode, 1);
    settings.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, &shadingmap_mode, 1);
​
    settings.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
            &gCamCapability[mCameraId]->exposure_compensation_default, 1);
​
    static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
    settings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
​
    static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
    settings.update(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
​
    static const uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
    settings.update(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
​
    static const uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
    settings.update(ANDROID_CONTROL_MODE, &controlMode, 1);
​
    static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
    settings.update(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
​
    static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
    settings.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
​
    static const uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
    settings.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
​
    /*flash*/
    static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
    settings.update(ANDROID_FLASH_MODE, &flashMode, 1);
​
    static const uint8_t flashFiringLevel = CAM_FLASH_FIRING_LEVEL_4;
    settings.update(ANDROID_FLASH_FIRING_POWER,
            &flashFiringLevel, 1);
​
    /* lens */
    float default_aperture = gCamCapability[mCameraId]->apertures[0];
    settings.update(ANDROID_LENS_APERTURE, &default_aperture, 1);
​
    if (gCamCapability[mCameraId]->filter_densities_count) {
        float default_filter_density = gCamCapability[mCameraId]->filter_densities[0];
        settings.update(ANDROID_LENS_FILTER_DENSITY, &default_filter_density,
                        gCamCapability[mCameraId]->filter_densities_count);
    }
​
    float default_focal_length = gCamCapability[mCameraId]->focal_length;
    settings.update(ANDROID_LENS_FOCAL_LENGTH, &default_focal_length, 1);
​
    static const uint8_t demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
    settings.update(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
​
    static const int32_t testpatternMode = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
    settings.update(ANDROID_SENSOR_TEST_PATTERN_MODE, &testpatternMode, 1);
​
    /* face detection (default to OFF) */
    static const uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
    settings.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
​
    static const uint8_t histogramMode = QCAMERA3_HISTOGRAM_MODE_OFF;
    settings.update(QCAMERA3_HISTOGRAM_MODE, &histogramMode, 1);
​
    static const uint8_t sharpnessMapMode = ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF;
    settings.update(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &sharpnessMapMode, 1);
​
    static const uint8_t hotPixelMapMode = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
    settings.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1);
​
​
    static const uint8_t blackLevelLock = ANDROID_BLACK_LEVEL_LOCK_OFF;
    settings.update(ANDROID_BLACK_LEVEL_LOCK, &blackLevelLock, 1);
​
    /* Exposure time(Update the Min Exposure Time)*/
    int64_t default_exposure_time = gCamCapability[mCameraId]->exposure_time_range[0];
    settings.update(ANDROID_SENSOR_EXPOSURE_TIME, &default_exposure_time, 1);
​
    /* frame duration */
    static const int64_t default_frame_duration = NSEC_PER_33MSEC;
    settings.update(ANDROID_SENSOR_FRAME_DURATION, &default_frame_duration, 1);
​
    /* sensitivity */
    static const int32_t default_sensitivity = 100;
    settings.update(ANDROID_SENSOR_SENSITIVITY, &default_sensitivity, 1);
#ifndef USE_HAL_3_3
    static const int32_t default_isp_sensitivity =
            gCamCapability[mCameraId]->isp_sensitivity_range.min_sensitivity;
    settings.update(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST, &default_isp_sensitivity, 1);
#endif
​
    /*edge mode*/
    settings.update(ANDROID_EDGE_MODE, &edge_mode, 1);
​
    /*noise reduction mode*/
    settings.update(ANDROID_NOISE_REDUCTION_MODE, &noise_red_mode, 1);
​
    /*shading mode*/
    settings.update(ANDROID_SHADING_MODE, &shading_mode, 1);
​
    /*hot pixel mode*/
    settings.update(ANDROID_HOT_PIXEL_MODE, &hot_pixel_mode, 1);
​
    /*color correction mode*/
    static const uint8_t color_correct_mode = ANDROID_COLOR_CORRECTION_MODE_FAST;
    settings.update(ANDROID_COLOR_CORRECTION_MODE, &color_correct_mode, 1);
​
    /*transform matrix mode*/
    settings.update(ANDROID_TONEMAP_MODE, &tonemap_mode, 1);
​
    int32_t scaler_crop_region[4];
    scaler_crop_region[0] = 0;
    scaler_crop_region[1] = 0;
    scaler_crop_region[2] = gCamCapability[mCameraId]->active_array_size.width;
    scaler_crop_region[3] = gCamCapability[mCameraId]->active_array_size.height;
    settings.update(ANDROID_SCALER_CROP_REGION, scaler_crop_region, 4);
​
    static const uint8_t antibanding_mode = ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
    settings.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &antibanding_mode, 1);
​
    /*focus distance*/
    float focus_distance = 0.0;
    settings.update(ANDROID_LENS_FOCUS_DISTANCE, &focus_distance, 1);
​
    /*target fps range: use maximum range for picture, and maximum fixed range for video*/
    /* Restrict template max_fps to 30 */
    float max_range = 0.0;
    float max_fixed_fps = 0.0;
    int32_t fps_range[2] = {0, 0};
    for (uint32_t i = 0; i < gCamCapability[mCameraId]->fps_ranges_tbl_cnt;
            i++) {
        if (gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps >
                TEMPLATE_MAX_PREVIEW_FPS) {
            continue;
        }
        float range = gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps -
            gCamCapability[mCameraId]->fps_ranges_tbl[i].min_fps;
        if (type == CAMERA3_TEMPLATE_PREVIEW ||
                type == CAMERA3_TEMPLATE_STILL_CAPTURE ||
                type == CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG) {
            if (range > max_range) {
                fps_range[0] =
                    (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].min_fps;
                fps_range[1] =
                    (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps;
                max_range = range;
            }
        } else {
            if (range < 0.01 && max_fixed_fps <
                    gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps) {
                fps_range[0] =
                    (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].min_fps;
                fps_range[1] =
                    (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps;
                max_fixed_fps = gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps;
            }
        }
    }
    settings.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, fps_range, 2);
​
    /*precapture trigger*/
    uint8_t precapture_trigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
    settings.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &precapture_trigger, 1);
​
    /*af trigger*/
    uint8_t af_trigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
    settings.update(ANDROID_CONTROL_AF_TRIGGER, &af_trigger, 1);
​
    /* ae & af regions */
    int32_t active_region[] = {
            gCamCapability[mCameraId]->active_array_size.left,
            gCamCapability[mCameraId]->active_array_size.top,
            gCamCapability[mCameraId]->active_array_size.left +
                    gCamCapability[mCameraId]->active_array_size.width,
            gCamCapability[mCameraId]->active_array_size.top +
                    gCamCapability[mCameraId]->active_array_size.height,
            0};
    settings.update(ANDROID_CONTROL_AE_REGIONS, active_region,
            sizeof(active_region) / sizeof(active_region[0]));
    settings.update(ANDROID_CONTROL_AF_REGIONS, active_region,
            sizeof(active_region) / sizeof(active_region[0]));
​
    /* black level lock */
    uint8_t blacklevel_lock = ANDROID_BLACK_LEVEL_LOCK_OFF;
    settings.update(ANDROID_BLACK_LEVEL_LOCK, &blacklevel_lock, 1);
​
    //special defaults for manual template
    if (type == CAMERA3_TEMPLATE_MANUAL) {
        static const uint8_t manualControlMode = ANDROID_CONTROL_MODE_OFF;
        settings.update(ANDROID_CONTROL_MODE, &manualControlMode, 1);
​
        static const uint8_t manualFocusMode = ANDROID_CONTROL_AF_MODE_OFF;
        settings.update(ANDROID_CONTROL_AF_MODE, &manualFocusMode, 1);
​
        static const uint8_t manualAeMode = ANDROID_CONTROL_AE_MODE_OFF;
        settings.update(ANDROID_CONTROL_AE_MODE, &manualAeMode, 1);
​
        static const uint8_t manualAwbMode = ANDROID_CONTROL_AWB_MODE_OFF;
        settings.update(ANDROID_CONTROL_AWB_MODE, &manualAwbMode, 1);
​
        static const uint8_t manualTonemapMode = ANDROID_TONEMAP_MODE_FAST;
        settings.update(ANDROID_TONEMAP_MODE, &manualTonemapMode, 1);
​
        static const uint8_t manualColorCorrectMode = ANDROID_COLOR_CORRECTION_MODE_TRANSFORM_MATRIX;
        settings.update(ANDROID_COLOR_CORRECTION_MODE, &manualColorCorrectMode, 1);
    }
​
​
    /* TNR
     * We'll use this location to determine which modes TNR will be set.
     * We will enable TNR to be on if either of the Preview/Video stream requires TNR
     * This is not to be confused with linking on a per stream basis that decision
     * is still on per-session basis and will be handled as part of config stream
     */
    uint8_t tnr_enable = 0;
​
    if (m_bTnrPreview || m_bTnrVideo) {
​
        switch (type) {
            case CAMERA3_TEMPLATE_VIDEO_RECORD:
                    tnr_enable = 1;
                    break;
​
            default:
                    tnr_enable = 0;
                    break;
        }
​
        int32_t tnr_process_type = (int32_t)getTemporalDenoiseProcessPlate();
        settings.update(QCAMERA3_TEMPORAL_DENOISE_ENABLE, &tnr_enable, 1);
        settings.update(QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE, &tnr_process_type, 1);
​
        LOGD("TNR:%d with process plate %d for template:%d",
                             tnr_enable, tnr_process_type, type);
    }
​
    //Update Link tags to default
    uint8_t sync_type = CAM_TYPE_STANDALONE;
    settings.update(QCAMERA3_DUALCAM_LINK_ENABLE, &sync_type, 1);
​
    uint8_t is_main = 1;
    settings.update(QCAMERA3_DUALCAM_LINK_IS_MAIN, &is_main, 1);
​
    uint8_t related_camera_id = mCameraId;
    settings.update(QCAMERA3_DUALCAM_LINK_RELATED_CAMERA_ID, &related_camera_id, 1);
​
    /* CDS default */
    char prop[PROPERTY_VALUE_MAX];
    memset(prop, 0, sizeof(prop));
    property_get("persist.camera.CDS", prop, "Auto");
    cam_cds_mode_type_t cds_mode = CAM_CDS_MODE_AUTO;
    cds_mode = lookupProp(CDS_MAP, METADATA_MAP_SIZE(CDS_MAP), prop);
    if (CAM_CDS_MODE_MAX == cds_mode) {
        cds_mode = CAM_CDS_MODE_AUTO;
    }
​
    /* Disabling CDS in templates which have TNR enabled*/
    if (tnr_enable)
        cds_mode = CAM_CDS_MODE_OFF;
​
    int32_t mode = cds_mode;
    settings.update(QCAMERA3_CDS_MODE, &mode, 1);
​
    /* Manual Convergence AEC Speed is disabled by default*/
    float default_aec_speed = 0;
    settings.update(QCAMERA3_AEC_CONVERGENCE_SPEED, &default_aec_speed, 1);
​
    /* Manual Convergence AWB Speed is disabled by default*/
    float default_awb_speed = 0;
    settings.update(QCAMERA3_AWB_CONVERGENCE_SPEED, &default_awb_speed, 1);
​
    // Set instant AEC to normal convergence by default
    uint8_t instant_aec_mode = (uint8_t)QCAMERA3_INSTANT_AEC_NORMAL_CONVERGENCE;
    settings.update(QCAMERA3_INSTANT_AEC_MODE, &instant_aec_mode, 1);
​
    uint8_t oisDataMode = ANDROID_STATISTICS_OIS_DATA_MODE_OFF;
    if (mCameraId == 0) {
        oisDataMode = ANDROID_STATISTICS_OIS_DATA_MODE_ON;
    }
    settings.update(ANDROID_STATISTICS_OIS_DATA_MODE, &oisDataMode, 1);
​
    if (gExposeEnableZslKey) {
        settings.update(ANDROID_CONTROL_ENABLE_ZSL, &enableZsl, 1);
        int32_t postview = 0;
        settings.update(NEXUS_EXPERIMENTAL_2017_POSTVIEW, &postview, 1);
        int32_t continuousZslCapture = 0;
        settings.update(NEXUS_EXPERIMENTAL_2017_CONTINUOUS_ZSL_CAPTURE, &continuousZslCapture, 1);
        // Disable HDR+ for templates other than CAMERA3_TEMPLATE_STILL_CAPTURE and
        // CAMERA3_TEMPLATE_PREVIEW.
        int32_t disableHdrplus = (type == CAMERA3_TEMPLATE_STILL_CAPTURE ||
                                  type == CAMERA3_TEMPLATE_PREVIEW) ? 0 : 1;
        settings.update(NEXUS_EXPERIMENTAL_2017_DISABLE_HDRPLUS, &disableHdrplus, 1);
​
        // Set hybrid_ae tag in PREVIEW and STILL_CAPTURE templates to 1 so that
        // hybrid ae is enabled for 3rd party app HDR+.
        if (type == CAMERA3_TEMPLATE_PREVIEW ||
                type == CAMERA3_TEMPLATE_STILL_CAPTURE) {
            hybrid_ae = 1;
        }
    }
    /* hybrid ae */
    settings.update(NEXUS_EXPERIMENTAL_2016_HYBRID_AE_ENABLE, &hybrid_ae, 1);
​
    int32_t fwk_hdr = QCAMERA3_VIDEO_HDR_MODE_OFF;
    settings.update(QCAMERA3_VIDEO_HDR_MODE, &fwk_hdr, 1);
​
    mDefaultMetadata[type] = settings.release();
​
    return mDefaultMetadata[type];
}

This function assigns values for the properties CameraMetadata needs, chosen according to type, and then writes each of them into the CameraMetadata object via CameraMetadata::update, packaging everything into an up-to-date CameraMetadata object.
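As a minimal, hedged sketch of this wrapper pattern (the function is illustrative, not part of the HAL source): fill a stack-allocated CameraMetadata through typed update() calls, then transfer ownership of the raw buffer with release():

#include <camera/CameraMetadata.h>
#include <system/camera_metadata_tags.h>

camera_metadata_t* buildMinimalDefaults() {
    android::CameraMetadata settings;

    static const uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
    settings.update(ANDROID_CONTROL_MODE, &controlMode, 1);        // byte overload

    static const int32_t sensitivity = 100;
    settings.update(ANDROID_SENSOR_SENSITIVITY, &sensitivity, 1);  // int32 overload

    // release() detaches the underlying camera_metadata_t; the caller (in the
    // HAL above, mDefaultMetadata[type]) now owns it and must free it later.
    return settings.release();
}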

Let's look at how update is implemented:

status_t CameraMetadata::update(uint32_t tag,
        const int32_t *data, size_t data_count) {
    status_t res;
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return INVALID_OPERATION;
    }
    if ( (res = checkType(tag, TYPE_INT32)) != OK) {
        return res;
    }
    return updateImpl(tag, (const void*)data, data_count);
}
​
status_t CameraMetadata::update(uint32_t tag,
        const uint8_t *data, size_t data_count) {
    status_t res;
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return INVALID_OPERATION;
    }
    if ( (res = checkType(tag, TYPE_BYTE)) != OK) {
        return res;
    }
    return updateImpl(tag, (const void*)data, data_count);
}
​
status_t CameraMetadata::update(uint32_t tag,
        const float *data, size_t data_count) {
    status_t res;
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return INVALID_OPERATION;
    }
    if ( (res = checkType(tag, TYPE_FLOAT)) != OK) {
        return res;
    }
    return updateImpl(tag, (const void*)data, data_count);
}
​
status_t CameraMetadata::update(uint32_t tag,
        const int64_t *data, size_t data_count) {
    status_t res;
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return INVALID_OPERATION;
    }
    if ( (res = checkType(tag, TYPE_INT64)) != OK) {
        return res;
    }
    return updateImpl(tag, (const void*)data, data_count);
}
​
status_t CameraMetadata::update(uint32_t tag,
        const double *data, size_t data_count) {
    status_t res;
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return INVALID_OPERATION;
    }
    if ( (res = checkType(tag, TYPE_DOUBLE)) != OK) {
        return res;
    }
    return updateImpl(tag, (const void*)data, data_count);
}
​
status_t CameraMetadata::update(uint32_t tag,
        const camera_metadata_rational_t *data, size_t data_count) {
    status_t res;
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return INVALID_OPERATION;
    }
    if ( (res = checkType(tag, TYPE_RATIONAL)) != OK) {
        return res;
    }
    return updateImpl(tag, (const void*)data, data_count);
}
​
status_t CameraMetadata::update(uint32_t tag,
        const String8 &string) {
    status_t res;
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return INVALID_OPERATION;
    }
    if ( (res = checkType(tag, TYPE_BYTE)) != OK) {
        return res;
    }
    // string.size() doesn't count the null termination character.
    return updateImpl(tag, (const void*)string.string(), string.size() + 1);
}
​
status_t CameraMetadata::update(const camera_metadata_ro_entry &entry) {
    status_t res;
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return INVALID_OPERATION;
    }
    if ( (res = checkType(entry.tag, entry.type)) != OK) {
        return res;
    }
    return updateImpl(entry.tag, (const void*)entry.data.u8, entry.count);
}

As shown above, the CameraMetadata class provides multiple update overloads, one per tag data type, each storing a tag together with its data; the per-type checkType validation ensures every entry is stored with the correct type.
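For example (an illustrative fragment, not from the source), data of the wrong type for a tag is rejected by checkType before anything is written:

android::CameraMetadata md;

uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
md.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);   // OK: byte tag, byte data

int32_t bogus = 1;
android::status_t res = md.update(ANDROID_CONTROL_AE_MODE, &bogus, 1);
// res != OK: the int32_t overload calls checkType(tag, TYPE_INT32), which
// fails because ANDROID_CONTROL_AE_MODE is a TYPE_BYTE tag.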

All of the update overloads eventually funnel into updateImpl:

status_t CameraMetadata::updateImpl(uint32_t tag, const void *data,
        size_t data_count) {
    status_t res;
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return INVALID_OPERATION;
    }
    int type = get_camera_metadata_tag_type(tag);
    if (type == -1) {
        ALOGE("%s: Tag %d not found", __FUNCTION__, tag);
        return BAD_VALUE;
    }
    // Safety check - ensure that data isn't pointing to this metadata, since
    // that would get invalidated if a resize is needed
    size_t bufferSize = get_camera_metadata_size(mBuffer);
    uintptr_t bufAddr = reinterpret_cast<uintptr_t>(mBuffer);
    uintptr_t dataAddr = reinterpret_cast<uintptr_t>(data);
    if (dataAddr > bufAddr && dataAddr < (bufAddr + bufferSize)) {
        ALOGE("%s: Update attempted with data from the same metadata buffer!",
                __FUNCTION__);
        return INVALID_OPERATION;
    }
​
    size_t data_size = calculate_camera_metadata_entry_data_size(type,
            data_count);
​
    res = resizeIfNeeded(1, data_size);
​
    if (res == OK) {
        camera_metadata_entry_t entry;
        res = find_camera_metadata_entry(mBuffer, tag, &entry);
        if (res == NAME_NOT_FOUND) {
            res = add_camera_metadata_entry(mBuffer,
                    tag, data, data_count);
        } else if (res == OK) {
            res = update_camera_metadata_entry(mBuffer,
                    entry.index, data, data_count, NULL);
        }
    }
​
    if (res != OK) {
        ALOGE("%s: Unable to update metadata entry %s.%s (%x): %s (%d)",
                __FUNCTION__, get_camera_metadata_section_name(tag),
                get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
    }
​
    IF_ALOGV() {
        ALOGE_IF(validate_camera_metadata_structure(mBuffer, /*size*/NULL) !=
                 OK,
​
                 "%s: Failed to validate metadata structure after update %p",
                 __FUNCTION__, mBuffer);
    }
​
    return res;
}

Returning to the default settings: they are described by a camera_metadata_t, and the value handed back by the HAL is a camera_metadata_t pointer whose backing memory is maintained by the HAL itself (hence the clone_camera_metadata call in requestCallback earlier).

The result is then reported back up from CameraDeviceSession::constructDefaultRequestSettings to the caller via the _hidl_cb(status, outMetadata) callback.

That completes the createCaptureRequest flow.

setRepeatingRequest

For the setRepeatingRequest flow, we pick up at the RequestThread::threadLoop thread body:

bool Camera3Device::RequestThread::threadLoop() {
    ATRACE_CALL();
    status_t res;
    // Any function called from threadLoop() must not hold mInterfaceLock since
    // it could lead to deadlocks (disconnect() -> hold mInterfaceMutex -> wait for request thread
    // to finish -> request thread waits on mInterfaceMutex) http://b/143513518
​
    // Handle paused state.
    // Return immediately while paused.
    if (waitIfPaused()) {
        return true;
    }
​
    // Wait for the next batch of requests.
    // Assemble the next batch of requests.
    waitForNextRequestBatch();
    if (mNextRequests.size() == 0) {
        return true;
    }
​
    // Get the latest request ID, if any
    int latestRequestId;
    camera_metadata_entry_t requestIdEntry = mNextRequests[mNextRequests.size() - 1].
            captureRequest->mSettingsList.begin()->metadata.find(ANDROID_REQUEST_ID);
    if (requestIdEntry.count > 0) {
        latestRequestId = requestIdEntry.data.i32[0];
    } else {
        ALOGW("%s: Did not have android.request.id set in the request.", __FUNCTION__);
        latestRequestId = NAME_NOT_FOUND;
    }
​
    // 'mNextRequests' will at this point contain either a set of HFR batched requests
    //  or a single request from streaming or burst. In either case the first element
    //  should contain the latest camera settings that we need to check for any session
    //  parameter updates.
    if (updateSessionParameters(mNextRequests[0].captureRequest->mSettingsList.begin()->metadata)) {
        res = OK;
​
        //Input stream buffers are already acquired at this point so an input stream
        //will not be able to move to idle state unless we force it.
        if (mNextRequests[0].captureRequest->mInputStream != nullptr) {
            res = mNextRequests[0].captureRequest->mInputStream->forceToIdle();
            if (res != OK) {
                ALOGE("%s: Failed to force idle input stream: %d", __FUNCTION__, res);
                cleanUpFailedRequests(/*sendRequestError*/ false);
                return false;
            }
        }
​
        if (res == OK) {
            sp<Camera3Device> parent = mParent.promote();
            if (parent != nullptr) {
                mReconfigured |= parent->reconfigureCamera(mLatestSessionParams, mStatusId);
            }
            setPaused(false);
​
            if (mNextRequests[0].captureRequest->mInputStream != nullptr) {
                mNextRequests[0].captureRequest->mInputStream->restoreConfiguredState();
                if (res != OK) {
                    ALOGE("%s: Failed to restore configured input stream: %d", __FUNCTION__, res);
                    cleanUpFailedRequests(/*sendRequestError*/ false);
                    return false;
                }
            }
        }
    }
​
    // Prepare a batch of HAL requests and output buffers.
    // Fill in the hal_request for the batch assembled above, completing each request.
    res = prepareHalRequests();
    if (res == TIMED_OUT) {
        // Not a fatal error if getting output buffers time out.
        cleanUpFailedRequests(/*sendRequestError*/ true);
        // Check if any stream is abandoned.
        checkAndStopRepeatingRequest();
        return true;
    } else if (res != OK) {
        cleanUpFailedRequests(/*sendRequestError*/ false);
        return false;
    }
​
    // Inform waitUntilRequestProcessed thread of a new request ID
    {
        Mutex::Autolock al(mLatestRequestMutex);
​
        mLatestRequestId = latestRequestId;
        mLatestRequestSignal.signal();
    }
​
    // Submit a batch of requests to HAL.
    // Use flush lock only when submitting multilple requests in a batch.
    // TODO: The problem with flush lock is flush() will be blocked by process_capture_request()
    // which may take a long time to finish so synchronizing flush() and
    // process_capture_request() defeats the purpose of cancelling requests ASAP with flush().
    // For now, only synchronize for high speed recording and we should figure something out for
    // removing the synchronization.
    bool useFlushLock = mNextRequests.size() > 1;
​
    if (useFlushLock) {
        mFlushLock.lock();
    }
​
    ALOGVV("%s: %d: submitting %zu requests in a batch.", __FUNCTION__, __LINE__,
            mNextRequests.size());
​
    sp<Camera3Device> parent = mParent.promote();
    if (parent != nullptr) {
        parent->mRequestBufferSM.onSubmittingRequest();
    }
​
    bool submitRequestSuccess = false;
    nsecs_t tRequestStart = systemTime(SYSTEM_TIME_MONOTONIC);
    
    /* Android O logic; note that it differs from the Android Q handling:
        if (mInterface->supportBatchRequest()) {
            submitRequestSuccess = sendRequestsBatch();
        } else {
            submitRequestSuccess = sendRequestsOneByOne();
        }
    */
    submitRequestSuccess = sendRequestsBatch();
​
    nsecs_t tRequestEnd = systemTime(SYSTEM_TIME_MONOTONIC);
    mRequestLatency.add(tRequestStart, tRequestEnd);
​
    if (useFlushLock) {
        mFlushLock.unlock();
    }
​
    // Unset as current request
    {
        Mutex::Autolock l(mRequestLock);
        // mNextRequests is cleared once each batch has been submitted.
        mNextRequests.clear();
    }
    mRequestSubmittedSignal.signal();
​
    return submitRequestSuccess;
}

Tracing through this implementation, a few key points stand out:

  1. waitForNextRequestBatch assembles the next batch of requests;

  2. prepareHalRequests fills in the hal_request for the batch assembled in the previous step, completing each request;

    Its main job is to build the HAL-level halRequest from the incoming CaptureRequest, initialize it through a series of steps, and attach halRequest->output_buffers to our preview window; put simply, it adds the freshly acquired buffers to the corresponding streams;

  3. Depending on whether mInterface->supportBatchRequest() reports batch support, either sendRequestsBatch or sendRequestsOneByOne sends the prepared requests to the HAL process (CameraHalServer) for handling. The loop finally returns submitRequestSuccess: if it is true the loop continues; if false, something went wrong along the way and the RequestThread exits;

With this outline clear, the big picture emerges: the camera's entire preview loop is carried out here, and all of it revolves around the mNextRequests member. Let's now look closely at the three functions waitForNextRequestBatch, prepareHalRequests, and sendRequestsBatch (assuming batch requests are supported).

Here we focus on sendRequestsBatch.

sendRequestsBatch

The call flow inside it is as follows:

--> Camera3Device::RequestThread::sendRequestsBatch()
​
        --> Camera3Device::HalInterface::processBatchCaptureRequests()

sendRequestsBatch calls processBatchCaptureRequests:

status_t Camera3Device::HalInterface::processBatchCaptureRequests(
        std::vector<camera3_capture_request_t*>& requests,/*out*/uint32_t* numRequestProcessed) {
    ATRACE_NAME("CameraHal::processBatchCaptureRequests");
    if (!valid()) return INVALID_OPERATION;
​
    sp<device::V3_4::ICameraDeviceSession> hidlSession_3_4;
    auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
    if (castResult_3_4.isOk()) {
        hidlSession_3_4 = castResult_3_4;
    }
​
    hardware::hidl_vec<device::V3_2::CaptureRequest> captureRequests;
    hardware::hidl_vec<device::V3_4::CaptureRequest> captureRequests_3_4;
    size_t batchSize = requests.size();
    if (hidlSession_3_4 != nullptr) {
        captureRequests_3_4.resize(batchSize);
    } else {
        captureRequests.resize(batchSize);
    }
    std::vector<native_handle_t*> handlesCreated;
    std::vector<std::pair<int32_t, int32_t>> inflightBuffers;
​
    status_t res = OK;
    for (size_t i = 0; i < batchSize; i++) {
        if (hidlSession_3_4 != nullptr) {
            res = wrapAsHidlRequest(requests[i], /*out*/&captureRequests_3_4[i].v3_2,
                    /*out*/&handlesCreated, /*out*/&inflightBuffers);
        } else {
            // The Android O logic had no if-else here; it was simply:
            //for (size_t i = 0; i < batchSize; i++) {
            //    wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i], /*out*/&handlesCreated);
            //}
            res = wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i],
                    /*out*/&handlesCreated, /*out*/&inflightBuffers);
        }
        if (res != OK) {
            mBufferRecords.popInflightBuffers(inflightBuffers);
            cleanupNativeHandles(&handlesCreated);
            return res;
        }
    }
​
    std::vector<device::V3_2::BufferCache> cachesToRemove;
    {
        std::lock_guard<std::mutex> lock(mFreedBuffersLock);
        for (auto& pair : mFreedBuffers) {
            // The stream might have been removed since onBufferFreed
            if (mBufferRecords.isStreamCached(pair.first)) {
                cachesToRemove.push_back({pair.first, pair.second});
            }
        }
        mFreedBuffers.clear();
    }
​
    common::V1_0::Status status = common::V1_0::Status::INTERNAL_ERROR;
    *numRequestProcessed = 0;
​
    // Write metadata to FMQ.
    for (size_t i = 0; i < batchSize; i++) {
        camera3_capture_request_t* request = requests[i];
        device::V3_2::CaptureRequest* captureRequest;
        if (hidlSession_3_4 != nullptr) {
            captureRequest = &captureRequests_3_4[i].v3_2;
        } else {
            captureRequest = &captureRequests[i];
        }
​
        if (request->settings != nullptr) {
            size_t settingsSize = get_camera_metadata_size(request->settings);
            if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
                    reinterpret_cast<const uint8_t*>(request->settings), settingsSize)) {
                captureRequest->settings.resize(0);
                captureRequest->fmqSettingsSize = settingsSize;
            } else {
                if (mRequestMetadataQueue != nullptr) {
                    ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
                }
                captureRequest->settings.setToExternal(
                        reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(request->settings)),
                        get_camera_metadata_size(request->settings));
                captureRequest->fmqSettingsSize = 0u;
            }
        } else {
            // A null request settings maps to a size-0 CameraMetadata
            captureRequest->settings.resize(0);
            captureRequest->fmqSettingsSize = 0u;
        }
​
        if (hidlSession_3_4 != nullptr) {
            captureRequests_3_4[i].physicalCameraSettings.resize(request->num_physcam_settings);
            for (size_t j = 0; j < request->num_physcam_settings; j++) {
                if (request->physcam_settings != nullptr) {
                    size_t settingsSize = get_camera_metadata_size(request->physcam_settings[j]);
                    if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
                                reinterpret_cast<const uint8_t*>(request->physcam_settings[j]),
                                settingsSize)) {
                        captureRequests_3_4[i].physicalCameraSettings[j].settings.resize(0);
                        captureRequests_3_4[i].physicalCameraSettings[j].fmqSettingsSize =
                            settingsSize;
                    } else {
                        if (mRequestMetadataQueue != nullptr) {
                            ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
                        }
                        captureRequests_3_4[i].physicalCameraSettings[j].settings.setToExternal(
                                reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(
                                        request->physcam_settings[j])),
                                get_camera_metadata_size(request->physcam_settings[j]));
                        captureRequests_3_4[i].physicalCameraSettings[j].fmqSettingsSize = 0u;
                    }
                } else {
                    captureRequests_3_4[i].physicalCameraSettings[j].fmqSettingsSize = 0u;
                    captureRequests_3_4[i].physicalCameraSettings[j].settings.resize(0);
                }
                captureRequests_3_4[i].physicalCameraSettings[j].physicalCameraId =
                    request->physcam_id[j];
            }
        }
    }
​
    hardware::details::return_status err;
    auto resultCallback =
        [&status, &numRequestProcessed] (auto s, uint32_t n) {
                status = s;
                *numRequestProcessed = n;
        };
    if (hidlSession_3_4 != nullptr) {
        err = hidlSession_3_4->processCaptureRequest_3_4(captureRequests_3_4, cachesToRemove,
                                                         resultCallback);
    } else {
        err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
                                                  resultCallback);
    }
    if (!err.isOk()) {
        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
        status = common::V1_0::Status::CAMERA_DISCONNECTED;
    }
​
    if (status == common::V1_0::Status::OK && *numRequestProcessed != batchSize) {
        ALOGE("%s: processCaptureRequest returns OK but processed %d/%zu requests",
                __FUNCTION__, *numRequestProcessed, batchSize);
        status = common::V1_0::Status::INTERNAL_ERROR;
    }
​
    res = CameraProviderManager::mapToStatusT(status);
    if (res == OK) {
        if (mHidlSession->isRemote()) {
            // Only close acquire fence FDs when the HIDL transaction succeeds (so the FDs have been
            // sent to camera HAL processes)
            cleanupNativeHandles(&handlesCreated, /*closeFd*/true);
        } else {
            // In passthrough mode the FDs are now owned by HAL
            cleanupNativeHandles(&handlesCreated);
        }
    } else {
        mBufferRecords.popInflightBuffers(inflightBuffers);
        cleanupNativeHandles(&handlesCreated);
    }
    return res;
}
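Note the FMQ path above: when the request settings fit into the fast message queue, only fmqSettingsSize travels over hwbinder and the metadata bytes move through shared memory. A minimal sketch of that producer/consumer pattern (queue setup and names are illustrative, not from the source):

#include <fmq/MessageQueue.h>
#include <system/camera_metadata.h>
#include <vector>

using RequestMetadataQueue = android::hardware::MessageQueue<
        uint8_t, android::hardware::kSynchronizedReadWrite>;

// Writer side (cameraserver): serialize the settings blob into the queue.
bool writeSettings(RequestMetadataQueue& q, const camera_metadata_t* settings) {
    size_t size = get_camera_metadata_size(settings);
    return q.write(reinterpret_cast<const uint8_t*>(settings), size);
}

// Reader side (HAL process): processOneCaptureRequest() reads the same bytes
// back, sized by the fmqSettingsSize it received over hwbinder.
bool readSettings(RequestMetadataQueue& q, size_t size, std::vector<uint8_t>* out) {
    out->resize(size);
    return q.read(out->data(), size);
}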

This function invokes mHidlSession->processCaptureRequest; mHidlSession is, again, the CameraDeviceSession:

Return<void> CameraDeviceSession::processCaptureRequest(
        const hidl_vec<CaptureRequest>& requests,
        const hidl_vec<BufferCache>& cachesToRemove,
        ICameraDeviceSession::processCaptureRequest_cb _hidl_cb)  {
    updateBufferCaches(cachesToRemove);
​
    uint32_t numRequestProcessed = 0;
    Status s = Status::OK;
    for (size_t i = 0; i < requests.size(); i++, numRequestProcessed++) {
        s = processOneCaptureRequest(requests[i]);
        if (s != Status::OK) {
            break;
        }
    }
​
    if (s == Status::OK && requests.size() > 1) {
        mResultBatcher.registerBatch(requests[0].frameNumber, requests.size());
    }
​
    _hidl_cb(s, numRequestProcessed);
    return Void();
}
​
Status CameraDeviceSession::processOneCaptureRequest(const CaptureRequest& request)  {
    Status status = initStatus();
    if (status != Status::OK) {
        ALOGE("%s: camera init failed or disconnected", __FUNCTION__);
        return status;
    }
​
    camera3_capture_request_t halRequest;
    halRequest.frame_number = request.frameNumber;
​
    bool converted = true;
    CameraMetadata settingsFmq;  // settings from FMQ
    if (request.fmqSettingsSize > 0) {
        // non-blocking read; client must write metadata before calling
        // processOneCaptureRequest
        settingsFmq.resize(request.fmqSettingsSize);
        bool read = mRequestMetadataQueue->read(settingsFmq.data(), request.fmqSettingsSize);
        if (read) {
            converted = convertFromHidl(settingsFmq, &halRequest.settings);
        } else {
            ALOGE("%s: capture request settings metadata couldn't be read from fmq!", __FUNCTION__);
            converted = false;
        }
    } else {
        converted = convertFromHidl(request.settings, &halRequest.settings);
    }
​
    if (!converted) {
        ALOGE("%s: capture request settings metadata is corrupt!", __FUNCTION__);
        return Status::ILLEGAL_ARGUMENT;
    }
​
    if (mFirstRequest && halRequest.settings == nullptr) {
        ALOGE("%s: capture request settings must not be null for first request!",
                __FUNCTION__);
        return Status::ILLEGAL_ARGUMENT;
    }
​
    hidl_vec<buffer_handle_t*> allBufPtrs;
    hidl_vec<int> allFences;
    bool hasInputBuf = (request.inputBuffer.streamId != -1 &&
            request.inputBuffer.bufferId != 0);
    size_t numOutputBufs = request.outputBuffers.size();
    size_t numBufs = numOutputBufs + (hasInputBuf ? 1 : 0);
​
    if (numOutputBufs == 0) {
        ALOGE("%s: capture request must have at least one output buffer!", __FUNCTION__);
        return Status::ILLEGAL_ARGUMENT;
    }
​
    status = importRequest(request, allBufPtrs, allFences);
    if (status != Status::OK) {
        return status;
    }
​
    hidl_vec<camera3_stream_buffer_t> outHalBufs;
    outHalBufs.resize(numOutputBufs);
    bool aeCancelTriggerNeeded = false;
    ::android::hardware::camera::common::V1_0::helper::CameraMetadata settingsOverride;
    {
        Mutex::Autolock _l(mInflightLock);
        if (hasInputBuf) {
            auto key = std::make_pair(request.inputBuffer.streamId, request.frameNumber);
            auto& bufCache = mInflightBuffers[key] = camera3_stream_buffer_t{};
            convertFromHidl(
                    allBufPtrs[numOutputBufs], request.inputBuffer.status,
                    &mStreamMap[request.inputBuffer.streamId], allFences[numOutputBufs],
                    &bufCache);
            halRequest.input_buffer = &bufCache;
        } else {
            halRequest.input_buffer = nullptr;
        }
​
        halRequest.num_output_buffers = numOutputBufs;
        for (size_t i = 0; i < numOutputBufs; i++) {
            auto key = std::make_pair(request.outputBuffers[i].streamId, request.frameNumber);
            auto& bufCache = mInflightBuffers[key] = camera3_stream_buffer_t{};
            convertFromHidl(
                    allBufPtrs[i], request.outputBuffers[i].status,
                    &mStreamMap[request.outputBuffers[i].streamId], allFences[i],
                    &bufCache);
            outHalBufs[i] = bufCache;
        }
        halRequest.output_buffers = outHalBufs.data();
​
        AETriggerCancelOverride triggerOverride;
        aeCancelTriggerNeeded = handleAePrecaptureCancelRequestLocked(
                halRequest, &settingsOverride /*out*/, &triggerOverride/*out*/);
        if (aeCancelTriggerNeeded) {
            mInflightAETriggerOverrides[halRequest.frame_number] =
                    triggerOverride;
            halRequest.settings = settingsOverride.getAndLock();
        }
    }
    halRequest.num_physcam_settings = 0;
​
    ATRACE_ASYNC_BEGIN("frame capture", request.frameNumber);
    ATRACE_BEGIN("camera3->process_capture_request");
    status_t ret = mDevice->ops->process_capture_request(mDevice, &halRequest);
    ATRACE_END();
    if (aeCancelTriggerNeeded) {
        settingsOverride.unlock(halRequest.settings);
    }
    if (ret != OK) {
        Mutex::Autolock _l(mInflightLock);
        ALOGE("%s: HAL process_capture_request call failed!", __FUNCTION__);
​
        cleanupInflightFences(allFences, numBufs);
        if (hasInputBuf) {
            auto key = std::make_pair(request.inputBuffer.streamId, request.frameNumber);
            mInflightBuffers.erase(key);
        }
        for (size_t i = 0; i < numOutputBufs; i++) {
            auto key = std::make_pair(request.outputBuffers[i].streamId, request.frameNumber);
            mInflightBuffers.erase(key);
        }
        if (aeCancelTriggerNeeded) {
            mInflightAETriggerOverrides.erase(request.frameNumber);
        }
        return Status::INTERNAL_ERROR;
    }
​
    mFirstRequest = false;
    return Status::OK;
}

processOneCaptureRequest then hands the request to the HAL via mDevice->ops->process_capture_request(mDevice, &halRequest):

int QCamera3HardwareInterface::processCaptureRequest(
                    camera3_capture_request_t *request)
{
    ......
    // First, set parameters through the mm_camera_interface
    for (uint32_t i = 0; i < mStreamConfigInfo.num_streams; i++) {
            LOGI("STREAM INFO : type %d, wxh: %d x %d, pp_mask: 0x%x "
                    "Format:%d",
                    mStreamConfigInfo.type[i],
                    mStreamConfigInfo.stream_sizes[i].width,
                    mStreamConfigInfo.stream_sizes[i].height,
                    mStreamConfigInfo.postprocess_mask[i],
                    mStreamConfigInfo.format[i]);
        }
​
        rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
                    mParameters);
    
    ........................
    // Initialize all channels
    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
            it != mStreamInfo.end(); it++) {
            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
            if ((((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask()) ||
               ((1U << CAM_STREAM_TYPE_PREVIEW) == channel->getStreamTypeMask())) &&
               setEis)
                rc = channel->initialize(is_type);
            else {
                rc = channel->initialize(IS_TYPE_NONE);
            }
            if (NO_ERROR != rc) {
                LOGE("Channel initialization failed %d", rc);
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }
    
    ........................
    // Synchronize the related sensors
    rc = mCameraHandle->ops->sync_related_sensors(
                    mCameraHandle->camera_handle, m_pRelCamSyncBuf);
    
    ........................
    // startChannel
    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
            it != mStreamInfo.end(); it++) {
            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
            LOGH("Start Processing Channel mask=%d",
                     channel->getStreamTypeMask());
            rc = channel->start();
            if (rc < 0) {
                LOGE("channel start failed");
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }
        
    ........................
    // Issue the per-buffer request on the channel
    if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
            LOGD("snapshot request with output buffer %p, input buffer %p, frame_number %d",
                      output.buffer, request->input_buffer, frameNumber);
            if(request->input_buffer != NULL){
                rc = channel->request(output.buffer, frameNumber,
                        pInputBuffer, &mReprocMeta);
                if (rc < 0) {
                    LOGE("Fail to request on picture channel");
                    pthread_mutex_unlock(&mMutex);
                    return rc;
                }
            }
    }
    
    ........................
    rc = mCameraHandle->ops->start_channel(mCameraHandle->camera_handle,
                      mChannelHandle);
    
    ........................
}

This function submits one new capture request to the HAL. The framework must guarantee that every call to it is made from a single thread, and the call itself is asynchronous: the result is not delivered through the return value, but comes back through the separate process_capture_result() interface that the HAL invokes. In steady state the framework relies on the in-flight mechanism, keeping enough requests queued within a short window to sustain the required frame rate.
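
As a rough model of that in-flight bookkeeping (a minimal sketch with invented names, not the framework's actual classes), each submitted frame number is tracked until both its result metadata and all of its buffers have come back, so the framework never has to block on any single capture:

cpp 复制代码
#include <cstdint>
#include <map>
#include <mutex>

// Hypothetical minimal model of the in-flight mechanism.
struct InFlightEntry {
    int numBuffersLeft;   // output buffers the HAL still owes for this frame
    bool metadataArrived; // has the result metadata been delivered?
};

class InFlightTracker {
  public:
    void onRequestSubmitted(uint32_t frameNumber, int numOutputBuffers) {
        std::lock_guard<std::mutex> lock(mLock);
        mEntries[frameNumber] = {numOutputBuffers, false};
    }
    // Called from the process_capture_result() path: a frame is finished only
    // when the metadata and every output buffer have been returned.
    void onResult(uint32_t frameNumber, int buffersReturned, bool hasMetadata) {
        std::lock_guard<std::mutex> lock(mLock);
        auto it = mEntries.find(frameNumber);
        if (it == mEntries.end()) return;
        it->second.numBuffersLeft -= buffersReturned;
        if (hasMetadata) it->second.metadataArrived = true;
        if (it->second.numBuffersLeft <= 0 && it->second.metadataArrived)
            mEntries.erase(it); // frame fully delivered
    }
  private:
    std::mutex mLock;
    std::map<uint32_t, InFlightEntry> mEntries;
};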

channel->initialize

The body of processCaptureRequest is long and performs a great deal of configuration along the way; the key calls for this flow are the channel's initialize, start, and request.

Taking QCamera3YUVChannel as the example:

cpp 复制代码
int32_t QCamera3YUVChannel::initialize(cam_is_type_t isType)
{
    ATRACE_CAMSCOPE_CALL(CAMSCOPE_HAL3_YUV_CH_INIT);
    int32_t rc = NO_ERROR;
    cam_dimension_t streamDim;
​
    ........................
​
    mIsType  = isType;
    mStreamFormat = getStreamDefaultFormat(CAM_STREAM_TYPE_CALLBACK,
            mCamera3Stream->width, mCamera3Stream->height, m_bUBWCenable, mIsType);
    streamDim.width = mCamera3Stream->width;
    streamDim.height = mCamera3Stream->height;
​
    // mNumBufs: the number of stream buffers required
    rc = QCamera3Channel::addStream(mStreamType,
            mStreamFormat,
            streamDim,
            ROTATE_0,
            mNumBufs,
            mPostProcMask,
            mIsType);
    if (rc < 0) {
        LOGE("addStream failed");
        return rc;
    }
​
    ........................
​
    /* initialize offline meta memory for input reprocess */
    rc = QCamera3ProcessingChannel::initialize(isType);
    if (NO_ERROR != rc) {
        LOGE("Processing Channel initialize failed, rc = %d",
                 rc);
    }
​
    return rc;
}

This function calls QCamera3Channel::addStream:

cpp 复制代码
int32_t QCamera3Channel::addStream(cam_stream_type_t streamType,
                                  cam_format_t streamFormat,
                                  cam_dimension_t streamDim,
                                  cam_rotation_t streamRotation,
                                  uint8_t minStreamBufNum,
                                  cam_feature_mask_t postprocessMask,
                                  cam_is_type_t isType,
                                  uint32_t batchSize)
{
    ........................
    /*
     * m_camHandle: camera handle
     * m_handle: channel handle
     * m_camOps: pointer to the camera ops table
     * mPaddingInfo: pointer to the padding info
     */
    QCamera3Stream *pStream = new QCamera3Stream(m_camHandle,
                                               m_handle,
                                               m_camOps,
                                               &mPaddingInfo,
                                               this,
                                               mMapStreamBuffers);
    if (pStream == NULL) {
        LOGE("No mem for Stream");
        return NO_MEMORY;
    }
    LOGD("batch size is %d", batchSize);
​
    rc = pStream->init(streamType, streamFormat, streamDim, streamRotation,
            NULL, minStreamBufNum, postprocessMask, isType, batchSize,
            streamCbRoutine, this);
    if (rc == 0) {
        mStreams[m_numStreams] = pStream;
        m_numStreams++;
    } else {
        delete pStream;
    }
    return rc;
}

addStream first creates a new QCamera3Stream and then runs its init() function:

cpp 复制代码
int32_t QCamera3Stream::init(cam_stream_type_t streamType,
                            cam_format_t streamFormat,
                            cam_dimension_t streamDim,
                            cam_rotation_t streamRotation,
                            cam_stream_reproc_config_t* reprocess_config,
                            uint8_t minNumBuffers,
                            cam_feature_mask_t postprocess_mask,
                            cam_is_type_t is_type,
                            uint32_t batchSize,
                            hal3_stream_cb_routine stream_cb,
                            void *userdata)
{
    int32_t rc = OK;
    ssize_t bufSize = BAD_INDEX;
    char value[PROPERTY_VALUE_MAX];
    uint32_t bOptimizeCacheOps = 0;
    mm_camera_stream_config_t stream_config;
    LOGD("batch size is %d", batchSize);
​
    // mCamOps is the mm_camera_interface ops table
    mHandle = mCamOps->add_stream(mCamHandle, mChannelHandle);
    if (!mHandle) {
        LOGE("add_stream failed");
        rc = UNKNOWN_ERROR;
        goto done;
    }
​
    // allocate and map stream info memory
    mStreamInfoBuf = new QCamera3HeapMemory(1);
    if (mStreamInfoBuf == NULL) {
        LOGE("no memory for stream info buf obj");
        rc = -ENOMEM;
        goto err1;
    }
    rc = mStreamInfoBuf->allocate(sizeof(cam_stream_info_t));
    if (rc < 0) {
        LOGE("no memory for stream info");
        rc = -ENOMEM;
        goto err2;
    }
​
    mStreamInfo =
        reinterpret_cast<cam_stream_info_t *>(mStreamInfoBuf->getPtr(0));
    memset(mStreamInfo, 0, sizeof(cam_stream_info_t));
    mStreamInfo->stream_type = streamType;
    mStreamInfo->fmt = streamFormat;
    mStreamInfo->dim = streamDim;
    mStreamInfo->num_bufs = minNumBuffers;
    mStreamInfo->pp_config.feature_mask = postprocess_mask;
    mStreamInfo->is_type = is_type;
    mStreamInfo->pp_config.rotation = streamRotation;
    mStreamInfo->nr_mode = mNRMode;
​
    memset(value, 0, sizeof(value));
    property_get("persist.camera.cache.optimize", value, "1");
    bOptimizeCacheOps = atoi(value);
​
    if (bOptimizeCacheOps) {
        mStreamInfo->cache_ops = CAM_STREAM_CACHE_OPS_HONOUR_FLAGS;
    } else {
        mStreamInfo->cache_ops = CAM_STREAM_CACHE_OPS_DISABLED;
    }
    LOGD("stream_type is %d, feature_mask is %Ld",
           mStreamInfo->stream_type, mStreamInfo->pp_config.feature_mask);
​
    bufSize = mStreamInfoBuf->getSize(0);
    if (BAD_INDEX != bufSize) {
        // Map the stream info buffer (by fd) to the camera backend
        rc = mCamOps->map_stream_buf(mCamHandle,
                mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO,
                0, -1, mStreamInfoBuf->getFd(0), (size_t)bufSize,
                mStreamInfoBuf->getPtr(0));
        if (rc < 0) {
            LOGE("Failed to map stream info buffer");
            goto err3;
        }
    } else {
        LOGE("Failed to retrieve buffer size (bad index)");
        goto err3;
    }
​
    mNumBufs = minNumBuffers;
    // reprocess_config is passed in from the channel's addStream; in this code path it is NULL
    if (reprocess_config != NULL) {
        mStreamInfo->reprocess_config = *reprocess_config;
        mStreamInfo->streaming_mode = CAM_STREAMING_MODE_BURST;
        //mStreamInfo->num_of_burst = reprocess_config->offline.num_of_bufs;
        mStreamInfo->num_of_burst = 1;
    } else if (batchSize) {
        if (batchSize > MAX_BATCH_SIZE) {
            LOGE("batchSize:%d is very large", batchSize);
            rc = BAD_VALUE;
            goto err4;
        }
        else {
            mNumBatchBufs = MAX_INFLIGHT_HFR_REQUESTS / batchSize;
            mStreamInfo->streaming_mode = CAM_STREAMING_MODE_BATCH;
            mStreamInfo->user_buf_info.frame_buf_cnt = batchSize;
            mStreamInfo->user_buf_info.size =
                    (uint32_t)(sizeof(msm_camera_user_buf_cont_t));
            mStreamInfo->num_bufs = mNumBatchBufs;
            //Frame interval is irrelevant since time stamp calculation is not
            //required from the mCamOps
            mStreamInfo->user_buf_info.frameInterval = 0;
            LOGD("batch size is %d", batchSize);
        }
    } else {
        mStreamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
    }
​
    // Configure the stream
    stream_config.stream_info = mStreamInfo;
    stream_config.mem_vtbl = mMemVtbl;
    stream_config.padding_info = mPaddingInfo;
    // The stream object itself
    stream_config.userdata = this;
    // Callback through which the mm_camera layer returns data
    stream_config.stream_cb = dataNotifyCB;
    stream_config.stream_cb_sync = NULL;
​
    // The callback is passed down to the lower layer during config
    rc = mCamOps->config_stream(mCamHandle,
            mChannelHandle, mHandle, &stream_config);
    if (rc < 0) {
        LOGE("Failed to config stream, rc = %d", rc);
        goto err4;
    }
​
    // The callback handed in from the channel
    mDataCB = stream_cb;
    // The owning channel object
    mUserData = userdata;
    mBatchSize = batchSize;
    return 0;
​
err4:
    mCamOps->unmap_stream_buf(mCamHandle,
            mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO, 0, -1);
err3:
    mStreamInfoBuf->deallocate();
err2:
    delete mStreamInfoBuf;
    mStreamInfoBuf = NULL;
    mStreamInfo = NULL;
err1:
    mCamOps->delete_stream(mCamHandle, mChannelHandle, mHandle);
    mHandle = 0;
    mNumBufs = 0;
done:
    return rc;
}

This function does three main things:

  • mCamOps->add_stream: invokes the add_stream implementation behind the camera ops function table;
  • mCamOps->map_stream_buf: invokes the map_stream_buf implementation behind the camera ops function table (see the fd-passing sketch after this list);
  • mCamOps->config_stream: invokes the config_stream implementation behind the camera ops function table.
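
For map_stream_buf in particular, the stream-info buffer is shared with the camera daemon by sending the buffer's file descriptor over a Unix domain socket. The sketch below shows the generic SCM_RIGHTS technique this relies on; the function name and the payload layout are placeholders, not the actual mm_camera_socket code:

cpp 复制代码
#include <cstring>
#include <sys/socket.h>
#include <sys/uio.h>

// Sketch of fd passing: the receiving process gets its own descriptor for the
// same buffer, which it can then mmap. 'payload' stands in for the real
// mapping packet (buffer type, stream id, size, ...).
static int send_buffer_fd(int sock, int buf_fd, const void *payload, size_t len) {
    struct iovec iov = { const_cast<void *>(payload), len };
    char control[CMSG_SPACE(sizeof(int))] = {};

    struct msghdr msg = {};
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = control;
    msg.msg_controllen = sizeof(control);

    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;           // ancillary data carries the fd
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &buf_fd, sizeof(int));

    return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}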

/hardware/qcom/camera/msm8998/QCamera2/stack/mm-camera-interface/src/mm_camera_interface.c

c 复制代码
static mm_camera_ops_t mm_camera_ops = {
    ......
    .add_stream = mm_camera_intf_add_stream,
    ......
    .config_stream = mm_camera_intf_config_stream,
    ......
    .map_stream_buf = mm_camera_intf_map_stream_buf,
    .....
};

mCamOps->add_stream

c 复制代码
static uint32_t mm_camera_intf_add_stream(uint32_t camera_handle,
                                          uint32_t ch_id)
{
    uint32_t stream_id = 0, aux_stream_id;
    mm_camera_obj_t *my_obj = NULL;
    uint32_t m_ch_id = get_main_camera_handle(ch_id);
    uint32_t aux_chid = get_aux_camera_handle(ch_id);
​
    LOGD("E handle = %d ch_id = %d",
          camera_handle, ch_id);
    // Main camera
    if (m_ch_id) {
        pthread_mutex_lock(&g_intf_lock);
        uint32_t handle = get_main_camera_handle(camera_handle);
        my_obj = mm_camera_util_get_camera_by_handler(handle);
        if(my_obj) {
            pthread_mutex_lock(&my_obj->cam_lock);
            pthread_mutex_unlock(&g_intf_lock);
            stream_id = mm_camera_add_stream(my_obj, m_ch_id);
       } else {
            pthread_mutex_unlock(&g_intf_lock);
       }
    }
​
    // Auxiliary camera
    if (aux_chid) {
        pthread_mutex_lock(&g_intf_lock);
        uint32_t aux_handle = get_aux_camera_handle(camera_handle);
        my_obj = mm_camera_util_get_camera_head(aux_handle);
        if (my_obj) {
            pthread_mutex_lock(&my_obj->muxer_lock);
            pthread_mutex_unlock(&g_intf_lock);
            aux_stream_id = mm_camera_muxer_add_stream(aux_handle, aux_chid,
                    m_ch_id, stream_id, my_obj);
            if (aux_stream_id <= 0) {
                LOGE("Failed to add stream");
                pthread_mutex_lock(&my_obj->cam_lock);
                mm_camera_del_stream(my_obj, m_ch_id, stream_id);
            } else {
                stream_id = stream_id | aux_stream_id;
            }
        } else {
            pthread_mutex_unlock(&g_intf_lock);
        }
    }
    LOGH("X ch_id = %u stream_id = %u", ch_id, stream_id);
    return stream_id;
}

mm_camera_intf_add_stream first looks up the mm_camera_obj_t instance via mm_camera_util_get_camera_by_handler, and then hands off to mm_camera_add_stream() for further processing.
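
The camera_handle / ch_id values here are composite handles: in dual-camera configurations the main and aux handles are packed into a single value, which is why get_main_camera_handle / get_aux_camera_handle appear throughout and why the main and aux stream ids can simply be OR-ed together at the end. A simplified model of the packing (the real masks and shifts live in mm_camera_interface and may well differ; these constants are illustrative only):

cpp 复制代码
#include <cstdint>

// Hypothetical layout: low 16 bits = main handle, high 16 bits = aux handle.
constexpr uint32_t kMainMask = 0x0000FFFF;
constexpr int kAuxShift = 16;

inline uint32_t getMainHandle(uint32_t combined) { return combined & kMainMask; }
inline uint32_t getAuxHandle(uint32_t combined)  { return combined >> kAuxShift; }
inline uint32_t makeCombined(uint32_t mainH, uint32_t auxH) {
    return (auxH << kAuxShift) | (mainH & kMainMask);
}

With that in mind, mm_camera_add_stream: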

c 复制代码
uint32_t mm_camera_add_stream(mm_camera_obj_t *my_obj,
                              uint32_t ch_id)
{
    uint32_t s_hdl = 0;
    mm_channel_t * ch_obj =
        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
​
    if (NULL != ch_obj) {
        pthread_mutex_lock(&ch_obj->ch_lock);
        pthread_mutex_unlock(&my_obj->cam_lock);
​
        mm_channel_fsm_fn(ch_obj,
                          MM_CHANNEL_EVT_ADD_STREAM,
                          NULL,
                          (void *)&s_hdl);
    } else {
        pthread_mutex_unlock(&my_obj->cam_lock);
    }
    return s_hdl;
}

Depending on the state of ch_obj (the channel), the incoming event is dispatched to a different handler.

When the channel was initialized earlier (in mm_channel_init), its state was set to MM_CHANNEL_STATE_STOPPED:

c 复制代码
my_obj->state = MM_CHANNEL_STATE_STOPPED;

mm_channel_fsm_fn performs the dispatch, switching on the ch_obj state:

/hardware/qcom/camera/msm8998/QCamera2/stack/mm-camera-interface/src/mm_camera_channel.c

c 复制代码
int32_t mm_channel_fsm_fn(mm_channel_t *my_obj,
                          mm_channel_evt_type_t evt,
                          void * in_val,
                          void * out_val)
{
    int32_t rc = -1;
​
    LOGD("E state = %d", my_obj->state);
    switch (my_obj->state) {
    case MM_CHANNEL_STATE_NOTUSED:
        rc = mm_channel_fsm_fn_notused(my_obj, evt, in_val, out_val);
        break;
    case MM_CHANNEL_STATE_STOPPED:
        rc = mm_channel_fsm_fn_stopped(my_obj, evt, in_val, out_val);
        break;
    case MM_CHANNEL_STATE_ACTIVE:
        rc = mm_channel_fsm_fn_active(my_obj, evt, in_val, out_val);
        break;
    case MM_CHANNEL_STATE_PAUSED:
        rc = mm_channel_fsm_fn_paused(my_obj, evt, in_val, out_val);
        break;
    default:
        LOGD("Not a valid state (%d)", my_obj->state);
        break;
    }
​
    /* unlock ch_lock */
    pthread_mutex_unlock(&my_obj->ch_lock);
    LOGD("X rc = %d", rc);
    return rc;
}

Since my_obj->state is currently MM_CHANNEL_STATE_STOPPED, mm_channel_fsm_fn_stopped is executed:

c 复制代码
int32_t mm_channel_fsm_fn_stopped(mm_channel_t *my_obj,
                                  mm_channel_evt_type_t evt,
                                  void * in_val,
                                  void * out_val)
{
    int32_t rc = 0;
    LOGD("E evt = %d", evt);
    switch (evt) {
    case MM_CHANNEL_EVT_ADD_STREAM:
        {
            uint32_t s_hdl = 0;
            s_hdl = mm_channel_add_stream(my_obj);
            *((uint32_t*)out_val) = s_hdl;
            rc = 0;
        }
        break;
​
    ........................
​
    default:
        LOGW("invalid state (%d) for evt (%d)",
                    my_obj->state, evt);
        break;
    }
    LOGD("E rc = %d", rc);
    return rc;
}

Because mm_camera_add_stream passed evt = MM_CHANNEL_EVT_ADD_STREAM, execution enters the MM_CHANNEL_EVT_ADD_STREAM branch, which calls mm_channel_add_stream:

c 复制代码
uint32_t mm_channel_add_stream(mm_channel_t *my_obj)
{
    int32_t rc = 0;
    uint8_t idx = 0;
    uint32_t s_hdl = 0;
    mm_stream_t *stream_obj = NULL;
​
    LOGD("E");
    /* check available stream: find an unused slot */
    for (idx = 0; idx < MAX_STREAM_NUM_IN_BUNDLE; idx++) {
        if (MM_STREAM_STATE_NOTUSED == my_obj->streams[idx].state) {
            stream_obj = &my_obj->streams[idx];
            break;
        }
    }
    if (NULL == stream_obj) {
        LOGE("streams reach max, no more stream allowed to add");
        return s_hdl;
    }
​
    /* initialize stream object */
    memset(stream_obj, 0, sizeof(mm_stream_t));
    stream_obj->fd = -1;
    stream_obj->my_hdl = mm_camera_util_generate_handler_by_num (
            my_obj->cam_obj->my_num, idx);
    stream_obj->ch_obj = my_obj;
    stream_obj->state = MM_STREAM_STATE_INITED;
​
    /* acquire stream */
    rc = mm_stream_fsm_fn(stream_obj, MM_STREAM_EVT_ACQUIRE, NULL, NULL);
    if (0 == rc) {
        s_hdl = stream_obj->my_hdl;
    } else {
        /* error during acquire, de-init */
        pthread_cond_destroy(&stream_obj->buf_cond);
        pthread_mutex_destroy(&stream_obj->buf_lock);
        pthread_mutex_destroy(&stream_obj->cb_lock);
        pthread_mutex_destroy(&stream_obj->cmd_lock);
        memset(stream_obj, 0, sizeof(mm_stream_t));
    }
    LOGD("stream handle = %d", s_hdl);
    return s_hdl;
}

mm_stream_fsm_fn is the stream finite-state-machine entry point: depending on the stream state, the incoming event is handled differently. The stream was just put into MM_STREAM_STATE_INITED above, and the event passed along is MM_STREAM_EVT_ACQUIRE.

The call flows into mm_stream_fsm_fn:

c 复制代码
int32_t mm_stream_fsm_fn(mm_stream_t *my_obj,
                         mm_stream_evt_type_t evt,
                         void * in_val,
                         void * out_val)
{
    int32_t rc = -1;
​
    LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
          my_obj->my_hdl, my_obj->fd, my_obj->state);
    switch (my_obj->state) {
        
        ........................
        
    case MM_STREAM_STATE_INITED:
        rc = mm_stream_fsm_inited(my_obj, evt, in_val, out_val);
        break;
        
        ........................
        
    default:
        LOGD("Not a valid state (%d)", my_obj->state);
        break;
    }
    LOGD("X rc =%d",rc);
    return rc;
}

mm_stream_fsm_inited is the stream FSM handler for the INITED state; here the event type is MM_STREAM_EVT_ACQUIRE, and it does two things:

  1. calls open() to open the device node;
  2. calls mm_stream_set_ext_mode to register the stream's extended mode with the server side.
c 复制代码
int32_t mm_stream_fsm_inited(mm_stream_t *my_obj,
                             mm_stream_evt_type_t evt,
                             void * in_val,
                             void * out_val)
{
    int32_t rc = 0;
    char dev_name[MM_CAMERA_DEV_NAME_LEN];
    const char *dev_name_value = NULL;
    if (NULL == my_obj) {
      LOGE("NULL camera object\n");
      return -1;
    }
​
    LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
          my_obj->my_hdl, my_obj->fd, my_obj->state);
    switch(evt) {
    case MM_STREAM_EVT_ACQUIRE:
        if ((NULL == my_obj->ch_obj) || (NULL == my_obj->ch_obj->cam_obj)) {
            LOGE("NULL channel or camera obj\n");
            rc = -1;
            break;
        }
​
        dev_name_value = mm_camera_util_get_dev_name(my_obj->ch_obj->cam_obj->my_hdl);
        if (NULL == dev_name_value) {
            LOGE("NULL device name\n");
            rc = -1;
            break;
        }
​
        snprintf(dev_name, sizeof(dev_name), "/dev/%s",
                 dev_name_value);
​
        my_obj->fd = open(dev_name, O_RDWR | O_NONBLOCK);
        if (my_obj->fd < 0) {
            LOGE("open dev returned %d\n", my_obj->fd);
            rc = -1;
            break;
        }
        LOGD("open dev fd = %d\n", my_obj->fd);
        rc = mm_stream_set_ext_mode(my_obj);
        if (0 == rc) {
            my_obj->state = MM_STREAM_STATE_ACQUIRED;
        } else {
            /* failed setting ext_mode
             * close fd */
            close(my_obj->fd);
            my_obj->fd = -1;
            break;
        }
        break;
    default:
        LOGE("invalid state (%d) for evt (%d), in(%p), out(%p)",
                    my_obj->state, evt, in_val, out_val);
        break;
    }
    return rc;
}

So the ACQUIRE handling opens a device node for the stream and then moves stream_obj->state to MM_STREAM_STATE_ACQUIRED.
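
mm_stream_set_ext_mode itself is essentially a single V4L2 call: it tags the freshly opened device node with this stream's identity. A sketch of the pattern (assuming the usual msm-camera use of the extendedmode field; the exact fields in the real function may differ):

cpp 复制代码
#include <cstring>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

// Sketch: tell the kernel/server which logical stream this fd represents by
// stashing the stream handle in the capture parm's extendedmode field.
static int setExtModeSketch(int fd, uint32_t streamHandle) {
    struct v4l2_streamparm s_parm;
    memset(&s_parm, 0, sizeof(s_parm));
    s_parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    s_parm.parm.capture.extendedmode = streamHandle; // stream identity
    return ioctl(fd, VIDIOC_S_PARM, &s_parm);
}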

With that, the add_stream flow is complete.
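
As a summary of where the stream object stands after each step (a sketch distilled from the code above, not an enum that exists in the source; the real values are the MM_STREAM_STATE_* constants):

cpp 复制代码
// Stream-side state progression traced in this section.
enum class StreamPhase {
    NotUsed,    // MM_STREAM_STATE_NOTUSED: free slot in the channel's streams[]
    Inited,     // MM_STREAM_STATE_INITED: set by mm_channel_add_stream()
    Acquired,   // MM_STREAM_STATE_ACQUIRED: /dev node opened, ext mode set
    Configured, // MM_STREAM_STATE_CFG: reached via config_stream, covered next
};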

mCamOps->config_stream

The call path is essentially identical to add_stream above: mm_camera again goes through the channel state machine, whose state is still MM_CHANNEL_STATE_STOPPED, this time with the MM_CHANNEL_EVT_CONFIG_STREAM event. That reaches mm_channel_config_stream inside the channel, which in turn drives the stream state machine: the stream is now in MM_STREAM_STATE_ACQUIRED and the event passed is MM_STREAM_EVT_SET_FMT.

text 复制代码
==> mm_camera_intf_config_stream()
    ==> mm_camera_config_stream()
        ==> mm_channel_fsm_fn_stopped()
            ==> mm_channel_config_stream()
                ==> mm_stream_fsm_fn()
                    ==> mm_stream_fsm_acquired(): add_stream left stream_obj->state at MM_STREAM_STATE_ACQUIRED, so this branch runs
                        ==> mm_stream_config()
                            ==> mm_stream_sync_info()
                                ==> mm_stream_set_fmt()

mm_stream_fsm_acquired() calls mm_stream_config(), after which stream_obj->state is changed to MM_STREAM_STATE_CFG:

c 复制代码
int32_t mm_stream_config(mm_stream_t *my_obj,
                         mm_camera_stream_config_t *config)
{
    int32_t rc = 0;
    int32_t cb_index = 0;
​
    LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
          my_obj->my_hdl, my_obj->fd, my_obj->state);
    my_obj->stream_info = config->stream_info;
    my_obj->buf_num = (uint8_t) config->stream_info->num_bufs;
    my_obj->mem_vtbl = config->mem_vtbl;
    my_obj->padding_info = config->padding_info;
​
    if (config->stream_cb_sync != NULL) {
        /* SYNC callback is always placed at index 0*/
        my_obj->buf_cb[cb_index].cb = config->stream_cb_sync; // NULL here (no sync callback configured)
        my_obj->buf_cb[cb_index].user_data = config->userdata;
        my_obj->buf_cb[cb_index].cb_count = -1; /* infinite by default */
        my_obj->buf_cb[cb_index].cb_type = MM_CAMERA_STREAM_CB_TYPE_SYNC;
        cb_index++;
    }
    // dataNotifyCB, assigned during QCamera3Stream::init
    my_obj->buf_cb[cb_index].cb = config->stream_cb;
    my_obj->buf_cb[cb_index].user_data = config->userdata;
    my_obj->buf_cb[cb_index].cb_count = -1; /* infinite by default */
    my_obj->buf_cb[cb_index].cb_type = MM_CAMERA_STREAM_CB_TYPE_ASYNC;
​
    rc = mm_stream_sync_info(my_obj);
    if (rc == 0) {
        rc = mm_stream_set_fmt(my_obj);
        if (rc < 0) {
            LOGE("mm_stream_set_fmt failed %d",
                     rc);
        }
    }
​
    my_obj->map_ops.map_ops = mm_stream_map_buf_ops;
    my_obj->map_ops.bundled_map_ops = mm_stream_bundled_map_buf_ops;
    my_obj->map_ops.unmap_ops = mm_stream_unmap_buf_ops;
    my_obj->map_ops.userdata = my_obj;
​
    if(my_obj->mem_vtbl.set_config_ops != NULL) {
        my_obj->mem_vtbl.set_config_ops(&my_obj->map_ops,
                my_obj->mem_vtbl.user_data);
    }
    return rc;
}

This is where the upper layers' callbacks get wired into the buffer path: from now on, whenever the stream fetches a buffer from the driver via ioctl, the data is passed back up through these callbacks.
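
To make that concrete, here is a minimal sketch of how such a buf_cb table can be walked once the stream thread dequeues a frame from the driver (invented, simplified types; the real mm_stream code additionally distinguishes SYNC vs ASYNC delivery and other bookkeeping):

cpp 复制代码
#include <cstdint>

// Simplified stand-ins for the mm_camera types involved.
struct FrameSuperBuf { uint32_t frame_idx; /* ... buffer planes ... */ };
typedef void (*StreamDataCb)(FrameSuperBuf *frame, void *user_data);

struct BufCb {
    StreamDataCb cb;   // e.g. dataNotifyCB registered in QCamera3Stream::init
    void *user_data;   // the owning stream, handed back on every invocation
    int cb_count;      // -1 means fire forever
};

// Every registered callback receives the frame; counted callbacks tick down.
static void dispatchFrame(BufCb cbs[], int numCbs, FrameSuperBuf *frame) {
    for (int i = 0; i < numCbs; i++) {
        if (cbs[i].cb == nullptr || cbs[i].cb_count == 0) continue;
        cbs[i].cb(frame, cbs[i].user_data); // data flows back up the stack
        if (cbs[i].cb_count > 0) cbs[i].cb_count--;
    }
}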
