基于Android P版本分析
CameraMetaData这个结构贯穿整个camera的流程中,尤其在Request模块中使用最为频繁,所以我们分析一下CameraMetaData这个结构体;
Camera MetaData简介
CameraMetadata:camera控件和信息的基类;
该类定义用于查询camera特性或捕获结果以及设置camera请求参数的基本键/值映射;
CameraMetadata的所有实例都是不可变的:getKeys()返回的密钥列表不会改变,并且在对象的整个生命周期内,通过get获取任何密钥得到的返回值也不会改变;
在Camera1的时候,Camera参数的设置,都是通过调用setParameter/Parameters()来实现下发或者获取参数;
在Camera2+HAL3架构下,我们一般使用CameraCharacteristics来获取camera的参数配置,实际上就是使用CameraMetaData的形式来下发或者是获取参数;
java
public final class CameraCharacteristics extends CameraMetadata<CameraCharacteristics.Key<?>> {
CameraCharacteristics继承自CameraMetadata;
CaptureRequest继承自CameraMetadata;
CameraMetadata就是将参数以共享内存的形式,将所有的camera参数以有序的结构体的形式保存在一块连续的内存中;
CameraMetaData 定义介绍
首先我们看一下在底层Metadata的定义:
Metadata层次结构定义及基本宏定义:/system/media/camera/include/system/camera_metadata_tags.h
Metadata枚举定义及常用API定义:/system/media/camera/include/system/camera_metadata.h
Metadata基本函数操作结构体定义:/system/media/camera/include/system/camera_vendor_tags.h
Metadata宏定义与字符串绑定:/system/media/camera/src/camera_metadata_tag_info.c
Metadata核心代码实现:/system/media/camera/src/camera_metadata.c
CameraMetadata 内存分布及数据结构
在camera_metadata.c中,有一幅内存分布图,可以看出CameraMetadata数据结构是一块连续的内存空间;
其内存区分布如下:
c
/**
* A packet of metadata. This is a list of entries, each of which may point to
* its values stored at an offset in data.
*
* It is assumed by the utility functions that the memory layout of the packet
* is as follows:
*
* |-----------------------------------------------|
* | camera_metadata_t | 区域1
* | |
* |-----------------------------------------------|
* | reserved for future expansion | 区域2
* |-----------------------------------------------|
* | camera_metadata_buffer_entry_t #0 | 区域3
* |-----------------------------------------------|
* | .... |
* |-----------------------------------------------|
* | camera_metadata_buffer_entry_t #entry_count-1 |
* |-----------------------------------------------|
* | free space for | 区域4
* | (entry_capacity-entry_count) entries |
* |-----------------------------------------------|
* | start of camera_metadata.data | 区域5
* | |
* |-----------------------------------------------|
* | free space for | 区域6
* | (data_capacity-data_count) bytes |
* |-----------------------------------------------|
*
* With the total length of the whole packet being camera_metadata.size bytes.
*
* In short, the entries and data are contiguous in memory after the metadata
* header.
*/
#define METADATA_ALIGNMENT ((size_t) 4)
/**
 * Header of a metadata packet: bookkeeping for the entry and data regions
 * that follow it in the same contiguous allocation (regions 1-6 in the
 * layout diagram above).
 */
struct camera_metadata {
metadata_size_t size; // total size in bytes of the whole metadata packet
uint32_t version; // version
uint32_t flags; // marks whether entries are sorted by tag (ascending); sorting enables binary search, which improves lookup performance
metadata_size_t entry_count; // number of TAG entries already stored in the buffer
metadata_size_t entry_capacity; // maximum number of TAG entries the buffer can hold
metadata_uptrdiff_t entries_start; // byte offset of the TAG (entry) region from the start of the packet
metadata_size_t data_count; // bytes of the data region currently in use
metadata_size_t data_capacity; // total bytes reserved for the data region
metadata_uptrdiff_t data_start; // byte offset of the data region from the start of the packet
uint32_t padding; // padding to 8 bytes boundary
metadata_vendor_id_t vendor_id; // platform/vendor id; initialized to CAMERA_METADATA_INVALID_VENDOR_ID by place_camera_metadata()
};
// camera_metadata_t is the type external code uses to access metadata
typedef struct camera_metadata camera_metadata_t;
区域1:camera_metadata_t结构体定义,占用内存96Byte;
区域2:保留区,供未来使用;
区域3:所有TAG结构体定义,TAG[0]、TAG[1]、......、TAG[entry_count - 1];
区域4:剩余未使用的TAG结构体的内存保留,该区域大小为(entry_capacity - entry_count)个TAG;
区域5:所有TAG对应的具体metadata数据;
区域6:剩余未使用的data内存,大小为(data_capacity - data_count)个字节;
arduino
/**
 * A single metadata entry, storing an array of values of a given type. If the
 * array is no larger than 4 bytes in size, it is stored in the data.value[]
 * array; otherwise, it can found in the parent's data array at index
 * data.offset.
 */
#define ENTRY_ALIGNMENT ((size_t) 4)
typedef struct camera_metadata_buffer_entry {
uint32_t tag; // the TAG value (key)
uint32_t count; // number of data elements stored for this TAG's value
union {
uint32_t offset; // offset into the data region where this TAG's value lives (when the value is larger than 4 bytes)
uint8_t value[4]; // when the value occupies <= 4 bytes it is stored inline here
} data;
uint8_t type; // one of TYPE_BYTE, TYPE_INT32, TYPE_FLOAT, TYPE_INT64, TYPE_DOUBLE, TYPE_RATIONAL
uint8_t reserved[3]; // reserved; pads the struct for alignment
} camera_metadata_buffer_entry_t;
........................
/**
 * A datum of metadata. This corresponds to camera_metadata_entry_t::data
 * with the difference that each element is not a pointer. We need to have a
 * non-pointer type description in order to figure out the largest alignment
 * requirement for data (DATA_ALIGNMENT).
 */
#define DATA_ALIGNMENT ((size_t) 8)
typedef union camera_metadata_data {
uint8_t u8; // TYPE_BYTE
int32_t i32; // TYPE_INT32
float f; // TYPE_FLOAT
int64_t i64; // TYPE_INT64
double d; // TYPE_DOUBLE
camera_metadata_rational_t r; // TYPE_RATIONAL
} camera_metadata_data_t;
camera_metadata_data为每个TAG对应数据的union结构体;union的大小取决于其最大成员(int64_t/double/camera_metadata_rational_t均为8Byte),并按8字节(DATA_ALIGNMENT)对齐,所以该结构体占用8个Byte;
/system/media/camera/include/system/camera_metadata.h
arduino
#include "camera_metadata_tags.h"
/**
 * Enum range for each top-level category
 */
ANDROID_API
extern unsigned int camera_metadata_section_bounds[ANDROID_SECTION_COUNT][2];
ANDROID_API
extern const char *camera_metadata_section_names[ANDROID_SECTION_COUNT];
/**
 * Type definitions for camera_metadata_entry
 * =============================================================================
 */
enum {
// Unsigned 8-bit integer (uint8_t)
TYPE_BYTE = 0,
// Signed 32-bit integer (int32_t)
TYPE_INT32 = 1,
// 32-bit float (float)
TYPE_FLOAT = 2,
// Signed 64-bit integer (int64_t)
TYPE_INT64 = 3,
// 64-bit float (double)
TYPE_DOUBLE = 4,
// A 64-bit fraction (camera_metadata_rational_t)
TYPE_RATIONAL = 5,
// Number of type fields; values >= NUM_TYPES are treated as invalid
NUM_TYPES
};
// A signed fraction (numerator/denominator); the backing storage for
// TYPE_RATIONAL values.
typedef struct camera_metadata_rational {
int32_t numerator;
int32_t denominator;
} camera_metadata_rational_t;
/**
 * A reference to a metadata entry in a buffer.
 *
 * The data union pointers point to the real data in the buffer, and can be
 * modified in-place if the count does not need to change. The count is the
 * number of entries in data of the entry's type, not a count of bytes.
 */
typedef struct camera_metadata_entry {
size_t index; // index of this entry within its metadata packet
uint32_t tag; // the TAG key
uint8_t type; // one of TYPE_BYTE, TYPE_INT32, TYPE_FLOAT, TYPE_INT64, TYPE_DOUBLE, TYPE_RATIONAL
size_t count; // number of data elements stored for this TAG's value
union {
uint8_t *u8;
int32_t *i32;
float *f;
int64_t *i64;
double *d;
camera_metadata_rational_t *r;
} data; // pointers into the packet's buffer for this TAG's value
} camera_metadata_entry_t; // NOTE: fixed — the excerpt had the typedef name truncated to "_t"; the AOSP name is camera_metadata_entry_t (used by find/get below)
camera_metadata_entry定义了每个TAG的数据结构体定义;
这三个结构体之间的关系:
metadata的基本操作就是增(增加tag)、删(删除tag)、查(根据tag查找对应的value)、改(修改tag对应的value);
到这里metadata的原理基本上可以推导出来了,以"查"为例:
- 当用户拿到 camera_metadata 以及对应的tag后,需要从该meta中,找到对应的value;
- 从metadata的 entries_start 成员中可以拿到entry的首地址,再根据 entry_count 可以遍历所有的entry;
- 根据tag来逐一比较camera_metadata_buffer_entry中的 tag ,就可以找到该tag对应的entry;
- 根据 count 和 type 可以计算出value的字节数。当字节数<=4的时候,直接取 data.value;否则就根据 offset 从metadata的 data_start 找到对应的value;
- 将其转换为结构体 camera_metadata_entry_t,返回给用户。用户通过count和type就可以找到该tag对应的value啦;
metadata关键函数接口
前面了解清楚它的内存分布、宏定义以及操作方法,我们现在看一下他们的核心逻辑,看看是如何使用上面描述的定义;
get_entries&get_data
arduino
// Returns a pointer to the first entry (the TAG region) inside the packet
static camera_metadata_buffer_entry_t *get_entries(
const camera_metadata_t *metadata) {
// entries_start is a byte offset from the start of the packet
return (camera_metadata_buffer_entry_t*)
((uint8_t*)metadata + metadata->entries_start);
}
// Returns a pointer to the start of the data region inside the packet
static uint8_t *get_data(const camera_metadata_t *metadata) {
// data_start is a byte offset from the start of the packet
return (uint8_t*)metadata + metadata->data_start;
}
allocate_camera_metadata(分配metadata)
ini
// Allocates a camera_metadata packet on the heap. Given the maximum number of
// entries and maximum data bytes, sizes and zero-initializes the whole
// contiguous buffer, then lays the header out inside it.
camera_metadata_t *allocate_camera_metadata(size_t entry_capacity,
size_t data_capacity) {
// compute how many bytes the whole packet needs
size_t memory_needed = calculate_camera_metadata_size(entry_capacity,
data_capacity);
// calloc zero-initializes the allocation
void *buffer = calloc(1, memory_needed);
camera_metadata_t *metadata = place_camera_metadata(
buffer, memory_needed, entry_capacity, data_capacity);
if (!metadata) {
/* This should not happen when memory_needed is the same
* calculated in this function and in place_camera_metadata.
*/
free(buffer);
}
return metadata;
}
// Initializes the header fields of a freshly-allocated buffer, preparing it
// for later TAG insertion and updates. Returns NULL if dst is NULL or too
// small for the requested capacities.
camera_metadata_t *place_camera_metadata(void *dst,
size_t dst_size,
size_t entry_capacity,
size_t data_capacity) {
if (dst == NULL) return NULL;
size_t memory_needed = calculate_camera_metadata_size(entry_capacity,
data_capacity);
if (memory_needed > dst_size) return NULL;
camera_metadata_t *metadata = (camera_metadata_t*)dst;
metadata->version = CURRENT_METADATA_VERSION;
metadata->flags = 0;
metadata->entry_count = 0;
metadata->entry_capacity = entry_capacity;
// entry region starts right after the header, aligned for entries
metadata->entries_start =
ALIGN_TO(sizeof(camera_metadata_t), ENTRY_ALIGNMENT);
metadata->data_count = 0;
metadata->data_capacity = data_capacity;
metadata->size = memory_needed;
// data region starts after the full entry array, rounded up to DATA_ALIGNMENT
size_t data_unaligned = (uint8_t*)(get_entries(metadata) +
metadata->entry_capacity) - (uint8_t*)metadata;
metadata->data_start = ALIGN_TO(data_unaligned, DATA_ALIGNMENT);
metadata->vendor_id = CAMERA_METADATA_INVALID_VENDOR_ID;
assert(validate_camera_metadata_structure(metadata, NULL) == OK);
return metadata;
}
........................
// Computes the total packet size: the camera_metadata_t header, the aligned
// entry array (entry_count entries), and the aligned data region
// (data_count bytes), rounded up so packets can be stacked contiguously.
size_t calculate_camera_metadata_size(size_t entry_count,
size_t data_count) {
size_t memory_needed = sizeof(camera_metadata_t);
// Start entry list at aligned boundary
memory_needed = ALIGN_TO(memory_needed, ENTRY_ALIGNMENT);
memory_needed += sizeof(camera_metadata_buffer_entry_t[entry_count]);
// Start buffer list at aligned boundary
memory_needed = ALIGN_TO(memory_needed, DATA_ALIGNMENT);
memory_needed += sizeof(uint8_t[data_count]);
// Make sure camera metadata can be stacked in continuous memory
memory_needed = ALIGN_TO(memory_needed, METADATA_PACKET_ALIGNMENT);
return memory_needed;
}
find_camera_metadata_entry(从metadata中根据TAG查找 value)
ini
/*
 * Looks up the entry whose tag equals `tag` in src and fills *entry with a
 * view of it via get_camera_metadata_entry(). Uses binary search when the
 * packet is flagged sorted (FLAG_SORTED), otherwise falls back to a linear
 * scan. Returns ERROR for NULL input, NOT_FOUND when no entry matches.
 */
int find_camera_metadata_entry(camera_metadata_t *src,
uint32_t tag,
camera_metadata_entry_t *entry) {
if (src == NULL) return ERROR;
uint32_t index;
if (src->flags & FLAG_SORTED) {
// Sorted entries, do a binary search
camera_metadata_buffer_entry_t *search_entry = NULL;
camera_metadata_buffer_entry_t key;
key.tag = tag;
search_entry = bsearch(&key,
get_entries(src),
src->entry_count,
sizeof(camera_metadata_buffer_entry_t),
compare_entry_tags);
if (search_entry == NULL) return NOT_FOUND;
// pointer difference gives the entry's index in the array
index = search_entry - get_entries(src);
} else {
// Not sorted, linear search
camera_metadata_buffer_entry_t *search_entry = get_entries(src);
for (index = 0; index < src->entry_count; index++, search_entry++) {
if (search_entry->tag == tag) {
break;
}
}
if (index == src->entry_count) return NOT_FOUND;
}
return get_camera_metadata_entry(src, index,
entry);
}
/*
 * Fills *entry with a read/write view of the entry at `index`: copies the
 * bookkeeping fields and points entry->data at the value, which lives either
 * inline in the buffer entry (payload <= 4 bytes) or in the packet's data
 * region at the recorded offset. Returns ERROR on invalid input, OK otherwise.
 */
int get_camera_metadata_entry(camera_metadata_t *src,
size_t index,
camera_metadata_entry_t *entry) {
if (src == NULL || entry == NULL) return ERROR;
if (index >= src->entry_count) return ERROR;
camera_metadata_buffer_entry_t *buffer_entry = get_entries(src) + index;
// Robustness fix: a corrupted/out-of-range type would otherwise index past
// the end of camera_metadata_type_size[] below — reject it explicitly.
if (buffer_entry->type >= NUM_TYPES) return ERROR;
entry->index = index;
entry->tag = buffer_entry->tag;
entry->type = buffer_entry->type;
entry->count = buffer_entry->count;
if (buffer_entry->count *
camera_metadata_type_size[buffer_entry->type] > 4) {
// value larger than 4 bytes: stored in the data region at data.offset
entry->data.u8 = get_data(src) + buffer_entry->data.offset;
} else {
// value fits inline in the entry itself
entry->data.u8 = buffer_entry->data.value;
}
return OK;
}
add_camera_metadata_entry(增加TAG和value到metadata)
ini
/*
 * Adds a new TAG + value to the packet. Resolves the TAG's value type first,
 * then delegates to add_camera_metadata_entry_raw().
 */
int add_camera_metadata_entry(camera_metadata_t *dst,
uint32_t tag,
const void *data,
size_t data_count) {
// 1. Resolve the value type for this TAG. The implementation (omitted here)
//    involves the tag-section structures described later in this article.
int type = get_local_camera_metadata_tag_type(tag, dst);
if (type == -1) {
ALOGE("%s: Unknown tag %04x.", __FUNCTION__, tag);
return ERROR;
}
// 2. Append the TAG and its data to the metadata packet
return add_camera_metadata_entry_raw(dst,
tag,
type,
data,
data_count);
}
/*
 * Appends a typed TAG value to the packet: fills the next free buffer entry
 * and stores the value either inline (payload <= 4 bytes) or at the end of
 * the data region. Fails when the entry or data capacity is exhausted.
 */
static int add_camera_metadata_entry_raw(camera_metadata_t *dst,
uint32_t tag,
uint8_t type,
const void *data,
size_t data_count) {
if (dst == NULL) return ERROR;
if (dst->entry_count == dst->entry_capacity) return ERROR;
if (data_count && data == NULL) return ERROR;
// 1. Bytes the value needs in the data region; 0 when the payload fits in
//    the entry's inline 4-byte slot
size_t data_bytes =
calculate_camera_metadata_entry_data_size(type, data_count);
if (data_bytes + dst->data_count > dst->data_capacity) return ERROR;
// 2. Actual payload size in bytes
size_t data_payload_bytes =
data_count * camera_metadata_type_size[type];
// 3. Fill in a new camera_metadata_buffer_entry_t at the end of the entry array
camera_metadata_buffer_entry_t *entry = get_entries(dst) + dst->entry_count;
memset(entry, 0, sizeof(camera_metadata_buffer_entry_t));
entry->tag = tag;
entry->type = type;
entry->count = data_count;
// 4. Copy the value: inline into the entry when it fits, otherwise append
//    it to the data region and record its offset
if (data_bytes == 0) {
memcpy(entry->data.value, data,
data_payload_bytes);
} else {
entry->data.offset = dst->data_count;
memcpy(get_data(dst) + entry->data.offset, data,
data_payload_bytes);
dst->data_count += data_bytes;
}
// 5. Account for the new entry
dst->entry_count++;
dst->flags &= ~FLAG_SORTED; // appending leaves the entry array unsorted
assert(validate_camera_metadata_structure(dst, NULL) == OK);
return OK;
}
/*
 * Number of bytes an entry's value consumes in the shared data region:
 * 0 when the value fits inline in the entry (payload <= 4 bytes) or the
 * type is invalid, otherwise the payload size rounded up to DATA_ALIGNMENT.
 */
size_t calculate_camera_metadata_entry_data_size(uint8_t type,
size_t data_count) {
if (type >= NUM_TYPES) {
return 0;
}
const size_t payload_bytes = camera_metadata_type_size[type] * data_count;
if (payload_bytes <= 4) {
return 0;
}
return ALIGN_TO(payload_bytes, DATA_ALIGNMENT);
}
delete_camera_metadata_entry(删除TAG)
删除的逻辑相对有点复杂,因为tag对应的value可能在data数组的中间,需要后面的内容,覆盖要删除的内容;
ini
/*
 * Removes the entry at `index`. If its value lives in the data region, the
 * trailing data is slid forward over it and every later entry's offset is
 * adjusted; finally the entry array itself is compacted.
 */
int delete_camera_metadata_entry(camera_metadata_t *dst,
size_t index) {
if (dst == NULL) return ERROR;
if (index >= dst->entry_count) return ERROR;
// 1. Locate the entry at `index`
camera_metadata_buffer_entry_t *entry = get_entries(dst) + index;
// 2. Bytes the value occupies in the data region (0 when stored inline)
size_t data_bytes = calculate_camera_metadata_entry_data_size(entry->type,
entry->count);
if (data_bytes > 0) {
// 3. data_bytes > 0 means the value (> 4 bytes) lives in the data region
// Shift data buffer to overwrite deleted data
uint8_t *start = get_data(dst) + entry->data.offset;
uint8_t *end = start + data_bytes;
// data_count is the used length of the region, offset is where this value
// starts and data_bytes its length, so what remains after it is:
size_t length = dst->data_count - entry->data.offset - data_bytes;
// memmove copies bytes and, unlike memcpy, is safe when the source and
// destination ranges overlap — required here since we slide in place.
// Move the `length` bytes starting at `end` down to `start`.
memmove(start, end, length);
// Update all entry indices to account for shift
camera_metadata_buffer_entry_t *e = get_entries(dst);
size_t i;
for (i = 0; i < dst->entry_count; i++) {
// only entries whose value is in the data region AND located after the
// deleted value need their offset pulled back
if (calculate_camera_metadata_entry_data_size(
e->type, e->count) > 0 &&
e->data.offset > entry->data.offset) {
e->data.offset -= data_bytes;
}
++e;
}
dst->data_count -= data_bytes;
}
// Shift entry array
// 5. Compact the entry array over the deleted slot
memmove(entry, entry + 1,
sizeof(camera_metadata_buffer_entry_t) *
(dst->entry_count - index - 1) );
dst->entry_count -= 1;
assert(validate_camera_metadata_structure(dst, NULL) == OK);
return OK;
}
// (Same helper as shown earlier, repeated in this excerpt for reference.)
// Returns the bytes a value occupies in the data region: 0 when it fits
// inline (<= 4 bytes) or the type is invalid, else aligned to DATA_ALIGNMENT.
size_t calculate_camera_metadata_entry_data_size(uint8_t type,
size_t data_count) {
if (type >= NUM_TYPES) return 0;
size_t data_bytes = data_count *
camera_metadata_type_size[type];
return data_bytes <= 4 ? 0 : ALIGN_TO(data_bytes, DATA_ALIGNMENT);
}
update_camera_metadata_entry(更新TAG的value值)
在调用update_camera_metadata_entry之前,首先需要调用find_camera_metadata_entry找到对应的entry,通过该entry获取index;
ini
/*
 * Replaces the value of the entry at `index`. When the new value's
 * data-region footprint differs from the old one, the old bytes are removed
 * (same shifting as delete) and the new value is appended at the end;
 * otherwise the value is overwritten in place (or stored inline when small).
 * Optionally returns a refreshed view through *updated_entry.
 */
int update_camera_metadata_entry(camera_metadata_t *dst,
size_t index,
const void *data,
size_t data_count,
camera_metadata_entry_t *updated_entry) {
if (dst == NULL) return ERROR;
if (index >= dst->entry_count) return ERROR;
// 1. Locate the entry at `index`
camera_metadata_buffer_entry_t *entry = get_entries(dst) + index;
// 2. data_bytes: data-region size of the NEW value (0 when it fits inline)
size_t data_bytes =
calculate_camera_metadata_entry_data_size(entry->type,
data_count);
// data_payload_bytes: actual byte size of the new value
size_t data_payload_bytes =
data_count * camera_metadata_type_size[entry->type];
// entry_bytes: data-region size of the OLD value
size_t entry_bytes =
calculate_camera_metadata_entry_data_size(entry->type,
entry->count);
// Compare old vs new data-region footprints
if (data_bytes != entry_bytes) {
// May need to shift/add to data array
// 3. Make sure the data region can hold the new value
if (dst->data_capacity < dst->data_count + data_bytes - entry_bytes) {
// No room
return ERROR;
}
// 4. Remove the old value (same shifting logic as the delete function)
if (entry_bytes != 0) {
// Remove old data
uint8_t *start = get_data(dst) + entry->data.offset;
uint8_t *end = start + entry_bytes;
size_t length = dst->data_count - entry->data.offset - entry_bytes;
memmove(start, end, length);
dst->data_count -= entry_bytes;
// Update all entry indices to account for shift
camera_metadata_buffer_entry_t *e = get_entries(dst);
size_t i;
for (i = 0; i < dst->entry_count; i++) {
if (calculate_camera_metadata_entry_data_size(
e->type, e->count) > 0 &&
e->data.offset > entry->data.offset) {
e->data.offset -= entry_bytes;
}
++e;
}
}
// 5. Append the new value at the end of the data region
if (data_bytes != 0) {
// Append new data
entry->data.offset = dst->data_count;
memcpy(get_data(dst) + entry->data.offset, data, data_payload_bytes);
dst->data_count += data_bytes;
}
} else if (data_bytes != 0) {
// data size unchanged, reuse same data location
// 6. Same footprint: overwrite the old value in place
memcpy(get_data(dst) + entry->data.offset, data, data_payload_bytes);
}
if (data_bytes == 0) {
// Data fits into entry
memcpy(entry->data.value, data,
data_payload_bytes);
}
entry->count = data_count;
if (updated_entry != NULL) {
get_camera_metadata_entry(dst,
index,
updated_entry);
}
assert(validate_camera_metadata_structure(dst, NULL) == OK);
return OK;
}
copy_camera_metadata
c
/*
 * Compact-copies src into the caller-provided dst buffer: lays out a fresh
 * header sized exactly for src's current entry_count/data_count, then copies
 * the entry array and data region. Returns the new packet, or NULL on bad
 * input or an undersized destination.
 */
camera_metadata_t* copy_camera_metadata(void *dst, size_t dst_size,
const camera_metadata_t *src) {
// Robustness fix: check the pointers before dereferencing src (the original
// computed the compact size from src first) and before using dst.
if (dst == NULL || src == NULL) return NULL;
size_t memory_needed = get_camera_metadata_compact_size(src);
if (dst_size < memory_needed) return NULL;
// Initializes the header fields of the destination packet
camera_metadata_t *metadata =
place_camera_metadata(dst, dst_size, src->entry_count, src->data_count);
// Robustness fix: place_camera_metadata can return NULL; don't deref it.
if (metadata == NULL) return NULL;
metadata->flags = src->flags;
metadata->entry_count = src->entry_count;
metadata->data_count = src->data_count;
metadata->vendor_id = src->vendor_id;
// Copy the entry array and the data region verbatim
memcpy(get_entries(metadata), get_entries(src),
sizeof(camera_metadata_buffer_entry_t[metadata->entry_count]));
memcpy(get_data(metadata), get_data(src),
sizeof(uint8_t[metadata->data_count]));
assert(validate_camera_metadata_structure(metadata, NULL) == OK);
return metadata;
}
TAG分类
TAG从归属方可以被分为两类:
- Android平台原生TAG;
- vendor TAG(platform如Qcom/MTK新增的TAG);
Camera Metadata中所有的TAG定义在camera_metadata_tags.h中;
/system/media/camera/include/system/camera_metadata_tags.h
arduino
/**
 * Top level hierarchy definitions for camera metadata. *_INFO sections are for
 * the static metadata that can be retrieved without opening the camera device.
 * New sections must be added right before ANDROID_SECTION_COUNT to maintain
 * existing enumerations.
 */
// 28 Android-defined sections precede ANDROID_SECTION_COUNT; vendor-specific
// sections begin at VENDOR_SECTION (0x8000).
typedef enum camera_metadata_section {
ANDROID_COLOR_CORRECTION,
ANDROID_CONTROL,
ANDROID_DEMOSAIC,
ANDROID_EDGE,
ANDROID_FLASH,
ANDROID_FLASH_INFO,
ANDROID_HOT_PIXEL,
ANDROID_JPEG,
ANDROID_LENS,
ANDROID_LENS_INFO,
ANDROID_NOISE_REDUCTION,
ANDROID_QUIRKS,
ANDROID_REQUEST,
ANDROID_SCALER,
ANDROID_SENSOR,
ANDROID_SENSOR_INFO,
ANDROID_SHADING,
ANDROID_STATISTICS,
ANDROID_STATISTICS_INFO,
ANDROID_TONEMAP,
ANDROID_LED,
ANDROID_INFO,
ANDROID_BLACK_LEVEL,
ANDROID_SYNC,
ANDROID_REPROCESS,
ANDROID_DEPTH,
ANDROID_LOGICAL_MULTI_CAMERA,
ANDROID_DISTORTION_CORRECTION,
ANDROID_SECTION_COUNT,
VENDOR_SECTION = 0x8000
} camera_metadata_section_t;
可以看出,系统默认定义了28个section,都是Android原生TAG的section,每一个section支持的TAG总数最大是65536;
ini
/**
 * Hierarchy positions in enum space. All vendor extension tags must be
 * defined with tag >= VENDOR_SECTION_START
 */
// Each section's first tag value is the section id shifted into the upper
// 16 bits, leaving the low 16 bits for tags within that section.
typedef enum camera_metadata_section_start {
ANDROID_COLOR_CORRECTION_START = ANDROID_COLOR_CORRECTION << 16,
ANDROID_CONTROL_START = ANDROID_CONTROL << 16,
ANDROID_DEMOSAIC_START = ANDROID_DEMOSAIC << 16,
ANDROID_EDGE_START = ANDROID_EDGE << 16,
ANDROID_FLASH_START = ANDROID_FLASH << 16,
ANDROID_FLASH_INFO_START = ANDROID_FLASH_INFO << 16,
ANDROID_HOT_PIXEL_START = ANDROID_HOT_PIXEL << 16,
ANDROID_JPEG_START = ANDROID_JPEG << 16,
ANDROID_LENS_START = ANDROID_LENS << 16,
ANDROID_LENS_INFO_START = ANDROID_LENS_INFO << 16,
ANDROID_NOISE_REDUCTION_START = ANDROID_NOISE_REDUCTION << 16,
ANDROID_QUIRKS_START = ANDROID_QUIRKS << 16,
ANDROID_REQUEST_START = ANDROID_REQUEST << 16,
ANDROID_SCALER_START = ANDROID_SCALER << 16,
ANDROID_SENSOR_START = ANDROID_SENSOR << 16,
ANDROID_SENSOR_INFO_START = ANDROID_SENSOR_INFO << 16,
ANDROID_SHADING_START = ANDROID_SHADING << 16,
ANDROID_STATISTICS_START = ANDROID_STATISTICS << 16,
ANDROID_STATISTICS_INFO_START = ANDROID_STATISTICS_INFO << 16,
ANDROID_TONEMAP_START = ANDROID_TONEMAP << 16,
ANDROID_LED_START = ANDROID_LED << 16,
ANDROID_INFO_START = ANDROID_INFO << 16,
ANDROID_BLACK_LEVEL_START = ANDROID_BLACK_LEVEL << 16,
ANDROID_SYNC_START = ANDROID_SYNC << 16,
ANDROID_REPROCESS_START = ANDROID_REPROCESS << 16,
ANDROID_DEPTH_START = ANDROID_DEPTH << 16,
ANDROID_LOGICAL_MULTI_CAMERA_START
= ANDROID_LOGICAL_MULTI_CAMERA
<< 16,
ANDROID_DISTORTION_CORRECTION_START
= ANDROID_DISTORTION_CORRECTION
<< 16,
VENDOR_SECTION_START = VENDOR_SECTION << 16
} camera_metadata_section_start_t;
定义了各个TAG对应的偏移地址;
由于在内存中,各个TAG数据都是以有序的结构体形式保存起来;
arduino
/**
 * Main enum for defining camera metadata tags. New entries must always go
 * before the section _END tag to preserve existing enumeration values. In
 * addition, the name and type of the tag needs to be added to
 * system/media/camera/src/camera_metadata_tag_info.c
 */
typedef enum camera_metadata_tag {
ANDROID_COLOR_CORRECTION_MODE = // enum | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_START,
ANDROID_COLOR_CORRECTION_TRANSFORM, // rational[] | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_GAINS, // float[] | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_ABERRATION_MODE, // enum | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
// byte[] | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_END,
ANDROID_CONTROL_AE_ANTIBANDING_MODE = // enum | public | HIDL v3.2
ANDROID_CONTROL_START,
ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, // int32 | public | HIDL v3.2
ANDROID_CONTROL_AE_LOCK, // enum | public | HIDL v3.2
ANDROID_CONTROL_AE_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AE_REGIONS, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AE_TARGET_FPS_RANGE, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_REGIONS, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AF_TRIGGER, // enum | public | HIDL v3.2
ANDROID_CONTROL_AWB_LOCK, // enum | public | HIDL v3.2
ANDROID_CONTROL_AWB_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AWB_REGIONS, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_CAPTURE_INTENT, // enum | public | HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_SCENE_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AE_AVAILABLE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AE_COMPENSATION_RANGE, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AE_COMPENSATION_STEP, // rational | public | HIDL v3.2
ANDROID_CONTROL_AF_AVAILABLE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_EFFECTS, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_SCENE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
// byte[] | public | HIDL v3.2
ANDROID_CONTROL_AWB_AVAILABLE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_MAX_REGIONS, // int32[] | ndk_public | HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_OVERRIDES, // byte[] | system | HIDL v3.2
ANDROID_CONTROL_AE_PRECAPTURE_ID, // int32 | system | HIDL v3.2
ANDROID_CONTROL_AE_STATE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_STATE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_TRIGGER_ID, // int32 | system | HIDL v3.2
ANDROID_CONTROL_AWB_STATE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS,
// int32[] | hidden | HIDL v3.2
ANDROID_CONTROL_AE_LOCK_AVAILABLE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AWB_LOCK_AVAILABLE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST, // int32 | public | HIDL v3.2
ANDROID_CONTROL_ENABLE_ZSL, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_SCENE_CHANGE, // enum | public | HIDL v3.3
ANDROID_CONTROL_END,
// (tag sections between ANDROID_CONTROL and ANDROID_LOGICAL_MULTI_CAMERA are
// elided in this excerpt)
........................
ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS = // byte[] | hidden | HIDL v3.3
ANDROID_LOGICAL_MULTI_CAMERA_START,
ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE, // enum | public | HIDL v3.3
ANDROID_LOGICAL_MULTI_CAMERA_END,
ANDROID_DISTORTION_CORRECTION_MODE = // enum | public | HIDL v3.3
ANDROID_DISTORTION_CORRECTION_START,
ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES, // byte[] | public | HIDL v3.3
ANDROID_DISTORTION_CORRECTION_END,
} camera_metadata_tag_t;
定义了各个TAG对应详细的参数,每个TAG以 ##TAG##START 和 ##TAG##END结束;
宏与字符串绑定
/system/media/camera/src/camera_metadata_tag_info.c
c
// String name for each Android metadata section, indexed by the
// camera_metadata_section enum value (designated initializers keep the
// mapping explicit).
const char *camera_metadata_section_names[ANDROID_SECTION_COUNT] = {
[ANDROID_COLOR_CORRECTION] = "android.colorCorrection",
[ANDROID_CONTROL] = "android.control",
[ANDROID_DEMOSAIC] = "android.demosaic",
[ANDROID_EDGE] = "android.edge",
[ANDROID_FLASH] = "android.flash",
[ANDROID_FLASH_INFO] = "android.flash.info",
[ANDROID_HOT_PIXEL] = "android.hotPixel",
[ANDROID_JPEG] = "android.jpeg",
[ANDROID_LENS] = "android.lens",
[ANDROID_LENS_INFO] = "android.lens.info",
[ANDROID_NOISE_REDUCTION] = "android.noiseReduction",
[ANDROID_QUIRKS] = "android.quirks",
[ANDROID_REQUEST] = "android.request",
[ANDROID_SCALER] = "android.scaler",
[ANDROID_SENSOR] = "android.sensor",
[ANDROID_SENSOR_INFO] = "android.sensor.info",
[ANDROID_SHADING] = "android.shading",
[ANDROID_STATISTICS] = "android.statistics",
[ANDROID_STATISTICS_INFO] = "android.statistics.info",
[ANDROID_TONEMAP] = "android.tonemap",
[ANDROID_LED] = "android.led",
[ANDROID_INFO] = "android.info",
[ANDROID_BLACK_LEVEL] = "android.blackLevel",
[ANDROID_SYNC] = "android.sync",
[ANDROID_REPROCESS] = "android.reprocess",
[ANDROID_DEPTH] = "android.depth",
[ANDROID_LOGICAL_MULTI_CAMERA] = "android.logicalMultiCamera",
[ANDROID_DISTORTION_CORRECTION]
= "android.distortionCorrection",
};
至此,涉及CameraMetadata的定义和描述就结束了,紧接着就是开始分析CameraMetadata在整个request的使用流程;
CameraMetadata流程分析
我们知道,在Camera2中Java层直接对参数进行设置并将其封装到CaptureRequest中,Camera2引入了管道的概念将android设备和摄像头之间联系起来,系统向camera发送Capture请求,camera会返回CameraMetadata,这一切建立在一个叫做CameraCaptureSession的会话中;
为了兼容Camera1,则在API1中的setParameter()/Parameters()方法中进行转换,最终以MetaData的形式传递下去;
首先通过Java层分析;
/frameworks/base/core/java/android/hardware/camera2/CameraMetadata.java
java
package android.hardware.camera2;
import android.annotation.NonNull;
import android.hardware.camera2.impl.CameraMetadataNative;
import android.hardware.camera2.impl.PublicKey;
import android.hardware.camera2.impl.SyntheticKey;
import android.util.Log;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
*
* @see CameraDevice
* @see CameraManager
* @see CameraCharacteristics
**/
public abstract class CameraMetadata<TKey> {
private static final String TAG = "CameraMetadataAb";
private static final boolean DEBUG = false;
// Backing native metadata instance; supplies vendor keys in getKeys()
private CameraMetadataNative mNativeInstance = null;
/**
* @hide
*/
protected CameraMetadata() {
}
/**
* Get a camera metadata field value.
*
* @hide
*/
protected abstract <T> T getProtected(TKey key);
/**
* @hide
*/
protected void setNativeInstance(CameraMetadataNative nativeInstance) {
mNativeInstance = nativeInstance;
}
/**
* Returns the list of keys contained in this map.
* The returned list is unmodifiable, so any attempt to modify it will throw.
* All values retrieved via get for a key in this list are guaranteed to be
* non-null. Each key is listed exactly once; the ordering of the keys is
* undefined.
* @hide
*/
protected abstract Class<TKey> getKeyClass();
@SuppressWarnings("unchecked")
@NonNull
public List<TKey> getKeys() {
Class<CameraMetadata<TKey>> thisClass = (Class<CameraMetadata<TKey>>) getClass();
return Collections.unmodifiableList(
getKeys(thisClass, getKeyClass(), this, /*filterTags*/null));
}
/*package*/ @SuppressWarnings("unchecked")
<TKey> ArrayList<TKey> getKeys(
Class<?> type, Class<TKey> keyClass,
CameraMetadata<TKey> instance,
int[] filterTags) {
if (DEBUG) Log.v(TAG, "getKeysStatic for " + type);
// TotalCaptureResult does not have any of the keys on it, use CaptureResult instead
if (type.equals(TotalCaptureResult.class)) {
type = CaptureResult.class;
}
// sorted so binarySearch below is valid
if (filterTags != null) {
Arrays.sort(filterTags);
}
ArrayList<TKey> keyList = new ArrayList<TKey>();
Field[] fields = type.getDeclaredFields();
for (Field field : fields) {
// Filter for Keys that are public
if (field.getType().isAssignableFrom(keyClass) &&
(field.getModifiers() & Modifier.PUBLIC) != 0) {
TKey key;
try {
key = (TKey) field.get(instance);
} catch (IllegalAccessException e) {
throw new AssertionError("Can't get IllegalAccessException", e);
} catch (IllegalArgumentException e) {
throw new AssertionError("Can't get IllegalArgumentException", e);
}
if (instance == null || instance.getProtected(key) != null) {
if (shouldKeyBeAdded(key, field, filterTags)) {
keyList.add(key);
if (DEBUG) {
Log.v(TAG, "getKeysStatic - key was added - " + key);
}
} else if (DEBUG) {
Log.v(TAG, "getKeysStatic - key was filtered - " + key);
}
}
}
}
if (null == mNativeInstance) {
return keyList;
}
// Append vendor-extension keys exposed by the native metadata
ArrayList<TKey> vendorKeys = mNativeInstance.getAllVendorKeys(keyClass);
if (vendorKeys != null) {
for (TKey k : vendorKeys) {
String keyName;
long vendorId;
if (k instanceof CaptureRequest.Key<?>) {
keyName = ((CaptureRequest.Key<?>) k).getName();
vendorId = ((CaptureRequest.Key<?>) k).getVendorId();
} else if (k instanceof CaptureResult.Key<?>) {
keyName = ((CaptureResult.Key<?>) k).getName();
vendorId = ((CaptureResult.Key<?>) k).getVendorId();
} else if (k instanceof CameraCharacteristics.Key<?>) {
keyName = ((CameraCharacteristics.Key<?>) k).getName();
vendorId = ((CameraCharacteristics.Key<?>) k).getVendorId();
} else {
continue;
}
if (filterTags == null || Arrays.binarySearch(filterTags,
CameraMetadataNative.getTag(keyName, vendorId)) >= 0) {
keyList.add(k);
}
}
}
return keyList;
}
@SuppressWarnings("rawtypes")
private static <TKey> boolean shouldKeyBeAdded(TKey key, Field field, int[] filterTags) {
if (key == null) {
throw new NullPointerException("key must not be null");
}
CameraMetadataNative.Key nativeKey;
/*
* Get the native key from the public api key
*/
if (key instanceof CameraCharacteristics.Key) {
nativeKey = ((CameraCharacteristics.Key)key).getNativeKey();
} else if (key instanceof CaptureResult.Key) {
nativeKey = ((CaptureResult.Key)key).getNativeKey();
} else if (key instanceof CaptureRequest.Key) {
nativeKey = ((CaptureRequest.Key)key).getNativeKey();
} else {
// Reject fields that aren't a key
throw new IllegalArgumentException("key type must be that of a metadata key");
}
if (field.getAnnotation(PublicKey.class) == null) {
// Never expose @hide keys up to the API user
return false;
}
// No filtering necessary
if (filterTags == null) {
return true;
}
if (field.getAnnotation(SyntheticKey.class) != null) {
// This key is synthetic, so calling #getTag will throw IAE
return true;
}
int keyTag = nativeKey.getTag();
// non-negative result is returned iff the value is in the array
return Arrays.binarySearch(filterTags, keyTag) >= 0;
}
public static final int LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED = 0;
public static final int LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE = 1;
public static final int LENS_INFO_FOCUS_DISTANCE_CALIBRATION_CALIBRATED = 2;
........................
public static final int SYNC_FRAME_NUMBER_CONVERGING = -1;
public static final int SYNC_FRAME_NUMBER_UNKNOWN = -2;
}
CameraMetadata类中只有几个方法,其余都是常量定义;这些常量用于描述camera request对camera device的控制参数,以及各类数据参数配置的取值。
而在该类中,最重要的一个变量就是mNativeInstance,该变量类型为CameraMetadataNative;
CameraMetadata为抽象类,其实现类为CaptureRequest,就是咱们Builder出来的CaptureRequest对象。同理,在processCaptureResult中上报的结果也是以CameraMetadata的方式上报上来的,因为CaptureResult同样也继承了CameraMetadata;
CaptureRequest.Builder实质上就是创建出了一个CameraMetadataNative对象(由之前的preview&capture_request流程分析可知);
java
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.hardware.camera2.impl;
import android.graphics.ImageFormat;
........................
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.HashMap;
/**
 * Implementation of camera metadata marshal/unmarshal across Binder to
 * the camera service
 */
public class CameraMetadataNative implements Parcelable {
........................
// Address of the backing native android::CameraMetadata instance.
private long mMetadataPtr; // native CameraMetadata*
// --- Lifecycle: allocate a fresh native buffer, or deep-copy another's. ---
private native long nativeAllocate();
private native long nativeAllocateCopy(CameraMetadataNative other)
throws NullPointerException;
// --- Parcel marshalling of the native buffer across Binder. ---
private native synchronized void nativeWriteToParcel(Parcel dest);
private native synchronized void nativeReadFromParcel(Parcel source);
// Swap native buffers with 'other' without copying either one.
private native synchronized void nativeSwap(CameraMetadataNative other)
throws NullPointerException;
private native synchronized void nativeClose();
// --- Queries against the native buffer. ---
private native synchronized boolean nativeIsEmpty();
private native synchronized int nativeGetEntryCount();
// Raw per-tag value access; values travel as byte[] blobs.
private native synchronized byte[] nativeReadValues(int tag);
private native synchronized void nativeWriteValues(int tag, byte[] src);
private native synchronized void nativeDump() throws IOException; // dump to ALOGD
private native synchronized ArrayList nativeGetAllVendorKeys(Class keyClass);
// --- Tag/type lookups; "Local" variants use this instance's vendor id. ---
private native synchronized int nativeGetTagFromKeyLocal(String keyName)
throws IllegalArgumentException;
private native synchronized int nativeGetTypeFromTagLocal(int tag)
throws IllegalArgumentException;
private static native int nativeGetTagFromKey(String keyName, long vendorId)
throws IllegalArgumentException;
private static native int nativeGetTypeFromTag(int tag, long vendorId)
throws IllegalArgumentException;
/**
 * <p>Perform a 0-copy swap of the internal metadata with another object.</p>
 *
 * <p>Useful to convert a CameraMetadata into e.g. a CaptureRequest.</p>
 *
 * @param other Metadata to swap with
 * @throws NullPointerException if other was null
 * @hide
 */
public void swap(CameraMetadataNative other) {
// Exchanges the two native buffer pointers in place; no data is copied.
nativeSwap(other);
}
/**
 * Number of (tag, value) entries currently stored in the native buffer.
 *
 * @return the native entry count (0 when empty)
 * @hide
 */
public int getEntryCount() {
return nativeGetEntryCount();
}
/**
 * Whether this metadata contains zero entries.
 *
 * <p>Returns {@code true} iff the native buffer holds no entries at all.</p>
 *
 * @hide
 */
public boolean isEmpty() {
return nativeIsEmpty();
}
/**
 * Return a list containing keys of the given key class for all defined vendor tags.
 *
 * @param keyClass the key class to instantiate (must not be null)
 * @throws NullPointerException if keyClass is null
 * @hide
 */
public <K> ArrayList<K> getAllVendorKeys(Class<K> keyClass) {
if (keyClass == null) {
throw new NullPointerException();
}
// Unchecked cast: the native side is expected to build keys of keyClass.
return (ArrayList<K>) nativeGetAllVendorKeys(keyClass);
}
/**
 * Convert a key string into the equivalent native tag, using the default
 * vendor tag provider.
 *
 * @throws IllegalArgumentException if the key was not recognized
 * @throws NullPointerException if the key was null
 *
 * @hide
 */
public static int getTag(String key) {
    // Long.MAX_VALUE is the sentinel vendor id — presumably "no specific
    // vendor"; behavior is identical to calling the two-arg overload.
    return getTag(key, Long.MAX_VALUE);
}
/**
 * Convert a key string into the equivalent native tag.
 *
 * @param key the key name to look up
 * @param vendorId the vendor tag provider id to resolve the key against
 * @throws IllegalArgumentException if the key was not recognized
 * @throws NullPointerException if the key was null
 *
 * @hide
 */
public static int getTag(String key, long vendorId) {
return nativeGetTagFromKey(key, vendorId);
}
/**
 * Get the underlying native type for a tag.
 *
 * @param tag An integer tag, see e.g. {@link #getTag}
 * @param vendorId A vendor tag provider id
 * @return An int enum for the metadata type, see e.g. {@link #TYPE_BYTE}
 *
 * @hide
 */
public static int getNativeType(int tag, long vendorId) {
// Pure lookup in the native tag tables; no instance state involved.
return nativeGetTypeFromTag(tag, vendorId);
}
/**
 * <p>Updates the existing entry for tag with the new bytes pointed by src, erasing
 * the entry if src was null.</p>
 *
 * <p>An empty array can be passed in to update the entry to 0 elements.</p>
 *
 * @param tag An integer tag, see e.g. {@link #getTag}
 * @param src An array of bytes, or null to erase the entry
 *
 * @hide
 */
public void writeValues(int tag, byte[] src) {
// The byte layout of src must match the tag's native type (see getNativeType).
nativeWriteValues(tag, src);
}
/**
 * <p>Returns a byte[] of data corresponding to this tag. Use a wrapped bytebuffer to unserialize
 * the data properly.</p>
 *
 * <p>An empty array can be returned to denote an existing entry with 0 elements.</p>
 *
 * @param tag An integer tag, see e.g. {@link #getTag}
 *
 * @return {@code null} if there were 0 entries for this tag, a byte[] otherwise.
 * @hide
 */
public byte[] readValues(int tag) {
// TODO: Optimization. Native code returns a ByteBuffer instead.
return nativeReadValues(tag);
}
/**
 * Dumps the native metadata contents to logcat.
 *
 * <p>Visibility for testing/debugging only. The results will not
 * include any synthesized keys, as they are invisible to the native layer.</p>
 *
 * @hide
 */
public void dumpToLog() {
try {
nativeDump();
} catch (IOException e) {
// Dump is best-effort debugging; log loudly but never propagate.
Log.wtf(TAG, "Dump logging failed", e);
}
}
@Override
protected void finalize() throws Throwable {
// Safety net: release the native buffer even if close() was never called.
try {
close();
} finally {
super.finalize();
}
}
........................
}
而在操作CameraMetadataNative中的方法时,大多是其实是调用了native中的方法,即/frameworks/base/core/jni/android_hardware_camera2_CameraMetadata.cpp中定义的方法;
arduino
........................
/*
 * JNI: unmarshal the native CameraMetadata backing 'thiz' from a Java Parcel.
 * Throws NullPointerException when the Parcel has no native peer and
 * IllegalStateException when the native read fails.
 */
static void CameraMetadata_readFromParcel(JNIEnv *env, jobject thiz, jobject parcel) {
    ALOGV("%s", __FUNCTION__);
    CameraMetadata* metadata = CameraMetadata_getPointerThrow(env, thiz);
    if (metadata == NULL) {
        // getPointerThrow has presumably raised a Java exception already.
        return;
    }
    Parcel* nativeParcel = parcelForJavaObject(env, parcel);
    if (nativeParcel == NULL) {
        jniThrowNullPointerException(env, "parcel");
        return;
    }
    const status_t err = metadata->readFromParcel(nativeParcel);
    if (err != OK) {
        jniThrowExceptionFmt(env, "java/lang/IllegalStateException",
                "Failed to read from parcel (error code %d)", err);
    }
}
/*
 * JNI: marshal the native CameraMetadata backing 'thiz' into a Java Parcel.
 * Throws NullPointerException when the Parcel has no native peer and
 * IllegalStateException when the native write fails.
 */
static void CameraMetadata_writeToParcel(JNIEnv *env, jobject thiz, jobject parcel) {
    ALOGV("%s", __FUNCTION__);
    CameraMetadata* metadata = CameraMetadata_getPointerThrow(env, thiz);
    if (metadata == NULL) {
        // getPointerThrow has presumably raised a Java exception already.
        return;
    }
    Parcel* nativeParcel = parcelForJavaObject(env, parcel);
    if (nativeParcel == NULL) {
        jniThrowNullPointerException(env, "parcel");
        return;
    }
    const status_t err = metadata->writeToParcel(nativeParcel);
    if (err != OK) {
        jniThrowExceptionFmt(env, "java/lang/IllegalStateException",
                "Failed to write to parcel (error code %d)", err);
    }
}
} // extern "C"
// JNI registration table: binds each Java 'native' declaration in
// CameraMetadataNative to its C++ implementation above. Every JNI
// signature string must stay in sync with the Java method signature,
// otherwise registration fails at runtime.
static const JNINativeMethod gCameraMetadataMethods[] = {
// static methods
{ "nativeGetTagFromKey",
"(Ljava/lang/String;J)I",
(void *)CameraMetadata_getTagFromKey },
{ "nativeGetTypeFromTag",
"(IJ)I",
(void *)CameraMetadata_getTypeFromTag },
{ "nativeSetupGlobalVendorTagDescriptor",
"()I",
(void*)CameraMetadata_setupGlobalVendorTagDescriptor },
// instance methods
{ "nativeAllocate",
"()J",
(void*)CameraMetadata_allocate },
{ "nativeAllocateCopy",
"(L" CAMERA_METADATA_CLASS_NAME ";)J",
(void *)CameraMetadata_allocateCopy },
{ "nativeIsEmpty",
"()Z",
(void*)CameraMetadata_isEmpty },
{ "nativeGetEntryCount",
"()I",
(void*)CameraMetadata_getEntryCount },
{ "nativeClose",
"()V",
(void*)CameraMetadata_close },
{ "nativeSwap",
"(L" CAMERA_METADATA_CLASS_NAME ";)V",
(void *)CameraMetadata_swap },
{ "nativeGetTagFromKeyLocal",
"(Ljava/lang/String;)I",
(void *)CameraMetadata_getTagFromKeyLocal },
{ "nativeGetTypeFromTagLocal",
"(I)I",
(void *)CameraMetadata_getTypeFromTagLocal },
{ "nativeReadValues",
"(I)[B",
(void *)CameraMetadata_readValues },
{ "nativeWriteValues",
"(I[B)V",
(void *)CameraMetadata_writeValues },
{ "nativeDump",
"()V",
(void *)CameraMetadata_dump },
{ "nativeGetAllVendorKeys",
"(Ljava/lang/Class;)Ljava/util/ArrayList;",
(void *)CameraMetadata_getAllVendorKeys},
// Parcelable interface
{ "nativeReadFromParcel",
"(Landroid/os/Parcel;)V",
(void *)CameraMetadata_readFromParcel },
{ "nativeWriteToParcel",
"(Landroid/os/Parcel;)V",
(void *)CameraMetadata_writeToParcel },
};
在native方法中,其实调用的就是CameraMetadata.cpp中的方法,至此,Java层的CameraMetadataNative和C++层的CameraMetadata.cpp就联系上了;
而CameraMetadata.cpp中定义的众多方法,本质上又是调用了camera_metadata.c中定义的函数,所以从Java -> Native -> C++ -> C的调用链路就打通了;
arduino
#define LOG_TAG "Camera2-Metadata"
#include <utils/Log.h>
#include <utils/Errors.h>
#include <binder/Parcel.h>
#include <camera/CameraMetadata.h>
#include <camera/VendorTagDescriptor.h>
namespace android {
// Round 'val' up to the next multiple of 'alignment'. The add-and-mask
// trick requires 'alignment' to be a power of two.
#define ALIGN_TO(val, alignment) \
(((uintptr_t)(val) + ((alignment) - 1)) & ~((alignment) - 1))
// Shorthands for Parcel's blob types used during (un)marshalling.
typedef Parcel::WritableBlob WritableBlob;
typedef Parcel::ReadableBlob ReadableBlob;
// Default: starts with no backing buffer (NOTE(review): a buffer is
// presumably allocated lazily on first update() -- confirm in full source).
CameraMetadata::CameraMetadata() :
mBuffer(NULL), mLocked(false) {
}
// Pre-allocate a buffer sized for the given entry and data capacities.
CameraMetadata::CameraMetadata(size_t entryCapacity, size_t dataCapacity) :
mLocked(false)
{
mBuffer = allocate_camera_metadata(entryCapacity, dataCapacity);
}
// Copy constructor: deep-clones the other instance's buffer.
CameraMetadata::CameraMetadata(const CameraMetadata &other) :
mLocked(false) {
mBuffer = clone_camera_metadata(other.mBuffer);
}
// Adopt an existing raw buffer without copying; acquire() validates it.
CameraMetadata::CameraMetadata(camera_metadata_t *buffer) :
mBuffer(NULL), mLocked(false) {
acquire(buffer);
}
// Copy-assign from another wrapper: forwards to the raw-buffer overload.
CameraMetadata &CameraMetadata::operator=(const CameraMetadata &other) {
return operator=(other.mBuffer);
}
// Copy-assign from a raw buffer: the buffer is cloned, never adopted.
CameraMetadata &CameraMetadata::operator=(const camera_metadata_t *buffer) {
// Reassigning while a getAndLock() is outstanding would invalidate the
// pointer handed out, so refuse (with a log) instead.
if (mLocked) {
ALOGE("%s: Assignment to a locked CameraMetadata!", __FUNCTION__);
return *this;
}
// Self-assignment guard; clone the new contents before freeing the old.
if (CC_LIKELY(buffer != mBuffer)) {
camera_metadata_t *newBuffer = clone_camera_metadata(buffer);
clear();
mBuffer = newBuffer;
}
return *this;
}
CameraMetadata::~CameraMetadata() {
// Force-unlock first: clear() refuses to free the buffer while locked.
mLocked = false;
clear();
}
// Hand out the raw buffer read-only and lock this instance against mutation
// until unlock() is called with the same pointer.
// NOTE(review): mLocked is written inside a const method -- presumably
// declared 'mutable' in the header; confirm there.
const camera_metadata_t* CameraMetadata::getAndLock() const {
mLocked = true;
return mBuffer;
}
// Release the lock taken by getAndLock(). The caller must pass back the very
// pointer it was handed, proving it is not holding a stale reference.
status_t CameraMetadata::unlock(const camera_metadata_t *buffer) const {
if (!mLocked) {
ALOGE("%s: Can't unlock a non-locked CameraMetadata!", __FUNCTION__);
return INVALID_OPERATION;
}
if (buffer != mBuffer) {
ALOGE("%s: Can't unlock CameraMetadata with wrong pointer!",
__FUNCTION__);
return BAD_VALUE;
}
mLocked = false;
return OK;
}
/*
 * Relinquish ownership of the underlying buffer to the caller.
 * Returns NULL (after logging) when the instance is locked; on success this
 * object no longer references any buffer and the caller must free it.
 */
camera_metadata_t* CameraMetadata::release() {
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return NULL;
    }
    camera_metadata_t *buffer = mBuffer;
    mBuffer = NULL;
    return buffer;
}
/*
 * Free the backing buffer, if any. Refuses to act (and logs) while the
 * instance is locked via getAndLock().
 */
void CameraMetadata::clear() {
    if (mLocked) {
        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
        return;
    }
    if (mBuffer == NULL) {
        return;
    }
    free_camera_metadata(mBuffer);
    mBuffer = NULL;
}
// Take ownership of 'buffer' without copying; any previously-owned buffer is
// freed first. Note the buffer is validated only after adoption: a failed
// validation is logged, but the buffer is kept anyway.
void CameraMetadata::acquire(camera_metadata_t *buffer) {
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return;
}
clear();
mBuffer = buffer;
ALOGE_IF(validate_camera_metadata_structure(mBuffer, /*size*/NULL) != OK,
"%s: Failed to validate metadata structure %p",
__FUNCTION__, buffer);
}
........................
CameraMetadata参数总结
CameraMetadata流转下发过程(预览模式):
- 初始化mMetadata对象,获取TAG为CAMERA3_TEMPLATE_PREVIEW的Metadata;
- 调用mMetadata->update更新Metadata参数,调用setStreamingRequest下发参数;
- 在Camera3Device.cpp中,最终将request放入到mRequestQueue中;
- 在Camera3Device::RequestThread::threadLoop()来对mRequestQueue队列进行处理,将获取出来的request封装成halRequest,传入hal,由hal层进行处理;
- 在hal层中调用process_capture_request()函数,即调用了QCamera3HardwareInterface::processCaptureRequest处理上层下发的request;
- 最终通过ioctl()函数往V4L2下发参数;
- 在V4L2中,根据具体设备注册V4L2_ctrl时的ops,调用不同的操作函数,来更新具体的硬件寄存器;