Android 11 AudioPolicyService Startup Flow

AudioPolicyService runs inside the audioserver process; init starts the audioserver service as declared in frameworks/av/media/audioserver/audioserver.rc:

service audioserver /system/bin/audioserver
    class core
    user audioserver
    # media gid needed for /dev/fm (radio) and for /data/misc/media (tee)
    group audio camera drmrpc media mediadrm net_bt net_bt_admin net_bw_acct wakelock
    capabilities BLOCK_SUSPEND
    ioprio rt 4
    task_profiles ProcessCapacityHigh HighPerformance
    onrestart restart vendor.audio-hal
    onrestart restart vendor.audio-hal-4-0-msd
    # Keep the original service names for backward compatibility
    onrestart restart vendor.audio-hal-2-0
    onrestart restart audio-hal-2-0

The entry point of the audioserver service is frameworks/av/media/audioserver/main_audioserver.cpp:

int main(int argc __unused, char **argv)
{
    if (doLog && (childPid = fork()) != 0) {
        // parent process: ... (omitted)
    } else {
        // child process (or doLog is false): ... (omitted)
        AudioFlinger::instantiate();        // registers the "media.audio_flinger" service
        AudioPolicyService::instantiate();  // registers the "media.audio_policy" service
        // ... (omitted)
    }
}
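
AudioPolicyService::instantiate() comes from the BinderService<> template: roughly, it creates the service object, holds it in a strong pointer, and registers it with ServiceManager under the name "media.audio_policy". A condensed sketch of that idea (a simplified model of BinderService<T>::publish(), not the verbatim implementation):

// Simplified model of what AudioPolicyService::instantiate() boils down to.
#include <binder/IServiceManager.h>
#include <utils/String16.h>
#include "AudioPolicyService.h" // AOSP-internal header for the service class

using namespace android;

static void publishAudioPolicyService()
{
    // Taking the first strong reference is what triggers onFirstRef() below.
    sp<AudioPolicyService> service = new AudioPolicyService();
    // Register with ServiceManager so clients can look up "media.audio_policy".
    defaultServiceManager()->addService(String16("media.audio_policy"), service);
}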

Instantiating AudioPolicyService this way causes its onFirstRef function to be called:

//frameworks/av/services/audiopolicy/service/AudioPolicyService.cpp
void AudioPolicyService::onFirstRef()
{
    {
        Mutex::Autolock _l(mLock);

        // start audio commands thread
        mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
        // start output activity command thread
        mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);

        mAudioPolicyClient = new AudioPolicyClient(this);
        mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);
    }
    // ... (rest of onFirstRef omitted)
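
The two AudioCommandThread instances ("ApmAudio" and "ApmOutput") are worker threads that drain a queue of audio commands, keeping potentially blocking work off the binder threads. The following self-contained toy models just that queue-plus-worker pattern; it is not the real AudioCommandThread, which is based on android::Thread and adds command types, delays, and status replies.

// Toy model of the command-thread pattern used by AudioCommandThread (illustrative only).
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class ToyCommandThread {
public:
    ToyCommandThread() : mWorker([this] { run(); }) {}
    ~ToyCommandThread() {
        { std::lock_guard<std::mutex> lk(mLock); mExit = true; }
        mCond.notify_one();
        mWorker.join();
    }
    // Callers enqueue a command; the worker thread executes commands in order.
    void post(std::function<void()> cmd) {
        { std::lock_guard<std::mutex> lk(mLock); mQueue.push(std::move(cmd)); }
        mCond.notify_one();
    }
private:
    void run() {
        for (;;) {
            std::function<void()> cmd;
            {
                std::unique_lock<std::mutex> lk(mLock);
                mCond.wait(lk, [this] { return mExit || !mQueue.empty(); });
                if (mExit && mQueue.empty()) return;
                cmd = std::move(mQueue.front());
                mQueue.pop();
            }
            cmd(); // execute outside the lock
        }
    }
    std::mutex mLock;
    std::condition_variable mCond;
    std::queue<std::function<void()>> mQueue;
    bool mExit = false;
    std::thread mWorker; // declared last so the other members are ready before it starts
};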

createAudioPolicyManager

//frameworks/av/services/audiopolicy/manager/AudioPolicyFactory.cpp
extern "C" AudioPolicyInterface* createAudioPolicyManager(
        AudioPolicyClientInterface *clientInterface)
{
    AudioPolicyManager *apm = new AudioPolicyManager(clientInterface);//1
    status_t status = apm->initialize();//2
    if (status != NO_ERROR) {
        delete apm;
        apm = nullptr;
    }
    return apm;
}

At comment 1 the AudioPolicyManager object is created; its constructor loads the audio policy configuration files. For details on configuration parsing, see the companion article "Android 11 Audio音频系统配置文件解析".

At comment 2, AudioPolicyManager's initialize function is called.

initialize

//frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
status_t AudioPolicyManager::initialize() {
    {
        auto engLib = EngineLibrary::load(
                        "libaudiopolicyengine" + getConfig().getEngineLibraryNameSuffix() + ".so"); // typically loads libaudiopolicyenginedefault.so

        mEngine = engLib->createEngine(); // obtain the Engine object
        
    }
  
    // after parsing the config, mOutputDevicesAll and mInputDevicesAll contain all known devices;
    // open all output streams needed to access attached devices
    onNewAudioModulesAvailableInt(nullptr /*newDevices*/);

    // ... (rest omitted)
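
EngineLibrary::load is a thin wrapper around dlopen/dlsym: it opens the engine library and resolves the factory function used to create the engine instance. Below is a minimal sketch of that pattern; the exported symbol name "createEngineInstance" is an assumption here, so check EngineLibrary for the exact contract.

// Sketch of the dlopen/dlsym pattern behind EngineLibrary::load (symbol name assumed).
#include <dlfcn.h>
#include <cstdio>

struct EngineInterface; // opaque here; the real type comes from the policy engine headers

static EngineInterface* loadEngine(const char* libName)
{
    void* handle = dlopen(libName, RTLD_NOW);
    if (handle == nullptr) {
        fprintf(stderr, "dlopen(%s) failed: %s\n", libName, dlerror());
        return nullptr;
    }
    using CreateFn = EngineInterface* (*)();
    auto create = reinterpret_cast<CreateFn>(dlsym(handle, "createEngineInstance"));
    if (create == nullptr) {
        fprintf(stderr, "factory symbol not found: %s\n", dlerror());
        dlclose(handle);
        return nullptr;
    }
    return create(); // the engine object is now backed by the loaded .so
}

// Usage, mirroring the default-suffix case in AudioPolicyManager::initialize():
//   EngineInterface* engine = loadEngine("libaudiopolicyenginedefault.so");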

The onNewAudioModulesAvailableInt function mainly does three things:

  1. For each hwModule named in the configuration file, load the corresponding HAL library (loadHwModule)
  2. For every outProfile in each hwModule's mOutputProfiles, open an output stream and create a playback thread
  3. For every inProfile in each hwModule's mInputProfiles, open an input stream and create a record thread

1. Loading the HAL library

//frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
void AudioPolicyManager::onNewAudioModulesAvailableInt(DeviceVector *newDevices)
{
    for (const auto& hwModule : mHwModulesAll) {
        if (std::find(mHwModules.begin(), mHwModules.end(), hwModule) != mHwModules.end()) {
            continue;
        }
        hwModule->setHandle(mpClientInterface->loadHwModule(hwModule->getName()));//1
        // ... (omitted)
}

At comment 1, loadHwModule is called and the returned handle is stored in the hwModule's mHandle. Note that what is passed in is the hwModule's name, for example "primary". The call eventually reaches AudioFlinger's loadHwModule function.
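
mpClientInterface here is the AudioPolicyClient created in onFirstRef; its loadHwModule simply forwards to AudioFlinger through AudioSystem. A condensed view of that hop (based on AudioPolicyClientImpl.cpp, with logging and error handling trimmed):

//frameworks/av/services/audiopolicy/service/AudioPolicyClientImpl.cpp (condensed)
audio_module_handle_t AudioPolicyService::AudioPolicyClient::loadHwModule(const char *name)
{
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == nullptr) {
        return AUDIO_MODULE_HANDLE_NONE;
    }
    return af->loadHwModule(name); // IAudioFlinger call into AudioFlinger::loadHwModule
}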

//frameworks/av/services/audioflinger/AudioFlinger.cpp
audio_module_handle_t AudioFlinger::loadHwModule(const char *name)
{
    // ... (omitted)
    Mutex::Autolock _l(mLock);
    AutoMutex lock(mHardwareLock);
    return loadHwModule_l(name);
}

loadHwModule_l

//frameworks/av/services/audioflinger/AudioFlinger.cpp
audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
{
    // ... (omitted)
	sp<DeviceHalInterface> dev;

    int rc = mDevicesFactoryHal->openDevice(name, &dev);//1
    // ... (omitted)
	audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
    AudioHwDevice *audioDevice = new AudioHwDevice(handle, name, dev, flags);//2
   
    mAudioHwDevs.add(handle, audioDevice);//3

    return handle;
}

At comment 1, the corresponding HAL library (for example audio.primary.default.so) is loaded via HIDL and the audio HAL's open function is called, yielding an audio_hw_device object. At comment 2 this object is wrapped in an AudioHwDevice, and at comment 3 the AudioHwDevice is added to the mAudioHwDevs collection.
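
Behind that HIDL call, the commonly used default HAL implementation still relies on the legacy libhardware loader to map the module name to a library such as audio.primary.<variant>.so (falling back to audio.primary.default.so) and to open the device. A rough sketch of that legacy path, assuming the hardware/hardware.h and hardware/audio.h interfaces (the HIDL plumbing itself is omitted):

// Sketch of the legacy libhardware path that turns "primary" into audio.primary.*.so.
#include <hardware/hardware.h>
#include <hardware/audio.h>

static int openAudioHalDevice(const char *moduleName, struct audio_hw_device **outDev)
{
    const struct hw_module_t *module = nullptr;
    // Searches the hw/ library directories for audio.<moduleName>.<ro.hardware>.so,
    // audio.<moduleName>.default.so, etc.
    int rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, moduleName, &module);
    if (rc != 0) {
        return rc;
    }
    // Calls the module's open() method and returns the audio_hw_device described above.
    return audio_hw_device_open(module, outDev);
}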

2. Opening output streams and creating playback threads

//frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
void AudioPolicyManager::onNewAudioModulesAvailableInt(DeviceVector *newDevices)
{
    for (const auto& hwModule : mHwModulesAll) {
        if (std::find(mHwModules.begin(), mHwModules.end(), hwModule) != mHwModules.end()) {
            continue;
        }
       
        // ... (omitted)
 		for (const auto& outProfile : hwModule->getOutputProfiles()) {
            // ... (omitted)
			sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,mpClientInterface);
            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
            status_t status = outputDesc->open(nullptr, DeviceVector(supportedDevice),
                                               AUDIO_STREAM_DEFAULT,
                                               AUDIO_OUTPUT_FLAG_NONE, &output);

            // ... (omitted)
            addOutput(output, outputDesc); // store in mOutputs, keyed by output
		}   

    // ... (omitted)
}

For every outProfile under a module, a SwAudioOutputDescriptor object is created and its open method is called; the descriptor is then stored in the mOutputs collection, keyed by the returned output handle. The open call eventually reaches AudioFlinger's openOutput_l function.

//frameworks/av/services/audioflinger/AudioFlinger.cpp
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                        audio_io_handle_t *output,
                                                        audio_config_t *config,
                                                        audio_devices_t deviceType,
                                                        const String8& address,
                                                        audio_output_flags_t flags)
{
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, deviceType); // look up the AudioHwDevice in mAudioHwDevs by module handle

    // ... (omitted)
	AudioStreamOut *outputStream = NULL;
    status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            deviceType,
            flags,
            config,
            address.string());//1
    
    if (status == NO_ERROR) {
        if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) {
            // ... (omitted: MmapPlaybackThread)
        } else {
            sp<PlaybackThread> thread;
            if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
                // ... (omitted: OffloadThread)
            } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
                    || !isValidPcmSinkFormat(config->format)
                    || !isValidPcmSinkChannelMask(config->channel_mask)) {
                // ... (omitted: DirectOutputThread)
            } else {
                thread = new MixerThread(this, outputStream, *output, mSystemReady); // create the MixerThread playback thread
            }
            mPlaybackThreads.add(*output, thread); // add to mPlaybackThreads, keyed by *output
            return thread;
        }
    }

    return 0;
}

At comment 1, AudioHwDevice's openOutputStream function is called to open the output stream. A MixerThread playback thread is then created and stored in the mPlaybackThreads collection, again keyed by output; this output is the same handle under which outputDesc was stored in mOutputs above.
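
The branch ladder above is the whole thread-type decision: MMAP outputs get an MmapPlaybackThread, compress-offload outputs an OffloadThread, direct or non-PCM sinks a DirectOutputThread, and everything else the common MixerThread. A compressed restatement of that decision (toy enum and booleans instead of the real flag bits and thread classes):

// Toy restatement of the thread-type selection in openOutput_l (illustrative only).
enum class PlaybackThreadKind { Mmap, Offload, Direct, Mixer };

PlaybackThreadKind pickThreadKind(bool mmapNoIrq, bool compressOffload, bool direct,
                                  bool pcmFormatOk, bool pcmChannelMaskOk)
{
    if (mmapNoIrq)       return PlaybackThreadKind::Mmap;    // AUDIO_OUTPUT_FLAG_MMAP_NOIRQ
    if (compressOffload) return PlaybackThreadKind::Offload; // AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
    if (direct || !pcmFormatOk || !pcmChannelMaskOk) {
        return PlaybackThreadKind::Direct;                   // AUDIO_OUTPUT_FLAG_DIRECT or non-PCM sink
    }
    return PlaybackThreadKind::Mixer;                        // default: mixable PCM output
}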

openOutputStream

//frameworks/av/services/audioflinger/AudioHwDevice.cpp
status_t AudioHwDevice::openOutputStream(
        AudioStreamOut **ppStreamOut,
        audio_io_handle_t handle,
        audio_devices_t deviceType,
        audio_output_flags_t flags,
        struct audio_config *config,
        const char *address)
{

    struct audio_config originalConfig = *config;
    AudioStreamOut *outputStream = new AudioStreamOut(this, flags); // create the AudioStreamOut object
    
    status_t status = outputStream->open(handle, deviceType, config, address);

    // ... (omitted)
	*ppStreamOut = outputStream;
    return status;
}

AudioStreamOut::open

//frameworks/av/services/audioflinger/AudioStreamOut.cpp
status_t AudioStreamOut::open(
        audio_io_handle_t handle,
        audio_devices_t deviceType,
        struct audio_config *config,
        const char *address)
{
    sp<StreamOutHalInterface> outStream;

    int status = hwDev()->openOutputStream(
            handle,
            deviceType,
            customFlags,
            config,
            address,
            &outStream);
    // ... (rest omitted)

openOutputStream eventually goes through HIDL into the audio HAL's open_output_stream function and gets back an audio_stream_out object.
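
On the HAL side (behind the HIDL service), a legacy libhardware audio HAL implements open_output_stream along the lines of the heavily trimmed skeleton below, which assumes the hardware/audio.h interface. A real HAL must fill in every audio_stream / audio_stream_out callback (write, standby, get_latency, and so on); only two are shown here.

// Heavily trimmed vendor-HAL sketch of open_output_stream (assumes hardware/audio.h).
#include <errno.h>
#include <stdlib.h>
#include <hardware/audio.h>

struct stub_stream_out {
    struct audio_stream_out stream; // must be first: the framework sees this view
    struct audio_config config;
};

static uint32_t out_get_sample_rate(const struct audio_stream *stream)
{
    const struct stub_stream_out *out = (const struct stub_stream_out *)stream;
    return out->config.sample_rate;
}

static uint32_t out_get_latency(const struct audio_stream_out *stream)
{
    (void)stream;
    return 40; // milliseconds, device specific
}

static int adev_open_output_stream(struct audio_hw_device *dev,
                                   audio_io_handle_t handle,
                                   audio_devices_t devices,
                                   audio_output_flags_t flags,
                                   struct audio_config *config,
                                   struct audio_stream_out **stream_out,
                                   const char *address)
{
    (void)dev; (void)handle; (void)devices; (void)flags; (void)address;
    struct stub_stream_out *out =
            (struct stub_stream_out *)calloc(1, sizeof(*out));
    if (out == NULL) {
        return -ENOMEM;
    }
    out->config = *config;
    out->stream.common.get_sample_rate = out_get_sample_rate;
    out->stream.get_latency = out_get_latency;
    // ... the remaining common/stream_out ops (write, standby, dump, ...) go here ...
    *stream_out = &out->stream; // this is the audio_stream_out the framework receives
    return 0;
}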

3. Opening input streams and creating record threads

This flow mirrors the output path above and eventually reaches AudioFlinger's openInput_l:

//frameworks/av/services/audioflinger/AudioFlinger.cpp
sp<AudioFlinger::ThreadBase> AudioFlinger::openInput_l(audio_module_handle_t module,
                                                         audio_io_handle_t *input,
                                                         audio_config_t *config,
                                                         audio_devices_t devices,
                                                         const String8& address,
                                                         audio_source_t source,
                                                         audio_input_flags_t flags,
                                                         audio_devices_t outputDevice,
                                                         const String8& outputDeviceAddress)
{
    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, devices); // find the AudioHwDevice
    // ... (omitted)
    sp<DeviceHalInterface> inHwHal = inHwDev->hwDevice();
    sp<StreamInHalInterface> inStream;
    status_t status = inHwHal->openInputStream(
            *input, devices, &halconfig, flags, address.string(), source,
            outputDevice, outputDeviceAddress, &inStream);//1

    // ... (omitted)

    if (status == NO_ERROR && inStream != 0) {
        AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream, flags); // wrap the HAL audio_stream_in in an AudioStreamIn
        if ((flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
            // ... (omitted)
        } else {
            // Start record thread
            // RecordThread requires both input and output device indication to forward to audio
            // pre processing modules
            sp<RecordThread> thread = new RecordThread(this, inputStream, *input, mSystemReady); // create the RecordThread
            mRecordThreads.add(*input, thread); // store in mRecordThreads, keyed by *input
            ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get());
            return thread;
        }
    // ... (rest omitted)

At comment 1, the call goes through HIDL into the audio HAL's open_input_stream function and gets back an audio_stream_in object.

Summary

During AudioPolicyService startup, the audio policy configuration files are parsed. For each HwModule, the corresponding HAL library is loaded by name, yielding an audio_hw_device object; that object is wrapped in an AudioHwDevice, and the AudioHwDevice is added to the mAudioHwDevs collection.

For each outProfile under a HwModule, a SwAudioOutputDescriptor object is created, the output stream is opened, and a playback thread is created. The SwAudioOutputDescriptor is added to the mOutputs collection under the returned output handle. Opening the output stream yields the HAL's audio_stream_out object, which is wrapped in an AudioStreamOut; that AudioStreamOut is passed in when the playback thread is created, and the playback thread is finally stored in mPlaybackThreads.

For each inProfile under a HwModule, the input stream is opened and a record thread is created. Opening the input stream yields the HAL's audio_stream_in object, which is wrapped in an AudioStreamIn; that AudioStreamIn is passed in when the record thread is created, and the record thread is finally stored in mRecordThreads.

The SwAudioOutputDescriptor (in mOutputs) and the playback thread (in mPlaybackThreads) are stored under the same output handle.
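
To make that last point concrete: the audio_io_handle_t acts as the shared key between the policy side (mOutputs) and the AudioFlinger side (mPlaybackThreads), so later requests on an output descriptor can always find the matching playback thread. A toy illustration with std::map stand-ins (not the AOSP collections):

// Toy illustration of the shared audio_io_handle_t key (stand-in types, not AOSP code).
#include <map>
#include <string>

using audio_io_handle_t = int; // stand-in for the real AOSP typedef

struct ToyOutputDescriptor { std::string profileName; }; // stands in for SwAudioOutputDescriptor
struct ToyPlaybackThread  { std::string type; };         // stands in for MixerThread etc.

int main()
{
    std::map<audio_io_handle_t, ToyOutputDescriptor> outputs;        // AudioPolicyManager::mOutputs
    std::map<audio_io_handle_t, ToyPlaybackThread> playbackThreads;  // AudioFlinger::mPlaybackThreads

    audio_io_handle_t output = 13;                    // handle produced when the stream was opened
    outputs[output] = {"primary output"};             // addOutput(output, outputDesc)
    playbackThreads[output] = {"MixerThread"};        // mPlaybackThreads.add(*output, thread)

    // Later, anything holding the output handle can reach the matching thread.
    if (outputs.count(output) && playbackThreads.count(output)) {
        const ToyPlaybackThread &thread = playbackThreads.at(output); // same key on both sides
        (void)thread;
    }
    return 0;
}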
