Audio: the audio output channel

1. Using AudioTrack

// Configure the audio attributes
AudioAttributes audioAttributes = new AudioAttributes.Builder()
        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
        .setUsage(AudioAttributes.USAGE_MEDIA)
        .build();
// Configure the audio format
AudioFormat audioFormat = new AudioFormat.Builder()
        .setEncoding(AudioFormat.ENCODING_PCM_8BIT)
        .setSampleRate(44100)
        .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
        .build();
// Compute the minimum buffer size
int bufferSize = AudioTrack.getMinBufferSize(44100, AudioFormat.CHANNEL_OUT_MONO,
        AudioFormat.ENCODING_PCM_8BIT);

// Create the AudioTrack
final AudioTrack audioTracker = new AudioTrack(audioAttributes, audioFormat, bufferSize,
        AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);

// Start playback
audioTracker.play();

// Create a thread pool and feed PCM data from a worker thread
ExecutorService executorService = Executors.newFixedThreadPool(2);
executorService.submit(new Runnable() {
    @Override
    public void run() {
        byte[] data = new byte[1024]; // small chunk size so AudioTrack's internal buffer is never overflowed
        FileInputStream fileInputStream;
        try {
            fileInputStream = new FileInputStream(new File(PCM_FILE));
        } catch (FileNotFoundException e) {
            e.printStackTrace();
            return; // nothing to play
        }
        int readSize = 0;
        while (readSize != -1) {
            try {
                readSize = fileInputStream.read(data);
            } catch (IOException e) {
                e.printStackTrace();
                break; // bail out instead of spinning on a failing stream
            }
            Log.d(TAG, "start play read size is " + readSize);
            if (readSize > 0) {
                // Write the PCM chunk into the track; blocks until there is room
                audioTracker.write(data, 0, readSize);
            }
        }
        try {
            fileInputStream.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        // Playback finished
        audioTracker.stop();
        // Release native resources
        audioTracker.release();
    }
});

1. mode: there are two data-load modes, MODE_STREAM and MODE_STATIC. In static mode all of the data is written into shared memory once, up front; in stream mode the data is written into shared memory in multiple chunks.
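
The example above uses MODE_STREAM. Here is a minimal MODE_STATIC sketch, reusing audioAttributes and audioFormat from above; soundData is a placeholder for a short PCM clip that must fit entirely in the track buffer:

// MODE_STATIC: write the whole clip once, then play
byte[] soundData = new byte[44100]; // placeholder: one second of 8-bit mono PCM
AudioTrack staticTrack = new AudioTrack(audioAttributes, audioFormat,
        soundData.length,                 // the buffer must hold the entire clip
        AudioTrack.MODE_STATIC, AudioManager.AUDIO_SESSION_ID_GENERATE);
staticTrack.write(soundData, 0, soundData.length); // one-shot write, before play()
staticTrack.play();
// staticTrack.reloadStaticData() rewinds so the same clip can be replayed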

2. Output routing flow: (attr, format, mode, ...) -> streamType -> strategy -> device -> output. For example, attributes with USAGE_MEDIA/CONTENT_TYPE_MUSIC map to AUDIO_STREAM_MUSIC, which selects STRATEGY_MEDIA, which in turn picks a device such as an A2DP headset or the speaker, and the chosen device determines which output is opened.

2. Selecting the output channel

private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int mode, int sessionId, boolean offload)
                throws IllegalArgumentException {
    
	........................................................................................

    // Check if we should enable deep buffer mode
    if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
        mAttributes = new AudioAttributes.Builder(mAttributes)
            .replaceFlags((mAttributes.getAllFlags()
                    | AudioAttributes.FLAG_DEEP_BUFFER)
                    & ~AudioAttributes.FLAG_LOW_LATENCY)
            .build();
    }

    .......................................................................................

    int[] sampleRate = new int[] {mSampleRate};
    int[] session = new int[1];
    session[0] = sessionId;
    // native initialization
    int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
            sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
            mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
            offload);
    
	.......................................................................................
	
}

When the application layer creates an AudioTrack, the constructor calls native_setup, which corresponds to the android_media_AudioTrack_setup method in android_media_AudioTrack.cpp.

xref: /frameworks/base/core/jni/android_media_AudioTrack.cpp

static const JNINativeMethod gMethods[] = {
    ..............................................................................

    {"native_setup",     "(Ljava/lang/Object;Ljava/lang/Object;[IIIIII[IJZ)I",
                                         (void *)android_media_AudioTrack_setup},
    .............................................................................
};

static jint android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
        jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,
        jlong nativeAudioTrack, jboolean offload) {

    ............................................................................................

    // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
    if (nativeAudioTrack == 0) {
        
		............................................................................................

        // Create the native AudioTrack object
        lpTrack = new AudioTrack();

        ............................................................................................

        switch (memoryMode) { // configure parameters differently for the two data modes
        case MODE_STREAM: // data is written in multiple chunks
            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    offload ? 0 : frameCount,
                    offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    0,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK : AudioTrack::TRANSFER_SYNC,
                    offload ? &offloadInfo : NULL,
                    -1, -1,                       // default uid, pid values
                    paa.get());

            break;

        case MODE_STATIC: // data is written once, up front
            // AudioTrack is using shared memory

            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                goto native_init_failure;
            }

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    lpJniStorage->mMemBase,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SHARED,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa.get());
            break;

        ............................................................................................

    } else {  // end if (nativeAudioTrack == 0)
        ............................................................................................

}

lpTrack = new AudioTrack() creates the native AudioTrack, and lpTrack->set() calls the set method in AudioTrack.cpp to configure its parameters.

xref: /frameworks/av/media/libaudioclient/AudioTrack.cpp

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
{

    .............................................................................................
	
    mCbf = cbf;

    if (cbf != NULL) { // if a callback was supplied, create an AudioTrackThread to provide audio data
        mAudioTrackThread = new AudioTrackThread(*this);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // Create the IAudioTrack
    {
        AutoMutex lock(mLock);
        status = createTrack_l();
    }
    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        goto exit;
    }

    .............................................................................................
	
}

streamType: audio stream type, e.g. Music, Voice-Call, DTMF, Alarm, etc.

sampleRate: sample rate, e.g. 16 kHz, 44.1 kHz, 48 kHz, etc.

format: audio format, e.g. PCM (or compressed formats such as MP3/AAC for offload)

channelMask: channel mask, e.g. Mono (one channel), Stereo (two channels)

frameCount: frame count; AudioFlinger uses it to size the shared-memory allocation (see the sketch after this list)

flags: output flags, see the AUDIO_OUTPUT_FLAG descriptions

cbf: callback interface

sharedBuffer: shared-memory buffer; used when the data mode is MODE_STATIC, null when the data mode is MODE_STREAM

sessionId: the session id shared with the client; it is allocated on the AudioFlinger side and passed through here, and once the output is finalized the session is recorded so that the connection can be identified by it later

pAttributes: audio attributes, i.e. the usage, contentType and other parameters passed from the application layer
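
A quick sketch of the frameCount arithmetic mentioned above (illustrative only): one frame carries one sample per channel, so a byte count maps to frames as follows.

// frameSize = channelCount * bytesPerSample; frameCount = bufferSizeInBytes / frameSize
int channelCount = 1;      // CHANNEL_OUT_MONO
int bytesPerSample = 1;    // ENCODING_PCM_8BIT
int frameSize = channelCount * bytesPerSample;  // 1 byte per frame here
int frameCount = bufferSize / frameSize;        // a 4096-byte buffer holds 4096 frames
// with stereo 16-bit PCM the same buffer would hold 4096 / (2 * 2) = 1024 frames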

status = createTrack_l() then invokes the createTrack_l method:

status_t AudioTrack::createTrack_l()
{
    status_t status;
    bool callbackAdded = false;

    // Get the AudioFlinger proxy
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("%s(%d): Could not get audioflinger",
                __func__, mPortId);
        status = NO_INIT;
        goto exit;
    }

    .......................................................................................

    // Create the IAudioTrack proxy through AudioFlinger
    sp<IAudioTrack> track = audioFlinger->createTrack(input,
                                                      output,
                                                      &status);

    .......................................................................................


    // Get the shared memory (control block)
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("%s(%d): Could not get control block", __func__, mPortId);
        status = NO_INIT;
        goto exit;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
        status = NO_INIT;
        goto exit;
    }
	
    .......................................................................................


    // update proxy
    if (mSharedBuffer == 0) { // MODE_STREAM: create an AudioTrackClientProxy
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
    } else { // MODE_STATIC: create a StaticAudioTrackClientProxy
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    // Set the playback parameters through mProxy
    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
	
	.......................................................................................

}

1. Get the AudioFlinger proxy.

2. Create the IAudioTrack proxy through AudioFlinger.

3. Obtain the shared memory through that IAudioTrack.

4. Create the client proxy matching the mode (AudioTrackClientProxy or StaticAudioTrackClientProxy).

5. Set the playback parameters through mProxy.

xref: /frameworks/av/services/audioflinger/AudioFlinger.cpp

sp<IAudioTrack> AudioFlinger::createTrack(const CreateTrackInput& input,
                                          CreateTrackOutput& output,
                                          status_t *status)
{
    sp<PlaybackThread::Track> track;
    sp<TrackHandle> trackHandle;
    sp<Client> client;
    status_t lStatus;
    audio_stream_type_t streamType;
    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
    std::vector<audio_io_handle_t> secondaryOutputs;

    bool updatePid = (input.clientInfo.clientPid == -1);
    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
    uid_t clientUid = input.clientInfo.clientUid;
    audio_io_handle_t effectThreadId = AUDIO_IO_HANDLE_NONE;
    std::vector<int> effectIds;
    audio_attributes_t localAttr = input.attr;

    if (!isAudioServerOrMediaServerUid(callingUid)) {
        ALOGW_IF(clientUid != callingUid,
                "%s uid %d tried to pass itself off as %d",
                __FUNCTION__, callingUid, clientUid);
        clientUid = callingUid;
        updatePid = true;
    }
    pid_t clientPid = input.clientInfo.clientPid;
    const pid_t callingPid = IPCThreadState::self()->getCallingPid();
    if (updatePid) {
        ALOGW_IF(clientPid != -1 && clientPid != callingPid,
                 "%s uid %d pid %d tried to pass itself off as pid %d",
                 __func__, callingUid, callingPid, clientPid);
        clientPid = callingPid;
    }

    audio_session_t sessionId = input.sessionId;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
        lStatus = BAD_VALUE;
        goto Exit;
    }

    output.sessionId = sessionId;
    output.outputId = AUDIO_IO_HANDLE_NONE;
    output.selectedDeviceId = input.selectedDeviceId;
    // Resolve the output handle (outputId) from the audio attributes
    lStatus = AudioSystem::getOutputForAttr(&localAttr, &output.outputId, sessionId, &streamType,
                                            clientPid, clientUid, &input.config, input.flags,
                                            &output.selectedDeviceId, &portId, &secondaryOutputs);

    ..............................................................................................

    {
        Mutex::Autolock _l(mLock);
        // Find the PlaybackThread for this outputId; playback threads are kept as key-value pairs in AudioFlinger's mPlaybackThreads collection
        PlaybackThread *thread = checkPlaybackThread_l(output.outputId);
        if (thread == NULL) {
            ALOGE("no playback thread found for output handle %d", output.outputId);
            lStatus = BAD_VALUE;
            goto Exit;
        }

        ........................................................................................................
		
        // Record the audio parameters in the output
        output.sampleRate = input.config.sample_rate;
        output.frameCount = input.frameCount;
        output.notificationFrameCount = input.notificationFrameCount;
        output.flags = input.flags;

        // Create a Track on the AudioFlinger side, paired one-to-one with the app-side AudioTrack; the shared-memory transfers are carried out between this Track and the AudioTrack
        track = thread->createTrack_l(client, streamType, localAttr, &output.sampleRate,
                                      input.config.format, input.config.channel_mask,
                                      &output.frameCount, &output.notificationFrameCount,
                                      input.notificationsPerBuffer, input.speed,
                                      input.sharedBuffer, sessionId, &output.flags,
                                      callingPid, input.clientInfo.clientTid, clientUid,
                                      &lStatus, portId);
        LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
        // we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless

        output.afFrameCount = thread->frameCount();
        output.afSampleRate = thread->sampleRate();
        output.afLatencyMs = thread->latency();
        output.portId = portId;

        ................................................................................................................

    // Wrap the track in a TrackHandle and return it to the application side
    trackHandle = new TrackHandle(track);

Exit:
    if (lStatus != NO_ERROR && output.outputId != AUDIO_IO_HANDLE_NONE) {
        AudioSystem::releaseOutput(portId);
    }
    *status = lStatus;
    return trackHandle;
}

This function obtains the output handle, uses the outputId to find the corresponding PlaybackThread, and has that thread create a Track. The Track is paired one-to-one with the app-side AudioTrack, and the audio data travels between the AudioTrack and the Track.
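
As a standalone illustration of that shared-memory data path (a model only, not the real audio_track_cblk_t implementation): a control block tracks read/write positions over a ring buffer, the client side advances the write position via AudioTrack.write(), and the AudioFlinger mixer thread advances the read position as it consumes frames. A minimal Java sketch:

// Minimal single-producer/single-consumer ring buffer, modeling the
// AudioTrack (producer) / Track (consumer) data path. Illustration only.
final class RingBufferModel {
    private final byte[] buffer;
    private long writePos = 0; // advanced by the client side (AudioTrack.write)
    private long readPos = 0;  // advanced by the server side (mixer thread)

    RingBufferModel(int capacity) { buffer = new byte[capacity]; }

    // Client side: copy in as much as fits, return the number of bytes accepted.
    synchronized int write(byte[] src, int len) {
        int free = buffer.length - (int) (writePos - readPos);
        int n = Math.min(len, free);
        for (int i = 0; i < n; i++) {
            buffer[(int) ((writePos + i) % buffer.length)] = src[i];
        }
        writePos += n;
        return n;
    }

    // Server side: consume up to len bytes into dst, return the number read.
    synchronized int read(byte[] dst, int len) {
        int avail = (int) (writePos - readPos);
        int n = Math.min(len, avail);
        for (int i = 0; i < n; i++) {
            dst[i] = buffer[(int) ((readPos + i) % buffer.length)];
        }
        readPos += n;
        return n;
    }
}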

xref: /frameworks/av/media/libaudioclient/AudioSystem.cpp

status_t AudioSystem::getOutputForAttr(audio_attributes_t *attr,
                                        audio_io_handle_t *output,
                                        audio_session_t session,
                                        audio_stream_type_t *stream,
                                        pid_t pid,
                                        uid_t uid,
                                        const audio_config_t *config,
                                        audio_output_flags_t flags,
                                        audio_port_handle_t *selectedDeviceId,
                                        audio_port_handle_t *portId,
                                        std::vector<audio_io_handle_t> *secondaryOutputs)
{
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
    if (aps == 0) return NO_INIT;
    return aps->getOutputForAttr(attr, output, session, stream, pid, uid,
                                 config,
                                 flags, selectedDeviceId, portId, secondaryOutputs);
}

The client calls AudioSystem::getOutputForAttr, which invokes getOutputForAttr on AudioPolicyService; the actual implementation is the getOutputForAttr method in AudioPolicyInterfaceImpl.cpp.

xref: /frameworks/av/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp

status_t AudioPolicyService::getOutputForAttr(audio_attributes_t *attr,
                                              audio_io_handle_t *output,
                                              audio_session_t session,
                                              audio_stream_type_t *stream,
                                              pid_t pid,
                                              uid_t uid,
                                              const audio_config_t *config,
                                              audio_output_flags_t flags,
                                              audio_port_handle_t *selectedDeviceId,
                                              audio_port_handle_t *portId,
                                              std::vector<audio_io_handle_t> *secondaryOutputs)
{
    if (mAudioPolicyManager == NULL) {
        return NO_INIT;
    }
    ALOGV("getOutputForAttr()");
    Mutex::Autolock _l(mLock);

    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
    if (!isAudioServerOrMediaServerUid(callingUid) || uid == (uid_t)-1) {
        ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
                "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
        uid = callingUid;
    }
    if (!mPackageManager.allowPlaybackCapture(uid)) {
        attr->flags |= AUDIO_FLAG_NO_MEDIA_PROJECTION;
    }
    if (((attr->flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0)
            && !bypassInterruptionPolicyAllowed(pid, uid)) {
        attr->flags &= ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE);
    }
    audio_output_flags_t originalFlags = flags;
    AutoCallerClear acc;
    status_t result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
                                                 config,
                                                 &flags, selectedDeviceId, portId,
                                                 secondaryOutputs);

    // FIXME: Introduce a way to check for the telephony device before opening the output
    if ((result == NO_ERROR) &&
        (flags & AUDIO_OUTPUT_FLAG_INCALL_MUSIC) &&
        !modifyPhoneStateAllowed(pid, uid)) {
        // If the app tries to play music through the telephony device and doesn't have permission
        // the fallback to the default output device.
        mAudioPolicyManager->releaseOutput(*portId);
        flags = originalFlags;
        *selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
        *portId = AUDIO_PORT_HANDLE_NONE;
        secondaryOutputs->clear();
        result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, config,
                                                       &flags, selectedDeviceId, portId,
                                                       secondaryOutputs);
    }

    if (result == NO_ERROR) {
        sp <AudioPlaybackClient> client =
            new AudioPlaybackClient(*attr, *output, uid, pid, session, *selectedDeviceId, *stream);
        mAudioPlaybackClients.add(*portId, client);
    }
    return result;
}

This method in turn calls getOutputForAttr on AudioPolicyManager.

xref: /frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp

status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *output,
                                              audio_session_t session,
                                              audio_stream_type_t *stream,
                                              uid_t uid,
                                              const audio_config_t *config,
                                              audio_output_flags_t *flags,
                                              audio_port_handle_t *selectedDeviceId,
                                              audio_port_handle_t *portId,
                                              std::vector<audio_io_handle_t> *secondaryOutputs)
{
    // The supplied portId must be AUDIO_PORT_HANDLE_NONE
    if (*portId != AUDIO_PORT_HANDLE_NONE) {
        return INVALID_OPERATION;
    }
    const audio_port_handle_t requestedPortId = *selectedDeviceId;
    audio_attributes_t resultAttr;
    bool isRequestedDeviceForExclusiveUse = false;
    std::vector<sp<SwAudioOutputDescriptor>> secondaryOutputDescs;
    const sp<DeviceDescriptor> requestedDevice =
      mAvailableOutputDevices.getDeviceFromId(requestedPortId);

    // Prevent from storing invalid requested device id in clients
    const audio_port_handle_t sanitizedRequestedPortId =
      requestedDevice != nullptr ? requestedPortId : AUDIO_PORT_HANDLE_NONE;
    *selectedDeviceId = sanitizedRequestedPortId;
    // Resolve the output by calling getOutputForAttrInt
    status_t status = getOutputForAttrInt(&resultAttr, output, session, attr, stream, uid,
            config, flags, selectedDeviceId, &isRequestedDeviceForExclusiveUse,
            &secondaryOutputDescs);
    if (status != NO_ERROR) {
        return status;
    }
    ...........................................................................................
	
    return NO_ERROR;
}

This method calls getOutputForAttrInt to resolve the output:

status_t AudioPolicyManager::getOutputForAttrInt(
        audio_attributes_t *resultAttr,
        audio_io_handle_t *output,
        audio_session_t session,
        const audio_attributes_t *attr,
        audio_stream_type_t *stream,
        uid_t uid,
        const audio_config_t *config,
        audio_output_flags_t *flags,
        audio_port_handle_t *selectedDeviceId,
        bool *isRequestedDeviceForExclusiveUse,
        std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs)
{
    DeviceVector outputDevices;
    const audio_port_handle_t requestedPortId = *selectedDeviceId;
    DeviceVector msdDevices = getMsdAudioOutDevices();
    const sp<DeviceDescriptor> requestedDevice =
        mAvailableOutputDevices.getDeviceFromId(requestedPortId);
	
    // Fetch the audio attributes: if attr is non-null copy it into resultAttr,
    // otherwise derive resultAttr from the stream type
    status_t status = getAudioAttributes(resultAttr, attr, *stream);
    if (status != NO_ERROR) {
        return status;
    }
    if (auto it = mAllowedCapturePolicies.find(uid); it != end(mAllowedCapturePolicies)) {
        resultAttr->flags |= it->second;
    }
    // Derive the streamType from the populated resultAttr; the comparison ultimately
    // happens against supportedAttr.mAttributes in the ProductStrategy class
    *stream = mEngine->getStreamTypeForAttributes(*resultAttr);

.......................................................................................

    // Look up the output devices for these attributes
    outputDevices = mEngine->getOutputDevicesForAttributes(*resultAttr, requestedDevice, false);

  .......................................................................................

    return NO_ERROR;
}

Here getAudioAttributes fetches the audio attributes: if attr is non-null it is copied into resultAttr, otherwise resultAttr is derived from the stream type.

status_t AudioPolicyManager::getAudioAttributes(audio_attributes_t *dstAttr,
                                                const audio_attributes_t *srcAttr,
                                                audio_stream_type_t srcStream)
{
   if (srcAttr != NULL) {
       if (!isValidAttributes(srcAttr)) {
           ALOGE("%s invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                   __func__,
                   srcAttr->usage, srcAttr->content_type, srcAttr->flags,
                   srcAttr->tags);
           return BAD_VALUE;
       }
       *dstAttr = *srcAttr;
   } else {
       if (srcStream < AUDIO_STREAM_MIN || srcStream >= AUDIO_STREAM_PUBLIC_CNT) {
           ALOGE("%s:  invalid stream type", __func__);
           return BAD_VALUE;
       }
       *dstAttr = mEngine->getAttributesForStreamType(srcStream);
   }

   // Only honor audibility enforced when required. The client will be
   // forced to reconnect if the forced usage changes.
   if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
       dstAttr->flags &= ~AUDIO_FLAG_AUDIBILITY_ENFORCED;
   }

   return NO_ERROR;
}

Next we analyze mEngine->getStreamTypeForAttributes(*resultAttr), which resolves the StreamType from the populated resultAttr; the comparison ultimately happens against the supportedAttr.mAttributes entries in the ProductStrategy class.

xref: /frameworks/av/services/audiopolicy/engine/common/src/EngineBase.cpp

audio_stream_type_t EngineBase::getStreamTypeForAttributes(const audio_attributes_t &attr) const
{
    return mProductStrategies.getStreamTypeForAttributes(attr);
}

This in turn calls ProductStrategy::getStreamTypeForAttributes:

xref: /frameworks/av/services/audiopolicy/engine/common/src/ProductStrategy.cpp

audio_stream_type_t ProductStrategy::getStreamTypeForAttributes(
        const audio_attributes_t &attr) const
{
    const auto iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
                                   [&attr](const auto &supportedAttr) {
        return AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr); });
    return iter != end(mAttributesVector) ? iter->mStream : AUDIO_STREAM_DEFAULT;
}

which performs the attribute comparison:

xref: /frameworks/av/media/libaudioclient/AudioProductStrategy.cpp

bool AudioProductStrategy::attributesMatches(const audio_attributes_t refAttributes,
                                        const audio_attributes_t clientAttritubes)
{
    if (refAttributes == AUDIO_ATTRIBUTES_INITIALIZER) {
        // The default product strategy is the strategy that holds default attributes by convention.
        // All attributes that fail to match will follow the default strategy for routing.
        // Choosing the default must be done as a fallback, the attributes match shall not
        // select the default.
        return false;
    }
    return ((refAttributes.usage == AUDIO_USAGE_UNKNOWN) ||
            (clientAttritubes.usage == refAttributes.usage)) &&
            ((refAttributes.content_type == AUDIO_CONTENT_TYPE_UNKNOWN) ||
             (clientAttritubes.content_type == refAttributes.content_type)) &&
            ((refAttributes.flags == AUDIO_FLAG_NONE) ||
             (clientAttritubes.flags != AUDIO_FLAG_NONE &&
            (clientAttritubes.flags & refAttributes.flags) == refAttributes.flags)) &&
            ((strlen(refAttributes.tags) == 0) ||
             (std::strcmp(clientAttritubes.tags, refAttributes.tags) == 0));
}
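
The rule reads as: every reference field acts as a wildcard when it is unknown/none/empty, otherwise the client value must match it (for flags, the client must contain all the reference flags). A simplified standalone Java model of this rule (the early return for the default strategy is omitted; Attr is a hypothetical stand-in for audio_attributes_t, with 0 playing the role of the UNKNOWN/NONE constants):

final class AttrMatch {
    static final class Attr {
        int usage;          // 0 plays the role of AUDIO_USAGE_UNKNOWN
        int contentType;    // 0 plays the role of AUDIO_CONTENT_TYPE_UNKNOWN
        int flags;          // 0 plays the role of AUDIO_FLAG_NONE
        String tags = "";
    }

    // Each reference field is a wildcard when "unknown"; otherwise the client
    // value must match it (for flags, the client must contain all ref flags).
    static boolean attributesMatches(Attr ref, Attr client) {
        return (ref.usage == 0 || client.usage == ref.usage)
                && (ref.contentType == 0 || client.contentType == ref.contentType)
                && (ref.flags == 0
                        || (client.flags != 0 && (client.flags & ref.flags) == ref.flags))
                && (ref.tags.isEmpty() || ref.tags.equals(client.tags));
    }
}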

Next we analyze the outputDevices = mEngine->getOutputDevicesForAttributes(*resultAttr, requestedDevice, false) call.

xref: /frameworks/av/services/audiopolicy/enginedefault/src/Engine.cpp

DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
                                                   const sp<DeviceDescriptor> &preferredDevice,
                                                   bool fromCache) const
{
    // First check for explicit routing device
    if (preferredDevice != nullptr) {
        ALOGV("%s explicit Routing on device %s", __func__, preferredDevice->toString().c_str());
        return DeviceVector(preferredDevice);
    }
    // Resolve the strategy from the attributes
    product_strategy_t strategy = getProductStrategyForAttributes(attributes);
    // Get all available output devices
    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
    // Get all currently opened outputs
    const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
    // Find a suitable device from outputs, strategy and availableOutputDevices
    sp<DeviceDescriptor> device = findPreferredDevice(outputs, strategy, availableOutputDevices);
    if (device != nullptr) {
        return DeviceVector(device);
    }

    return fromCache? mDevicesForStrategies.at(strategy) : getDevicesForProductStrategy(strategy);
}

1. Resolve the strategy from the attributes.

2. Get all available output devices.

3. Get all currently opened outputs.

4. Use outputs, strategy and availableOutputDevices to find a suitable device; this device is one that was opened earlier. If there is none, a device has to be opened via the strategy.

DeviceVector Engine::getDevicesForProductStrategy(product_strategy_t strategy) const
{
    DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
    DeviceVector availableInputDevices = getApmObserver()->getAvailableInputDevices();
    const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();

    auto legacyStrategy = mLegacyStrategyMap.find(strategy) != end(mLegacyStrategyMap) ?
                mLegacyStrategyMap.at(strategy) : STRATEGY_NONE;
    // Pick a suitable device for the strategy
    audio_devices_t devices = getDeviceForStrategyInt(legacyStrategy,
                                                      availableOutputDevices,
                                                      availableInputDevices, outputs,
                                                      (uint32_t)AUDIO_DEVICE_NONE);
    return availableOutputDevices.getDevicesFromTypeMask(devices);
}

getDeviceForStrategyInt then picks a suitable device for the strategy:

audio_devices_t Engine::getDeviceForStrategyInt(legacy_strategy strategy,
                                                DeviceVector availableOutputDevices,
                                                DeviceVector availableInputDevices,
                                                const SwAudioOutputCollection &outputs,
                                                uint32_t outputDeviceTypesToIgnore) const
{
    uint32_t device = AUDIO_DEVICE_NONE;
    uint32_t availableOutputDevicesType =
            availableOutputDevices.types() & ~outputDeviceTypesToIgnore;

    switch (strategy) {

    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
        device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        break;

    case STRATEGY_SONIFICATION_RESPECTFUL:
	
        ..........................................................................................
		
        break;

    case STRATEGY_DTMF:
        
		..........................................................................................
		
    // Ordinary phone calls take this strategy
    case STRATEGY_PHONE:
        // Force use of only devices on primary output if:
        // - in call AND
        //   - cannot route from voice call RX OR
        //   - audio HAL version is < 3.0 and TX device is on the primary HW module
        if (getPhoneState() == AUDIO_MODE_IN_CALL) {
            audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
            sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
            audio_devices_t availPrimaryInputDevices =
                 availableInputDevices.getDeviceTypesFromHwModule(primaryOutput->getModuleHandle());

            // TODO: getPrimaryOutput return only devices from first module in
            // audio_policy_configuration.xml, hearing aid is not there, but it's
            // a primary device
            // FIXME: this is not the right way of solving this problem
            audio_devices_t availPrimaryOutputDevices =
                (primaryOutput->supportedDevices().types() | AUDIO_DEVICE_OUT_HEARING_AID) &
                availableOutputDevices.types();

            if (((availableInputDevices.types() &
                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
                    (((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
                         (primaryOutput->getAudioPort()->getModuleVersionMajor() < 3))) {
                availableOutputDevicesType = availPrimaryOutputDevices;
            }
        }
        // for phone strategy, we first consider the forced use and then the available devices by
        // order of priority
        switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
        case AUDIO_POLICY_FORCE_BT_SCO: // Bluetooth (SCO) calls are routed here
            if (!isInCall() || strategy != STRATEGY_DTMF) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
                if (device) break;
            }
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
            if (device) break;
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
            if (device) break;
            // if SCO device is requested but no SCO device is available, fall back to default case
            FALLTHROUGH_INTENDED;

        default:    // FORCE_NONE
            
			..........................................................................................
			
            break;

        case AUDIO_POLICY_FORCE_SPEAKER:
		
            ..........................................................................................

            break;
        }
    break;

    case STRATEGY_SONIFICATION:

        ..........................................................................................

    case STRATEGY_ENFORCED_AUDIBLE:
	
        ..........................................................................................		

    case STRATEGY_ACCESSIBILITY:
	
        ..........................................................................................
		

    // FIXME: STRATEGY_REROUTING follow STRATEGY_MEDIA for now
    case STRATEGY_REROUTING:
    case STRATEGY_MEDIA: {
        uint32_t device2 = AUDIO_DEVICE_NONE;
        if (strategy != STRATEGY_SONIFICATION) {
            // no sonification on remote submix (e.g. WFD)
            if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                                                 String8("0"), AUDIO_FORMAT_DEFAULT) != 0) {
                device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
            }
        }
        // During a call, media follows the phone strategy
        if (isInCall() && (strategy == STRATEGY_MEDIA)) {
            device = getDeviceForStrategyInt(
                    STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
                    outputDeviceTypesToIgnore);
            break;
        }
        // FIXME: Find a better solution to prevent routing to BT hearing aid(b/122931261).
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                 outputs.isA2dpSupported()) {
            // Prefer an A2DP Bluetooth device
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
            // then A2DP headphones
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
            }
            // then an A2DP speaker
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
            }
        }
        // Forced speaker output
        if ((device2 == AUDIO_DEVICE_NONE) &&
            (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) == AUDIO_POLICY_FORCE_SPEAKER)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
        // Wired headphones (plain headphones: listen only, no playback control)
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
        }
		
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
        }
        // Wired headset (with inline controls)
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
        }
        // USB headset
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_HEADSET;
        }
        // USB accessory
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
        }
        // USB device
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
        }
        // Digital dock headset
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
        }
        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
            // no sonification on aux digital (e.g. HDMI)
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK) == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
        }
        // Speaker
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
        int device3 = AUDIO_DEVICE_NONE;
        if (strategy == STRATEGY_MEDIA) {
            // ARC, SPDIF and AUX_LINE can co-exist with others.
            device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
        }

        device2 |= device3;
        // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
        // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
        device |= device2;

        // If hdmi system audio mode is on, remove speaker out of output list.
        if ((strategy == STRATEGY_MEDIA) &&
            (getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO) ==
                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
        }

        // for STRATEGY_SONIFICATION:
        // if SPEAKER was selected, and SPEAKER_SAFE is available, use SPEAKER_SAFE instead
        if ((strategy == STRATEGY_SONIFICATION) &&
                (device & AUDIO_DEVICE_OUT_SPEAKER) &&
                (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
            device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
        }
        } break;

    default:
        ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
        break;
    }

    ..........................................................................................
		
    return device;
}

Selection proceeds in priority order: in-call routing first, then Bluetooth (SCO during calls, hearing aid and A2DP for media), and finally the native audio devices (wired headphone/headset, USB, dock, speaker); if a ForceUse is set, the forced device takes precedence. Note that the device value obtained from getDeviceForStrategyInt is the type attribute of a devicePort tag in audio_policy_configuration.xml, while what we actually need is a DeviceDescriptor, so availableOutputDevices.getDevicesFromTypeMask(devices) converts the type mask into DeviceDescriptor objects, as sketched below.
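
A standalone Java sketch of that final type-mask lookup (the real code is DeviceVector::getDevicesFromTypeMask; Device here is a hypothetical stand-in for DeviceDescriptor):

import java.util.ArrayList;
import java.util.List;

final class DeviceLookup {
    static final class Device {      // hypothetical stand-in for DeviceDescriptor
        final int type;              // a single type bit, e.g. AUDIO_DEVICE_OUT_SPEAKER
        Device(int type) { this.type = type; }
    }

    // Keep every available device whose type bit is selected by the strategy's mask.
    static List<Device> getDevicesFromTypeMask(List<Device> available, int typeMask) {
        List<Device> out = new ArrayList<>();
        for (Device d : available) {
            if ((d.type & typeMask) != 0) {
                out.add(d);
            }
        }
        return out;
    }
}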

Having derived the streamType and the DeviceDescriptor from attr, sampleRate, format and the other audio parameters, we next obtain the output through getOutputForDevices.

xref: /frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp

audio_io_handle_t AudioPolicyManager::getOutputForDevices(
        const DeviceVector &devices,
        audio_session_t session,
        audio_stream_type_t stream,
        const audio_config_t *config,
        audio_output_flags_t *flags,
        bool forceMutingHaptic)
{
    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
    status_t status;

    ........................................................................................

    sp<IOProfile> profile;
    
    ........................................................................................

    // flags does not include AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
    if (((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
            !(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
        // Find a profile that supports the device, sampleRate, format, etc.
        profile = getProfileForOutput(devices,
                                   config->sample_rate,
                                   config->format,
                                   channelMask,
                                   (audio_output_flags_t)*flags,
                                   true /* directOnly */);
    }
 
    if (profile != 0) {
        // Walk all currently open outputs; if one has the same parameters, reuse and return it
        for (size_t i = 0; i < mOutputs.size(); i++) {
            sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
            if (!desc->isDuplicated() && (profile == desc->mProfile)) {
                // reuse direct output if currently open by the same client
                // and configured with same parameters
                if ((config->sample_rate == desc->mSamplingRate) &&
                    (config->format == desc->mFormat) &&
                    (channelMask == desc->mChannelMask) &&
                    (session == desc->mDirectClientSession)) {
                    desc->mDirectOpenCount++;
                    ALOGI("%s reusing direct output %d for session %d", __func__,
                        mOutputs.keyAt(i), session);
                    return mOutputs.keyAt(i);
                }
            }
        }

        // If no new IO can be opened, jump to non_direct_output to select a device
        if (!profile->canOpenNewIo()) {
            goto non_direct_output;
        }

        sp<SwAudioOutputDescriptor> outputDesc =
                new SwAudioOutputDescriptor(profile, mpClientInterface);
        ........................................................................................

        
        // Open the output
        status = outputDesc->open(config, devices, stream, *flags, &output);

        // only accept an output with the requested parameters
        if (status != NO_ERROR ||
            (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
            (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
            (channelMask != 0 && channelMask != outputDesc->mChannelMask)) {
            ALOGV("%s failed opening direct output: output %d sample rate %d %d,"
                    "format %d %d, channel mask %04x %04x", __func__, output, config->sample_rate,
                    outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
                    channelMask, outputDesc->mChannelMask);
            if (output != AUDIO_IO_HANDLE_NONE) {
                outputDesc->close();
            }
            // fall back to mixer output if possible when the direct output could not be open
            if (audio_is_linear_pcm(config->format) &&
                    config->sample_rate  <= SAMPLE_RATE_HZ_MAX) {
                goto non_direct_output;
            }
            return AUDIO_IO_HANDLE_NONE;
        }
        outputDesc->mDirectOpenCount = 1;
        outputDesc->mDirectClientSession = session;
		
        // Add the outputDesc to the outputs collection
        addOutput(output, outputDesc);
        mPreviousOutputs = mOutputs;
        ALOGV("%s returns new direct output %d", __func__, output);
        mpClientInterface->onAudioPortListUpdate();
        return output;
    }
// When no direct output can be opened, pick the best match among the already-open outputs
non_direct_output:

    ................................................................................................

    // for non direct outputs, only PCM is supported
    if (audio_is_linear_pcm(config->format)) {
        // get which output is suitable for the specified stream. The actual
        // routing change will happen when startOutput() will be called
        SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);

        // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
        *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
        // Select an output based on flags, format, sampleRate, etc.
        output = selectOutput(outputs, *flags, config->format, channelMask, config->sample_rate);
    }
    ALOGW_IF((output == 0), "getOutputForDevices() could not find output for stream %d, "
            "sampling rate %d, format %#x, channels %#x, flags %#x",
            stream, config->sample_rate, config->format, channelMask, *flags);

    return output;
}

To summarize:

1. Resolve the stream type (streamType) from the input attributes.

2. Match the attributes to the output devices (outputDevices), in the priority order of in-call routing first, then Bluetooth (SCO, then A2DP), then the native audio devices.

3. Match a suitable profile against sampleRate, format and the other parameters, create a SwAudioOutputDescriptor (outputDesc) from it, then open the output for the given config, devices and stream and return it. If no suitable profile matches, select the closest already-open output based on outputs, flags, format, sampleRate, etc.
