Audio & Video Learning, Stage 2: AudioTrack Source Code Analysis (1)

To analyze the AudioTrack source code we need to follow its workflow, so let's start with the creation flow.

```mermaid
sequenceDiagram
    APP->>AudioTrack: new
```

The first hard-to-understand point in the creation flow is AudioTrack.getMinBufferSize, so let's examine that method in detail.

AudioTrack.getMinBufferSize

```java
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
    int channelCount = 0;
    // determine the channel count
    switch(channelConfig) {
    case AudioFormat.CHANNEL_OUT_MONO:
    case AudioFormat.CHANNEL_CONFIGURATION_MONO:
        channelCount = 1;
        break;
    case AudioFormat.CHANNEL_OUT_STEREO:
    case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
        channelCount = 2;
        break;
    default:
        if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
            loge("getMinBufferSize(): Invalid channel configuration.");
            return ERROR_BAD_VALUE;
        } else {
            channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
    }
    // verify the encoding is a supported public format (PCM, AAC, MP3, etc.)
    if (!AudioFormat.isPublicEncoding(audioFormat)) {
        loge("getMinBufferSize(): Invalid audio format.");
        return ERROR_BAD_VALUE;
    }

    // sample rate, note these values are subject to change
    // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
    // check that the sample rate is within the supported range
    if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
            (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
        loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
        return ERROR_BAD_VALUE;
    }
    // call the native method to get the minimum buffer size
    int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
    if (size <= 0) {
        loge("getMinBufferSize(): error querying hardware");
        return ERROR;
    }
    else {
        return size;
    }
}
```

This method first determines the channel count (a stereo stream carries twice the samples of a mono one, so the buffer size scales accordingly), then validates the encoding and the sample rate, and finally calls into the native layer to get the buffer size.
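Concretely, the returned size works out to frames × channels × bytes-per-sample. A sketch of the arithmetic (the frame count here is hypothetical; the real one is derived by the native query):

```java
// Illustrative arithmetic only: the actual frameCount comes from
// native_get_min_buff_size, based on the output stream's properties.
int frameCount = 1024;     // hypothetical frame count from the native layer
int channelCount = 2;      // stereo holds twice the samples of mono
int bytesPerSample = 2;    // ENCODING_PCM_16BIT
int minSizeInBytes = frameCount * channelCount * bytesPerSample; // 4096
```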

Key point 1: the parameters

1: sampleRateInHz: the sample rate. 44100 Hz is the usual choice for high quality; use 48000 Hz if you have very demanding quality requirements.
2: channelConfig: the channel configuration, such as mono or stereo.
3: audioFormat: the sample encoding, i.e. the bit depth (precision).
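A typical call looks like this (a minimal sketch; the values shown are common choices, not the only valid ones):

```java
import android.media.AudioFormat;
import android.media.AudioTrack;

// Query the minimum buffer for 44.1 kHz stereo 16-bit PCM.
int minBufSize = AudioTrack.getMinBufferSize(
        44100,                              // sampleRateInHz
        AudioFormat.CHANNEL_OUT_STEREO,     // channelConfig
        AudioFormat.ENCODING_PCM_16BIT);    // audioFormat
if (minBufSize == AudioTrack.ERROR || minBufSize == AudioTrack.ERROR_BAD_VALUE) {
    // this combination is not supported on the device
}
```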

Key point 2: finding the JNI implementation

Since AudioTrack itself does not load a native library, we look in AndroidRuntime.cpp, which leads to the corresponding file android_media_AudioTrack.cpp. Opening android_media_AudioTrack.cpp and searching the JNINativeMethod gMethods table, native_get_min_buff_size maps to android_media_AudioTrack_get_min_buff_size, which we can now analyze in detail.
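For reference, on the Java side this binding is nothing more than a native declaration; the mapping to the C++ function happens through that gMethods table (paraphrased from AudioTrack.java; the exact modifiers may differ between Android versions):

```java
// In android.media.AudioTrack: resolved at runtime to
// android_media_AudioTrack_get_min_buff_size through the gMethods
// table that AndroidRuntime registers at startup.
static private native int native_get_min_buff_size(
        int sampleRateInHz, int channelCount, int audioFormat);
```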

The concrete call flow below the JNI boundary is fairly involved: roughly, android_media_AudioTrack_get_min_buff_size calls AudioTrack::getMinFrameCount, which queries AudioSystem (and, through it, AudioFlinger) for the output's frame count, sample rate, and latency, then converts the resulting frame count into a byte size.

With that covered, we can analyze the AudioTrack constructor.

```java
private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int mode, int sessionId, boolean offload, int encapsulationMode,
        @Nullable TunerConfiguration tunerConfiguration)
                throws IllegalArgumentException {
    super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
    // mState already == STATE_UNINITIALIZED

    mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.

    if (format == null) {
        throw new IllegalArgumentException("Illegal null AudioFormat");
    }

    // Check if we should enable deep buffer mode
    if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
        mAttributes = new AudioAttributes.Builder(mAttributes)
            .replaceFlags((mAttributes.getAllFlags()
                    | AudioAttributes.FLAG_DEEP_BUFFER)
                    & ~AudioAttributes.FLAG_LOW_LATENCY)
            .build();
    }

    // remember which looper is associated with the AudioTrack instantiation
    Looper looper;
    if ((looper = Looper.myLooper()) == null) {
        looper = Looper.getMainLooper();
    }

    int rate = format.getSampleRate();
    if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
        rate = 0;
    }

    int channelIndexMask = 0;
    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
        channelIndexMask = format.getChannelIndexMask();
    }
    int channelMask = 0;
    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
        channelMask = format.getChannelMask();
    } else if (channelIndexMask == 0) { // if no masks at all, use stereo
        channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
    }
    int encoding = AudioFormat.ENCODING_DEFAULT;
    if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
        encoding = format.getEncoding();
    }
    audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
    mOffloaded = offload;
    mStreamType = AudioSystem.STREAM_DEFAULT;

    audioBuffSizeCheck(bufferSizeInBytes);

    mInitializationLooper = looper;

    if (sessionId < 0) {
        throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
    }

    int[] sampleRate = new int[] {mSampleRate};
    int[] session = new int[1];
    session[0] = sessionId;
    // native initialization
    int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
            sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
            mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
            offload, encapsulationMode, tunerConfiguration,
            getCurrentOpPackageName());
    if (initResult != SUCCESS) {
        loge("Error code "+initResult+" when initializing AudioTrack.");
        return; // with mState == STATE_UNINITIALIZED
    }

    mSampleRate = sampleRate[0];
    mSessionId = session[0];

    // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.

    if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
        int frameSizeInBytes;
        if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
            frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
        } else {
            frameSizeInBytes = 1;
        }
        mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
    }

    if (mDataLoadMode == MODE_STATIC) {
        mState = STATE_NO_STATIC_DATA;
    } else {
        mState = STATE_INITIALIZED;
    }

    baseRegisterPlayer(mSessionId);
    native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack.
}
```

This constructor is fairly long; here is what it actually does:
1: validates the parameters;
2: calls native_setup to create the native AudioTrack.cpp object;
3: updates the state;
4: on success, registers itself with the PlaybackActivityMonitor inside AudioService (into its mPlayers collection) so the system can manage it. This registration method, baseRegisterPlayer, comes from the parent class PlayerBase, which MediaPlayer also extends, showing that PlayerBase is the system's base class for managing audio playback.
A construction sketch using the public API follows below.
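For orientation, this private constructor sits behind the public entry points; an application usually reaches it through AudioTrack.Builder. A minimal sketch (standard public API; parameter values chosen purely for illustration):

```java
int minBufSize = AudioTrack.getMinBufferSize(44100,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);

AudioTrack track = new AudioTrack.Builder()
        .setAudioAttributes(new AudioAttributes.Builder()
                .setUsage(AudioAttributes.USAGE_MEDIA)
                .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                .build())
        .setAudioFormat(new AudioFormat.Builder()
                .setSampleRate(44100)
                .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .build())
        .setBufferSizeInBytes(minBufSize)
        .setTransferMode(AudioTrack.MODE_STREAM) // memoryMode in the JNI code below
        .build();
```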

If you are not yet familiar with the framework side of this, you can read the FrameWork column on my Jianshu; here we focus on the native-layer work.

Tracing native_setup: android_media_AudioTrack.cpp -> android_media_AudioTrack_setup

```cpp
static jint android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
                                           jobject jaa, jintArray jSampleRate,
                                           jint channelPositionMask, jint channelIndexMask,
                                           jint audioFormat, jint buffSizeInBytes, jint memoryMode,
                                           jintArray jSession, jlong nativeAudioTrack,
                                           jboolean offload, jint encapsulationMode,
                                               jobject tunerConfiguration, jstring opPackageName) {

       
    // parameter-validation code omitted
    // ...

    // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
    sp<AudioTrack> lpTrack;
    if (nativeAudioTrack == 0) {
        int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
        int sampleRateInHertz = sampleRates[0];
        env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);

        audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
                channelPositionMask, channelIndexMask);
        if (!audio_is_output_channel(nativeChannelMask)) {
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
        }

        uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);
        audio_format_t format = audioFormatToNative(audioFormat);
        if (format == AUDIO_FORMAT_INVALID) {
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
        }
        size_t frameCount;
        if (audio_has_proportional_frames(format)) {
            const size_t bytesPerSample = audio_bytes_per_sample(format);
            frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
        } else {
            frameCount = buffSizeInBytes;
        }
        ScopedUtfChars opPackageNameStr(env, opPackageName);
        // create the native AudioTrack
        lpTrack = new AudioTrack(opPackageNameStr.c_str());
        auto paa = JNIAudioAttributeHelper::makeUnique();
        jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());
        if (jStatus != (jint)AUDIO_JAVA_SUCCESS) {
            return jStatus;
        }
        // set up the JNI callback storage
        lpJniStorage = new AudioTrackJniStorage();
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioTrack object can be garbage collected.
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
        lpJniStorage->mCallbackData.isOffload = offload;
        lpJniStorage->mCallbackData.busy = false;

        audio_offload_info_t offloadInfo;
        if (offload == JNI_TRUE) {
            offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.format = format;
            offloadInfo.sample_rate = sampleRateInHertz;
            offloadInfo.channel_mask = nativeChannelMask;
            offloadInfo.has_video = false;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC; //required for offload
        }
        status_t status = NO_ERROR;
        // call the matching set() variant for the data-load mode
        switch (memoryMode) {
        case MODE_STREAM:
            status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
                                                        // in paa (last argument)
                                  sampleRateInHertz,
                                  format, // word length, PCM
                                  nativeChannelMask, offload ? 0 : frameCount,
                                  offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
                                          : AUDIO_OUTPUT_FLAG_NONE,
                                  audioCallback,
                                  &(lpJniStorage->mCallbackData), // callback, callback data (user)
                                  0,    // notificationFrames == 0 since not using EVENT_MORE_DATA
                                        // to feed the AudioTrack
                                  0,    // shared mem; 0 here means MODE_STREAM
                                  true, // thread can call Java
                                  sessionId, // audio session ID
                                  offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK
                                          : AudioTrack::TRANSFER_SYNC,
                                  offload ? &offloadInfo : NULL, -1, -1, // default uid, pid values
                                  paa.get());
            break;

        case MODE_STATIC:
            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                goto native_init_failure;
            }

            status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
                                                        // in paa (last argument)
                                  sampleRateInHertz,
                                  format, // word length, PCM
                                  nativeChannelMask, frameCount, AUDIO_OUTPUT_FLAG_NONE,
                                  audioCallback,
                                  &(lpJniStorage->mCallbackData), // callback, callback data (user)
                                  0, // notificationFrames == 0 since not using EVENT_MORE_DATA
                                     // to feed the AudioTrack
                                  lpJniStorage->mMemBase, // shared mem
                                  true,                   // thread can call Java
                                  sessionId,              // audio session ID
                                  AudioTrack::TRANSFER_SHARED,
                                  NULL,   // default offloadInfo
                                  -1, -1, // default uid, pid values
                                  paa.get());
            break;

        default:
            goto native_init_failure;
        }
        lpTrack->setCallerName("java");
    } else {  // end if (nativeAudioTrack == 0)
        // path reusing an existing native AudioTrack omitted
    }
    // attach the native-to-Java event callback
    lpJniStorage->mAudioTrackCallback =
            new JNIAudioTrackCallback(env, thiz, lpJniStorage->mCallbackData.audioTrack_ref,
                                      javaAudioTrackFields.postNativeEventInJava);
    lpTrack->setAudioTrackCallback(lpJniStorage->mAudioTrackCallback);

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        goto native_init_failure;
    }
    nSession[0] = lpTrack->getSessionId();
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    {
        const jint elements[1] = { (jint) lpTrack->getSampleRate() };
        env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
    }

    {   // scope for the lock
        Mutex::Autolock l(sLock);
        sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);
    }
    setAudioTrack(env, thiz, lpTrack);
    env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);
    env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());

    return (jint) AUDIO_JAVA_SUCCESS;

    // ... the native_init_failure cleanup label is omitted ...
}
```

With part of the validation code and the failure logic removed, this is still a lot of code, but what it does is relatively simple:
1: check the parameters;
2: build the arguments for creating the smart pointer sp<AudioTrack> lpTrack;
3: initialize lpTrack by calling set();
4: initialize the callback plumbing;
5: hand the created native AudioTrack back to the Java layer's mNativeTrackInJavaObj variable, although this path appears to be unused now.
The practical difference between the two modes is sketched below.
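The MODE_STREAM branch passes 0 as the shared-memory argument, while MODE_STATIC first allocates one shared-memory region sized to the whole clip (allocSharedMem). From the Java side, that contrast looks roughly like this (a hedged sketch; loadWavPcm() is a hypothetical helper returning 16-bit PCM):

```java
byte[] clip = loadWavPcm();  // hypothetical PCM source
AudioTrack jingle = new AudioTrack.Builder()
        .setAudioFormat(new AudioFormat.Builder()
                .setSampleRate(44100)
                .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .build())
        .setBufferSizeInBytes(clip.length)        // buffer sized to the whole clip
        .setTransferMode(AudioTrack.MODE_STATIC)  // MODE_STREAM would push chunks instead
        .build();
jingle.write(clip, 0, clip.length);  // one-time copy into the shared buffer
jingle.play();                       // can be replayed without writing again
```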

This part of the native code really is long-winded and tedious.

```cpp
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer, // 0 here means MODE_STREAM
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
{
    status_t status;
    uint32_t channelCount;
    pid_t callingPid;
    pid_t myPid;


    mThreadCanCallJava = threadCanCallJava;
    mSelectedDeviceId = selectedDeviceId;
    mSessionId = sessionId;

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
    case TRANSFER_SYNC_NOTIF_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("%s(): Transfer type %s but cbf == NULL || sharedBuffer != 0",
                    convertTransferToText(transferType), __func__);
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("%s(): Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("%s(): Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
            status = BAD_VALUE;
            goto exit;
        }
        break;
    default:
        ALOGE("%s(): Invalid transfer type %d",
                __func__, transferType);
        status = BAD_VALUE;
        goto exit;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("%s(): Track already in use", __func__);
        status = INVALID_OPERATION;
        goto exit;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("%s(): Invalid stream type %d", __func__, streamType);
            status = BAD_VALUE;
            goto exit;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        mStreamType = AUDIO_STREAM_DEFAULT;
        audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("%s(): Invalid format %#x", __func__, format);
        status = BAD_VALUE;
        goto exit;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        status = BAD_VALUE;
        goto exit;
    }
    mChannelMask = channelMask;
    channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        status = BAD_VALUE;
        goto exit;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            status = BAD_VALUE;
            goto exit;
        }
        if (frameCount > 0) {
            status = BAD_VALUE;
            goto exit;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
    }
    mNotificationFramesAct = 0;
    
    // Binder IPC is used here to identify the calling process
    callingPid = IPCThreadState::self()->getCallingPid();
    myPid = getpid();
    if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingPid != myPid)) {
        mClientPid = callingPid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }
    {
        AutoMutex lock(mLock);
        status = createTrack_l();
    }
    if (status != NO_ERROR) {
        goto exit;
    }
    // ... the remaining state bookkeeping and the exit label are omitted ...
}
```

Here, too, the playback and process parameters are converted and validated before createTrack_l() is called under the lock. Note the transfer-type switch near the top: with TRANSFER_DEFAULT, a non-null sharedBuffer selects TRANSFER_SHARED, a null callback (or a thread allowed to call Java) selects TRANSFER_SYNC, and anything else selects TRANSFER_CALLBACK. So far AudioFlinger still has not appeared, so we have to keep going deeper.

```cpp
status_t AudioTrack::createTrack_l()
{
    status_t status;
    bool callbackAdded = false;
    
    // obtain the IAudioFlinger proxy
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        status = NO_INIT;
        goto exit;
    }

    {
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // either of these use cases:
        // use case 1: shared buffer
        bool sharedBuffer = mSharedBuffer != 0;
        bool transferAllowed =
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC || mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK)
                    && mThreadCanCallJava);

        bool fastAllowed = sharedBuffer || transferAllowed;
        if (!fastAllowed) {
            ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by client,"
                  " not shared buffer and transfer = %s",
                  __func__, mPortId,
                  convertTransferToText(mTransfer));
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }
    
    // fill in the CreateTrackInput with the processed parameters for AudioFlinger
    IAudioFlinger::CreateTrackInput input;
    if (mStreamType != AUDIO_STREAM_DEFAULT) {
        input.attr = AudioSystem::streamTypeToAttributes(mStreamType);
    } else {
        input.attr = mAttributes;
    }
    input.config = AUDIO_CONFIG_INITIALIZER;
    input.config.sample_rate = mSampleRate;
    input.config.channel_mask = mChannelMask;
    input.config.format = mFormat;
    input.config.offload_info = mOffloadInfoCopy;
    input.clientInfo.clientUid = mClientUid;
    input.clientInfo.clientPid = mClientPid;
    input.clientInfo.clientTid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
            input.clientInfo.clientTid = mAudioTrackThread->getTid();
        }
    }
    input.sharedBuffer = mSharedBuffer;
    input.notificationsPerBuffer = mNotificationsPerBufferReq;
    input.speed = 1.0;
    if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
            (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
        input.speed  = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
                        max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
    }
    input.flags = mFlags;
    input.frameCount = mReqFrameCount;
    input.notificationFrameCount = mNotificationFramesReq;
    input.selectedDeviceId = mSelectedDeviceId;
    input.sessionId = mSessionId;
    input.audioTrackCallback = mAudioTrackCallback;
    input.opPackageName = mOpPackageName;

    IAudioFlinger::CreateTrackOutput output;
    
    // ask AudioFlinger to create the Track
    sp<IAudioTrack> track = audioFlinger->createTrack(input,
                                                      output,
                                                      &status);

    if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
        if (status == NO_ERROR) {
            status = NO_INIT;
        }
        goto exit;
    }
    ALOG_ASSERT(track != 0);

    mFrameCount = output.frameCount;
    mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
    mRoutedDeviceId = output.selectedDeviceId;
    mSessionId = output.sessionId;

    mSampleRate = output.sampleRate;
    if (mOriginalSampleRate == 0) {
        mOriginalSampleRate = mSampleRate;
    }

    mAfFrameCount = output.afFrameCount;
    mAfSampleRate = output.afSampleRate;
    mAfLatency = output.afLatencyMs;

    mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
    
    // obtain the shared-memory control block (cblk)
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("%s(%d): Could not get control block", __func__, mPortId);
        status = NO_INIT;
        goto exit;
    }
    void *iMemPointer = iMem->unsecurePointer();
    if (iMemPointer == NULL) {
        status = NO_INIT;
        goto exit;
    }
    if (mAudioTrack != 0) {
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
            ALOGI("%s(%d): AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
                  __func__, mPortId, mReqFrameCount, mFrameCount);
            if (!mThreadCanCallJava) {
                mAwaitBoost = true;
            }
        } else {
        }
    }
    mFlags = output.flags;
    // mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
    if (mDeviceCallback != 0) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
        }
        AudioSystem::addAudioDeviceCallback(this, output.outputId, output.portId);
        callbackAdded = true;
    }

    mPortId = output.portId;
    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output.outputId;
    mRefreshRemaining = true;

    void* buffers;
    if (mSharedBuffer == 0) { // 0 here means MODE_STREAM
        buffers = cblk + 1;
    } else {
        buffers = mSharedBuffer->unsecurePointer();
        if (buffers == NULL) {
            ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
            status = NO_INIT;
            goto exit;
        }
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);

    if (mFrameCount > mReqFrameCount) {
        mReqFrameCount = mFrameCount;
    }

    // reset server position to 0 as we have new cblk.
    mServer = 0;

    // update proxy
    if (mSharedBuffer == 0) { // 0 here means MODE_STREAM
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    // This is the first log sent from the AudioTrack client.
    // The creation of the audio track by AudioFlinger (in the code above)
    // is the first log of the AudioTrack and must be present before
    // any AudioTrack client logs will be accepted.

    mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(mPortId);
    mediametrics::LogItem(mMetricsId)
        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE)
        // the following are immutable
        .set(AMEDIAMETRICS_PROP_FLAGS, toString(mFlags).c_str())
        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
        .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
        .set(AMEDIAMETRICS_PROP_TRACKID, mPortId) // dup from key
        .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
        .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
        .set(AMEDIAMETRICS_PROP_THREADID, (int32_t)output.outputId)
        .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
        .set(AMEDIAMETRICS_PROP_ROUTEDDEVICEID, (int32_t)mRoutedDeviceId)
        .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
        .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
        // the following are NOT immutable
        .set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)mVolume[AUDIO_INTERLEAVE_LEFT])
        .set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)mVolume[AUDIO_INTERLEAVE_RIGHT])
        .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
        .set(AMEDIAMETRICS_PROP_AUXEFFECTID, (int32_t)mAuxEffectId)
        .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
        .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
        .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
        .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
                AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)effectiveSampleRate)
        .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
                AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)effectiveSpeed)
        .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
                AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)effectivePitch)
        .record();
    }

exit:
    if (status != NO_ERROR && callbackAdded) {
        // note: mOutput is always valid if callbackAdded is true
        AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
    }

    mStatus = status;

    // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
    return status;
}
```

In this method AudioFlinger finally makes its entrance, but it also brings in a lot of machinery I had not seen before, for example how the shared memory is created. That shared memory earns its keep when the Java layer calls write(): the incoming buffer is received and mapped onto the Track. That is it for this article; the principles of the shared memory and the remaining flow will be unfolded in later posts.
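As a quick preview of that write() path (a minimal streaming sketch reusing track and minBufSize from the construction sketch above; readPcm() is a hypothetical PCM source):

```java
track.play();
byte[] pcm = new byte[minBufSize];
int n;
while ((n = readPcm(pcm)) > 0) {  // hypothetical decoder or file read
    // blocks until the chunk has been copied into the shared
    // buffer that createTrack_l() obtained from AudioFlinger
    track.write(pcm, 0, n);
}
track.stop();
track.release();
```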
