在创建 AudioTrack 时有一个变量需要设置
MODE_STATIC 一整段 PCM,一次性给 AudioFlinger,之后只读不写
MODE_STREAM 是用来播放一个音频流
Kotlin
.setTransferMode(AudioTrack.MODE_STREAM)
public static final int MODE_STATIC = 0;
public static final int MODE_STREAM = 1;
base/core/jni/android_media_AudioTrack.cpp android_media_AudioTrack_setup
audio_has_proportional_frames 用来判断音频格式是不是线性 PCM(数据字节数与帧数成正比)
计算出总 frameCount
重点看下这个memoryMode参数
如果参数是 MODE_STREAM shared mem 参数就是 0
如果参数是 MODE_STATIC 那直接在app层申请内存 allocSharedMem
调用lpTrack->set设置下去
cpp
// JNI entry point backing AudioTrack's Java constructor: computes the native
// frame count from the requested buffer size, then calls AudioTrack::set()
// with either no shared buffer (MODE_STREAM) or an app-side allocated shared
// memory region (MODE_STATIC).
// NOTE(review): abridged excerpt — several locals (format, channelCount,
// status, lpTrack, sampleRateInHertz, ...) and the success-path return plus
// the native_init_failure: label are not visible here.
static jint android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
jobject jaa, jintArray jSampleRate,
jint channelPositionMask, jint channelIndexMask,
jint audioFormat, jint buffSizeInBytes, jint memoryMode,
jintArray jSession, jobject jAttributionSource,
jlong nativeAudioTrack, jboolean offload,
jint encapsulationMode, jobject tunerConfiguration,
jstring opPackageName) {
// compute the frame count
size_t frameCount;
if (audio_has_proportional_frames(format)) {
// Proportional (linear PCM) formats: frames = bytes / (channels * bytes-per-sample).
const size_t bytesPerSample = audio_bytes_per_sample(format);
frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
} else {
// Non-proportional (compressed/opaque) formats: one byte counts as one frame.
frameCount = buffSizeInBytes;
}
// memoryMode (Java MODE_STREAM / MODE_STATIC) decides how audio data
// reaches AudioFlinger.
switch (memoryMode) {
case MODE_STREAM:
// STREAM: no shared buffer is handed down (the 0 "shared mem" argument
// below); data is pushed incrementally from Java via write().
status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
// in paa (last argument)
sampleRateInHertz,
format, // word length, PCM
nativeChannelMask, offload ? 0 : frameCount,
offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
: AUDIO_OUTPUT_FLAG_NONE,
lpJniStorage,
0, // notificationFrames == 0 since not using EVENT_MORE_DATA
// to feed the AudioTrack
0, // shared mem
true, // thread can call Java
sessionId, // audio session ID
offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK
: AudioTrack::TRANSFER_SYNC,
(offload || encapsulationMode) ? &offloadInfo : NULL,
attributionSource, // Passed from Java
paa.get());
break;
case MODE_STATIC:
{
// AudioTrack is using shared memory
// STATIC: allocate the whole PCM buffer app-side up front and hand it
// to AudioFlinger as shared memory (TRANSFER_SHARED).
const auto iMem = allocSharedMem(buffSizeInBytes);
status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
// in paa (last argument)
sampleRateInHertz,
format, // word length, PCM
nativeChannelMask, frameCount, AUDIO_OUTPUT_FLAG_NONE,
lpJniStorage,
0, // notificationFrames == 0 since not using EVENT_MORE_DATA
// to feed the AudioTrack
iMem, // shared mem
true, // thread can call Java
sessionId, // audio session ID
AudioTrack::TRANSFER_SHARED,
nullptr, // default offloadInfo
attributionSource, // Passed from Java
paa.get());
break;
}
default:
ALOGE("Unknown mode %d", memoryMode);
goto native_init_failure;
}
// NOTE(review): in the full source this return sits under the
// native_init_failure: label; the excerpt makes it look unconditional.
return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
// Allocate an anonymous shared-memory region of sizeInBytes for a
// MODE_STATIC AudioTrack. Returns nullptr when the heap mapping failed.
sp<IMemory> allocSharedMem(int sizeInBytes) {
    const auto heap = sp<MemoryHeapBase>::make(sizeInBytes, 0, "AudioTrack Heap Base");
    void* const base = heap->getBase();
    if (base != MAP_FAILED && base != nullptr) {
        // Wrap the whole heap in an IMemory handle (offset 0, full size).
        return sp<MemoryBase>::make(heap, 0, sizeInBytes);
    }
    return nullptr;
}
av/media/libaudioclient/AudioTrack.cpp lpTrack->set
sharedBuffer
主要就看这个函数, 使用返回的 response 解析出 Cblk
STREAM 模式下音频数据 buffers 就是 cblk + 1,即紧跟在控制块之后
STATIC 模式下音频数据 buffers 就是传进来的 buffer
cpp
// Abridged excerpt: records the (possibly null) client shared buffer and
// creates the server-side track under mLock. sharedBuffer is non-null only
// for MODE_STATIC (TRANSFER_SHARED) tracks.
status_t AudioTrack::set(
...
const sp<IMemory>& sharedBuffer,
...)
{
// Remember the client-provided buffer; createTrack_l() forwards it to
// AudioFlinger (null/0 means STREAM mode).
mSharedBuffer = sharedBuffer;
{
AutoMutex lock(mLock);
status = createTrack_l();
}
return logIfErrorAndReturnStatus(status, "");
}
// Abridged excerpt: builds a CreateTrackInput (carrying mSharedBuffer), asks
// AudioFlinger to create the server-side track, then maps the returned
// control block (cblk) and derives the audio data buffer pointer from it.
status_t AudioTrack::createTrack_l()
{
status_t status;
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
IAudioFlinger::CreateTrackInput input;
input.sharedBuffer = mSharedBuffer;
auto aidlInput = input.toAidl();
// Parse the cblk out of the returned response.
status = audioFlinger->createTrack(aidlInput.value(), response);
IAudioFlinger::CreateTrackOutput output{};
auto trackOutput = IAudioFlinger::CreateTrackOutput::fromAidl(response);
output = trackOutput.value();
output.audioTrack->getCblk(&sfr);
// Convert the AIDL SharedFileRegion back to a legacy IMemory and map it.
auto iMemory = aidl2legacy_NullableSharedFileRegion_IMemory(sfr);
sp<IMemory> iMem = iMemory.value();
void *iMemPointer = iMem->unsecurePointer();
audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
void* buffers;
if (mSharedBuffer == 0) {
// STREAM mode: the audio data buffer starts right after the control
// block, i.e. cblk + 1.
buffers = cblk + 1;
} else {
// STATIC mode: the audio data buffer is the shared buffer the app
// passed in.
buffers = mSharedBuffer->unsecurePointer();
}
}
// NOTE(review): excerpt — the brace above already closes the function, so
// this trailing return is a truncation artifact of the quoted source.
return logIfErrorAndReturnStatus(status, "");
}
audioflinger
av/services/audioflinger/AudioFlinger.cpp createTrack
input.sharedBuffer
cpp
// AudioFlinger-side entry (abridged): converts the AIDL request back to the
// legacy CreateTrackInput — which carries sharedBuffer — and delegates to the
// playback thread's createTrack_l().
status_t AudioFlinger::createTrack(const media::CreateTrackRequest& _input,
media::CreateTrackResponse& _output)
{
CreateTrackInput input = VALUE_OR_RETURN_STATUS(CreateTrackInput::fromAidl(_input));
// sharedBuffer travels on unchanged to the thread.
track = thread->createTrack_l(..., input.sharedBuffer, ...);
return lStatus;
}
av/services/audioflinger/Threads.cpp createTrack_l
const sp<IMemory>& sharedBuffer
cpp
// Abridged excerpt: validates a client-supplied shared buffer (STATIC mode)
// before constructing the server-side Track object.
sp<IAfTrack> PlaybackThread::createTrack_l(...,
const sp<IMemory>& sharedBuffer, ...)
{
// Check that the client-provided shared memory is valid.
if (sharedBuffer != 0 && checkIMemory(sharedBuffer) != NO_ERROR) {
lStatus = BAD_VALUE;
goto Exit;
}
// Note: buffer/bufferSize are deliberately null/0 here — for STATIC tracks
// the data buffer comes from sharedBuffer instead.
track = IAfTrack::create(this, client, streamType, attr, sampleRate, format,
channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
sessionId, creatorPid, attributionSource, trackFlags,
IAfTrackBase::TYPE_DEFAULT, portId, SIZE_MAX /*frameCountToBeReady*/,
speed, isSpatialized, isBitPerfect, volume, muted);
return track;
}
av/services/audioflinger/Tracks.cpp
cpp
// Thin factory (abridged): forwards everything, including sharedBuffer,
// straight to the Track constructor.
sp<IAfTrack> IAfTrack::create(..., const sp<IMemory>& sharedBuffer, ...)
{
return sp<Track>::make(thread, ..., sharedBuffer, ...);
}
av/services/audioflinger/Tracks.cpp
在初始化父类构造器的时候,alloc 值为 ALLOC_CBLK
cpp
// Abridged excerpt: when a shared buffer is present (STATIC mode), its raw
// pointer and size are forwarded to TrackBase as buffer/bufferSize;
// otherwise the caller's (null) buffer is used and TrackBase allocates the
// ring buffer itself. alloc is fixed to ALLOC_CBLK in this path.
Track::Track(... , const sp<IMemory>& sharedBuffer, ...)
: TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
(sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
(sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
... ,
ALLOC_CBLK, /*alloc*/
...),
mFillingStatus(FS_INVALID),
// mRetryCount initialized later when needed
mSharedBuffer(sharedBuffer),
{
}
// native/libs/binder/IMemory.cpp
void* IMemory::unsecurePointer() const {
ssize_t offset;
sp<IMemoryHeap> heap = getMemory(&offset);
void* const base = heap!=nullptr ? heap->base() : MAP_FAILED;
if (base == MAP_FAILED)
return nullptr;
return static_cast<char*>(base) + offset;
}
// Size in bytes of this memory region, read from getMemory()'s size
// out-parameter (the offset out-parameter is not needed here).
size_t IMemory::size() const {
size_t size;
getMemory(nullptr, &size);
return size;
}
MODE_STATIC: buffer != NULL
size = sizeof(audio_track_cblk_t);
mCblk = allocator.allocate(size);
mBuffer = buffer;
MODE_STREAM: buffer == NULL
size = sizeof(audio_track_cblk_t) + bufferSize;
mCblk = allocator.allocate(size);
内存长这样
| audio_track_cblk_t | PCM ring buffer |
mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
cpp
// Abridged excerpt (truncated at the end of this view): allocates the shared
// control block, and for streaming tracks also the PCM ring buffer directly
// behind it in the same allocation:
//   | audio_track_cblk_t | PCM ring buffer |
// For STATIC tracks (buffer != NULL) only the cblk is allocated and mBuffer
// points at the client-supplied shared buffer.
TrackBase::TrackBase(... ,
void *buffer,
size_t bufferSize, ...)
{
// buffer == NULL means STREAM: round the frame count up for the ring
// buffer; otherwise the client buffer must already hold frameCount frames.
size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
if (minBufferSize < frameCount // roundup rounds down for values above UINT_MAX / 2
|| mFrameSize == 0 // format needs to be correct
|| minBufferSize > SIZE_MAX / mFrameSize) {
android_errorWriteLog(0x534e4554, "34749571");
return;
}
minBufferSize *= mFrameSize;
if (buffer == nullptr) {
bufferSize = minBufferSize; // allocated here.
} else if (minBufferSize > bufferSize) {
// Client shared buffer is too small for the advertised frame count.
android_errorWriteLog(0x534e4554, "38340117");
return;
}
// Every track needs a cblk; streaming ALLOC_CBLK tracks append the data
// buffer to the same allocation.
size_t size = sizeof(audio_track_cblk_t);
if (buffer == NULL && alloc == ALLOC_CBLK) {
// check overflow when computing allocation size for streaming tracks.
if (size > SIZE_MAX - bufferSize) {
android_errorWriteLog(0x534e4554, "34749571");
return;
}
size += bufferSize;
}
if (client != 0) {
// Allocate cblk (+ optional ring buffer) from the client's shared heap.
mCblkMemory = client->allocator().allocate(mediautils::NamedAllocRequest{{size},
std::string("Track ID: ").append(std::to_string(mId))});
if (mCblkMemory == 0 ||
(mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
ALOGE("%s", client->allocator().dump().c_str());
mCblkMemory.clear();
return;
}
} else {
//...
}
if (mCblk != NULL) {
// Placement-new the control block into the freshly mapped shared memory.
new(mCblk) audio_track_cblk_t();
switch (alloc) {
//...
case ALLOC_CBLK:
// clear all buffers
if (buffer == NULL) {
// STREAM: the data buffer lives immediately after the cblk.
mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
memset(mBuffer, 0, bufferSize);
} else {
// STATIC: use the client's shared buffer as-is.
mBuffer = buffer;
}
break;
//...
case ALLOC_NONE:
mBuffer = buffer;
break;
}