Android 13 中 AudioTrack 写数据的调用流程是一个从应用层到 HAL 层的完整数据通路。以下是详细的调用流程分析:
- Java 层调用入口
应用层写入数据
// Application code: typical streaming-playback usage of AudioTrack.
// The track is started first; write() then feeds PCM data and blocks
// (WRITE_BLOCKING) until the whole buffer has been queued.
AudioTrack audioTrack = new AudioTrack(...);
audioTrack.play();
byte[] audioData = new byte[bufferSize];
int bytesWritten = audioTrack.write(audioData, 0, audioData.length,
AudioTrack.WRITE_BLOCKING);
- Java Framework 层
AudioTrack.write() 方法
// frameworks/base/media/java/android/media/AudioTrack.java
/**
 * Writes PCM byte data to the track for playback.
 *
 * @param audioData     the source array holding the samples
 * @param offsetInBytes start offset into {@code audioData}
 * @param sizeInBytes   number of bytes to write
 * @param writeMode     WRITE_BLOCKING or WRITE_NON_BLOCKING
 * @return the number of bytes written, or a negative error code
 * @throws IllegalArgumentException on null data or out-of-range offset/size
 */
public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
        @WriteMode int writeMode) {
    // Validate arguments here — the native layer would otherwise read
    // outside the array for a bad offset/size combination.
    if (audioData == null) {
        throw new IllegalArgumentException("audioData is null");
    }
    // The overflow check (offsetInBytes + sizeInBytes < 0) guards against
    // two large positive ints wrapping around to a negative sum.
    if (offsetInBytes < 0 || sizeInBytes < 0
            || offsetInBytes + sizeInBytes < 0
            || offsetInBytes + sizeInBytes > audioData.length) {
        throw new IllegalArgumentException("invalid offset/size");
    }
    // Delegate to the JNI entry point (android_media_AudioTrack.cpp).
    return native_write_byte(audioData, offsetInBytes, sizeInBytes, writeMode);
}
- JNI 层转换
JNI 方法映射
// frameworks/base/core/jni/android_media_AudioTrack.cpp
// JNI registration table: maps Java native_write_* methods to their C++
// implementations. Signatures: ([BIII)I = (byte[], int, int, int) -> int,
// and likewise for short[] and float[] variants.
static const JNINativeMethod gMethods[] = {
{"native_write_byte", "([BIII)I", (void *)android_media_AudioTrack_write_byte},
{"native_write_short", "([SIII)I", (void *)android_media_AudioTrack_write_short},
{"native_write_float", "([FIII)I", (void *)android_media_AudioTrack_write_float},
};
JNI write 实现
// JNI bridge for AudioTrack.write(byte[], ...): pins the Java array,
// forwards the span to the native AudioTrack, and releases the array.
static jint android_media_AudioTrack_write_byte(JNIEnv *env, jobject thiz,
                                                jbyteArray javaData,
                                                jint offsetInBytes,
                                                jint sizeInBytes,
                                                jint writeMode) {
    // Resolve the native AudioTrack backing this Java object.
    AudioTrack *lpTrack = getAudioTrack(env, thiz);
    if (lpTrack == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException", NULL);
        return 0;
    }
    // Pin (or copy) the Java byte array. GetByteArrayElements may return
    // NULL under memory pressure with an OutOfMemoryError already pending;
    // the original excerpt dereferenced the result unchecked.
    jbyte *nativeData = env->GetByteArrayElements(javaData, NULL);
    if (nativeData == NULL) {
        return 0; // OutOfMemoryError pending in env
    }
    jbyte *data = nativeData + offsetInBytes;
    // WRITE_BLOCKING maps to a blocking native write; anything else is
    // non-blocking.
    ssize_t written = lpTrack->write(data, sizeInBytes,
                                     writeMode == WRITE_BLOCKING);
    // JNI_ABORT: the buffer was only read, nothing to copy back.
    env->ReleaseByteArrayElements(javaData, nativeData, JNI_ABORT);
    return (jint)written;
}
- Native AudioTrack 层
Native write 方法
// frameworks/av/media/libaudioclient/AudioTrack.cpp
// Entry point of the native write path: validates the track state, then
// dispatches to the static-buffer or streaming implementation.
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking) {
    // Writes are only legal while the track is active or draining.
    const bool writable = (mState == STATE_ACTIVE) || (mState == STATE_STOPPING);
    if (!writable) {
        return INVALID_OPERATION;
    }
    // A non-zero shared buffer means MODE_STATIC (client-mapped memory);
    // otherwise the data streams through the FIFO pipe to AudioFlinger.
    return (mSharedBuffer != 0)
            ? writeToSharedBuffer(buffer, userSize, blocking)
            : writeToPipe(buffer, userSize, blocking);
}
- 共享内存写入路径
writeToSharedBuffer 实现
// Static-mode write: copies user data into the shared-memory control block
// region and advances the client write position.
ssize_t AudioTrack::writeToSharedBuffer(const void* buffer, size_t userSize, bool blocking) {
    audio_track_cblk_t* cblk = mCblk;
    // Frames the server has made available for the client to fill.
    uint32_t framesReady = cblk->framesReady();
    if (framesReady == 0) {
        if (!blocking) {
            return WOULD_BLOCK;
        }
        // Block until space opens up, then re-read the count: the value
        // cached before the wait is stale (still zero), and the original
        // excerpt would have clamped the write to 0 bytes below.
        cblk->waitAvailableFrames(userSize / mFrameSize);
        framesReady = cblk->framesReady();
    }
    // Clamp the copy to the space actually available.
    size_t sizeToWrite = min(userSize, framesReady * mFrameSize);
    // Copy into shared memory at the current client (user) position; use
    // the local cblk consistently instead of mixing with mCblk.
    memcpy(cblk->buffer(cblk->user), buffer, sizeToWrite);
    // Advance the client write pointer by whole frames.
    cblk->stepUser(sizeToWrite / mFrameSize);
    return sizeToWrite;
}
- 流模式写入路径
writeToPipe 实现
// Streaming-mode write: repeatedly obtains a region of the shared FIFO,
// copies user data into it, and releases it back to AudioFlinger.
// Returns the number of bytes actually queued (may be short when
// non-blocking), or a negative status on failure.
ssize_t AudioTrack::writeToPipe(const void* buffer, size_t userSize, bool blocking) {
size_t bytesWritten = 0;
while (bytesWritten < userSize) {
// Obtain the next writable region; wait policy depends on 'blocking'.
Buffer audioBuffer;
status_t status = obtainBuffer(&audioBuffer,
blocking ? &kForever : &kNonBlocking);
if (status != OK) {
// Non-blocking mode treats "no space" as a short write, not an error.
if (status == WOULD_BLOCK && !blocking) {
break;
}
return status;
}
// Write at most the remaining user data into this region.
size_t toWrite = audioBuffer.size;
if (toWrite > userSize - bytesWritten) {
toWrite = userSize - bytesWritten;
}
// Copy the chunk into the obtained audio buffer.
memcpy(audioBuffer.i8,
static_cast<const char*>(buffer) + bytesWritten,
toWrite);
// Release the region so AudioFlinger can consume it.
releaseBuffer(&audioBuffer);
bytesWritten += toWrite;
}
return bytesWritten;
}
- obtainBuffer 实现
获取音频缓冲区
// Obtains a writable region of the stream buffer. 'requested' selects the
// wait policy: &kNonBlocking returns immediately, &kForever blocks until
// space is available. (The original excerpt took the timespec by value,
// which neither matches the callers nor the pointer comparison below.)
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested) {
    audio_track_cblk_t* cblk = mCblk;   // control block is held by pointer
    // Frames the client may currently write.
    uint32_t framesAvail = cblk->framesAvailable();
    if (framesAvail == 0) {
        if (requested == &kNonBlocking) {
            return WOULD_BLOCK;
        }
        // Wait for space, then re-read: the pre-wait count is stale (zero)
        // and would otherwise yield an empty buffer below.
        cblk->waitFramesAvailable(framesAvail, requested);
        framesAvail = cblk->framesAvailable();
    }
    // Describe the writable region to the caller.
    audioBuffer->raw = cblk->buffer(cblk->user);
    audioBuffer->frameCount = framesAvail;
    audioBuffer->size = framesAvail * mFrameSize;
    return OK;
}
- Binder IPC 调用到 AudioFlinger
IAudioTrack Binder 接口
// frameworks/av/media/libaudioclient/IAudioTrack.cpp
class BpAudioTrack : public BpInterface {
public:
virtual status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount) {
Parcel data, reply;
data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
// 序列化参数
remote()->transact(OBTAIN_BUFFER, data, &reply);
// 反序列化结果
return reply.readInt32();
}
};
- AudioFlinger 服务端处理
Track::obtainBuffer 实现
// frameworks/av/services/audioflinger/Tracks.cpp
// Server-side buffer request for one playback track: validates the track
// state, then forwards to the playback thread that owns the mix buffer.
status_t AudioFlinger::Track::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) {
    // Paused or stopped tracks must not hand out buffers.
    switch (mState) {
    case PAUSED:
    case STOPPED:
        return INVALID_OPERATION;
    default:
        break;
    }
    // The owning thread is held weakly; it may already have been torn down.
    const sp<ThreadBase> thread = mThread.promote();
    if (thread == 0) {
        return BAD_VALUE;
    }
    return thread->obtainBuffer(this, audioBuffer, waitCount);
}
- PlaybackThread 缓冲区管理
obtainBuffer 实现
// frameworks/av/services/audioflinger/Threads.cpp
// Hands a track a slice of the thread's buffer, or WOULD_BLOCK when the
// mix buffer is not ready to accept more data.
status_t AudioFlinger::PlaybackThread::obtainBuffer(
        AudioFlinger::Track* track, Buffer* audioBuffer, int32_t waitCount) {
    // Refuse early when the mixer has no room for this track.
    if (mMixBufferStatus != MIXBUFFER_READY) {
        return WOULD_BLOCK;
    }
    // Publish the track's region to the caller.
    const size_t frames = track->frameCount();
    audioBuffer->raw        = track->buffer();
    audioBuffer->frameCount = frames;
    audioBuffer->size       = frames * mFrameSize;
    return OK;
}
-
完整的调用序列图
AudioTrack.write() [Java]
↓
native_write_byte() [JNI]
↓
AudioTrack::write() [Native C++]
↓
obtainBuffer() [获取缓冲区]
↓
IAudioTrack::obtainBuffer() [Binder IPC]
↓
AudioFlinger::Track::obtainBuffer()
↓
PlaybackThread::obtainBuffer()
↓
memcpy() [数据拷贝到共享内存]
↓
releaseBuffer() [通知 AudioFlinger 数据就绪]
↓
AudioFlinger 混音循环处理数据
-
快速路径优化
快速混合器路径
// Fast-path dispatch view of write(): tracks created with
// AUDIO_OUTPUT_FLAG_FAST bypass the normal mixer write path.
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking) {
    const bool useFastPath = (mFlags & AUDIO_OUTPUT_FLAG_FAST) != 0;
    return useFastPath ? fastWrite(buffer, userSize, blocking)
                       : normalWrite(buffer, userSize, blocking);
}
// Fast-path write: pushes data into the ring buffer consumed by the fast
// mixer. NOTE(review): 'blocking' is accepted but unused here — presumably
// mFastBuffer->write is always non-blocking; confirm against the caller.
ssize_t AudioTrack::fastWrite(const void* buffer, size_t userSize, bool blocking) {
// Lock-free ring buffer shared with the fast mixer thread.
return mFastBuffer->write(buffer, userSize);
}
- 错误处理和状态管理
错误处理逻辑
// Error-handling view of write(): all argument/state validation that must
// pass before any data is accepted.
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking) {
    // Null data or a zero-length request is a caller error.
    if (buffer == NULL || userSize == 0) {
        return BAD_VALUE;
    }
    // The track must have been successfully constructed.
    if (mState == STATE_UNINITIALIZED) {
        return INVALID_OPERATION;
    }
    // write() is only valid for synchronous-transfer tracks; callback and
    // shared-memory modes deliver data through other mechanisms.
    if (mTransfer != TRANSFER_SYNC) {
        return INVALID_OPERATION;
    }
    // Validation passed — perform the actual write. The original excerpt
    // fell off the end of a value-returning function here (undefined
    // behavior); delegate to the two write paths shown above.
    return (mSharedBuffer != 0)
            ? writeToSharedBuffer(buffer, userSize, blocking)
            : writeToPipe(buffer, userSize, blocking);
}
- 性能统计和监控
写入统计
// Instrumented view of write(): wraps actualWrite() with wall-clock timing,
// byte/call counters, and a systrace counter.
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking) {
    const nsecs_t t0 = systemTime();
    const ssize_t written = actualWrite(buffer, userSize, blocking);
    const nsecs_t t1 = systemTime();
    // Accumulate statistics; failed writes contribute zero bytes.
    if (written > 0) {
        mTotalBytesWritten += written;
    }
    mTotalWriteTime += (t1 - t0);
    mWriteCount++;
    // Emit a trace counter only when tracing is actually on.
    if (ATRACE_ENABLED()) {
        ATRACE_INT("AudioTrackWriteBytes", written);
    }
    return written;
}
- 调试和日志
详细调试信息
// Verbose-logging view of write(): traces the call arguments and warns on
// short or failed writes.
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking) {
    if (LOG_IO_VERBOSE) {
        ALOGV("write: buffer=%p, userSize=%zu, blocking=%d, state=%d",
              buffer, userSize, blocking, mState);
    }
    ssize_t result = actualWrite(buffer, userSize, blocking);
    // Keep the comparison in the signed domain: the original
    // 'result != userSize' mixed ssize_t with size_t, silently promoting a
    // negative error code to a huge unsigned value before comparing.
    if (result < 0) {
        ALOGW("write: failed with status %zd", result);
    } else if (static_cast<size_t>(result) != userSize) {
        ALOGW("write: wrote %zd of %zu bytes", result, userSize);
    }
    return result;
}
总结:Android 13 中 AudioTrack 的写数据调用流程是一个复杂的多层级过程,涉及 Java 层、JNI、Native 层、Binder IPC、共享内存管理等多个组件。数据最终通过共享内存传递给 AudioFlinger 进行混音和硬件输出,整个过程经过高度优化以支持低延迟和高性能的音频播放。