环形缓冲区,[ audio_track_cblk_t ] [ FIFO ] 如何读取和写入
环形缓冲区:初始 R = 0,W = 0,buf 长度为 LEN
写入一个数据:w = W % LEN;buf[w] = data;W++;当 LEN 为 2 的幂时等价于 w = W & (LEN - 1)
读取一个数据:r = R % LEN;data = buf[r];R++;当 LEN 为 2 的幂时等价于 r = R & (LEN - 1)
判断 满:W - R == LEN
判断 空:W == R
接上文
https://blog.csdn.net/we1less/article/details/156458655?spm=1001.2014.3001.5502
av/media/libaudioclient/AudioTrack.cpp createTrack_l
**STATIC:**StaticAudioTrackClientProxy
**STREAM:**AudioTrackClientProxy
cblk, buffers, mFrameCount, mFrameSize
cpp
// Abridged from AOSP av/media/libaudioclient/AudioTrack.cpp.
// Creates the server-side track via AudioFlinger, maps the shared
// control block (audio_track_cblk_t), locates the audio data buffer,
// and instantiates the matching client proxy:
//   STREAM -> AudioTrackClientProxy (ring buffer after the cblk)
//   STATIC -> StaticAudioTrackClientProxy (app-supplied shared buffer)
// NOTE(review): this is an excerpt — error checks and the code that
// fills aidlInput / response / output / iMemPointer are elided.
status_t AudioTrack::createTrack_l()
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    // Ask AudioFlinger to create the track; the response carries the
    // shared memory from which the cblk is parsed.
    status = audioFlinger->createTrack(aidlInput.value(), response);
    output.audioTrack->getCblk(&sfr);
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    void* buffers;
    if (mSharedBuffer == 0) {
        // STREAM mode: the data FIFO sits immediately after the control
        // block in the same shared-memory region, hence "cblk + 1".
        buffers = cblk + 1;
    } else {
        // STATIC mode: the data buffer is the one the app passed in.
        buffers = mSharedBuffer->unsecurePointer();
    }
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        // STREAM: ring-buffer client proxy.
        mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
    } else {
        // STATIC: fixed-buffer client proxy.
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
        mProxy = mStaticProxy;
    }
    return logIfErrorAndReturnStatus(status, "");
}
app 调用 streamTrack.write(buffer, 0, len)
base/media/java/android/media/AudioTrack.java
native_write_byte
java
/**
 * Writes PCM byte data to this AudioTrack (abridged excerpt —
 * parameter validation in the real method is elided here).
 *
 * @param audioData     source buffer
 * @param offsetInBytes start offset within audioData
 * @param sizeInBytes   number of bytes to write
 * @param writeMode     WRITE_BLOCKING or WRITE_NON_BLOCKING
 * @return number of bytes written, or a negative error code
 */
public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
        @WriteMode int writeMode) {
    // Delegate straight to the native layer; block iff WRITE_BLOCKING.
    return native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
            writeMode == WRITE_BLOCKING);
}
base/core/jni/android_media_AudioTrack.cpp android_media_AudioTrack_writeArray
STATIC 模式下直接使用的memcpy
cpp
// JNI entry point for AudioTrack.write() on a Java primitive array.
// Fetches the native AudioTrack wrapped by the Java object and forwards
// the samples to writeToTrack().
// NOTE(review): this excerpt elides the JNI array-pinning step
// (Get<T>ArrayElements on javaAudioData) that produces cAudioData, plus
// the null checks and the matching Release call — confirm against the
// full android_media_AudioTrack.cpp before reusing.
template <typename T>
static jint android_media_AudioTrack_writeArray(JNIEnv *env, jobject thiz,
                                                T javaAudioData,
                                                jint offsetInSamples, jint sizeInSamples,
                                                jint javaAudioFormat,
                                                jboolean isWriteBlocking) {
    //ALOGV("android_media_AudioTrack_writeArray(offset=%d, sizeInSamples=%d) called",
    //        offsetInSamples, sizeInSamples);
    // Recover the native AudioTrack from the Java object's field.
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    // Returns the number of samples written (not bytes), or an error code.
    jint samplesWritten = writeToTrack(lpTrack, javaAudioFormat, cAudioData,
            offsetInSamples, sizeInSamples, isWriteBlocking == JNI_TRUE /* blocking */);
    return samplesWritten;
}
// Pushes sample data into the track, choosing the path by track mode:
// STATIC copies straight into the shared buffer with memcpy; STREAM
// goes through AudioTrack::write() and the shared ring buffer.
// Returns the number of samples (not bytes) transferred, or a
// translated error code.
template <typename T>
static jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const T *data,
                         jint offsetInSamples, jint sizeInSamples, bool blocking) {
    // Bytes requested by the caller, derived from the sample count.
    size_t byteCount = sizeInSamples * sizeof(T);
    ssize_t transferred;
    if (track->sharedBuffer() != 0) {
        // STATIC mode: the whole buffer is shared, so copy directly,
        // clamping the request to the buffer's capacity.
        const size_t capacity = track->sharedBuffer()->size();
        if (byteCount > capacity) {
            byteCount = capacity;
        }
        memcpy(track->sharedBuffer()->unsecurePointer(), data + offsetInSamples, byteCount);
        transferred = byteCount;
    } else {
        // STREAM mode: write through the ring buffer; a WOULD_BLOCK
        // result is reported to the caller as zero bytes written.
        transferred = track->write(data + offsetInSamples, byteCount, blocking);
        if (transferred == (ssize_t) WOULD_BLOCK) {
            transferred = 0;
        }
    }
    if (transferred < 0) {
        // Map the native error to the Java-visible error code.
        return interpretWriteSizeError(transferred);
    }
    // Convert the byte count back into samples for the Java layer.
    return transferred / sizeof(T);
}
audioflinger
av/services/audioflinger/Threads.cpp createTrack_l