Overall approach
1. Listen for the user's onTouchStart event, start a timer to detect a long press, then call JSBridge to start recording.
2. Listen to the recording process through JSBridge, receive the audio data, and buffer it in an array on the front end.
3. Listen for the user's onTouchEnd (release) event, clear the timer, convert the buffered audio into a single file, and upload it to OSS.
Key difficulties
Difficulty 1: converting base64 recording chunks into a WAV file
First, merge the series of Base64-encoded audio chunks into a single PCM data stream;
then build the WAV file header;
finally, concatenate the WAV header and the PCM data. A minimal sketch follows this list.
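A minimal, self-contained sketch of these three steps, assuming each chunk is a Base64-encoded string of raw 16-bit little-endian mono PCM (matching the startRecording parameters below); the header fields are explained line by line in encodeWAV at the end of this post:
// Sketch: decode Base64 PCM chunks, merge them, and wrap them in a WAV container.
function chunksToWav(base64Chunks: string[], sampleRate = 44100): Blob {
  // 1. Base64 -> bytes, merged into one PCM stream
  const buffers = base64Chunks.map((b64) => {
    const bin = atob(b64);
    const bytes = new Uint8Array(bin.length);
    for (let i = 0; i < bin.length; i++) bytes[i] = bin.charCodeAt(i);
    return bytes;
  });
  const pcm = new Uint8Array(buffers.reduce((n, b) => n + b.length, 0));
  let offset = 0;
  for (const b of buffers) {
    pcm.set(b, offset);
    offset += b.length;
  }
  // 2. Build the 44-byte WAV header
  const header = new DataView(new ArrayBuffer(44));
  const writeStr = (o: number, s: string) =>
    [...s].forEach((c, i) => header.setUint8(o + i, c.charCodeAt(0)));
  writeStr(0, 'RIFF');
  header.setUint32(4, 36 + pcm.length, true); // file size - 8
  writeStr(8, 'WAVE');
  writeStr(12, 'fmt ');
  header.setUint32(16, 16, true);             // fmt chunk size (PCM)
  header.setUint16(20, 1, true);              // audio format: 1 = PCM
  header.setUint16(22, 1, true);              // mono
  header.setUint32(24, sampleRate, true);     // sample rate
  header.setUint32(28, sampleRate * 2, true); // byte rate = rate × channels × 16 / 8
  header.setUint16(32, 2, true);              // block align
  header.setUint16(34, 16, true);             // bits per sample
  writeStr(36, 'data');
  header.setUint32(40, pcm.length, true);     // PCM byte length
  // 3. Concatenate header + PCM body
  return new Blob([header, pcm], { type: 'audio/wav' });
}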
Difficulty 2: merging TypedArray data
TypedArray: stores fixed-type numeric data, such as integers or floats.
Array: can store data of any type, including numbers, strings, and objects. Unlike a plain Array, a TypedArray has no concat method, so merging means allocating a new view and copying with set, as in the sketch below.
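A minimal merge sketch for same-type TypedArrays (a generic DataView variant, combineDataView, appears in the complete example at the end):
// Merge several Uint8Arrays by copying each one into a single new view.
function mergeUint8Arrays(arrays: Uint8Array[]): Uint8Array {
  const total = arrays.reduce((n, a) => n + a.length, 0);
  const result = new Uint8Array(total);
  let offset = 0;
  for (const a of arrays) {
    result.set(a, offset); // TypedArray.prototype.set copies at the given offset
    offset += a.length;
  }
  return result;
}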
Start recording
/**
 * Start recording
 */
const handleTouchStart = (event) => {
  event.preventDefault();
  timerId = setTimeout(() => {
    setLongPress(true);
    console.log('handleTouchStart: long press detected');
    JSBridge('XX.startRecording', {
      numberOfChannels: 1, // channel count
      // sampleRate: 16000, // sample rate
      sampleRate: 44100, // use a 44100 Hz sample rate instead
      bitsPerChannel: 16, // bit depth
      format: 'PCM',
    }).then(() => {
      setRecordStatus('dialog_listening');
    });
  }, 100); // long-press threshold, set to 100 ms here
};
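For context, a sketch of the state these handlers assume (the names mirror the snippets; JSBridge, Toast, and the OSS upload are project-specific):
import { useRef, useState } from 'react';

// Module-level long-press timer shared by handleTouchStart/handleTouchEnd
let timerId: ReturnType<typeof setTimeout> | null = null;

function useRecorderState() {
  const [longPress, setLongPress] = useState(false);
  const [recordStatus, setRecordStatus] = useState('dialog_sleep');
  const baseArrayRef = useRef<string[]>([]); // base64 PCM chunks from the bridge
  return { longPress, setLongPress, recordStatus, setRecordStatus, baseArrayRef };
}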
Listening to the recording process
const onRecordChange = (event) => {
  console.log(event);
  const { error, param } = event || {};
  const { pcm } = param || {};
  const { errorCode, errorMsg } = error || {};
  if (errorCode) {
    Toast.show({
      type: 'error',
      content: `Recording failed: ${errorMsg}`,
    });
    baseArrayRef.current = [];
  } else {
    baseArrayRef.current.push(pcm);
  }
};
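The payload shape of the RecordingDataBufferTransfer event assumed by this handler (inferred from the destructuring above; the actual bridge contract may differ):
// Inferred sketch, not an official type definition.
interface RecordingDataBufferTransferEvent {
  error?: { errorCode?: number | string; errorMsg?: string };
  param?: { pcm: string }; // one base64-encoded PCM chunk
}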
useEffect(() => {
  document.addEventListener('RecordingDataBufferTransfer', onRecordChange);
  return () => {
    // Clear the long-press timer
    if (timerId !== null) clearTimeout(timerId);
    // Also remove the listener in case the component unmounts mid-recording
    document.removeEventListener('RecordingDataBufferTransfer', onRecordChange);
  };
}, []);
Stop recording
/**
 * Stop recording
 * @returns
 */
const handleTouchEnd = (event) => {
  if (timerId !== null) {
    clearTimeout(timerId);
    timerId = null;
  }
  if (!longPress) return;
  setLongPress(false);
  console.log('handleTouchEnd: released');
  JSBridge('XX.stopRecording').then(() => {
    // Remove the event listener
    document.removeEventListener(
      'RecordingDataBufferTransfer',
      onRecordChange,
    );
    setRecordStatus('dialog_sleep');
    onMerge();
  });
};
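onMerge is where the buffered chunks become a file. A sketch of what it might look like, reusing chunksToWav from the first difficulty above (uploadToOss is a hypothetical placeholder for your own upload helper):
// Sketch: convert the buffered base64 chunks into a WAV file and upload it.
const onMerge = async () => {
  const chunks = baseArrayRef.current;
  baseArrayRef.current = [];
  if (!chunks.length) return;
  const wavBlob = chunksToWav(chunks, 44100); // sample rate must match startRecording
  const file = new File([wavBlob], `record-${Date.now()}.wav`, { type: 'audio/wav' });
  await uploadToOss(file); // hypothetical OSS upload helper
};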
Audio wave animation
VoiceAnimation/index.tsx
import cls from 'classnames';
import debounce from 'lodash/debounce';
import { useLayoutEffect, useMemo, useRef } from 'react';
import styles from './index.module.less';
interface IProps {
  status: string;
}
export default function (props: IProps) {
  const { status = 'dialog_sleep' } = props;
  const list = useMemo(() => new Array(5).fill(true), []);
  return (
    <div className={cls(styles.voice, status)}>
      {list.map((_, index) => (
        <AnimationItem key={index} status={status} index={index} />
      ))}
    </div>
  );
}
// Returns the inline style (and an optional transitionend handler) for one bar,
// keyed by the current dialog status. The transitionend handlers flip the values
// back and forth, so each transition re-triggers itself and the bars loop.
function getTransitionByStatus(status: string, index?) {
  return {
    dialog_sleep: {
      transition: 'all 0.3s',
      height: '8px',
      transform: 'translateY(0)',
    },
    dialog_idle: {
      transition: 'all 0.3s',
      height: '8px',
      transform: 'translateY(0)',
    },
    dialog_listening: {
      transition: 'all 0.3s',
      height: '24px',
      transform: index % 2 ? 'translateY(8px)' : 'translateY(-8px)',
      onTransitionEnd: debounce(
        (event) => {
          if (
            event.target.parentElement.className.indexOf('dialog_listening') ===
            -1
          )
            return;
          event.target.style.transitionDuration = '0.5s';
          event.target.style.height = '24px';
          // Alternate the vertical offset so the bars bounce up and down
          event.target.style.transform =
            event.target.style.transform === 'translateY(8px)'
              ? 'translateY(-8px)'
              : 'translateY(8px)';
        },
        {
          leading: true,
          trailing: false,
        },
      ),
    },
    dialog_thinking: {
      transition: 'all 0.3s',
      height: `${[52, 44, 36, 28, 24][index]}px`,
      transform: 'translateY(0)',
      onTransitionEnd: debounce(
        (event) => {
          if (
            event.target.parentElement.className.indexOf('dialog_thinking') ===
            -1
          )
            return;
          event.target.style.transitionDuration = '0.5s';
          // Ping-pong each bar between a pair of heights
          event.target.style.height = {
            '52px': '24px',
            '44px': '28px',
            '36px': '32px',
            '32px': '36px',
            '28px': '44px',
            '24px': '52px',
          }[event.target.style.height];
        },
        {
          leading: true,
          trailing: false,
        },
      ),
    },
    dialog_responding: {
      transition: 'all 0.2s',
      height: `${Math.random() * (index + 1) * 10 + 24}px`,
      transform: 'translateY(0)',
      onTransitionEnd: debounce(
        (event) => {
          if (
            event.target.parentElement.className.indexOf(
              'dialog_responding',
            ) === -1
          )
            return;
          event.target.style.transitionDuration = '0.15s';
          // Jump to a new random height on every transition end
          event.target.style.height = `${Math.random() * (index + 1) * 10 + 24}px`;
        },
        {
          leading: true,
          trailing: false,
        },
      ),
    },
  }[status];
}
function AnimationItem({ status, index }: { status: string; index?: number }) {
  const div = useRef<any>();
  useLayoutEffect(() => {
    const container = div.current as HTMLDivElement;
    // Reset the bar to its neutral size before applying the new status styles
    function reset() {
      container.ontransitionend = null;
      container.style.transition = 'all .1s';
      container.style.height = '24px';
      container.style.transform = 'translateY(0)';
    }
    reset();
    const { onTransitionEnd = () => {}, ...style } =
      getTransitionByStatus(status, index) || {};
    container.ontransitionend = onTransitionEnd;
    for (let prop in style) {
      container.style[prop] = style[prop];
    }
  }, [status]);
  return (
    <div ref={div} className={styles.item} style={{ width: 24, height: 24 }} />
  );
}
VoiceAnimation/index.module.less
.voice {
  display: flex;
  justify-content: center;
  align-items: center;
  height: 56px;
  .item {
    // width: 24px;
    // height: 24px;
    background-color: var(--TY-Text-Brand-1);
    border-radius: 20px;
    margin: 0 4px;
    transform: translateY(0);
  }
}
// Optional helper for staggering per-bar animation delays (not used above)
.loop(@n, @i: 0) when (@i <= @n) {
  &:nth-child(@{i}) {
    animation-delay: (@i * 0.2s);
  }
  .loop(@n, (@i + 1));
}
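A minimal usage sketch, feeding the component the recordStatus state from the recording handlers above:
import VoiceAnimation from './VoiceAnimation';

// status is one of: 'dialog_sleep' | 'dialog_idle' | 'dialog_listening'
//                 | 'dialog_thinking' | 'dialog_responding'
function RecorderPanel({ recordStatus }: { recordStatus: string }) {
  return <VoiceAnimation status={recordStatus} />;
}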
A complete example: record audio, then play it back
html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>pcmtowav</title>
</head>
<body>
  <div>
    getUserMedia requires HTTPS; plain HTTP works only on localhost or 127.0.0.1.
  </div>
  <button id="start">Start recording</button>
  <button id="end">Stop recording</button>
  <button id="play">Play recording</button>
<script>
var context = null,
  inputData = [], // Float32Array chunks from onaudioprocess
  size = 0, // total number of samples collected
  audioInput = null,
  recorder = null;
document.getElementById('start').addEventListener('click', function () {
  context = new (window.AudioContext || window.webkitAudioContext)();
  // Reset the buffers
  inputData = [];
  size = 0;
  // Recording node (ScriptProcessorNode is deprecated, but keeps the demo simple)
  recorder = context.createScriptProcessor(4096, 1, 1);
  recorder.onaudioprocess = function (e) {
    var data = e.inputBuffer.getChannelData(0);
    inputData.push(new Float32Array(data));
    size += data.length;
  };
  navigator.mediaDevices.getUserMedia({
    audio: true
  }).then(function (stream) {
    audioInput = context.createMediaStreamSource(stream);
    // Only connect once the stream is actually available
    audioInput.connect(recorder);
    recorder.connect(context.destination);
  }).catch(function (err) {
    console.log('getUserMedia error', err);
  });
});
document.getElementById('end').addEventListener('click', function () {
  recorder.disconnect();
});
document.getElementById('play').addEventListener('click', function () {
  recorder.disconnect();
  if (size !== 0) {
    inputSampleRate = context.sampleRate;
    context.decodeAudioData(encodeWAV().buffer, function (buffer) {
      // decodeAudioData also returns a promise; the three-argument form is kept for older browsers
      playSound(buffer);
    }, function () {
      console.log('decodeAudioData error');
    });
  }
});
// ----------------------
// The helpers below do the PCM/WAV conversion
var inputSampleRate = 0; // input sample rate
var outputSampleBits = 16; // output bit depth
// Flatten the recorded chunks into a single Float32Array
function decompress() {
  var data = new Float32Array(size);
  var offset = 0; // write offset
  // Turn the 2-D chunk list into a 1-D sample array
  for (var i = 0; i < inputData.length; i++) {
    data.set(inputData[i], offset);
    offset += inputData[i].length;
  }
  return data;
}
// Quantize the float samples (range [-1, 1]) into 8-bit or 16-bit integer PCM
function encodePCM() {
  let bytes = decompress(),
    sampleBits = outputSampleBits,
    offset = 0,
    dataLength = bytes.length * (sampleBits / 8),
    buffer = new ArrayBuffer(dataLength),
    data = new DataView(buffer);
  // Write the sample data
  if (sampleBits === 8) {
    for (var i = 0; i < bytes.length; i++, offset++) {
      // Clamp to [-1, 1]
      var s = Math.max(-1, Math.min(1, bytes[i]));
      // 8-bit audio splits the range into 2^8 = 256 steps (0 to 255);
      // 16-bit splits it into 2^16 = 65536 steps (-32768 to 32767).
      // Since the collected samples are in [-1, 1], for 16-bit multiply negatives
      // by 32768 and positives by 32767 to get [-32768, 32767].
      // For 8-bit, multiply negatives by 128 and positives by 127, then shift the
      // whole range up by 128 (+128) to get [0, 255].
      var val = s < 0 ? s * 128 : s * 127;
      val = parseInt(val + 128, 10);
      data.setUint8(offset, val); // unsigned 8-bit samples
    }
  } else {
    for (var i = 0; i < bytes.length; i++, offset += 2) {
      var s = Math.max(-1, Math.min(1, bytes[i]));
      // 16-bit: scale directly
      data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
    }
  }
  return data;
}
// Build the 44-byte WAV header and append the PCM body
function encodeWAV() {
  var sampleRate = inputSampleRate;
  var sampleBits = outputSampleBits;
  var bytes = encodePCM();
  var buffer = new ArrayBuffer(44 + bytes.byteLength);
  var data = new DataView(buffer);
  var channelCount = 1; // mono
  var offset = 0;
  // RIFF chunk identifier
  writeString(data, offset, 'RIFF'); offset += 4;
  // Byte count from the next field to the end of the file, i.e. file size - 8
  data.setUint32(offset, 36 + bytes.byteLength, true); offset += 4;
  // WAV file marker
  writeString(data, offset, 'WAVE'); offset += 4;
  // Format chunk marker
  writeString(data, offset, 'fmt '); offset += 4;
  // Format chunk length, 0x10 = 16 for PCM
  data.setUint32(offset, 16, true); offset += 4;
  // Audio format (1 = PCM)
  data.setUint16(offset, 1, true); offset += 2;
  // Number of channels
  data.setUint16(offset, channelCount, true); offset += 2;
  // Sample rate: samples per second per channel
  data.setUint32(offset, sampleRate, true); offset += 4;
  // Byte rate: channels × sample rate × bits per sample / 8
  data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4;
  // Block align: bytes per sample frame, channels × bits per sample / 8
  data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2;
  // Bits per sample
  data.setUint16(offset, sampleBits, true); offset += 2;
  // Data chunk identifier
  writeString(data, offset, 'data'); offset += 4;
  // Data chunk size, i.e. total size - 44
  data.setUint32(offset, bytes.byteLength, true); offset += 4;
  // Append the PCM body after the WAV header
  for (let i = 0; i < bytes.byteLength; ++i) {
    data.setUint8(offset, bytes.getUint8(i));
    offset++;
  }
  return data;
}
// Wrap the WAV DataView in a Blob (e.g. for uploading)
function getWAVBlob() {
  return new Blob([encodeWAV()], { type: 'audio/wav' });
}
function playSound(buffer) {
  var source = context.createBufferSource();
  // Attach the decoded audio data
  source.buffer = buffer;
  // Connect to the speakers
  source.connect(context.destination);
  source.start();
}
// Write an ASCII string into the DataView at the given offset
function writeString(data, offset, str) {
  for (var i = 0; i < str.length; i++) {
    data.setUint8(offset + i, str.charCodeAt(i));
  }
}
// Merge several DataViews into one view of the given constructor
function combineDataView(resultConstructor, ...arrays) {
  let totalLength = 0,
    offset = 0;
  // Total byte length
  for (let arr of arrays) {
    totalLength += arr.length || arr.byteLength;
  }
  // Allocate the target buffer
  let buffer = new ArrayBuffer(totalLength),
    result = new resultConstructor(buffer);
  // Copy the views byte by byte
  for (let arr of arrays) {
    for (let i = 0, len = arr.byteLength; i < len; ++i) {
      result.setInt8(offset, arr.getInt8(i));
      offset += 1;
    }
  }
  return result;
}
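// Example (hypothetical usage): combineDataView could merge a separately built
// header view with the PCM body, instead of the copy loop inside encodeWAV:
// var merged = combineDataView(DataView, headerView, encodePCM());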
</script>
</body>
</html>