web端手机录音

可以将每个片段的音频,变成完整的mp3(或其他格式文件)

采样率使用16000(本代码中,其他采样率可能会导致噪音或者无法正常播放,具体采样率请自行研究)

引入第三方依赖

javascript 复制代码
<script src="https://cdnjs.cloudflare.com/ajax/libs/lamejs/1.2.0/lame.min.js"></script> 

webRecorder的js 代码

javascript 复制代码
/**
 * Convert normalized Float32 PCM samples in [-1, 1] to 16-bit
 * little-endian PCM.
 * @param {Float32Array|number[]} input - normalized audio samples
 * @returns {DataView} view over a buffer of input.length * 2 bytes
 */
export function to16BitPCM(input) {
  const bytesPerSample = 16 / 8;
  const view = new DataView(new ArrayBuffer(input.length * bytesPerSample));
  for (let i = 0; i < input.length; i++) {
    // Clamp to [-1, 1]; scale negatives by 0x8000 and positives by 0x7fff
    // so both rails (-32768 / 32767) are reachable.
    const sample = Math.max(-1, Math.min(1, input[i]));
    const scaled = sample < 0 ? sample * 0x8000 : sample * 0x7fff;
    view.setInt16(i * bytesPerSample, scaled, true);
  }
  return view;
}
/**
 * Resample normalized PCM samples to 16 kHz using linear interpolation.
 *
 * Fix: the original computed the interpolation indices with
 * Number.prototype.toFixed(), which returns *strings*; they only worked as
 * array indices through implicit coercion. Indices are now plain numbers.
 *
 * @param {Float32Array|number[]} audioData - input samples
 * @param {number} [sampleRate=44100] - sample rate of audioData in Hz
 * @returns {Float32Array} the samples resampled to 16000 Hz
 */
export function to16kHz(audioData, sampleRate = 44100) {
  const data = new Float32Array(audioData);
  const fitCount = Math.round(data.length * (16000 / sampleRate));
  const newData = new Float32Array(fitCount);
  if (fitCount === 0) {
    return newData; // empty input -> empty output
  }
  // Ratio between source index space and target index space.
  const springFactor = (data.length - 1) / (fitCount - 1);
  newData[0] = data[0];
  for (let i = 1; i < fitCount - 1; i++) {
    const tmp = i * springFactor;
    const before = Math.floor(tmp);
    const after = Math.ceil(tmp);
    const atPoint = tmp - before;
    // Linear interpolation between the two nearest source samples.
    newData[i] = data[before] + (data[after] - data[before]) * atPoint;
  }
  newData[fitCount - 1] = data[data.length - 1];
  return newData;
}

// Source of an AudioWorkletProcessor, inlined as a string so it can be loaded
// from a Blob URL. The ${to16kHz} / ${to16BitPCM} interpolations splice in the
// source text of the two helpers above, making the worklet self-contained.
const audioWorkletCode = `
  class MyProcessor extends AudioWorkletProcessor {
    constructor(options) {
      super(options);
      this.audioData = [];
      this.audioDataFloat32 = [];
      this.sampleCount = 0;
      this.bitCount = 0;
      this.preTime = 0;
    }

    process(inputs) {
      // Process the incoming frame (mono, channel 0), if one is present.
      if (inputs[0][0]) {
        const output = ${to16kHz}(inputs[0][0], sampleRate);
        this.sampleCount += 1;
        const audioData = ${to16BitPCM}(output);
        this.bitCount += 1;
        const data = [...new Int16Array(audioData.buffer)];
        this.audioData = this.audioData.concat(data);

        const dataFloat32 = [...output];
        this.audioDataFloat32 = this.audioDataFloat32.concat(dataFloat32);

        // Flush accumulated samples to the main thread every ~100 ms.
        if (new Date().getTime() - this.preTime > 100) {
          this.port.postMessage({
            audioData: new Int16Array(this.audioData),
            audioDataFloat32: new Float32Array(this.audioDataFloat32),
            sampleCount: this.sampleCount,
            bitCount: this.bitCount
          });
          this.preTime = new Date().getTime();
          this.audioData = [];
          this.audioDataFloat32 = [];
        }
      }
      // Fix: always return true. The original returned true only when an
      // input frame was present, so an empty frame returned undefined and
      // allowed the browser to terminate the processor.
      return true;
    }
  }

  registerProcessor('my-processor', MyProcessor);
  `;
// Tag appended to every diagnostic log line emitted by this module.
const TAG = 'WebRecorder';
// Legacy getUserMedia polyfill: fall back to vendor-prefixed variants on old
// browsers. The modern path uses navigator.mediaDevices.getUserMedia instead.
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia
  || navigator.mozGetUserMedia || navigator.msGetUserMedia;

/**
 * Browser microphone recorder.
 *
 * Captures audio via getUserMedia, resamples it to 16 kHz / 16-bit PCM
 * (through an AudioWorklet when supported, otherwise a ScriptProcessorNode)
 * and delivers chunks through the overridable callbacks OnReceivedData /
 * OnStop / OnError.
 *
 * Fixes vs. original: dead Safari guard in stop() caused a double suspend();
 * the legacy getUserMedia failure callback was a plain `function` so its
 * `this` was not the recorder; `.map` was used purely for side effects in
 * destroyStream(); a stray debug console.log ran on every worklet message.
 */
export default class WebRecorder {
  constructor(requestId, params, isLog) {
    this.audioData = []; // Int16 samples pending for the next emitted chunk
    this.audioDataFloat32 = []; // Float32 samples pending (script-processor path)
    this.allAudioData = []; // every Int16 sample captured this session
    this.stream = null; // MediaStream obtained from getUserMedia
    this.audioContext = null;
    this.requestId = requestId; // correlation id echoed in log lines
    this.frameTime = []; // "<timestamp>-<frame#>" markers, for diagnostics
    this.frameCount = 0;
    this.sampleCount = 0;
    this.bitCount = 0;
    this.mediaStreamSource = null;
    this.isLog = isLog; // truthy -> emit console diagnostics
    this.params = params; // supports { echoCancellation: 'false' }
  }
  // --- capability probes ---------------------------------------------------
  static isSupportMediaDevicesMedia() {
    return !!(navigator.getUserMedia || (navigator.mediaDevices && navigator.mediaDevices.getUserMedia));
  }
  static isSupportUserMediaMedia() {
    return !!navigator.getUserMedia;
  }
  static isSupportAudioContext() {
    return typeof AudioContext !== 'undefined' || typeof webkitAudioContext !== 'undefined';
  }
  static isSupportMediaStreamSource(requestId, audioContext) {
    return typeof audioContext.createMediaStreamSource === 'function';
  }
  static isSupportAudioWorklet(audioContext) {
    return audioContext.audioWorklet && typeof audioContext.audioWorklet.addModule === 'function'
      && typeof AudioWorkletNode !== 'undefined';
  }
  static isSupportCreateScriptProcessor(requestId, audioContext) {
    return typeof audioContext.createScriptProcessor === 'function';
  }
  /** Reset per-session state, create an AudioContext and request the mic. */
  start() {
    this.frameTime = [];
    this.frameCount = 0;
    this.allAudioData = [];
    this.audioData = [];
    this.sampleCount = 0;
    this.bitCount = 0;
    this.getDataCount = 0;
    this.audioContext = null;
    this.mediaStreamSource = null;
    this.stream = null;
    this.preTime = 0;
    try {
      if (WebRecorder.isSupportAudioContext()) {
        this.audioContext = new (window.AudioContext || window.webkitAudioContext)();
      } else {
        this.isLog && console.log(this.requestId, '浏览器不支持AudioContext', TAG);
        this.OnError('浏览器不支持AudioContext');
      }
    } catch (e) {
      this.isLog && console.log(this.requestId, '浏览器不支持webAudioApi相关接口', e, TAG);
      this.OnError('浏览器不支持webAudioApi相关接口');
    }
    this.getUserMedia(this.requestId, this.getAudioSuccess, this.getAudioFail);
  }
  /** Suspend audio processing and hand all captured samples to OnStop. */
  stop() {
    // Fix: the original guarded suspend() behind a "not Safari" UA check and
    // then unconditionally called suspend() again on the next line, so the
    // guard was dead code and non-Safari browsers suspended twice.
    this.audioContext && this.audioContext.suspend();
    this.isLog && console.log(this.requestId, `webRecorder stop ${this.sampleCount}/${this.bitCount}/${this.getDataCount}`, JSON.stringify(this.frameTime), TAG);
    this.OnStop(this.allAudioData);
  }
  /** Stop every track on the captured stream and release it. */
  destroyStream() {
    // 关闭通道
    if (this.stream) {
      // forEach, not map: iterating purely for the stop() side effect.
      this.stream.getTracks().forEach((val) => {
        val.stop();
      });
      this.stream = null;
    }
  }
  /**
   * Request microphone access, preferring mediaDevices.getUserMedia and
   * falling back to the legacy callback API. Success/fail callbacks are
   * invoked with the recorder bound as `this`.
   */
  async getUserMedia(requestId, getStreamAudioSuccess, getStreamAudioFail) {
    let audioOption = {
      echoCancellation: true,
    };
    if (this.params && String(this.params.echoCancellation) === 'false') { // 关闭回声消除
      audioOption = {
        echoCancellation: false,
      };
    }
    const mediaOption = {
      audio: audioOption,
      video: false,
    };
    // 获取用户的麦克风
    if (WebRecorder.isSupportMediaDevicesMedia()) {
      navigator.mediaDevices
        .getUserMedia(mediaOption)
        .then(stream => {
          this.stream = stream;
          getStreamAudioSuccess.call(this, requestId, stream);
        })
        .catch(e => {
          getStreamAudioFail.call(this, requestId, e);
        });
    } else if (WebRecorder.isSupportUserMediaMedia()) {
      navigator.getUserMedia(mediaOption,
        stream => {
          this.stream = stream;
          getStreamAudioSuccess.call(this, requestId, stream);
        },
        // Fix: was a plain `function`, so `this` here was not the recorder
        // and the failure callback ran with the wrong receiver.
        err => {
          getStreamAudioFail.call(this, requestId, err);
        }
      );
    } else {
      if (navigator.userAgent.toLowerCase().match(/chrome/) && location.origin.indexOf('https://') < 0) {
        this.isLog && console.log(this.requestId, 'chrome下获取浏览器录音功能,因为安全性问题,需要在localhost或127.0.0.1或https下才能获取权限', TAG);
        this.OnError('chrome下获取浏览器录音功能,因为安全性问题,需要在localhost或127.0.0.1或https下才能获取权限');
      } else {
        this.isLog && console.log(this.requestId, '无法获取浏览器录音功能,请升级浏览器或使用chrome', TAG);
        this.OnError('无法获取浏览器录音功能,请升级浏览器或使用chrome');
      }
      this.audioContext && this.audioContext.close();
    }
  }
  /** Wire the captured stream into the audio graph (worklet or script node). */
  async getAudioSuccess(requestId, stream) {
    if (!this.audioContext) {
      return false;
    }
    if (this.mediaStreamSource) {
      this.mediaStreamSource.disconnect();
      this.mediaStreamSource = null;
    }
    this.audioTrack = stream.getAudioTracks()[0];
    const mediaStream = new MediaStream();
    mediaStream.addTrack(this.audioTrack);
    this.mediaStreamSource = this.audioContext.createMediaStreamSource(mediaStream);
    if (WebRecorder.isSupportMediaStreamSource(requestId, this.audioContext)) {
      if (WebRecorder.isSupportAudioWorklet(this.audioContext)) { // 不支持 AudioWorklet 降级
        this.audioWorkletNodeDealAudioData(this.mediaStreamSource, requestId);
      } else {
        this.scriptNodeDealAudioData(this.mediaStreamSource, requestId);
      }
    } else { // 不支持 MediaStreamSource
      this.isLog && console.log(this.requestId, '不支持MediaStreamSource', TAG);
      this.OnError('不支持MediaStreamSource');
    }
  }
  /** Log and report a getUserMedia failure, then stop the session. */
  getAudioFail(requestId, err) {
    if (err && err.err && err.err.name === 'NotAllowedError') {
      this.isLog && console.log(requestId, '授权失败', JSON.stringify(err.err), TAG);
    }
    this.isLog && console.log(this.requestId, 'getAudioFail', JSON.stringify(err), TAG);
    this.OnError(err);
    this.stop();
  }
  /** Fallback path: ScriptProcessorNode resampling to 16 kHz / Int16. */
  scriptNodeDealAudioData(mediaStreamSource, requestId) {
    if (WebRecorder.isSupportCreateScriptProcessor(requestId, this.audioContext)) {
      // Analysis node: 1024-frame buffer, mono in, mono out.
      const scriptProcessor = this.audioContext.createScriptProcessor(1024, 1, 1);
      // Connect source -> processor -> destination.
      this.mediaStreamSource && this.mediaStreamSource.connect(scriptProcessor);
      scriptProcessor && scriptProcessor.connect(this.audioContext.destination);
      scriptProcessor.onaudioprocess = (e) => {
        this.getDataCount += 1;
        // Resample this buffer to 16 kHz and convert to 16-bit PCM.
        const inputData = e.inputBuffer.getChannelData(0);
        const output = to16kHz(inputData, this.audioContext.sampleRate);
        const audioData = to16BitPCM(output);
        this.audioDataFloat32.push(...output);
        this.audioData.push(...new Int16Array(audioData.buffer));
        this.allAudioData.push(...new Int16Array(audioData.buffer));
        // Emit a chunk at most every ~100 ms.
        if (new Date().getTime() - this.preTime > 100) {
          this.frameTime.push(`${Date.now()}-${this.frameCount}`);
          this.frameCount += 1;
          this.preTime = new Date().getTime();
          const audioDataArray = new Int16Array(this.audioData);
          this.OnReceivedData(audioDataArray);
          this.audioData = [];
          this.sampleCount += 1;
          this.bitCount += 1;
        }
      };
    } else { // 不支持
      this.isLog && console.log(this.requestId, '不支持createScriptProcessor', TAG);
    }
  }
  /** Preferred path: AudioWorklet built from the inlined processor source. */
  async audioWorkletNodeDealAudioData(mediaStreamSource, requestId) {
    try {
      const audioWorkletBlobURL = window.URL.createObjectURL(new Blob([audioWorkletCode], { type: 'text/javascript' }));
      await this.audioContext.audioWorklet.addModule(audioWorkletBlobURL);
      const myNode = new AudioWorkletNode(this.audioContext, 'my-processor', { numberOfInputs: 1, numberOfOutputs: 1, channelCount: 1 });
      myNode.onprocessorerror = (event) => {
        // Degrade to the ScriptProcessorNode path.
        this.scriptNodeDealAudioData(mediaStreamSource, this.requestId);
        return false;
      }
      myNode.port.onmessage = (event) => {
        // (removed stray console.log(event): it spammed on every ~100 ms message)
        this.frameTime.push(`${Date.now()}-${this.frameCount}`);
        this.OnReceivedData(event.data.audioData);
        this.frameCount += 1;
        this.allAudioData.push(...event.data.audioData);
        this.sampleCount = event.data.sampleCount;
        this.bitCount = event.data.bitCount;
      };
      myNode.port.onmessageerror = (event) => {
        // Degrade to the ScriptProcessorNode path.
        this.scriptNodeDealAudioData(mediaStreamSource, requestId);
        return false;
      }
      mediaStreamSource && mediaStreamSource.connect(myNode).connect(this.audioContext.destination);
    } catch (e) {
      this.isLog && console.log(this.requestId, 'audioWorkletNodeDealAudioData catch error', JSON.stringify(e), TAG);
      this.OnError(e);
    }
  }
  // --- overridable callbacks ----------------------------------------------
  /** Called with each Int16Array chunk of 16 kHz PCM. Override in callers. */
  OnReceivedData(data) { }
  /** Called with an error message/object when recording cannot proceed. */
  OnError(res) { }
  /** Called with all captured Int16 samples when recording stops. */
  OnStop(res) { }
}
// Expose the recorder as a browser global for non-module consumers.
if (typeof window !== 'undefined') {
  window.WebRecorder = WebRecorder;
}

以下是调用示例代码,其中包含一些测试 demo(不一定都能直接运行),看主要代码即可

javascript 复制代码
<template>
  <div style="padding: 20px">
    <h3>录音上传</h3>

    <div style="font-size: 14px">
      <el-button type="primary" @click="handleStart">开始录音</el-button>
      <el-button type="info" @click="handlePause">暂停录音</el-button>
      <el-button type="info" @click="handlePlay">播放录音</el-button>
      <el-button type="info" @click="makemp3">生成MP3</el-button>
    </div>
  </div>
</template>

<script setup>
import lamejs from "lamejs";
import webRecorder from "./assets/js/index";

import MPEGMode from "lamejs/src/js/MPEGMode";
import BitStream from "lamejs/src/js/BitStream";

// window.MPEGMode = MPEGMode;
// window.Lame = Lame;
// window.BitStream = BitStream;

// Fix: the original wrote `new WebRecorder()`, relying on the module's
// `window.WebRecorder` global side effect; use the imported default binding
// instead, and pass the (requestId, params, isLog) arguments the
// constructor expects.
const recorder = new webRecorder('demo', {}, false);

const audioData = [];

/**
 * Encode 16-bit PCM samples into an MP3 blob using lamejs.
 * @param {Int16Array} int8ArrayData - PCM samples (16-bit despite the name)
 * @param {number} sampleRate - sample rate of the PCM data in Hz
 * @returns {Blob} the encoded MP3 ("audio/mp3")
 */
function int8ArrayToMp3(int8ArrayData, sampleRate) {
  const numChannels = 1;
  const bufferSize = 4096;
  const encoder = new lamejs.Mp3Encoder(numChannels, sampleRate, 128);
  const mp3Data = [];

  // Feed the encoder in fixed-size chunks (subarray clamps the end index).
  for (let start = 0; start < int8ArrayData.length; start += bufferSize) {
    const chunk = int8ArrayData.subarray(start, start + bufferSize);
    const leftChannel = new Int16Array(chunk.length);
    for (let i = 0; i < chunk.length; i++) {
      leftChannel[i] = chunk[i];
    }
    const encoded = encoder.encodeBuffer(leftChannel);
    if (encoded.length > 0) {
      mp3Data.push(new Uint8Array(encoded));
    }
  }

  // Flush whatever remains in the encoder's internal buffer.
  const tail = encoder.flush();
  if (tail.length > 0) {
    mp3Data.push(new Uint8Array(tail));
  }

  return new Blob(mp3Data, { type: "audio/mp3" });
}

/**
 * Wrap 16-bit mono PCM samples in a WAV container and return an object URL.
 *
 * Generalized: the sample rate is now a parameter (default 44100, the value
 * the original hard-coded). Note the recorder in this article produces
 * 16000 Hz PCM, so pass 16000 for correctly pitched playback.
 *
 * @param {Int16Array} int8ArrayData - PCM samples (16-bit despite the name)
 * @param {number} [sampleRate=44100] - sample rate written into the header
 * @returns {string} blob: URL for an "audio/wav" Blob
 */
function int8ArrayToWavURL(int8ArrayData, sampleRate = 44100) {
  const numChannels = 1; // 单声道
  const bytesPerSample = 2; // 16-bit audio
  const byteRate = sampleRate * numChannels * bytesPerSample;
  const dataLength = int8ArrayData.length * bytesPerSample;

  const header = createWavHeader(numChannels, sampleRate, byteRate, dataLength);
  const wavBuffer = new Uint8Array(
    header.buffer.byteLength + int8ArrayData.length * bytesPerSample
  );
  wavBuffer.set(new Uint8Array(header.buffer), 0);

  // Write each sample little-endian after the 44-byte header.
  for (let i = 0; i < int8ArrayData.length; i++) {
    const value = int8ArrayData[i];
    wavBuffer[i * 2 + 44] = value & 0xff;
    wavBuffer[i * 2 + 45] = (value >> 8) & 0xff;
  }

  const blob = new Blob([wavBuffer], { type: "audio/wav" });
  return URL.createObjectURL(blob);
}

/**
 * Build a 44-byte canonical WAV (RIFF/PCM) header.
 *
 * Fix: block align was hard-coded to 2, which is only correct for mono
 * 16-bit audio; it is now derived from numChannels (2 bytes per sample),
 * so stereo headers come out valid. Mono output is byte-identical.
 *
 * @param {number} numChannels - channel count (1 = mono)
 * @param {number} sampleRate - sample rate in Hz
 * @param {number} byteRate - sampleRate * numChannels * bytesPerSample
 * @param {number} dataLength - PCM payload size in bytes
 * @returns {DataView} view over the 44-byte header buffer
 */
function createWavHeader(numChannels, sampleRate, byteRate, dataLength) {
  const bitsPerSample = 16;
  const blockAlign = numChannels * (bitsPerSample / 8);
  const buffer = new ArrayBuffer(44);
  const view = new DataView(buffer);

  // RIFF chunk descriptor
  writeString(view, 0, "RIFF");
  view.setUint32(4, 36 + dataLength, true);
  writeString(view, 8, "WAVE");

  // fmt sub-chunk
  writeString(view, 12, "fmt ");
  view.setUint32(16, 16, true);
  view.setUint16(20, 0x0001, true); // WAVE_FORMAT_PCM
  view.setUint16(22, numChannels, true);
  view.setUint32(24, sampleRate, true);
  view.setUint32(28, byteRate, true);
  view.setUint16(32, blockAlign, true); // was hard-coded 2
  view.setUint16(34, bitsPerSample, true);

  // data sub-chunk
  writeString(view, 36, "data");
  view.setUint32(40, dataLength, true);

  return view;
}

/** Write an ASCII string into a DataView at the given byte offset. */
function writeString(view, offset, string) {
  for (let i = 0; i < string.length; i++) {
    view.setUint8(offset + i, string.charCodeAt(i));
  }
}

// 获取采集到的音频数据
// recorder.OnReceivedData = (data) => {
//   // console.log(data);
//   audioData.push(...data);
// };

// Receive each captured audio chunk (Int16Array, 16 kHz PCM) from the
// recorder and feed it into the 5-second accumulate/encode pipeline below.
recorder.OnReceivedData = (data) => {
  // console.log(data);
  handlePlay2(data);
};



// Start capturing from the microphone.
const handleStart = () => {
  recorder.start();
};

// Stop capturing; triggers recorder.OnStop with all accumulated samples.
const handlePause = () => {
  recorder.stop();
};

/**
 * Trigger a browser download of the given URL via a temporary anchor.
 * @param {string} url - object URL (or any href) to download
 * @param {string} [filename] - suggested file name; defaults to "audio.mp3"
 */
function downloadMP3(url, filename) {
  const anchor = document.createElement("a");
  anchor.href = url;
  anchor.download = filename || "audio.mp3";
  document.body.appendChild(anchor);
  anchor.click();
  document.body.removeChild(anchor);
}


let i = 0; // number of chunks received so far
let tempAudioBuffer = []; // accumulated Int16 samples awaiting encoding
let startTime = null; // timestamp when the current accumulation window began
/**
 * Accumulate incoming audio chunks and hand them off for MP3 encoding
 * roughly every 5 seconds of wall time.
 *
 * Fixes: the original appended with tempAudioBuffer.push(...audioData),
 * which can throw a RangeError (argument/call-stack limit) for large
 * chunks — the copy is now an explicit loop. The stale "3 seconds" comment
 * was corrected to match the 5000 ms threshold actually used.
 *
 * @param {Int16Array|number[]} audioData - one chunk of 16 kHz PCM samples
 */
const handlePlay2 = (audioData) => {
  i += 1;
  // On the first chunk, start the accumulation window.
  if (startTime === null) {
    startTime = Date.now();
  }
  // Append this chunk sample-by-sample (avoids spread's argument limit).
  for (const sample of audioData) {
    tempAudioBuffer.push(sample);
  }
  // Once ~5 seconds (5000 ms) have elapsed, encode what we have and reset.
  const currentTime = Date.now();
  if (currentTime - startTime >= 5000) {
    startTime = Date.now();
    const copiedArray = [...tempAudioBuffer];
    tempAudioBuffer = [];
    processAudioBuffer2(copiedArray);
  }
};
/**
 * Encode one ~5 s window of 16 kHz PCM samples to MP3 and download it.
 *
 * Fixes: the original leaked both the temporary <a> element (never removed
 * from the DOM) and the object URL (never revoked) on every 5-second
 * window; both are now released after the click.
 *
 * @param {number[]} audioBuffer - accumulated Int16 sample values
 */
const processAudioBuffer2 = (audioBuffer) => {
  const int16ArrayAudioData = new Int16Array(audioBuffer);
  const sampleRate = 16000; // the recorder resamples everything to 16 kHz
  const encoder = new lamejs.Mp3Encoder(1, sampleRate, 128);
  const mp3Data = [];
  mp3Data.push(encoder.encodeBuffer(int16ArrayAudioData));
  mp3Data.push(encoder.flush()); // write the encoder's remaining data too
  const blob = new Blob(mp3Data, { type: "audio/mp3" });
  const url = URL.createObjectURL(blob);
  const a = document.createElement("a");
  a.href = url;
  a.download = "recording.mp3";
  document.body.appendChild(a);
  a.click();
  // Clean up: remove the helper element and release the blob URL.
  document.body.removeChild(a);
  URL.revokeObjectURL(url);
};

/**
 * Encode everything currently in the module-level audioData buffer to MP3
 * and trigger a download.
 *
 * Fixes: the log label said "8位" (8-bit) although the data is 16-bit
 * (Int16Array); the temporary <a> element and object URL are now released
 * after the click instead of leaking.
 */
const handlePlay = () => {
  const int16ArrayAudioData = new Int16Array(audioData);
  console.log("16位录音数据:", int16ArrayAudioData);
  const sampleRate = 16000; // the recorder resamples everything to 16 kHz
  const encoder = new lamejs.Mp3Encoder(1, sampleRate, 128);
  const mp3Data = [];
  mp3Data.push(encoder.encodeBuffer(int16ArrayAudioData));
  mp3Data.push(encoder.flush()); // write the encoder's remaining data too

  const blob = new Blob(mp3Data, { type: "audio/mp3" });
  const url = URL.createObjectURL(blob);
  const a = document.createElement("a");
  a.href = url;
  a.download = "recording.mp3";
  document.body.appendChild(a);
  a.click();
  // Clean up: remove the helper element and release the blob URL.
  document.body.removeChild(a);
  URL.revokeObjectURL(url);
};

/**
 * Demo: encode one second of a 440 Hz sine wave to MP3 and download it.
 * Useful for verifying the lamejs pipeline independently of the recorder.
 *
 * Fix: the temporary <a> element and object URL are released after the
 * click (the original leaked both).
 */
const makemp3 = () => {
  const sampleRate = 44100;
  const mp3encoder = new lamejs.Mp3Encoder(1, sampleRate, 128); // mono 44.1kHz encode to 128kbps

  // Generate one second of a 440 Hz (A4) sine wave as 16-bit samples.
  const frequency = 440;
  const samples = new Int16Array(sampleRate);
  for (let n = 0; n < sampleRate; n++) {
    samples[n] = 32767 * Math.sin(2 * Math.PI * frequency * (n / sampleRate));
  }
  console.log("16位正弦数据:", samples);

  const mp3Data = [];
  mp3Data.push(mp3encoder.encodeBuffer(samples)); // encode mp3
  mp3Data.push(mp3encoder.flush()); // write last data to the output data, too

  const blob = new Blob(mp3Data, { type: "audio/mp3" });
  const url = URL.createObjectURL(blob);
  const a = document.createElement("a");
  a.href = url;
  a.download = "recording.mp3";
  document.body.appendChild(a);
  a.click();
  // Clean up: remove the helper element and release the blob URL.
  document.body.removeChild(a);
  URL.revokeObjectURL(url);
}
</script>
相关推荐
G_G#4 分钟前
纯前端js插件实现同一浏览器控制只允许打开一个标签,处理session变更问题
前端·javascript·浏览器标签页通信·只允许一个标签页
@大迁世界20 分钟前
TypeScript 的本质并非类型,而是信任
开发语言·前端·javascript·typescript·ecmascript
GIS之路28 分钟前
GDAL 实现矢量裁剪
前端·python·信息可视化
是一个Bug32 分钟前
后端开发者视角的前端开发面试题清单(50道)
前端
Amumu1213834 分钟前
React面向组件编程
开发语言·前端·javascript
持续升级打怪中1 小时前
Vue3 中虚拟滚动与分页加载的实现原理与实践
前端·性能优化
GIS之路1 小时前
GDAL 实现矢量合并
前端
hxjhnct1 小时前
React useContext的缺陷
前端·react.js·前端框架
前端 贾公子1 小时前
从入门到实践:前端 Monorepo 工程化实战(4)
前端
菩提小狗1 小时前
Sqlmap双击运行脚本,双击直接打开。
前端·笔记·安全·web安全