Front-end Vue: using the microphone for intelligent speech recognition and playback
1. Install in the terminal
npm install recordrtc
2. Import
import RecordRTC from 'recordrtc'
3. HTML template (adjust to your own business needs)
<div class="Page">
<el-form ref="mainFormRef" class="mainForm" :model="main_form" label-width="100px" label-position="top">
<el-form-item :label="'热词设置:\n(一行一个关键字,空格隔开权重,如将1号屏和3号屏调换 20)'">
<el-input type="textarea" v-model="main_form.hotWords" placeholder="请输入热词"
:autosize="{ minRows: 5, maxRows: 15 }"></el-input>
</el-form-item>
<el-form-item label="语音识别结果显示:">
<div :class="resultDetails && resultDetails.status <= 0 ? 'result_content r_h_input_red' : 'result_content'">{{
main_form.result }}</div>
</el-form-item>
<el-form-item label="声纹采集:" prop="file">
<div class="voiceGather_btn">
<el-select :disabled="voiceStatus" style="width: 100%" v-model="main_form.chooseMicDeviceId"
placeholder="请选择麦克风">
<el-option v-for="item in Mic" :key="item.deviceId" :label="item.label" :value="item.deviceId">
</el-option>
</el-select>
<div class="voiceGather" v-if="main_form.chooseMicDeviceId != ''">
<el-button style="margin-left: 20px" @click="voiceInput" :loading="startLoading">{{ voiceStatus ? "取消录音" :
"开始录音" }}</el-button>
</div>
</div>
<div class="voiceGather_btn">
<audio controls v-if="recordedBlob" :src="recordedBlob"></audio>
</div>
</el-form-item>
</el-form>
</div>
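The hot-word textarea is sent to the backend as one plain string, so a value that follows the format described in the label above might look like this (both phrases are made-up examples, not part of the original component):

// Hypothetical hotWords value: one keyword per line, weight after a space
this.main_form.hotWords = 'swap screen 1 and screen 3 20\nopen the monitoring view 15'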
4. Initialize the data
data() {
  return {
    recorder: '',
    voiceStatus: false,             // whether recording is in progress
    main_form: {
      chooseMicDeviceId: '',        // id of the selected microphone
      hotWords: '',                 // hot words
      result: '',                   // speech recognition result text
    },
    Mic: [],                        // available microphones
    RMSList: new Array(30).fill(0), // rolling volume samples
    audioURL: null,
    file: null,
    resultDetails: {},              // full speech recognition response
    startLoading: false,
    recordedBlob: null,             // object URL of the recorded Blob for playback
  };
},
5. Call getMic in mounted
mounted() {
  this.getMic()
}
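It is also worth releasing resources when the component is destroyed, so the volume timer and any live microphone tracks do not keep running. A minimal sketch, assuming Vue 2's beforeDestroy hook (use beforeUnmount in Vue 3):

// Hedged sketch: clean up when leaving the page
beforeDestroy() {
  clearInterval(this.timer1)                                // stop the volume polling timer
  if (this.stream) {
    this.stream.getTracks().forEach(track => track.stop())  // release the microphone
  }
  if (this.recordedBlob) {
    URL.revokeObjectURL(this.recordedBlob)                   // free the playback object URL
  }
},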
6. In methods: start recording, and upload to the backend server after stopping
methods: {
  getMic() {
    let that = this;
    // getUserMedia is only available in a secure context (https or localhost)
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
      // Prompt for microphone permission, then enumerate the audio input devices
      navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
        navigator.mediaDevices.enumerateDevices().then(function (devices) {
          devices.forEach(function (device) {
            if (device.kind === 'audioinput') { // microphone
              // skip the virtual "default"/"communications" entries so devices are not listed twice
              if (device.deviceId != 'default' && device.deviceId != 'communications') {
                that.Mic.push(device)
              }
            }
          });
          // pre-select the first real microphone, if any
          that.main_form.chooseMicDeviceId = that.Mic[0] ? that.Mic[0].deviceId : ''
        })
        // the permission-check stream is no longer needed; release the microphone
        stream.getTracks().forEach(track => track.stop());
      })
    }
  },
  // Click handler for the record button
  voiceInput() {
    if (this.voiceStatus) {
      // currently recording: stop
      this.stopRecord()
    } else {
      // not recording: reset previous results and start
      this.resultDetails = {}
      this.main_form.result = ''
      this.recordedBlob = null
      this.startRecord()
    }
  },
  // Start recording
  startRecord() {
    console.log('startRecord: button pressed ------------------------------')
    var that = this
    this.voiceStatus = true
    // mediaDevices gives access to media input devices such as cameras and microphones
    window.navigator.mediaDevices.getUserMedia({ audio: { deviceId: this.main_form.chooseMicDeviceId } }).then((stream) => {
      this.stream = stream;
      this.getVoice()
      this.recorder = RecordRTC(stream, {
        type: 'audio',                          // 'audio' or 'video'
        mimeType: 'audio/wav',
        recorderType: RecordRTC.StereoAudioRecorder,
        desiredSampRate: 16000,                 // 16 kHz sample rate
        numberOfAudioChannels: 1,               // mono
        timeSlice: 1000,                        // emit a blob every second
        // bufferSize: 4096,                    // buffer size
        ondataavailable: this.sendData,
      });
      this.recorder.startRecording();
    }).catch(function (err) {
      // getUserMedia failed: no microphone support or permission denied
      alert('The current browser cannot access the microphone!');
      that.voiceStatus = false
    });
  },
  // Receives the intermediate blobs emitted every timeSlice (unused here; kept for reference)
  sendData(blob) {
    return
    // var BB = new Blob([blob], { 'type': 'audio/wav; codecs=opus' })
    // var audioURL = window.URL.createObjectURL(BB)
    // Playback
    // const audio = document.createElement('audio')
    // audio.controls = true // show the audio controls
    // audio.src = audioURL
    // audio.play()
    // Download
    // let a = document.createElement("a");
    // a.href = audioURL;
    // a.download = 'test';
    // a.click();
    // // release the temporary object URL
    // window.URL.revokeObjectURL(audioURL);
    // let file = new window.File([BB], 'test.wav')
    // this.file = file
    // console.log('Recording stopped, file saved ---------------------', this.file);
  },
  // Stop recording
  stopRecord() {
    console.log('stopRecord: button released ------------------------------')
    if (this.recorder != null) {
      this.startLoading = true
      let recorder = this.recorder
      // handle the stop event
      recorder.stopRecording(() => {
        const blob = this.recorder.getBlob();           // get the recorded Blob
        this.recordedBlob = URL.createObjectURL(blob);  // object URL for <audio> playback
        var BB = new Blob([blob], { 'type': 'audio/wav; codecs=opus' })
        let file = new window.File([BB], 'test.wav')
        this.file = file
        console.log('Got the file, uploading to the backend ---------------------', this.file);
        this.uploadSubmit();
      });
      let stream = this.stream;
      clearInterval(this.timer1);                                 // stop the volume polling timer
      stream.getAudioTracks().forEach(track => track.stop());     // release the microphone
    }
  },
  // Upload to the backend server
  uploadSubmit() {
    // uploadAudio is the project's own API helper (see the sketch after this methods block)
    uploadAudio(this.file, this.main_form.hotWords).then(res => {
      this.resultDetails = res.data.data || {}
      this.main_form.result = this.resultDetails.result || ''
      this.voiceStatus = false
      this.startLoading = false
    }).catch(err => {
      this.voiceStatus = false
      this.startLoading = false
    })
  },
  // Measure the input volume level
  getVoice() {
    const audioContext = new (window.AudioContext || window.webkitAudioContext)()
    // feed the microphone stream into the audio context
    const mediaStreamSource = audioContext.createMediaStreamSource(this.stream)
    // create an analyser node
    const analyserNode = audioContext.createAnalyser()
    // connect the nodes
    mediaStreamSource.connect(analyserNode)
    // uncomment to monitor the microphone input in real time
    // analyserNode.connect(audioContext.destination)
    // buffer for the frequency data
    const dataArray = new Uint8Array(analyserNode.frequencyBinCount);
    function getVolume() {
      analyserNode.getByteFrequencyData(dataArray);
      let sum = 0;
      for (let i = 0; i < dataArray.length; i++) {
        sum += dataArray[i];
      }
      // average volume across all frequency bins
      const averageVolume = sum / dataArray.length;
      return averageVolume;
    }
    // sample the volume periodically
    this.timer1 = setInterval(() => {
      const volume = getVolume();
      console.log('volume:', Math.round(volume));
      // this.RMSList.unshift(Math.round(volume));
      // this.RMSList.pop();
      // react to the volume here as needed (e.g. drive a level meter)
    }, 1000);
  },
}
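uploadSubmit calls uploadAudio, which is the project's own API helper and is not shown in this post. A minimal sketch of what it might look like, assuming axios is installed and assuming a hypothetical multipart endpoint /api/asr/upload with field names file and hotWords (all three are made up, not the real interface):

// api/audio.js — hedged sketch, not the original project's code
import axios from 'axios'

export function uploadAudio(file, hotWords) {
  const formData = new FormData()
  formData.append('file', file)          // the recorded test.wav File object
  formData.append('hotWords', hotWords)  // hot-word string from the textarea
  return axios.post('/api/asr/upload', formData, {
    headers: { 'Content-Type': 'multipart/form-data' },
  })
}

With a helper shaped like this, the res.data.data read in uploadSubmit is whatever JSON envelope your backend returns; adjust the URL and field names to the real recognition service.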