
uniapp can integrate an AI chat with streaming output on both App and H5. It can be implemented with a webView (hybrid) approach or with renderjs.
Here we use the renderjs approach, which covers both App and H5. For streaming the result we use @microsoft/fetch-event-source; image and video output are supported as well.
vue
// render layer (renderjs)
npm install @microsoft/fetch-event-source -S
// Core streaming code
<script module="eventSource" lang="renderjs">
import { fetchEventSource } from '@microsoft/fetch-event-source';
let ctrl = new AbortController();
const userInfo = localStorage.getItem('userInfo') && JSON.parse(localStorage.getItem('userInfo'));
export default {
methods: {
regImage(txt) { // wrap image URLs in an <img> tag
const regex = /\/conghua.*?\.png/g;
return txt.replace(regex, (match) => {
const fullUrl = `http://169.254.136.102:18888${match}`;
return `<div class="imageItem"><img src="${fullUrl}" width="100%" /></div>`;
});
},
regVideo(txt) { // wrap .mp4 links in a <video> tag
const regex = /\/conghua.*?\.mp4/g;
return txt.replace(regex, (match) => {
const fullUrl = `http://169.254.136.102:18888${match}`;
return `<div class="videoItem"><video class="video" src="${fullUrl}" id="${match}" controls width="420"></video></div>`;
});
},
renderPropsChange(nVal) {
console.log('renderPropsChange triggered')
if (nVal.isSend) {
console.log('isSend is true, start streaming')
this.handleSSE(nVal);
}
},
handleSSE(nVal) {
console.log('handleSSE: open SSE request')
let str = '';
let that = this;
let { chatListData } = nVal;
let len = chatListData.length;
let headers = {
'Content-Type': 'application/json',
"Authorization": `Bearer app-3wLWrvkUoDkMeeYevxAxjF6f`
}
fetchEventSource('http://:18880/v1/chat-messages', {
signal: ctrl.signal, // allows aborting the streaming request
method: 'POST',
headers,
body: JSON.stringify({
inputs: {},
query: nVal.keyword,
response_mode: "streaming",
conversation_id: "",
user: userInfo?.id || 'admin',
}),
async onopen() {
console.log('SSE connection opened')
},
async onerror(error) {
console.log(error)
},
async onmessage(msg) {
let { data } = msg;
let parseData = JSON.parse(data);
// if (parseData.event == 'workflow_started') {
// that.$ownerInstance.callMethod('emits', { task_id: parseData.task_id })
// } else
if (parseData.event == "message") {
str += parseData.answer;
that.$ownerInstance.callMethod('emits', { str })
}
},
onclose() {
const txt = that.regVideo(that.regImage(str));
that.$ownerInstance.callMethod('emits', { str: txt, isOver: true })
that.$ownerInstance.callMethod('emitScroll')
}
}).catch(err => {
console.log('errrrrr', err);
})
},
stopFetch(val) {
let { stopStatus, chatListData, aiMessageIndex } = val
if (stopStatus) {
console.log('abort the streaming request')
ctrl.abort();
ctrl = new AbortController(); // a new AbortController must be created after each abort
this.$ownerInstance.callMethod('emitsStop')
this.$ownerInstance.callMethod('emits', { str: chatListData[aiMessageIndex].content || 'Operation cancelled' })
}
}
}
}
</script>
Because chatListData is part of mergeProps, every time the view layer writes a new content chunk, mergeProps changes and renderPropsChange fires again; the update therefore has to carry a completion flag (isOver), and renderPropsChange must only send when isSend is true. The mergeProps computed property this relies on could look like the sketch below.
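A minimal sketch of that computed property, assuming the logic layer keeps isSend, keyword, chatListData and aiMessageIndex as page data (the names are taken from what renderjs destructures above; the exact page state is an assumption):

javascript
// Logic layer: everything the renderjs module reads is bundled into one computed object.
// Any change to these values re-triggers renderPropsChange in the renderjs layer.
computed: {
  mergeProps() {
    return {
      isSend: this.isSend,             // true while a question is being sent
      keyword: this.keyword,           // the question text
      chatListData: this.chatListData, // the chat history being streamed into
      aiMessageIndex: this.aiMessageIndex
    };
  }
},

stopFetch presumably gets a similar binding, e.g. a separate prop object carrying stopStatus with its own :change: handler pointing at eventSource.stopFetch (not shown in the original).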

vue
// View layer (logic layer)
<view :props="mergeProps" :change:props="eventSource.renderPropsChange" v-show="false"></view>
emits({ str, isOver }) {
this.isLoading = false;
this.isSend = false;
const item = this.chatListData[this.aiMessageIndex];
this.$set(this.chatListData, this.aiMessageIndex, { ...item, content: str, isOver });
},
// Keep scrolling to the bottom while the streamed content grows
watch: {
chatListData: {
handler() {
this.$nextTick(() => {
this.scrollToBottom();
});
},
deep: true,
},
}
// The content itself is rendered with:
<zero-markdown-view :markdown="item.content"></zero-markdown-view>
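The renderjs code also calls emitScroll and emitsStop via callMethod, and the watcher above relies on a scrollToBottom helper; none of these appear in the original. A minimal sketch, assuming the chat list sits in a scroll-view driven by a scrollTop data property (the #chatList selector and the flag names are assumptions):

javascript
// Logic-layer helpers called from renderjs (names match the callMethod calls above)
emitScroll() {
  this.scrollToBottom();
},
emitsStop() {
  // streaming was aborted: reset the flags that drive the renderjs props (names assumed)
  this.isSend = false;
  this.stopStatus = false;
  this.isLoading = false;
},
scrollToBottom() {
  // measure the chat list and scroll past its height
  const query = uni.createSelectorQuery().in(this);
  query.select('#chatList').boundingClientRect(rect => {
    if (rect) {
      this.scrollTop = rect.height;
    }
  }).exec();
}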
There are plenty of hosted speech-to-text models; the one we use was developed in-house and deployed on the intranet. For recording we use uniapp's built-in recorder manager, though the browser's own recording API also works.
One problem I ran into: ideally the audio would only be uploaded after the volume stays below a threshold for about 3 seconds, so the user can keep talking while the mic keeps listening. uni.getRecorderManager cannot monitor the volume level, but the browser API can, so the two recording paths have to be split per platform (a sketch follows).
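A minimal sketch of selecting the recording path with uni-app conditional compilation; both wrapper names are hypothetical, standing in for the getUserMedia flow further down and the recorder-manager code below:

javascript
startRecording() {
  // #ifdef H5
  this.startBrowserRecording(); // hypothetical wrapper around the getUserMedia flow below
  // #endif
  // #ifdef APP-PLUS
  this.startAppRecording();     // hypothetical wrapper around recorderManager.start() below
  // #endif
},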
javascript
//Start recording; here each recording lasts 6 s
this.recorderManager.start({
duration: 6000,
sampleRate: 16000,
numberOfChannels: 1,
encodeBitRate: 16000,
format: 'wav',
});
initRecorderManager() {
this.recorderManager = uni.getRecorderManager();
this.recorderManager.onStart(() => {
console.log('recorder start');
});
// Listen for recording errors
this.recorderManager.onError((err) => {
console.error('Recording error:', err);
uni.showToast({
title: 'Recording error',
icon: 'none'
});
this.resetRecordingState();
});
// Listen for recording interruption events
this.recorderManager.onInterruptionBegin(() => {
console.log('Recording was interrupted');
// this.recorderManager.stop();
// this.resetRecordingState();
});
// When recording stops, upload the wav file and run speech-to-text on it
this.recorderManager.onStop((res) => {
this.handleRecordingFile(res.tempFilePath);
});
}
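handleRecordingFile is not shown in the original; on App the temp file can be posted with uni.uploadFile to the same /transcribe service used in handleStop further down. A sketch, assuming the service returns the same transcription shape as in the axios version below:

javascript
handleRecordingFile(tempFilePath) {
  uni.uploadFile({
    url: 'http://xxxx:15000/transcribe', // same speech-to-text service as below
    filePath: tempFilePath,
    name: 'audio',                       // field name matches the FormData key used below
    success: (res) => {
      const data = JSON.parse(res.data);
      // strip the model's special tokens, same regex as in handleStop below
      const result = data.transcription[0][0].text.replace(/<\|zh\|>([\s\S]*?)<\|woitn\|>/g, '');
      this.keyword = result;
      this.sendDeepseek(); // send the recognized text as the streaming question
    },
    fail: (err) => {
      console.error('Upload failed:', err);
    }
  });
},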

Play a prompt tone when the user taps to start recording:

javascript
const audio = uni.createInnerAudioContext();
audio.src = '/static/y2040.mp3';
audio.play();
In the browser, the recording volume can be monitored to decide when to upload the audio:
javascript
navigator.mediaDevices.getUserMedia({ audio: true })
.then(stream => {
const audioContext = window.AudioContext || window.webkitAudioContext;
// const audioContext = new(window.AudioContext || window.webkitAudioContext)();
const audioCtx = new audioContext();
// Create a media-stream source node and connect the audio stream to it
const liveSource = audioCtx.createMediaStreamSource(stream);
// Create a processor node used to measure the volume level:
// buffer size 2048, one input channel, one output channel
this.levelChecker = audioCtx.createScriptProcessor(2048, 1, 1);
// Connect the microphone source to the processor
liveSource.connect(this.levelChecker);
// Connect the processor to the destination node (the speakers) so it keeps firing
this.levelChecker.connect(audioCtx.destination);
this.levelChecker.onaudioprocess = (e) => {
// Read the input buffer
const buffer = e.inputBuffer.getChannelData(0);
// Sum of squares, i.e. the energy of the signal
let sum = 0.0;
for (let i = 0; i < buffer.length; i += 1) {
sum += buffer[i] * buffer[i];
}
// RMS volume scaled to a rough percentage
const volume = Math.round(Math.sqrt(sum / buffer.length) * 100);
if (volume <= 5) {
this.slientCount++;
console.log('this.slientCount', this.slientCount)
console.log('volume', volume)
} else {
this.slientCount = 0;
}
if (this.slientCount >= 60) {
// 60 callbacks x 2048 samples is roughly 2.6-2.8 s of silence at the context's default 44.1/48 kHz sample rate
console.log('Silence detected, stop and upload')
// liveSource.disconnect();
this.slientCount = 0;
this.handleStop();
this.handleDestroy();
this.levelChecker.onaudioprocess = null;
}
// console.log(volume, 'volume')
};
})
.catch(error => {
console.error('Error accessing microphone:', error);
});
//Upload the recording and convert it to text
handleStop() {
console.log('Uploading recording')
this.recorder.stop() // stop recording
const formData = new FormData();
const blob = this.recorder.getWAVBlob();
const newbolb = new Blob([blob], { type: 'audio/wav' })
const fileOfBlob = new File([newbolb], new Date().getTime() + '.wav')
formData.append('audio', fileOfBlob);
console.log('formData')
axios.post('http://xxxx:15000/transcribe', formData, {
headers: {
'Content-Type': 'multipart/form-data'
}
}).then(({ data }) => {
const result = data.transcription[0][0].text.replace(
/<\|zh\|>([\s\S]*?)<\|woitn\|>/g, "");
this.keyword = result;
this.voiceIng = false;
this.startVoice = false;
this.sendDeepseek()
}).catch(err => {
console.error('Transcription request failed:', err);
},
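this.recorder and handleDestroy are not shown in the original. Judging by getWAVBlob(), the browser-side recorder looks like the js-audio-recorder package; assuming that library, initialization and cleanup could be sketched as:

javascript
import Recorder from 'js-audio-recorder'; // assumption: getWAVBlob() above matches this library

// Create the browser-side recorder with parameters matching the App recorder above
initBrowserRecorder() {
  this.recorder = new Recorder({
    sampleBits: 16,
    sampleRate: 16000,
    numChannels: 1
  });
  this.recorder.start(); // starts capturing from the microphone (returns a Promise)
},
// Release the microphone analysis node created in the getUserMedia block above
handleDestroy() {
  if (this.levelChecker) {
    this.levelChecker.disconnect();
    this.levelChecker.onaudioprocess = null;
  }
  if (this.recorder) {
    this.recorder.destroy(); // frees the recorder's AudioContext
    this.recorder = null;
  }
},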