📂 项目最终目录结构
text
编辑
smart-pool-ai-fullstack/
├── backend/ # 后端代码 (之前提供的 Python 代码)
│ ├── app.py
│ ├── brain_agent/
│ ├── data_engine/
│ ├── digital_human/
│ ├── requirements.txt
│ └── Dockerfile
├── frontend/ # Vue3 前端代码
│ ├── src/
│ │ ├── components/
│ │ │ ├── DigitalHuman.vue # 数字人视频组件
│ │ │ ├── ChatInterface.vue # 对话与图表组件
│ │ │ └── VoiceControl.vue # 语音控制组件
│ │ ├── App.vue
│ │ └── main.js
│ ├── package.json
│ ├── vite.config.js
│ └── Dockerfile
├── docker-compose.yml # 编排文件
└── .env # 环境变量配置
1. 🎨 前端部分 (Vue 3 + TypeScript + ECharts)
1.1 初始化项目
bash
编辑
npm create vite@latest frontend -- --template vue-ts
cd frontend
npm install echarts websocket-async axios
1.2 核心组件:src/components/DigitalHuman.vue
功能:渲染 UE5 推流的数字人视频,并根据情感指令切换滤镜/覆盖层。
vue
编辑
<template>
  <div class="digital-human-container">
    <!-- Digital-human video stream (UE5 Pixel Streaming / WebRTC source).
         NOTE(review): autoplay with audio may be blocked by browser autoplay
         policy until a user gesture — confirm against the real stream setup. -->
    <video
      ref="videoRef"
      autoplay
      playsinline
      class="human-video"
      :class="currentEmotion"
    ></video>
    <!-- Emotion state indicator (debug aid) -->
    <div class="status-badge">{{ currentEmotion }}</div>
    <!-- Loading overlay shown until a stream is attached -->
    <div v-if="!isConnected" class="loading-overlay">
      正在连接数字人专家...
    </div>
  </div>
</template>

<script setup lang="ts">
import { ref, onMounted } from 'vue';

// Known emotion states; each maps to a CSS filter class below.
// Unknown values are accepted but simply apply no filter.
const videoRef = ref<HTMLVideoElement | null>(null);
const isConnected = ref(false);
const currentEmotion = ref('calm'); // calm | urgent | happy | concerned

// Methods exposed to the parent component (App.vue), which drives this
// component from backend WebSocket messages.
defineExpose({
  /** Switch the emotion filter applied to the video element. */
  updateEmotion: (emotion: string) => {
    currentEmotion.value = emotion;
    // More elaborate reactions (CSS animations, overlays) can hook in here.
    console.log(`[DigitalHuman] 表情切换为: ${emotion}`);
  },
  /** Attach a live MediaStream (e.g. from WebRTC) and hide the overlay. */
  setStream: (stream: MediaStream) => {
    if (videoRef.value) {
      videoRef.value.srcObject = stream;
      isConnected.value = true;
    }
  }
});

onMounted(() => {
  // In production this is where the WebRTC connection to the UE5 server
  // would be established. Demo mode: leave a black placeholder.
  console.log("数字人组件已挂载");
});
</script>

<style scoped>
.digital-human-container {
  position: relative;
  width: 100%;
  height: 100%;
  background: #000;
  overflow: hidden;
  border-radius: 12px;
}
.human-video {
  width: 100%;
  height: 100%;
  object-fit: cover;
  transition: filter 0.3s ease;
}
/* Emotion filter effects */
.human-video.urgent {
  filter: contrast(1.2) saturate(1.5) hue-rotate(-10deg); /* reddish, high contrast */
  box-shadow: 0 0 20px rgba(255, 0, 0, 0.3) inset;
}
.human-video.happy {
  filter: brightness(1.1) saturate(1.2);
}
.human-video.concerned {
  filter: grayscale(0.2) brightness(0.9);
}
.status-badge {
  position: absolute;
  top: 10px;
  right: 10px;
  background: rgba(0,0,0,0.6);
  color: #fff;
  padding: 4px 8px;
  border-radius: 4px;
  font-size: 12px;
  text-transform: uppercase;
}
.loading-overlay {
  position: absolute;
  top: 0; left: 0; right: 0; bottom: 0;
  display: flex;
  align-items: center;
  justify-content: center;
  color: white;
  background: rgba(0,0,0,0.8);
}
</style>
1.3 核心组件:src/components/ChatInterface.vue
功能:显示对话气泡,动态渲染 ECharts 图表,处理后端指令。
vue
编辑
<template>
  <div class="chat-interface">
    <div class="messages-container" ref="messagesContainer">
      <div v-for="(msg, index) in messages" :key="index" class="message" :class="msg.role">
        <div class="avatar">{{ msg.role === 'user' ? '👤' : '🤖' }}</div>
        <div class="content">
          <p>{{ msg.text }}</p>
          <!-- Dynamic chart area, rendered only when the message carries chart data -->
          <div v-if="msg.chartData" class="chart-wrapper">
            <div :id="'chart-' + index" class="chart-container"></div>
          </div>
        </div>
      </div>
    </div>
    <div class="input-area">
      <input v-model="inputText" @keyup.enter="sendMessage" placeholder="询问泳池数据或设备状态..." />
      <button @click="sendMessage">发送</button>
    </div>
  </div>
</template>

<script setup lang="ts">
import { ref, nextTick } from 'vue';
import * as echarts from 'echarts';

/** One chat bubble; chartData is present only for chart-bearing AI replies. */
interface ChatMessage {
  text: string;
  role: 'user' | 'ai';
  chartData: { type: string; mockData: number[] } | null;
}

// FIX: defineEmits is declared before any function that uses `emit`.
// The original referenced `emit` seven functions above its declaration.
const emit = defineEmits(['send-message']);

const messages = ref<ChatMessage[]>([]);
const inputText = ref('');
const messagesContainer = ref<HTMLElement | null>(null);

/**
 * Append a message bubble (and optionally a chart) to the conversation.
 * Exposed to the parent, which feeds it from backend WebSocket messages.
 */
const addMessage = (text: string, role: 'user' | 'ai', chartType?: string) => {
  const newMsg: ChatMessage = {
    text,
    role,
    chartData: chartType ? { type: chartType, mockData: generateMockData(chartType) } : null
  };
  messages.value.push(newMsg);
  // Wait for the DOM node to exist before scrolling / mounting the chart.
  nextTick(() => {
    scrollToBottom();
    if (newMsg.chartData) {
      renderChart('chart-' + (messages.value.length - 1), newMsg.chartData);
    }
  });
};

/** Render (or re-render) a line chart into the element with the given id. */
const renderChart = (domId: string, data: { type: string; mockData: number[] }) => {
  const chartDom = document.getElementById(domId);
  if (!chartDom) return;
  // FIX: reuse an existing instance instead of calling init() twice on the
  // same node, which logs a warning and leaks the previous instance.
  const myChart = echarts.getInstanceByDom(chartDom) ?? echarts.init(chartDom);
  const option = {
    backgroundColor: 'transparent',
    textStyle: { color: '#fff' },
    xAxis: { type: 'category', data: ['00:00', '04:00', '08:00', '12:00', '16:00', '20:00'] },
    yAxis: { type: 'value', name: data.type === 'temp' ? '温度(℃)' : '风险概率' },
    series: [{
      data: data.mockData,
      type: 'line',
      smooth: true,
      areaStyle: { opacity: 0.3 },
      itemStyle: { color: data.type === 'risk' ? '#ff4d4f' : '#1890ff' }
    }]
  };
  myChart.setOption(option);
};

/** Demo data: risk probabilities vs. water temperatures over one day. */
const generateMockData = (type: string): number[] => {
  return type === 'risk'
    ? [0.1, 0.2, 0.4, 0.9, 0.85, 0.6]
    : [24, 24.5, 26, 27.5, 27, 26.5];
};

/** Push the user's input as a bubble and forward it to the parent's WebSocket. */
const sendMessage = () => {
  if (!inputText.value.trim()) return;
  addMessage(inputText.value, 'user');
  emit('send-message', inputText.value);
  inputText.value = '';
};

const scrollToBottom = () => {
  if (messagesContainer.value) {
    messagesContainer.value.scrollTop = messagesContainer.value.scrollHeight;
  }
};

defineExpose({ addMessage });
</script>

<style scoped>
.chat-interface {
  display: flex;
  flex-direction: column;
  height: 100%;
  background: rgba(0, 0, 0, 0.6);
  backdrop-filter: blur(10px);
  border-radius: 12px;
  padding: 20px;
}
.messages-container {
  flex: 1;
  overflow-y: auto;
  margin-bottom: 20px;
}
.message {
  display: flex;
  margin-bottom: 15px;
  align-items: flex-start;
}
.message.user { flex-direction: row-reverse; }
.avatar { font-size: 24px; margin: 0 10px; }
.content {
  background: rgba(255,255,255,0.1);
  padding: 10px 15px;
  border-radius: 8px;
  max-width: 70%;
  color: #fff;
}
.chart-wrapper {
  margin-top: 10px;
  width: 300px;
  height: 200px;
}
.input-area { display: flex; gap: 10px; }
input {
  flex: 1;
  padding: 10px;
  border-radius: 6px;
  border: none;
  background: rgba(255,255,255,0.9);
}
button {
  padding: 10px 20px;
  background: #1890ff;
  color: white;
  border: none;
  border-radius: 6px;
  cursor: pointer;
}
</style>
1.4 主入口:src/App.vue
功能:整合 WebSocket 通信,协调数字人和聊天界面。
vue
编辑
<template>
  <div class="app-container">
    <div class="left-panel">
      <DigitalHuman ref="humanRef" />
    </div>
    <div class="right-panel">
      <ChatInterface ref="chatRef" @send-message="handleSendMessage" />
    </div>
  </div>
</template>

<script setup lang="ts">
import { ref, onMounted, onBeforeUnmount } from 'vue';
import DigitalHuman from './components/DigitalHuman.vue';
import ChatInterface from './components/ChatInterface.vue';

const humanRef = ref<InstanceType<typeof DigitalHuman> | null>(null);
const chatRef = ref<InstanceType<typeof ChatInterface> | null>(null);

let ws: WebSocket | null = null;
// FIX: without this guard the onclose handler reconnects forever,
// even after the component is torn down.
let disposed = false;

/** Open the backend WebSocket and wire its messages to the two child panels. */
const connectWebSocket = () => {
  ws = new WebSocket('ws://localhost:8080/ws/chat');
  ws.onopen = () => console.log('WS Connected');
  ws.onmessage = (event) => {
    const data = JSON.parse(event.data);
    // 1. Append the AI reply (with an optional chart directive).
    if (chatRef.value) {
      // Streaming fragments would need merging in a real deployment;
      // simplified here to one bubble per message.
      chatRef.value.addMessage(data.text, 'ai', data.metadata?.action === 'render' ? data.metadata.chart_type : undefined);
    }
    // 2. Update the digital human's emotion filter.
    if (humanRef.value && data.emotion) {
      humanRef.value.updateEmotion(data.emotion);
    }
  };
  // Auto-reconnect after 3 s, unless we closed the socket on purpose.
  ws.onclose = () => {
    if (!disposed) setTimeout(connectWebSocket, 3000);
  };
};

/** Forward user input from ChatInterface to the backend. */
const handleSendMessage = (text: string) => {
  if (ws && ws.readyState === WebSocket.OPEN) {
    ws.send(text);
  }
};

onMounted(() => {
  connectWebSocket();
});

// FIX: close the socket on teardown so it does not leak across HMR/unmount.
onBeforeUnmount(() => {
  disposed = true;
  ws?.close();
  ws = null;
});
</script>

<style>
body { margin: 0; background: #1a1a1a; color: white; font-family: sans-serif; }
.app-container {
  display: grid;
  grid-template-columns: 1.5fr 1fr;
  height: 100vh;
  gap: 20px;
  padding: 20px;
  box-sizing: border-box;
}
.left-panel, .right-panel {
  height: 100%;
  border-radius: 12px;
  overflow: hidden;
}
</style>
2. 🐳 Docker 部署脚本
2.1 后端 backend/Dockerfile
dockerfile
编辑
FROM python:3.9-slim

WORKDIR /app

# Install system build dependencies (gcc/g++ needed to compile native wheels),
# then drop the apt cache to keep the image small.
# FIX: the comment markers ('#') were stripped in the original, which made
# the Dockerfile invalid.
RUN apt-get update && apt-get install -y --no-install-recommends gcc g++ && rm -rf /var/lib/apt/lists/*

# Install Python dependencies first so this layer is cached across code edits.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# Expose the FastAPI/uvicorn port.
EXPOSE 8080
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8080"]
2.2 前端 frontend/Dockerfile
dockerfile
编辑
# Build stage: compile the Vue app into static assets.
# FIX: the comment markers ('#') were stripped in the original, which made
# the Dockerfile invalid.
FROM node:18-alpine AS build-stage
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
RUN npm run build

# Production stage: serve the built assets with nginx.
FROM nginx:alpine
COPY --from=build-stage /app/dist /usr/share/nginx/html
COPY nginx.conf /etc/nginx/conf.d/default.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
注意:需要在 frontend/ 下创建一个简单的 nginx.conf:
nginx
编辑
server {
    listen 80;

    location / {
        root /usr/share/nginx/html;
        index index.html;
        # FIX: the '$' signs were stripped in the original ("uri uri/"),
        # which breaks the SPA history-mode fallback entirely.
        try_files $uri $uri/ /index.html;
    }

    # Proxy WebSocket requests to the backend service
    # (required when frontend and backend are served from different origins).
    location /ws/ {
        proxy_pass http://backend:8080/ws/;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
2.3 根目录 docker-compose.yml
功能:一键启动 LLM (vLLM)、后端、前端、向量库 (Milvus)。
yaml
编辑
version: '3.8'

services:
  # 1. LLM inference service (requires an NVIDIA GPU)
  llm-service:
    image: vllm/vllm-openai:latest
    command: --model /models/Qwen-14B-Chat --tensor-parallel-size 1 --trust-remote-code
    volumes:
      - ./models:/models  # place the downloaded Qwen model here
    ports:
      - "8000:8000"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    restart: always

  # 2. Vector database (Milvus)
  # FIX: environment/depends_on list items were split across lines in the
  # original ('-' on one line, value on the next), producing invalid YAML.
  milvus:
    image: milvusdb/milvus:v2.3.0
    environment:
      - ETCD_ENDPOINTS=etcd:2379
      - MINIO_ADDRESS=minio:9000
    depends_on:
      - etcd
      - minio
    ports:
      - "19530:19530"
    volumes:
      - milvus_data:/var/lib/milvus

  # Simplified etcd and MinIO (Milvus dependencies)
  etcd:
    image: quay.io/coreos/etcd:v3.5.5
    environment:
      - ETCD_AUTO_COMPACTION_MODE=revision
      - ETCD_AUTO_COMPACTION_RETENTION=1000
    volumes:
      - etcd_data:/etcd

  minio:
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    command: server /minio_data
    environment:
      - MINIO_ROOT_USER=minioadmin
      - MINIO_ROOT_PASSWORD=minioadmin
    volumes:
      - minio_data:/minio_data

  # 3. Backend API service
  backend:
    build: ./backend
    ports:
      - "8080:8080"
    environment:
      - LLM_URL=http://llm-service:8000/v1
      - MILVUS_HOST=milvus
      - MILVUS_PORT=19530
    depends_on:
      - llm-service
      - milvus
    restart: always

  # 4. Frontend web service
  frontend:
    build: ./frontend
    ports:
      - "3000:80"
    depends_on:
      - backend
    restart: always

volumes:
  milvus_data:
  etcd_data:
  minio_data:
3. 🚀 快速启动指南
步骤 1: 准备模型
你需要下载一个 Qwen-14B (或 7B) 的模型文件放到 ./models 目录。
bash
编辑
# 示例:使用 huggingface-cli 下载 (需安装 huggingface_hub)
mkdir -p models
huggingface-cli download Qwen/Qwen-14B-Chat --local-dir ./models/Qwen-14B-Chat
如果显存不足,可改为 Qwen-7B-Chat 并修改 docker-compose.yml 中的 model 路径。
步骤 2: 启动所有服务
在项目根目录执行:
bash
编辑
docker-compose up --build
步骤 3: 访问系统
前端界面:打开浏览器访问 http://localhost:3000。
验证功能:
在输入框输入:"3号水泵有什么风险?"
观察:
右侧聊天框出现 AI 回复。
如果出现高风险预警,左侧数字人区域变红(CSS 滤镜效果)。
如果 AI 返回了图表指令,聊天框内自动渲染 ECharts 折线图。
关键注意事项
GPU 要求:运行 vllm + Qwen-14B 至少需要 24GB 显存 (如 RTX 3090/4090)。如果是 12GB 显存,请改用 Qwen-7B 或 Qwen-1.8B。
数字人视频流:目前的 DigitalHuman.vue 是模拟状态。真实生产中,你需要部署 UE5 Pixel Streaming 服务,并将 <video> 标签的 srcObject 绑定到 WebRTC 流。
网络配置:如果在局域网其他机器访问,需将 localhost 替换为服务器 IP,并在防火墙开放 3000 和 8080 端口。
这套代码实现了从数据预测到大模型决策再到前端可视化交互的完整闭环。您可以直接在此基础上进行业务数据的对接和 UI 的美化!