From Model to Production: A Hands-On Engineering Guide to Deploying Large AI Models

🎯 Chapter 1: Building an Enterprise-Grade AI System from Scratch (Hands-On)

1.1 Hands-On Project: An Intelligent Customer Service System

# 项目:基于LLM的智能客服系统
# 技术栈:FastAPI + LangChain + PostgreSQL + Redis + Docker
# 1. 项目结构
smart_customer_service/
├── app/
│   ├── api/              # API接口层
│   │   ├── __init__.py
│   │   ├── endpoints.py  # 业务端点
│   │   └── middleware.py # 中间件
│   ├── core/             # 核心业务
│   │   ├── config.py     # 配置管理
│   │   ├── security.py   # 安全认证
│   │   └── database.py   # 数据库
│   ├── models/           # 数据模型
│   │   ├── user.py       # 用户模型
│   │   ├── conversation.py # 对话模型
│   │   └── knowledge.py  # 知识库模型
│   ├── services/         # 业务服务
│   │   ├── llm_service.py # LLM服务
│   │   ├── vector_service.py # 向量服务
│   │   └── cache_service.py # 缓存服务
│   ├── utils/           # 工具函数
│   └── main.py          # 应用入口
├── docker/
│   ├── Dockerfile
│   ├── docker-compose.yml
│   └── nginx.conf
├── tests/              # 测试代码
├── scripts/           # 部署脚本
├── .env.example       # 环境变量示例
├── requirements.txt   # 依赖包
└── README.md
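# requirements.txt -- illustrative sketch based on the stack named above; the original
# article does not list exact packages or pinned versions, so treat these as assumptions:
#   fastapi
#   uvicorn[standard]
#   langchain
#   sentence-transformers
#   faiss-cpu
#   redis
#   psycopg2-binary
#   SQLAlchemy
#   python-dotenv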
# 2. 核心实现 - LLM服务层
# app/services/llm_service.py
import os
import hashlib
from typing import List, Dict, Any
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.prompts import PromptTemplate
import redis
import json
from datetime import datetime
class LLMService:
    def __init__(self, config: Dict):
        self.config = config
        self.redis_client = redis.Redis(
            host=config['REDIS_HOST'],
            port=config['REDIS_PORT'],
            db=0,
            password=config.get('REDIS_PASSWORD'),  # matches the password-protected Redis in the compose file
            decode_responses=True
        )
        self.setup_llm()
        self.setup_vector_store()
    
    def setup_llm(self):
        """初始化LLM(支持本地和云端)"""
        if self.config['LLM_TYPE'] == 'local':
            # 本地模型(使用Ollama或本地部署)
            from langchain.llms import Ollama
            self.llm = Ollama(
                model=self.config['MODEL_NAME'],
                temperature=0.3,
                num_predict=512
            )
        else:
            # 云端API(OpenAI/Claude/通义千问)
            from langchain.chat_models import ChatOpenAI
            self.llm = ChatOpenAI(
                model_name=self.config['MODEL_NAME'],
                temperature=0.3,
                openai_api_key=self.config['API_KEY'],
                openai_api_base=self.config.get('API_BASE', None)
            )
    
    def setup_vector_store(self):
        """初始化向量知识库"""
        # 1. 加载知识库文档
        knowledge_path = self.config['KNOWLEDGE_PATH']
        documents = self.load_documents(knowledge_path)
        
        # 2. 文档分割
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50
        )
        texts = text_splitter.split_documents(documents)
        
        # 3. 创建向量存储
        embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
        )
        
        self.vector_store = FAISS.from_documents(texts, embeddings)
        
        # 4. 创建检索链
        prompt_template = """基于以下上下文和用户问题,提供专业、准确的回答。
        
        上下文:
        {context}
        
        问题: {question}
        
        回答(用中文):"""
        
        PROMPT = PromptTemplate(
            template=prompt_template,
            input_variables=["context", "question"]
        )
        
        self.qa_chain = RetrievalQA.from_chain_type(
            llm=self.llm,
            chain_type="stuff",
            retriever=self.vector_store.as_retriever(
                search_kwargs={"k": 3}
            ),
            chain_type_kwargs={"prompt": PROMPT}
        )
    
    def get_response(self, question: str, user_id: str, session_id: str) -> Dict:
        """获取AI回复(带缓存和日志)"""
        # 1. Check the cache (use a stable digest; Python's built-in hash() differs across processes)
        cache_key = f"response:{user_id}:{hashlib.md5(question.encode('utf-8')).hexdigest()}"
        cached = self.redis_client.get(cache_key)
        if cached:
            return json.loads(cached)
        
        # 2. 调用LLM
        start_time = datetime.now()
        
        try:
            # 先进行意图识别
            intent = self.detect_intent(question)
            
            if intent == "知识库查询":
                response = self.qa_chain.run(question)
            elif intent == "闲聊":
                response = self.chat(question)
            elif intent == "任务型":
                response = self.handle_task(question)
            else:
                response = self.qa_chain.run(question)
            
            # 3. 后处理
            processed_response = self.post_process(response)
            
            # 4. 记录日志
            self.log_interaction(
                user_id=user_id,
                session_id=session_id,
                question=question,
                response=processed_response,
                processing_time=(datetime.now() - start_time).total_seconds()
            )
            
            # 5. 缓存结果(1小时过期)
            result = {
                "response": processed_response,
                "intent": intent,
                "timestamp": datetime.now().isoformat()
            }
            
            self.redis_client.setex(
                cache_key,
                3600,
                json.dumps(result)
            )
            
            return result
            
        except Exception as e:
            # 降级处理:返回预设回复
            return {
                "response": "系统正在升级,请稍后再试",
                "intent": "error",
                "error": str(e)
            }
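
The service above calls several helpers that the listing does not show (`load_documents`, `detect_intent`, `chat`, `handle_task`, `post_process`, `log_interaction`). The sketch below is one minimal way to fill them in inside the `LLMService` class; the intent-classification prompt, the fallback task reply, and the Redis log key are assumptions, not part of the original project.

# app/services/llm_service.py (continued) -- illustrative helpers; add inside LLMService
    def load_documents(self, knowledge_path: str):
        """Load knowledge-base documents from a directory of .txt files."""
        from langchain.document_loaders import DirectoryLoader, TextLoader
        loader = DirectoryLoader(knowledge_path, glob="**/*.txt", loader_cls=TextLoader)
        return loader.load()

    def detect_intent(self, question: str) -> str:
        """Classify the question into the intents used by get_response()."""
        prompt = (
            "将下面的问题分类为:知识库查询、闲聊、任务型。只输出类别名称。\n"
            f"问题:{question}"
        )
        label = str(self.llm.predict(prompt)).strip()
        return label if label in ("知识库查询", "闲聊", "任务型") else "知识库查询"

    def chat(self, question: str) -> str:
        """Free-form chat without retrieval."""
        return str(self.llm.predict(question))

    def handle_task(self, question: str) -> str:
        """Placeholder for task-type requests (order lookup, ticket creation, ...)."""
        return "已收到您的请求,正在为您处理。"

    def post_process(self, response: str) -> str:
        """Trim whitespace; extend with sensitive-word filtering or formatting as needed."""
        return response.strip()

    def log_interaction(self, **record) -> None:
        """Append the interaction record to a Redis list for offline analysis."""
        record["timestamp"] = datetime.now().isoformat()
        self.redis_client.rpush("interaction:logs", json.dumps(record, ensure_ascii=False))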

1.2 Hands-On Docker Containerized Deployment

# docker-compose.yml - 完整生产环境配置
version: '3.8'
services:
  # 主应用服务
  ai-service:
    build:
      context: .
      dockerfile: docker/Dockerfile
    container_name: ai-service
    restart: unless-stopped
    ports:
      - "8000:8000"
    environment:
      - ENVIRONMENT=production
      - DATABASE_URL=postgresql://postgres:password@postgres:5432/ai_service
      - REDIS_URL=redis://:redis_password@redis:6379/0
      - MODEL_PATH=/app/models
      - LOG_LEVEL=INFO
    volumes:
      - model_volume:/app/models
      - data_volume:/app/data
      - logs_volume:/app/logs
    depends_on:
      - postgres
      - redis
      - vector-db
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - ai-network
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4G
        reservations:
          cpus: '1'
          memory: 2G
  # PostgreSQL数据库
  postgres:
    image: postgres:14-alpine
    container_name: ai-postgres
    restart: unless-stopped
    environment:
      - POSTGRES_DB=ai_service
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=password
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./init-db.sql:/docker-entrypoint-initdb.d/init.sql
    ports:
      - "5432:5432"
    networks:
      - ai-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5
  # Redis缓存
  redis:
    image: redis:7-alpine
    container_name: ai-redis
    restart: unless-stopped
    command: redis-server --appendonly yes --requirepass redis_password
    volumes:
      - redis_data:/data
    ports:
      - "6379:6379"
    networks:
      - ai-network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 5
  # 向量数据库(Milvus)
  vector-db:
    image: milvusdb/milvus:2.2.0
    container_name: ai-milvus
    restart: unless-stopped
    environment:
      - ETCD_ENDPOINTS=etcd:2379
      - MINIO_ADDRESS=minio:9000
    volumes:
      - milvus_data:/var/lib/milvus
    ports:
      - "19530:19530"
      - "9091:9091"
    networks:
      - ai-network
    depends_on:
      # etcd and minio are not defined in this compose file as written;
      # see the sketch after this file for minimal service definitions
      - etcd
      - minio
  # Nginx负载均衡
  nginx:
    image: nginx:1.21-alpine
    container_name: ai-nginx
    restart: unless-stopped
    volumes:
      - ./docker/nginx.conf:/etc/nginx/nginx.conf
      - ./docker/ssl:/etc/nginx/ssl
    ports:
      - "80:80"
      - "443:443"
    depends_on:
      - ai-service
    networks:
      - ai-network
  # 监控系统(Prometheus + Grafana)
  prometheus:
    image: prom/prometheus:latest
    container_name: ai-prometheus
    restart: unless-stopped
    volumes:
      - ./docker/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/console_templates'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    ports:
      - "9090:9090"
    networks:
      - ai-network
  grafana:
    image: grafana/grafana:latest
    container_name: ai-grafana
    restart: unless-stopped
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafana_data:/var/lib/grafana
      - ./docker/grafana/provisioning:/etc/grafana/provisioning
    ports:
      - "3000:3000"
    depends_on:
      - prometheus
    networks:
      - ai-network
  # 日志收集(ELK Stack精简版)
  elasticsearch:
    image: elasticsearch:8.5.0
    container_name: ai-elasticsearch
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    volumes:
      - elasticsearch_data:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
    networks:
      - ai-network
  logstash:
    image: logstash:8.5.0
    container_name: ai-logstash
    volumes:
      - ./docker/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - "5000:5000"
    depends_on:
      - elasticsearch
    networks:
      - ai-network
  kibana:
    image: kibana:8.5.0
    container_name: ai-kibana
    ports:
      - "5601:5601"
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    depends_on:
      - elasticsearch
    networks:
      - ai-network
volumes:
  postgres_data:
  redis_data:
  milvus_data:
  prometheus_data:
  grafana_data:
  elasticsearch_data:
  model_volume:
  data_volume:
  logs_volume:
networks:
  ai-network:
    driver: bridge
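
# Note: the vector-db service above depends on "etcd" and "minio", which this compose file
# never defines. A minimal sketch of those services follows; the image tags and credentials
# are assumptions (check the Milvus deployment docs -- Milvus standalone images are usually
# tagged like milvusdb/milvus:v2.2.x and run with command: ["milvus", "run", "standalone"]).
# Add these under services:, and etcd_data / minio_data under volumes:.
#
#  etcd:
#    image: quay.io/coreos/etcd:v3.5.5
#    restart: unless-stopped
#    environment:
#      - ETCD_AUTO_COMPACTION_MODE=revision
#      - ETCD_AUTO_COMPACTION_RETENTION=1000
#      - ETCD_QUOTA_BACKEND_BYTES=4294967296
#    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
#    volumes:
#      - etcd_data:/etcd
#    networks:
#      - ai-network
#
#  minio:
#    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
#    restart: unless-stopped
#    environment:
#      - MINIO_ROOT_USER=minioadmin
#      - MINIO_ROOT_PASSWORD=minioadmin
#    command: minio server /minio_data
#    volumes:
#      - minio_data:/minio_data
#    networks:
#      - ai-network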

# Dockerfile - 优化后的生产环境镜像
# 阶段1:构建阶段
FROM python:3.9-slim as builder
WORKDIR /app
# 安装系统依赖
RUN apt-get update && apt-get install -y \
    gcc \
    g++ \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/*
# 复制依赖文件
COPY requirements.txt .
# 创建虚拟环境并安装依赖
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt
# 阶段2:运行阶段
FROM python:3.9-slim as runtime
WORKDIR /app
# 安装运行时依赖
RUN apt-get update && apt-get install -y \
    curl \
    libgomp1 \
    && rm -rf /var/lib/apt/lists/*
# 从构建阶段复制虚拟环境
COPY --from=builder /opt/venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
# 复制应用代码
COPY . .
# 创建非root用户
RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app
USER appuser
# 健康检查
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1
# 暴露端口
EXPOSE 8000
# 运行应用
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", \
     "--workers", "4", "--access-log", "--log-level", "info"]

⚡ Chapter 2: Hands-On Performance Optimization

2.1 Hands-On GPU Inference Optimization

# app/services/gpu_optimizer.py
import torch
import torch.nn as nn
from torch.cuda.amp import autocast, GradScaler
import time
from dataclasses import dataclass
from typing import List, Dict, Any
import psutil
import GPUtil
@dataclass
class GPUOptimizationConfig:
    """GPU优化配置"""
    use_fp16: bool = True
    use_cuda_graph: bool = True
    batch_size: int = 8
    max_seq_len: int = 512
    enable_quantization: bool = False
    quantization_bits: int = 8
    enable_streaming: bool = True
    cache_size: int = 1000
class GPUOptimizer:
    """GPU推理优化器"""
    
    def __init__(self, config: GPUOptimizationConfig):
        self.config = config
        self.scaler = GradScaler() if config.use_fp16 else None
        self.cuda_graphs = {}
        self.cache = {}
        
    def optimize_model(self, model: nn.Module, example_input: torch.Tensor):
        """优化模型推理"""
        # 1. 模型移到GPU
        model.cuda()
        
        # 2. 模型评估模式
        model.eval()
        
        # 3. 应用优化技术
        optimized_model = self.apply_optimizations(model, example_input)
        
        return optimized_model
    
    def apply_optimizations(self, model: nn.Module, example_input: torch.Tensor):
        """应用多种优化技术"""
        optimizations = []
        
        # 1. 半精度优化
        if self.config.use_fp16:
            model.half()
            optimizations.append("FP16精度")
        
        # 2. CUDA Graph优化(减少kernel启动开销)
        if self.config.use_cuda_graph:
            self.create_cuda_graph(model, example_input)
            optimizations.append("CUDA Graph")
        
        # 3. 量化优化
        if self.config.enable_quantization:
            model = self.quantize_model(model)
            optimizations.append(f"INT{self.config.quantization_bits}量化")
        
        # 4. 算子融合
        model = self.fuse_operators(model)
        optimizations.append("算子融合")
        
        # 5. 内存优化
        self.optimize_memory(model)
        optimizations.append("内存优化")
        
        print(f"已应用优化: {', '.join(optimizations)}")
        return model
    
    def quantize_model(self, model: nn.Module):
        """Dynamic INT8 quantization (PyTorch dynamic quantization covers Linear/LSTM
        layers and runs on CPU, so it suits CPU serving rather than GPU inference)"""
        if self.config.quantization_bits == 8:
            model = torch.quantization.quantize_dynamic(
                model,
                {nn.Linear},
                dtype=torch.qint8
            )
        return model
    
    def create_cuda_graph(self, model: nn.Module, example_input: torch.Tensor):
        """Capture a CUDA Graph for fixed-shape inference"""
        # A warm-up run on a side stream is required before capture
        side_stream = torch.cuda.Stream()
        side_stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(side_stream):
            with autocast(enabled=self.config.use_fp16):
                model(example_input)
        torch.cuda.current_stream().wait_stream(side_stream)

        g = torch.cuda.CUDAGraph()
        with torch.cuda.graph(g):
            with autocast(enabled=self.config.use_fp16):
                static_output = model(example_input)

        # New inputs must be copied into the captured input buffer before replay
        self.cuda_graphs[model] = (g, static_output)
    
    def inference_with_optimizations(self, model: nn.Module, inputs: List[torch.Tensor]):
        """优化后的推理"""
        batch_start_time = time.time()
        
        # 1. 动态批处理
        batched_inputs = self.dynamic_batching(inputs)
        
        # 2. 缓存检查
        cache_key = self.generate_cache_key(batched_inputs)
        if cache_key in self.cache:
            return self.cache[cache_key]
        
        # 3. Run inference batch by batch
        with torch.no_grad():
            if self.config.use_cuda_graph and model in self.cuda_graphs:
                # Replay the captured CUDA Graph (new data must first be copied
                # into the graph's captured input buffer in place)
                g, static_output = self.cuda_graphs[model]
                g.replay()
                outputs = static_output
            else:
                # Regular inference over the stacked batches
                with autocast(enabled=self.config.use_fp16):
                    outputs = [model(batch) for batch in batched_inputs]
        
        # 4. 结果处理
        processed_outputs = self.post_process(outputs)
        
        # 5. 更新缓存
        self.cache[cache_key] = processed_outputs
        if len(self.cache) > self.config.cache_size:
            # Evict the oldest entry (dicts preserve insertion order); popitem() would drop the newest
            self.cache.pop(next(iter(self.cache)))
        
        # 6. 监控性能
        self.monitor_performance(batch_start_time, len(inputs))
        
        return processed_outputs
    
    def dynamic_batching(self, inputs: List[torch.Tensor]):
        """动态批处理"""
        if len(inputs) == 0:
            return None
        
        # Adjust the batch size based on current GPU memory pressure
        # (fall back to a neutral value if no GPU is visible)
        gpus = GPUtil.getGPUs()
        current_gpu_usage = gpus[0].memoryUtil if gpus else 0.5
        
        if current_gpu_usage > 0.8:
            batch_size = max(1, self.config.batch_size // 2)
        elif current_gpu_usage < 0.3:
            batch_size = min(32, self.config.batch_size * 2)
        else:
            batch_size = self.config.batch_size
        
        # Group inputs by batch size and stack each group into a single tensor
        batches = [
            torch.stack(inputs[i:i + batch_size])
            for i in range(0, len(inputs), batch_size)
        ]

        # Zero-pad the last batch so every batch has the same shape
        last_batch = batches[-1]
        if last_batch.shape[0] < batch_size:
            pad_count = batch_size - last_batch.shape[0]
            padding = torch.zeros(
                (pad_count, *last_batch.shape[1:]),
                dtype=last_batch.dtype,
                device=last_batch.device,
            )
            batches[-1] = torch.cat([last_batch, padding], dim=0)

        return batches
    
    def monitor_performance(self, start_time: float, batch_size: int):
        """监控性能指标"""
        inference_time = time.time() - start_time
        throughput = batch_size / inference_time
        
        # 收集GPU信息
        gpus = GPUtil.getGPUs()
        gpu_info = gpus[0] if gpus else None
        
        metrics = {
            "inference_time_ms": inference_time * 1000,
            "throughput_qps": throughput,
            "batch_size": batch_size,
            "gpu_utilization": gpu_info.load * 100 if gpu_info else 0,
            "gpu_memory_used": gpu_info.memoryUsed if gpu_info else 0,
            "gpu_memory_total": gpu_info.memoryTotal if gpu_info else 0,
            "cpu_utilization": psutil.cpu_percent(),
            "memory_utilization": psutil.virtual_memory().percent
        }
        
        # 打印性能报告
        self.print_performance_report(metrics)
        
        # 发送到监控系统
        self.send_metrics_to_monitoring(metrics)
    
    def print_performance_report(self, metrics: Dict):
        """打印性能报告"""
        report = f"""
        ⚡ GPU推理性能报告 ⚡
        ------------------------------
        推理延迟: {metrics['inference_time_ms']:.2f} ms
        吞吐量: {metrics['throughput_qps']:.2f} QPS
        批大小: {metrics['batch_size']}
        GPU利用率: {metrics['gpu_utilization']:.1f}%
        GPU显存: {metrics['gpu_memory_used']}/{metrics['gpu_memory_total']} MB
        CPU利用率: {metrics['cpu_utilization']:.1f}%
        内存利用率: {metrics['memory_utilization']:.1f}%
        ------------------------------
        """
        print(report)
# 使用示例
def benchmark_optimizations():
    """性能优化对比测试"""
    # 原始模型
    original_model = load_llm_model()
    
    # 优化配置
    config = GPUOptimizationConfig(
        use_fp16=True,
        use_cuda_graph=True,
        batch_size=16,
        enable_quantization=True,
        quantization_bits=8
    )
    
    optimizer = GPUOptimizer(config)
    
    # 优化前基准测试
    print("🔍 优化前性能测试...")
    start_time = time.time()
    for _ in range(100):
        original_output = original_model(torch.stack(test_inputs))
    original_time = time.time() - start_time
    
    # 优化模型
    print("🔧 应用优化...")
    optimized_model = optimizer.optimize_model(original_model, test_inputs[0])
    
    # 优化后基准测试
    print("🚀 优化后性能测试...")
    start_time = time.time()
    for _ in range(100):
        optimized_output = optimizer.inference_with_optimizations(
            optimized_model, test_inputs
        )
    optimized_time = time.time() - start_time
    
    # 性能提升报告
    speedup = original_time / optimized_time
    print(f"""
    📊 性能优化对比结果
    ==============================
    优化前耗时: {original_time:.2f}s
    优化后耗时: {optimized_time:.2f}s
    性能提升: {speedup:.2f}x
    预计成本节省: {(1 - 1/speedup)*100:.1f}%
    ==============================
    """)

2.2 Hands-On Caching Strategies

# app/services/cache_manager.py
import redis
import pickle
import hashlib
import json
from datetime import datetime, timedelta
from typing import Any, Optional, Dict, List
import zlib
from functools import wraps
import asyncio
import time
from dataclasses import dataclass
from enum import Enum
@dataclass
class CacheConfig:
    """缓存配置"""
    redis_host: str = "localhost"
    redis_port: int = 6379
    redis_db: int = 0
    redis_password: Optional[str] = None
    default_ttl: int = 3600  # 默认1小时
    max_memory: str = "1gb"  # 最大内存
    compression: bool = True  # 是否压缩
    compression_level: int = 6  # 压缩级别
    enable_cluster: bool = False  # 是否集群模式
class CacheStrategy(Enum):
    """缓存策略"""
    LRU = "lru"  # 最近最少使用
    LFU = "lfu"  # 最不经常使用
    FIFO = "fifo"  # 先进先出
    RANDOM = "random"  # 随机淘汰
    TTL = "ttl"  # 基于过期时间
class SmartCacheManager:
    """智能缓存管理器"""
    
    def __init__(self, config: CacheConfig):
        self.config = config
        self.setup_redis()
        self.cache_stats = {
            "hits": 0,
            "misses": 0,
            "evictions": 0,
            "memory_usage": 0
        }
        
    def setup_redis(self):
        """初始化Redis连接"""
        if self.config.enable_cluster:
            from redis.cluster import RedisCluster
            self.redis = RedisCluster(
                startup_nodes=[
                    {"host": self.config.redis_host, "port": self.config.redis_port}
                ],
                decode_responses=False,
                password=self.config.redis_password
            )
        else:
            self.redis = redis.Redis(
                host=self.config.redis_host,
                port=self.config.redis_port,
                db=self.config.redis_db,
                password=self.config.redis_password,
                decode_responses=False,
                max_connections=50
            )
        
        # 配置Redis策略
        self.redis.config_set("maxmemory", self.config.max_memory)
        self.redis.config_set("maxmemory-policy", "allkeys-lru")
        
    def generate_key(self, *args, **kwargs) -> str:
        """生成缓存键"""
        # 将参数序列化为字符串
        key_parts = []
        
        # 处理位置参数
        for arg in args:
            if isinstance(arg, (str, int, float, bool)):
                key_parts.append(str(arg))
            else:
                key_parts.append(hashlib.md5(pickle.dumps(arg)).hexdigest())
        
        # 处理关键字参数
        for k, v in sorted(kwargs.items()):
            key_parts.append(f"{k}:{v}")
        
        # 生成最终key
        key_string = "|".join(key_parts)
        return f"cache:{hashlib.md5(key_string.encode()).hexdigest()}"
    
    def compress_data(self, data: Any) -> bytes:
        """压缩数据"""
        if not self.config.compression:
            return pickle.dumps(data)
        
        serialized = pickle.dumps(data)
        compressed = zlib.compress(serialized, level=self.config.compression_level)
        
        # 只有压缩率高于阈值时才使用压缩
        if len(compressed) < len(serialized) * 0.8:
            return b"compressed:" + compressed
        else:
            return b"uncompressed:" + serialized
    
    def decompress_data(self, data: bytes) -> Any:
        """解压数据"""
        if data.startswith(b"compressed:"):
            compressed = data[11:]  # 移除前缀
            serialized = zlib.decompress(compressed)
        elif data.startswith(b"uncompressed:"):
            serialized = data[13:]  # 移除前缀
        else:
            serialized = data
        
        return pickle.loads(serialized)
    
    def get(self, key: str, default: Any = None) -> Any:
        """获取缓存"""
        start_time = datetime.now()
        
        try:
            data = self.redis.get(key)
            
            if data is not None:
                # 缓存命中
                value = self.decompress_data(data)
                self.cache_stats["hits"] += 1
                
                # 更新访问时间(用于LRU)
                self.redis.zadd("cache:access:times", {key: datetime.now().timestamp()})
                
                # 记录性能指标
                self.record_metrics("hit", key, start_time)
                
                return value
            else:
                # 缓存未命中
                self.cache_stats["misses"] += 1
                self.record_metrics("miss", key, start_time)
                
                return default
                
        except Exception as e:
            # 缓存出错,降级处理
            print(f"缓存获取失败: {e}")
            return default
    
    def set(self, key: str, value: Any, ttl: Optional[int] = None) -> bool:
        """设置缓存"""
        try:
            # 压缩数据
            compressed_data = self.compress_data(value)
            
            # 计算TTL
            actual_ttl = ttl if ttl is not None else self.config.default_ttl
            
            # 存储到Redis
            result = self.redis.setex(key, actual_ttl, compressed_data)
            
            if result:
                # 记录访问时间
                self.redis.zadd("cache:access:times", {key: datetime.now().timestamp()})
                
                # 记录大小
                size = len(compressed_data)
                self.redis.hset("cache:sizes", key, size)
                
                # 更新统计信息
                self.update_memory_stats()
            
            return result
            
        except Exception as e:
            print(f"缓存设置失败: {e}")
            return False
    
    def cache_decorator(self, ttl: int = 3600, key_prefix: str = ""):
        """缓存装饰器"""
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                # 生成缓存键
                cache_key = self.generate_key(key_prefix, func.__name__, *args, **kwargs)
                
                # 尝试从缓存获取
                cached_result = self.get(cache_key)
                if cached_result is not None:
                    return cached_result
                
                # 缓存未命中,执行函数
                result = func(*args, **kwargs)
                
                # 将结果存入缓存
                self.set(cache_key, result, ttl)
                
                return result
            
            @wraps(func)
            async def async_wrapper(*args, **kwargs):
                # 生成缓存键
                cache_key = self.generate_key(key_prefix, func.__name__, *args, **kwargs)
                
                # 尝试从缓存获取
                cached_result = self.get(cache_key)
                if cached_result is not None:
                    return cached_result
                
                # 缓存未命中,执行异步函数
                result = await func(*args, **kwargs)
                
                # 将结果存入缓存
                self.set(cache_key, result, ttl)
                
                return result
            
            return async_wrapper if asyncio.iscoroutinefunction(func) else wrapper
        return decorator
    
    def record_metrics(self, cache_type: str, key: str, start_time: datetime):
        """记录性能指标"""
        duration = (datetime.now() - start_time).total_seconds() * 1000  # 毫秒
        
        metrics = {
            "timestamp": datetime.now().isoformat(),
            "cache_type": cache_type,
            "key": key[:50],  # 只记录前50个字符
            "duration_ms": duration,
            "hits": self.cache_stats["hits"],
            "misses": self.cache_stats["misses"],
            "hit_rate": self.cache_stats["hits"] / max(1, self.cache_stats["hits"] + self.cache_stats["misses"])
        }
        
        # 发送到监控系统
        self.send_to_monitoring(metrics)
    
    def update_memory_stats(self):
        """更新内存统计"""
        try:
            info = self.redis.info("memory")
            self.cache_stats["memory_usage"] = info.get("used_memory", 0)
            
            # 检查内存使用率
            memory_ratio = info.get("used_memory", 0) / max(1, info.get("maxmemory", 1))
            
            if memory_ratio > 0.8:
                # 内存使用率高,触发清理
                self.cleanup_old_cache()
            
        except Exception as e:
            print(f"更新内存统计失败: {e}")
    
    def cleanup_old_cache(self):
        """清理旧缓存"""
        try:
            # 获取最久未访问的100个key
            old_keys = self.redis.zrange("cache:access:times", 0, 99)
            
            if old_keys:
                # 删除这些key
                deleted = self.redis.delete(*old_keys)
                self.cache_stats["evictions"] += deleted
                
                # 从有序集合中移除
                self.redis.zrem("cache:access:times", *old_keys)
                
                print(f"清理了 {deleted} 个旧缓存")
        
        except Exception as e:
            print(f"缓存清理失败: {e}")
    
    def get_stats(self) -> Dict:
        """获取缓存统计"""
        return {
            **self.cache_stats,
            "hit_rate": self.cache_stats["hits"] / max(1, self.cache_stats["hits"] + self.cache_stats["misses"]),
            "total_operations": self.cache_stats["hits"] + self.cache_stats["misses"],
            "eviction_rate": self.cache_stats["evictions"] / max(1, self.cache_stats["hits"] + self.cache_stats["misses"])
        }
# 使用示例
def cache_demo():
    """缓存使用演示"""
    # 配置缓存
    config = CacheConfig(
        redis_host="localhost",
        redis_port=6379,
        default_ttl=300,  # 5分钟
        compression=True,
        compression_level=6
    )
    
    cache_manager = SmartCacheManager(config)
    
    # 使用缓存装饰器
    @cache_manager.cache_decorator(ttl=600, key_prefix="user_profile")
    def get_user_profile(user_id: str):
        """获取用户资料(模拟耗时操作)"""
        print(f"查询数据库获取用户 {user_id} 的资料...")
        time.sleep(2)  # 模拟数据库查询
        return {
            "user_id": user_id,
            "name": f"用户{user_id}",
            "email": f"user{user_id}@example.com",
            "created_at": datetime.now().isoformat()
        }
    
    # 测试缓存效果
    print("第一次调用(会查询数据库):")
    start_time = time.time()
    profile1 = get_user_profile("123")
    time1 = time.time() - start_time
    print(f"耗时: {time1:.2f}秒")
    
    print("\n第二次调用(从缓存获取):")
    start_time = time.time()
    profile2 = get_user_profile("123")
    time2 = time.time() - start_time
    print(f"耗时: {time2:.2f}秒")
    
    print(f"\n性能提升: {time1/time2:.1f}倍")
    
    # 获取缓存统计
    stats = cache_manager.get_stats()
    print(f"\n缓存统计: {stats}")

🔧 Chapter 3: Hands-On Monitoring and Operations

3.1 Hands-On Real-Time Monitoring System

# app/monitoring/real_time_monitor.py
import time
import psutil
import GPUtil
from prometheus_client import start_http_server, Gauge, Counter, Histogram, Summary
from typing import Dict, List, Optional
import threading
import json
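
The listing for the real-time monitor breaks off after its imports. The sketch below is one minimal completion consistent with those imports: Prometheus gauges for CPU/GPU/memory are exposed over HTTP and refreshed from a background thread. The metric names, port 9100, and the 15-second sampling interval are assumptions.

# Minimal completion sketch -- the original listing is truncated after the imports above.
CPU_UTIL = Gauge("ai_cpu_utilization_percent", "Host CPU utilization")
MEM_UTIL = Gauge("ai_memory_utilization_percent", "Host memory utilization")
GPU_UTIL = Gauge("ai_gpu_utilization_percent", "GPU utilization")
GPU_MEM_USED = Gauge("ai_gpu_memory_used_mb", "GPU memory used (MB)")
# Counters/histograms for the API layer to update on each request
REQUESTS = Counter("ai_requests_total", "Total requests served")
LATENCY = Histogram("ai_request_latency_seconds", "Request latency in seconds")

class RealTimeMonitor:
    """Sample host/GPU metrics in a background thread and expose them to Prometheus."""

    def __init__(self, port: int = 9100, interval_seconds: float = 15.0):
        self.port = port
        self.interval_seconds = interval_seconds
        self._stop = threading.Event()

    def start(self) -> None:
        # Prometheus scrapes this HTTP endpoint (add it as a target in prometheus.yml)
        start_http_server(self.port)
        threading.Thread(target=self._sample_loop, daemon=True).start()

    def stop(self) -> None:
        self._stop.set()

    def _sample_loop(self) -> None:
        while not self._stop.is_set():
            CPU_UTIL.set(psutil.cpu_percent())
            MEM_UTIL.set(psutil.virtual_memory().percent)
            gpus = GPUtil.getGPUs()
            if gpus:
                GPU_UTIL.set(gpus[0].load * 100)
                GPU_MEM_USED.set(gpus[0].memoryUsed)
            time.sleep(self.interval_seconds)

if __name__ == "__main__":
    RealTimeMonitor().start()
    while True:
        time.sleep(60)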

Putting large AI models into production is not just a technical challenge; it is a transformation of organization, process, and culture. Only by mastering an engineering mindset can you gain a real competitive edge in the AI era. 🚀
