跟AI学一手之运维Agent

最近 AI 圈好不热闹,全民养龙虾,第一波吃螃蟹的、第一波被坑的接连上热搜,我也来凑凑热闹,让 AI 帮我写了一个运维 Agent。虽然还不能直接跑起来,但借鉴学习一下还是不错的。使用的工具是 LangChain + LangGraph。

python 复制代码
# 运维Agent系统完整实现
# 需要安装: pip install langchain langgraph langchain-openai pydantic

from typing import TypedDict, Annotated, List, Dict, Any, Literal, Optional
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, ToolMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
from langgraph.checkpoint.memory import MemorySaver
from pydantic import BaseModel, Field
import operator
import json
from datetime import datetime
import random  # 模拟数据用

# ==================== 数据模型定义 ====================

class MetricData(BaseModel):
    """A single monitoring metric sample (one reading for one service)."""
    timestamp: str  # ISO-8601 sample time
    service: str  # emitting service name
    metric_name: str  # e.g. "latency_p99", "error_rate"
    value: float  # observed value
    threshold: float  # limit the value is judged against
    status: Literal["normal", "warning", "critical"]  # severity bucket
    
class Alert(BaseModel):
    """An alerting event raised by the monitoring agent."""
    id: str  # e.g. "ALT-20240101120000"
    severity: Literal["P0", "P1", "P2", "P3"]  # incident priority level
    service: str  # affected service
    description: str  # human-readable symptom summary
    metrics: List[MetricData]  # the samples that triggered the alert
    created_at: str  # ISO-8601 creation time
    
class DiagnosisResult(BaseModel):
    """Root-cause analysis output for one alert."""
    alert_id: str  # Alert.id this diagnosis belongs to
    root_cause: str  # most likely cause in prose
    confidence: float  # presumably 0..1 — confirm with the producer
    related_services: List[str]  # blast-radius estimate
    suggested_actions: List[str]  # candidate remediation steps
    risk_level: Literal["low", "medium", "high"]

class ExecutionPlan(BaseModel):
    """Remediation plan derived from a diagnosis."""
    diagnosis_id: str
    actions: List[Dict[str, Any]]  # ordered remediation steps
    rollback_plan: List[str]  # how to undo the actions if they fail
    requires_approval: bool  # True -> human sign-off before execution
    estimated_impact: str

# ==================== 全局状态定义 ====================

class OpsState(TypedDict):
    """Shared LangGraph state for the ops pipeline.

    Fields annotated with ``operator.add`` are *reducer* channels: LangGraph
    merges a node's returned value into the existing list by concatenation,
    so nodes should return only the NEW items for those keys.
    """
    messages: Annotated[List[Any], operator.add]  # conversation history (appended)
    current_alert: Optional[Alert]
    diagnosis: Optional[DiagnosisResult]
    execution_plan: Optional[ExecutionPlan]
    execution_result: Optional[str]
    logs: Annotated[List[str], operator.add]  # appended log lines
    pending_approval: bool  # True when a human must approve the plan
    next_step: str  # routing hint consumed by should_continue / route_after_tools

# ==================== 工具函数定义 ====================

@tool
def query_metrics(service: str, metric_type: str, time_range: str = "1h") -> str:
    """
    查询服务监控指标
    Args:
        service: 服务名称 (如: user-service, order-service)
        metric_type: 指标类型 (cpu, memory, latency, error_rate)
        time_range: 时间范围 (15m, 1h, 6h, 24h)
    """
    # Canned Prometheus-style readings keyed by metric type.
    mock_data = {
        "cpu": {"current": 85, "trend": "increasing", "anomaly": True},
        "memory": {"current": 72, "trend": "stable", "anomaly": False},
        "latency": {"current": 1200, "trend": "spiking", "anomaly": True},
        "error_rate": {"current": 5.2, "trend": "increasing", "anomaly": True}
    }

    payload = {
        "service": service,
        "metric": metric_type,
        "data": mock_data.get(metric_type, {}),
        "timestamp": datetime.now().isoformat()
    }
    return json.dumps(payload, ensure_ascii=False)

@tool
def query_logs(service: str, keyword: str, limit: int = 50) -> str:
    """
    查询服务日志
    Args:
        service: 服务名称
        keyword: 搜索关键词 (如: ERROR, Exception, timeout)
        limit: 返回条数
    """
    # Simulated log-store hits; a real implementation would filter by keyword.
    mock_logs = [
        f"[ERROR] {service} - Connection timeout to database at {datetime.now()}",
        f"[WARN] {service} - High latency detected: 1200ms",
        f"[ERROR] {service} - OutOfMemoryError: Java heap space",
        f"[INFO] {service} - Auto-scaling triggered"
    ]
    payload = {
        "service": service,
        "logs": mock_logs[:limit],
        "total": len(mock_logs)
    }
    return json.dumps(payload, ensure_ascii=False)

@tool
def get_service_topology(service: str) -> str:
    """
    获取服务依赖拓扑
    Args:
        service: 中心服务名称
    """
    # Static mock of the dependency graph around each known service.
    topology = {
        "user-service": {
            "upstream": ["api-gateway"],
            "downstream": ["user-db", "redis-cache"],
            "peers": ["order-service", "payment-service"]
        },
        "order-service": {
            "upstream": ["api-gateway", "user-service"],
            "downstream": ["order-db", "message-queue"],
            "peers": ["payment-service", "inventory-service"]
        }
    }
    # Unknown services yield an empty topology rather than an error.
    entry = topology.get(service, {})
    return json.dumps(entry, ensure_ascii=False)

@tool
def execute_kubectl(command: str, namespace: str = "default", dry_run: bool = True) -> str:
    """
    执行Kubernetes命令
    Args:
        command: kubectl命令 (如: scale, rollout restart, get pods)
        namespace: 命名空间
        dry_run: 是否仅模拟执行
    """
    rendered = f"kubectl {command} -n {namespace}"
    if dry_run:
        return f"[DRY RUN] 将执行: {rendered}"

    # Real execution would shell out here; mocked for the demo.
    return f"[EXECUTED] {rendered} - 执行成功"

@tool
def restart_service(service: str, strategy: str = "rolling") -> str:
    """
    重启服务
    Args:
        service: 服务名称
        strategy: 重启策略 (rolling, immediate, canary)
    """
    # Mock restart; returns the same status line the real tool would.
    return "服务 {0} 正在使用 {1} 策略重启...".format(service, strategy)

@tool
def scale_service(service: str, replicas: int) -> str:
    """
    扩缩容服务
    Args:
        service: 服务名称
        replicas: 目标副本数
    """
    # Mock scaling action.
    return "服务 {0} 扩容至 {1} 个副本".format(service, replicas)

@tool
def send_notification(channel: str, message: str, priority: str = "normal") -> str:
    """
    发送通知
    Args:
        channel: 通知渠道 (slack, email, sms, pagerduty)
        message: 消息内容
        priority: 优先级
    """
    # Only the first 50 characters of the message are echoed back.
    preview = message[:50]
    return f"[{channel.upper()}] 已发送{priority}优先级通知: {preview}..."

@tool
def create_incident_report(alert_id: str, resolution: str) -> str:
    """
    生成事故报告
    """
    # Mock report generation; returns a confirmation line.
    return "事故报告 {0} 已生成,解决方案: {1}".format(alert_id, resolution)

# ==================== LLM 模型初始化 ====================

# Shared chat model for all agents in this pipeline.
llm = ChatOpenAI(
    model="gpt-4-turbo-preview",
    temperature=0.1  # ops decisions should be near-deterministic
)

# Tool belt exposed to the model; bind_tools lets it emit tool calls.
tools = [
    query_metrics, query_logs, get_service_topology,
    execute_kubectl, restart_service, scale_service,
    send_notification, create_incident_report
]
llm_with_tools = llm.bind_tools(tools)

# ==================== Agent节点定义 ====================

def monitoring_agent(state: OpsState) -> OpsState:
    """Monitoring agent: detect an anomaly and raise an alert.

    BUG FIX: ``messages``/``logs`` are declared with an ``operator.add``
    reducer, so LangGraph concatenates whatever a node returns onto the
    existing list.  Returning ``state["messages"] + [msg]`` therefore
    duplicated the whole history on every merge; nodes must return only
    the changed keys, with just the NEW items for additive channels.
    """
    print("🔍 [监控Agent] 正在分析监控数据...")

    # Simulated detection result; a real system would consume
    # Prometheus / Alertmanager events here.
    mock_alert = Alert(
        id=f"ALT-{datetime.now().strftime('%Y%m%d%H%M%S')}",
        severity="P1",
        service="user-service",
        description="服务延迟飙升,错误率超过阈值",
        metrics=[
            MetricData(
                timestamp=datetime.now().isoformat(),
                service="user-service",
                metric_name="latency_p99",
                value=2500,
                threshold=1000,
                status="critical"
            ),
            MetricData(
                timestamp=datetime.now().isoformat(),
                service="user-service",
                metric_name="error_rate",
                value=8.5,
                threshold=5.0,
                status="warning"
            )
        ],
        created_at=datetime.now().isoformat()
    )

    msg = SystemMessage(content=f"""
    检测到新的运维事件:
    告警ID: {mock_alert.id}
    服务: {mock_alert.service}
    级别: {mock_alert.severity}
    描述: {mock_alert.description}
    
    相关指标:
    {json.dumps([m.dict() for m in mock_alert.metrics], indent=2)}
    
    请进入诊断阶段。
    """)

    # Delta only: new message, new alert, routing hint.
    return {
        "current_alert": mock_alert,
        "messages": [msg],
        "next_step": "diagnose",
    }

def diagnosis_agent(state: OpsState) -> OpsState:
    """Diagnosis agent: LLM-driven root-cause analysis.

    BUG FIXES:
    - return only the state delta — ``messages`` has an ``operator.add``
      reducer, so echoing the full history duplicated it on merge;
    - ``next_step`` now records the node to resume at AFTER the tool node
      runs ("diagnose", i.e. loop back here to continue the analysis),
      instead of "tools".  ``should_continue`` already detours to the tool
      node whenever the response contains tool calls, and
      ``route_after_tools`` needs a real agent node to return to —
      "tools" there caused an infinite tools->tools loop.
    """
    print("🧠 [诊断Agent] 正在进行根因分析...")

    alert = state["current_alert"]

    # Diagnosis prompt grounded in the current alert.
    prompt = f"""
    你是一位资深SRE专家,请对以下告警进行根因分析:
    
    告警信息:
    - 服务: {alert.service}
    - 症状: {alert.description}
    - 指标异常: {[m.metric_name for m in alert.metrics]}
    
    请使用可用工具查询以下信息辅助诊断:
    1. 查询相关服务的日志 (关键词: ERROR, timeout, exception)
    2. 查询上下游依赖状态
    3. 分析指标趋势
    
    然后给出:
    1. 最可能的根因 (置信度)
    2. 影响范围评估
    3. 建议的修复操作列表
    """

    response = llm_with_tools.invoke(
        state["messages"] + [HumanMessage(content=prompt)]
    )

    return {
        "messages": [response],
        "next_step": "diagnose" if response.tool_calls else "plan",
    }

def planning_agent(state: OpsState) -> OpsState:
    """Planning agent: turn the diagnosis into an execution plan.

    BUG FIX: returns only the state delta (``messages``/``logs`` use an
    ``operator.add`` reducer; echoing them back duplicated the history).
    """
    print("📋 [决策Agent] 正在制定修复方案...")

    # Ask the model for a structured remediation plan.
    prompt = """
    基于诊断结果,请制定详细的修复执行计划:
    
    要求:
    1. 列出具体的操作步骤
    2. 评估每个步骤的风险等级
    3. 制定回滚方案
    4. 判断是否需要人工审批 (高风险操作需要)
    
    可用操作:
    - 服务重启 (restart_service)
    - 扩缩容 (scale_service)
    - 配置回滚 (execute_kubectl rollout undo)
    - 流量切换 (execute_kubectl patch)
    
    请以JSON格式输出执行计划。
    """

    response = llm.invoke(state["messages"] + [HumanMessage(content=prompt)])

    # Heuristic approval gate: any mention of high risk, or a P0 alert,
    # requires a human in the loop (severity is already a string Literal,
    # no str() round-trip needed).
    requires_approval = (
        "high" in response.content.lower()
        or state["current_alert"].severity == "P0"
    )

    return {
        "messages": [response],
        "pending_approval": requires_approval,
        "next_step": "approval_check",
    }

def approval_node(state: OpsState) -> OpsState:
    """Approval gate: pause for a human when the plan is high-risk.

    BUG FIXES: LangChain ``@tool`` objects are Runnables — they must be
    called with ``.invoke({...})``, not as plain functions; the node also
    returns only the state delta so the additive ``messages``/``logs``
    channels are not duplicated on merge.
    """
    if state["pending_approval"]:
        print("⏸️ [审批节点] 高风险操作,等待人工审批...")
        # Notify an operator; a real system would block on a webhook or a
        # LangGraph interrupt instead of just ending the run.
        send_notification.invoke({
            "channel": "slack",
            "message": f"🚨 请审批修复方案: {state['current_alert'].id}",
            "priority": "high",
        })
        return {"next_step": "awaiting_approval"}

    print("✅ [审批节点] 低风险操作,自动通过")
    return {"next_step": "execute"}

def execution_agent(state: OpsState) -> OpsState:
    """Execution agent: drive the automated remediation via tool calls.

    BUG FIXES: returns only the state delta (additive reducer channels);
    ``next_step`` records where to resume after the tool node ("execute",
    i.e. back here) — ``should_continue`` already detours to "tools"
    whenever tool calls are present, so "tools" here caused a loop.
    """
    print("🔧 [执行Agent] 正在执行修复操作...")

    prompt = """
    现在执行修复计划。请根据之前的诊断和计划:
    1. 按顺序调用必要的工具执行修复
    2. 每个步骤后验证结果
    3. 如失败则触发回滚
    
    当前支持的操作工具已绑定。
    """

    response = llm_with_tools.invoke(
        state["messages"] + [HumanMessage(content=prompt)]
    )

    return {
        "messages": [response],
        "next_step": "execute" if response.tool_calls else "verify",
    }

def verification_agent(state: OpsState) -> OpsState:
    """Verification agent: check that the fix actually worked.

    BUG FIX: returns only the state delta — additive reducer channels
    must not be echoed back, or they get duplicated on merge.
    """
    print("✅ [验证Agent] 正在验证修复效果...")

    alert = state["current_alert"]

    # Simulated post-fix health check; a real system would re-query metrics.
    verification_result = {
        "service": alert.service,
        "metrics_status": "normal",
        "latency": 450,  # back under the 1000ms threshold
        "error_rate": 0.2,
        "verification_time": datetime.now().isoformat()
    }

    prompt = f"""
    修复验证结果:
    {json.dumps(verification_result, indent=2)}
    
    请判断:
    1. 故障是否已解决?
    2. 是否需要后续观察?
    3. 生成事故总结报告
    """

    response = llm.invoke(state["messages"] + [HumanMessage(content=prompt)])

    return {
        "messages": [response],
        "execution_result": "success",
        "next_step": "report",
    }

def reporting_agent(state: OpsState) -> OpsState:
    """Reporting agent: build the incident report and notify stakeholders.

    BUG FIXES: LangChain tools are Runnables and must be called with
    ``.invoke({...})``, not as plain functions; the node also returns only
    the state delta so additive channels are not duplicated on merge.
    """
    print("📝 [报告Agent] 生成事故报告...")

    alert = state["current_alert"]

    report = f"""
    ========================================
    运维事件处理报告
    ========================================
    事件ID: {alert.id}
    处理时间: {datetime.now().isoformat()}
    影响服务: {alert.service}
    严重级别: {alert.severity}
    
    处理流程:
    1. 监控检测 -> 2. 智能诊断 -> 3. 方案制定 -> 4. 自动修复 -> 5. 效果验证
    
    状态: 已解决 ✅
    
    后续行动:
    - 持续监控24小时
    - 复盘会议已预约
    """

    # Side effects: archive the incident and close the loop with the team.
    create_incident_report.invoke({"alert_id": alert.id, "resolution": "自动修复成功"})
    send_notification.invoke({
        "channel": "slack",
        "message": f"✅ 事件 {alert.id} 已处理完成",
        "priority": "normal",
    })

    return {
        "messages": [AIMessage(content=report)],
        "next_step": "end",
    }

# ==================== 工具节点与条件路由 ====================

# Executes whatever tool calls the last AI message contains.
tool_node = ToolNode(tools)

def should_continue(state: OpsState) -> str:
    """Pick the next node: detour to tools when the last message asks for them."""
    tail = state["messages"][-1]
    wants_tools = bool(getattr(tail, 'tool_calls', None))
    if wants_tools:
        return "tools"
    # Otherwise follow the routing hint left by the previous node.
    return state["next_step"]

def route_after_tools(state: OpsState) -> str:
    """After the tool node, resume wherever next_step points."""
    return state["next_step"]

# ==================== 构建LangGraph工作流 ====================

workflow = StateGraph(OpsState)

# Register all pipeline nodes.
workflow.add_node("monitor", monitoring_agent)
workflow.add_node("diagnose", diagnosis_agent)
workflow.add_node("plan", planning_agent)
workflow.add_node("approval_check", approval_node)
workflow.add_node("execute", execution_agent)
workflow.add_node("verify", verification_agent)
workflow.add_node("report", reporting_agent)
workflow.add_node("tools", tool_node)

workflow.set_entry_point("monitor")

workflow.add_conditional_edges(
    "monitor",
    should_continue,
    {"diagnose": "diagnose", "tools": "tools"}
)

workflow.add_conditional_edges(
    "diagnose",
    should_continue,
    {"plan": "plan", "tools": "tools", "diagnose": "diagnose"}
)

# BUG FIX: add_edge() expects a *node name* as its target; passing the
# routing function raised at graph-build time.  Routing through a callable
# is done with add_conditional_edges; the map covers every node that can
# appear in next_step after tools run.
workflow.add_conditional_edges(
    "tools",
    route_after_tools,
    {"diagnose": "diagnose", "plan": "plan", "execute": "execute", "verify": "verify"}
)

workflow.add_conditional_edges(
    "plan",
    should_continue,
    {"approval_check": "approval_check"}
)

workflow.add_conditional_edges(
    "approval_check",
    should_continue,
    {"awaiting_approval": END, "execute": "execute"}
)

workflow.add_conditional_edges(
    "execute",
    should_continue,
    {"tools": "tools", "verify": "verify"}
)

workflow.add_conditional_edges(
    "verify",
    should_continue,
    {"report": "report", "execute": "execute"}
)

workflow.add_edge("report", END)

# Checkpointing gives the graph per-thread_id memory across invocations.
memory = MemorySaver()
app = workflow.compile(checkpointer=memory)

# ==================== 运行示例 ====================

def run_ops_agent():
    """Run the ops agent end to end and pretty-print progress.

    BUG FIX: ``app.stream()`` yields one ``{node_name: state_delta}``
    mapping per super-step.  The original compared the *node name* against
    "messages" and indexed the delta as if it were a message list, so the
    AI-output branch could never fire.  We now pull ``messages`` out of
    each node's delta dict.
    """
    print("🚀 启动智能运维Agent系统...")
    print("=" * 50)

    initial_state = {
        "messages": [],
        "current_alert": None,
        "diagnosis": None,
        "execution_plan": None,
        "execution_result": None,
        "logs": [],
        "pending_approval": False,
        "next_step": "monitor"
    }

    # thread_id keys the MemorySaver checkpoint for this incident.
    config = {"configurable": {"thread_id": "ops-incident-001"}}

    for event in app.stream(initial_state, config):
        for node_name, delta in event.items():
            print(f"节点 [{node_name}] 完成")
            messages = (delta or {}).get("messages") or []
            last = messages[-1] if messages else None
            content = getattr(last, "content", None)
            if content:
                print(f"\n🤖 Agent输出:\n{content[:500]}...")

    print("\n" + "=" * 50)
    print("✅ 运维事件处理流程结束")

if __name__ == "__main__":
    run_ops_agent()

然后,如果工具函数特别多,就需要进行工具治理,所以我又让 AI 写了一段工具治理代码。

python 复制代码
"""
企业级运维Agent工具治理系统
架构:工具路由器(Tool Router) + 领域Agent(Domain Agent) + 工具注册中心
"""

from collections import defaultdict
from dataclasses import dataclass, field
from datetime import datetime
from functools import lru_cache
import hashlib
import json
import operator
from typing import TypedDict, Annotated, List, Dict, Any, Literal, Optional, Callable

import numpy as np

from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, ToolMessage
from langchain_core.tools import tool, StructuredTool, BaseTool
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
from pydantic import BaseModel, Field

# ==================== 核心架构:工具注册中心 ====================

class ToolMetadata(BaseModel):
    """Metadata record describing one ops tool for registry indexing."""
    name: str
    description: str
    category: str  # compute, network, storage, security, database
    subcategory: str  # k8s, aws, ali, monitoring
    risk_level: Literal["read", "write", "critical"]  # permission tier
    tags: List[str]  # used for vector retrieval
    input_schema: Dict[str, Any]
    output_schema: Dict[str, Any]
    version: str = "1.0"
    owner: str = "platform-team"
    deprecated: bool = False

    def embedding_text(self) -> str:
        """Build the text whose embedding powers semantic tool search."""
        return f"{self.name}: {self.description}. Category: {self.category}. Tags: {', '.join(self.tags)}"

class ToolRegistry:
    """工具注册中心 - 统一管理所有工具

    Stores tool objects, their metadata, per-tool embeddings for semantic
    search, and category/risk/tag secondary indexes.
    """

    def __init__(self):
        self._tools: Dict[str, BaseTool] = {}
        self._metadata: Dict[str, ToolMetadata] = {}
        self._embeddings: Dict[str, List[float]] = {}
        self.embedding_model = OpenAIEmbeddings()

        # Secondary indexes for fast filtered lookups.
        self._category_index: Dict[str, List[str]] = defaultdict(list)
        self._risk_index: Dict[str, List[str]] = defaultdict(list)
        self._tag_index: Dict[str, List[str]] = defaultdict(list)

    def register(self, tool_obj: BaseTool, metadata: ToolMetadata):
        """Register a tool and index it by category, risk level and tags."""
        self._tools[metadata.name] = tool_obj
        self._metadata[metadata.name] = metadata

        self._category_index[metadata.category].append(metadata.name)
        self._risk_index[metadata.risk_level].append(metadata.name)
        for tag in metadata.tags:
            self._tag_index[tag].append(metadata.name)

        # BUG FIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt.  Embedding stays best-effort (the tool just
        # won't be semantically searchable on failure), but only ordinary
        # exceptions are ignored now.  For async contexts this could be
        # moved to an async task.
        try:
            self._embeddings[metadata.name] = self.embedding_model.embed_query(
                metadata.embedding_text()
            )
        except Exception:
            pass  # degrade gracefully

    def search_by_intent(self, query: str, top_k: int = 5) -> List[ToolMetadata]:
        """Rank registered tools by cosine similarity to ``query``."""
        query_vec = np.asarray(self.embedding_model.embed_query(query))
        query_norm = np.linalg.norm(query_vec)

        scores = []
        for name, vec in self._embeddings.items():
            v = np.asarray(vec)
            denom = query_norm * np.linalg.norm(v)
            # Guard against zero-length vectors (would divide by zero).
            similarity = float(np.dot(query_vec, v) / denom) if denom else 0.0
            scores.append((name, similarity))

        scores.sort(key=lambda x: x[1], reverse=True)
        return [self._metadata[name] for name, _ in scores[:top_k]]

    def get_by_category(self, category: str, limit: int = 10) -> List[BaseTool]:
        """Return up to ``limit`` tools registered under ``category``."""
        names = self._category_index.get(category, [])[:limit]
        return [self._tools[name] for name in names if name in self._tools]

    def get_by_risk(self, risk_level: str) -> List[BaseTool]:
        """Return all tools at the given risk level."""
        names = self._risk_index.get(risk_level, [])
        return [self._tools[name] for name in names if name in self._tools]

    def get_tool(self, name: str) -> Optional[BaseTool]:
        """Look up a tool by name; None when not registered."""
        return self._tools.get(name)

# Global, process-wide registry singleton; tools self-register here at startup.
registry = ToolRegistry()

# ==================== 第一层:工具路由器 ====================

class ToolRouter:
    """
    工具路由器:负责意图识别和工具分发
    类比:K8s Ingress Controller / API Gateway

    Maps a user request to a target domain plus a shortlist of candidate
    tools, with a vector-search fallback when the LLM output is unusable.
    """

    def __init__(self, registry: ToolRegistry):
        self.registry = registry
        self.llm = ChatOpenAI(model="gpt-4-turbo-preview", temperature=0)

        # Lightweight domain-classification prompt.
        self.domain_prompt = ChatPromptTemplate.from_messages([
            ("system", """你是运维工具路由器,负责将用户请求分发到正确的领域Agent。
            
可用领域:
1. kubernetes - K8s集群管理、Pod操作、Deployment管理
2. cloud_infra - 云资源管理、ECS、VPC、SLB
3. database - 数据库运维、SQL执行、备份恢复
4. monitoring - 监控告警、日志查询、链路追踪
5. security - 安全审计、漏洞扫描、权限检查
6. network - 网络诊断、DNS、CDN、防火墙
7. ci_cd - 发布部署、流水线管理、制品库

请分析用户请求,输出JSON格式:
{
    "domain": "领域名称",
    "confidence": 0.95,
    "sub_intents": ["具体操作1", "操作2"],
    "risk_assessment": "read/write/critical"
}"""),
            ("human", "{input}")
        ])

    def route(self, user_input: str, context: Optional[Dict] = None) -> Dict[str, Any]:
        """Classify the request and recall candidate tools.

        Returns a decision dict with at least ``domain`` and ``tools`` keys.
        """
        # 1. Coarse classification by the LLM.
        response = self.llm.invoke(
            self.domain_prompt.format_messages(input=user_input)
        )

        # BUG FIX: the bare ``except:`` swallowed everything, including
        # KeyboardInterrupt.  Only JSON-parsing failures should trigger
        # the vector-search fallback.
        try:
            decision = json.loads(response.content)
        except (json.JSONDecodeError, TypeError):
            relevant_tools = self.registry.search_by_intent(user_input, top_k=3)
            categories = list(set([t.category for t in relevant_tools]))
            decision = {
                "domain": categories[0] if categories else "general",
                "confidence": 0.7,
                "tools": [t.name for t in relevant_tools],
                "risk_assessment": "read"
            }

        # 2. Recall tools for the chosen domain if the LLM didn't name any.
        if "tools" not in decision:
            decision["tools"] = self._recall_tools(decision, user_input)

        return decision

    def _recall_tools(self, decision: Dict, query: str) -> List[str]:
        """Multi-path tool recall: category + vector similarity + risk filter."""
        tools = set()

        # Path 1: everything registered under the predicted category.
        domain_tools = self.registry.get_by_category(decision["domain"])
        tools.update([t.name for t in domain_tools])

        # Path 2: semantically similar tools.
        similar_tools = self.registry.search_by_intent(query, top_k=5)
        tools.update([t.name for t in similar_tools])

        # Path 3: surface critical tools explicitly so high-risk requests
        # get the extra-confirmation treatment.
        if decision.get("risk_assessment") == "critical":
            critical_tools = self.registry.get_by_risk("critical")
            tools.update([t.name for t in critical_tools])

        return list(tools)[:10]  # cap the tool context size

# ==================== 第二层:领域Agent ====================

class DomainAgent:
    """
    领域专用Agent:每个领域维护自己的工具集
    类比:微服务架构中的各个Service
    """

    def __init__(self, name: str, registry: ToolRegistry):
        self.name = name
        self.registry = registry
        self.llm = ChatOpenAI(model="gpt-4-turbo-preview", temperature=0)
        self.tools: List[BaseTool] = []
        # BUG FIX: execute() used self.llm_with_tools, which only existed
        # after load_tools() had been called; give it a safe default so the
        # agent works (tool-less) before any tools are loaded.
        self.llm_with_tools = self.llm

    def load_tools(self, tool_names: List[str]):
        """Resolve tool names against the registry and rebind the LLM."""
        loaded = []
        for name in tool_names:
            # Single registry lookup per name (the original looked up twice).
            tool_obj = self.registry.get_tool(name)
            if tool_obj is not None:
                loaded.append(tool_obj)
        self.tools = loaded
        # bind_tools with an empty list is pointless; fall back to the bare LLM.
        self.llm_with_tools = self.llm.bind_tools(self.tools) if self.tools else self.llm

    def execute(self, task: str, context: Dict) -> Dict[str, Any]:
        """执行领域任务 — run one task with the domain's current tool belt."""
        # Domain-specific system prompt listing the loaded tools.
        system_prompt = f"""你是{self.name}领域专家,擅长使用以下工具解决问题:
        
可用工具: {[t.name for t in self.tools]}

约束:
1. 优先使用只读工具进行诊断
2. 写操作需要明确确认
3. 错误时提供清晰的排查步骤
"""

        messages = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=f"任务: {task}\n上下文: {json.dumps(context, ensure_ascii=False)}")
        ]

        response = self.llm_with_tools.invoke(messages)
        tool_calls = getattr(response, 'tool_calls', []) or []
        return {
            "domain": self.name,
            "response": response,
            "tools_used": tool_calls,
            "status": "pending_tools" if tool_calls else "completed"
        }

# ==================== 第三层:具体工具实现 ====================

# 模拟大量工具注册(实际项目中这些会分散在不同模块)

def create_all_tools():
    """Create every mock ops tool and register it with the global registry.

    Each entry is a (name, description, category, subcategory, risk, tags)
    tuple; in a real project these definitions would live in their own
    modules / config rather than one function.
    """
    
    # === Kubernetes domain tools (20+) ===
    k8s_tools = [
        ("k8s_get_pods", "获取Pod列表", "compute", "k8s", "read", ["pod", "status"]),
        ("k8s_get_logs", "获取容器日志", "compute", "k8s", "read", ["logs", "debug"]),
        ("k8s_describe_pod", "查看Pod详情", "compute", "k8s", "read", ["describe", "yaml"]),
        ("k8s_exec_command", "在容器中执行命令", "compute", "k8s", "write", ["exec", "bash", "debug"]),
        ("k8s_delete_pod", "删除Pod", "compute", "k8s", "write", ["delete", "restart"]),
        ("k8s_scale_deployment", "扩缩容Deployment", "compute", "k8s", "write", ["scale", "replicas", "hpa"]),
        ("k8s_rollout_restart", "滚动重启", "compute", "k8s", "write", ["restart", "deployment"]),
        ("k8s_rollout_status", "查看发布状态", "compute", "k8s", "read", ["rollout", "status"]),
        ("k8s_apply_yaml", "应用YAML配置", "compute", "k8s", "critical", ["apply", "create", "update"]),
        ("k8s_get_events", "查看集群事件", "compute", "k8s", "read", ["events", "warning"]),
        ("k8s_top_nodes", "查看节点资源使用", "compute", "k8s", "read", ["metrics", "node", "cpu", "memory"]),
        ("k8s_cordon_node", "标记节点不可调度", "compute", "k8s", "write", ["cordon", "drain", "maintenance"]),
        ("k8s_drain_node", "驱逐节点上的Pod", "compute", "k8s", "critical", ["drain", "evict", "maintenance"]),
        ("k8s_port_forward", "端口转发", "compute", "k8s", "read", ["port-forward", "local"]),
        ("k8s_get_secret", "查看Secret", "security", "k8s", "read", ["secret", "config"]),
        ("k8s_create_secret", "创建Secret", "security", "k8s", "critical", ["secret", "create"]),
    ]
    
    # === Cloud infrastructure tools (15+) ===
    cloud_tools = [
        ("aws_describe_instances", "查询EC2实例", "compute", "aws", "read", ["ec2", "instance"]),
        ("aws_start_instance", "启动实例", "compute", "aws", "write", ["start", "power"]),
        ("aws_stop_instance", "停止实例", "compute", "aws", "write", ["stop", "power"]),
        ("aws_reboot_instance", "重启实例", "compute", "aws", "write", ["reboot"]),
        ("aws_modify_instance_type", "修改实例规格", "compute", "aws", "critical", ["resize", "type"]),
        ("aws_create_snapshot", "创建磁盘快照", "storage", "aws", "write", ["snapshot", "backup"]),
        ("ali_get_rds_instances", "查询RDS实例", "database", "ali", "read", ["rds", "mysql"]),
        ("ali_create_rds_backup", "创建RDS备份", "database", "ali", "write", ["backup", "rds"]),
    ]
    
    # === Monitoring tools (10+) ===
    monitoring_tools = [
        ("prometheus_query", "Prometheus查询", "monitoring", "prometheus", "read", ["metrics", "promql"]),
        ("grafana_get_dashboard", "获取Grafana仪表盘", "monitoring", "grafana", "read", ["dashboard", "view"]),
        ("alertmanager_get_alerts", "获取告警列表", "monitoring", "alertmanager", "read", ["alerts", "firing"]),
        ("alertmanager_silence", "静默告警", "monitoring", "alertmanager", "write", ["silence", "mute"]),
        ("jaeger_trace_query", "查询链路追踪", "monitoring", "jaeger", "read", ["trace", "span"]),
        ("loki_query_logs", "查询Loki日志", "monitoring", "loki", "read", ["logs", "grafana"]),
    ]
    
    # === Database tools (12+) ===
    db_tools = [
        ("mysql_execute_query", "执行MySQL查询", "database", "mysql", "read", ["sql", "select"]),
        ("mysql_show_processlist", "查看连接列表", "database", "mysql", "read", ["process", "connection"]),
        ("mysql_kill_thread", "终止线程", "database", "mysql", "write", ["kill", "terminate"]),
        ("redis_info", "查看Redis信息", "database", "redis", "read", ["info", "stats"]),
        ("redis_slowlog", "查看慢查询", "database", "redis", "read", ["slowlog", "performance"]),
        ("redis_flush_db", "清空数据库", "database", "redis", "critical", ["flush", "delete"]),
        ("mongo_rs_status", "查看副本集状态", "database", "mongo", "read", ["replica", "status"]),
    ]
    
    # === Network tools (8+) ===
    network_tools = [
        ("ping", "网络连通性测试", "network", "basic", "read", ["ping", "icmp"]),
        ("traceroute", "路由追踪", "network", "basic", "read", ["route", "trace"]),
        ("nslookup", "DNS查询", "network", "dns", "read", ["dns", "resolve"]),
        ("dig", "高级DNS查询", "network", "dns", "read", ["dns", "dig"]),
        ("curl", "HTTP请求测试", "network", "http", "read", ["http", "test"]),
        ("tcpdump", "抓包分析", "network", "advanced", "read", ["packet", "capture"]),
    ]
    
    all_tools = k8s_tools + cloud_tools + monitoring_tools + db_tools + network_tools
    
    # Register everything with the global registry.
    for name, desc, category, sub, risk, tags in all_tools:
        # Build a mock implementation; ``n=name`` binds the loop variable as
        # a default argument, avoiding the classic late-binding closure bug.
        def make_tool_func(n=name):
            return lambda **kwargs: f"[{n}] 执行结果: 模拟数据"
        
        tool_obj = StructuredTool.from_function(
            func=make_tool_func(),
            name=name,
            description=desc
        )
        
        metadata = ToolMetadata(
            name=name,
            description=desc,
            category=category,
            subcategory=sub,
            risk_level=risk,
            tags=tags,
            input_schema={"type": "object"},
            output_schema={"type": "object"}
        )
        
        registry.register(tool_obj, metadata)
    
    print(f"✅ 已注册 {len(all_tools)} 个工具到注册中心")

# ==================== 主工作流:分层调用 ====================

class HierarchicalOpsState(TypedDict):
    """State for the router -> domain -> synthesize flow.

    The ``Annotated[..., operator.add]`` channels are reducers: LangGraph
    appends whatever a node returns for those keys.  Requires
    ``import operator`` at module top.
    """
    user_input: str  # raw user request
    router_decision: Optional[Dict]  # output of ToolRouter.route()
    domain_results: Annotated[List[Dict], operator.add]  # one entry per domain run
    final_response: Optional[str]  # synthesized answer
    execution_trace: Annotated[List[str], operator.add]  # human-readable step log

class HierarchicalOpsAgent:
    """
    分层运维Agent主控制器

    Orchestrates router -> domain agent -> synthesis as a LangGraph flow.
    """

    def __init__(self):
        self.registry = registry
        self.router = ToolRouter(registry)
        self.domain_agents: Dict[str, DomainAgent] = {}

        # One agent per supported domain.
        for domain in ["kubernetes", "cloud_infra", "database", "monitoring", "security", "network"]:
            self.domain_agents[domain] = DomainAgent(domain, registry)

        self.workflow = self._build_workflow()

    def _build_workflow(self):
        """Build the three-stage LangGraph.

        BUG FIX: ``domain_results`` and ``execution_trace`` carry an
        ``operator.add`` reducer, so LangGraph concatenates whatever a node
        returns onto the existing list.  The original nodes returned
        ``state[...] + [x]``, duplicating the history on every merge —
        nodes must return only the NEW items.
        """

        def router_node(state: HierarchicalOpsState):
            """Routing-decision node."""
            decision = self.router.route(state["user_input"])
            return {
                "router_decision": decision,
                "execution_trace": [f"路由决策: {decision['domain']}"]
            }

        def domain_dispatch_node(state: HierarchicalOpsState):
            """Dispatch the task to the matching domain agent."""
            decision = state["router_decision"]
            domain = decision["domain"]

            if domain not in self.domain_agents:
                return {"final_response": f"不支持的领域: {domain}"}

            agent = self.domain_agents[domain]
            # Tolerate a router decision that carries no tool shortlist.
            agent.load_tools(decision.get("tools", []))

            result = agent.execute(
                task=state["user_input"],
                context={"original_input": state["user_input"]}
            )

            return {
                "domain_results": [result],
                "execution_trace": [f"领域执行: {domain}"]
            }

        def synthesize_node(state: HierarchicalOpsState):
            """Merge the per-domain results into one answer."""
            results = state["domain_results"]

            if not results:
                # Nothing to merge (e.g. unsupported domain) — keep whatever
                # final_response an earlier node already set.
                return {"execution_trace": ["结果合成完成"]}

            if len(results) == 1:
                final = results[0]["response"].content
            else:
                # Multi-domain collaboration needs an LLM to merge answers.
                synthesis_prompt = f"""综合以下各领域分析结果,给出统一回答:
                
{json.dumps([r["response"].content for r in results], indent=2, ensure_ascii=False)}

请给出:
1. 问题根因总结
2. 跨领域影响分析  
3. 建议操作步骤(按优先级排序)
4. 风险提醒
"""
                llm = ChatOpenAI(model="gpt-4-turbo-preview")
                final = llm.invoke(synthesis_prompt).content

            return {
                "final_response": final,
                "execution_trace": ["结果合成完成"]
            }

        # Assemble the linear three-node graph.
        workflow = StateGraph(HierarchicalOpsState)
        workflow.add_node("router", router_node)
        workflow.add_node("domain_execute", domain_dispatch_node)
        workflow.add_node("synthesize", synthesize_node)

        workflow.set_entry_point("router")
        workflow.add_edge("router", "domain_execute")
        workflow.add_edge("domain_execute", "synthesize")
        workflow.add_edge("synthesize", END)

        return workflow.compile()

    def run(self, user_input: str):
        """运行分层Agent — invoke the graph and pretty-print the trace."""
        initial_state = {
            "user_input": user_input,
            "router_decision": None,
            "domain_results": [],
            "final_response": None,
            "execution_trace": []
        }

        result = self.workflow.invoke(initial_state)

        print("=" * 60)
        print("🔍 执行链路追踪:")
        for step in result["execution_trace"]:
            print(f"  → {step}")

        print("\n📊 最终响应:")
        print(result["final_response"])

        return result

# ==================== 运行示例 ====================

if __name__ == "__main__":
    # Populate the registry first (a real deployment would load these
    # definitions from config files or a database).
    create_all_tools()

    print(f"\n📦 注册中心统计:")
    print(f"  - 总工具数: {len(registry._tools)}")
    print(f"  - 分类数: {len(registry._category_index)}")
    for category, members in registry._category_index.items():
        print(f"    · {category}: {len(members)}个")

    # Spin up the hierarchical controller.
    agent = HierarchicalOpsAgent()

    # Representative scenarios covering several domains.
    test_queries = [
        "user-service的Pod最近频繁重启,帮我查一下日志和事件",
        "生产MySQL数据库CPU飙高,查询慢,需要紧急处理",
        "昨晚的发布导致订单服务延迟增加,需要回滚",
        "扫描一下集群里的安全漏洞和异常配置",
    ]

    for query in test_queries:
        print(f"\n{'='*60}")
        print(f"📝 用户输入: {query}")
        agent.run(query)
相关推荐
2501_943124052 小时前
专精特新之路:青岛福尔蒂新材料的功能母粒品牌突围战略
大数据·人工智能
季远迩2 小时前
240. 搜索二维矩阵 II(中等)
人工智能·算法·矩阵
WLJT1231231232 小时前
赋能工业制造 铸就品质基石
人工智能·制造
Lab_AI2 小时前
创腾科技推出DeepSeek智能一体机:AI4S驱动研发效率提升300%,打造科学家“第二大脑”
人工智能·ai4s·deepseek·科学智能
IT_陈寒2 小时前
Redis 缓存击穿 vs 雪崩:5个实战方案让你的系统稳如磐石
前端·人工智能·后端
1941s2 小时前
Dify(Agent + RAG)指南:从安装到实战的开源 LLM 应用开发平台
人工智能·低代码
fareast_mzh2 小时前
Mistral AI本地部署 C++无需Nvidiad独立显卡也能运行(CPU推理)
开发语言·c++·人工智能
深小乐2 小时前
从 AI Skills 学实战技能(二):读懂 skill-vetter,学会 Skills 安全审查
人工智能
NocoBase2 小时前
【教程】用 NocoBase 2.0 搭建一个极简的 IT 工单系统
人工智能·开源·github·无代码