Langchain mcp 可视化界面

界面

依赖包

bash
    "gradio>=6.13.0",
    "langchain>=1.2.15",
    "langchain-mcp-adapters>=0.2.2",
    "langchain-openai>=1.1.12",
    "langgraph-cli[inmem]>=0.4.21",

代码

python
import asyncio
import uuid
from typing import List, Dict

import gradio as gr
from gradio import ChatMessage
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.types import Command
from langgraph.checkpoint.memory import InMemorySaver
from langchain.chat_models import init_chat_model
from dotenv import load_dotenv, find_dotenv
import os

# Load environment variables from the nearest .env file.
load_dotenv(find_dotenv())
# ZhipuAI key, used later to authorize the web-search MCP server.
# NOTE(review): this is None when the env var is unset — verify before use.
ZHIPU_API_KEY = os.getenv("ZHIPU_API_KEY")

# Qwen3-32B hosted on ModelScope (OpenAI-compatible endpoint).
QWEN3_32B = "Qwen/Qwen3-32B"

# Prefer the ModelScope API key; fall back to OPENAI_API_KEY for common setups.
API_KEY = os.getenv("MODELSCOPE_API_KEY") or os.getenv("OPENAI_API_KEY") or ""

model = init_chat_model(
    model=QWEN3_32B,
    model_provider="openai",
    base_url="https://api-inference.modelscope.cn/v1/",
    api_key=API_KEY,
    temperature=0,
    # Pass thinking/reasoning switches through extra_body (when enabled,
    # content_blocks would carry both reasoning and text).
    extra_body={
        # Union of common "thinking mode" flags; unknown keys are ignored
        # by the backend.
        "enable_thinking": False
    },
)


# Web-search MCP server (ZhipuAI); the API key rides in the URL query string.
# Guard against ZHIPU_API_KEY being None (unset env var) — the original
# `"..." + ZHIPU_API_KEY` raised TypeError in that case. An empty key still
# fails server-side, but the app at least starts and reports a clean error.
zhipuai_mcp_server_config = {
    "url": "https://open.bigmodel.cn/api/mcp/web_search/sse?Authorization=" + (ZHIPU_API_KEY or ""),
    "transport": "sse",
}

# 12306 train-ticket MCP server, deployed from
# https://modelscope.cn/mcp/servers/@Joooook/12306-mcp
mcp_12306_server_config = {
    "url": "https://mcp.api-inference.modelscope.net/xxx/sse",
    "transport": "sse",
}

# Chart-rendering MCP server, deployed from
# https://modelscope.cn/mcp/servers/antvis/mcp-server-chart
mcp_chart_server_config = {
    "url": "https://mcp.api-inference.modelscope.net/xxx/sse",
    "transport": "sse",
}

# One MCP client multiplexing all three servers; tools from every server are
# merged into a single tool list via get_tools().
mcp_client = MultiServerMCPClient(
    {
        "zhipuai_mcp_server_config": zhipuai_mcp_server_config,
        "my12306_mcp_server_config": mcp_12306_server_config,
        # BUG FIX: this entry previously pointed at mcp_12306_server_config,
        # so the chart server was never registered with the client.
        "chart_mcp_server_config": mcp_chart_server_config,
    }
)

# 这些MCP服务端的工具,只支持异步模式,所以只能用MCP的异步模式去调用
async def create_new_agent():
    """Build a tool-calling agent wired to every tool the MCP servers expose.

    The MCP adapter tools are async-only, so the tool list must be fetched
    from inside a coroutine. An :class:`InMemorySaver` checkpointer persists
    the conversation context between turns (keyed by thread_id).
    """
    tools = await mcp_client.get_tools()
    agent = create_agent(
        model,
        tools=tools,
        system_prompt='你是一个智能助手,尽可能的调用工具回答用户的问题',
        checkpointer=InMemorySaver(),
    )
    return agent


# Build the agent once at import time; the MCP tool fetch is async-only,
# so it is driven with asyncio.run here.
agent = asyncio.run(create_new_agent())
# Runtime config carrying the session id.
config = {
    "configurable": {
        # The checkpointer looks conversation state up by this thread id.
        "thread_id": str(uuid.uuid4()),
    }
}


# Smoke test, kept for reference:
# res = agent.invoke(input={'messages': [HumanMessage(content='你好!')]}, config=config)
# print(res)

def add_message(chat_history, user_message):
    """Append the user's text to the chat history and lock the input box.

    Returns the (possibly updated) history plus a cleared, non-interactive
    Textbox so the user cannot type while the agent streams its reply.
    """
    locked_input = gr.Textbox(value=None, interactive=False)
    if not user_message:
        # Nothing was typed: leave the history untouched.
        return chat_history, locked_input
    chat_history.append({"role": "user", "content": user_message})
    return chat_history, locked_input


async def submit_messages(chat_history: List[Dict]):
    """Core streaming handler: feed the last user message to the agent and
    yield the chat history incrementally as tokens / tool results arrive.

    The last history entry is assumed to be the user's message appended by
    ``add_message``. If the agent graph is paused at an interrupt
    (``current_state.next`` is truthy), the input is wrapped in a ``Command``
    resume payload instead of a fresh HumanMessage.
    """
    user_input = chat_history[-1]['content']
    current_state = agent.get_state(config)
    full_response = ""  # accumulated assistant text across streamed chunks
    tool_calls = []  # NOTE(review): never used below — candidate for removal

    # Interrupt resume vs. normal message dispatch.
    inputs = Command(resume={'answer': user_input}) if current_state.next else {
        'messages': [HumanMessage(content=user_input)]}

    async for chunk in agent.astream(
            inputs,
            config,
            stream_mode=["messages", "updates"],  # listen to token stream and state updates
    ):
        # With a list stream_mode, each chunk is a (mode, payload) pair, so
        # `'messages' in chunk` matches when chunk[0] == 'messages' and
        # chunk[1] is the payload being iterated — presumably
        # (message, metadata); the isinstance checks below filter out the
        # metadata element. TODO confirm against the installed langgraph.
        if 'messages' in chunk:
            for message in chunk[1]:
                # Stream assistant text: grow the last assistant bubble
                # instead of appending a new one per token.
                if isinstance(message, AIMessage) and message.content:
                    full_response += message.content
                    # Only update in place when the last entry is a plain
                    # assistant ChatMessage (tool entries carry a 'title' in
                    # metadata and must not be overwritten).
                    # NOTE(review): this assumes ChatMessage.metadata supports
                    # `in`; if metadata can be None this check raises — verify.
                    if chat_history and isinstance(chat_history[-1], ChatMessage) and 'title' not in chat_history[-1].metadata:
                        chat_history[-1].content = full_response
                    else:
                        chat_history.append(ChatMessage(role="assistant", content=message.content))
                    yield chat_history

                # Tool results: render as a collapsible titled bubble.
                elif isinstance(message, ToolMessage):
                    tool_msg = f"🔧 调用工具: {message.name}\n{message.content}"
                    chat_history.append(ChatMessage(role="assistant", content=tool_msg,
                                        metadata={"title": f"🛠️ Used tool {message.name}"}))
                    yield chat_history


# 创建Gradio界面
# Build the Gradio UI: a chatbot pane, a text input, and send/clear buttons,
# with two-stage event chains (append message -> stream agent reply -> unlock input).
with gr.Blocks(
        title='我的智能小秘书',
        theme=gr.themes.Soft(),
        css=".system {color: #666; font-style: italic;}"  # custom style for system messages
) as demo:
    # Chat history component.
    chatbot = gr.Chatbot(

        height=500,
        render_markdown=True,  # render Markdown in messages
        line_breaks=False  # disable automatic line breaks
    )

    # User input box.
    chat_input = gr.Textbox(
        placeholder="请输入您的消息...",
        label="用户输入",
        max_lines=5,
        container=False
    )

    # Control buttons.
    with gr.Row():
        submit_btn = gr.Button("发送", variant="primary")
        clear_btn = gr.Button("清空对话")

    # Enter-key submission chain: append the user message (and lock the
    # input), then stream the agent's reply into the chatbot.
    msg_handler = chat_input.submit(
        fn=add_message,
        inputs=[chatbot, chat_input],
        outputs=[chatbot, chat_input],
        queue=False
    ).then(
        fn=submit_messages,
        inputs=chatbot,
        outputs=chatbot,
        api_name="chat_stream"  # exposed API endpoint name
    )

    # Send-button chain: identical to the Enter-key chain above.
    btn_handler = submit_btn.click(
        fn=add_message,
        inputs=[chatbot, chat_input],
        outputs=[chatbot, chat_input],
        queue=False
    ).then(
        fn=submit_messages,
        inputs=chatbot,
        outputs=chatbot
    )

    # Clear the conversation pane.
    # NOTE(review): this only resets the UI — the agent's checkpointer still
    # holds the old context under the same thread_id; confirm that is intended.
    clear_btn.click(
        fn=lambda: [],
        inputs=None,
        outputs=chatbot,
        queue=False
    )

    # Re-enable the input box once streaming has finished.
    msg_handler.then(
        lambda: gr.Textbox(interactive=True),
        None,
        [chat_input]
    )
    btn_handler.then(
        lambda: gr.Textbox(interactive=True),
        None,
        [chat_input]
    )

# Launch the Gradio app when executed as a script.
if __name__ == '__main__':
    demo.launch()
相关推荐
AI_小站1 小时前
6个GitHub爆火的免费大模型教程,助你快速进阶AI编程
人工智能·langchain·github·知识图谱·agent·llama·rag
茉莉玫瑰花茶11 小时前
LangChain 核心组件 [ 2 ]
java·数据库·langchain
hrhcode12 小时前
【LangGraph】四.持久化:保存和恢复执行状态
python·ai·langchain·agent·langgraph
米小虾14 小时前
MCP 协议深度解析:AI 时代的「USB-C」接口如何重塑智能体生态
人工智能·mcp
库洛西鲁14 小时前
OpenClaw 16G 内存怎么配模型?实测 3 套方案,最后一套跑满不卡
claude·mcp
JaydenAI14 小时前
[MCP在LangChain中的应用-03]在Session构建的上下文中与MCP Server交互
python·langchain·ai编程·ai agent·mcp·fastmcp
weisian15115 小时前
进阶篇-LangChain篇-20--从零构建企业大脑:RAG系统全流程实战
开发语言·langchain·rag·实战编码
tangweiguo0305198715 小时前
LangGraph 入门:多智能体工作流实战(阿里云百炼)
人工智能·python·langchain
2301_8152795215 小时前
实战分享LangChain WebUI 部署智能客服:从零搭建到生产环境优化
人工智能·langchain