mcp demo 智能天气服务:经纬度预报与城市警报

编写一个输入经纬度获取天气预报,和输入城市名称获取天气预警的mcp服务

代码

mcp 1.24.0

python 复制代码
from typing import Any, List
import random
from datetime import datetime, timedelta

from mcp.server.fastmcp import FastMCP
from starlette.middleware.trustedhost import TrustedHostMiddleware

import functools
import json
import time
from typing import Callable


def log_tool_call(func: Callable):
    """Decorator that logs every MCP tool invocation to stdout.

    Prints the tool name, its input arguments, the result, and the call
    duration in milliseconds. Exceptions are logged and re-raised so the
    MCP framework can still surface them to the client.
    """

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        start = time.time()

        print("\n================ MCP TOOL CALL ================")
        print(f"Tool: {func.__name__}")
        print("Input:")
        # Include positional args too (previously only **kwargs were dumped,
        # silently dropping positional inputs from the log). `default=str`
        # keeps the logger from raising TypeError on non-JSON-serializable
        # arguments — a logging failure must not break the tool call.
        payload = {"args": list(args), "kwargs": kwargs} if args else kwargs
        print(json.dumps(payload, ensure_ascii=False, indent=2, default=str))

        try:
            result = await func(*args, **kwargs)

            duration = round((time.time() - start) * 1000, 2)

            print("Output:")
            print(result)
            print(f"Duration: {duration} ms")
            print("================================================\n")

            return result

        except Exception as e:
            print("ERROR:")
            print(str(e))
            print("================================================\n")
            raise

    return wrapper


# Initialize FastMCP server
# "weather" is the server name advertised to MCP clients during the handshake.
mcp = FastMCP("weather")

# =========================
# Mock Data Generators
# =========================


def mock_alerts(state: str) -> List[dict[str, Any]]:
    """Return a list of fake weather alerts for the given state code.

    "CA" always yields a fixed Heat Advisory; any other state randomly
    yields either no alerts (~40% chance) or one simulated alert.
    """
    event_pool = [
        "Severe Thunderstorm Warning",
        "Heat Advisory",
        "Flood Watch",
        "Winter Storm Warning",
    ]

    code = state.upper()

    # California gets a deterministic, fixed advisory.
    if code == "CA":
        fixed_alert = {
            "event": "Heat Advisory",
            "area": "Central Valley, CA",
            "severity": "Moderate",
            "description": "High temperatures are expected to reach dangerous levels.",
            "instruction": "Stay hydrated and avoid outdoor activity during peak heat.",
        }
        return [fixed_alert]

    # ~40% of the time, simulate a quiet state with no active alerts.
    if random.random() < 0.4:
        return []

    simulated = {
        "event": random.choice(event_pool),
        "area": f"{code} Statewide",
        "severity": random.choice(["Minor", "Moderate", "Severe"]),
        "description": "This is a simulated weather alert for testing purposes.",
        "instruction": "Follow standard safety procedures.",
    }
    return [simulated]


def format_alert(alert: dict) -> str:
    """Render a single alert dict as a multi-line human-readable string."""
    lines = [
        f'Event: {alert.get("event", "Unknown")}',
        f'Area: {alert.get("area", "Unknown")}',
        f'Severity: {alert.get("severity", "Unknown")}',
        f'Description: {alert.get("description", "No description available")}',
        f'Instructions: {alert.get("instruction", "No specific instructions provided")}',
    ]
    return "\n".join(lines)


def mock_forecast(latitude: float, longitude: float) -> List[dict[str, Any]]:
    """Build five days of fake forecast periods starting today.

    The coordinates are accepted for API compatibility but do not
    influence the simulated data.
    """
    base_temp = random.randint(10, 30)
    today = datetime.now()

    return [
        {
            "name": (today + timedelta(days=offset)).strftime("%A"),
            "temperature": base_temp + random.randint(-3, 3),
            "temperatureUnit": "C",
            "windSpeed": f"{random.randint(5, 20)} km/h",
            "windDirection": random.choice(["N", "E", "S", "W"]),
            "detailedForecast": "This is a simulated forecast with generally stable weather conditions.",
        }
        for offset in range(5)
    ]


# =========================
# MCP Tools
# =========================


@mcp.tool()
@log_tool_call
async def get_alerts(state: str) -> str:
    """Get simulated weather alerts for a US state.

    Args:
        state: Two-letter US state code (e.g. "CA", "NY", "TX")

    Returns:
        A formatted listing of active alerts, or a notice that no alerts
        are active (simulated data).
    """
    alerts = mock_alerts(state)

    if not alerts:
        return "No active alerts for this state (simulated data)."

    formatted = [format_alert(alert) for alert in alerts]
    return "\n---\n".join(formatted)


@mcp.tool()
@log_tool_call
async def get_forecast(latitude: float, longitude: float) -> str:
    """Get simulated weather forecast for a location.

    Args:
        latitude: Latitude of the location
        longitude: Longitude of the location
    """
    sections = []
    for entry in mock_forecast(latitude, longitude):
        sections.append(
            "\n".join(
                [
                    f'{entry["name"]}:',
                    f'Temperature: {entry["temperature"]}°{entry["temperatureUnit"]}',
                    f'Wind: {entry["windSpeed"]} {entry["windDirection"]}',
                    f'Forecast: {entry["detailedForecast"]}',
                ]
            )
        )
    return "\n---\n".join(sections)


def main():
    # Run the MCP server over the SSE (Server-Sent Events) transport;
    # the article exercises it at http://127.0.0.1:8000/sse (clients open a
    # GET long-lived stream, then POST messages tied to the session).
    mcp.run(transport="sse")


if __name__ == "__main__":
    main()

启动后

POST测试下

http://127.0.0.1:8000/sse

可以发现一共有两个tool,并且测试获取天气预报成功

并且可以看到是先发个GET建立长连接,后续通过POST session交流

cursor测试

setting里面配置

json 复制代码
{
  "mcpServers": {
    "weather": {
      "transport": "sse",
      "url": "http://127.0.0.1:8000/sse"
    }
  }
}

打开chat测试下

编写代码测试

直接调用mcp server
python 复制代码
import asyncio
from mcp import ClientSession
from mcp.client.sse import sse_client


MCP_SSE_URL = "http://127.0.0.1:8000/sse"


async def main():
    """Connect to the local MCP SSE server and exercise both tools."""
    # 1. Open the SSE connection (the GET establishes the long-lived stream).
    async with sse_client(MCP_SSE_URL) as (read, write):
        # 2. Create the MCP session over the read/write streams.
        async with ClientSession(read, write) as session:
            # 3. The initialize handshake is mandatory before any other call.
            await session.initialize()

            # 4. List the tools the server exposes. list_tools() returns a
            # ListToolsResult; the Tool objects live in its .tools attribute —
            # iterating the result object itself yields pydantic
            # (field, value) pairs, not tools.
            tools = await session.list_tools()
            print("Available tools:")
            for t in tools.tools:
                print(t)

            # 5. Call get_alerts directly. (The log previously claimed
            # "get_forecast_by_city", which is not the tool being called.)
            print("\nCalling get_alerts...\n")

            result = await session.call_tool(
                name="get_alerts", arguments={"state": "北京"}
            )

            print("===== TOOL RESULT =====")
            print(result.content)

            # 6. Call get_forecast with sample coordinates.
            result = await session.call_tool(
                name="get_forecast", arguments={"latitude": 100, "longitude": 100}
            )

            print("===== TOOL RESULT =====")
            print(result.content)


if __name__ == "__main__":
    asyncio.run(main())

其实就是建立连接,发起调用啦

那么大模型能做的无非就是帮我们识别该调用哪个tool,该传入什么参数,都可以从用户输入给大模型的内容来提取出来,本质就是function calling

加入大模型
python 复制代码
from mcp import ClientSession
from mcp.client.sse import sse_client


MCP_SSE_URL = "http://127.0.0.1:8000/sse"


async def call_mcp_tool(tool_name: str, arguments: dict) -> str:
    """Open an MCP SSE session, call one tool, and return its content.

    Args:
        tool_name: Name of a tool exposed by the server.
        arguments: JSON-serializable arguments for that tool.

    Returns:
        The tool call's content payload.
        NOTE(review): result.content is a list of content blocks in the MCP
        SDK, so the ``-> str`` annotation (kept for interface compatibility)
        is imprecise — confirm against the SDK version in use.
    """
    async with sse_client(MCP_SSE_URL) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            # list_tools() returns a ListToolsResult; the Tool objects live
            # in its .tools attribute — iterating the result object itself
            # yields pydantic (field, value) pairs, not tools.
            tools = await session.list_tools()
            print("Available tools:")
            for t in tools.tools:
                print(t)

            result = await session.call_tool(name=tool_name, arguments=arguments)
            return result.content


import asyncio
import json

import httpx


OLLAMA_URL = "http://127.0.0.1:11434/api/chat"
MODEL = "qwen3:8b"


# System prompt: constrains the model to a machine-parseable JSON envelope
# and advertises the available tools. It lists BOTH server tools — the
# original listed only get_alerts, so forecast questions could never be
# routed to get_forecast.
SYSTEM_PROMPT = """
你是一个工具调度助手。

你只能以 JSON 格式回答,且只能是以下两种之一:

1. 如果需要调用工具:
{
  "type": "tool_call",
  "name": "<tool_name>",
  "arguments": { ... }
}

2. 如果不需要工具:
{
  "type": "final",
  "content": "<answer>"
}

可用工具:
- get_alerts(state: string)  查询城市天气预警
- get_forecast(latitude: number, longitude: number)  查询经纬度天气预报

示例:
用户:北京最近几天天气怎么样?
你应该返回 tool_call。
"""


def ask_llm(user_input: str) -> str:
    """Send one user message to the local Ollama chat API.

    Args:
        user_input: The raw user question.

    Returns:
        The model's reply text. Expected to be the JSON envelope described
        in SYSTEM_PROMPT, but returned here as a plain string — the caller
        parses it. (The original ``-> dict`` annotation was wrong: this
        function returns the content string, not a parsed dict.)

    Raises:
        httpx.HTTPStatusError: If Ollama responds with an error status.
    """
    payload = {
        "model": MODEL,
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": user_input},
        ],
        "stream": False,
        # Deterministic decoding so the JSON contract is followed reliably.
        "options": {"temperature": 0},
    }

    resp = httpx.post(OLLAMA_URL, json=payload, timeout=60)
    resp.raise_for_status()

    # Ollama's non-streaming /api/chat response carries the reply text at
    # ["message"]["content"].
    content = resp.json()["message"]["content"]

    return content


async def main():
    """One-shot demo: ask the LLM for a decision, then dispatch it."""
    user_input = "北京的天气预警?"

    print("User:", user_input)

    # 1. Ask the model which action to take.
    raw = ask_llm(user_input)

    print("\nLLM raw output:")
    print(raw)

    # 2. Parse the mandated JSON envelope.
    try:
        decision = json.loads(raw)
    except json.JSONDecodeError:
        raise RuntimeError("LLM did not return valid JSON")

    # 3. Dispatch. Using .get() means a malformed envelope (missing keys)
    #    falls through to the final-answer branch instead of raising a bare
    #    KeyError, which the original did.
    if decision.get("type") == "tool_call":
        tool_name = decision["name"]
        arguments = decision["arguments"]

        print(f"\nCalling MCP tool: {tool_name} {arguments}")

        tool_result = await call_mcp_tool(tool_name, arguments)

        print("\n===== MCP RESULT =====")
        print(tool_result)

    else:
        print("\n===== FINAL ANSWER =====")
        print(decision.get("content", ""))


if __name__ == "__main__":
    asyncio.run(main())

大致原理功能就这样了,其他都是些精细的优化了,比如可以用的tool不能直接写死在prompt里等等。langchain无非就是把这些步骤抽象成了一些标准的流程,拿来照着填就是了。

相关推荐
杭州泽沃电子科技有限公司1 小时前
为电气风险定价:如何利用监测数据评估工厂的“电气安全风险指数”?
人工智能·安全
Godspeed Zhao3 小时前
自动驾驶中的传感器技术24.3——Camera(18)
人工智能·机器学习·自动驾驶
顾北124 小时前
MCP协议实战|Spring AI + 高德地图工具集成教程
人工智能
wfeqhfxz25887824 小时前
毒蝇伞品种识别与分类_Centernet模型优化实战
人工智能·分类·数据挖掘
中杯可乐多加冰5 小时前
RAG 深度实践系列(七):从“能用”到“好用”——RAG 系统优化与效果评估
人工智能·大模型·llm·大语言模型·rag·检索增强生成
珠海西格电力科技5 小时前
微电网系统架构设计:并网/孤岛双模式运行与控制策略
网络·人工智能·物联网·系统架构·云计算·智慧城市
FreeBuf_5 小时前
AI扩大攻击面,大国博弈引发安全新挑战
人工智能·安全·chatgpt
weisian1516 小时前
进阶篇-8-数学篇-7--特征值与特征向量:AI特征提取的核心逻辑
人工智能·pca·特征值·特征向量·降维
Java程序员 拥抱ai6 小时前
撰写「从0到1构建下一代游戏AI客服」系列技术博客的初衷
人工智能
186******205316 小时前
AI重构项目开发全流程:效率革命与实践指南
人工智能·重构