Building on the earlier pandas-blocks design, here is a complete MCP tool project. It lets an LLM carry out complex data analysis tasks through nothing more than simple parameter configurations.
## Project structure

```
pandas_blocks_mcp/
├── requirements.txt
├── server.py
├── config/
│   └── claude_desktop_config.json
├── examples/
│   ├── sample_data.csv
│   └── example_usage.py
└── README.md
```
## 1. Dependencies (requirements.txt)

```text
# requirements.txt
mcp[cli]>=1.4.0
pandas>=2.0.0
numpy>=1.24.0
matplotlib>=3.7.0
seaborn>=0.12.0
plotly>=5.15.0
httpx>=0.24.0
python-dotenv>=1.0.0
```
## 2. Core MCP server (server.py)

```python
#!/usr/bin/env python3
"""
Pandas Blocks MCP server.

Gives an LLM complex data-analysis capabilities through simple block
configurations.
"""
import base64
import io
import sys
import traceback
from datetime import datetime
from typing import Any, Dict, List, Optional

import matplotlib
matplotlib.use("Agg")  # headless backend: the server renders charts off-screen
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mcp.server.fastmcp import FastMCP

# Initialize the MCP server
mcp = FastMCP("PandasBlocks")
class PandasBlocksEngine:
    """Execution engine for the pandas blocks."""

    def __init__(self):
        self.datasets: Dict[str, pd.DataFrame] = {}  # holds multiple datasets
        self.current_dataset_id: Optional[str] = None
        self.analysis_history: List[Dict[str, Any]] = []  # records every operation

    def load_data(self, data_source: str, data_type: str = "csv",
                  dataset_name: Optional[str] = None) -> Dict[str, Any]:
        """Data-loading block."""
        try:
            df = None
            if data_type == "csv":
                if data_source.startswith(("http://", "https://")):
                    df = pd.read_csv(data_source)
                else:
                    # Parse the payload as a CSV string
                    df = pd.read_csv(io.StringIO(data_source))
            elif data_type == "json":
                df = pd.read_json(io.StringIO(data_source))
            elif data_type == "excel":
                # Excel needs raw bytes; encoding a str only works if the
                # caller already delivered the file content as text
                raw = data_source.encode() if isinstance(data_source, str) else data_source
                df = pd.read_excel(io.BytesIO(raw))
            else:
                # Auto-detect: try CSV first, then JSON
                try:
                    df = pd.read_csv(io.StringIO(data_source))
                except Exception:
                    try:
                        df = pd.read_json(io.StringIO(data_source))
                    except Exception:
                        return {"error": f"Could not auto-detect data type: {data_type}"}

            # Generate a dataset ID if none was given
            if not dataset_name:
                dataset_name = f"dataset_{len(self.datasets) + 1}_{datetime.now().strftime('%H%M%S')}"
            self.datasets[dataset_name] = df
            self.current_dataset_id = dataset_name

            # Record the operation in the history
            self.analysis_history.append({
                "timestamp": datetime.now().isoformat(),
                "operation": "load_data",
                "dataset": dataset_name,
                "shape": df.shape
            })
            return {
                "success": True,
                "dataset_id": dataset_name,
                "message": f"Data loaded successfully: {dataset_name}",
                "data_shape": df.shape,
                "columns": list(df.columns),
                "sample_data": df.head(3).to_dict("records")
            }
        except Exception as e:
            return {"error": f"Data loading failed: {str(e)}"}
    def execute_pipeline(self, blocks_config: List[Dict],
                         dataset_id: Optional[str] = None) -> Dict[str, Any]:
        """Execute a pipeline of blocks."""
        try:
            # Resolve the target dataset
            if not dataset_id:
                dataset_id = self.current_dataset_id
            if dataset_id not in self.datasets:
                return {"error": f"Dataset not found: {dataset_id}"}

            df = self.datasets[dataset_id].copy()
            execution_results = []
            execution_log = []

            for i, block in enumerate(blocks_config):
                block_type = block.get("type")
                params = block.get("params", {})
                block_name = block.get("name", f"block_{i + 1}")
                execution_log.append(f"Executing block {i + 1}: {block_type} - {block_name}")

                # Execute a single block
                result = self._execute_single_block(df, block_type, params, block_name)
                if "error" in result:
                    return {
                        "error": f"Block execution failed: {result['error']}",
                        "failed_block": block_name,
                        "execution_log": execution_log
                    }

                # Update the working dataframe
                if "data" in result:
                    df = result["data"]

                # Record the result
                execution_results.append({
                    "block_name": block_name,
                    "block_type": block_type,
                    "result": result.get("summary", "Executed successfully"),
                    "data_shape_after": df.shape,
                    "details": result.get("details", {})
                })

            # Persist the transformed dataset
            self.datasets[dataset_id] = df

            # Record the pipeline run in the history
            self.analysis_history.append({
                "timestamp": datetime.now().isoformat(),
                "operation": "execute_pipeline",
                "dataset": dataset_id,
                "blocks_executed": len(blocks_config),
                "final_shape": df.shape
            })
            return {
                "success": True,
                "execution_results": execution_results,
                "final_data_shape": df.shape,
                "sample_data": df.head(5).to_dict("records"),
                "columns_info": {
                    "names": list(df.columns),
                    "dtypes": {col: str(dtype) for col, dtype in df.dtypes.items()}
                },
                "execution_log": execution_log
            }
        except Exception as e:
            return {"error": f"Pipeline execution failed: {str(e)}",
                    "traceback": traceback.format_exc()}
    def _execute_single_block(self, df: pd.DataFrame, block_type: str,
                              params: Dict, block_name: str) -> Dict:
        """Execute a single block by dispatching on its type."""
        try:
            if block_type == "clean":
                return self._clean_block(df, params, block_name)
            elif block_type == "filter":
                return self._filter_block(df, params, block_name)
            elif block_type == "transform":
                return self._transform_block(df, params, block_name)
            elif block_type == "analyze":
                return self._analyze_block(df, params, block_name)
            elif block_type == "groupby":
                return self._groupby_block(df, params, block_name)
            elif block_type == "visualize":
                return self._visualize_block(df, params, block_name)
            else:
                return {"error": f"Unsupported block type: {block_type}"}
        except Exception as e:
            return {"error": f"Block execution error: {str(e)}",
                    "traceback": traceback.format_exc()}
    def _clean_block(self, df: pd.DataFrame, params: Dict, block_name: str) -> Dict:
        """Data-cleaning block."""
        original_shape = df.shape
        operations = params.get("operations", [])
        for op in operations:
            method = op.get("method")
            if method == "fillna":
                columns = op.get("columns", df.columns)
                value = op.get("value", 0)
                df[columns] = df[columns].fillna(value)
            elif method == "drop_duplicates":
                subset = op.get("subset", None)
                df = df.drop_duplicates(subset=subset)
            elif method == "drop_columns":
                columns = op.get("columns", [])
                df = df.drop(columns=columns)
            elif method == "rename_columns":
                rename_map = op.get("rename_map", {})
                df = df.rename(columns=rename_map)
            elif method == "correct_types":
                type_map = op.get("type_map", {})
                for col, dtype in type_map.items():
                    if col in df.columns:
                        try:
                            if dtype == "datetime":
                                df[col] = pd.to_datetime(df[col])
                            else:
                                df[col] = df[col].astype(dtype)
                        except (ValueError, TypeError):
                            pass  # keep the original dtype if the conversion fails
        return {
            "data": df,
            "summary": f"Cleaning finished: {original_shape} -> {df.shape}",
            "details": {
                "rows_removed": original_shape[0] - df.shape[0],
                "columns_removed": original_shape[1] - df.shape[1],
                "operations_performed": len(operations)
            }
        }
    def _filter_block(self, df: pd.DataFrame, params: Dict, block_name: str) -> Dict:
        """Data-filtering block."""
        original_shape = df.shape
        # Both "condition" and "query" are treated as pandas query expressions;
        # with neither present, the data passes through unchanged
        condition = params.get("condition", "")
        query = params.get("query", "")
        expression = condition or query
        if expression:
            df = df.query(expression)
        return {
            "data": df,
            "summary": f"Filtering finished: {original_shape} -> {df.shape}",
            "details": {
                "rows_kept": df.shape[0],
                "rows_filtered_out": original_shape[0] - df.shape[0],
                "condition_applied": expression
            }
        }
    def _transform_block(self, df: pd.DataFrame, params: Dict, block_name: str) -> Dict:
        """Data-transformation block."""
        action = params.get("action", "")
        if action == "select":
            columns = params.get("columns", [])
            df = df[columns]
        elif action == "sort":
            by = params.get("by", [])
            ascending = params.get("ascending", True)
            df = df.sort_values(by=by, ascending=ascending)
        elif action == "create_column":
            column_name = params.get("column_name")
            expression = params.get("expression", "")
            if expression:
                # Evaluate a simple expression such as "df['a'] * df['b']".
                # Note: eval on caller-supplied strings is a code-execution
                # risk; restrict or sandbox this in untrusted settings.
                try:
                    df[column_name] = eval(expression, {"df": df, "np": np})
                except Exception:
                    pass
        elif action == "pivot":
            index = params.get("index")
            columns = params.get("columns")
            values = params.get("values")
            if all([index, columns, values]):
                df = df.pivot_table(index=index, columns=columns, values=values, aggfunc="mean")
        return {
            "data": df,
            "summary": f"Transformation finished: {df.shape}",
            "details": {"action_performed": action}
        }
    def _analyze_block(self, df: pd.DataFrame, params: Dict, block_name: str) -> Dict:
        """Data-analysis block."""
        analysis_type = params.get("analysis_type", "basic")
        results = {}
        if analysis_type == "basic":
            results["description"] = df.describe().to_dict()
            results["info"] = {
                "shape": df.shape,
                "columns": list(df.columns),
                "dtypes": {col: str(dtype) for col, dtype in df.dtypes.items()},
                "null_counts": df.isnull().sum().to_dict()
            }
        elif analysis_type == "correlation":
            numeric_df = df.select_dtypes(include=[np.number])
            if not numeric_df.empty:
                results["correlation_matrix"] = numeric_df.corr().to_dict()
        elif analysis_type == "value_counts":
            column = params.get("column")
            if column and column in df.columns:
                results["value_counts"] = df[column].value_counts().to_dict()
        return {
            "data": df,  # analysis blocks do not modify the data
            "summary": f"Analysis finished: {analysis_type}",
            "details": results
        }
    def _groupby_block(self, df: pd.DataFrame, params: Dict, block_name: str) -> Dict:
        """Group-and-aggregate block."""
        group_columns = params.get("group_columns", [])
        agg_functions = params.get("agg_functions", {})
        if group_columns and agg_functions:
            # Only group on columns that actually exist
            valid_columns = [col for col in group_columns if col in df.columns]
            if valid_columns:
                grouped = df.groupby(valid_columns)
                result_df = grouped.agg(agg_functions).reset_index()
                return {
                    "data": result_df,
                    "summary": f"Aggregation finished: grouped by {valid_columns}",
                    "details": {
                        "group_columns": valid_columns,
                        "aggregations": agg_functions,
                        "result_shape": result_df.shape
                    }
                }
        return {"data": df, "summary": "Group-by parameters incomplete; skipping this block"}
    def _visualize_block(self, df: pd.DataFrame, params: Dict, block_name: str) -> Dict:
        """Visualization block."""
        chart_type = params.get("chart_type", "bar")
        title = params.get("title", f"Chart - {block_name}")
        try:
            # Create the figure explicitly and hand its axes to pandas, so the
            # chart always lands on the figure we save below
            fig, ax = plt.subplots(figsize=params.get("figsize", (10, 6)))
            if chart_type == "bar":
                x_col = params.get("x")
                y_col = params.get("y")
                if x_col and y_col:
                    df.plot.bar(x=x_col, y=y_col, title=title, ax=ax)
            elif chart_type == "line":
                df.plot.line(title=title, ax=ax)
            elif chart_type == "hist":
                column = params.get("column", df.select_dtypes(include=[np.number]).columns[0])
                df[column].hist(ax=ax)
                ax.set_title(title)
            elif chart_type == "scatter":
                x_col = params.get("x")
                y_col = params.get("y")
                if x_col and y_col:
                    df.plot.scatter(x=x_col, y=y_col, title=title, ax=ax)
            fig.tight_layout()

            # Encode the image as base64 PNG
            img_buffer = io.BytesIO()
            fig.savefig(img_buffer, format="png", dpi=100)
            img_buffer.seek(0)
            img_base64 = base64.b64encode(img_buffer.read()).decode()
            plt.close(fig)
            return {
                "data": df,
                "summary": f"Visualization finished: {chart_type} chart",
                "details": {
                    "chart_type": chart_type,
                    "image_base64": img_base64,
                    "image_format": "png"
                }
            }
        except Exception as e:
            return {
                "data": df,
                "summary": f"Visualization failed: {str(e)}",
                "details": {"error": str(e)}
            }
    def get_dataset_info(self, dataset_id: Optional[str] = None) -> Dict[str, Any]:
        """Return basic information about a dataset."""
        if not dataset_id:
            dataset_id = self.current_dataset_id
        if dataset_id not in self.datasets:
            return {"error": f"Dataset not found: {dataset_id}"}
        df = self.datasets[dataset_id]
        return {
            "dataset_id": dataset_id,
            "shape": df.shape,
            "columns": list(df.columns),
            "dtypes": {col: str(dtype) for col, dtype in df.dtypes.items()},
            "null_counts": df.isnull().sum().to_dict(),
            "sample_data": df.head(3).to_dict("records")
        }

    def get_analysis_history(self) -> List[Dict]:
        """Return the analysis history."""
        return self.analysis_history
# Global engine instance
engine = PandasBlocksEngine()


# MCP tool definitions
@mcp.tool()
def load_dataset(data_content: str, data_type: str = "csv",
                 dataset_name: Optional[str] = None) -> Dict[str, Any]:
    """
    Load a dataset into the pandas blocks engine.

    Args:
        data_content: The data payload (CSV string, JSON string, or URL)
        data_type: Data type (csv/json/excel/auto)
        dataset_name: Optional dataset name
    """
    return engine.load_data(data_content, data_type, dataset_name)
@mcp.tool()
def execute_analysis_pipeline(blocks_config: List[Dict],
                              dataset_id: Optional[str] = None) -> Dict[str, Any]:
    """
    Execute a block pipeline over a dataset.

    Args:
        blocks_config: List of block configurations
        dataset_id: Optional dataset ID (defaults to the current dataset)

    Example blocks_config:
    [
        {
            "type": "clean",
            "name": "Data cleaning",
            "params": {
                "operations": [
                    {"method": "fillna", "columns": ["age"], "value": 0},
                    {"method": "drop_duplicates"}
                ]
            }
        },
        {
            "type": "analyze",
            "name": "Basic analysis",
            "params": {
                "analysis_type": "basic"
            }
        }
    ]
    """
    return engine.execute_pipeline(blocks_config, dataset_id)
@mcp.tool()
def get_dataset_information(dataset_id: Optional[str] = None) -> Dict[str, Any]:
    """
    Return basic information about the current dataset.
    """
    return engine.get_dataset_info(dataset_id)


@mcp.tool()
def quick_analysis(data_content: str, analysis_type: str = "comprehensive") -> Dict[str, Any]:
    """
    One-shot quick data analysis.

    Args:
        data_content: The data payload
        analysis_type: Analysis type (basic/comprehensive/statistical)
    """
    # Load the data first
    load_result = engine.load_data(data_content, "csv")
    if "error" in load_result:
        return load_result
    dataset_id = load_result["dataset_id"]

    # Assemble the blocks for the requested analysis type
    if analysis_type == "basic":
        blocks = [
            {"type": "clean", "name": "Basic cleaning", "params": {"operations": [
                {"method": "fillna", "value": 0}
            ]}},
            {"type": "analyze", "name": "Descriptive statistics", "params": {"analysis_type": "basic"}}
        ]
    else:  # comprehensive (also covers "statistical")
        blocks = [
            {"type": "clean", "name": "Data cleaning", "params": {"operations": [
                {"method": "fillna", "value": 0},
                {"method": "drop_duplicates"}
            ]}},
            {"type": "analyze", "name": "Statistical analysis", "params": {"analysis_type": "basic"}},
            {"type": "analyze", "name": "Correlation analysis", "params": {"analysis_type": "correlation"}}
        ]
    return engine.execute_pipeline(blocks, dataset_id)


@mcp.tool()
def get_analysis_history() -> List[Dict]:
    """
    Return the history of analysis operations.
    """
    return engine.get_analysis_history()
# Run the server
if __name__ == "__main__":
    # Log to stderr: with the stdio transport, stdout is reserved for the
    # MCP protocol stream and must stay clean
    print("Starting the Pandas Blocks MCP server...", file=sys.stderr)
    print("Available tools:", file=sys.stderr)
    print("- load_dataset: load data", file=sys.stderr)
    print("- execute_analysis_pipeline: run an analysis pipeline", file=sys.stderr)
    print("- get_dataset_information: inspect a dataset", file=sys.stderr)
    print("- quick_analysis: one-shot analysis", file=sys.stderr)
    print("- get_analysis_history: view the analysis history", file=sys.stderr)
    mcp.run(transport="stdio")
```
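The tool docstring above only demonstrates the `clean` and `analyze` blocks. As a sketch of how the other block types compose (column names such as `price`, `quantity_sold`, and `product` are illustrative, not required by the engine), one pipeline can chain a filter, a derived column, and a chart:

```python
# Illustrative blocks_config: filter rows, derive a column, render a chart.
pipeline = [
    {"type": "filter", "name": "High-volume rows",
     "params": {"query": "quantity_sold > 100"}},
    {"type": "transform", "name": "Revenue column",
     "params": {"action": "create_column",
                "column_name": "revenue",
                "expression": "df['price'] * df['quantity_sold']"}},
    {"type": "visualize", "name": "Revenue chart",
     "params": {"chart_type": "bar", "x": "product", "y": "revenue",
                "title": "Revenue by product"}},
]
# Passed to the execute_analysis_pipeline tool, the result carries the chart
# as a base64 PNG under execution_results[-1]["details"]["image_base64"].
```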
## 3. Claude Desktop configuration (config/claude_desktop_config.json)

```json
{
  "mcpServers": {
    "pandas-blocks": {
      "command": "python",
      "args": ["/path/to/your/pandas_blocks_mcp/server.py"],
      "env": {
        "PYTHONPATH": "/path/to/your/pandas_blocks_mcp"
      }
    }
  }
}
```
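Replace the `/path/to/your/...` placeholders with the absolute path to the project. Claude Desktop normally reads this file from `~/Library/Application Support/Claude/claude_desktop_config.json` on macOS and `%APPDATA%\Claude\claude_desktop_config.json` on Windows; merge the `mcpServers` entry into any existing configuration rather than overwriting it.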
## 4. Sample data (examples/sample_data.csv)

```csv
product,category,price,quantity_sold,profit
Laptop,Electronics,999.99,50,29999.5
Phone,Electronics,499.99,120,29999.4
Tablet,Electronics,299.99,80,11999.6
T-Shirt,Clothing,19.99,200,1999.0
Jeans,Clothing,49.99,150,3749.25
Shoes,Clothing,89.99,100,4499.5
Book,Education,14.99,300,2248.5
Notebook,Education,9.99,250,1247.5
```
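As a sketch of how this file flows through the engine (calling the engine directly in-process rather than over MCP), the snippet below loads the CSV and totals profit per category; the result should contain one row each for Electronics, Clothing, and Education:

```python
from server import engine

# Load the sample file into the engine under an explicit dataset name.
with open("examples/sample_data.csv") as f:
    engine.load_data(f.read(), data_type="csv", dataset_name="sales")

# Group by category and aggregate two of the numeric columns.
result = engine.execute_pipeline(
    [{
        "type": "groupby",
        "name": "Profit by category",
        "params": {
            "group_columns": ["category"],
            "agg_functions": {"profit": "sum", "price": "mean"},
        },
    }],
    dataset_id="sales",
)
print(result["sample_data"])
```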
## 5. Usage example (examples/example_usage.py)

```python
#!/usr/bin/env python3
"""
Usage example for the Pandas Blocks MCP tool.
"""
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def example_usage():
    """Example client session."""
    # Server launch parameters
    server_params = StdioServerParameters(
        command="python",
        args=["server.py"]  # run from the project root
    )

    # Open a client connection
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            # Initialize the session
            await session.initialize()

            # 1. List the available tools
            tools_response = await session.list_tools()
            print("Available tools:", [tool.name for tool in tools_response.tools])

            # 2. Load sample data
            sample_data = """name,age,salary,department
Alice,30,50000,Engineering
Bob,25,45000,Marketing
Charlie,35,60000,Engineering
Diana,28,52000,Sales
Eve,32,48000,Marketing"""
            load_result = await session.call_tool(
                "load_dataset",
                {"data_content": sample_data, "data_type": "csv"}
            )
            print("Load result:", load_result)

            # 3. Run an analysis pipeline
            pipeline_config = [
                {
                    "type": "clean",
                    "name": "Data cleaning",
                    "params": {
                        "operations": [
                            {"method": "fillna", "value": 0},
                            {"method": "drop_duplicates"}
                        ]
                    }
                },
                {
                    "type": "analyze",
                    "name": "Basic analysis",
                    "params": {"analysis_type": "basic"}
                },
                {
                    "type": "groupby",
                    "name": "Group by department",
                    "params": {
                        "group_columns": ["department"],
                        "agg_functions": {"salary": "mean", "age": "mean"}
                    }
                }
            ]
            analysis_result = await session.call_tool(
                "execute_analysis_pipeline",
                {"blocks_config": pipeline_config}
            )
            print("Analysis result:", analysis_result)

            # 4. Inspect the dataset
            info_result = await session.call_tool("get_dataset_information", {})
            print("Dataset info:", info_result)


if __name__ == "__main__":
    asyncio.run(example_usage())
```
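Run the example with `python examples/example_usage.py` from the project root, so that the `args=["server.py"]` path in `StdioServerParameters` resolves against the working directory.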
## 6. Project README (README.md)

````markdown
# Pandas Blocks MCP Tool

Give LLMs professional-grade data analysis capabilities through simple block configurations.

## Features

- 🧩 **Block-based operations**: complex data operations broken into simple building blocks
- 🔄 **Chained execution**: multiple blocks run as a pipeline
- 📊 **Rich data operations**: cleaning, transformation, analysis, visualization
- 🔧 **Simple interface**: the LLM only supplies data and parameter configurations
- 📈 **Visualization support**: charts and analysis reports generated automatically

## Quick start

### Install dependencies

```bash
pip install -r requirements.txt
```

### Run the MCP server

```bash
python server.py
```

### Configure Claude Desktop

Add the configuration file to Claude Desktop's MCP server settings.

## Block types

1. Cleaning block (`clean`)
   - handle missing values
   - drop duplicates
   - rename columns
   - convert types
2. Filtering block (`filter`)
   - conditional filtering
   - query filtering
3. Transformation block (`transform`)
   - column selection and creation
   - pivoting
   - sorting
4. Analysis block (`analyze`)
   - descriptive statistics
   - correlation analysis
   - frequency counts
5. Group-by block (`groupby`)
   - multi-column grouping
   - multiple aggregation functions
   - index reset on results
6. Visualization block (`visualize`)
   - bar, line, histogram, and scatter charts
   - base64-encoded PNG output

## LLM invocation example

```python
# A simple analysis pipeline
pipeline = [
    {
        "type": "clean",
        "params": {"operations": [{"method": "fillna", "value": 0}]}
    },
    {
        "type": "analyze",
        "params": {"analysis_type": "basic"}
    }
]
result = execute_analysis_pipeline(pipeline)
```

## Advanced features

- **History tracking**: every analysis operation is recorded
- **Multi-dataset management**: several datasets can be handled at once
- **Error handling**: detailed error messages and debugging support
- **Extensibility**: new block types are easy to add

## License

MIT License
````
## Summary

This complete MCP tool project provides:

1. **A full server implementation**: a working MCP server built on FastMCP
2. **A block-based architecture**: pandas operations wrapped in simple configuration blocks
3. **Easy extensibility**: new block types can be added with little effort
4. **Practical robustness**: error handling, operation history, and configuration management

An LLM only needs simple JSON configurations to invoke the full range of data analysis features, with no knowledge of pandas syntax required: a genuinely "building blocks" style of data analysis.