JSON serialization of LangChain / LangGraph conversation history

You're exactly right! Using the utility functions provided by langchain_core.messages is the best practice. This approach is both official and reliable, and it correctly handles every message type and its internal structure.

Why this is the best approach:

1. Officially supported, with good forward compatibility

python
from langchain_core.messages import (
    message_to_dict,      # convert a single message
    messages_to_dict,     # convert a list of messages
    messages_from_dict,   # convert a list of dicts back to messages
    HumanMessage, AIMessage, SystemMessage
)
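
The single-message helper message_to_dict is imported above but not exercised later; here is a quick, illustrative sketch of what it returns (the exact keys are an implementation detail of langchain_core, shown only for orientation):

python
from langchain_core.messages import HumanMessage, message_to_dict

d = message_to_dict(HumanMessage(content="Hello"))
print(d)  # a plain dict, roughly {"type": "human", "data": {"content": "Hello", ...}}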

2. Handles every message type

python
import json
from langchain_core.messages import (
    HumanMessage, AIMessage, SystemMessage,
    ToolMessage, FunctionMessage,
    messages_to_dict, messages_from_dict
)

# Create messages of every type
messages = [
    HumanMessage(content="Hello"),
    AIMessage(content="Hi there!", additional_kwargs={"tool_calls": [...]}),
    SystemMessage(content="You are a helpful assistant"),
    ToolMessage(content="result", tool_call_id="123"),
    FunctionMessage(content="data", name="some_function")
]

# Serialize correctly
message_dicts = messages_to_dict(messages)
json_str = json.dumps(message_dicts, indent=2)
print(json_str)

# Restore the original message objects
restored_messages = messages_from_dict(message_dicts)
print(type(restored_messages[0]))  # <class 'langchain_core.messages.human.HumanMessage'>
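
The round trip back from a JSON string works the same way; a minimal sketch continuing the example above:

python
# Parse the JSON string and rebuild the original message objects
parsed = json.loads(json_str)
roundtrip = messages_from_dict(parsed)
assert roundtrip[0].content == "Hello"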

3. Clean integration with LangGraph

python
from langgraph.graph import StateGraph, END
from typing import TypedDict, List
import json
from langchain_core.messages import (
    BaseMessage, HumanMessage, AIMessage,
    messages_to_dict, messages_from_dict
)

# State definition - messages are stored as serialized dicts
class ChatState(TypedDict):
    messages: List[dict]  # serialized messages
    metadata: dict

def human_node(state: ChatState):
    """处理用户输入"""
    # 1. 将字典转换回消息对象
    messages = messages_from_dict(state["messages"])
    
    # 2. Run the business logic
    last_message = messages[-1] if messages else None
    if last_message:
        response = AIMessage(content=f"Echo: {last_message.content}")
        messages.append(response)
    
    # 3. Convert back to dicts for storage
    state["messages"] = messages_to_dict(messages)
    return state

def ai_node(state: ChatState):
    """AI处理节点"""
    # 转换为消息对象
    messages = messages_from_dict(state["messages"])
    
    # Add the AI response
    ai_message = AIMessage(
        content="I'm thinking about your question...",
        additional_kwargs={"reasoning": "User needs assistance"}
    )
    messages.append(ai_message)
    
    # Store back into the state
    state["messages"] = messages_to_dict(messages)
    return state

# Build the graph
workflow = StateGraph(ChatState)
workflow.add_node("human", human_node)
workflow.add_node("ai", ai_node)

workflow.set_entry_point("human")
workflow.add_edge("human", "ai")
workflow.add_edge("ai", END)

app = workflow.compile()

# Usage
initial_state = {
    "messages": messages_to_dict([HumanMessage(content="Hello world!")]),
    "metadata": {"user_id": "123"}
}

result = app.invoke(initial_state)
print("Final messages:", result["messages"])

4. Complete workflow example

python
from langchain_core.messages import (
    BaseMessage, HumanMessage, AIMessage, SystemMessage,
    messages_to_dict, messages_from_dict
)
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from typing import TypedDict, List
import json

# Define the state
class AgentState(TypedDict):
    messages: List[dict]  # serialized messages
    context: str
    status: str

# Helper functions: convert between the state and message objects
def state_to_messages(state: AgentState) -> List[BaseMessage]:
    """Restore message objects from the state"""
    return messages_from_dict(state["messages"])

def messages_to_state(messages: List[BaseMessage], state: AgentState) -> AgentState:
    """将消息对象存回state"""
    state["messages"] = messages_to_dict(messages)
    return state

# Node functions
def process_input(state: AgentState):
    """Process the user input"""
    # Get message objects
    messages = state_to_messages(state)
    
    # Create the LLM
    llm = ChatOpenAI(model="gpt-3.5-turbo")
    
    # Prepend a system message
    system_msg = SystemMessage(content="You are a helpful assistant.")
    all_messages = [system_msg] + messages
    
    # Call the LLM
    response = llm.invoke(all_messages)
    
    # Append the response to the message list
    messages.append(response)
    
    # Update the state
    state = messages_to_state(messages, state)
    state["status"] = "processed"
    
    return state

def format_output(state: AgentState):
    """格式化输出节点"""
    messages = state_to_messages(state)
    
    # Keep only the last AI message
    ai_messages = [msg for msg in messages if isinstance(msg, AIMessage)]
    if ai_messages:
        last_ai_message = ai_messages[-1]
        state["context"] = f"AI said: {last_ai_message.content}"
    
    return state

# Build the graph
workflow = StateGraph(AgentState)
workflow.add_node("process", process_input)
workflow.add_node("format", format_output)

workflow.set_entry_point("process")
workflow.add_edge("process", "format")
workflow.add_edge("format", END)

app = workflow.compile()

# Serialize and deserialize the entire state
def save_state(state: AgentState, filepath: str):
    """保存状态到文件"""
    with open(filepath, 'w') as f:
        json.dump(state, f, indent=2)

def load_state(filepath: str) -> AgentState:
    """从文件加载状态"""
    with open(filepath, 'r') as f:
        return json.load(f)

# Usage example
initial_messages = messages_to_dict([
    HumanMessage(content="What is the weather today?")
])

initial_state = {
    "messages": initial_messages,
    "context": "",
    "status": "initialized"
}

# Run
result = app.invoke(initial_state)
print("Result:", result)

# Save the state
save_state(result, "agent_state.json")

# Later, load the state and continue
loaded_state = load_state("agent_state.json")
new_result = app.invoke(loaded_state)
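
Invoking the loaded state as-is simply re-processes the same history; to actually continue the conversation, append a new user turn first. A minimal sketch (the follow-up question is made up for illustration):

python
# Continue the restored conversation with a new user message
followup_state = load_state("agent_state.json")
followup_messages = messages_from_dict(followup_state["messages"])
followup_messages.append(HumanMessage(content="And what about tomorrow?"))
followup_state["messages"] = messages_to_dict(followup_messages)
followup_result = app.invoke(followup_state)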

Key advantages:

  1. Standardized: uses the serialization methods officially provided by LangChain
  2. Complete coverage: handles every message type and special field (such as additional_kwargs and tool_calls) - see the sketch after this list
  3. Easy to debug: the serialized JSON structure is clear and human-readable
  4. Cross-version compatibility: reduces the impact of LangChain version upgrades
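
To illustrate point 2, a hedged sketch of the tool-call round trip (assuming a recent langchain_core where AIMessage accepts a tool_calls list of {"name", "args", "id"} dicts):

python
from langchain_core.messages import AIMessage, messages_to_dict, messages_from_dict

msg = AIMessage(
    content="",
    tool_calls=[{"name": "get_weather", "args": {"city": "Beijing"}, "id": "call_1"}],
)
restored = messages_from_dict(messages_to_dict([msg]))[0]
assert restored.tool_calls[0]["name"] == "get_weather"  # tool_calls survive the round trip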