1.简单调用
python
import operator
import os
from typing import TypedDict, Literal
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage, AnyMessage
from langchain_core.tools import tool
from langgraph.graph import StateGraph, START, END
from langgraph.prebuilt import ToolNode
from typing import Annotated
# 1. 定义工具
@tool
def multiply(a: int, b: int) -> int:
    """Return the product of two integers.

    Args:
        a: first int operand
        b: second int operand
    """
    product = a * b
    return product
@tool
def divide(a: int, b: int) -> float:
    """Return the quotient of two integers.

    Args:
        a: dividend
        b: divisor; raises ZeroDivisionError if 0

    Returns:
        The exact quotient as a float — `/` is true division in Python 3,
        so the result is never an int (the original `-> int` annotation
        was wrong and produced a misleading tool schema).
    """
    return a / b
# Tool registry: the list is bound to the model, the dict resolves
# a tool-call name back to the callable tool object, e.g.
#   {"multiply": <multiply tool>, "divide": <divide tool>}
tools = [multiply, divide]
# NOTE: use `t`, not `tool`, as the loop name so the imported @tool
# decorator is not shadowed inside the comprehension.
tools_by_name = {t.name: t for t in tools}
# 2. Model + tool binding
# ChatTongyi (Alibaba DashScope) — the API key comes from the environment.
model = ChatTongyi(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    model_name="qwen-plus-2025-12-01"
)
# bind_tools attaches the tool schemas so the model can emit tool_calls.
model = model.bind_tools(tools)
# ------------------------------
# 3. 定义状态(关键修复)
# ------------------------------
class State(TypedDict):
    # Conversation history. The operator.add reducer makes LangGraph
    # APPEND each node's returned messages instead of overwriting the list.
    messages: Annotated[list[AnyMessage], operator.add]
    # Running count of LLM node invocations (no reducer: last write wins).
    llm_calls: int
# ------------------------------
# 4. LLM 节点(修复逻辑)
# ------------------------------
def llm_call(state: State):
    """Ask the model for the next step; it may answer or request tool calls."""
    response = model.invoke(state["messages"])
    call_count = state.get("llm_calls", 0) + 1
    return {"messages": [response], "llm_calls": call_count}
'''
tool_calls 结构:
[
{
"name": "工具名字",
"args": { 参数字典 },
"id": "工具调用唯一ID"
}
]
'''
# ------------------------------
# 5. 工具执行节点(你写的那个!)
# ------------------------------
def tool_node(state: State):
    """Execute every tool call requested by the last AI message.

    Returns:
        {"messages": [ToolMessage, ...]} — the operator.add reducer on the
        state appends these results to the conversation history.
    """
    results = []
    last_message = state["messages"][-1]
    for tool_call in last_message.tool_calls:
        # Resolve the tool the model asked for by name.
        # NOTE: named `selected` (not `tool`) so the imported @tool
        # decorator is not shadowed.
        selected = tools_by_name[tool_call["name"]]
        observation = selected.invoke(tool_call["args"])
        results.append(
            ToolMessage(
                # ToolMessage content must be a string; the raw tool result
                # may be an int/float (e.g. multiply), so coerce explicitly.
                content=str(observation),
                tool_call_id=tool_call["id"],
            )
        )
    return {"messages": results}
# ------------------------------
# 6. 条件判断:是否继续调用工具
# ------------------------------
def should_continue(state: State) -> Literal["tool_node", END]:
    """Route to the tool node while the model keeps requesting tools,
    otherwise finish the graph run."""
    final_message = state["messages"][-1]
    return "tool_node" if final_message.tool_calls else END
# ------------------------------
# 7. 构建流程图(全部修复)
# ------------------------------
workflow = StateGraph(State)
# Nodes
workflow.add_node("llm_call", llm_call)
workflow.add_node("tool_node", tool_node)
# Entry point: every run starts at the model.
workflow.add_edge(START, "llm_call")
# Conditional edge: after the model replies, either run tools or end.
workflow.add_conditional_edges(
    "llm_call",
    should_continue
)
# After the tools run, feed the results back to the model.
workflow.add_edge("tool_node", "llm_call")
# Compile into a runnable graph.
graph = workflow.compile()
# ------------------------------
# 8. 运行!
# ------------------------------
if __name__ == "__main__":
    # Seed the state with a single user question; the graph loops
    # model -> tools -> model until no more tool calls are requested.
    initial_state = {
        "messages": [HumanMessage(content="123456789 乘 987654321 等于多少?")],
        "llm_calls": 0,
    }
    final_state = graph.invoke(initial_state)
    # Dump the full conversation transcript.
    print("\n===== 完整对话记录 =====")
    for message in final_state["messages"]:
        message.pretty_print()
2.加记忆
加入记忆很简单:在正常流程里先创建一个 checkpointer,编译图时通过 `checkpointer=` 参数传入;调用时再以 `config: RunnableConfig = {"configurable": {"thread_id": "1"}}` 这种固定形式指定 thread_id,相同 thread_id 的多次调用即可共享记忆。
python
from langgraph.graph import StateGraph, START, END
from langgraph.checkpoint.memory import InMemorySaver
from langchain_core.runnables import RunnableConfig
from typing import Annotated
from typing_extensions import TypedDict
from operator import add
class State(TypedDict):
    # Plain channel: each node's "foo" value REPLACES the previous one.
    foo: str
    # Reducer channel: operator.add concatenates each node's "bar" list
    # onto the accumulated value instead of overwriting it.
    bar: Annotated[list[str], add]
def node_a(state: State):
    """First node: set foo to "a" and contribute ["a"] to the bar channel."""
    update = {"foo": "a", "bar": ["a"]}
    return update
def node_b(state: State):
    """Second node: set foo to "b" and contribute ["b"] to the bar channel."""
    update = {"foo": "b", "bar": ["b"]}
    return update
# Linear graph: START -> node_a -> node_b -> END.
workflow = StateGraph(State)
workflow.add_node(node_a)
workflow.add_node(node_b)
workflow.add_edge(START, "node_a")
workflow.add_edge("node_a", "node_b")
workflow.add_edge("node_b", END)
# Checkpointer: snapshots the state after every step, giving the graph
# memory across invocations that share the same thread_id.
checkpointer = InMemorySaver()
graph = workflow.compile(checkpointer=checkpointer)
# thread_id selects which saved state thread to resume.
config: RunnableConfig = {"configurable": {"thread_id": "1"}}
graph.invoke({"foo": "", "bar":[]}, config)