Multi-turn chat with a large model needs three things:
- an LLM
- a prompt (with a placeholder for history)
- RunnableWithMessageHistory
Note: agent-style multi-turn conversation is not covered here.
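A minimal in-memory sketch of how the three pieces fit together (the model name and the plain-object session store are assumptions for illustration; the full versions below persist history to disk):
```javascript
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts";
import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import { InMemoryChatMessageHistory } from "@langchain/core/chat_history";

const model = new ChatOpenAI({ model: "gpt-4o-mini" }); // 1. llm
const prompt = ChatPromptTemplate.fromMessages([        // 2. prompt...
  ["system", "You are a helpful assistant."],
  new MessagesPlaceholder("chat_history"),              //    ...with a slot for past turns
  ["human", "{input}"],
]);
const histories = {}; // sessionId -> history
const chatbot = new RunnableWithMessageHistory({        // 3. history wrapper
  runnable: prompt.pipe(model),
  inputMessagesKey: "input",
  historyMessagesKey: "chat_history",
  getMessageHistory: (sessionId) =>
    (histories[sessionId] ??= new InMemoryChatMessageHistory()),
});
// every invoke with the same sessionId now sees the prior turns
await chatbot.invoke({ input: "hi" }, { configurable: { sessionId: "s1" } });
```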
Node version
- Dependencies (package.json)
```json
{
  "@langchain/anthropic": "^1.0.0",
  "@langchain/community": "^1.0.2",
  "@langchain/core": "^1.0.3",
  "@langchain/langgraph": "^1.0.1",
  "@langchain/langgraph-supervisor": "^1.0.0",
  "@langchain/openai": "^1.0.0",
  "axios": "^1.13.2",
  "dotenv": "^17.2.3",
  "langchain": "^1.0.2",
  "langsmith": "^0.3.78",
  "openai": "^6.8.1",
  "zod": "^4.1.12"
}
```
Implementation
```javascript
import { ChatOpenAI } from "@langchain/openai";
import "dotenv/config";
import * as readline from "readline";
import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts";
import { FileSystemChatMessageHistory } from "@langchain/community/stores/message/file_system";
import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
// llm: Qwen via DashScope's OpenAI-compatible endpoint
const model = new ChatOpenAI({
  model: "qwen-plus",
  apiKey: process.env.OPENAI_API_KEY, // set your own API key here
  configuration: {
    baseURL: "https://dashscope.aliyuncs.com/compatible-mode/v1",
  },
});
// prompt
const prompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "You are an expert in every subject. Help the user as best you can, and answer in the user's language.",
  ],
  // Multi-turn chat needs a messages placeholder where history gets injected.
  // Cap the number of turns to fit your token budget (see the trimming sketch
  // after this code block).
  new MessagesPlaceholder("chat_history"),
  ["human", "{input}"],
]);
console.log("输入exit或q退出");
// 构建输出
const chain = prompt.pipe(model).pipe(new StringOutputParser());
// Runnable 接口的api
const chainWithHistory = new RunnableWithMessageHistory({
runnable: chain,
inputMessagesKey: "input",
historyMessagesKey: "chat_history",
getMessageHistory: async (sessionId) => {
console.log("sessionId:", sessionId);
const chatHistory = new FileSystemChatMessageHistory({
sessionId,
userId: "444", // 根据实际情况实现uuid,默认路径在当前目录 也可修改文件路径 `./chat_history/${sessionId}.json`
});
return chatHistory;
},
});
// create the readline interface
const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
});
function askQuestion() {
  rl.question("Question: ", async (userContent) => {
    if (userContent.toLowerCase() === "exit" || userContent.toLowerCase() === "q") {
      rl.close();
      return;
    }
    try {
      // pass the user's actual input into the chain
      const resp = await chainWithHistory.invoke(
        { input: userContent },
        { configurable: { sessionId: "langchain-test-session" } }
      );
      console.log("---------------------------");
      // resp is already a string (StringOutputParser), print it directly
      console.log(resp);
      console.log("---------------------------");
    } catch (error) {
      console.error("Error:", error.message);
    }
    // keep prompting
    askQuestion();
  });
}
// start the question loop
askQuestion();
```
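On the history-placeholder comment above: one way to cap what gets replayed each turn is `trimMessages` from `@langchain/core/messages`. A sketch (not wired into the chain above; counting whole messages instead of real tokens is an assumption to keep it model-free):
```javascript
import { trimMessages, SystemMessage, HumanMessage, AIMessage } from "@langchain/core/messages";

const history = [
  new SystemMessage("You are an expert in every subject."),
  new HumanMessage("hi"),
  new AIMessage("Hello! How can I help?"),
  new HumanMessage("What is a runnable?"),
];

// keep the newest messages (always including the system prompt) under the budget
const recent = await trimMessages(history, {
  maxTokens: 2,                        // budget: 2 "tokens" with the counter below
  strategy: "last",                    // drop the oldest messages first
  tokenCounter: (msgs) => msgs.length, // 1 "token" per message; pass the model for real token counts
  includeSystem: true,                 // keep the system message even when trimming
});
console.log(recent.map((m) => m.content));
```
Applying this to the stored messages before they reach the placeholder keeps each request inside your token budget.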
Python version
```python
import uuid

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory, FileChatMessageHistory
from langchain_community.agent_toolkits.file_management import FileManagementToolkit
# from langgraph.prebuilt import create_react_agent
from langchain.agents import create_agent

# local project modules (model wrapper, prompt template, custom tool)
from app.agent.model.model import llm_qwen
from app.agent.prompts.multi_chat_prompt import multi_chat_prompt
from app.bailian.banlian_tools import add_tools
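
# The imported multi_chat_prompt is not shown in this post; given the keys
# configured below ("question" and "chat_history"), it presumably looks
# roughly like the Node prompt above:
#
#   multi_chat_prompt = ChatPromptTemplate.from_messages([
#       ("system", "You are an expert in every subject. ..."),
#       MessagesPlaceholder("chat_history"),
#       ("human", "{question}"),
#   ])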
def get_session_history(session_id: str):
    # persist each session's messages to ./<session_id>.json
    # (in-memory alternative: store.setdefault(session_id, ChatMessageHistory()))
    return FileChatMessageHistory(f"{session_id}.json")
file_toolkit = FileManagementToolkit(root_dir=r"D:\code\ai-agent-test\.temp")  # raw string, so "\a" etc. aren't treated as escape sequences
file_tools = file_toolkit.get_tools()
all_tools = file_tools + [add_tools]

# built only for comparison; unused below (agent multi-turn chat is out of scope)
agent = create_agent(model=llm_qwen, tools=all_tools)
llm_with_tools = llm_qwen.bind_tools(tools=all_tools)
# prompt -> model -> plain string output
# (StrOutputParser only surfaces message text: if the model decides to call a
# tool, executing it needs an agent loop, which is out of scope here)
chain = multi_chat_prompt | llm_with_tools | StrOutputParser()

# to pre-seed a session manually:
# chat_history = ChatMessageHistory()
# chat_history.add_user_message("I'm Peppa the wild boar; we're building a browser-automation agent project (selenium-style)")
# chat_history.add_ai_message("")
chain_with_history = RunnableWithMessageHistory(
    runnable=chain,
    get_session_history=get_session_history,
    input_messages_key="question",        # matches the prompt's input variable
    history_messages_key="chat_history",  # matches the prompt's MessagesPlaceholder
)

# chat_session_id = str(uuid.uuid4())  # unique session per run in production
chat_session_id = "1"  # fixed id keeps the on-disk history easy to inspect while testing
print(f"session_id: {chat_session_id}")
while True:
    user_input = input("User: ")
    if user_input.lower() in ("exit", "quit"):
        break
    print("Assistant: ", end="")
    # stream chunks as they arrive; StrOutputParser yields plain strings
    for chunk in chain_with_history.stream(
        {"question": user_input},
        config={"configurable": {"session_id": chat_session_id}},
    ):
        print(chunk, end="")
    print("\n")
```