Multi-turn conversation with Qwen in Node.js / Python

Multi-turn conversation with a large language model needs three things (see the minimal sketch after this list):

  1. an LLM
  2. a prompt
  3. RunnableWithMessageHistory

Note: multi-turn conversation with agents is out of scope here.
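
A minimal sketch of how the three pieces fit together, assuming Qwen is reached through the DashScope OpenAI-compatible endpoint and the API key is in OPENAI_API_KEY; the in-memory history here is only for illustration, the full Node.js and Python versions below persist history to files instead:

# minimal sketch: in-memory history, Qwen via the DashScope OpenAI-compatible endpoint
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables import RunnableWithMessageHistory

# 1. llm (reads OPENAI_API_KEY from the environment)
llm = ChatOpenAI(model="qwen-plus", base_url="https://dashscope.aliyuncs.com/compatible-mode/v1")

# 2. prompt with a placeholder where past messages are injected
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    MessagesPlaceholder("chat_history"),
    ("human", "{input}"),
])

# 3. RunnableWithMessageHistory wires a per-session history store into the chain
store = {}
def get_history(session_id: str):
    return store.setdefault(session_id, InMemoryChatMessageHistory())

chat = RunnableWithMessageHistory(
    prompt | llm,
    get_history,
    input_messages_key="input",
    history_messages_key="chat_history",
)

print(chat.invoke({"input": "hi"}, config={"configurable": {"session_id": "demo"}}).content)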

Node.js version

  • Dependencies (package.json):
"@langchain/anthropic": "^1.0.0",
"@langchain/community": "^1.0.2",
"@langchain/core": "^1.0.3",
"@langchain/langgraph": "^1.0.1",
"@langchain/langgraph-supervisor": "^1.0.0",
"@langchain/openai": "^1.0.0",
"axios": "^1.13.2",
"dotenv": "^17.2.3",
"langchain": "^1.0.2",
"langsmith": "^0.3.78",
"openai": "^6.8.1",
"zod": "^4.1.12"

Implementation

import { ChatOpenAI } from "@langchain/openai";
import "dotenv/config";
import * as readline from 'readline';
import { ChatPromptTemplate,MessagesPlaceholder } from "@langchain/core/prompts";

import { FileSystemChatMessageHistory } from "@langchain/community/stores/message/file_system";
import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";


// llm
const model = new ChatOpenAI({
  model: "qwen-plus",
  apiKey: process.env.OPENAI_API_KEY,  // your DashScope API key, loaded from .env
  configuration: {
    baseURL: "https://dashscope.aliyuncs.com/compatible-mode/v1",
  },
});

// prompt
const prompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "你是一个全科目的专家, 尽你所能的帮助用户回答问题, 根据用户的语言回答问题",
  ],
  new MessagesPlaceholder("chat_history"), // NOTE: multi-turn chat needs this placeholder to inject past messages; cap the number of turns according to your token budget
  ["human", "{input}"],
]);

console.log("输入exit或q退出");


// Build the chain: prompt -> model -> string output
const chain = prompt.pipe(model).pipe(new StringOutputParser());


// Wrap the chain so history is loaded and saved automatically per session (Runnable interface)
const chainWithHistory = new RunnableWithMessageHistory({
  runnable: chain,
  inputMessagesKey: "input",
  historyMessagesKey: "chat_history",
  getMessageHistory: async (sessionId) => {
    console.log("sessionId:", sessionId);
    const chatHistory = new FileSystemChatMessageHistory({
      sessionId,
      userId: "444", // 根据实际情况实现uuid,默认路径在当前目录 也可修改文件路径 `./chat_history/${sessionId}.json`
    });
    return chatHistory;
  },
});



// Create the readline interface for reading questions from the terminal
const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout
});

function askQuestion() {
  rl.question('Question: ', async (userContent) => {
    if (userContent.toLowerCase() === "exit" || userContent.toLowerCase() === "q") {
      rl.close();
      return;
    }

    try {
      // invoke the chain with the user's input; sessionId selects which history file is used
      const resp = await chainWithHistory.invoke(
        { input: userContent },
        { configurable: { sessionId: "langchain-test-session" } }
      );

      console.log('---------------------------');
      // resp is already a string thanks to StringOutputParser
      console.log(resp);
      console.log('---------------------------');

    } catch (error) {
      console.error('Error:', error.message);
    }
    // ask the next question
    askQuestion();
  });
}

// start the question loop
askQuestion();

Python version

import uuid

from langchain_core.output_parsers import StrOutputParser
from langchain_community.chat_message_histories import FileChatMessageHistory
from langchain_core.runnables import RunnableWithMessageHistory, RunnableSequence

# project-specific modules: the Qwen model and the chat prompt (see the sketch after the code)
from app.agent.model.model import llm_qwen
from app.agent.prompts.multi_chat_prompt import multi_chat_prompt
from langchain_community.agent_toolkits.file_management import FileManagementToolkit
from langchain.agents import create_agent
from app.bailian.banlian_tools import add_tools

def get_session_history(session_id: str):
    # persist each session's messages to its own JSON file
    return FileChatMessageHistory(f"{session_id}.json")

file_toolkit = FileManagementToolkit(root_dir=r"D:\code\ai-agent-test\.temp")  # raw string so the backslashes in the Windows path are not treated as escapes
file_tools = file_toolkit.get_tools()

all_tools = file_tools + [add_tools]

# not used by the chain below; agent-based multi-turn chat is out of scope for this post
agent = create_agent(model=llm_qwen, tools=all_tools)


llm_with_tools = llm_qwen.bind_tools(tools=all_tools)

# equivalent to: multi_chat_prompt | llm_with_tools | StrOutputParser()
# note: this chain only returns the model's text; tool calls are not executed here
chain = RunnableSequence(
    first=multi_chat_prompt,
    middle=[llm_with_tools],
    last=StrOutputParser()
)




# wrap the chain so history is read from and written back to the session file automatically
chain_with_history = RunnableWithMessageHistory(
    runnable=chain,
    get_session_history=get_session_history,
    input_messages_key="question",
    history_messages_key="chat_history",
)

# chat_session_id = str(uuid.uuid4())  # generate a unique session id per conversation in practice
chat_session_id = "1"

print(f"session_id: {chat_session_id}")


while True:
    user_input = input("用户:")
    if user_input.lower() == "exit" or user_input.lower() == "quit":
        break

    print("助理:", end="")
    for chunk in chain_with_history.stream(
        {"question": user_input},
        config={"configurable": {"session_id": chat_session_id}},
    ):
        print(chunk, end="")

    print("\n")