Most LangChain tutorials out there use OpenAI in their examples, but reaching it over a VPN is unstable and inconvenient. Why not try DeepSeek instead, which is fast, cheap, and easy to access?
1 Prerequisites
(1) Install LangChain
bash
pip install langchain
Install the langchain package first; we will need a few more packages later and can install them as we go. If the download times out, try the Tsinghua mirror:
bash
pip install langchain -i https://pypi.tuna.tsinghua.edu.cn/simple
(2) Get a DeepSeek API key. Go to the DeepSeek website and click the link in the top-right corner to enter the API open platform. Open the top-up menu on the left and add some credit; 10 RMB is enough for testing, and you can top up again later. Then open the API keys menu, click Create API key, and copy and save the key right away: once created, it cannot be viewed again.
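Before writing any LangChain code, it is worth a quick sanity check that the key and your balance actually work. This is just a throwaway sketch that calls the chat completions endpoint directly with requests; the URL, request shape, and the deepseek-chat model name are the same ones we use later in _invoke, and the API_KEY placeholder is yours to fill in:
py
import requests

API_KEY = "your_api_key_here"  # paste the key you just created

resp = requests.post(
    "https://api.deepseek.com/v1/chat/completions",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "model": "deepseek-chat",
        "messages": [{"role": "user", "content": "ping"}],
        "max_tokens": 8,
    },
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
If this prints a short reply, the key is good and you can move on.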

2 Building the agent
Next, create a folder and add the following files to it. (1) .env — save the API key you created in the previous step into this file:
ini
DEEPSEEK_API_KEY='{your_api_key}' # fill in the API key you created
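Note that python-dotenv strips the surrounding quotes when it loads the file, so the value you read back is the bare key. If you want to confirm the file is being picked up, a quick throwaway check (run from the same folder as .env) looks like this:
py
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory
key = os.getenv("DEEPSEEK_API_KEY", "")
print((key[:6] + "...") if key else "key not found")  # never print the full key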
(2) ds_chatbot.py — first, import the packages we need:
py
from langchain_core.language_models import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from typing import Optional, List, Any
import requests
import os
from dotenv import load_dotenv
# conversation chain and memory
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
Then load the API key from the environment:
py
load_dotenv()
Next, subclass LangChain's BaseLLM to create a DeepSeek-backed class:
py
class DeepSeekLLM(BaseLLM):
    api_key: str = os.getenv("DEEPSEEK_API_KEY", "")
    model: str = "deepseek-chat"
    temperature: float = 0.7
    max_tokens: int = 2048

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if not self.api_key:
            raise ValueError("DEEPSEEK_API_KEY is not set")

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> LLMResult:
        # _generate must handle a batch of prompts and return one
        # list of Generation objects per prompt
        generations = []
        for prompt in prompts:
            text = self._invoke(prompt, stop=stop, **kwargs)
            generations.append([Generation(text=text)])
        return LLMResult(generations=generations)

    def _invoke(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> str:
        """Make the actual call to the DeepSeek API."""
        url = "https://api.deepseek.com/v1/chat/completions"
        headers = {"Authorization": f"Bearer {self.api_key}"}
        data = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
        }
        if stop:
            data["stop"] = stop  # the OpenAI-compatible API accepts a stop list
        response = requests.post(url, headers=headers, json=data, timeout=60)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]

    @property
    def _llm_type(self) -> str:
        return "deepseek"
Now wrap the class in a conversation chain with memory, and we have a chatbot:
py
llm = DeepSeekLLM()
memory = ConversationBufferMemory()
conversation = ConversationChain(llm=llm, memory=memory)
print("DeepSeek chatbot started (type 'quit' to end the chat)")
while True:
    try:
        user_input = input("You: ")
        if user_input.lower() in ["退出", "exit", "quit"]:
            break
        response = conversation.predict(input=user_input)
        print(f"Assistant: {response}")
    except KeyboardInterrupt:
        print("\nConversation ended")
        break
    except Exception as e:
        print(f"Error: {e}")
3 Running and testing
The complete ds_chatbot.py:
py
# A minimal runnable agent; we will flesh it out step by step later
from langchain_core.language_models import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from typing import Optional, List, Any
import requests
import os
from dotenv import load_dotenv
# conversation chain and memory
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

load_dotenv()


class DeepSeekLLM(BaseLLM):
    api_key: str = os.getenv("DEEPSEEK_API_KEY", "")
    model: str = "deepseek-chat"
    temperature: float = 0.7
    max_tokens: int = 2048

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if not self.api_key:
            raise ValueError("DEEPSEEK_API_KEY is not set")

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> LLMResult:
        # _generate must handle a batch of prompts and return one
        # list of Generation objects per prompt
        generations = []
        for prompt in prompts:
            text = self._invoke(prompt, stop=stop, **kwargs)
            generations.append([Generation(text=text)])
        return LLMResult(generations=generations)

    def _invoke(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> str:
        """Make the actual call to the DeepSeek API."""
        url = "https://api.deepseek.com/v1/chat/completions"
        headers = {"Authorization": f"Bearer {self.api_key}"}
        data = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
        }
        if stop:
            data["stop"] = stop  # the OpenAI-compatible API accepts a stop list
        response = requests.post(url, headers=headers, json=data, timeout=60)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]

    @property
    def _llm_type(self) -> str:
        return "deepseek"
# usage example
if __name__ == "__main__":
    llm = DeepSeekLLM()
    memory = ConversationBufferMemory()
    conversation = ConversationChain(llm=llm, memory=memory)
    print("DeepSeek chatbot started (type 'quit' to end the chat)")
    while True:
        try:
            user_input = input("You: ")
            if user_input.lower() in ["退出", "exit", "quit"]:
                break
            response = conversation.predict(input=user_input)
            print(f"Assistant: {response}")
        except KeyboardInterrupt:
            print("\nConversation ended")
            break
        except Exception as e:
            print(f"Error: {e}")
Now run the file (for example, python ds_chatbot.py in the terminal), and you can chat with the bot you just built right in your IDE's terminal. You can also go back to the DeepSeek API open platform to check your API usage.