在 LangChain 1.x 中,推荐使用 init_chat_model 函数来构建聊天模型(model)。
1. 第一种方式:支持硅基流动的调用
python
复制代码
from envs.envs import OPENAI_BASE_URL, OPENAI_BASE_MODEL, OPENAI_API_KEY
from langchain.chat_models import init_chat_model

# Build the chat model up-front with every connection setting fixed at
# construction time. Using the "openai" provider with a custom base_url
# lets this talk to any OpenAI-compatible endpoint (e.g. SiliconFlow).
chat_model = init_chat_model(
    model=OPENAI_BASE_MODEL,
    model_provider="openai",
    base_url=OPENAI_BASE_URL,
    api_key=OPENAI_API_KEY,
    temperature=0,
)

# Single blocking call; the AIMessage result is printed directly.
print(chat_model.invoke(input="hello"))
2. 第二种方式:运行时配置(构建时不指定模型,在每次调用时通过 config 传入模型参数)
python
复制代码
from envs.envs import OPENAI_BASE_URL, OPENAI_BASE_MODEL, OPENAI_API_KEY
from langchain.chat_models import init_chat_model

# No model is specified here, so init_chat_model returns a *configurable*
# model whose concrete settings are supplied per-call.
# configurable_fields="any" is required to override fields beyond
# model/model_provider (api_key, base_url, max_tokens) at runtime.
model = init_chat_model(
    temperature=0,
    configurable_fields="any",
)

# Runtime model settings must be nested under config["configurable"];
# keys placed at the top level of `config` are ignored by the model.
response = model.invoke(
    input="hello",
    config={
        "configurable": {
            "model_provider": "openai",
            "model": OPENAI_BASE_MODEL,
            "api_key": OPENAI_API_KEY,
            "base_url": OPENAI_BASE_URL,
            "max_tokens": 4096,
        },
    },
)
print(response)
3. 第三种:流式输出
python
复制代码
from envs.envs import OPENAI_BASE_URL, OPENAI_BASE_MODEL, OPENAI_API_KEY
from langchain.chat_models import init_chat_model

# Configurable model: concrete model settings are supplied at call time.
# configurable_fields="any" allows overriding api_key/base_url/max_tokens,
# not just the default model/model_provider fields.
model = init_chat_model(
    temperature=0,
    configurable_fields="any",
)

# Runtime settings must live under config["configurable"]; top-level
# config keys are ignored.
stream = model.stream(
    input="hello",
    config={
        "configurable": {
            "model_provider": "openai",
            "model": OPENAI_BASE_MODEL,
            "api_key": OPENAI_API_KEY,
            "base_url": OPENAI_BASE_URL,
            "max_tokens": 4096,
        },
    },
)

# Print each token chunk as it arrives, without newlines or buffering.
for chunk in stream:
    print(chunk.content, end="", flush=True)
4. 第四种:异步流式输出
python
复制代码
from envs.envs import OPENAI_BASE_URL, OPENAI_BASE_MODEL, OPENAI_API_KEY
from langchain.chat_models import init_chat_model
import asyncio


async def async_stream():
    """Stream a chat completion asynchronously, printing tokens as they arrive."""
    # Configurable model: no model fixed at construction.
    # configurable_fields="any" permits runtime override of every field
    # (api_key / base_url / max_tokens), not only model/model_provider.
    model = init_chat_model(
        temperature=0,
        configurable_fields="any",
    )
    # astream returns an async iterator directly (no await needed here).
    # Runtime model settings must be nested under config["configurable"];
    # top-level config keys are ignored.
    stream = model.astream(
        input="hello",
        config={
            "configurable": {
                "model_provider": "openai",
                "model": OPENAI_BASE_MODEL,
                "api_key": OPENAI_API_KEY,
                "base_url": OPENAI_BASE_URL,
                "max_tokens": 4096,
            },
        },
    )
    async for chunk in stream:
        print(chunk.content, end="", flush=True)


if __name__ == "__main__":
    asyncio.run(async_stream())
5. 提示词模板结合管道进行使用
python
复制代码
from envs.envs import OPENAI_BASE_URL, OPENAI_BASE_MODEL, OPENAI_API_KEY
from langchain.chat_models import init_chat_model
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Two-message prompt: a fixed system role plus a templated human question.
chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human", "{question}"),
    ]
)

# Fully-specified model pointing at an OpenAI-compatible endpoint.
llm = init_chat_model(
    model=OPENAI_BASE_MODEL,
    model_provider="openai",
    base_url=OPENAI_BASE_URL,
    api_key=OPENAI_API_KEY,
)

# LCEL pipeline: prompt -> model -> plain-string parser.
pipeline = chat_prompt | llm | StrOutputParser()
print(pipeline.invoke({"question": "你是谁?"}))