【大模型】-LangChain--stream流式同步异步

文章目录

1.同步stream流

python 复制代码
import os

from langchain_community.chat_models import ChatTongyi

# DashScope API key for the Tongyi (Qwen) models; replace with a real key.
os.environ["DASHSCOPE_API_KEY"] = "sk-秘钥"
llm = ChatTongyi(model="qwen-plus")

# Synchronous streaming: llm.stream() yields AIMessageChunk objects as the
# model produces tokens (the async counterpart is llm.astream()).
chunks = []
for chunk in llm.stream("天空是什么颜色?"):
    # Keep every chunk so the full reply can be reassembled later if needed.
    chunks.append(chunk)
    print(chunk.content, end="|", flush=True)

2.异步astream流

python 复制代码
import asyncio
import os

from langchain_community.chat_models import ChatTongyi
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# DashScope API key for the Tongyi (Qwen) models; replace with a real key.
os.environ["DASHSCOPE_API_KEY"] = "sk-秘钥"
llm = ChatTongyi(model="qwen-plus")

# LCEL chain: prompt -> model -> parser. Every stage supports streaming,
# so the parsed text chunks flow through as the model generates them.
prompt = ChatPromptTemplate.from_template("给我讲一个{topic}的故事")
output_parser = StrOutputParser()
chain = prompt | llm | output_parser


async def async_astream_chain():
    """Asynchronously stream the chain's output, printing each text chunk."""
    async for chunk in chain.astream({"topic": "干将"}):
        print(chunk, end="|", flush=True)


asyncio.run(async_astream_chain())

3.异步astream流json输出

python 复制代码
import asyncio
import os

from langchain_community.chat_models import ChatTongyi
from langchain_core.output_parsers import JsonOutputParser

# DashScope API key for the Tongyi (Qwen) models; replace with a real key.
os.environ["DASHSCOPE_API_KEY"] = "sk-秘钥"
llm = ChatTongyi(model="qwen-plus")

# JsonOutputParser streams *partial* JSON: each yielded chunk is the
# best-effort parse of everything received so far, so the printed object
# grows toward the complete result.
output_parser = JsonOutputParser()
chain = llm | output_parser


async def async_astream_chain():
    """Stream a JSON answer, printing the progressively completed object."""
    async for chunk in chain.astream(
        "以JSON格式输出法国、西班牙和日本的国家和人口列表"
    ):
        print(chunk, flush=True)


asyncio.run(async_astream_chain())

4.异步事件astream_events流

python 复制代码
import asyncio
import os

from langchain_community.chat_models import ChatTongyi

# DashScope API key for the Tongyi (Qwen) models; replace with a real key.
os.environ["DASHSCOPE_API_KEY"] = "sk-秘钥"
llm = ChatTongyi(model="qwen-plus")


async def async_astream():
    """Collect and print the full astream_events lifecycle event stream.

    astream_events emits structured events (on_chat_model_start,
    on_chat_model_stream, on_chat_model_end, ...); version="v2" selects
    the current event schema.
    """
    events = [event async for event in llm.astream_events("hello", version="v2")]
    print(events)


asyncio.run(async_astream())


"""
事件
[{
	'event': 'on_chat_model_start',
	'data': {
		'input': 'hello'
	},
	'name': 'ChatTongyi',
	'tags': [],
	'run_id': 'ef708116-49b9-4572-8860-9bcf04c304e9',
	'metadata': {
		'ls_provider': 'tongyi',
		'ls_model_type': 'chat',
		'ls_model_name': 'qwen-plus'
	},
	'parent_ids': []
}, {
	'event': 'on_chat_model_stream',
	'run_id': 'ef708116-49b9-4572-8860-9bcf04c304e9',
	'name': 'ChatTongyi',
	'tags': [],
	'metadata': {
		'ls_provider': 'tongyi',
		'ls_model_type': 'chat',
		'ls_model_name': 'qwen-plus'
	},
	'data': {
		'chunk': AIMessageChunk(content = 'Hello', additional_kwargs = {}, response_metadata = {}, id = 'lc_run--ef708116-49b9-4572-8860-9bcf04c304e9')
	},
	'parent_ids': []
}, {
	'event': 'on_chat_model_stream',
	'run_id': 'ef708116-49b9-4572-8860-9bcf04c304e9',
	'name': 'ChatTongyi',
	'tags': [],
	'metadata': {
		'ls_provider': 'tongyi',
		'ls_model_type': 'chat',
		'ls_model_name': 'qwen-plus'
	},
	'data': {
		'chunk': AIMessageChunk(content = '! How', additional_kwargs = {}, response_metadata = {}, id = 'lc_run--ef708116-49b9-4572-8860-9bcf04c304e9')
	},
	'parent_ids': []
}, {
	'event': 'on_chat_model_stream',
	'run_id': 'ef708116-49b9-4572-8860-9bcf04c304e9',
	'name': 'ChatTongyi',
	'tags': [],
	'metadata': {
		'ls_provider': 'tongyi',
		'ls_model_type': 'chat',
		'ls_model_name': 'qwen-plus'
	},
	'data': {
		'chunk': AIMessageChunk(content = ' can I assist', additional_kwargs = {}, response_metadata = {}, id = 'lc_run--ef708116-49b9-4572-8860-9bcf04c304e9')
	},
	'parent_ids': []
}, {
	'event': 'on_chat_model_stream',
	'run_id': 'ef708116-49b9-4572-8860-9bcf04c304e9',
	'name': 'ChatTongyi',
	'tags': [],
	'metadata': {
		'ls_provider': 'tongyi',
		'ls_model_type': 'chat',
		'ls_model_name': 'qwen-plus'
	},
	'data': {
		'chunk': AIMessageChunk(content = ' you today?', additional_kwargs = {}, response_metadata = {}, id = 'lc_run--ef708116-49b9-4572-8860-9bcf04c304e9')
	},
	'parent_ids': []
}, {
	'event': 'on_chat_model_stream',
	'run_id': 'ef708116-49b9-4572-8860-9bcf04c304e9',
	'name': 'ChatTongyi',
	'tags': [],
	'metadata': {
		'ls_provider': 'tongyi',
		'ls_model_type': 'chat',
		'ls_model_name': 'qwen-plus'
	},
	'data': {
		'chunk': AIMessageChunk(content = ' 😊', additional_kwargs = {}, response_metadata = {}, id = 'lc_run--ef708116-49b9-4572-8860-9bcf04c304e9')
	},
	'parent_ids': []
}, {
	'event': 'on_chat_model_stream',
	'run_id': 'ef708116-49b9-4572-8860-9bcf04c304e9',
	'name': 'ChatTongyi',
	'tags': [],
	'metadata': {
		'ls_provider': 'tongyi',
		'ls_model_type': 'chat',
		'ls_model_name': 'qwen-plus'
	},
	'data': {
		'chunk': AIMessageChunk(content = '', additional_kwargs = {}, response_metadata = {
			'finish_reason': 'stop',
			'request_id': '3952154f-a9f5-4b39-a3c5-6eb8c85a6213',
			'token_usage': {
				'input_tokens': 9,
				'output_tokens': 11,
				'total_tokens': 20,
				'prompt_tokens_details': {
					'cached_tokens': 0
				}
			}
		}, id = 'lc_run--ef708116-49b9-4572-8860-9bcf04c304e9')
	},
	'parent_ids': []
}, {
	'event': 'on_chat_model_stream',
	'run_id': 'ef708116-49b9-4572-8860-9bcf04c304e9',
	'name': 'ChatTongyi',
	'tags': [],
	'metadata': {
		'ls_provider': 'tongyi',
		'ls_model_type': 'chat',
		'ls_model_name': 'qwen-plus'
	},
	'data': {
		'chunk': AIMessageChunk(content = '', additional_kwargs = {}, response_metadata = {}, id = 'lc_run--ef708116-49b9-4572-8860-9bcf04c304e9', chunk_position = 'last')
	},
	'parent_ids': []
}, {
	'event': 'on_chat_model_end',
	'data': {
		'output': AIMessageChunk(content = 'Hello! How can I assist you today? 😊', additional_kwargs = {}, response_metadata = {
			'finish_reason': 'stop',
			'request_id': '3952154f-a9f5-4b39-a3c5-6eb8c85a6213',
			'token_usage': {
				'input_tokens': 9,
				'output_tokens': 11,
				'total_tokens': 20,
				'prompt_tokens_details': {
					'cached_tokens': 0
				}
			}
		}, id = 'lc_run--ef708116-49b9-4572-8860-9bcf04c304e9')
	},
	'run_id': 'ef708116-49b9-4572-8860-9bcf04c304e9',
	'name': 'ChatTongyi',
	'tags': [],
	'metadata': {
		'ls_provider': 'tongyi',
		'ls_model_type': 'chat',
		'ls_model_name': 'qwen-plus'
	},
	'parent_ids': []
}]
"""

5.异步并发(asyncio.gather)——注意:gather 在单线程事件循环上并发调度协程,并非多线程

await asyncio.gather(task1(), task2())

python 复制代码
import asyncio
import os

from langchain_community.chat_models import ChatTongyi
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# DashScope API key for the Tongyi (Qwen) models; replace with a real key.
os.environ["DASHSCOPE_API_KEY"] = "sk-秘钥"
llm = ChatTongyi(model="qwen-plus")


async def _stream_story(topic):
    """Stream a short story about *topic*, printing each text chunk."""
    prompt = ChatPromptTemplate.from_template("给我讲一个{topic}的故事,100字")
    chain = prompt | llm | StrOutputParser()
    async for chunk in chain.astream({"topic": topic}):
        print(chunk, end="|", flush=True)


async def task1():
    await _stream_story("干将")


async def task2():
    await _stream_story("秦始皇")


async def main():
    # Sequential alternative: `await task1(); await task2()` would finish
    # one stream completely before starting the other.
    # asyncio.gather interleaves both streams on the event loop —
    # single-threaded cooperative concurrency, not multithreading.
    await asyncio.gather(task1(), task2())


asyncio.run(main())
相关推荐
lxh01132 小时前
二叉树中的最大路径和
前端·算法·js
CQ_YM2 小时前
Linux进程基础
linux·服务器·进程
CC码码2 小时前
前端字符串排序搜索可以更加细化了
前端·javascript·面试
喵爱吃鱼2 小时前
kuma-ui中Flex vs FlexMin的关键区别
前端
JH灰色2 小时前
【大模型】-LangChain--Agent
windows·microsoft·langchain
codingMan2 小时前
[Android Compose] 拒绝闪烁!打造丝滑的聊天页面列表(仿微信效果)
前端
你别追我跑不动2 小时前
基于代码扫描的 Icon 优化实践
前端·性能优化
磊磊磊磊磊2 小时前
用AI做了个排版工具,分享一下如何高效省钱地用AI!
前端·后端·react.js
喵爱吃鱼2 小时前
flex 0 flex 1 flex none flex auto 应该在什么场景下使用
前端