Advanced AI Large Model Applications (Part 3): Streaming Output from Large Models

Streaming Output from Models with Thinking Capability

Response Data

  • If reasoning_content is present, the model is still thinking
  • If reasoning_content is absent, the thinking phase has ended
  • When [DONE] is returned, the response is complete
json
 {
    "choices": [
        {
            "delta": {
                "content": "xxx",
                "reasoning_content": "xxx",
                "role": "assistant"
            },
            "index": 0
        }
    ],
    "created": xxx,
    "id": "xxx",
    "model": "xxx",
    "service_tier": "default",
    "object": "chat.completion.chunk",
    "usage": null
} 
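
The full implementation follows in the next section; as a warm-up, here is a minimal sketch of the per-line decision (illustrative only: the helper name classify_sse_line and the sample line are made up and not part of the article's code). It strips the "data: " prefix, detects the "[DONE]" sentinel, and treats a non-null reasoning_content in the delta as "still thinking".

python
import json

def classify_sse_line(line: str):
    """Classify one SSE line: returns 'done', 'thinking', 'content', or None."""
    if not line.startswith("data: "):
        return None
    body = line[len("data: "):]
    if body.strip() == "[DONE]":
        return "done"
    delta = json.loads(body)["choices"][0].get("delta", {})
    # A non-null reasoning_content in the delta means the reasoning phase is still running
    return "thinking" if delta.get("reasoning_content") is not None else "content"

# Hypothetical sample line, for illustration only
sample = 'data: {"choices": [{"delta": {"reasoning_content": "thinking...", "content": null}, "index": 0}]}'
print(classify_sse_line(sample))  # -> thinking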

Code Logic

python
import sys
import json
import requests

# Model configuration
_ai_config = {
    "model": "deepseek-reasoner",
    "url": "https://api.deepseek.com/chat/completions",
    "key": "you deepseek key",
}

# Called when thinking starts
def _on_think_start():
    print("think start")

# Called when thinking ends
def _on_think_end():
    print("think end")

# Called while thinking: receives each reasoning chunk
def _on_thinking(chunk_text):
    sys.stdout.write(chunk_text)
    sys.stdout.flush()

# Called for each streamed answer chunk
def _on_receiving(full_text, chunk_text):
    sys.stdout.write(chunk_text)
    sys.stdout.flush()

# Called when the output is finished
def _on_finish(full_text):
    print("finish: " + full_text)


# Streaming call
def chat_stream(
    histories,
    ai_config=None,
    on_receiving=None,
    on_finish=None,
    on_thinking=None,
    on_think_start=None,
    on_think_end=None,
    response_format="text",
):

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {ai_config['key']}",
    }

    payload = {
        "model": ai_config["model"],
        "messages": histories,
        "response_format": {"type": response_format},
        "stream": True,
    }

    full_content = ""

    try:
        # Send the POST request with stream=True to enable a streaming response
        with requests.post(
            ai_config["url"], headers=headers, json=payload, stream=True, timeout=60
        ) as response:
            # Check the response status code
            response.raise_for_status()

            # Explicitly set the response encoding to UTF-8 to avoid garbled Chinese output
            response.encoding = "utf-8"

            is_thinking = False

            # Process the streamed response content line by line
            for line in response.iter_lines(decode_unicode=True):

                if line:
                    if line.startswith("data: ") and not line.startswith(
                        "data: [DONE]"
                    ):
                        data = json.loads(line[6:])
                        # Extract and handle the returned content (assuming an OpenAI-style API response)
                        if "choices" in data and len(data["choices"]) > 0:
                            delta = data["choices"][0].get("delta", {})
                            if "reasoning_content" in delta:
                                # 存在reasoning_content,说明正在思考
                                current_thinking = True
                            else:
                                # 没有reasoning_content,说明已经思考结束
                                current_thinking = False

                            if current_thinking is True and is_thinking is False:
                                is_thinking = current_thinking
                                if on_think_start is not None:
                                    on_think_start()
                                    continue
                            if current_thinking is False and is_thinking is True:
                                is_thinking = current_thinking
                                if on_think_end is not None:
                                    on_think_end()
                                    continue

                            is_thinking = current_thinking

                            if is_thinking is True:
                                if on_thinking is not None:
                                    on_thinking(delta.get("reasoning_content", ""))
                                    continue

                            content = delta.get("content", "")
                            full_content += content
                            if on_receiving is not None:
                                on_receiving(full_content, content)

        if on_finish is not None:
            on_finish(full_content)
    except requests.exceptions.RequestException as e:
        print(f"Request error: {e}")
    except json.JSONDecodeError as e:
        print(f"JSON decode error: {e}")
    except Exception as e:
        print(f"Unexpected error: {e}")

    return full_content

# Call the model
chat_stream(
    ai_config=_ai_config,
    on_think_start=_on_think_start,
    on_think_end=_on_think_end,
    on_thinking=_on_thinking,
    on_receiving=_on_receiving,
    on_finish=_on_finish,
    histories=[
        {
            "role": "user",
            "content": "你好",
        }
    ],
)
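
Because all presentation goes through callbacks, the same chat_stream can feed sinks other than stdout. The sketch below is a small illustrative variation (not from the original article): it reuses the chat_stream and _ai_config defined above but buffers the reasoning text instead of printing it, so it can be stored alongside the final answer.

python
# Illustrative variation: buffer the reasoning text instead of printing it
reasoning_chunks = []

def _collect_thinking(chunk_text):
    # Accumulate reasoning chunks for later use instead of writing to stdout
    reasoning_chunks.append(chunk_text)

def _collect_finish(full_text):
    print("reasoning characters:", len("".join(reasoning_chunks)))
    print("answer:", full_text)

answer = chat_stream(
    ai_config=_ai_config,
    on_thinking=_collect_thinking,
    on_finish=_collect_finish,
    histories=[{"role": "user", "content": "你好"}],
)

Since chat_stream only accumulates full_content itself, any presentation concern (console, websocket, log file) stays entirely inside the callbacks.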