文章目录
一、代码示例
```python
def llm_stream_generator(agent_url: str, headers: dict, request_data: dict):
    """Stream SSE events from an upstream LLM chat endpoint.

    Args:
        agent_url: Full URL of the model's SSE chat endpoint.
        headers: HTTP headers to send (e.g. the Authorization bearer token).
        request_data: JSON payload forwarded to the model API.

    Yields:
        str: Each ``data:``-prefixed SSE line, re-terminated with a blank
        line (``"\\n\\n"``) so downstream clients receive well-formed
        SSE event frames.

    Raises:
        requests.HTTPError: If the upstream responds with a 4xx/5xx status.
    """
    # Bug fix: the original never closed the streaming response, leaking the
    # HTTP connection whenever the consumer abandoned the generator early.
    # The context manager guarantees release on normal exit and GeneratorExit.
    with requests.post(
        url=agent_url,
        headers=headers,
        json=request_data,
        stream=True,
        timeout=10,
    ) as response:
        response.raise_for_status()
        # Relay the upstream stream line by line.
        for raw_line in response.iter_lines():
            if not raw_line:
                continue
            # Parse the SSE line returned by the model (adjust to the
            # actual upstream format if it differs).
            line_data = raw_line.decode('utf-8').strip()
            if line_data.startswith('data:'):
                yield line_data + "\n\n"
```

```python
from django.http import StreamingHttpResponse
from django.views.decorators.http import require_GET
from rest_framework.exceptions import APIException
from .utils import llm_stream_generator
# Registry of connected clients. NOTE(review): never written to or read in
# this snippet — presumably a leftover from an earlier design; confirm before
# removing, as other modules may import it.
clients = []


@require_GET
def sse_endpoint(request):
    """Relay an LLM SSE stream to the HTTP client as ``text/event-stream``.

    Builds the chat payload from the GET query string, opens a streaming
    request to the model backend, and forwards its SSE frames unchanged.

    Raises:
        APIException: On any failure while setting up the stream (DRF 500).
    """
    try:
        # Bug fix: the original call omitted the required ``request_data``
        # argument, so every request raised TypeError before streaming began.
        # Payload shape assumed from the Dify-style /v1/chat-messages API —
        # TODO confirm against the actual backend contract.
        request_data = {
            "query": request.GET.get("query", ""),
            "response_mode": "streaming",
            "user": request.GET.get("user", "anonymous"),
            "inputs": {},
        }
        llm_gen = llm_stream_generator(
            agent_url="http://127.0.0.1:27080/v1/chat-messages",
            headers={"authorization": "Bearer app-xxxxxxxxx"},
            request_data=request_data,
        )
        # Stream the generator straight through; charset declared so the
        # client decodes UTF-8 payloads correctly.
        stream_response = StreamingHttpResponse(
            llm_gen,
            content_type="text/event-stream; charset=utf-8",
        )
        # Hint intermediaries (e.g. nginx) not to buffer the event stream.
        stream_response["Cache-Control"] = "no-cache"
        return stream_response
    except Exception as e:
        # Preserve the original cause chain for server-side debugging.
        raise APIException(f"服务器内部错误: {str(e)}") from e