Calling the iFLYTEK Spark LLM API from Python with Streaming
- Preface
- [1. Obtaining the API Credentials](#1. Obtaining the API Credentials)
- [2. Quick Start: Calling the Spark LLM](#2. Quick Start: Calling the Spark LLM)
- [3. Implementing Streamed AI Replies](#3. Implementing Streamed AI Replies)
- References
Preface
I've recently become interested in large language models and wanted to call an LLM API to get the kind of question-and-answer experience an AI assistant offers. iFLYTEK Spark happens to provide a free API quota, so I gave it a try with Python, and it turned out to be quite fun. Below I describe how to use it; if you'd like, you can also go straight to the official iFLYTEK Spark documentation.
1. Obtaining the API Credentials
To call the Spark API, first register an account on the official site, then pick the model you need and order the free API package. From what I saw, the free tier is valid for one year, which is plenty.
Link: https://console.xfyun.cn/services/bm35
Select an application (if you don't have one, click the + button to create it), choose the free package, and place the order.
Once the order is complete, note down the credentials shown earlier; they will be filled into the code parameters later.
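To avoid hard-coding the credentials into every script, one option is to keep them in environment variables and read them at startup. This is only a minimal sketch; the variable names below are my own convention, not something required by the SDK.
python
import os

# Read the credentials from environment variables (the names are arbitrary).
SPARKAI_APP_ID = os.environ.get("SPARKAI_APP_ID", "")
SPARKAI_API_KEY = os.environ.get("SPARKAI_API_KEY", "")
SPARKAI_API_SECRET = os.environ.get("SPARKAI_API_SECRET", "")

if not all([SPARKAI_APP_ID, SPARKAI_API_KEY, SPARKAI_API_SECRET]):
    raise RuntimeError("Spark credentials are not set; export them before running.")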
2. Quick Start: Calling the Spark LLM
Let's start with a simple example to check that the credentials work.
python
from sparkai.llm.llm import ChatSparkLLM, ChunkPrintHandler
from sparkai.core.messages import ChatMessage

# URL for Spark Max; for the URL of other model versions see https://www.xfyun.cn/doc/spark/Web.html
SPARKAI_URL = 'wss://spark-api.xf-yun.com/v3.5/chat'
# Spark API credentials; look them up in the iFLYTEK console (https://console.xfyun.cn/services/bm35)
SPARKAI_APP_ID = ""      # APPID from the console
SPARKAI_API_SECRET = ""  # APISecret from the console
SPARKAI_API_KEY = ""     # APIKey from the console
# domain value for Spark Max; for other model versions see https://www.xfyun.cn/doc/spark/Web.html
SPARKAI_DOMAIN = 'generalv3.5'

if __name__ == '__main__':
    spark = ChatSparkLLM(
        spark_api_url=SPARKAI_URL,
        spark_app_id=SPARKAI_APP_ID,
        spark_api_key=SPARKAI_API_KEY,
        spark_api_secret=SPARKAI_API_SECRET,
        spark_llm_domain=SPARKAI_DOMAIN,
        streaming=False,
    )
    messages = [ChatMessage(
        role="user",
        content='你好呀'
    )]
    handler = ChunkPrintHandler()
    # Non-streaming call: the whole result comes back at once, then we print it
    a = spark.generate([messages], callbacks=[handler])
    for chunk in a:
        print(chunk)
As you can see, the API call succeeds, but this simple example doesn't really cover the scenario we want. Next I'll show how to implement a streamed reply.
3. Implementing Streamed AI Replies
The official site also provides streaming sample code, but it uses raw websockets, which I'm not very familiar with, so I looked for another approach: streaming can also be achieved with spark.stream plus yield. To better demonstrate how the frontend and backend work together, I built a simple service with Flask and used requests on the client side to simulate user requests.
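Before wiring it into Flask, here is the core idea in isolation: with streaming=True, spark.stream(...) yields the reply chunk by chunk, and each chunk exposes the new text in its content attribute. This is just a minimal sketch reusing the URL and domain constants from the example above; fill in your own credentials.
python
from sparkai.llm.llm import ChatSparkLLM
from sparkai.core.messages import ChatMessage

spark = ChatSparkLLM(
    spark_api_url='wss://spark-api.xf-yun.com/v3.5/chat',
    spark_app_id='',      # your APPID
    spark_api_key='',     # your APIKey
    spark_api_secret='',  # your APISecret
    spark_llm_domain='generalv3.5',
    streaming=True,
)

# Print the reply piece by piece as it arrives
for chunk in spark.stream([ChatMessage(role="user", content="你好呀")]):
    print(chunk.content, end="", flush=True)
print()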
1) SparkApi.py
To keep things tidy, I put the API-calling logic into a class of its own.
python
from sparkai.llm.llm import ChatSparkLLM, ChunkPrintHandler
from sparkai.core.messages import ChatMessage


class SparkApi(object):
    # Store the credentials and conversation state
    def __init__(self, APP_ID, APIKey, APISecret, Spark_url, Spark_domain):
        self.APP_ID = APP_ID
        self.APIKey = APIKey
        self.APISecret = APISecret
        self.Spark_url = Spark_url
        self.Spark_domain = Spark_domain
        self.spark = None
        self.messages = []
        self.assistant_response = ""

    def init_spark(self, is_stream):
        self.spark = ChatSparkLLM(
            spark_api_url=self.Spark_url,
            spark_app_id=self.APP_ID,
            spark_api_key=self.APIKey,
            spark_api_secret=self.APISecret,
            spark_llm_domain=self.Spark_domain,
            streaming=is_stream,
        )

    # Get the AI reply (text-to-text), streamed chunk by chunk
    def stream_generator(self, user_message):
        self.messages.append(ChatMessage(role="user", content=user_message))
        # Stream the AI reply
        response = self.spark.stream(self.messages)
        for res in response:
            self.assistant_response += res.content
            # End of the reply: append the full answer to the conversation history
            if res.additional_kwargs:
                self.messages.append(ChatMessage(role="assistant", content=self.assistant_response))
                self.assistant_response = ""
                print(self.messages)
            yield res.content.encode('utf-8')
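As a quick sanity check, you can drive this class directly without Flask. A minimal sketch, assuming SparkApi.py is on the import path and the placeholder credentials are replaced with your own:
python
from SparkApi import SparkApi

spark = SparkApi(
    APP_ID='', APIKey='', APISecret='',
    Spark_url='wss://spark-api.xf-yun.com/v3.5/chat',
    Spark_domain='generalv3.5',
)
spark.init_spark(True)

# Consume the generator and print each UTF-8 chunk as it arrives
for chunk in spark.stream_generator("你好呀"):
    print(chunk.decode('utf-8'), end="", flush=True)
print()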
2) SparkApiMain.py
python
from flask import Flask, request, Response, jsonify
from SparkApi import SparkApi
# from SparkPicture import SparkPicture

app = Flask(__name__)

# URL for Spark Max; for the URL of other model versions see https://www.xfyun.cn/doc/spark/Web.html
SPARKAI_URL = 'wss://spark-api.xf-yun.com/v3.5/chat'
# SPARKAI_URL = 'wss://spark-api.xf-yun.com/v4.0/chat'
# Spark API credentials; look them up in the iFLYTEK console (https://console.xfyun.cn/services/bm35)
SPARKAI_APP_ID = ''
SPARKAI_API_SECRET = ''
SPARKAI_API_KEY = ''
# domain value for Spark Max; for other model versions see https://www.xfyun.cn/doc/spark/Web.html
SPARKAI_DOMAIN = 'generalv3.5'
# SPARKAI_DOMAIN = '4.0Ultra'

# The commented-out block below is for the image-generation API. I created separate applications
# for image generation and chat, so I need two sets of credentials; if you put both into a single
# application you don't need this.
# AI image generation
# APP_ID = ''
# API_Secret = ''
# API_KEY = ''

CHAT = {
    "domain": 'general',
    "temperature": 0.5,
    "max_tokens": 4096,
    "width": 512,
    "height": 512
}


@app.route('/stream', methods=['POST'])
def main():
    user_data = request.json.get('question', '')  # the frontend sends JSON containing a 'question' key
    spark.init_spark(True)
    return Response(spark.stream_generator(user_data), mimetype='text/plain')


# Image-generation endpoint
# @app.route('/picture', methods=['POST'])
# def gener_picture():
#     desc = request.json.get('desc', '')  # the frontend sends JSON containing a 'desc' key
#     res = spark_picture.generator_picture(desc)
#     mock_image_url = spark_picture.parser_Message(res)
#     return jsonify({"image_url": mock_image_url})


if __name__ == '__main__':
    spark = SparkApi(SPARKAI_APP_ID, SPARKAI_API_KEY, SPARKAI_API_SECRET, SPARKAI_URL, SPARKAI_DOMAIN)
    # Image-generation helper; uncomment this (and the route above) if you want to try it.
    # The SparkPicture class is shown later.
    # spark_picture = SparkPicture(app_id=APP_ID, api_key=API_KEY, api_secret=API_Secret, chat=CHAT)
    app.run(host='0.0.0.0', port=5000, debug=True)
3) SparkPicture.py
Since I put chat and image generation together in one project, here is the image-generation code as well. Everything related to it is commented out above, so you can try the AI chat first and experiment with image generation later if you're interested.
python
# encoding: UTF-8
import time
import requests
from datetime import datetime
from wsgiref.handlers import format_date_time
from time import mktime
import hashlib
import base64
import hmac
from urllib.parse import urlencode
import json
from PIL import Image
from io import BytesIO


class AssembleHeaderException(Exception):
    def __init__(self, msg):
        self.message = msg


class Url:
    def __init__(self, host, path, schema):
        self.host = host
        self.path = path
        self.schema = schema


class SparkPicture:
    def __init__(self, app_id, api_key, api_secret, chat):
        self.app_id = app_id
        self.api_key = api_key
        self.api_secret = api_secret
        self.chat = chat

    # calculate sha256 and encode to base64
    def sha256base64(self, data):
        sha256 = hashlib.sha256()
        sha256.update(data)
        digest = base64.b64encode(sha256.digest()).decode(encoding='utf-8')
        return digest

    def parse_url(self, request_url):
        stidx = request_url.index("://")
        host = request_url[stidx + 3:]
        schema = request_url[:stidx + 3]
        edidx = host.index("/")
        if edidx <= 0:
            raise AssembleHeaderException("invalid request url:" + request_url)
        path = host[edidx:]
        host = host[:edidx]
        u = Url(host, path, schema)
        return u

    # Build the signed (authenticated) request URL
    def assemble_ws_auth_url(self, request_url, method="GET"):
        u = self.parse_url(request_url)
        host = u.host
        path = u.path
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))
        # print(date)
        # date = "Thu, 12 Dec 2019 01:57:27 GMT"
        signature_origin = "host: {}\ndate: {}\n{} {} HTTP/1.1".format(host, date, method, path)
        # print(signature_origin)
        signature_sha = hmac.new(self.api_secret.encode('utf-8'), signature_origin.encode('utf-8'),
                                 digestmod=hashlib.sha256).digest()
        signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8')
        authorization_origin = "api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"" % (
            self.api_key, "hmac-sha256", "host date request-line", signature_sha)
        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
        # print(authorization_origin)
        values = {
            "host": host,
            "date": date,
            "authorization": authorization
        }
        return request_url + "?" + urlencode(values)

    # Build the request body
    def get_body(self, text):
        body = {
            "header": {
                "app_id": self.app_id,
                "uid": "123456789"
            },
            "parameter": {
                "chat": self.chat
            },
            "payload": {
                "message": {
                    "text": [
                        {
                            "role": "user",
                            "content": text
                        }
                    ]
                }
            }
        }
        return body

    # Send the request and return the raw response
    def generator_picture(self, text):
        host = 'http://spark-api.cn-huabei-1.xf-yun.com/v2.1/tti'
        url = self.assemble_ws_auth_url(host, method='POST')
        content = self.get_body(text)
        print(time.time())
        response = requests.post(url, json=content, headers={'content-type': "application/json"}).text
        print(time.time())
        return response

    # Save the base64-encoded image data to a local file
    def base64_to_image(self, base64_data, save_path):
        # Decode the base64 data
        img_data = base64.b64decode(base64_data)
        # Turn the decoded bytes into an image
        img = Image.open(BytesIO(img_data))
        # Save the image locally
        img.save(save_path)

    # Parse the response and save the image to the given location
    def parser_Message(self, message):
        data = json.loads(message)
        # print("data" + str(message))
        code = data['header']['code']
        if code != 0:
            print(f'Request failed: {code}, {data}')
        else:
            text = data["payload"]["choices"]["text"]
            imageContent = text[0]
            # if('image' == imageContent["content_type"]):
            imageBase = imageContent["content"]
            imageName = data['header']['sid']
            savePath = f"D://chen2024//Pycharm//AIGC_OPENAI//讯飞薪火//image//{imageName}.jpg"
            # print("imageBase of the picture:" + imageBase)
            self.base64_to_image(imageBase, savePath)
            print("Image saved to: " + savePath)
            return savePath
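If you want to try the image API on its own, here is a minimal sketch. The prompt text is made up, the CHAT dict is the same parameter block used in SparkApiMain.py, and note that parser_Message saves the image to the hard-coded path inside the class, so adjust that path first.
python
from SparkPicture import SparkPicture

CHAT = {"domain": 'general', "temperature": 0.5, "max_tokens": 4096, "width": 512, "height": 512}

picture = SparkPicture(app_id='', api_key='', api_secret='', chat=CHAT)
raw = picture.generator_picture("a cat sitting on a windowsill")
# parser_Message decodes the returned base64 image, saves it, and returns the local path
print(picture.parser_Message(raw))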
4) spider.py
This script simulates the frontend sending requests to the backend, which makes it easier to observe the streaming effect.
python
import json
import requests
from flask import jsonify


def get_stream_response(response):
    # Receive the response as a stream
    temp_data = ''
    print("AI:", end="")
    for chunk in response.iter_content(chunk_size=1024):
        temp_data += chunk.decode('utf-8')
        print(chunk.decode('utf-8'), end="")
        # print(rec_data_list)
        # print("----------------------------")
        # print(temp_data)
    print()
    return temp_data


# Call the text-to-text endpoint
def stream_upload(url, question):
    headers = {'Content-Type': 'application/json'}
    data = json.dumps({"question": question})  # the backend expects the fixed key "question"
    # Receive the response as a stream
    response = requests.post(url, headers=headers, data=data, stream=True)
    final_response = get_stream_response(response)
    return final_response


# Call the text-to-image endpoint
def picture_upload(url, desc):
    headers = {'Content-Type': 'application/json'}
    data = json.dumps({"desc": desc})  # the backend expects the fixed key "desc"
    image_path = requests.post(url, headers=headers, data=data).json()
    print(f"Image path: {image_path['image_url']}")
    return image_path


def back_to(is_back):
    if is_back == "back":
        return True
    return False


if __name__ == '__main__':
    url = 'http://127.0.0.1:5000/'
    while True:
        flag = input("1. Text-to-text  2. Text-to-image  Please choose: ")
        # type "back" to return to the previous menu
        while flag != "back":
            if int(flag) == 1:
                question = input("Me: ")
                if back_to(question):
                    flag = "back"
                    continue
                response = stream_upload(url + 'stream', question)
            elif int(flag) == 2:
                desc = input("Describe the image: ")
                if back_to(desc):
                    flag = "back"
                    continue
                resp = picture_upload(url + 'picture', desc)
5) Running the test
First run SparkApiMain.py. If the console shows the app is active, it should have started successfully; scrolling up you can also see Running on http://127.0.0.1:5000.
Then run spider.py.
With both programs running, type a question into spider.py's console and watch the AI reply stream back. Note: enter 1 first to select text-to-text.
Video demo: https://live.csdn.net/v/439539
That's it: we now have streaming between the frontend and backend. Besides chat and image generation, iFLYTEK Spark offers other APIs as well, all worth trying if you're interested.