在做 AI 模型推理的接口时,接口内部是同步(非异步)的阻塞调用,用 uvicorn 运行 FastAPI 时就会阻塞所有请求。
这时候需要解决这个问题:
python
import asyncio
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
import time
import io
import uvicorn
# FastAPI application instance; the route decorators below register
# endpoints on it.
app = FastAPI()
def my_io(num, delay=20):
    """Blocking-I/O stand-in: print *num*, then sleep for *delay* seconds.

    Deliberately synchronous -- the endpoint runs it through
    ``loop.run_in_executor`` so the event loop is not blocked.

    Args:
        num: Value to print (simulates work input).
        delay: Seconds to block; defaults to 20 to preserve the original
            demo behaviour, but can be shortened (e.g. for tests).
    """
    print(num)
    time.sleep(delay)
@app.get("/hello")
async def hello():
    """Demo endpoint: run the blocking my_io in the default thread pool.

    ``run_in_executor`` offloads the synchronous call, so other requests
    keep being served while my_io sleeps.
    """
    # get_running_loop() is the recommended way to obtain the loop from
    # inside a coroutine; get_event_loop() is deprecated there since 3.10.
    loop = asyncio.get_running_loop()
    # my_io contains code that does not support async operation, so the
    # default ThreadPoolExecutor (executor=None) is used to run it.
    future = loop.run_in_executor(None, my_io, 666)
    response = await future
    print("运行完成", response)
    return {"message": "success"}
def read_image_data(image_path: str):
    """Read the file at *image_path* and return its raw bytes."""
    with open(image_path, "rb") as image_file:
        return image_file.read()
@app.get("/show_image/{image_path:path}")
async def show_image(image_path: str):
    """Stream the image file at *image_path* back to the client.

    The blocking file read is pushed to the default thread pool so the
    event loop stays responsive.
    """
    # get_running_loop() replaces the deprecated get_event_loop() here.
    loop = asyncio.get_running_loop()
    datas = await loop.run_in_executor(None, read_image_data, image_path)
    # Renamed from `bytes` -- the original shadowed the builtin type.
    buffer = io.BytesIO(datas)
    # NOTE(review): media_type is hard-coded to image/png although the
    # route accepts any path -- confirm only PNGs are served.
    return StreamingResponse(buffer, media_type="image/png")
if __name__ == "__main__":
    # The import-string form ("api:app") is required for reload=True;
    # assumes this file is saved as api.py -- TODO confirm the filename.
    uvicorn.run("api:app", host="0.0.0.0", port=10001, reload=True)
完美解决!!!perfect!!!