Preface
Recently I have been doing some speech-recognition work, using Paraformer real-time ASR as a backend service.
During development I ran into some strange problems, which I am recording here.
Background
Problem: when recognizing short audio clips (2-3 s), the audio at the tail is not recognized correctly.
Original audio content: "上号"
Model output: "上"
The character "号" is not recognized.
The whole service communicates over WebSocket: the input is an audio data stream, and the output is streaming recognition results.
"上号" occurs around the 1.3 s mark; the whole clip is about 2.2 s long.
Analysis
1、Padding the stream with silence packets on the client side (see the sketch after this list): the model now recognizes "上号" correctly.
2、To make the client easier to use, I moved this into the server: every processed packet has a fixed size, and when the final segment falls short of that size it is zero-padded. The model now recognizes only "上" and misses "号".
3、On top of 2, when the server processes the final audio segment it additionally appends about 0.12 s of silence packets. The model now recognizes both "上" and "号".
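A minimal sketch of the client-side workaround in step 1, assuming 16 kHz, 16-bit mono PCM; the function name and the 0.12 s default are illustrative, not the actual client code:
```python
# Client-side workaround from step 1: append silence to the PCM buffer
# before streaming it. Assumes 16 kHz, 16-bit mono PCM; pad_with_silence
# and the 0.12 s default are hypothetical names/values for illustration.
BYTES_PER_SECOND = 16_000 * 2 * 1  # sample_rate * bytes_per_sample * channels

def pad_with_silence(pcm: bytes, pad_seconds: float = 0.12) -> bytes:
    """Return pcm with pad_seconds of zero-valued samples appended."""
    return pcm + b"\x00" * int(BYTES_PER_SECOND * pad_seconds)
```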
As mentioned above, "上号" ends around 1.3 s, and the remaining ~0.9 s of the clip is already silence. Simply appending more silence packets should not change the result, so something else must be going on. With that question in mind, let's start the analysis.
First, the code:
```python
import time
import traceback

from fastapi import WebSocket, WebSocketDisconnect

# Note: logger, config, the init_*_model helpers and the enclosing class
# (cls) are project-specific and omitted here -- only the core logic is shown.


async def ws_serve(cls, websocket: WebSocket):
    # Make sure the background task that unloads idle models is running
    await cls.start_unload_check_task()
    websocket.ping_interval = 10.0  # send a ping every 10 seconds
    websocket.ping_timeout = 300.0  # disconnect if no pong within 300 seconds
    frames = []
    frames_asr_online = []
    cls.websocket_users.add(websocket)
    # Buffer used to assemble fixed-size packets
    websocket.audio_buffer = b""
    websocket.packet_count = 0
    websocket.status_dict_asr = {}
    websocket.status_dict_asr_online = {"cache": {}, "is_final": False, "chunk_size": [5, 10, 5]}
    websocket.status_dict_vad = {"cache": {}, "is_final": False}
    websocket.status_dict_punc = {"cache": {}}
    websocket.chunk_interval = 15
    websocket.status_dict_asr_online["encoder_chunk_look_back"] = 4
    websocket.status_dict_asr_online["decoder_chunk_look_back"] = 0
    # Handshake phase
    try:
        await websocket.accept()
        # Read the URL query parameters
        language = websocket.query_params.get("lang")
        use_speaker_verification = websocket.query_params.get("use_speaker", "false") == "true"
        if language is not None:
            if language not in ["cantonese", "english", "mandarin", "uyghur", "dialect", "other"]:
                response = {
                    "action": "error",
                    "code": "10110",
                    "data": "",
                    "desc": "unsupported language type",
                    "sid": "rta0000000a@ch312c0e3f63609f0900",
                }
                await websocket.send_json(response)
                cls.websocket_users.remove(websocket)
                await cls.ws_reset(websocket)
                return
        websocket.language = language
        logger.info(f"connection params: language={language}")
        response = {
            "action": "started",
            "code": "0",
            "data": "",
            "desc": "success",
            "sid": "rta0000000a@ch312c0e3f63609f0900",
        }
        await websocket.send_json(response)
        # Initialize the models on first use
        if language in ["cantonese", "english", "mandarin"]:
            if cls.cantonese_model is None and not use_speaker_verification:
                logger.info("initializing the Cantonese model")
                cls.cantonese_model = init_cantonese_model(config.CANTONESE_ASR_MODEL_PATH, config.DEVICE)
                # Refresh the model's last-access time
                cls.model_last_access_time["cantonese_model"] = time.time()
            if cls.mandrian_spk_model is None and use_speaker_verification:
                logger.info("initializing the Mandarin model")
                cls.mandrian_spk_model = init_mandrian_spk_model(config.MANDRIAN_ASR_MODEL_PATH, config.MANDRIAN_VAD_MODEL_PATH, config.MANDRIAN_PUNC_MODEL_PATH, config.MANDRIAN_SPK_MODEL_PATH, config.DEVICE)
                # Refresh the model's last-access time
                cls.model_last_access_time["mandrian_spk_model"] = time.time()
        else:
            logger.error(f"unsupported language type: {language}")
            response = {
                "action": "error",
                "code": "10110",
                "data": "",
                "desc": f"unsupported language type: {language}, supported models: cantonese, english, mandarin, uyghur, dialect, other",
                "sid": "rta0000000a@ch312c0e3f63609f0900",
            }
            await websocket.send_json(response)
            cls.websocket_users.remove(websocket)
            await cls.ws_reset(websocket)
            return
    except Exception:
        error_response = {
            "action": "error",
            "code": "10110",
            "data": "",
            "desc": "invalid authorization|illegal signature",
            "sid": "rta0000000b@ch312c0e3f65f09f0900",
        }
        traceback.print_exc()
        await websocket.send_json(error_response)
        await cls.ws_reset(websocket)
        return
    try:
        while True:
            message = await websocket.receive_bytes()
            decoded_message = message.decode("utf-8", errors="ignore")
            websocket.status_dict_asr_online["is_final"] = False
            if decoded_message == '{"end": true}':
                logger.info("end-of-stream marker received")
                websocket.status_dict_asr_online["is_final"] = True
                # Flush the remaining buffered audio, zero-padding it up to
                # the fixed packet size if it falls short
                if len(websocket.audio_buffer) > 0:
                    if len(websocket.audio_buffer) < cls.FIXED_PACKET_SIZE:
                        padding_size = cls.FIXED_PACKET_SIZE - len(websocket.audio_buffer)
                        websocket.audio_buffer += b"\x00" * padding_size
                        logger.info(f"last packet short of {cls.FIXED_PACKET_SIZE} bytes, padded {padding_size} bytes")
                    frames.append(websocket.audio_buffer)
                    frames_asr_online.append(websocket.audio_buffer)
                    websocket.audio_buffer = b""
                    websocket.packet_count += 1
                # Append extra silence packets so the trailing audio is fully
                # recognized: they let the model finish final decoding and emit
                # the complete result. With encoder_chunk_look_back=4, at least
                # 4 packets are suggested for sufficient context.
                extra_silence_packets = 4  # ~0.24 s of silence
                for i in range(extra_silence_packets):
                    silence_packet = b"\x00" * cls.FIXED_PACKET_SIZE
                    frames.append(silence_packet)
                    frames_asr_online.append(silence_packet)
                    websocket.packet_count += 1
                logger.info(f"appended {extra_silence_packets} silence packets to improve tail recognition")
            else:
                # Accumulate the incoming bytes in the buffer
                websocket.audio_buffer += message
                # Cut fixed-size packets off the front of the buffer
                while len(websocket.audio_buffer) >= cls.FIXED_PACKET_SIZE:
                    packet = websocket.audio_buffer[:cls.FIXED_PACKET_SIZE]
                    websocket.audio_buffer = websocket.audio_buffer[cls.FIXED_PACKET_SIZE:]
                    frames.append(packet)
                    frames_asr_online.append(packet)
                    websocket.packet_count += 1
            websocket.status_dict_vad["chunk_size"] = int(
                websocket.status_dict_asr_online["chunk_size"][1]
                * 60
                / websocket.chunk_interval
            )
            # Run recognition when new fixed packets are pending, or when the
            # end-of-stream logic needs to run
            if len(frames_asr_online) > 0 or websocket.status_dict_asr_online["is_final"]:
                if websocket.language in ["cantonese", "english", "mandarin"] and not use_speaker_verification:
                    # Process once every chunk_interval fixed packets, or when
                    # the end marker has arrived
                    if (
                        len(frames_asr_online) % websocket.chunk_interval == 0
                        or websocket.status_dict_asr_online["is_final"]
                    ):
                        if websocket.status_dict_asr_online["is_final"]:
                            logger.info("is_final")
                        if len(frames_asr_online) > 0:
                            audio_in = b"".join(frames_asr_online)
                            logger.info(f"processing audio stream: {len(frames_asr_online)} fixed packets, {len(audio_in)} bytes in total")
                            try:
                                await cls.async_online_asr_cantonese(websocket, audio_in)
                            except Exception as e:
                                traceback.print_exc()
                                logger.error(f"streaming ASR error: {e}")
                            frames_asr_online = []
            # Make sure the client is notified once final processing is done
            if websocket.status_dict_asr_online["is_final"]:
                logger.info("sending end-of-processing message to the client")
                message = {
                    "end": True,
                }
                await websocket.send_json(message)
    except WebSocketDisconnect:
        logger.error("WebSocket disconnected...")
        cls.websocket_users.remove(websocket)
        await cls.ws_reset(websocket)
    except Exception as e:
        logger.error(f"Exception: {e}")
        traceback.print_exc()
```
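For reference, here is a minimal client sketch for this endpoint. The URL, lang value, and packet size are assumptions based on the handler above; note the end marker must be sent as a binary frame, because the server reads everything with receive_bytes():
```python
# Minimal client sketch (ws://localhost:8000/ws is a hypothetical mount point).
import asyncio
import json

import websockets  # pip install websockets

async def recognize(pcm: bytes) -> None:
    async with websockets.connect("ws://localhost:8000/ws?lang=cantonese") as ws:
        print(await ws.recv())              # {"action": "started", ...}
        for i in range(0, len(pcm), 1920):  # stream fixed 1,920-byte packets
            await ws.send(pcm[i:i + 1920])
        await ws.send(b'{"end": true}')     # binary frame: server uses receive_bytes()
        async for msg in ws:
            result = json.loads(msg)
            print(result)
            if result.get("end"):           # server signals completion
                break

# asyncio.run(recognize(open("audio.pcm", "rb").read()))
```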
The server code above contains only the core logic. The key parameters:
encoder_chunk_look_back = 4: the encoder looks back over the previous 4 chunks of context.
decoder_chunk_look_back = 0: the decoder does not look back.
chunk_interval = 10: recognition is triggered once every 10 packets.
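The VAD chunk size inside ws_serve is derived from these values; here is the formula evaluated as a quick check (the ×60 factor mirrors FunASR's reference streaming server, where the result is interpreted in milliseconds):
```python
# The VAD chunk-size formula from ws_serve, evaluated with the values above.
chunk_size = [5, 10, 5]   # status_dict_asr_online["chunk_size"]
chunk_interval = 10       # one recognition pass per 10 packets
vad_chunk_size = int(chunk_size[1] * 60 / chunk_interval)
print(vad_chunk_size)     # 60 -> the VAD window, in ms in FunASR's streaming demo
```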
Audio parameters:
Sample rate: 16,000 Hz
Bit depth: 16-bit (2 bytes per sample)
Channels: mono
Fixed packet size: 1,920 bytes
Bytes per second of audio = 16,000 × 2 bytes × 1 channel = 32,000 bytes/s
Duration of one packet = 1,920 ÷ 32,000 = 0.06 s
Total duration of 10 packets = 0.06 × 10 = 0.6 s
So 19,200 bytes is roughly 0.6 s of audio.
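The same arithmetic as a runnable snippet:
```python
# Sanity check of the packet math above.
SAMPLE_RATE = 16_000       # Hz
BYTES_PER_SAMPLE = 2       # 16-bit PCM
CHANNELS = 1
PACKET_SIZE = 1_920        # bytes, cls.FIXED_PACKET_SIZE

bytes_per_second = SAMPLE_RATE * BYTES_PER_SAMPLE * CHANNELS  # 32,000
print(PACKET_SIZE / bytes_per_second)        # 0.06 -> one packet is 60 ms
print(10 * PACKET_SIZE / bytes_per_second)   # 0.6  -> 19,200 bytes ≈ 0.6 s
```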
The original audio is about 2.2 s long, so it is split into roughly four chunks; "上号" falls within the first ~1.3 s, and the remaining ~0.9 s is silence.
Setting encoder_chunk_look_back to 3, i.e. the encoder looks back 3 chunks (1.8 s), the model outputs "哦哦" instead (a model-quality issue).
Setting chunk_interval to 100, i.e. one recognition pass covers 100 packets (6 s of audio), the model recognizes "上号" correctly.
The conclusion: without the extra 0.12 s of silence packets, the model recognizes "上" but not "号" because "号" happens to be cut right at a chunk boundary; the truncated audio is recognized poorly and treated as an empty token.
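To make the boundary hypothesis concrete, here is a small illustration using the numbers above (0.6 s recognition chunks, "上号" ending near 1.3 s). The 2-packet count for 0.12 s is my reading of step 3; the handler shown earlier actually appends 4 packets (~0.24 s):
```python
# Chunk boundaries for a 2.2 s clip cut into 0.6 s recognition chunks:
CHUNK_SECONDS = 0.6
AUDIO_SECONDS = 2.2
boundaries = [round(i * CHUNK_SECONDS, 1)
              for i in range(1, int(AUDIO_SECONDS / CHUNK_SECONDS) + 1)]
print(boundaries)        # [0.6, 1.2, 1.8] -> "号" (~1.3 s) straddles 1.2 s

# 0.12 s of appended silence corresponds to two fixed 1,920-byte packets:
print(2 * 1920 / 32000)  # 0.12
```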