# Moonshine ONNX Speech Recognition

## Environment Setup

```bash
pip install onnxruntime numpy tokenizers librosa modelscope huggingface-hub
```
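To confirm that ONNX Runtime installed correctly, a quick sanity check of the version and available execution providers can help (a minimal sketch; the CPU provider is sufficient for the base model):

```python
import onnxruntime

# Print the installed version and the execution providers ONNX Runtime can use.
# "CPUExecutionProvider" is always available and is enough for Moonshine base.
print(onnxruntime.__version__)
print(onnxruntime.get_available_providers())
```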

## Download the Model

From Hugging Face:

```bash
huggingface-cli download UsefulSensors/moonshine --allow_patterns 'onnx/base/*.onnx' --local-dir ./models/
```

Download tokenizer.json (use the raw file URL; the regular GitHub `blob` URL makes wget save the HTML page instead of the JSON file):

```bash
wget https://raw.githubusercontent.com/usefulsensors/moonshine/main/moonshine/assets/tokenizer.json -P './models/onnx/base/'
```

From ModelScope:

```bash
modelscope download --model manyeyes/moonshine-base-en-onnx --local_dir ./models/
```
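If the CLI tools are not convenient, the same ONNX files can be fetched from Python with huggingface_hub's `snapshot_download` (a sketch; tokenizer.json still comes from the GitHub URL above):

```python
from huggingface_hub import snapshot_download

# Download only the base ONNX model files into ./models/onnx/base/.
snapshot_download(
    repo_id="UsefulSensors/moonshine",
    allow_patterns=["onnx/base/*.onnx"],
    local_dir="./models",
)
```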

## Run

```python
import os
import wave
import numpy as np
import tokenizers
import onnxruntime

class MoonshineOnnxModel:
    def __init__(self, models_dir):

        preprocess, encode, uncached_decode, cached_decode = [
            f"{models_dir}/{x}.onnx"
            for x in ["preprocess", "encode", "uncached_decode", "cached_decode"]
        ]
        self.preprocess = onnxruntime.InferenceSession(preprocess)
        self.encode = onnxruntime.InferenceSession(encode)
        self.uncached_decode = onnxruntime.InferenceSession(uncached_decode)
        self.cached_decode = onnxruntime.InferenceSession(cached_decode)
        self.tokenizer = tokenizers.Tokenizer.from_file(
            os.path.join(models_dir, "tokenizer.json")
        )
        print('Successfully loaded model.')

    def _generate(self, audio, max_len=None):
        "audio has to be a numpy array of shape [1, num_audio_samples]"
        if max_len is None:
            # max 6 tokens per second of audio
            max_len = int((audio.shape[-1] / 16_000) * 6)
        preprocessed = self.preprocess.run([], dict(args_0=audio))[0]
        seq_len = [preprocessed.shape[-2]]

        context = self.encode.run([], dict(args_0=preprocessed, args_1=seq_len))[0]
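        # Token id 1 serves as the start-of-sequence token; decoding below
        # stops when the model emits token id 2 (end-of-sequence).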
        inputs = [[1]]
        seq_len = [1]

        tokens = [1]
        logits, *cache = self.uncached_decode.run(
            [], dict(args_0=inputs, args_1=context, args_2=seq_len)
        )
        for i in range(max_len):
            next_token = logits.squeeze().argmax()
            tokens.extend([next_token])
            if next_token == 2:
                break

            seq_len[0] += 1
            inputs = [[next_token]]
            logits, *cache = self.cached_decode.run(
                [],
                dict(
                    args_0=inputs,
                    args_1=context,
                    args_2=seq_len,
                    **{f"args_{i+3}": x for i, x in enumerate(cache)},
                ),
            )
        return [tokens]

    def generate(self, audio_paths: list[str] | str, max_len=None):
        if isinstance(audio_paths, str):
            audio_paths = [audio_paths]

        audios = []
        for audio_path in audio_paths:
            with wave.open(audio_path) as f:
                params = f.getparams()
                assert (
                    params.nchannels == 1
                    and params.framerate == 16_000
                    and params.sampwidth == 2
                ), "wave file must be mono, 16 kHz, int16"
                audio = f.readframes(params.nframes)
            # Convert int16 PCM to float32 in [-1, 1] and add a batch dimension.
            audio = np.frombuffer(audio, np.int16) / 32768.0
            audio = audio.astype(np.float32)[None, ...]
            audios.append(audio)

        audios = np.concatenate(audios, axis=0)
        tokens = self._generate(audios, max_len)
        texts = self.tokenizer.decode_batch(tokens)

        return texts


if __name__ == "__main__":
    model_dir = "models/onnx/base/"
    client = MoonshineOnnxModel(model_dir)
    audio_path = "beckett.wav"
    text = client.generate(audio_path)
    print(text)
```
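The wave-based loader above only accepts mono 16 kHz int16 WAV files. Since librosa is already in the dependency list, audio in other formats or sample rates can be resampled before transcription; a minimal sketch, assuming the `MoonshineOnnxModel` class defined above:

```python
import librosa
import numpy as np

def transcribe_any_audio(model, audio_path):
    # librosa decodes and resamples to 16 kHz mono float32 in [-1, 1].
    audio, _ = librosa.load(audio_path, sr=16_000, mono=True)
    audio = audio[None, ...]  # add the batch dimension expected by _generate
    tokens = model._generate(audio)
    return model.tokenizer.decode_batch(tokens)

# Hypothetical usage:
# model = MoonshineOnnxModel("models/onnx/base/")
# print(transcribe_any_audio(model, "recording.mp3"))
```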