开源的语音合成大模型——CosyVoice 使用介绍

1 模型概览

CosyVoice 是由阿里巴巴达摩院通义实验室开发的新一代生成式语音合成大模型系列,其核心目标是通过大模型技术深度融合文本理解与语音生成,实现高度拟人化的语音合成体验。该系列包含初代 CosyVoice 及其升级版 CosyVoice 2.0,两者在技术架构、性能和应用场景上均有显著差异。关键突破包括:

  • MOS评分达5.53,接近真人发音水平;

  • 首包延迟低至150ms,较传统方案降低60%;

  • 支持多种语言及方言(中/英/日/韩/粤语/四川话等),支持中英混合语句自然合成;

  • 集成情感控制、环境音效插入(如 [laughter])等细粒度生成能力。

2 不同应用场景的模型功能

|-------------------------|-------------------|-----------------------|-----------------------------------------|
| 模型名称 | 核心功能 | 使用场景 | 技术特点 |
| CosyVoice-300M | 零样本音色克隆、跨语言生成 | 个性化语音克隆、跨语种配音(如中文→英文) | 仅需 3s 参考音频;支持 5 种语言;无预置音色,需用户提供样本 |
| CosyVoice-300M-Instruct | 细粒度情感/韵律控制(富文本指令) | 情感配音(如广告、有声书)、语气细节调整 | 支持自然语言指令(如"欢快语气")及富文本标签(如 <laugh>) |
| CosyVoice-300M-SFT | 预置音色合成(无需样本) | 快速生成固定音色(如教育课件、导航语音) | 内置 7 种预训练音色(中/英/日/韩/粤男女声);无需克隆样本 |
| CosyVoice2-0.5B | 多语言流式语音合成、低延迟实时响应 | 直播、实时对话客服、双向语音交互 | 0.5B 参数量;支持双向流式合成(首包延迟 ≤150ms);多种语言支持 |

用户可以根据自身不同的业务需求选择相应的模型。

3 不同场景的demo

3.1 CosyVoice-300M

python 复制代码
import sys
sys.path.append('third_party/Matcha-TTS')
from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
from cosyvoice.utils.file_utils import load_wav
import torchaudio


#cosyvoice = CosyVoice('/models/iic/CosyVoice-300M', load_jit=False, load_trt=False, fp16=False)

def inference_zero_shot_300M(cosyvoice,tts_text):
    """Zero-shot voice cloning demo.

    Synthesizes `tts_text` in the voice of a short reference clip, using the
    reference transcript as the prompt text, and writes each returned chunk to
    a wav file.
    """
    prompt_speech_16k = load_wav('asset/zero_shot_prompt.wav', 16000)
    results = cosyvoice.inference_zero_shot(tts_text, '希望你以后能够做的比我还好呦。', prompt_speech_16k, stream=False)
    for idx, chunk in enumerate(results):
        out_path = 'asset/test_data/zero_shot3_{}.wav'.format(idx)
        torchaudio.save(out_path, chunk['tts_speech'], cosyvoice.sample_rate)

# cross_lingual usage
# cross_lingual usage
def inference_cross_lingual_300M(cosyvoice,tts_text):
    """Cross-lingual cloning demo: clone the reference voice while synthesizing
    text in a (possibly) different language; saves each output chunk to disk."""
    prompt_speech_16k = load_wav('asset/cross_lingual_prompt.wav', 16000)
    results = cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=False)
    for idx, chunk in enumerate(results):
        out_path = 'asset/test_data/cross_lingual_{}.wav'.format(idx)
        torchaudio.save(out_path, chunk['tts_speech'], cosyvoice.sample_rate)

# vc usage
# vc usage
def inference_vc_300M(cosyvoice,tts_text):
    """Voice-conversion demo: re-speak `source_speech_16k` in the timbre of
    `prompt_speech_16k` and save each converted chunk.

    Note: `tts_text` is accepted for signature symmetry with the other demos
    but is not used by voice conversion.
    """
    prompt_speech_16k = load_wav('asset/zero_shot_prompt.wav', 16000)
    source_speech_16k = load_wav('asset/cross_lingual_prompt.wav', 16000)
    results = cosyvoice.inference_vc(source_speech_16k, prompt_speech_16k, stream=False)
    for idx, chunk in enumerate(results):
        out_path = 'asset/test_data/vc_{}.wav'.format(idx)
        torchaudio.save(out_path, chunk['tts_speech'], cosyvoice.sample_rate)


if __name__ == '__main__':
    # Bug fix: the original path literal was 'hub/models/iic/CosyVoice-300M(模型地址)',
    # i.e. it contained a Chinese placeholder inside the string, which is not a
    # valid model directory and fails at load time. Point this at the actual
    # local checkpoint; change to pretrained_models/CosyVoice-300M-25Hz for 25Hz inference.
    cosyvoice = CosyVoice('hub/models/iic/CosyVoice-300M')
    inference_zero_shot_300M(cosyvoice, '今天是个好日子,我们一起去旅游吧')
    inference_cross_lingual_300M(cosyvoice, '今天是个好日子,我们一起去旅游吧')

3.2 CosyVoice-300M-Instruct

python 复制代码
import sys
sys.path.append('third_party/Matcha-TTS')
from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
from cosyvoice.utils.file_utils import load_wav
import torchaudio


def inference_instruct(cosyvoice,tts_text):
    """Instruct-mode demo: synthesize `tts_text` with a natural-language persona prompt.

    Requires a CosyVoice-300M-Instruct checkpoint. Rich-text control tags are
    supported in the text, e.g. <laughter></laughter>, <strong></strong>,
    [laughter], [breath].
    """
    # Bug fix: the original re-created a CosyVoice instance here, shadowing the
    # `cosyvoice` parameter (which was silently ignored) and reloading the model
    # on every call. Use the instance supplied by the caller instead.
    persona = 'Theo \'Crimson\', is a fiery, passionate rebel leader. Fights with fervor for justice, but struggles with impulsiveness.'
    for i, j in enumerate(cosyvoice.inference_instruct(tts_text, '中文男', persona, stream=False)):
        torchaudio.save('asset/cosyvoice-instruct/instruct_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)

if __name__ == '__main__':
    # Bug fix: the original loaded the base CosyVoice-300M model here, but
    # instruct inference needs the -Instruct checkpoint.
    cosyvoice = CosyVoice('/hub/models/iic/CosyVoice-300M-Instruct')
    inference_instruct(cosyvoice, '在面对挑战时,他展现了非凡的<strong>勇气</strong>与<strong>智慧</strong>。')

3.3 CosyVoice-300M-SFT

python 复制代码
import sys
sys.path.append('third_party/Matcha-TTS')
from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
from cosyvoice.utils.file_utils import load_wav
import torchaudio


# sft usage
def inference_sft(cosyvoice,tts_text):
    print(cosyvoice.list_available_spks())
    # change stream=True for chunk stream inference
    for i, j in enumerate(cosyvoice.inference_sft(tts_text, '中文女', stream=False)):
        torchaudio.save('asset/cosyvoice-sft/sft_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)

if __name__ == '__main__':
    # Load the SFT checkpoint with all acceleration options (JIT, TensorRT, fp16) disabled.
    model = CosyVoice('/hub/models/iic/CosyVoice-300M-SFT', load_jit=False, load_trt=False, fp16=False)
    inference_sft(model, '今天是个好日子,我们一起去旅游吧')

3.4 CosyVoice2-0.5B

python 复制代码
import sys
sys.path.append('third_party/Matcha-TTS')
from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
from cosyvoice.utils.file_utils import load_wav
import torchaudio


# zero_shot usage
def inference_zero_shot_05B(cosyvoice,tts_text):
    prompt_speech_16k = load_wav('asset/zero_shot_prompt.wav', 16000)
    for i, j in enumerate(cosyvoice.inference_zero_shot(tts_text, '希望你以后能够做的比我还好呦。', prompt_speech_16k, stream=False)):
        torchaudio.save('asset/CosyVoice2-05B/zero_shot_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)

# fine grained control, for supported control, check cosyvoice/tokenizer/tokenizer.py#L248
def inference_cross_lingual_05B(cosyvoice,tts_text):
    prompt_speech_16k = load_wav('asset/zero_shot_prompt.wav', 16000)
    for i, j in enumerate(cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=False)):
        torchaudio.save('asset/CosyVoice2-05B/fine_grained_control_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)

# instruct usage
def inference_instruct2_05B(cosyvoice,tts_text):
    prompt_speech_16k = load_wav('asset/zero_shot_prompt.wav', 16000)
    for i, j in enumerate(cosyvoice.inference_instruct2(tts_text, '用四川话说这句话', prompt_speech_16k, stream=False)):
        torchaudio.save('asset/CosyVoice2-05B/instruct1_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)


if __name__ == '__main__':
    # CosyVoice2 0.5B checkpoint, all acceleration options (JIT, TensorRT, fp16) disabled.
    model = CosyVoice2('/hub/models/iic/CosyVoice2-0.5B', load_jit=False, load_trt=False, fp16=False)
    tts_text = '收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。'
    # Uncomment to try the other CosyVoice2 demos:
    #inference_zero_shot_05B(model, tts_text)
    #inference_cross_lingual_05B(model, tts_text)
    inference_instruct2_05B(model, tts_text)

以上为简单的 demo,实测效果很好。可以使用 CosyVoice 框架提供的 HTTP 接口,也可以自行使用 FastAPI 定制化开发。

CosyVoice代码仓库地址:https://github.com/FunAudioLLM/CosyVoice.git

CosyVoice2-0.5B模型魔塔地址:CosyVoice语音生成大模型2.0-0.5B

推荐一个好用的JSON工具:JSON在线

相关推荐
幂简集成1 天前
通义灵码 AI 程序员低代码 API 课程实战教程
android·人工智能·深度学习·神经网络·低代码·rxjava
Tadas-Gao1 天前
阿里云通义MoE全局均衡技术:突破专家负载失衡的革新之道
人工智能·架构·大模型·llm·云计算
xiaozhazha_1 天前
快鹭云业财一体化系统技术解析:低代码+AI如何破解数据孤岛难题
人工智能·低代码
DreamNotOver1 天前
基于Scikit-learn集成学习模型的情感分析研究与实现
python·scikit-learn·集成学习
pan0c231 天前
集成学习(随机森林算法、Adaboost算法)
人工智能·机器学习·集成学习
pan0c231 天前
集成学习 —— 梯度提升树GBDT、XGBoost
人工智能·机器学习·集成学习
Learn Beyond Limits1 天前
Error metrics for skewed datasets|倾斜数据集的误差指标
大数据·人工智能·python·深度学习·机器学习·ai·吴恩达
我不是小upper1 天前
一文详解深度学习中神经网络的各层结构与功能!
人工智能·pytorch·深度学习
半瓶榴莲奶^_^1 天前
python基础案例-数据可视化
python·信息可视化·数据分析
kingmax542120081 天前
概率与数理统计公式及结论汇总
人工智能·机器学习·概率论