F5-TTS Deployment Notes for Voice Announcements

Contents

Inference code, with parameters pre-configured:

Wrapping speaker registration:

Inference optimization:

Inference code, optimized:


Inference code, with parameters pre-configured:

python
import soundfile as sf
import torch
import tqdm
from cached_path import cached_path

from model import DiT, UNetT
from model.utils import save_spectrogram

from model.utils_infer import load_vocoder, load_model, infer_process, remove_silence_for_generated_wav
from model.utils import seed_everything
import random
import sys


class F5TTS:
    def __init__(self, model_type="F5-TTS", ckpt_file="", vocab_file="",
                 ode_method="euler", use_ema=True, local_path=None, device=None):
        # Initialize parameters
        self.final_wave = None
        self.target_sample_rate = 24000
        self.n_mel_channels = 100
        self.hop_length = 256
        self.target_rms = 0.1
        self.seed = -1

        # Set device
        self.device = device or ("cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu")

        # Load models
        self.load_vocoder_model(local_path)
        self.load_ema_model(model_type, ckpt_file, vocab_file, ode_method, use_ema)

    def load_vocoder_model(self, local_path):
        self.vocos = load_vocoder(local_path is not None, local_path, self.device)

    def load_ema_model(self, model_type, ckpt_file, vocab_file, ode_method, use_ema):
        if model_type == "F5-TTS":
            if not ckpt_file:
                ckpt_file = str(cached_path("hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.safetensors"))
            model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
            model_cls = DiT
        elif model_type == "E2-TTS":
            if not ckpt_file:
                ckpt_file = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.safetensors"))
            model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
            model_cls = UNetT
        else:
            raise ValueError(f"Unknown model type: {model_type}")

        self.ema_model = load_model(model_cls, model_cfg, ckpt_file, vocab_file, ode_method, use_ema, self.device)

    def export_wav(self, wav, file_wave, remove_silence=False):
        sf.write(file_wave, wav, self.target_sample_rate)

        if remove_silence:
            remove_silence_for_generated_wav(file_wave)

    def export_spectrogram(self, spect, file_spect):
        save_spectrogram(spect, file_spect)

    def infer(self, ref_file, ref_text, gen_text, show_info=print, progress=tqdm,
              target_rms=0.1, cross_fade_duration=0.15, sway_sampling_coef=-1,
              cfg_strength=2, nfe_step=32, speed=1.0, fix_duration=None,
              remove_silence=False, file_wave=None, file_spect=None, seed=-1):
        if seed == -1:
            seed = random.randint(0, sys.maxsize)
        seed_everything(seed)
        self.seed = seed
        wav, sr, spect = infer_process(ref_file, ref_text, gen_text, self.ema_model,
            show_info=show_info, progress=progress, target_rms=target_rms,
            cross_fade_duration=cross_fade_duration, nfe_step=nfe_step,
            cfg_strength=cfg_strength, sway_sampling_coef=sway_sampling_coef,
            speed=speed, fix_duration=fix_duration, device=self.device)

        if file_wave is not None:
            self.export_wav(wav, file_wave, remove_silence)

        if file_spect is not None:
            self.export_spectrogram(spect, file_spect)

        return wav, sr, spect


if __name__ == "__main__":
    f5tts = F5TTS(model_type="F5-TTS",
                  ckpt_file=r"./hf_download\hub\models--SWivid--F5-TTS\snapshots\84e5a410d9cead4de2f847e7c9369a6440bdfaca\F5TTS_Base\model_1200000.safetensors",  # raw string: backslashes in the Windows path must not be treated as escapes
                  vocab_file=r"E:\project\F5-TTS\src\f5_tts\infer\examples\vocab.txt",
                  local_path=r"E:\project\F5-TTS\hf_download\hub\models--charactr--vocos-mel-24khz\snapshots\0feb3fdd929bcd6649e0e7c5a688cf7dd012ef21",  # point to the local vocoder here
                  device="cuda")
    # f5tts = F5TTS()

    wav, sr, spect = f5tts.infer(ref_file="tests/ref_audio/test_en_1_ref_short.wav", ref_text="some call me nature, others call me mother nature.",
        gen_text="""I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring. Respect me and I'll nurture you; ignore me and you shall face the consequences.""",
        file_wave="tests/out.wav", file_spect="tests/out.png", seed=-1,  # random seed = -1
    )

    print("seed :", f5tts.seed)
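
With seed=-1, infer draws a random seed and stores it on the instance, so the value printed here can be passed back in via the seed argument to reproduce a run.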

Wrapping speaker registration:

python
import os
import json
import time

import torch
from api import F5TTS  # assumes your F5TTS class lives in this module


# ========== 2. Wrapper class with speaker management ==========
class F5TTSWithSpeaker:
    def __init__(self, f5tts_instance):
        self.f5tts = f5tts_instance
        self.speakers = {}
        self.speaker_file = "speakers.json"
        self._load_speakers()  # automatically load previously saved speakers

    def register_speaker(self, speaker_id, ref_audio, ref_text):
        """Register a speaker."""
        # Verify the reference audio exists
        if not os.path.exists(ref_audio):
            raise FileNotFoundError(f"Reference audio not found: {ref_audio}")

        self.speakers[speaker_id] = {"ref_audio": ref_audio, "ref_text": ref_text}
        print(f"✅ Registered speaker: {speaker_id}")
        self._save_speakers()
        return speaker_id

    def _save_speakers(self):
        """Persist speaker info to disk."""
        try:
            with open(self.speaker_file, 'w', encoding='utf-8') as f:
                json.dump(self.speakers, f, ensure_ascii=False, indent=2)
            print(f"💾 Speaker info saved to: {self.speaker_file}")
        except Exception as e:
            print(f"Save failed: {e}")

    def _load_speakers(self):
        """Load speaker info from disk."""
        if os.path.exists(self.speaker_file):
            try:
                with open(self.speaker_file, 'r', encoding='utf-8') as f:
                    self.speakers = json.load(f)
                print(f"📂 Loaded {len(self.speakers)} speaker(s)")
                for sid in self.speakers:
                    print(f"   - {sid}")
            except Exception as e:
                print(f"Failed to load speaker info: {e}")
                self.speakers = {}
        else:
            print("📝 No speaker config file found; a new one will be created")

    def list_speakers(self):
        """List all registered speakers."""
        if not self.speakers:
            print("No speakers registered yet")
        else:
            print(f"Registered speakers ({len(self.speakers)}):")
            for sid, info in self.speakers.items():
                print(f"   🎤 {sid}: {info['ref_audio']}")
        return self.speakers

    def infer_with_speaker(self, speaker_id, gen_text, **kwargs):
        """Run inference with a registered speaker."""
        if speaker_id not in self.speakers:
            raise ValueError(f"Speaker '{speaker_id}' is not registered")

        speaker = self.speakers[speaker_id]
        print(f"🎤 Using speaker: {speaker_id}")
        print(f"  Reference audio: {speaker['ref_audio']}")
        print(f"  Reference text: {speaker['ref_text'][:50]}...")
        print(f"  Text to generate: {gen_text[:50]}...")

        return self.f5tts.infer(ref_file=speaker["ref_audio"], ref_text=speaker["ref_text"], gen_text=gen_text, **kwargs)

    def remove_speaker(self, speaker_id):
        """Remove a speaker."""
        if speaker_id in self.speakers:
            del self.speakers[speaker_id]
            self._save_speakers()
            print(f"🗑️ Removed speaker: {speaker_id}")
        else:
            print(f"⚠️ Speaker '{speaker_id}' does not exist")


# ========== 3. Usage example ==========

if __name__ == '__main__':

    os.makedirs("outputs", exist_ok=True)
    print("Initializing the F5TTS model...")
    f5tts = F5TTS(model_type="F5-TTS",
                  ckpt_file=r"./hf_download\hub\models--SWivid--F5-TTS\snapshots\84e5a410d9cead4de2f847e7c9369a6440bdfaca\F5TTS_Base\model_1200000.safetensors",  # raw string for the Windows path
                  vocab_file=r"E:\project\F5-TTS\src\f5_tts\infer\examples\vocab.txt",
                  local_path=r"E:\project\F5-TTS\hf_download\hub\models--charactr--vocos-mel-24khz\snapshots\0feb3fdd929bcd6649e0e7c5a688cf7dd012ef21",  # point to the local vocoder here
                  device="cuda")
    # Create the TTS instance with speaker management
    tts = F5TTSWithSpeaker(f5tts)

    # ===== 3.1 Register a speaker =====
    print("\n" + "=" * 50)
    print("Registering a speaker")
    print("=" * 50)

    speaker_id = "nature_voice"
    ref_audio = "tests/ref_audio/test_en_1_ref_short.wav"
    ref_text = "some call me nature, others call me mother nature."

    # tts.register_speaker(speaker_id, ref_audio, ref_text)
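    # NOTE: registration is commented out above, so this run assumes "nature_voice"
    # is already saved in speakers.json; uncomment it on the first run.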

    # ===== 3.2 List all speakers =====
    print("\n" + "=" * 50)
    print("Listing speakers")
    print("=" * 50)
    tts.list_speakers()

    # ===== 3.3 Generate speech with the registered speaker =====
    print("\n" + "=" * 50)
    print("Generating speech")
    print("=" * 50)

    gen_text = """I don't really care what you call me. I've been a silent spectator, 
    watching species evolve, empires rise and fall. But always remember, 
    I am mighty and enduring. Respect me and I'll nurture you; 
    ignore me and you shall face the consequences."""
    gen_text = '你好,我是人工智能助手'  # Chinese test sentence; overrides the English text above
    for i in range(10):
        try:
            start = time.time()
            wav, sr, spect = tts.infer_with_speaker(speaker_id=speaker_id, gen_text=gen_text,
                file_wave="outputs/nature_speech.wav", file_spect=None, remove_silence=True,
                seed=42, speed=1.0, cfg_strength=2.0, nfe_step=32)
            print("  Speech generated successfully!")
            print("  Audio file: outputs/nature_speech.wav")
            print(f"  Sample rate: {sr} Hz")
            print(f"  Audio duration: {len(wav) / sr:.2f} s")
            print("time:", time.time() - start)

        except Exception as e:
            print(f"❌ Generation failed: {e}")

Inference optimization:

The idea: preprocess the reference audio once up front (mono mixdown, RMS normalization, resampling, text conversion) and reuse the result across repeated inference calls, instead of redoing that work per request. The changes go into model/utils_infer.py:

python
# A unified script for inference process
# Make adjustments inside functions, and consider both gradio and cli scripts if need to change func output format

import re
import tempfile

import numpy as np
import torch
import torchaudio
import tqdm
from pydub import AudioSegment, silence
from transformers import pipeline
from vocos import Vocos

from model import CFM
from model.utils import (
    load_checkpoint,
    get_tokenizer,
    convert_char_to_pinyin,
)


device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")  # module-level vocoder used by infer_batch_process and infer_with_precomputed


# -----------------------------------------

target_sample_rate = 24000
n_mel_channels = 100
hop_length = 256
target_rms = 0.1
cross_fade_duration = 0.15
ode_method = "euler"
nfe_step = 16  # ODE solver steps: 16 is faster, 32 gives higher quality
cfg_strength = 2.0
sway_sampling_coef = -1.0
speed = 1.0
fix_duration = None

# -----------------------------------------


_ref_cache = {}  # reserved for caching pre-encoded references (see the caching sketch below)


def chunk_text(text, max_chars=135):
    """
    Splits the input text into chunks, each with a maximum number of characters.

    Args:
        text (str): The text to be split.
        max_chars (int): The maximum chunk size (measured in UTF-8 bytes).

    Returns:
        List[str]: A list of text chunks.
    """
    chunks = []
    current_chunk = ""
    # Split the text into sentences based on punctuation followed by whitespace
    sentences = re.split(r"(?<=[;:,.!?])\s+|(?<=[;:,。!?])", text)

    for sentence in sentences:
        if len(current_chunk.encode("utf-8")) + len(sentence.encode("utf-8")) <= max_chars:
            current_chunk += sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence
        else:
            if current_chunk:
                chunks.append(current_chunk.strip())
            current_chunk = sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence

    if current_chunk:
        chunks.append(current_chunk.strip())

    return chunks
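
# Illustrative only (not part of the original file): chunk_text splits on
# sentence-final punctuation and packs sentences greedily up to max_chars
# (counted in UTF-8 bytes), e.g.
#   chunk_text("Hi there. How are you? Fine.", max_chars=12)
#   -> ["Hi there.", "How are you?", "Fine."]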


# load vocoder
def load_vocoder(is_local=False, local_path="", device=device):
    if is_local:
        print(f"Load vocos from local path {local_path}")
        vocos = Vocos.from_hparams(f"{local_path}/config.yaml")
        state_dict = torch.load(f"{local_path}/pytorch_model.bin", map_location=device)
        vocos.load_state_dict(state_dict)
        vocos.eval()
    else:
        print("Download Vocos from huggingface charactr/vocos-mel-24khz")
        vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")
    return vocos


# load asr pipeline

asr_pipe = None


def initialize_asr_pipeline(device=device):
    global asr_pipe
    asr_pipe = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-large-v3-turbo",
        torch_dtype=torch.float16,
        device=device,
    )


def load_model(model_cls, model_cfg, ckpt_path, vocab_file="", ode_method=ode_method, use_ema=True, device=device):
    if vocab_file == "":
        vocab_file = "Emilia_ZH_EN"
        tokenizer = "pinyin"
    else:
        tokenizer = "custom"

    print("\nvocab : ", vocab_file)
    print("tokenizer : ", tokenizer)
    print("model : ", ckpt_path, "\n")

    vocab_char_map, vocab_size = get_tokenizer(vocab_file, tokenizer)
    model = CFM(
        transformer=model_cls(**model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels),
        mel_spec_kwargs=dict(
            target_sample_rate=target_sample_rate,
            n_mel_channels=n_mel_channels,
            hop_length=hop_length,
        ),
        odeint_kwargs=dict(
            method=ode_method,
        ),
        vocab_char_map=vocab_char_map,
    ).to(device)

    model = load_checkpoint(model, ckpt_path, device, use_ema=use_ema)

    return model


# preprocess reference audio and text


def preprocess_ref_audio_text(ref_audio_orig, ref_text, show_info=print, device=device):
    show_info("Converting audio...")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
        aseg = AudioSegment.from_file(ref_audio_orig)

        non_silent_segs = silence.split_on_silence(aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=1000)
        non_silent_wave = AudioSegment.silent(duration=0)
        for non_silent_seg in non_silent_segs:
            non_silent_wave += non_silent_seg
        aseg = non_silent_wave

        audio_duration = len(aseg)
        if audio_duration > 15000:
            show_info("Audio is over 15s, clipping to only first 15s.")
            aseg = aseg[:15000]
        aseg.export(f.name, format="wav")
        ref_audio = f.name

    if not ref_text.strip():
        global asr_pipe
        if asr_pipe is None:
            initialize_asr_pipeline(device=device)
        show_info("No reference text provided, transcribing reference audio...")
        ref_text = asr_pipe(
            ref_audio,
            chunk_length_s=30,
            batch_size=128,
            generate_kwargs={"task": "transcribe"},
            return_timestamps=False,
        )["text"].strip()
        show_info("Finished transcription")
    else:
        show_info("Using custom reference text...")

    # Add the functionality to ensure it ends with ". "
    if not ref_text.endswith(". ") and not ref_text.endswith("。"):
        if ref_text.endswith("."):
            ref_text += " "
        else:
            ref_text += ". "

    return ref_audio, ref_text


def preencode_reference(ref_audio, ref_text, model_obj, device=device):
    """
    Pre-encode the reference audio and return features ready for inference.

    Args:
        ref_audio: path to the reference audio, or an (audio, sr) tuple
        ref_text: reference text
        model_obj: the loaded model
        device: target device

    Returns:
        dict: a dictionary of pre-encoded features
    """
    # Process the audio
    if isinstance(ref_audio, tuple):
        audio, sr = ref_audio
    else:
        audio, sr = torchaudio.load(ref_audio)

    if audio.shape[0] > 1:
        audio = torch.mean(audio, dim=0, keepdim=True)

    rms = torch.sqrt(torch.mean(torch.square(audio)))
    if rms < target_rms:
        audio = audio * target_rms / rms

    if sr != target_sample_rate:
        resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
        audio = resampler(audio)

    audio = audio.to(device)

    # Preprocess the text
    if len(ref_text[-1].encode("utf-8")) == 1:
        ref_text = ref_text + " "

    text_list = [ref_text]
    final_text_list = convert_char_to_pinyin(text_list)

    # Pre-encode with the model
    with torch.inference_mode():
        # Call the model's pre-encoding hook here (requires model support);
        # we assume an encode_condition method may exist
        if hasattr(model_obj, "encode_condition"):
            cond_features = model_obj.encode_condition(audio, final_text_list)
        else:
            # Otherwise fall back to storing the raw inputs
            cond_features = {"audio": audio, "text": final_text_list, "rms": rms}

    return {"cond_features": cond_features, "ref_text": ref_text, "audio_len": audio.shape[-1] // hop_length, "rms": rms}
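

# A minimal sketch (not part of the original file) of how the _ref_cache defined
# above could be wired up, assuming each reference is keyed by (audio path, ref text):
def get_preencoded_reference(ref_audio, ref_text, model_obj, device=device):
    key = (ref_audio, ref_text)
    if key not in _ref_cache:
        _ref_cache[key] = preencode_reference(ref_audio, ref_text, model_obj, device=device)
    return _ref_cache[key]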


def infer_with_precomputed(ref_data, gen_text_batches, model_obj, progress=tqdm,
        target_rms=0.1, cross_fade_duration=0.15, nfe_step=32, cfg_strength=2.0,
        sway_sampling_coef=-1, speed=1, fix_duration=None, device=None):
    """
    Run inference using pre-encoded reference features.

    Args:
        ref_data: the dict returned by preencode_reference
        gen_text_batches: list of text batches to generate
        model_obj: the model object
        ...
    """
    cond_features = ref_data["cond_features"]
    ref_text = ref_data["ref_text"]
    ref_audio_len = ref_data["audio_len"]
    rms = ref_data["rms"]

    generated_waves = []
    spectrograms = []

    for i, gen_text in enumerate(progress.tqdm(gen_text_batches)):
        # Prepare the generation text
        text_list = [ref_text + gen_text]
        final_text_list = convert_char_to_pinyin(text_list)

        # Estimate the duration
        if fix_duration is not None:
            duration = int(fix_duration * target_sample_rate / hop_length)
        else:
            ref_text_len = len(ref_text.encode("utf-8"))
            gen_text_len = len(gen_text.encode("utf-8"))
            duration = ref_audio_len + int(ref_audio_len / ref_text_len * gen_text_len / speed)

        # Inference
        with torch.inference_mode():
            if isinstance(cond_features, dict) and "audio" in cond_features:
                # Original path: condition directly on the raw audio
                generated, _ = model_obj.sample(cond=cond_features["audio"], text=final_text_list,
                    duration=duration, steps=nfe_step, cfg_strength=cfg_strength,
                    sway_sampling_coef=sway_sampling_coef)
            else:
                # Use the pre-encoded features (requires model support)
                generated, _ = model_obj.sample_with_features(cond_features=cond_features,
                    text=final_text_list, duration=duration, steps=nfe_step,
                    cfg_strength=cfg_strength, sway_sampling_coef=sway_sampling_coef)

        generated = generated.to(torch.float32)
        generated = generated[:, ref_audio_len:, :]
        generated_mel_spec = generated.permute(0, 2, 1)
        generated_wave = vocos.decode(generated_mel_spec.cpu())

        if rms < target_rms:
            generated_wave = generated_wave * rms / target_rms

        generated_wave = generated_wave.squeeze().cpu().numpy()

        generated_waves.append(generated_wave)
        spectrograms.append(generated_mel_spec[0].cpu().numpy())

    # Combine the audio segments
    if cross_fade_duration <= 0:
        final_wave = np.concatenate(generated_waves)
    else:
        final_wave = generated_waves[0]
        for i in range(1, len(generated_waves)):
            prev_wave = final_wave
            next_wave = generated_waves[i]

            cross_fade_samples = int(cross_fade_duration * target_sample_rate)
            cross_fade_samples = min(cross_fade_samples, len(prev_wave), len(next_wave))

            if cross_fade_samples <= 0:
                final_wave = np.concatenate([prev_wave, next_wave])
                continue

            prev_overlap = prev_wave[-cross_fade_samples:]
            next_overlap = next_wave[:cross_fade_samples]

            fade_out = np.linspace(1, 0, cross_fade_samples)
            fade_in = np.linspace(0, 1, cross_fade_samples)
            cross_faded_overlap = prev_overlap * fade_out + next_overlap * fade_in

            final_wave = np.concatenate([prev_wave[:-cross_fade_samples], cross_faded_overlap, next_wave[cross_fade_samples:]])

    combined_spectrogram = np.concatenate(spectrograms, axis=1)

    return final_wave, target_sample_rate, combined_spectrogram

def infer_process(
    ref_audio,
    ref_text,
    gen_text,
    model_obj,
    show_info=print,
    progress=tqdm,
    target_rms=target_rms,
    cross_fade_duration=cross_fade_duration,
    nfe_step=nfe_step,
    cfg_strength=cfg_strength,
    sway_sampling_coef=sway_sampling_coef,
    speed=speed,
    fix_duration=fix_duration,
    device=device,
):
    # Split the input text into batches
    audio, sr = torchaudio.load(ref_audio)
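    # Heuristic budget: (reference UTF-8 bytes per second) * (seconds remaining in a ~25 s clip)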
    max_chars = int(len(ref_text.encode("utf-8")) / (audio.shape[-1] / sr) * (25 - audio.shape[-1] / sr))
    gen_text_batches = chunk_text(gen_text, max_chars=max_chars)
    for i, gen_text in enumerate(gen_text_batches):
        print(f"gen_text {i}", gen_text)

    show_info(f"Generating audio in {len(gen_text_batches)} batches...")
    return infer_batch_process(
        (audio, sr),
        ref_text,
        gen_text_batches,
        model_obj,
        progress=progress,
        target_rms=target_rms,
        cross_fade_duration=cross_fade_duration,
        nfe_step=nfe_step,
        cfg_strength=cfg_strength,
        sway_sampling_coef=sway_sampling_coef,
        speed=speed,
        fix_duration=fix_duration,
        device=device,
    )

def infer_batch_process(
    ref_audio,
    ref_text,
    gen_text_batches,
    model_obj,
    progress=tqdm,
    target_rms=0.1,
    cross_fade_duration=0.15,
    nfe_step=32,
    cfg_strength=2.0,
    sway_sampling_coef=-1,
    speed=1,
    fix_duration=None,
    device=None,
):
    audio, sr = ref_audio
    if audio.shape[0] > 1:
        audio = torch.mean(audio, dim=0, keepdim=True)

    rms = torch.sqrt(torch.mean(torch.square(audio)))
    if rms < target_rms:
        audio = audio * target_rms / rms
    if sr != target_sample_rate:
        resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
        audio = resampler(audio)
    audio = audio.to(device)

    generated_waves = []
    spectrograms = []

    if len(ref_text[-1].encode("utf-8")) == 1:
        ref_text = ref_text + " "
    for i, gen_text in enumerate(progress.tqdm(gen_text_batches)):
        # Prepare the text
        text_list = [ref_text + gen_text]
        final_text_list = convert_char_to_pinyin(text_list)

        ref_audio_len = audio.shape[-1] // hop_length
        if fix_duration is not None:
            duration = int(fix_duration * target_sample_rate / hop_length)
        else:
            # Calculate duration
            ref_text_len = len(ref_text.encode("utf-8"))
            gen_text_len = len(gen_text.encode("utf-8"))
            duration = ref_audio_len + int(ref_audio_len / ref_text_len * gen_text_len / speed)

        # inference
        with torch.inference_mode():
            generated, _ = model_obj.sample(
                cond=audio,
                text=final_text_list,
                duration=duration,
                steps=nfe_step,
                cfg_strength=cfg_strength,
                sway_sampling_coef=sway_sampling_coef,
            )

        generated = generated.to(torch.float32)
        generated = generated[:, ref_audio_len:, :]
        generated_mel_spec = generated.permute(0, 2, 1)
        generated_wave = vocos.decode(generated_mel_spec.cpu())
        if rms < target_rms:
            generated_wave = generated_wave * rms / target_rms

        # wav -> numpy
        generated_wave = generated_wave.squeeze().cpu().numpy()

        generated_waves.append(generated_wave)
        spectrograms.append(generated_mel_spec[0].cpu().numpy())

    # Combine all generated waves with cross-fading
    if cross_fade_duration <= 0:
        # Simply concatenate
        final_wave = np.concatenate(generated_waves)
    else:
        final_wave = generated_waves[0]
        for i in range(1, len(generated_waves)):
            prev_wave = final_wave
            next_wave = generated_waves[i]

            # Calculate cross-fade samples, ensuring it does not exceed wave lengths
            cross_fade_samples = int(cross_fade_duration * target_sample_rate)
            cross_fade_samples = min(cross_fade_samples, len(prev_wave), len(next_wave))

            if cross_fade_samples <= 0:
                # No overlap possible, concatenate
                final_wave = np.concatenate([prev_wave, next_wave])
                continue

            # Overlapping parts
            prev_overlap = prev_wave[-cross_fade_samples:]
            next_overlap = next_wave[:cross_fade_samples]

            # Fade out and fade in
            fade_out = np.linspace(1, 0, cross_fade_samples)
            fade_in = np.linspace(0, 1, cross_fade_samples)

            # Cross-faded overlap
            cross_faded_overlap = prev_overlap * fade_out + next_overlap * fade_in

            # Combine
            new_wave = np.concatenate(
                [prev_wave[:-cross_fade_samples], cross_faded_overlap, next_wave[cross_fade_samples:]]
            )

            final_wave = new_wave

    # Create a combined spectrogram
    combined_spectrogram = np.concatenate(spectrograms, axis=1)

    return final_wave, target_sample_rate, combined_spectrogram


# remove silence from generated wav


def remove_silence_for_generated_wav(filename):
    aseg = AudioSegment.from_file(filename)
    non_silent_segs = silence.split_on_silence(aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=500)
    non_silent_wave = AudioSegment.silent(duration=0)
    for non_silent_seg in non_silent_segs:
        non_silent_wave += non_silent_seg
    aseg = non_silent_wave
    aseg.export(filename, format="wav")
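
Compared with the stock utils_infer.py, the additions here are preencode_reference, infer_with_precomputed, and the _ref_cache placeholder: the reference audio is loaded, mixed down, RMS-normalized, and resampled once, and later generation calls reuse those features.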

Inference code, optimized:

E:\project\F5-TTS\demo_youhua.py

python
import time

from api import F5TTS
from model.utils_infer import preencode_reference, chunk_text, infer_with_precomputed

if __name__ == '__main__':
    device = 'cuda:0'
    f5tts = F5TTS(model_type="F5-TTS",
                  ckpt_file=r"./hf_download\hub\models--SWivid--F5-TTS\snapshots\84e5a410d9cead4de2f847e7c9369a6440bdfaca\F5TTS_Base\model_1200000.safetensors",  # raw string for the Windows path
                  vocab_file=r"E:\project\F5-TTS\src\f5_tts\infer\examples\vocab.txt",
                  local_path=r"E:\project\F5-TTS\hf_download\hub\models--charactr--vocos-mel-24khz\snapshots\0feb3fdd929bcd6649e0e7c5a688cf7dd012ef21",  # point to the local vocoder here
                  device="cuda")
    # 1. Pre-encode the reference audio (only needs to happen once)
    ref_data = preencode_reference(ref_audio="tests/ref_audio/test_en_1_ref_short.wav",
                                   ref_text="some call me nature, others call me mother nature.",
                                   model_obj=f5tts.ema_model, device=device)

    # 2. Run inference repeatedly (no need to re-encode the reference)
    gen_text_batches = chunk_text("I don't really care...", max_chars=135)

    for i in range(10):
        start = time.time()
        wav, sr, spect = infer_with_precomputed(ref_data=ref_data, gen_text_batches=gen_text_batches,
                                                model_obj=f5tts.ema_model, nfe_step=32, cfg_strength=2.0, device=device)
        print("time:", time.time() - start)
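
With the reference pre-encoded, each loop iteration skips the audio loading, normalization, and resampling work; only the sampling and vocoding steps run per call.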