微软云语音识别ASR示例Demo

对象存储服务 OSS 对应 Azure Blob Storage

语音识别 ASR 对应 Azure Speech-to-Text

语音合成 TTS 对应 Azure Text-to-Speech

上传 .mp3 文件或者上传 OSS 地址,返回音频文字的示例 Demo

依赖

复制代码
<dependencies>
        <!-- Spring reactive web stack (WebFlux / WebClient) -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-webflux</artifactId>
        </dependency>

        <!-- Spring MVC: REST controllers and multipart file upload -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <!--   microsoft ASR     -->
        <!-- Azure Cognitive Services Speech SDK (SpeechConfig / SpeechRecognizer) -->
        <dependency>
            <groupId>com.microsoft.cognitiveservices.speech</groupId>
            <artifactId>client-sdk</artifactId>
            <version>1.43.0</version>
        </dependency>
        <!-- compile-time annotation processor only; not needed at runtime -->
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <!-- test-scope dependencies -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>io.projectreactor</groupId>
            <artifactId>reactor-test</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>

代码:先在 application.properties 或 application.yaml 中配置 key 和 endpoint,再使用下面的控制器

复制代码
package com.example.microsoftasr.controller;

import com.microsoft.cognitiveservices.speech.*;
import com.microsoft.cognitiveservices.speech.audio.AudioConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import java.io.File;
import java.net.URI;
import java.nio.file.Files;

@RestController
@RequestMapping("/asr")
public class TestController {

    @Value("${azure.speech.key}")
    private String speechKey;

    @Value("${azure.speech.endpoint}")
    private String speechEndpoint;

    /**
     * Path to the ffmpeg executable. Configurable via the {@code ffmpeg.path}
     * property instead of a hard-coded absolute Windows path; defaults to
     * {@code ffmpeg} on the system PATH.
     */
    @Value("${ffmpeg.path:ffmpeg}")
    private String ffmpegPath;

    /** Simple liveness endpoint. */
    @GetMapping("/hello")
    public String test() {
        return "Hello World";
    }

    /**
     * Recognizes Mandarin speech from either an uploaded audio file or a remote
     * (OSS) audio URL and returns the transcribed text.
     *
     * <p>Pipeline: save the raw audio to a temp file, convert it to 16 kHz mono
     * WAV with ffmpeg (Azure Speech's expected input format), then run a single
     * recognizeOnce pass against the Azure Speech endpoint.
     *
     * @param file   uploaded audio file (optional)
     * @param ossUrl remote audio URL, e.g. an OSS object address (optional)
     * @return recognized text, or a Chinese error message on failure
     */
    @PostMapping("/recognize")
    public String recognize(@RequestParam(value = "file", required = false) MultipartFile file,
                            @RequestParam(value = "url", required = false) String ossUrl) {
        if ((file == null || file.isEmpty()) && (ossUrl == null || ossUrl.isBlank())) {
            return "未提供音频文件或音频地址";
        }

        File tempInput = null;
        File tempWav = null;

        try {
            // 1. Persist the raw audio to a temp file (upload takes precedence over URL).
            if (file != null && !file.isEmpty()) {
                String suffix = getSuffix(file.getOriginalFilename());
                tempInput = File.createTempFile("audio-input-", "." + suffix);
                file.transferTo(tempInput);
            } else {
                String suffix = getSuffix(ossUrl);
                tempInput = File.createTempFile("audio-input-", "." + suffix);
                // URI.create(...).toURL() replaces the deprecated new URL(String) constructor.
                try (var in = URI.create(ossUrl).toURL().openStream()) {
                    Files.copy(in, tempInput.toPath(), java.nio.file.StandardCopyOption.REPLACE_EXISTING);
                }
            }

            // 2. Convert to 16 kHz mono WAV unless the input is already WAV.
            tempWav = File.createTempFile("audio-output-", ".wav");
            if (!getSuffix(tempInput.getName()).equalsIgnoreCase("wav")) {
                ProcessBuilder pb = new ProcessBuilder(
                        ffmpegPath, "-y",
                        "-i", tempInput.getAbsolutePath(),
                        "-ar", "16000",
                        "-ac", "1",
                        tempWav.getAbsolutePath()
                );
                Process process = pb.inheritIO().start();
                int exitCode = process.waitFor();
                if (exitCode != 0) return "ffmpeg 转换失败,exitCode=" + exitCode;
            } else {
                Files.copy(tempInput.toPath(), tempWav.toPath(), java.nio.file.StandardCopyOption.REPLACE_EXISTING);
            }

            // 3. Call Azure speech-to-text. SpeechConfig, AudioConfig and the
            //    recognizer are all Closeable native-resource wrappers, so all
            //    three go through try-with-resources (the original leaked SpeechConfig).
            try (SpeechConfig speechConfig = SpeechConfig.fromEndpoint(new URI(speechEndpoint), speechKey)) {
                speechConfig.setSpeechRecognitionLanguage("zh-CN");
                try (AudioConfig audioConfig = AudioConfig.fromWavFileInput(tempWav.getAbsolutePath());
                     SpeechRecognizer recognizer = new SpeechRecognizer(speechConfig, audioConfig)) {
                    SpeechRecognitionResult result = recognizer.recognizeOnceAsync().get();
                    if (result.getReason() == ResultReason.RecognizedSpeech) {
                        return result.getText();
                    } else {
                        return "识别失败: " + result.getReason();
                    }
                }
            }

        } catch (InterruptedException ie) {
            // Restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
            return "识别异常: " + ie.getMessage();
        } catch (Exception e) {
            e.printStackTrace();
            return "识别异常: " + e.getMessage();
        } finally {
            // Best-effort cleanup of both temp files.
            try {
                if (tempInput != null) Files.deleteIfExists(tempInput.toPath());
                if (tempWav != null) Files.deleteIfExists(tempWav.toPath());
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
    }

    /**
     * Extracts a filename extension from a filename or URL, defaulting to "tmp".
     *
     * <p>Query string and fragment are stripped first so a signed OSS URL such as
     * {@code a.mp3?sig=x.y} yields {@code mp3} rather than {@code y}; path-like or
     * empty "extensions" also fall back to "tmp".
     */
    private String getSuffix(String filenameOrUrl) {
        if (filenameOrUrl == null) return "tmp";
        int query = filenameOrUrl.indexOf('?');
        if (query >= 0) filenameOrUrl = filenameOrUrl.substring(0, query);
        int fragment = filenameOrUrl.indexOf('#');
        if (fragment >= 0) filenameOrUrl = filenameOrUrl.substring(0, fragment);
        int dot = filenameOrUrl.lastIndexOf('.');
        if (dot < 0) return "tmp";
        String suffix = filenameOrUrl.substring(dot + 1);
        // Guard against "name." and dots inside a directory component ("a.b/c").
        if (suffix.isEmpty() || suffix.contains("/") || suffix.contains("\\")) return "tmp";
        return suffix;
    }

}
相关推荐
冬奇Lab19 分钟前
RAG 系列(五):Embedding 模型——语义理解的核心
人工智能·llm·aigc
深小乐21 分钟前
AI 周刊【2026.04.27-05.03】:Anthropic 9000亿美元估值、英伟达死磕智能体、中央重磅定调AI
人工智能
码点滴24 分钟前
什么时候用 DeepSeek V4,而不是 GPT-5/Claude/Gemini?
人工智能·gpt·架构·大模型·deepseek
狐狐生风35 分钟前
LangChain 向量存储:Chroma、FAISS
人工智能·python·学习·langchain·faiss·agentai
波动几何37 分钟前
CDA架构代码工坊技能cda-code-lab
人工智能
舟遥遥娓飘飘43 分钟前
DeepSeek V4技术变革对社会结构与职业体系的重构
人工智能
狐狐生风44 分钟前
LangChain RAG 基础
人工智能·python·学习·langchain·rag·agentai
墨北小七1 小时前
使用InspireFace进行智慧楼宇门禁人脸识别的训练微调
人工智能·深度学习·神经网络
HackTorjan1 小时前
深度神经网络的反向传播与梯度优化原理
人工智能·spring boot·神经网络·机器学习·dnn
PersistJiao2 小时前
Codex、Claude Code、gstack三者的关系
人工智能