Lesson 3: Capturing and Playing the Audio Stream

The source files for this lesson can be downloaded here:

https://download.csdn.net/download/XiBuQiuChong/88680079

FFmpeg is a huge open-source toolkit for audio and video processing, and its source code offers plenty worth studying. For most beginners, though, the pressing need is to quickly use its APIs to build what they want; a deeper study of the internals can come later.

In the previous lesson we successfully captured and displayed the video stream. In this lesson we follow the same workflow to capture the audio stream and play it back.

1. Much like the video path, playing audio starts with initializing an audio decoder. Add the following code to runFFmpeg:

```cpp
// Audio decoder
int audioIndex = -1;
AVCodec *aDecodec;
AVCodecContext *aDecodeCtx = NULL;

// Locate the audio stream first; without this, audioIndex would stay at -1
audioIndex = av_find_best_stream(inFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);

// Initialize and open the audio decoder
aDecodec = avcodec_find_decoder(inFormatCtx->streams[audioIndex]->codecpar->codec_id);
aDecodeCtx = avcodec_alloc_context3(aDecodec);
avcodec_parameters_to_context(aDecodeCtx, inFormatCtx->streams[audioIndex]->codecpar);
avcodec_open2(aDecodeCtx, aDecodec, NULL);
```
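
The decoder usually produces planar floating-point samples (AAC decodes to AV_SAMPLE_FMT_FLTP, for example), while the speaker opened in step 3 expects interleaved 16-bit PCM at 44100 Hz stereo, so a resampler is needed to bridge the two. The lesson text does not show this setup, so the snippet below is only a minimal sketch of one way to do it with libswresample; `aSwrCtx` is an assumed fmlp member name, not something from the original source.

```cpp
// Sketch (assumption): create a resampler that converts whatever the decoder
// produces into interleaved S16 stereo at 44100 Hz, matching the WAVEFORMATEX
// configured in openSpeaker() below.
// Requires #include <libswresample/libswresample.h... i.e. <libswresample/swresample.h>
aSwrCtx = swr_alloc_set_opts(NULL,
	AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,            // output layout/format/rate
	av_get_default_channel_layout(aDecodeCtx->channels),      // input layout
	aDecodeCtx->sample_fmt, aDecodeCtx->sample_rate,          // input format/rate
	0, NULL);
swr_init(aSwrCtx);
```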

2. Right after handling a video packet we can handle audio packets in the same loop, converting each decoded audio frame into a PCM buffer and pushing it onto an audio queue for later playback:

```cpp
if (normalPkt.stream_index == videoIndex)
{
	ret = avcodec_send_packet(vDecodeCtx, &normalPkt);
	ret = avcodec_receive_frame(vDecodeCtx, deVideoFrame);
	av_packet_unref(&normalPkt);
	if (ret == 0)
	{
		// Convert the decoded frame to BGR and hand it to the dialog for display
		ret = sws_scale(bgrSwsCtx, (const uint8_t* const*)deVideoFrame->data, deVideoFrame->linesize, 0, deVideoFrame->height, bgrFrame.data, bgrFrame.linesize);
		srcMat = cv::Mat(bgrFrame.height, bgrFrame.width, CV_8UC3, bgrFrame.data[0]);
		//imshow("video", srcMat);
		//cv::waitKey(10);
		mainDlg->drawMatOfPlay(srcMat);
		av_frame_unref(deVideoFrame);
	}
}
else if (normalPkt.stream_index == audioIndex)
{
	ret = avcodec_send_packet(aDecodeCtx, &normalPkt);
	while (1){
		ret = avcodec_receive_frame(aDecodeCtx, deAudioFrame);
		if (ret != 0){
			break;
		}
		else{
			// Generous upper bound for the converted PCM size of this frame
			int originAudioDataSize = (deAudioFrame->linesize[0] * deAudioFrame->channels) << 1;
			// Allocate with malloc so the consumer side can release it with free()
			outAudioBuff = (char *)malloc(originAudioDataSize);
			int outSampleNum = convertAudioFrameToAudioBuff(deAudioFrame, &outAudioBuff, originAudioDataSize);
			int finalAudioDataSize = outSampleNum * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * deAudioFrame->channels;
			tmpAudioQueObj.audioDataArr = outAudioBuff;
			tmpAudioQueObj.audioDataSize = finalAudioDataSize;
			EnterCriticalSection(&queLock);
			outAudioQue.push(tmpAudioQueObj);
			// Drop the oldest buffer if the queue grows too long
			if (outAudioQue.size() > 50){
				free(outAudioQue.front().audioDataArr);
				outAudioQue.front().audioDataArr = NULL;
				outAudioQue.front().audioDataSize = 0;
				outAudioQue.pop();
			}
			LeaveCriticalSection(&queLock);
		}
		av_frame_unref(deAudioFrame);
	}

	av_packet_unref(&normalPkt);
}
```
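
The helper convertAudioFrameToAudioBuff is called above but its body is not shown in this lesson (it is in the downloadable source). As a rough idea of what it has to do, here is a minimal sketch, assuming the aSwrCtx resampler from step 1: it converts one decoded frame into interleaved S16 PCM in the caller-supplied buffer and returns the number of samples per channel that were written.

```cpp
// Minimal sketch (assumption, not the lesson's actual implementation) of
// convertAudioFrameToAudioBuff: resample one decoded frame into interleaved
// S16 PCM. outBuffSize is the capacity of *outBuff in bytes.
int fmlp::convertAudioFrameToAudioBuff(AVFrame *frame, char **outBuff, int outBuffSize)
{
	uint8_t *outPtr[1] = { (uint8_t *)*outBuff };
	const int outChannels = 2;	// matches AV_CH_LAYOUT_STEREO used when creating aSwrCtx
	// How many S16 samples per channel fit in the supplied buffer
	int maxOutSamples = outBuffSize / (av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * outChannels);
	// swr_convert returns the number of samples per channel actually written
	return swr_convert(aSwrCtx, outPtr, maxOutSamples,
		(const uint8_t **)frame->data, frame->nb_samples);
}
```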

3. To actually produce sound, first open the speaker, then keep feeding the queued PCM buffers into it:

```cpp
// Open the speaker
void fmlp::openSpeaker(){
	outWaveform.wFormatTag = WAVE_FORMAT_PCM;
	outWaveform.nSamplesPerSec = 44100;
	outWaveform.wBitsPerSample = 16;
	outWaveform.nChannels = 2;
	outWaveform.nBlockAlign = (outWaveform.wBitsPerSample * outWaveform.nChannels) >> 3;
	outWaveform.nAvgBytesPerSec = outWaveform.nBlockAlign * outWaveform.nSamplesPerSec;
	outWaveform.cbSize = 0;

	// Cast the callback to DWORD_PTR so this also works on 64-bit builds
	waveOutOpen(&hWaveOut, WAVE_MAPPER, &outWaveform, (DWORD_PTR)speakerCallback, 0L, CALLBACK_FUNCTION);
	waveOutSetVolume(hWaveOut, 0xFFFFFFFF);	// low word = left channel, high word = right channel
	waveHdrArr = new WAVEHDR[audioDataArrNum];
	for (int i = 0; i < audioDataArrNum; i++)
	{
		// Allocate with malloc so the WOM_DONE callback can release buffers with free()
		waveHdrArr[i].lpData = (LPSTR)malloc(finalAudioDataSize);
		waveHdrArr[i].dwBufferLength = finalAudioDataSize;
		waveHdrArr[i].dwBytesRecorded = 0;
		waveHdrArr[i].dwUser = 0;
		waveHdrArr[i].dwFlags = 0;
		waveHdrArr[i].dwLoops = 0;
		waveHdrArr[i].lpNext = NULL;
		waveHdrArr[i].reserved = 0;
		waveOutPrepareHeader(hWaveOut, &waveHdrArr[i], sizeof(WAVEHDR));
	}
}

// Speaker callback: release each buffer once the device has finished playing it
DWORD CALLBACK fmlp::speakerCallback(HWAVEOUT hwaveout, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
	switch (uMsg)
	{
	case WOM_OPEN:
		break;

	case WOM_DONE:
	{
		LPWAVEHDR pwh = (LPWAVEHDR)dwParam1;
		if (pwh->lpData){
			free(pwh->lpData);
			pwh->lpData = NULL;
			pwh->dwBufferLength = 0;
		}
	}
		break;

	case WOM_CLOSE:
		break;
	default:
		break;
	}
	return 0;
}

// Playback thread entry point
DWORD WINAPI fmlp::playAudioThreadProc(LPVOID lpParam){
	fmlp *pThis = (fmlp*)lpParam;
	pThis->playAudio();
	return 0;
}

// Pull PCM buffers from the queue and write them to the speaker
int fmlp::playAudio(){
	int i = 0;
	while (true){
		if (outAudioQue.empty()){
			Sleep(5);
			continue;
		}
		EnterCriticalSection(&queLock);
		if (waveHdrArr[i].dwFlags & WHDR_PREPARED){
			waveHdrArr[i].lpData = (LPSTR)outAudioQue.front().audioDataArr;
			waveHdrArr[i].dwBufferLength = outAudioQue.front().audioDataSize;
			waveOutWrite(hWaveOut, &waveHdrArr[i], sizeof(WAVEHDR));
			outAudioQue.pop();
			i++;
		}
		LeaveCriticalSection(&queLock);
		// Reuse the wave headers in a ring
		if (i >= audioDataArrNum){
			i = 0;
		}
		Sleep(5);
	}
}
```
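
The lesson does not show where these pieces are started. Assuming runFFmpeg drives everything, the wiring might look like the sketch below, placed before the packet-reading loop; `hAudioPlayThread` is a hypothetical local name.

```cpp
// Sketch (assumption): wiring inside runFFmpeg before the av_read_frame() loop.
InitializeCriticalSection(&queLock);	// protects outAudioQue between the decode loop and the playback thread
openSpeaker();							// configure waveOut for 44100 Hz, 16-bit, stereo
// Start the playback thread; "this" is passed so the static thread proc can call playAudio()
HANDLE hAudioPlayThread = CreateThread(NULL, 0, playAudioThreadProc, this, 0, NULL);
```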

4. With that, the simplest possible player that can play both video and audio is complete.
