Lesson 3: Capturing and Playing the Audio Stream

The source files for this lesson can be downloaded here:

https://download.csdn.net/download/XiBuQiuChong/88680079

FFmpeg is a large open-source audio/video toolkit, and its source code offers plenty worth studying. For most beginners, though, the pressing need is to use its APIs to build something working quickly; a deeper study of the internals can come later.

In the previous lesson we captured the video stream and displayed it. In this lesson we follow the same workflow to capture the audio stream and play it.

1. The process is much like handling video: to play audio we first initialize an audio decoder. Add the following to the runFFmpeg function (a sketch of the audio-side declarations that step 2 relies on follows this block):

//Audio decoder
int audioIndex = -1;
AVCodec *aDecodec;
AVCodecContext *aDecodeCtx = NULL;

//Locate the audio stream; without this step audioIndex would stay at -1
audioIndex = av_find_best_stream(inFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);

//Initialize and open the audio decoder
aDecodec = avcodec_find_decoder(inFormatCtx->streams[audioIndex]->codecpar->codec_id);
aDecodeCtx = avcodec_alloc_context3(aDecodec);
avcodec_parameters_to_context(aDecodeCtx, inFormatCtx->streams[audioIndex]->codecpar);
avcodec_open2(aDecodeCtx, aDecodec, NULL);
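
Step 2 below also relies on a few things that are declared elsewhere in the downloadable source: a decoded audio frame, a temporary PCM buffer, a small queue of converted buffers protected by a critical section, and a resampler that turns whatever the decoder outputs into interleaved 16-bit stereo at 44.1 kHz (the format the speaker is opened with in step 3). The names below (deAudioFrame, outAudioBuff, outAudioQue, tmpAudioQueObj, queLock, swrCtx) are simply taken from the calls that appear later; the struct name AudioQueObj is made up here, and in the real source most of these are presumably members of the fmlp class, so treat this as a minimal sketch rather than the actual declarations:

//Sketch of the audio-side declarations used in step 2
//(needs <queue>, <windows.h> and libswresample/swresample.h)
struct AudioQueObj {
	char *audioDataArr;   //interleaved S16 PCM
	int audioDataSize;    //size in bytes
};
std::queue<AudioQueObj> outAudioQue;
AudioQueObj tmpAudioQueObj;
CRITICAL_SECTION queLock;
char *outAudioBuff = NULL;
AVFrame *deAudioFrame = av_frame_alloc();
InitializeCriticalSection(&queLock);

//Resampler: decoder output -> interleaved S16, 44.1 kHz stereo,
//matching the WAVEFORMATEX set up in openSpeaker()
SwrContext *swrCtx = swr_alloc_set_opts(NULL,
	AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,
	av_get_default_channel_layout(aDecodeCtx->channels),
	aDecodeCtx->sample_fmt, aDecodeCtx->sample_rate,
	0, NULL);
swr_init(swrCtx);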

2. Right after the video packets are handled we can handle the audio packets in the same loop: decode each packet, convert every decoded frame to a PCM buffer, and push it onto the audio queue for later playback (the conversion helper is sketched after this block):

if (normalPkt.stream_index == videoIndex)
		{
			ret = avcodec_send_packet(vDecodeCtx, &normalPkt);
			ret = avcodec_receive_frame(vDecodeCtx, deVideoFrame);
			av_packet_unref(&normalPkt);
			ret = sws_scale(bgrSwsCtx, (const uint8_t* const*)deVideoFrame->data, deVideoFrame->linesize, 0, deVideoFrame->height, bgrFrame.data, bgrFrame.linesize);
			srcMat = cv::Mat(bgrFrame.height, bgrFrame.width, CV_8UC3, bgrFrame.data[0]);
			//imshow("video", srcMat);
			//cv::waitKey(10);
			mainDlg->drawMatOfPlay(srcMat);
			av_frame_unref(deVideoFrame);
		}
		else if (normalPkt.stream_index == audioIndex)
		{
			//Send the packet to the audio decoder, then drain every frame it produces
			ret = avcodec_send_packet(aDecodeCtx, &normalPkt);
			while (1){
				ret = avcodec_receive_frame(aDecodeCtx, deAudioFrame);
				if (ret != 0){
					break;
				}
				else{
					//Generous upper bound for the converted PCM of this frame
					int originAudioDataSize = (deAudioFrame->linesize[0] * deAudioFrame->channels) << 1;
					//malloc here because the buffer is later released with free()
					//(in the speaker callback, or in the overflow branch below)
					outAudioBuff = (char *)malloc(originAudioDataSize);
					int outSampleNum = convertAudioFrameToAudioBuff(deAudioFrame, &outAudioBuff, originAudioDataSize);
					int finalAudioDataSize = outSampleNum * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * deAudioFrame->channels;
					tmpAudioQueObj.audioDataArr = outAudioBuff;
					tmpAudioQueObj.audioDataSize = finalAudioDataSize;
					EnterCriticalSection(&queLock);
					outAudioQue.push(tmpAudioQueObj);
					//If playback falls behind, drop the oldest buffer instead of growing without bound
					if (outAudioQue.size() > 50){
						free(outAudioQue.front().audioDataArr);
						outAudioQue.front().audioDataArr = NULL;
						outAudioQue.front().audioDataSize = 0;
						outAudioQue.pop();
					}
					LeaveCriticalSection(&queLock);
				}
				av_frame_unref(deAudioFrame);
			}

			av_packet_unref(&normalPkt);

		}
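
The audio branch above calls convertAudioFrameToAudioBuff(), which is part of the downloadable source and not reproduced in the article. Its job is to resample one decoded frame into the interleaved 16-bit PCM the speaker expects and to report how many samples per channel it produced. Assuming the swrCtx sketched after step 1, a minimal version of such a helper could look like this (stereo output is an assumption that matches the waveOut format in step 3):

//Sketch only: convert one decoded frame to interleaved S16 PCM in *outBuff.
//Returns the number of samples per channel written, or a negative error code.
int fmlp::convertAudioFrameToAudioBuff(AVFrame *frame, char **outBuff, int outBuffSize)
{
	//How many output samples fit in the caller's buffer (2 channels of S16 assumed)
	int maxOutSamples = outBuffSize / (2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
	uint8_t *outPlanes[1] = { (uint8_t *)*outBuff };
	return swr_convert(swrCtx, outPlanes, maxOutSamples,
		(const uint8_t **)frame->data, frame->nb_samples);
}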

3. To actually hear anything we first open the speaker (the waveOut device), then feed the queued PCM buffers to it from a dedicated playback thread; a sketch of how this is started from runFFmpeg follows the listing:

//Open the speaker (waveOut device) with the format the resampled PCM uses
void fmlp::openSpeaker(){
	outWaveform.wFormatTag = WAVE_FORMAT_PCM;
	outWaveform.nSamplesPerSec = 44100;
	outWaveform.wBitsPerSample = 16;
	outWaveform.nChannels = 2;
	outWaveform.nBlockAlign = (outWaveform.wBitsPerSample*outWaveform.nChannels) >> 3;
	outWaveform.nAvgBytesPerSec = outWaveform.nBlockAlign * outWaveform.nSamplesPerSec;
	outWaveform.cbSize = 0;

	//speakerCallback must be a static member so it can be used as a waveOut callback
	waveOutOpen(&hWaveOut, WAVE_MAPPER, &outWaveform, (DWORD_PTR)speakerCallback, 0L, CALLBACK_FUNCTION);
	waveOutSetVolume(hWaveOut, 0xFFFFFFFF);	//maximum volume on both channels
	waveHdrArr = new WAVEHDR[audioDataArrNum];
	for (int i = 0; i < audioDataArrNum; i++)
	{
		//Placeholder buffer so the header can be prepared; playAudio() swaps in the real data later
		waveHdrArr[i].lpData = (LPSTR)malloc(finalAudioDataSize);
		waveHdrArr[i].dwBufferLength = finalAudioDataSize;
		waveHdrArr[i].dwBytesRecorded = 0;
		waveHdrArr[i].dwUser = 0;
		waveHdrArr[i].dwFlags = 0;
		waveHdrArr[i].dwLoops = 0;
		waveHdrArr[i].lpNext = NULL;
		waveHdrArr[i].reserved = 0;
		waveOutPrepareHeader(hWaveOut, &waveHdrArr[i], sizeof(WAVEHDR));
	}

}
//Speaker (waveOut) callback: invoked whenever the device finishes one buffer
DWORD CALLBACK fmlp::speakerCallback(HWAVEOUT hwaveout, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
	switch (uMsg)
	{
	case WOM_OPEN:
		break;

	case WOM_DONE:
	{
		//The device is done with this buffer; release the PCM that playAudio() attached to it
		LPWAVEHDR pwh = (LPWAVEHDR)dwParam1;
		if (pwh->lpData){
			free(pwh->lpData);	//matches the malloc() in the decode loop
			pwh->lpData = NULL;
			pwh->dwBufferLength = 0;
		}
	}
		break;

	case WOM_CLOSE:
		break;
	default:
		break;
	}
	return 0;
}




//Audio playback thread entry: hands control to the member function playAudio()
DWORD WINAPI fmlp::playAudioThreadProc(LPVOID lpParam){
	fmlp *pThis = (fmlp*)lpParam;
	pThis->playAudio();
	return 0;
}

int fmlp::playAudio(){

	int i = 0;
	while (true){

		if (outAudioQue.empty()){
			Sleep(5);
			continue;
		}
		EnterCriticalSection(&queLock);
		//Only reuse a header that is prepared and no longer queued on the device
		if ((waveHdrArr[i].dwFlags & WHDR_PREPARED) && !(waveHdrArr[i].dwFlags & WHDR_INQUEUE)){
			waveHdrArr[i].lpData = (LPSTR)outAudioQue.front().audioDataArr;
			waveHdrArr[i].dwBufferLength = outAudioQue.front().audioDataSize;
			waveOutWrite(hWaveOut, &waveHdrArr[i], sizeof(WAVEHDR));
			outAudioQue.pop();
			i++;
		}
		LeaveCriticalSection(&queLock);
		//Cycle through the header pool
		if (i >= audioDataArrNum){
			i = 0;
		}
		Sleep(5);
	}
}
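
For completeness, here is roughly how the pieces above are tied together from runFFmpeg: the speaker is opened once before the packet loop, and playback runs on its own thread so it does not block decoding. This is a sketch based only on the calls shown above, assuming runFFmpeg runs in (or is called from) a member of fmlp and that playAudioThreadProc is a static member; the actual startup code is in the downloadable source:

//In runFFmpeg, after the decoders and the resampler are set up:
openSpeaker();                       //configure waveOut and prepare the WAVEHDR pool
HANDLE hPlayThread = CreateThread(NULL, 0,
	playAudioThreadProc,             //static thread entry shown above
	this,                            //handed back to playAudio() via lpParam
	0, NULL);

//...the packet reading / decoding loop runs here...

//On shutdown (sketch): stop the loops, then release the device and the thread handle
//waveOutReset(hWaveOut); waveOutClose(hWaveOut); CloseHandle(hPlayThread);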

4. That completes a minimal player that can play both video and audio.
