OpenRTP开源地址
增加桌面采集
暂时只是增加了rtp发送,没有ps和ts流的发送,代码比较多,每天修改
为了测试rtp所用,采用GDI简单采集发送
c
#pragma once
#ifdef _WIN32
#define HDIB HANDLE
#include <stdint.h>
#include <Windows.h>
// GDI-based desktop capture device: grabs the screen into a 24-bit RGB
// buffer (see QueryFrameLen: w*h*3) and overlays the mouse cursor on the
// captured image via AddMouse.
class c_DeviceDesktop
{
protected:
int m_FramesWritten; // To track where we are in the file
RECT m_rScreen; // Rect containing entire screen coordinates
int m_h; // The current image height
int m_w; // And current image width
int m_iRepeatTime; // Time in msec between frames
int m_nCurrentBitDepth; // Screen bit depth
int nDataSize; // size in bytes of one captured frame (see GetDataSize)
HDC hScrDC; // screen DC (capture source)
HDC hMemDC ;// memory DC (capture destination)
HBITMAP hBitmap;
RECT _desktop_rect;
HCURSOR _hcur;
// Crop margins in pixels; currently unused (SetCrop is commented out below).
int m_crop_top = 0;
int m_crop_bot = 0;
int m_crop_lef = 0;
int m_crop_rig = 0;
public:
c_DeviceDesktop();
~c_DeviceDesktop();
//void SetCrop(int top, int bottom, int left, int right);
// Draw the current mouse cursor onto the captured image held in hMemDC.
void AddMouse(HDC hMemDC, LPRECT lpRect, HDC hScrDC, HWND hwnd);
// Override the version that offers exactly one media type
// Capture one frame; returns a pointer to the pixel data
// (presumably 24-bit RGB, matching QueryFrameLen -- confirm in the .cpp).
uint8_t *QueryFrame();
// Take an RGB image and alpha-blend it onto the captured frame.
//uint8_t *QueryFrame(uint8_t *buffer,int w,int h);
// Byte length of one captured frame: 3 bytes per pixel (24-bit RGB).
int QueryFrameLen()
{
return m_w* m_h* 3;
}
// Set the agreed media type and set up the necessary parameters
void SetMediaType(BITMAPINFOHEADER &bmiHeader);
// Support multiple display formats
void GetMediaType(int iPosition, BITMAPINFOHEADER &bmiHeader);
int GetWidth()
{
return m_w;
}
int GetHeight()
{
return m_h ;
}
int GetDataSize()
{
return nDataSize;
}
private:
BITMAPINFOHEADER _bmiHeader;
BYTE *_pData;
// Copy the screen area lpRect from hdc into hMemDC and return the pixel
// data in pData as a DIB described by pHeader.
HBITMAP CopyScreenToBitmap(HDC hdc,HDC hMemDC,LPRECT lpRect, BYTE *pData, BITMAPINFO *pHeader);
};
#endif
不过这个代码的优点是加了鼠标在图像上
c
编码
这次编码没有直接采用libx264,本身里面加了ffmpeg库以后,这次直接使用ffmpeg进行编码,代码在
可以将图像倒置编码,使用了一个技巧,如下所示:
c
// Convert a packed 24-bit RGB frame to YUV through the pre-configured
// swscale context (_vsc), writing into the destination frame (_yuv).
//
// RGB     : source pixel data, 3 bytes per pixel, w*h pixels
// w, h    : source image dimensions in pixels
// reverse : > 0 for camera input, which arrives bottom-up and must be
//           flipped; <= 0 for desktop capture, which is already top-down
//
// Flip trick: a negative stride plus a pointer to the LAST row makes
// sws_scale walk the source image upside-down, fixing the inversion
// without a manual row-swap pass.
void RGB2YUV(const uint8_t* RGB, int w, int h, int reverse)
{
uint8_t* data[AV_NUM_DATA_POINTERS] = { 0 };
int linesize[AV_NUM_DATA_POINTERS] = { 0 };
if (reverse > 0) // camera: flip vertically while converting
{
linesize[0] = -w * 3; // negative row stride (R G B)
data[0] = (uint8_t*)RGB + 3 * w * (h - 1); // start at the last row
}
else // desktop: convert as-is
{
linesize[0] = w * 3; // positive row stride (R G B)
data[0] = (uint8_t*)RGB;
}
// Single conversion call for both paths (the original duplicated the
// call per branch and stored its return value unused in the else arm).
sws_scale(_vsc, data, linesize, 0, h, _yuv->data, _yuv->linesize);
}
这样图像倒置就比较简单解决,不过也可以纯手工算法倒置,没多大区别,TYUVRGBScale.h 里面有转化,合并,下采样的代码,放大缩小也不在话下,为什么要自己写呢,因为代码最后都会走到硬件芯片上,手动写后面可以采用比如直接的cuda 核函数来变化,所以也写了一份。
帧率保证算法
这次加上了帧率保证算法代码,好处是后面即使用固定增加的时间戳音视频也能同步,具体看以下的线程保证,主要原理是计算fix_consume 和 时间耗费的 total,两者的diff 差就是我们需要暂停的时间,以下也示例了所有编码类如何使用。rtp 协议直接发送,一定要编码保证每个关键帧前面有sps和pps, h265 加上vps sps pps,里面有分析的代码函数为AnalyseNalu 。
c
// Thread entry point: captures the desktop via GDI, encodes it to H.264
// with FFmpeg, splits the bitstream into NAL units and sends them over
// RTP, pacing the loop to a fixed frame rate so that fixed-increment
// timestamps stay in sync with wall-clock time.
void ProtocolEncoderLive::Run()
{
this->v_ts_audio = 0;
this->v_ts_video = 0;
// Only initialise the RTMP output when an RTMP URL was configured.
if (!v_config.urlrtmp.empty())
{
v_rtmp.InitParam(v_config.urlrtmp.c_str(), width_, height_, fps_, 0);
}
// Start capturing audio and video.
ds_capture_->StartAudio();
ds_capture_->StartVideo();
c_DeviceDesktop deskCapture;
int w = deskCapture.GetWidth();
int h = deskCapture.GetHeight();
VideoEncoder encoder;
// NOTE(review): the units of the (2000, 10) arguments are not visible
// here -- presumably bitrate and fps; confirm against VideoEncoder::Init.
encoder.Init(w, h, w, h, 2000, 10, AV_CODEC_ID_H264);
int fpsdesk = 10; // desktop capture frame rate
uint32_t m_start_clock = 0; // number of frames produced so far
float delay = 1000.0f / (float)(fpsdesk); // target milliseconds per frame
uint32_t start_timestamp = GetTimestamp32();
while (!IsStop())
{
if (c_thread::IsStop())
break;
uint8_t* buf = deskCapture.QueryFrame();
AVPacket* pkt = encoder.EncodeFrameFromBGR24(buf, 0);
// MergeRGB(desk, w, h, cam, m_old_sw, m_old_sh, 0.5, 0.5);
if (pkt != NULL)
{
bool isKeyframe = pkt->flags & AV_PKT_FLAG_KEY;
GETALLPoint
// Split the encoded bitstream into SPS/PPS/SEI/frame NAL units.
AnalyseNalu(pkt->data, pkt->size, &sps, spslen, &pps, ppslen, &se, selen, &frame, framelen);
uint32_t ts = convertToRTPTimestamp();
// Raw RTP has no container, so SPS and PPS must be resent in front
// of every keyframe for the receiver to decode mid-stream.
if (isKeyframe && sps != NULL && pps!=NULL)
{
pos = GETPointRemoveStartcode(sps, spslen, retlen);
// BUGFIX: send the startcode-stripped pointer (pos), not sps.
// retlen is the stripped length, so passing sps shifted the NAL
// payload and dropped its tail bytes; PPS and the frame below
// already used pos correctly.
v_rtp_desktop.send_video(pos, retlen, ts, isKeyframe);
pos = GETPointRemoveStartcode(pps, ppslen, retlen);
v_rtp_desktop.send_video(pos, retlen, ts, isKeyframe);
}
pos = GETPointRemoveStartcode(frame, framelen, retlen);
v_rtp_desktop.send_video(pos, retlen, ts, isKeyframe);
av_packet_free(&pkt);
}
// Frame pacing: fix_consume is how long the fixed schedule says we
// should have spent after this many frames; total is the wall-clock
// time actually spent. Sleep off the difference to hold the rate.
uint32_t tnow = GetTimestamp32();
uint32_t total = tnow - start_timestamp; // total elapsed time
uint32_t fix_consume = (uint32_t)(++m_start_clock * delay);
if (total < fix_consume) {
int64_t diff = (int64_t)(fix_consume - total);
if (diff > 0) {
//cout << total << ":" << diff << endl;
std::this_thread::sleep_for(std::chrono::milliseconds(diff));
}
}
}
ds_capture_->StopVideo();
ds_capture_->StopAudio();
gVideoBegin = false;
/* if (udp_ts_muxer_)
{
udp_ts_muxer_->WriteEnd();
}*/
}
执行
如下图所示,端口变为3个,在6000 的基础上声音+2 , 桌面再加2 。
配置文件有所改变,地址和端口可以保存。数据库是ini数据库,可以读写,比较简单
测试
先使用vlc测试,要写的东西比较多,接收方面先放一放。
vlc 测试sdp:
c
v=0
o=- 0 0 IN IP4 127.0.0.1
s=Stream Name
c=IN IP4 127.0.0.1
t=0 0
m=video 6004 RTP/AVP 96
a=rtpmap:96 H264/90000
只是端口变成了6004,打开vlc,本机回环镜像,看了头大
后记
既然是RTP的代码,肯定也需要写好接收,RTPReceive的代码从下次文章开始加强