ZLMediaKit
主函数
cpp
// Entry point of the media server: parse arguments, optionally daemonize,
// size the thread pools, load the ini config and start every enabled service.
// Returns 0 on normal exit, -1 on a command-line parsing error.
int start_main(int argc, char *argv[]) {
    // NOTE(review): in the original, cmd_main was declared inside a nested
    // scope but used after that scope closed; the extra braces are removed so
    // the object stays alive for the rest of the function.
    CMD_main cmd_main;
    try {
        cmd_main.operator()(argc, argv); // parse command-line arguments
    } catch (ExitException &) {
        // e.g. "--help"/"--version": not an error
        return 0;
    } catch (std::exception &ex) {
        cout << ex.what() << endl;
        return -1;
    }
    bool bDaemon = cmd_main.hasKey("daemon"); // run as a background daemon
    g_ini_file = cmd_main["config"];          // path of the ini config file
    pid_t pid = getpid();
    bool kill_parent_if_failed = true;
    if (bDaemon) {
        // Start daemon process
        System::startDaemon(kill_parent_if_failed);
    }
    // Configure poller/work thread pool sizes and cpu affinity.
    EventPollerPool::setPoolSize(threads);
    WorkThreadPool::setPoolSize(threads);
    EventPollerPool::enableCpuAffinity(affinity);
    WorkThreadPool::enableCpuAffinity(affinity);
    // Load the config file; it is created if it does not exist.
    loadIniConfig(g_ini_file.data());
    // Read the listening ports and actually start each service.
    std::string listen_ip = mINI::Instance()[General::kListenIP];
    uint16_t rtspPort = mINI::Instance()[Rtsp::kPort];
    uint16_t rtmpPort = mINI::Instance()[Rtmp::kPort];
    uint16_t httpPort = mINI::Instance()[Http::kPort];
    uint16_t rtpPort = mINI::Instance()[RtpProxy::kPort]; // used further on in the full source
    // rtsp server, default port 554
    if (rtspPort) { rtspSrv->start<RtspSession>(rtspPort, listen_ip); }
    // rtmp server, default port 1935
    if (rtmpPort) { rtmpSrv->start<RtmpSession>(rtmpPort, listen_ip); }
    // http server, default port 80
    if (httpPort) { httpSrv->start<HttpSession>(httpPort, listen_ip); }
#if defined(ENABLE_WEBRTC)
    // webrtc udp server
    // NOTE(review): the original was missing the closing ')' on this call.
    if (rtcPort) { rtcSrv_udp->start<WebRtcSession>(rtcPort, listen_ip); }
    if (rtcTcpPort) { rtcSrv_tcp->start<WebRtcSession>(rtcTcpPort, listen_ip); }
    // webrtc signaling servers (plain and ssl)
    if (signalingPort) {
        signaleSrv->start<WebRtcWebcosktSignalingSession>(signalingPort);
    }
    if (signalSslPort) {
        signalsSrv->start<WebRtcWebcosktSignalSslSession>(signalSslPort);
    }
    // STUN/TURN (ICE) services over udp and tcp
    if (icePort) { iceSrv->start<IceSession>(icePort); }
    if (iceTcpPort) { iceTcpSrv->start<IceSession>(iceTcpPort); }
#endif
    return 0; // NOTE(review): original fell off the end of a non-void function
}
mINI
cpp
// Simplified sketch: mINI is an ini-style config store that IS a std::map,
// keyed by the full option name ("section.field") with a generic variant value.
template<typename variant>
class mINI_basic : public std::map<std::string, variant> {};
// NOTE(review): the original was missing the ';' after the class body.
// `variant` in the alias below refers to the toolkit's variant value type.
using mINI = mINI_basic<variant>;
- mINI_basic 继承自 std::map<std::string, variant>,印证其底层数据结构为键值映射。
- key 为 std::string 类型(协议名或配置项名),value 为 variant 类型(泛型值)。
增加自定义配置信息
ini
# config.ini中添加自定义配置
[privprotocolsdk]
ip=127.0.0.1
port=1220
username=test
passwd=AAAaaa123
cpp
// Declarations to add in config.h
// PrivProtocolSdk configuration // private sdk protocol
namespace PrivProtocolSdk_ { // namespace groups the keys of one ini section
static constexpr char kFieldName[] = "privprotocolsdk."; // ini section prefix, includes trailing '.'
extern const std::string ip;
extern const std::string port;
extern const std::string username;
extern const std::string passwd;
}
// Definitions to add in config.cpp
namespace PrivProtocolSdk_{
// Full key names: section prefix + field, e.g. "privprotocolsdk.ip".
const string ip = string(kFieldName) + "ip";
const string port = string(kFieldName) + "port";
const string username = string(kFieldName) + "username";
const string passwd = string(kFieldName) + "passwd";
// onceToken runs the lambda exactly once during static initialization to seed
// default values into the global mINI map (the loaded ini file overrides them).
static onceToken token([]() {
mINI::Instance()[ip] = "127.0.0.1";// default value
mINI::Instance()[port] = 9901;
mINI::Instance()[username] = "tyj";
mINI::Instance()[passwd] = "Tangyujie@123";
}); // registered into mINI's map
}
获取使用自定义配置信息
cpp
#include <config.h> // declarations of the config keys

// Define a static variable `arg` of `type` initialized from the global ini
// map, and register a reload listener so the variable tracks hot config
// reloads.
// NOTE(review): the original placed a '//' comment after the
// line-continuation backslash, which breaks the macro — a '\' must be the
// last character on the line.
#define GET_CONFIG(type, arg, key)\
static type arg = ::toolkit::mINI::Instance()[key];\
LISTEN_RELOAD_KEY(arg, key, { RELOAD_KEY(arg, key); });

// Each GET_CONFIG expansion defines the variable; use it directly afterwards.
GET_CONFIG(std::string, _ip, PrivProtocolSdk_::ip);
GET_CONFIG(int, _port, PrivProtocolSdk_::port);
GET_CONFIG(std::string, _username, PrivProtocolSdk_::username);
GET_CONFIG(std::string, _passwd, PrivProtocolSdk_::passwd);
DebugL << "ip " << _ip << ", port " << _port << ", usrname "
<< _username << ", passwd " <<_passwd;
TcpServer
cpp
auto rtspSrv = std::make_shared<TcpServer>();
// 创建TcpServer的共享指针, 所有Tcp请求处理的泛型服务端
if (rtspPort) { rtspSrv->start<RtspSession>(rtspPort, listen_ip);
// 根据传入的具体Session真正创建具体的协议服务端
cpp
// Start the server on `port`/`host`: install a session factory that creates
// one SessionType per accepted connection, then begin listening.
// `cb` (optional) is invoked on every freshly created session.
template <typename SessionType>
void start(uint16_t port, const std::string &host = "::", uint32_t backlog = 1024, const std::function<void(std::shared_ptr<SessionType> &)> &cb = nullptr)
{
// Demangled session class name, used only for tracing.
static std::string cls_name = toolkit::demangle(typeid(SessionType).name());
// Session factory: this is what turns the generic TcpServer into a concrete
// protocol server (rtsp/rtmp/http/...) depending on SessionType.
_session_alloc = [cb](const TcpServer::Ptr &server, const Socket::Ptr &sock) {
auto session = std::shared_ptr<SessionType> // construct the requested SessionType
(new SessionType(sock), [](SessionType *ptr) {
// Custom deleter: trace destruction before deleting.
TraceP(static_cast<Session *>(ptr)) << "~" << cls_name;
delete ptr;
});
if (cb) {
cb(session);
}
TraceP(static_cast<Session *>(session.get())) << cls_name;
session->setOnCreateSocket(server->_on_create_socket);
// SessionHelper bundles the server, the session and its trace name.
return std::make_shared<SessionHelper>(server, std::move(session), cls_name);
};
start_l(port, host, backlog);
}
启动过程:
- 初始化一个 _session_alloc 的具体 Tcp 会话创建器
- 执行 start_l 进行服务端的具体连接监听、接收连接等通用操作
cpp
// 简化代码如下
void TcpServer::start_l(uint16_t port, const std::string &host, uint32_t backlog) {
setupEvent();
if (!_socket->listen(port, host.c_str(), backlog)) {
// 创建tcp监听失败,可能是由于端口占用或权限问题 [AUTO-TRANSLATED:88ebdefc]
//TCP listener creation failed, possibly due to port occupation or permission issues
string err = (StrPrinter << "Listen on " << host << " " << port << " failed: " << get_uv_errmsg(true));
throw std::runtime_error(err);
}
InfoL << "TCP server listening on [" << host << "]: " << port;
}
- 执行 setupEvent 提前安装好各种事件处理回调函数
- 执行 socket->listen 完成套接字的监听和 accept 操作
cpp
// Create the listening socket and install its accept-related callbacks.
void TcpServer::setupEvent() {
_socket = createSocket(_poller); // create the listening socket
weak_ptr<TcpServer> weak_self = std::static_pointer_cast<TcpServer>(shared_from_this());
// Install BeforeAccept and Accept handlers on the listening socket.
// weak_ptr capture: the callbacks must not keep the server alive.
_socket->setOnBeforeAccept([weak_self](const EventPoller::Ptr &poller) -> Socket::Ptr {
if (auto strong_self = weak_self.lock()) {
return strong_self->onBeforeAcceptConnection(poller);
}
return nullptr;
});
_socket->setOnAccept([weak_self](Socket::Ptr &sock, shared_ptr<void> &complete) {
if (auto strong_self = weak_self.lock()) {
auto ptr = sock->getPoller().get();
auto server = strong_self->getServer(ptr);
ptr->async([server, sock, complete]() {
// This TCP client is dispatched to the TcpServer instance bound to the
// poller thread that owns the new socket.
server->onAcceptConnection(sock);
});
}
});
}
cpp
// Pre-accept hook: build the Socket object the new connection will live on.
// The poller is chosen here (rather than reusing the listener's) so accepted
// connections are spread across threads and load stays balanced.
Socket::Ptr TcpServer::onBeforeAcceptConnection(const EventPoller::Ptr &poller) {
    assert(_poller->isCurrentThread());
    if (_multi_poller) {
        // Multi-poller mode: ask the pool for a poller for the new socket.
        return createSocket(EventPollerPool::Instance().getPoller(false));
    }
    // Single-poller mode: keep everything on the listener's poller.
    return createSocket(_poller);
}
BeforeAcceptConnection函数用于AcceptConnection的前处理:创建一个Socket对象
cpp
// 接收到客户端连接请求 [AUTO-TRANSLATED:8a67b72a]
//Received a client connection request
Session::Ptr TcpServer::onAcceptConnection(const Socket::Ptr &sock) {
weak_ptr<TcpServer> weak_self = std::static_pointer_cast<TcpServer>(shared_from_this());
//创建一个Session;这里实现创建不同的服务会话实例 [AUTO-TRANSLATED:9ed745be]
//Create a Session; here implement creating different service session instances
auto helper =
_session_alloc(std::static_pointer_cast<TcpServer>(shared_from_this()), sock);
auto session = helper->session();
weak_ptr<Session> weak_session = session;
//会话接收数据事件 [AUTO-TRANSLATED:f3f4cbbb]
//Session receives data event
sock->setOnRead([weak_session](const Buffer::Ptr &buf, struct sockaddr *, int) {
//获取会话强应用 [AUTO-TRANSLATED:187497e6]
//Get the strong application of the session
auto strong_session = weak_session.lock();
if (!strong_session) {
return;
}
try {
strong_session->onRecv(buf); // 每个会话读取数据后的必然处理onRecv
} catch (SockException &ex) {
strong_session->shutdown(ex);
} catch (exception &ex) {
strong_session->shutdown(SockException(Err_shutdown, ex.what()));
}
});
//会话接收到错误事件 [AUTO-TRANSLATED:b000e868]
//Session receives an error event
sock->setOnErr([weak_self, weak_session, ptr, cls](const SockException &err) {
...
});
return session;
}
onAcceptConnection 作用如下:
- 创建 session 会话
- 设置会话数据接收和错误事件处理回调
设置好上述处理后,开始调用真正的 listen 操作
cpp
// Bind and listen on port/local_ip with the given backlog.
// Returns false if the OS-level listen fails (port in use, permissions, ...).
bool Socket::listen(uint16_t port, const string &local_ip, int backlog) {
    // Tear down any previous socket before creating the new listener.
    closeSock();
    const int listen_fd = SockUtil::listen(port, local_ip.data(), backlog);
    if (listen_fd == -1) {
        return false;
    }
    // Wrap the raw fd as a TCP-server socket and register it with the poller.
    return fromSock_l(std::make_shared<SockNum>(listen_fd, SockNum::Sock_TCP_Server));
}
- 先关闭套接字,再重新执行 SockUtil::listen 实现协议栈监听操作
- fromSock_l 调用套接字对象中的 Socket::attachEvent 实现套接字的 Accept 事件处理器添加,添加到 Poller 事件循环中监控套接字连接请求的 Accept 事件
cpp
// Register the socket's fd with the poller. For a TCP server socket, watch
// readability (incoming connections) and errors; each wakeup dispatches to
// Socket::onAccept. Returns true if the fd was added to the event loop.
bool Socket::attachEvent(const SockNum::Ptr &sock) {
    weak_ptr<Socket> weak_self = shared_from_this();
    if (sock->type() == SockNum::Sock_TCP_Server) {
        // TCP server
        auto result = _poller->addEvent(sock->rawFd(), EventPoller::Event_Read | EventPoller::Event_Error, [weak_self, sock](int event) {
            if (auto strong_self = weak_self.lock()) {
                strong_self->onAccept(sock, event);
            }
        });
        return -1 != result;
    }
    // NOTE(review): the original excerpt fell off the end here, which is UB
    // for a bool-returning function; the full implementation also handles
    // client/udp sockets before reaching this point.
    return false;
}
当客户端向监听套接字的连接请求到来时:Socket::onAccept 函数将会被调用
cpp
// Poller callback for the listening fd: drain all pending connections
// (edge-trigger safe), configure each new fd, and hand it to _on_accept.
// Returns 0 when the backlog is drained, -1 on error.
int Socket::onAccept(const SockNum::Ptr &sock, int event) noexcept {
int fd;
struct sockaddr_storage peer_addr;
socklen_t addr_len = sizeof(peer_addr);
while (true) {
if (event & EventPoller::Event_Read) {
// accept the pending connection from the TCP protocol stack
fd = (int)accept(sock->rawFd(), (struct sockaddr *)&peer_addr, &addr_len);
if (fd == -1) {
//Accept failed
int err = get_uv_error(true);
if (err == UV_EAGAIN) {
//No new connection
return 0;
}
... // handling omitted here to avoid getting into detail
return -1;
}
// configure socket options on the accepted fd
SockUtil::setNoSigpipe(fd);
SockUtil::setNoBlocked(fd);
SockUtil::setNoDelay(fd);
SockUtil::setSendBuf(fd);
SockUtil::setRecvBuf(fd);
SockUtil::setCloseWait(fd);
SockUtil::setCloExec(fd);
// run the previously registered before-accept handler, which intercepts
// construction of the Socket object
Socket::Ptr peer_sock;
try {
LOCK_GUARD(_mtx_event);
// intercept the construction of the Socket object
peer_sock = _on_before_accept(_poller);
} catch (std::exception &ex) {
ErrorL << "Exception occurred when emit on_before_accept: " << ex.what();
close(fd);
continue;
}
auto sock = std::make_shared<SockNum>(fd, SockNum::Sock_TCP);
// set the fd so it is accessible inside the onAccept event
peer_sock->setSock(sock);
// copy the peer address now, in case the fd gets reset while running setSock
memcpy(&peer_sock->_peer_addr, &peer_addr, addr_len);
shared_ptr<void> completed(nullptr, [peer_sock, sock](void *) {
try {
// then add the fd to poll monitoring (ensures onAccept fires before
// onRead and the other events)
if (!peer_sock->attachEvent(sock)) {
// failed to register with the poller: raise onErr to mark the Socket invalid
peer_sock->emitErr(SockException(Err_eof, "add event failed"));
}
} catch (std::exception &ex) {
ErrorL << "Exception occurred: " << ex.what();
}
});
try {
// exceptions are caught so the loop keeps draining the accept backlog;
// otherwise edge-triggered epoll could miss pending connections
LOCK_GUARD(_mtx_event);
// fire the onAccept event first; listeners hook onRead etc. at this point
_on_accept(peer_sock, completed); // runs TcpServer::onAcceptConnection
} catch (std::exception &ex) {
ErrorL << "Exception occurred when emit on_accept: " << ex.what();
continue;
}
}
if (event & EventPoller::Event_Error) {
auto ex = getSockErr(sock->rawFd());
emitErr(ex);
ErrorL << "TCP listener occurred a err: " << ex.what();
return -1;
}
}
}
- 明确一点:Socket对象有各种事件,上层Session会话也有各种事件,对应不同的事件处理器
RtspSession & HttpSession
cpp
// Bytes arrived on the rtsp session: refresh liveness, account traffic, then
// either forward to the http getter (rtsp-over-http poster) or parse as rtsp.
void RtspSession::onRecv(const Buffer::Ptr &buf) {
    // Mark the session alive and account the received bytes.
    _alive_ticker.resetTime();
    _bytes_usage += buf->size();
    if (!_on_recv) {
        // Normal rtsp traffic: feed the splitter/parser.
        input(buf->data(), buf->size());
        return;
    }
    // Forward the http poster's request data to the http getter handler.
    _on_recv(buf);
}
cpp
// Bytes arrived on the http session: refresh the inactivity timer and feed
// the raw data to the http request splitter.
void HttpSession::onRecv(const Buffer::Ptr &pBuf) {
    _ticker.resetTime();
    const char *raw = pBuf->data();
    input(raw, pBuf->size());
}
不论是 HttpSession::onRecv 还是 RtspSession::onRecv 均会调用 input 来输入数据
cpp
class RtspSplitter : public HttpRequestSplitter{...};
class HttpSession: public toolkit::Session,
public FlvMuxer,
public HttpRequestSplitter,
public WebSocketSplitter {...};
void HttpRequestSplitter::input(const char *data,size_t len) {
const char *ptr = data;
if(!_remain_data.empty()){
_remain_data.append(data,len);
data = ptr = _remain_data.data();
len = _remain_data.size();
}
splitPacket:
// 数据按照请求头处理
const char *index = nullptr;
_remain_data_size = len;
while (_content_len == 0 && _remain_data_size > 0 && (index = onSearchPacketTail(ptr,_remain_data_size)) != nullptr) {
...
_content_len = onRecvHeader(header_ptr, header_size);
}
// 已经找到http头了
if(_content_len > 0){
// 数据按照固定长度content处理
// 收到content数据,并且接收content完毕
onRecvContent(ptr,_content_len);
if(_remain_data_size > 0){
// 还有数据没有处理完毕
_remain_data.assign(ptr,_remain_data_size);
data = ptr = (char *)_remain_data.data();
len = _remain_data.size();
goto splitPacket;
}
_remain_data.clear();
return;
}
// _content_len < 0;数据按照不固定长度content处理
onRecvContent(ptr,_remain_data_size);//消费掉所有剩余数据
_remain_data.clear();
}
- RtspSplitter 跟 HttpSession 类均继承于 HttpRequestSplitter,说明了什么?
说明在 HttpRequestSplitter::input 接口中的 onRecvHeader 跟 onRecvContent 均为抽象可扩展接口
cpp
// A complete rtsp message body has arrived: attach it to the parsed header,
// dispatch the whole packet, then reset the parser for the next message.
void RtspSplitter::onRecvContent(const char *data, size_t len) {
_parser.setContent(string(data,len));
onWholeRtspPacket(_parser);
_parser.clear();
}
// Dispatch one complete rtsp request to the member handler matching its verb.
void RtspSession::onWholeRtspPacket(Parser &parser) {
    string method = parser.method(); // request verb, e.g. "OPTIONS"/"PLAY"
    using rtsp_request_handler = void (RtspSession::*)(const Parser &parser);
    // Verb -> member-function dispatch table, built exactly once (onceToken).
    static unordered_map<string, rtsp_request_handler> s_cmd_functions;
    static onceToken token([]() {
        s_cmd_functions.emplace("OPTIONS", &RtspSession::handleReq_Options);
        s_cmd_functions.emplace("DESCRIBE", &RtspSession::handleReq_Describe);
        s_cmd_functions.emplace("ANNOUNCE", &RtspSession::handleReq_ANNOUNCE);
        s_cmd_functions.emplace("RECORD", &RtspSession::handleReq_RECORD);
        s_cmd_functions.emplace("SETUP", &RtspSession::handleReq_Setup);
        s_cmd_functions.emplace("PLAY", &RtspSession::handleReq_Play);
        s_cmd_functions.emplace("PAUSE", &RtspSession::handleReq_Pause);
        s_cmd_functions.emplace("TEARDOWN", &RtspSession::handleReq_Teardown);
        s_cmd_functions.emplace("GET", &RtspSession::handleReq_Get);
        s_cmd_functions.emplace("POST", &RtspSession::handleReq_Post);
        s_cmd_functions.emplace("SET_PARAMETER", &RtspSession::handleReq_SET_PARAMETER);
        // GET_PARAMETER shares the SET_PARAMETER handler in the excerpted source.
        s_cmd_functions.emplace("GET_PARAMETER", &RtspSession::handleReq_SET_PARAMETER);
    });
    auto it = s_cmd_functions.find(method);
    if (it == s_cmd_functions.end()) {
        // NOTE(review): the original dereferenced end() for unknown verbs (UB).
        // A full implementation should answer with an rtsp error response and
        // shut the session down; here we just drop the request safely.
        parser.clear();
        return;
    }
    (this->*(it->second))(parser);
    parser.clear();
}
- RtspSplitter::onRecvContent 处理整个 RtspPacket 包
- RtspSession::onWholeRtspPacket 根据请求命令字去映射调用相应的 handler 命令处理器
cpp
// Parse one complete http request header and dispatch by method verb.
// Return contract (from HttpRequestSplitter): 0 = no body expected,
// >0 = fixed body length follows, <0 = body of unknown length follows.
ssize_t HttpSession::onRecvHeader(const char *header, size_t len) {
InfoL << "[onRecvHeader HttpSession]";
using func_type = void (HttpSession::*)();
// Method verb -> member handler table, built exactly once.
static unordered_map<string, func_type> s_func_map;
static onceToken token([]() {
s_func_map.emplace("GET", &HttpSession::onHttpRequest_GET);
s_func_map.emplace("POST", &HttpSession::onHttpRequest_POST);
s_func_map.emplace("PUT", &HttpSession::onHttpRequest_POST);
// DELETE is used for whip/whep, only to trigger the http api
s_func_map.emplace("DELETE", &HttpSession::onHttpRequest_POST);
s_func_map.emplace("HEAD", &HttpSession::onHttpRequest_HEAD);
s_func_map.emplace("OPTIONS", &HttpSession::onHttpRequest_OPTIONS);
});
_parser.parse(header, len);
CHECK(_parser.url()[0] == '/');
_origin = _parser["Origin"];
urlDecode(_parser);
auto &cmd = _parser.method();
auto it = s_func_map.find(cmd);
// NOTE(review): in this excerpt `it` is not checked against end() and
// `content_len` is undefined (the full source derives it from the parser and
// rejects unsupported methods); the non-zero content path is also omitted.
if (content_len == 0) {
// No body: invoke the handler right away.
(this->*(it->second))();
_parser.clear();
// If _on_recv_body was set, the body that follows still needs handling.
return _on_recv_body ? -1 : 0;
}
}
HttpSession::onRecvHeader来根据 Http请求命令字 去映射调用相应的 handler 命令处理器,这也是二次开发最集中的地方,用于绑定来自浏览器发送的 http/websocket 请求处理
RtspPusher
cpp
//_RingReaderDispatcher::write();
// Fan one frame out to every attached reader (player) on this dispatcher,
// pruning readers whose owners are gone, then persist it into the storage.
void write(T in, bool is_key = true) {
InfoL << "[reader_map:size]: "<< _reader_map.size();
for (auto it = _reader_map.begin(); it != _reader_map.end();) {
auto reader = it->second.lock();
if (!reader) {
// The reader has been destroyed: erase it and update the bookkeeping.
it = _reader_map.erase(it);
--_reader_size;
onSizeChanged(false);
continue;
}
reader->onRead(in, is_key); // deliver to every registered RingReader
++it;
}
_storage->write(std::move(in), is_key); // write into the backing _storage
}
- 对于每个拉流者Player均对应有一个RingReader,RingBuffer缓冲区读者,推流时会触发
_RingReaderDispatcher::write环形缓冲区读者分发器中的write操作来触发拉流时提前注册好的reader中的onRead操作来将流发给Player
cpp
class RingBuffer : public std::enable_shared_from_this<RingBuffer<T>>{};
// Called by the pusher: forward the frame to each per-poller dispatcher on
// its own thread, then store it in the ring's backing storage.
void write(T in, bool is_key = true) { // triggered by the stream pusher
LOCK_GUARD(_mtx_map);
for (auto &pr : _dispatcher_map) {
auto &second = pr.second;
// Switch to the dispatcher's poller thread, then fire the onRead events there.
pr.first->async([second, in, is_key]() mutable { second->write(std::move(in), is_key); }, false);
}
_storage->write(std::move(in), is_key);
}
RingBuffer::write触发遍历 ringreader_dispatcher_map且通过async线程切换异步执行RingReader::write操作
RtspPlayer
cpp
// Handle the rtsp PLAY request: apply speed/seek options, answer 200 OK, then
// attach a ring-buffer reader that forwards rtp packets to this player.
void RtspSession::handleReq_Play(const Parser &parser) {
// PLAY verb request handling and response.
auto play_src = _play_src.lock();
// NOTE(review): this excerpt omits the null check on play_src and the
// definitions of strScale/strRange/res_header/inited_tracks etc.
if (!strScale.empty()) {
// A playback speed was requested.
....
play_src->speed(speed);
}
if (!strRange.empty()) {
// This is a seek operation.
...
use_gop = !play_src->seekTo((uint32_t) iStartTime);
}
res_header.emplace("RTP-Info", rtp_info);
res_header.emplace("Range", StrPrinter << "npt=" << setiosflags(ios::fixed) << setprecision(2) << play_src->getTimeStamp(TrackInvalid) / 1000.0);
sendRtspResponse("200 OK", res_header);
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
// Select the track to play.
if (inited_tracks.size() == 1) {
_target_play_track = inited_tracks[0];
}
// Resume playback only after the rtsp response has been sent.
play_src->pause(false);
if (!_play_reader && _rtp_type != Rtsp::RTP_MULTICAST) {
weak_ptr<RtspSession> weak_self = static_pointer_cast<RtspSession>(shared_from_this());
// Attach a reader (ring buffer) to the source; it receives the rtp data
// that the pusher forwards.
_play_reader = play_src->getRing()->attach(getPoller(), use_gop);
// If the source detaches (stream stopped), close this player session too.
_play_reader->setDetachCB([weak_self]() {
auto strong_self = weak_self.lock();
if (!strong_self) {
return;
}
strong_self->shutdown(SockException(Err_shutdown, "ring buffer detached"));
});
// Read callback, installed while the player is pulling the stream.
_play_reader->setReadCB([weak_self](const RtspMediaSource::RingDataType &pack) {
auto strong_self = weak_self.lock();
if (!strong_self) {
return;
}
strong_self->sendRtpPacket(pack); // invoked on push: delivers rtp to this subscribed player
});
}
}