How to Pull an RTSP|RTMP Stream on Android and Forward It to a Lightweight RTSP Service

Technical Background

Many developers who are familiar with our modules know that we have a very mature lightweight RTSP service module. It captures camera or screen data, encodes and packetizes it, and injects it into the lightweight RTSP service module on the Android device, which then acts much like an IP camera and exposes an RTSP URL for pulling. This enables server-free live streaming inside a LAN, which is very practical for intranet surveillance, smart classrooms, paperless meeting rooms and similar scenarios.

Technical Implementation

What we want to discuss today is how to inject an external RTSP|RTMP stream into the lightweight RTSP service. For 大牛直播SDK this is not really a new module or technique; we implemented it years ago as our intranet RTSP gateway module.

The approach is similar to RTSP-to-RTMP relay: first pull the RTSP or RTMP stream and call the encoded H.264/H.265/AAC/PCMU/PCMA data back to the upper layer.

The upper layer then connects this data source to the lightweight RTSP service module through the module's posting interface for encoded data.
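Before walking through the demo code, here is a condensed sketch of the overall call order, assuming the wrapper objects used by the demo below (libPlayer, stream_player_, stream_publisher_, rtsp_server_) and only the interfaces that appear in the snippets; treat it as an outline of the wiring rather than a complete implementation:

// Outline only -- the names below come from the SmartRelayDemo.java snippets in this article.
// 1. Start the built-in RTSP server (see ButtonRtspServiceListener below).
LibPublisherWrapper.RTSPServer.Handle server_handle =
		LibPublisherWrapper.RTSPServer.create_and_start_server(libPublisher, 8554, null, null);
rtsp_server_.reset(server_handle);

// 2. Bind the publisher to that server and publish a stream name (see ButtonRtspPublisherListener below).
stream_publisher_.SetRtspStreamName("stream1");
stream_publisher_.ClearRtspStreamServer();
stream_publisher_.AddRtspStreamServer(rtsp_server_.get_native());
stream_publisher_.StartRtspStream();

// 3. Open the pull-side player, register the encoded-data callbacks and start pulling (see ButtonPullListener below);
//    the callbacks post every frame they receive to stream_publisher_.
stream_player_.OpenPlayerHandle(playback_url_, play_buffer_, is_using_tcp_);
libPlayer.SmartPlayerSetVideoDataCallback(stream_player_.get(), new PlayerVideoDataCallback(stream_publisher_));
libPlayer.SmartPlayerSetAudioDataCallback(stream_player_.get(), new PlayerAudioDataCallback(stream_publisher_));
stream_player_.StartPull(1);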

First, pulling the RTSP or RTMP stream and calling the data back to the upper layer:

/* SmartRelayDemo.java
 * Created by daniusdk.com
 * WeChat:xinsheng120
 */
class ButtonPullListener implements View.OnClickListener {
	public void onClick(View v) {
		if (stream_player_.is_pulling()) {
			Log.i(TAG, "Stop Pull..");

			boolean iRet = stream_player_.StopPull();

			if (!iRet) {
				Log.e(TAG, "Call StopPull failed..");
				return;
			}

			stream_player_.try_release();
			btnPullStream.setText("开始拉流");
		} else {
			Log.i(TAG, "Start playback stream++");

			if (!stream_player_.OpenPlayerHandle(playback_url_, play_buffer_, is_using_tcp_))
				return;

			if(audio_opt_ == 2)
			{
				libPlayer.SmartPlayerSetAudioDataCallback(stream_player_.get(), new PlayerAudioDataCallback(stream_publisher_));
			}
			if(video_opt_ == 2)
			{
				libPlayer.SmartPlayerSetVideoDataCallback(stream_player_.get(), new PlayerVideoDataCallback(stream_publisher_));
			}

			int is_pull_trans_code = 1;
			boolean iPlaybackRet = stream_player_.StartPull(is_pull_trans_code);
			if (!iPlaybackRet) {
				Log.e(TAG, "Call StartPlayer failed..");
				return;
			}

			btnPullStream.setText("停止拉流");
		}
	}
}

The pulled audio and video data only needs to be posted to the lightweight RTSP service module. Let's look at the video data callback first:

	class PlayerVideoDataCallback implements NTVideoDataCallback
	{
		private WeakReference<LibPublisherWrapper> publisher_;
		private int video_buffer_size = 0;
		private ByteBuffer video_buffer_ = null;

		public PlayerVideoDataCallback(LibPublisherWrapper publisher) {
			if (publisher != null)
				publisher_ = new WeakReference<>(publisher);
		}

		@Override
		public ByteBuffer getVideoByteBuffer(int size)
		{
			//Log.i("getVideoByteBuffer", "size: " + size);

			if( size < 1 )
			{
				return null;
			}

			if ( size <= video_buffer_size &&  video_buffer_ != null )
			{
				return  video_buffer_;
			}

			video_buffer_size = size + 1024;
			video_buffer_size = (video_buffer_size+0xf) & (~0xf);

			video_buffer_ = ByteBuffer.allocateDirect(video_buffer_size);

			// Log.i("getVideoByteBuffer", "size: " + size + " buffer_size:" + video_buffer_size);

			return video_buffer_;
		}

		@Override
		public void onVideoDataCallback(int ret, int video_codec_id, int sample_size, int is_key_frame, long timestamp, int width, int height, long presentation_timestamp)
		{
			//Log.i("onVideoDataCallback", "ret: " + ret + ", video_codec_id: " + video_codec_id + ", sample_size: " + sample_size + ", is_key_frame: "+ is_key_frame +  ", timestamp: " + timestamp +
			//		",presentation_timestamp:" + presentation_timestamp);

			if ( video_buffer_ == null)
				return;

			LibPublisherWrapper publisher = (publisher_ != null) ? publisher_.get() : null;
			if (null == publisher)
				return;

			if (!publisher.is_publishing())
				return;

			video_buffer_.rewind();

			publisher.PostVideoEncodedData(video_codec_id, video_buffer_, sample_size, is_key_frame, timestamp, presentation_timestamp);

		}
	}
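A note on the buffer handling above: getVideoByteBuffer hands the native layer a direct ByteBuffer that is allocated slightly larger than requested and rounded up to a multiple of 16, so the same buffer can be reused for later frames of similar size instead of being reallocated on every call. The helper below only restates that arithmetic for clarity; alignedCapacity is a made-up name for illustration, not an SDK method:

	// Illustration of the rounding done inline in getVideoByteBuffer/getAudioByteBuffer.
	static int alignedCapacity(int requested, int slack) {
		int capacity = requested + slack;   // e.g. 3000 + 1024 = 4024
		return (capacity + 0xF) & ~0xF;     // round up to a multiple of 16 -> 4032
	}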

The audio data callback:

	class PlayerAudioDataCallback implements NTAudioDataCallback
	{
		private WeakReference<LibPublisherWrapper> publisher_;
		private int audio_buffer_size = 0;
		private int param_info_size = 0;

		private ByteBuffer audio_buffer_ = null;
		private ByteBuffer parameter_info_ = null;

		public PlayerAudioDataCallback(LibPublisherWrapper publisher) {
			if (publisher != null)
				publisher_ = new WeakReference<>(publisher);
		}

		@Override
		public ByteBuffer getAudioByteBuffer(int size)
		{
			//Log.i("getAudioByteBuffer", "size: " + size);

			if( size < 1 )
			{
				return null;
			}

			if ( size <= audio_buffer_size && audio_buffer_ != null )
			{
				return audio_buffer_;
			}

			audio_buffer_size = size + 512;
			audio_buffer_size = (audio_buffer_size+0xf) & (~0xf);

			audio_buffer_ = ByteBuffer.allocateDirect(audio_buffer_size);

			// Log.i("getAudioByteBuffer", "size: " + size + " buffer_size:" + audio_buffer_size);

			return audio_buffer_;
		}

		@Override
		public ByteBuffer getAudioParameterInfo(int size)
		{
			//Log.i("getAudioParameterInfo", "size: " + size);

			if(size < 1)
			{
				return null;
			}

			if ( size <= param_info_size &&  parameter_info_ != null )
			{
				return  parameter_info_;
			}

			param_info_size = size + 32;
			param_info_size = (param_info_size+0xf) & (~0xf);

			parameter_info_ = ByteBuffer.allocateDirect(param_info_size);

			//Log.i("getAudioParameterInfo", "size: " + size + " buffer_size:" + param_info_size);

			return parameter_info_;
		}

		@Override
		public void onAudioDataCallback(int ret, int audio_codec_id, int sample_size, int is_key_frame, long timestamp, int sample_rate, int channel, int parameter_info_size, long reserve)
		{
			//Log.i("onAudioDataCallback", "ret: " + ret + ", audio_codec_id: " + audio_codec_id + ", sample_size: " + sample_size + ", timestamp: " + timestamp +
			//		",sample_rate:" + sample_rate);

			if ( audio_buffer_ == null)
				return;

			LibPublisherWrapper publisher = (publisher_ != null) ? publisher_.get() : null;
			if (null == publisher)
				return;

			if (!publisher.is_publishing())
				return;

			audio_buffer_.rewind();

			publisher.PostAudioEncodedData(audio_codec_id, audio_buffer_, sample_size, is_key_frame, timestamp, parameter_info_, parameter_info_size);
		}
	}

Starting and stopping the RTSP service:

	//Start/stop the RTSP service
	class ButtonRtspServiceListener implements View.OnClickListener {
		public void onClick(View v) {
			if (!rtsp_server_.empty()) {
				rtsp_server_.reset();
				btnRtspService.setText("启动RTSP服务");
				btnRtspPublisher.setEnabled(false);
				return;
			}

			Log.i(TAG, "onClick start rtsp service..");

			int port = 8554;
			String user_name = null;
			String password = null;
			LibPublisherWrapper.RTSPServer.Handle server_handle = LibPublisherWrapper.RTSPServer.create_and_start_server(libPublisher,
					port, user_name, password);

			if (null == server_handle) {
				Log.e(TAG, "启动rtsp server失败! 请检查设置的端口是否被占用!");
				return;
			}

			rtsp_server_.reset(server_handle);

			btnRtspService.setText("停止RTSP服务");
			btnRtspPublisher.setEnabled(true);
		}
	}
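In the snippet above, user_name and password are passed as null, so the service runs without authentication. Since create_and_start_server takes credentials as parameters and the module supports RTSP authentication, enabling it should only require passing non-empty values; a minimal sketch with placeholder credentials:

	// Sketch: start the RTSP server on port 8554 with credentials (placeholder values).
	String user_name = "admin";    // hypothetical user name
	String password = "123456";    // hypothetical password
	LibPublisherWrapper.RTSPServer.Handle server_handle =
			LibPublisherWrapper.RTSPServer.create_and_start_server(libPublisher, 8554, user_name, password);
	if (server_handle != null)
		rtsp_server_.reset(server_handle);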

Publishing the RTSP stream:

	//Publish/stop the RTSP stream
	class ButtonRtspPublisherListener implements View.OnClickListener {
		public void onClick(View v) {
			if (stream_publisher_.is_rtsp_publishing()) {
				stopRtspPublisher();

				btnRtspPublisher.setText("发布RTSP流");
				btnGetRtspSessionNumbers.setEnabled(false);
				btnRtspService.setEnabled(true);
				return;
			}

			Log.i(TAG, "onClick start rtsp publisher..");

			PusherInitAndSetConfig();

			String rtsp_stream_name = "stream1";
			stream_publisher_.SetRtspStreamName(rtsp_stream_name);
			stream_publisher_.ClearRtspStreamServer();

			stream_publisher_.AddRtspStreamServer(rtsp_server_.get_native());

			if (!stream_publisher_.StartRtspStream()) {
				stream_publisher_.try_release();
				Log.e(TAG, "调用发布rtsp流接口失败!");
				return;
			}

			startAudioRecorder();

			btnRtspPublisher.setText("停止RTSP流");
			btnGetRtspSessionNumbers.setEnabled(true);
			btnRtspService.setEnabled(false);
		}
	}
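Once the stream is published, players on the same LAN can pull it using the port and stream name configured above (8554 and "stream1" in this demo). The pull URL generally takes the form rtsp://<device-ip>:<port>/<stream-name>, for example (the address here is only an illustration):

	rtsp://192.168.0.100:8554/stream1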

Getting the number of RTSP sessions:

	//Get the number of RTSP sessions
	class ButtonGetRtspSessionNumbersListener implements View.OnClickListener {
		public void onClick(View v) {
			if (rtsp_server_.is_running()) {
				int session_numbers = rtsp_server_.get_client_session_number();

				Log.i(TAG, "GetRtspSessionNumbers: " + session_numbers);

				PopRtspSessionNumberDialog(session_numbers);
			}
		}
	}

Summary

The Android intranet RTSP gateway module is an extension of the built-in lightweight RTSP service module. It pulls external RTSP/RTMP data and injects it into the lightweight RTSP service module, so multiple intranet clients can access the intranet lightweight RTSP service directly to obtain data that originates from the public network, without deploying a separate server. RTSP/RTMP H.265 streams are also supported.

What the built-in lightweight RTSP service module and the built-in RTSP gateway module have in common:

The core pain point both modules address is sparing users and developers from deploying a separate RTSP or RTMP server: data is aggregated into the built-in RTSP service, which exposes an RTSP URL that can be pulled from. They target intranet environments with modest concurrency requirements, support H.264/H.265, and support RTSP authentication as well as unicast and multicast modes. Considering the capacity of a single service instance, multiple RTSP services can be created at the same time, and the current number of client sessions of an RTSP service can be queried.
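For instance, when a single instance is not enough, additional RTSP services can be started on other ports with the same create_and_start_server call shown earlier; a minimal sketch (the second port is an arbitrary example, and how streams are distributed across the services is up to the application):

	// Sketch: two RTSP services listening on different ports.
	LibPublisherWrapper.RTSPServer.Handle server_a = LibPublisherWrapper.RTSPServer.create_and_start_server(libPublisher, 8554, null, null);
	LibPublisherWrapper.RTSPServer.Handle server_b = LibPublisherWrapper.RTSPServer.create_and_start_server(libPublisher, 8555, null, null);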

Where the built-in lightweight RTSP service module and the built-in RTSP gateway module differ: the data source.

  1. For the built-in lightweight RTSP service module, the data source is pre-encoding data from the camera, screen, microphone, etc., or locally encoded data fed in by the application;

  2. The built-in RTSP gateway module is essentially the RTSP/RTMP pull module combined with the built-in lightweight RTSP service module. Its data source is an RTSP or RTMP network stream; the pull module calls back the encoded audio and video data, which is then aggregated into the built-in lightweight RTSP service module.
