RTC: Refine stream/ssrc/sdp structure

pull/1908/head
winlin 5 years ago
parent 426938cc8a
commit 991672bf41

@ -167,10 +167,6 @@ srs_error_t SrsGoApiRtcPlay::do_serve_http(ISrsHttpResponseWriter* w, ISrsHttpMe
local_sdp.session_config_.dtls_role = _srs_config->get_rtc_dtls_role(request.vhost);
local_sdp.session_config_.dtls_version = _srs_config->get_rtc_dtls_version(request.vhost);
if ((err = exchange_sdp(&request, remote_sdp, local_sdp)) != srs_success) {
return srs_error_wrap(err, "remote sdp have error or unsupport attributes");
}
// Whether enabled.
bool server_enabled = _srs_config->get_rtc_server_enabled();
bool rtc_enabled = _srs_config->get_rtc_enabled(request.vhost);
@ -521,14 +517,11 @@ srs_error_t SrsGoApiRtcPublish::do_serve_http(ISrsHttpResponseWriter* w, ISrsHtt
SrsSdp local_sdp;
// TODO: FIXME: move to create_session.
// Config for SDP and session.
local_sdp.session_config_.dtls_role = _srs_config->get_rtc_dtls_role(request.vhost);
local_sdp.session_config_.dtls_version = _srs_config->get_rtc_dtls_version(request.vhost);
if ((err = exchange_sdp(&request, remote_sdp, local_sdp)) != srs_success) {
return srs_error_wrap(err, "remote sdp have error or unsupport attributes");
}
// Whether enabled.
bool server_enabled = _srs_config->get_rtc_server_enabled();
bool rtc_enabled = _srs_config->get_rtc_enabled(request.vhost);

File diff suppressed because it is too large.

@ -57,6 +57,8 @@ class SrsRtpNackForReceiver;
class SrsRtpIncommingVideoFrame;
class SrsRtpRingBuffer;
class SrsRtcConsumer;
class SrsRtcAudioSendTrack;
class SrsRtcVideoSendTrack;
const uint8_t kSR = 200;
const uint8_t kRR = 201;
@ -75,23 +77,6 @@ const uint8_t kSLI = 2;
const uint8_t kRPSI = 3;
const uint8_t kAFB = 15;
class SrsNtp
{
public:
uint64_t system_ms_;
uint64_t ntp_;
uint32_t ntp_second_;
uint32_t ntp_fractions_;
public:
SrsNtp();
virtual ~SrsNtp();
public:
static SrsNtp from_time_ms(uint64_t ms);
static SrsNtp to_time_ms(uint64_t ntp);
public:
static uint64_t kMagicNtpFractionalUnit;
};
enum SrsRtcConnectionStateType
{
// TODO: FIXME: Should prefixed by enum name.
@ -187,16 +172,9 @@ protected:
SrsCoroutine* trd;
SrsRtcConnection* session_;
private:
// TODO: FIXME: How to handle timestamp overflow?
// Information for audio.
uint32_t audio_ssrc;
uint16_t audio_payload_type;
// Information for video.
uint16_t video_payload_type;
uint32_t video_ssrc;
// NACK ARQ ring buffer.
SrsRtpRingBuffer* audio_queue_;
SrsRtpRingBuffer* video_queue_;
// key: publish_ssrc, value: send track to process rtp/rtcp
std::map<uint32_t, SrsRtcAudioSendTrack*> audio_tracks_;
std::map<uint32_t, SrsRtcVideoSendTrack*> video_tracks_;
// Simulators.
int nn_simulate_nack_drop;
private:
@ -205,11 +183,15 @@ private:
bool realtime;
// Whether enabled nack.
bool nack_enabled_;
// Whether player is started.
bool is_started;
// Statistics for sent packets.
SrsRtcOutgoingInfo info;
public:
SrsRtcPlayStream(SrsRtcConnection* s, SrsContextId parent_cid);
virtual ~SrsRtcPlayStream();
public:
srs_error_t initialize(uint32_t vssrc, uint32_t assrc, uint16_t v_pt, uint16_t a_pt);
srs_error_t initialize(SrsRequest* request, std::map<uint32_t, SrsRtcTrackDescription*> sub_relations);
// interface ISrsReloadHandler
public:
virtual srs_error_t on_reload_vhost_play(std::string vhost);
@ -238,6 +220,7 @@ private:
srs_error_t on_rtcp_feedback(char* data, int nb_data);
srs_error_t on_rtcp_ps_feedback(char* data, int nb_data);
srs_error_t on_rtcp_rr(char* data, int nb_data);
uint32_t get_video_publish_ssrc(uint32_t play_ssrc);
};
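// Hypothetical sketch, not part of this patch: the maps above key send tracks by the
// publisher's SSRC, so a dispatcher could route a consumed packet like this. The
// dispatch_packet helper and out_packets parameter are illustrative names only.
srs_error_t dispatch_packet(std::map<uint32_t, SrsRtcAudioSendTrack*>& audio_tracks,
    std::map<uint32_t, SrsRtcVideoSendTrack*>& video_tracks,
    SrsRtpPacket2* pkt, std::vector<SrsRtpPacket2*>& out_packets)
{
    uint32_t publish_ssrc = pkt->header.get_ssrc();

    // Try the video tracks first; the send track rewrites SSRC/PT from its description.
    std::map<uint32_t, SrsRtcVideoSendTrack*>::iterator vit = video_tracks.find(publish_ssrc);
    if (vit != video_tracks.end()) {
        return vit->second->on_rtp(out_packets, pkt);
    }

    // Fall back to the audio tracks.
    std::map<uint32_t, SrsRtcAudioSendTrack*>::iterator ait = audio_tracks.find(publish_ssrc);
    if (ait != audio_tracks.end()) {
        return ait->second->on_rtp(out_packets, pkt);
    }

    // Unknown SSRC: ignore in this sketch.
    return srs_success;
}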
// An RTC publish stream, where the client pushes and publishes the stream to SRS.
@ -248,48 +231,41 @@ private:
uint64_t nn_audio_frames;
private:
SrsRtcConnection* session_;
uint32_t video_ssrc;
uint32_t audio_ssrc;
uint16_t pt_to_drop_;
// Whether enabled nack.
bool nack_enabled_;
private:
bool request_keyframe_;
SrsRtpRingBuffer* video_queue_;
SrsRtpNackForReceiver* video_nack_;
SrsRtpRingBuffer* audio_queue_;
SrsRtpNackForReceiver* audio_nack_;
private:
SrsRequest* req;
SrsRtcStream* source;
// Simulators.
int nn_simulate_nack_drop;
private:
std::map<uint32_t, uint64_t> last_sender_report_sys_time;
std::map<uint32_t, SrsNtp> last_sender_report_ntp;
// track vector
std::vector<SrsRtcAudioRecvTrack*> audio_tracks_;
std::vector<SrsRtcVideoRecvTrack*> video_tracks_;
private:
srs_utime_t last_twcc_feedback_time_;
int twcc_id_;
uint8_t twcc_fb_count_;
SrsRtcpTWCC rtcp_twcc_;
SrsRtpExtensionTypes extension_types_;
bool is_started;
public:
SrsRtcPublishStream(SrsRtcConnection* session);
virtual ~SrsRtcPublishStream();
public:
srs_error_t initialize(uint32_t vssrc, uint32_t assrc, int twcc_id, SrsRequest* req);
srs_error_t initialize(SrsRequest* req, SrsRtcStreamDescription* stream_desc);
srs_error_t start();
private:
void check_send_nacks(SrsRtpNackForReceiver* nack, uint32_t ssrc);
srs_error_t send_rtcp_rr(uint32_t ssrc, SrsRtpRingBuffer* rtp_queue);
srs_error_t send_rtcp_xr_rrtr(uint32_t ssrc);
srs_error_t send_rtcp_fb_pli(uint32_t ssrc);
srs_error_t send_rtcp_rr();
srs_error_t send_rtcp_xr_rrtr();
public:
srs_error_t on_rtp(char* buf, int nb_buf);
virtual void on_before_decode_payload(SrsRtpPacket2* pkt, SrsBuffer* buf, ISrsRtpPayloader** ppayload);
private:
srs_error_t on_audio(SrsRtpPacket2* pkt);
srs_error_t on_video(SrsRtpPacket2* pkt);
srs_error_t on_nack(SrsRtpPacket2* pkt);
srs_error_t send_periodic_twcc();
public:
srs_error_t on_rtcp(char* data, int nb_data);
@ -300,7 +276,7 @@ private:
srs_error_t on_rtcp_ps_feedback(char* data, int nb_data);
srs_error_t on_rtcp_rr(char* data, int nb_data);
public:
void request_keyframe();
void request_keyframe(uint32_t ssrc);
// interface ISrsHourGlass
public:
virtual srs_error_t notify(int type, srs_utime_t interval, srs_utime_t tick);
@ -310,6 +286,10 @@ private:
void simulate_drop_packet(SrsRtpHeader* h, int nn_bytes);
private:
srs_error_t on_twcc(uint16_t sn);
SrsRtcAudioRecvTrack* get_audio_track(uint32_t ssrc);
SrsRtcVideoRecvTrack* get_video_track(uint32_t ssrc);
void update_rtt(uint32_t ssrc, int rtt);
void update_send_report_time(uint32_t ssrc, const SrsNtp& ntp);
};
// An RTC peer connection, an SDP-level object.
@ -320,6 +300,8 @@ class SrsRtcConnection
friend class SrsRtcPublishStream;
public:
bool disposing_;
private:
static uint32_t ssrc_num;
private:
SrsRtcServer* server_;
SrsRtcConnectionStateType state_;
@ -360,6 +342,7 @@ public:
SrsRtcConnection(SrsRtcServer* s);
virtual ~SrsRtcConnection();
public:
// TODO: FIXME: save only connection info.
SrsSdp* get_local_sdp();
void set_local_sdp(const SrsSdp& sdp);
SrsSdp* get_remote_sdp();
@ -373,6 +356,8 @@ public:
void set_encrypt(bool v);
void switch_to_context();
SrsContextId context_id();
srs_error_t add_publisher(SrsRequest* request, const SrsSdp& remote_sdp, SrsSdp& local_sdp);
srs_error_t add_player(SrsRequest* request, const SrsSdp& remote_sdp, SrsSdp& local_sdp);
public:
// Before initializing, the user must set the local SDP, which is used to initialize DTLS.
srs_error_t initialize(SrsRtcStream* source, SrsRequest* r, bool is_publisher, std::string username, SrsContextId context_id);
@ -387,11 +372,25 @@ public:
srs_error_t start_publish();
bool is_stun_timeout();
void update_sendonly_socket(SrsUdpMuxSocket* skt);
public:
// Send RTCP packets.
void check_send_nacks(SrsRtpNackForReceiver* nack, uint32_t ssrc);
srs_error_t send_rtcp_rr(uint32_t ssrc, SrsRtpRingBuffer* rtp_queue, const uint64_t& last_send_systime, const SrsNtp& last_send_ntp);
srs_error_t send_rtcp_xr_rrtr(uint32_t ssrc);
srs_error_t send_rtcp_fb_pli(uint32_t ssrc);
public:
// Simulate the NACK to drop nn packets.
void simulate_nack_drop(int nn);
private:
srs_error_t on_binding_request(SrsStunPacket* r);
// Negotiate the publish media capability.
srs_error_t negotiate_publish_capability(SrsRequest* req, const SrsSdp& remote_sdp, SrsRtcStreamDescription* stream_desc);
srs_error_t generate_publish_local_sdp(SrsRequest* req, SrsSdp& local_sdp, SrsRtcStreamDescription* stream_desc);
// Negotiate the play media capability.
srs_error_t negotiate_play_capability(SrsRequest* req, const SrsSdp& remote_sdp, std::map<uint32_t, SrsRtcTrackDescription*>& sub_relations);
srs_error_t generate_play_local_sdp(SrsRequest* req, SrsSdp& local_sdp, SrsRtcStreamDescription* stream_desc);
srs_error_t create_player(SrsRequest* request, std::map<uint32_t, SrsRtcTrackDescription*> sub_relations);
srs_error_t create_publisher(SrsRequest* request, SrsRtcStreamDescription* stream_desc);
};
class ISrsRtcHijacker

@ -243,6 +243,9 @@ ISrsDtlsCallback::~ISrsDtlsCallback()
SrsDtls::SrsDtls(ISrsDtlsCallback* cb)
{
dtls_ctx = NULL;
dtls = NULL;
callback = cb;
handshake_done = false;

@ -46,7 +46,7 @@ if (!getline(is,word,delim)) {\
return srs_error_new(ERROR_RTC_SDP_DECODE, "fetch with delim failed");\
}\
static std::vector<std::string> split_str(const std::string& str, const std::string& delim)
std::vector<std::string> split_str(const std::string& str, const std::string& delim)
{
std::vector<std::string> ret;
size_t pre_pos = 0;
@ -176,6 +176,16 @@ SrsSSRCInfo::SrsSSRCInfo()
ssrc_ = 0;
}
SrsSSRCInfo::SrsSSRCInfo(uint32_t ssrc, std::string cname, std::string stream_id, std::string track_id)
{
ssrc_ = ssrc;
cname_ = cname;
msid_ = stream_id;
msid_tracker_ = track_id;
mslabel_ = msid_;
label_ = msid_tracker_;
}
SrsSSRCInfo::~SrsSSRCInfo()
{
}
@ -802,6 +812,13 @@ void SrsSdp::set_ice_pwd(const std::string& pwd)
}
}
void SrsSdp::set_dtls_role(const std::string& dtls_role)
{
for (std::vector<SrsMediaDesc>::iterator iter = media_descs_.begin(); iter != media_descs_.end(); ++iter) {
iter->session_info_.setup_ = dtls_role;
}
}
void SrsSdp::set_fingerprint_algo(const std::string& algo)
{
for (std::vector<SrsMediaDesc>::iterator iter = media_descs_.begin(); iter != media_descs_.end(); ++iter) {
@ -849,6 +866,16 @@ std::string SrsSdp::get_ice_pwd() const
return "";
}
std::string SrsSdp::get_dtls_role() const
{
// Because we use BUNDLE, we can choose the first element.
for (std::vector<SrsMediaDesc>::const_iterator iter = media_descs_.begin(); iter != media_descs_.end(); ++iter) {
return iter->session_info_.setup_;
}
return "";
}
srs_error_t SrsSdp::parse_line(const std::string& line)
{
srs_error_t err = srs_success;

@ -34,6 +34,9 @@
#include <map>
const std::string kTWCCExt = "http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01";
// TODO: FIXME: Rename it, and add utest.
extern std::vector<std::string> split_str(const std::string& str, const std::string& delim);
struct SrsSessionConfig
{
public:
@ -64,6 +67,7 @@ class SrsSSRCInfo
{
public:
SrsSSRCInfo();
SrsSSRCInfo(uint32_t ssrc, std::string cname, std::string stream_id, std::string track_id);
virtual ~SrsSSRCInfo();
public:
srs_error_t encode(std::ostringstream& os);
@ -192,12 +196,14 @@ public:
public:
void set_ice_ufrag(const std::string& ufrag);
void set_ice_pwd(const std::string& pwd);
void set_dtls_role(const std::string& dtls_role);
void set_fingerprint_algo(const std::string& algo);
void set_fingerprint(const std::string& fingerprint);
void add_candidate(const std::string& ip, const int& port, const std::string& type);
std::string get_ice_ufrag() const;
std::string get_ice_pwd() const;
std::string get_dtls_role() const;
private:
srs_error_t parse_line(const std::string& line);
private:

@ -311,6 +311,36 @@ srs_error_t SrsRtcServer::create_session(
return srs_error_new(ERROR_RTC_SOURCE_BUSY, "stream %s busy", req->get_stream_url().c_str());
}
// TODO: FIXME: add do_create_session to error process.
SrsRtcConnection* session = new SrsRtcConnection(this);
if ((err = do_create_session(session, req, remote_sdp, local_sdp, mock_eip, publish, source)) != srs_success) {
srs_freep(session);
return srs_error_wrap(err, "create session");
}
*psession = session;
return err;
}
srs_error_t SrsRtcServer::do_create_session(
SrsRtcConnection* session, SrsRequest* req, const SrsSdp& remote_sdp, SrsSdp& local_sdp, const std::string& mock_eip, bool publish,
SrsRtcStream* source
)
{
srs_error_t err = srs_success;
// First, add the publisher/player to negotiate the SDP media info.
if (publish) {
if ((err = session->add_publisher(req, remote_sdp, local_sdp)) != srs_success) {
return srs_error_wrap(err, "add publisher");
}
} else {
if ((err = session->add_player(req, remote_sdp, local_sdp)) != srs_success) {
return srs_error_wrap(err, "add publisher");
}
}
std::string local_pwd = srs_random_str(32);
std::string local_ufrag = "";
// TODO: FIXME: Rename for a better name, it's not an username.
@ -339,7 +369,19 @@ srs_error_t SrsRtcServer::create_session(
}
}
SrsRtcConnection* session = new SrsRtcConnection(this);
if (remote_sdp.get_dtls_role() == "active") {
local_sdp.set_dtls_role("passive");
} else if (remote_sdp.get_dtls_role() == "passive") {
local_sdp.set_dtls_role("active");
} else if (remote_sdp.get_dtls_role() == "actpass") {
local_sdp.set_dtls_role(local_sdp.session_config_.dtls_role);
} else {
// @see: https://tools.ietf.org/html/rfc4145#section-4.1
// The default value of the setup attribute in an offer/answer exchange
// is 'active' in the offer and 'passive' in the answer.
local_sdp.set_dtls_role("passive");
}
session->set_remote_sdp(remote_sdp);
// We must set up the local SDP, then initialize the session object.
session->set_local_sdp(local_sdp);
@ -348,13 +390,10 @@ srs_error_t SrsRtcServer::create_session(
SrsContextId cid = _srs_context->get_id();
// Before the session initializes, we must set up the local SDP.
if ((err = session->initialize(source, req, publish, username, cid)) != srs_success) {
srs_freep(session);
return srs_error_wrap(err, "init");
}
map_username_session.insert(make_pair(username, session));
*psession = session;
return err;
}

@ -39,6 +39,7 @@ class SrsHourGlass;
class SrsRtcConnection;
class SrsRequest;
class SrsSdp;
class SrsRtcStream;
class ISrsRtcServerHandler
{
@ -80,6 +81,12 @@ public:
SrsRequest* req, const SrsSdp& remote_sdp, SrsSdp& local_sdp, const std::string& mock_eip, bool publish,
SrsRtcConnection** psession
);
private:
srs_error_t do_create_session(
SrsRtcConnection* session, SrsRequest* req, const SrsSdp& remote_sdp, SrsSdp& local_sdp,
const std::string& mock_eip, bool publish, SrsRtcStream* source
);
public:
// We start offering, create_session2 to generate offer, setup_session2 to handle answer.
srs_error_t create_session2(SrsSdp& local_sdp, SrsRtcConnection** psession);
srs_error_t setup_session2(SrsRtcConnection* session, SrsRequest* req, const SrsSdp& remote_sdp);

@ -35,6 +35,8 @@
#include <srs_kernel_buffer.hpp>
#include <srs_kernel_rtc_rtp.hpp>
#include <srs_core_autofree.hpp>
#include <srs_app_rtc_queue.hpp>
#include <srs_app_rtc_conn.hpp>
#ifdef SRS_FFMPEG_FIT
#include <srs_app_rtc_codec.hpp>
@ -90,6 +92,41 @@ srs_error_t aac_raw_append_adts_header(SrsSharedPtrMessage* shared_audio, SrsFor
return err;
}
uint64_t SrsNtp::kMagicNtpFractionalUnit = 1ULL << 32;
SrsNtp::SrsNtp()
{
system_ms_ = 0;
ntp_ = 0;
ntp_second_ = 0;
ntp_fractions_ = 0;
}
SrsNtp::~SrsNtp()
{
}
SrsNtp SrsNtp::from_time_ms(uint64_t ms)
{
SrsNtp srs_ntp;
srs_ntp.system_ms_ = ms;
srs_ntp.ntp_second_ = ms / 1000;
srs_ntp.ntp_fractions_ = (static_cast<double>(ms % 1000 / 1000.0)) * kMagicNtpFractionalUnit;
srs_ntp.ntp_ = (static_cast<uint64_t>(srs_ntp.ntp_second_) << 32) | srs_ntp.ntp_fractions_;
return srs_ntp;
}
SrsNtp SrsNtp::to_time_ms(uint64_t ntp)
{
SrsNtp srs_ntp;
srs_ntp.ntp_ = ntp;
srs_ntp.ntp_second_ = (ntp & 0xFFFFFFFF00000000ULL) >> 32;
srs_ntp.ntp_fractions_ = (ntp & 0x00000000FFFFFFFFULL);
srs_ntp.system_ms_ = (static_cast<uint64_t>(srs_ntp.ntp_second_) * 1000) +
(static_cast<double>(static_cast<uint64_t>(srs_ntp.ntp_fractions_) * 1000.0) / kMagicNtpFractionalUnit);
return srs_ntp;
}
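// Hypothetical sketch, not part of this patch: pack a wall-clock millisecond timestamp
// into the 64-bit NTP format and unpack it again. The fractional part is truncated, so
// the roundtrip may be off by one millisecond.
void srs_ntp_example()
{
    uint64_t now_ms = 1588000000123ULL;          // An arbitrary wall-clock time in ms.
    SrsNtp ntp = SrsNtp::from_time_ms(now_ms);   // Seconds in the high 32 bits, fraction in the low 32 bits.
    SrsNtp back = SrsNtp::to_time_ms(ntp.ntp_);  // back.system_ms_ is 1588000000122 or 1588000000123.
    (void)back;
}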
SrsRtcConsumer::SrsRtcConsumer(SrsRtcStream* s)
{
source = s;
@ -254,6 +291,7 @@ SrsRtcStream::SrsRtcStream()
#else
bridger_ = new SrsRtcDummyBridger();
#endif
stream_desc_ = NULL;
}
SrsRtcStream::~SrsRtcStream()
@ -264,6 +302,7 @@ SrsRtcStream::~SrsRtcStream()
srs_freep(req);
srs_freep(bridger_);
srs_freep(stream_desc_);
}
srs_error_t SrsRtcStream::initialize(SrsRequest* r)
@ -423,6 +462,36 @@ srs_error_t SrsRtcStream::on_rtp(SrsRtpPacket2* pkt)
return err;
}
void SrsRtcStream::set_stream_desc(SrsRtcStreamDescription* stream_desc)
{
srs_freep(stream_desc_);
stream_desc_ = stream_desc->copy();
}
std::vector<SrsRtcTrackDescription*> SrsRtcStream::get_track_desc(std::string type, std::string media_name)
{
std::vector<SrsRtcTrackDescription*> track_descs;
if (!stream_desc_) {
return track_descs;
}
if (type == "audio") {
if (stream_desc_->audio_track_desc_->media_->name_ == media_name) {
track_descs.push_back(stream_desc_->audio_track_desc_);
}
}
if (type == "video") {
std::vector<SrsRtcTrackDescription*>::iterator it = stream_desc_->video_track_descs_.begin();
while (it != stream_desc_->video_track_descs_.end() ){
track_descs.push_back(*it);
++it;
}
}
return track_descs;
}
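// Hypothetical sketch, not part of this patch: how the stream description might be used.
// The publisher attaches the negotiated description to the source (the source keeps its
// own copy), and a later player queries tracks by type and codec name. The codec names
// here are illustrative.
void srs_stream_desc_example(SrsRtcStream* source, SrsRtcStreamDescription* negotiated)
{
    // Publisher side: the source copies the description, the caller still owns negotiated.
    source->set_stream_desc(negotiated);

    // Player side: fetch the opus audio track and all video tracks.
    std::vector<SrsRtcTrackDescription*> audio = source->get_track_desc("audio", "opus");
    std::vector<SrsRtcTrackDescription*> video = source->get_track_desc("video", "H264");
    (void)audio; (void)video;
}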
#ifdef SRS_FFMPEG_FIT
SrsRtcFromRtmpBridger::SrsRtcFromRtmpBridger(SrsRtcStream* source)
{
@ -975,3 +1044,672 @@ void SrsRtcDummyBridger::on_unpublish()
{
}
SrsCodecPayload::SrsCodecPayload()
{
}
SrsCodecPayload::SrsCodecPayload(uint8_t pt, std::string encode_name, int sample)
{
pt_ = pt;
name_ = encode_name;
sample_ = sample;
}
SrsCodecPayload::~SrsCodecPayload()
{
}
SrsCodecPayload* SrsCodecPayload::copy()
{
SrsCodecPayload* cp = new SrsCodecPayload();
cp->type_ = type_;
cp->pt_ = pt_;
cp->name_ = name_;
cp->sample_ = sample_;
cp->rtcp_fbs_ = rtcp_fbs_;
return cp;
}
SrsMediaPayloadType SrsCodecPayload::generate_media_payload_type()
{
SrsMediaPayloadType media_payload_type(pt_);
media_payload_type.encoding_name_ = name_;
media_payload_type.clock_rate_ = sample_;
media_payload_type.rtcp_fb_ = rtcp_fbs_;
return media_payload_type;
}
SrsVideoPayload::SrsVideoPayload()
{
}
SrsVideoPayload::SrsVideoPayload(uint8_t pt, std::string encode_name, int sample)
:SrsCodecPayload(pt, encode_name, sample)
{
h264_param_.profile_level_id = "";
h264_param_.packetization_mode = "";
h264_param_.level_asymmerty_allow = "";
}
SrsVideoPayload::~SrsVideoPayload()
{
}
SrsVideoPayload* SrsVideoPayload::copy()
{
SrsVideoPayload* cp = new SrsVideoPayload();
cp->type_ = type_;
cp->pt_ = pt_;
cp->name_ = name_;
cp->sample_ = sample_;
cp->rtcp_fbs_ = rtcp_fbs_;
cp->h264_param_ = h264_param_;
return cp;
}
SrsMediaPayloadType SrsVideoPayload::generate_media_payload_type()
{
SrsMediaPayloadType media_payload_type(pt_);
media_payload_type.encoding_name_ = name_;
media_payload_type.clock_rate_ = sample_;
media_payload_type.rtcp_fb_ = rtcp_fbs_;
std::ostringstream format_specific_param;
if (!h264_param_.level_asymmerty_allow.empty()) {
format_specific_param << "level-asymmetry-allowed=" << h264_param_.level_asymmerty_allow;
}
if (!h264_param_.packetization_mode.empty()) {
format_specific_param << ";packetization-mode=" << h264_param_.packetization_mode;
}
if (!h264_param_.profile_level_id.empty()) {
format_specific_param << ";profile-level-id=" << h264_param_.profile_level_id;
}
media_payload_type.format_specific_param_ = format_specific_param.str();
return media_payload_type;
}
srs_error_t SrsVideoPayload::set_h264_param_desc(std::string fmtp)
{
srs_error_t err = srs_success;
std::vector<std::string> vec = split_str(fmtp, ";");
for (size_t i = 0; i < vec.size(); ++i) {
std::vector<std::string> kv = split_str(vec[i], "=");
if (kv.size() == 2) {
if (kv[0] == "profile-level-id") {
h264_param_.profile_level_id = kv[1];
} else if (kv[0] == "packetization-mode") {
// 6.3. Non-Interleaved Mode
// This mode is in use when the value of the OPTIONAL packetization-mode
// media type parameter is equal to 1. This mode SHOULD be supported.
// It is primarily intended for low-delay applications. Only single NAL
// unit packets, STAP-As, and FU-As MAY be used in this mode. STAP-Bs,
// MTAPs, and FU-Bs MUST NOT be used. The transmission order of NAL
// units MUST comply with the NAL unit decoding order.
// @see https://tools.ietf.org/html/rfc6184#section-6.3
h264_param_.packetization_mode = kv[1];
} else if (kv[0] == "level-asymmetry-allowed") {
h264_param_.level_asymmerty_allow = kv[1];
} else {
return srs_error_new(ERROR_RTC_SDP_DECODE, "invalid h264 param=%s", kv[0].c_str());
}
} else {
return srs_error_new(ERROR_RTC_SDP_DECODE, "invalid h264 param=%s", vec[i].c_str());
}
}
return err;
}
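// Hypothetical sketch, not part of this patch: parsing a typical H.264 fmtp string into
// the H264SpecificParameter fields. The payload type and clock rate are illustrative.
srs_error_t srs_h264_fmtp_example()
{
    SrsVideoPayload h264(102, "H264", 90000);
    // Afterwards: packetization_mode is "1", profile_level_id is "42e01f",
    // and level_asymmerty_allow is "1".
    return h264.set_h264_param_desc("level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42e01f");
}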
SrsAudioPayload::SrsAudioPayload()
{
}
SrsAudioPayload::SrsAudioPayload(uint8_t pt, std::string encode_name, int sample, int channel)
:SrsCodecPayload(pt, encode_name, sample)
{
channel_ = channel;
opus_param_.minptime = 0;
opus_param_.use_inband_fec = false;
opus_param_.usedtx = false;
}
SrsAudioPayload::~SrsAudioPayload()
{
}
SrsAudioPayload* SrsAudioPayload::copy()
{
SrsAudioPayload* cp = new SrsAudioPayload();
cp->type_ = type_;
cp->pt_ = pt_;
cp->name_ = name_;
cp->sample_ = sample_;
cp->rtcp_fbs_ = rtcp_fbs_;
cp->channel_ = channel_;
cp->opus_param_ = opus_param_;
return cp;
}
SrsMediaPayloadType SrsAudioPayload::generate_media_payload_type()
{
SrsMediaPayloadType media_payload_type(pt_);
media_payload_type.encoding_name_ = name_;
media_payload_type.clock_rate_ = sample_;
media_payload_type.encoding_param_ = srs_int2str(channel_);
media_payload_type.rtcp_fb_ = rtcp_fbs_;
std::ostringstream format_specific_param;
if (opus_param_.minptime) {
format_specific_param << "minptime=" << opus_param_.minptime;
}
if (opus_param_.use_inband_fec) {
format_specific_param << ";useinbandfec=1";
}
if (opus_param_.usedtx) {
format_specific_param << ";usedtx=1";
}
media_payload_type.format_specific_param_ = format_specific_param.str();
return media_payload_type;
}
srs_error_t SrsAudioPayload::set_opus_param_desc(std::string fmtp)
{
srs_error_t err = srs_success;
std::vector<std::string> vec = split_str(fmtp, ";");
for (size_t i = 0; i < vec.size(); ++i) {
std::vector<std::string> kv = split_str(vec[i], "=");
if (kv.size() == 2) {
if (kv[0] == "minptime") {
opus_param_.minptime = (int)::atol(kv[1].c_str());
} else if (kv[0] == "useinbandfec") {
opus_param_.use_inband_fec = (kv[1] == "1") ? true : false;
} else if (kv[0] == "usedtx") {
opus_param_.usedtx = (kv[1] == "1") ? true : false;
}
} else {
return srs_error_new(ERROR_RTC_SDP_DECODE, "invalid opus param=%s", vec[i].c_str());
}
}
return err;
}
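// Hypothetical sketch, not part of this patch: parsing a typical Opus fmtp string; only
// minptime, useinbandfec and usedtx are recognized. The payload type is illustrative.
srs_error_t srs_opus_fmtp_example()
{
    SrsAudioPayload opus(111, "opus", 48000, 2);
    // Afterwards: minptime is 10, use_inband_fec is true, and usedtx stays false.
    return opus.set_opus_param_desc("minptime=10;useinbandfec=1");
}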
SrsRtcTrackDescription::SrsRtcTrackDescription()
{
ssrc_ = 0;
rtx_ssrc_ = 0;
fec_ssrc_ = 0;
is_active_ = true;
media_ = NULL;
red_ = NULL;
rtx_ = NULL;
ulpfec_ = NULL;
rsfec_ = NULL;
}
SrsRtcTrackDescription::~SrsRtcTrackDescription()
{
srs_freep(media_);
srs_freep(red_);
srs_freep(rtx_);
srs_freep(ulpfec_);
srs_freep(rsfec_);
}
bool SrsRtcTrackDescription::has_ssrc(uint32_t ssrc)
{
if (ssrc == ssrc_ || ssrc == rtx_ssrc_ || ssrc == fec_ssrc_) {
return true;
}
return false;
}
void SrsRtcTrackDescription::add_rtp_extension_desc(int id, std::string uri)
{
extmaps_[id] = uri;
}
void SrsRtcTrackDescription::set_direction(std::string direction)
{
direction_ = direction;
}
void SrsRtcTrackDescription::set_codec_payload(SrsCodecPayload* payload)
{
media_ = payload;
}
void SrsRtcTrackDescription::create_auxiliary_payload(const std::vector<SrsMediaPayloadType> payloads)
{
if (!payloads.size()) {
return;
}
SrsMediaPayloadType payload = payloads.at(0);
if (payload.encoding_name_ == "red"){
srs_freep(red_);
red_ = new SrsCodecPayload(payload.payload_type_, "red", payload.clock_rate_);
} else if (payload.encoding_name_ == "rtx") {
srs_freep(rtx_);
rtx_ = new SrsCodecPayload(payload.payload_type_, "rtx", payload.clock_rate_);
} else if (payload.encoding_name_ == "ulpfec") {
srs_freep(ulpfec_);
ulpfec_ = new SrsCodecPayload(payload.payload_type_, "ulpfec", payload.clock_rate_);
} else if (payload.encoding_name_ == "rsfec") {
srs_freep(rsfec_);
rsfec_ = new SrsCodecPayload(payload.payload_type_, "rsfec", payload.clock_rate_);
}
}
void SrsRtcTrackDescription::set_rtx_ssrc(uint32_t ssrc)
{
rtx_ssrc_ = ssrc;
}
void SrsRtcTrackDescription::set_fec_ssrc(uint32_t ssrc)
{
fec_ssrc_ = ssrc;
}
void SrsRtcTrackDescription::set_mid(std::string mid)
{
mid_ = mid;
}
int SrsRtcTrackDescription::get_rtp_extension_id(std::string uri)
{
for(std::map<int, std::string>::iterator it = extmaps_.begin(); it != extmaps_.end(); ++it) {
if(uri == it->second) {
return it->first;
}
}
return -1;
}
SrsRtcTrackDescription* SrsRtcTrackDescription::copy()
{
SrsRtcTrackDescription* cp = new SrsRtcTrackDescription();
cp->type_ = type_;
cp->id_ = id_;
cp->ssrc_ = ssrc_;
cp->fec_ssrc_ = fec_ssrc_;
cp->rtx_ssrc_ = rtx_ssrc_;
cp->extmaps_ = extmaps_;
cp->direction_ = direction_;
cp->mid_ = mid_;
cp->is_active_ = is_active_;
cp->media_ = media_ ? media_->copy():NULL;
cp->red_ = red_ ? red_->copy():NULL;
cp->rtx_ = rtx_ ? rtx_->copy():NULL;
cp->ulpfec_ = ulpfec_ ? ulpfec_->copy():NULL;
cp->rsfec_ = rsfec_ ? rsfec_->copy():NULL;
return cp;
}
SrsRtcStreamDescription::SrsRtcStreamDescription()
{
audio_track_desc_ = NULL;
}
SrsRtcStreamDescription::~SrsRtcStreamDescription()
{
srs_freep(audio_track_desc_);
for (int i = 0; i < video_track_descs_.size(); ++i) {
srs_freep(video_track_descs_.at(i));
}
video_track_descs_.clear();
}
SrsRtcStreamDescription* SrsRtcStreamDescription::copy()
{
SrsRtcStreamDescription* stream_desc = new SrsRtcStreamDescription();
if (audio_track_desc_) {
stream_desc->audio_track_desc_ = audio_track_desc_->copy();
}
for (int i = 0; i < video_track_descs_.size(); ++i) {
stream_desc->video_track_descs_.push_back(video_track_descs_.at(i)->copy());
}
return stream_desc;
}
SrsRtcTrackDescription* SrsRtcStreamDescription::find_track_description_by_ssrc(uint32_t ssrc)
{
if (audio_track_desc_ && audio_track_desc_->has_ssrc(ssrc)) {
return audio_track_desc_;
}
for (int i = 0; i < video_track_descs_.size(); ++i) {
if (video_track_descs_.at(i)->has_ssrc(ssrc)) {
return video_track_descs_.at(i);
}
}
return NULL;
}
ISrsRtcTrack::ISrsRtcTrack()
{
}
ISrsRtcTrack::~ISrsRtcTrack()
{
}
SrsRtcRecvTrack::SrsRtcRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc, bool is_audio)
{
session_ = session;
track_desc_ = track_desc->copy();
if (is_audio) {
rtp_queue_ = new SrsRtpRingBuffer(100);
nack_receiver_ = new SrsRtpNackForReceiver(rtp_queue_, 100 * 2 / 3);
} else {
rtp_queue_ = new SrsRtpRingBuffer(1000);
nack_receiver_ = new SrsRtpNackForReceiver(rtp_queue_, 1000 * 2 / 3);
}
}
SrsRtcRecvTrack::~SrsRtcRecvTrack()
{
srs_freep(rtp_queue_);
srs_freep(nack_receiver_);
srs_freep(track_desc_);
}
bool SrsRtcRecvTrack::has_ssrc(uint32_t ssrc)
{
if (track_desc_) {
return track_desc_->has_ssrc(ssrc);
}
return false;
}
void SrsRtcRecvTrack::update_rtt(int rtt)
{
if (nack_receiver_) {
nack_receiver_->update_rtt(rtt);
}
}
void SrsRtcRecvTrack::update_send_report_time(const SrsNtp& ntp)
{
last_sender_report_ntp = ntp;
last_sender_report_sys_time = srs_update_system_time();
}
srs_error_t SrsRtcRecvTrack::send_rtcp_rr()
{
srs_error_t err = srs_success;
if (session_) {
return session_->send_rtcp_rr(track_desc_->ssrc_, rtp_queue_, last_sender_report_sys_time, last_sender_report_ntp);
}
return err;
}
srs_error_t SrsRtcRecvTrack::send_rtcp_xr_rrtr()
{
srs_error_t err = srs_success;
if (track_desc_) {
return session_->send_rtcp_xr_rrtr(track_desc_->ssrc_);
}
return err;
}
srs_error_t SrsRtcRecvTrack::on_nack(SrsRtpPacket2* pkt)
{
srs_error_t err = srs_success;
uint32_t ssrc = pkt->header.get_ssrc();
uint16_t seq = pkt->header.get_sequence();
// TODO: Check whether this is necessary.
nack_receiver_->remove_timeout_packets();
SrsRtpNackInfo* nack_info = nack_receiver_->find(seq);
if (nack_info) {
// seq had been received.
nack_receiver_->remove(seq);
return err;
}
// Insert into the NACK check list.
uint16_t nack_first = 0, nack_last = 0;
if (!rtp_queue_->update(seq, nack_first, nack_last)) {
srs_warn("too old seq %u, range [%u, %u]", seq, rtp_queue_->begin, rtp_queue_->end);
}
if (srs_rtp_seq_distance(nack_first, nack_last) > 0) {
srs_trace("update seq=%u, nack range [%u, %u]", seq, nack_first, nack_last);
nack_receiver_->insert(nack_first, nack_last);
nack_receiver_->check_queue_size();
}
// Insert a copy into the track's RTP ring buffer.
rtp_queue_->set(seq, pkt->copy());
// Send NACKs for the missing packets, if any.
session_->check_send_nacks(nack_receiver_, ssrc);
return err;
}
srs_error_t SrsRtcRecvTrack::on_rtp(SrsRtcStream* source, SrsRtpPacket2* pkt)
{
return srs_success;
}
SrsRtcAudioRecvTrack::SrsRtcAudioRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc)
: SrsRtcRecvTrack(session, track_desc, true)
{
}
SrsRtcAudioRecvTrack::~SrsRtcAudioRecvTrack()
{
}
srs_error_t SrsRtcAudioRecvTrack::on_rtp(SrsRtcStream* source, SrsRtpPacket2* pkt)
{
srs_error_t err = srs_success;
// uint8_t pt = pkt->header.get_payload_type();
// SrsRtcTrackDescription track = rtc_stream_desc_->get_audio_tracks();
// // process red packet.
// if (pt == red_pt) {
// } else if (pt == rtx_pt) { // process rtx_pt.
// // restore retransmission packet.
// } else if (pt == fec_pt) {
// }
if (source) {
if ((err = source->on_rtp(pkt)) != srs_success) {
return srs_error_wrap(err, "source on rtp");
}
}
// For NACK to handle packet.
if ((err = on_nack(pkt)) != srs_success) {
return srs_error_wrap(err, "on nack");
}
return err;
}
SrsRtcVideoRecvTrack::SrsRtcVideoRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc)
: SrsRtcRecvTrack(session, track_desc, false)
{
request_key_frame_ = false;
}
SrsRtcVideoRecvTrack::~SrsRtcVideoRecvTrack()
{
}
srs_error_t SrsRtcVideoRecvTrack::on_rtp(SrsRtcStream* source, SrsRtpPacket2* pkt)
{
srs_error_t err = srs_success;
pkt->frame_type = SrsFrameTypeVideo;
// TODO: FIXME: add rtp process
if (request_key_frame_) {
// TODO: FIXME: add coroutine to request key frame.
request_key_frame_ = false;
// TODO: FIXME: Check error.
session_->send_rtcp_fb_pli(track_desc_->ssrc_);
}
if (source) {
if ((err = source->on_rtp(pkt)) != srs_success) {
return srs_error_wrap(err, "source on rtp");
}
}
// For NACK to handle packet.
if ((err = on_nack(pkt)) != srs_success) {
return srs_error_wrap(err, "on nack");
}
return err;
}
void SrsRtcVideoRecvTrack::request_keyframe()
{
request_key_frame_ = true;
}
SrsRtcSendTrack::SrsRtcSendTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc, bool is_audio)
{
session_ = session;
track_desc_ = track_desc->copy();
if (is_audio) {
rtp_queue_ = new SrsRtpRingBuffer(100);
} else {
rtp_queue_ = new SrsRtpRingBuffer(1000);
}
}
SrsRtcSendTrack::~SrsRtcSendTrack()
{
srs_freep(rtp_queue_);
srs_freep(track_desc_);
}
bool SrsRtcSendTrack::has_ssrc(uint32_t ssrc)
{
if (track_desc_) {
return track_desc_->has_ssrc(ssrc);
}
return false;
}
SrsRtpPacket2* SrsRtcSendTrack::fetch_rtp_packet(uint16_t seq)
{
if (rtp_queue_) {
return rtp_queue_->at(seq);
}
return NULL;
}
srs_error_t SrsRtcSendTrack::on_rtp(std::vector<SrsRtpPacket2*>& send_packets, SrsRtpPacket2* pkt)
{
return srs_success;
}
srs_error_t SrsRtcSendTrack::on_rtcp(SrsRtpPacket2* pkt)
{
return srs_success;
}
SrsRtcAudioSendTrack::SrsRtcAudioSendTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc)
: SrsRtcSendTrack(session, track_desc, true)
{
}
SrsRtcAudioSendTrack::~SrsRtcAudioSendTrack()
{
}
srs_error_t SrsRtcAudioSendTrack::on_rtp(std::vector<SrsRtpPacket2*>& send_packets, SrsRtpPacket2* pkt)
{
srs_error_t err = srs_success;
pkt->header.set_ssrc(track_desc_->ssrc_);
pkt->header.set_payload_type(track_desc_->media_->pt_);
// Put rtp packet to NACK/ARQ queue
if (true) {
SrsRtpPacket2* nack = pkt->copy();
rtp_queue_->set(nack->header.get_sequence(), nack);
}
send_packets.push_back(pkt);
return err;
}
srs_error_t SrsRtcAudioSendTrack::on_rtcp(SrsRtpPacket2* pkt)
{
srs_error_t err = srs_success;
// process rtcp
return err;
}
SrsRtcVideoSendTrack::SrsRtcVideoSendTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc)
: SrsRtcSendTrack(session, track_desc, false)
{
}
SrsRtcVideoSendTrack::~SrsRtcVideoSendTrack()
{
}
srs_error_t SrsRtcVideoSendTrack::on_rtp(std::vector<SrsRtpPacket2*>& send_packets, SrsRtpPacket2* pkt)
{
srs_error_t err = srs_success;
pkt->header.set_ssrc(track_desc_->ssrc_);
pkt->header.set_payload_type(track_desc_->media_->pt_);
// Put rtp packet to NACK/ARQ queue
if (true) {
SrsRtpPacket2* nack = pkt->copy();
rtp_queue_->set(nack->header.get_sequence(), nack);
}
send_packets.push_back(pkt);
return err;
}
srs_error_t SrsRtcVideoSendTrack::on_rtcp(SrsRtpPacket2* pkt)
{
srs_error_t err = srs_success;
// process rtcp
return err;
}

@ -28,7 +28,12 @@
#include <vector>
#include <map>
#include <inttypes.h>
#include <vector>
#include <string>
#include <map>
#include <srs_app_rtc_sdp.hpp>
#include <srs_service_st.hpp>
#include <srs_app_source.hpp>
@ -43,6 +48,28 @@ class SrsRtcFromRtmpBridger;
class SrsAudioRecode;
class SrsRtpPacket2;
class SrsSample;
class SrsRtcStreamDescription;
class SrsRtcTrackDescription;
class SrsRtcConnection;
class SrsRtpRingBuffer;
class SrsRtpNackForReceiver;
class SrsNtp
{
public:
uint64_t system_ms_;
uint64_t ntp_;
uint32_t ntp_second_;
uint32_t ntp_fractions_;
public:
SrsNtp();
virtual ~SrsNtp();
public:
static SrsNtp from_time_ms(uint64_t ms);
static SrsNtp to_time_ms(uint64_t ntp);
public:
static uint64_t kMagicNtpFractionalUnit;
};
class SrsRtcConsumer
{
@ -100,7 +127,7 @@ public:
ISrsRtcPublishStream();
virtual ~ISrsRtcPublishStream();
public:
virtual void request_keyframe() = 0;
virtual void request_keyframe(uint32_t ssrc) = 0;
};
// A Source is a stream, to publish and to play with, binding to SrsRtcPublishStream and SrsRtcPlayStream.
@ -118,6 +145,8 @@ private:
ISrsRtcPublishStream* publish_stream_;
// Transmux RTMP to RTC.
ISrsSourceBridger* bridger_;
// Stream description for this stream.
SrsRtcStreamDescription* stream_desc_;
private:
// To delivery stream to clients.
std::vector<SrsRtcConsumer*> consumers;
@ -159,6 +188,9 @@ public:
void set_publish_stream(ISrsRtcPublishStream* v);
// Consume the shared RTP packet, user must free it.
srs_error_t on_rtp(SrsRtpPacket2* pkt);
// Set and get the stream description for the source.
void set_stream_desc(SrsRtcStreamDescription* stream_desc);
std::vector<SrsRtcTrackDescription*> get_track_desc(std::string type, std::string media_type);
};
#ifdef SRS_FFMPEG_FIT
@ -214,5 +246,242 @@ public:
virtual void on_unpublish();
};
// TODO: FIXME: Rename it.
class SrsCodecPayload
{
public:
std::string type_;
uint8_t pt_;
std::string name_;
int sample_;
std::vector<std::string> rtcp_fbs_;
public:
SrsCodecPayload();
SrsCodecPayload(uint8_t pt, std::string encode_name, int sample);
virtual ~SrsCodecPayload();
public:
virtual SrsCodecPayload* copy();
virtual SrsMediaPayloadType generate_media_payload_type();
};
// TODO: FIXME: Rename it.
class SrsVideoPayload : public SrsCodecPayload
{
public:
struct H264SpecificParameter
{
std::string profile_level_id;
std::string packetization_mode;
std::string level_asymmerty_allow;
};
H264SpecificParameter h264_param_;
public:
SrsVideoPayload();
SrsVideoPayload(uint8_t pt, std::string encode_name, int sample);
virtual ~SrsVideoPayload();
public:
virtual SrsVideoPayload* copy();
virtual SrsMediaPayloadType generate_media_payload_type();
public:
srs_error_t set_h264_param_desc(std::string fmtp);
};
// TODO: FIXME: Rename it.
class SrsAudioPayload : public SrsCodecPayload
{
struct SrsOpusParameter
{
int minptime;
bool use_inband_fec;
bool usedtx;
};
public:
int channel_;
SrsOpusParameter opus_param_;
public:
SrsAudioPayload();
SrsAudioPayload(uint8_t pt, std::string encode_name, int sample, int channel);
virtual ~SrsAudioPayload();
public:
virtual SrsAudioPayload* copy();
virtual SrsMediaPayloadType generate_media_payload_type();
public:
srs_error_t set_opus_param_desc(std::string fmtp);
};
class SrsRtcTrackDescription
{
public:
// type: audio, video
std::string type_;
// track_id
std::string id_;
// ssrc is the primary ssrc for this track,
// if sdp has ssrc-group, it is the first ssrc of the ssrc-group
uint32_t ssrc_;
// rtx ssrc is the second ssrc of "FEC" src-group,
// if no rtx ssrc, rtx_ssrc_ = 0.
uint32_t fec_ssrc_;
// rtx ssrc is the second ssrc of "FID" src-group,
// if no rtx ssrc, rtx_ssrc_ = 0.
uint32_t rtx_ssrc_;
// key: rtp header extension id, value: rtp header extension uri.
std::map<int, std::string> extmaps_;
// Whether this track is active. Default: active.
bool is_active_;
// direction
std::string direction_;
// TODO: FIXME: whether mid is needed?
std::string mid_;
// Media payload, such as opus or h264.
SrsCodecPayload* media_;
SrsCodecPayload* red_;
SrsCodecPayload* rtx_;
SrsCodecPayload* ulpfec_;
SrsCodecPayload* rsfec_;
public:
SrsRtcTrackDescription();
virtual ~SrsRtcTrackDescription();
public:
// Whether the track contains the given ssrc.
// For example, we need to check whether the track has the ssrc in the ssrc-group,
// and then add the ssrc-group to the track.
bool has_ssrc(uint32_t ssrc);
public:
void add_rtp_extension_desc(int id, std::string uri);
void set_direction(std::string direction);
void set_codec_payload(SrsCodecPayload* payload);
// Auxiliary payloads include red, rtx, ulpfec and rsfec.
void create_auxiliary_payload(const std::vector<SrsMediaPayloadType> payload_types);
void set_rtx_ssrc(uint32_t ssrc);
void set_fec_ssrc(uint32_t ssrc);
void set_mid(std::string mid);
int get_rtp_extension_id(std::string uri);
public:
SrsRtcTrackDescription* copy();
public:
// find media with payload type.
SrsMediaPayloadType generate_media_payload_type(int payload_type);
};
class SrsRtcStreamDescription
{
public:
// The id for this stream.
std::string id_;
SrsRtcTrackDescription* audio_track_desc_;
std::vector<SrsRtcTrackDescription*> video_track_descs_;
public:
SrsRtcStreamDescription();
virtual ~SrsRtcStreamDescription();
public:
SrsRtcStreamDescription* copy();
SrsRtcTrackDescription* find_track_description_by_ssrc(uint32_t ssrc);
};
class ISrsRtcTrack
{
public:
ISrsRtcTrack();
virtual ~ISrsRtcTrack();
public:
virtual srs_error_t on_rtp(SrsRtpPacket2* pkt) = 0;
};
class SrsRtcRecvTrack
{
protected:
SrsRtcTrackDescription* track_desc_;
SrsRtcConnection* session_;
SrsRtpRingBuffer* rtp_queue_;
SrsRtpNackForReceiver* nack_receiver_;
// Sender report NTP and its received time.
SrsNtp last_sender_report_ntp;
uint64_t last_sender_report_sys_time;
public:
SrsRtcRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* stream_descs, bool is_audio);
virtual ~SrsRtcRecvTrack();
public:
bool has_ssrc(uint32_t ssrc);
void update_rtt(int rtt);
void update_send_report_time(const SrsNtp& ntp);
srs_error_t send_rtcp_rr();
srs_error_t send_rtcp_xr_rrtr();
protected:
srs_error_t on_nack(SrsRtpPacket2* pkt);
public:
virtual srs_error_t on_rtp(SrsRtcStream* source, SrsRtpPacket2* pkt);
};
class SrsRtcAudioRecvTrack : public SrsRtcRecvTrack
{
public:
SrsRtcAudioRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc);
virtual ~SrsRtcAudioRecvTrack();
public:
virtual srs_error_t on_rtp(SrsRtcStream* source, SrsRtpPacket2* pkt);
};
class SrsRtcVideoRecvTrack : public SrsRtcRecvTrack
{
private:
bool request_key_frame_;
public:
SrsRtcVideoRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* stream_descs);
virtual ~SrsRtcVideoRecvTrack();
public:
virtual srs_error_t on_rtp(SrsRtcStream* source, SrsRtpPacket2* pkt);
public:
void request_keyframe();
};
class SrsRtcSendTrack
{
protected:
// send track description
SrsRtcTrackDescription* track_desc_;
SrsRtcConnection* session_;
// NACK ARQ ring buffer.
SrsRtpRingBuffer* rtp_queue_;
public:
SrsRtcSendTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc, bool is_audio);
virtual ~SrsRtcSendTrack();
public:
bool has_ssrc(uint32_t ssrc);
SrsRtpPacket2* fetch_rtp_packet(uint16_t seq);
public:
virtual srs_error_t on_rtp(std::vector<SrsRtpPacket2*>& send_packets, SrsRtpPacket2* pkt);
virtual srs_error_t on_rtcp(SrsRtpPacket2* pkt);
};
class SrsRtcAudioSendTrack : public SrsRtcSendTrack
{
public:
SrsRtcAudioSendTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc);
virtual ~SrsRtcAudioSendTrack();
public:
virtual srs_error_t on_rtp(std::vector<SrsRtpPacket2*>& send_packets, SrsRtpPacket2* pkt);
virtual srs_error_t on_rtcp(SrsRtpPacket2* pkt);
};
class SrsRtcVideoSendTrack : public SrsRtcSendTrack
{
public:
SrsRtcVideoSendTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc);
virtual ~SrsRtcVideoSendTrack();
public:
virtual srs_error_t on_rtp(std::vector<SrsRtpPacket2*>& send_packets, SrsRtpPacket2* pkt);
virtual srs_error_t on_rtcp(SrsRtpPacket2* pkt);
};
#endif

@ -353,6 +353,9 @@
#define ERROR_RTC_NO_SESSION 5022
#define ERROR_RTC_INVALID_PARAMS 5023
#define ERROR_RTC_DUMMY_BRIDGER 5024
#define ERROR_RTC_STREM_STARTED 5025
#define ERROR_RTC_STREAM_DESC 5026
#define ERROR_RTC_TRACK_CODEC 5027
///////////////////////////////////////////////////////
// GB28181 API error.
