Merge branch '2.0release' into develop

pull/499/head
winlin 10 years ago
commit 114f4447bb

@@ -542,6 +542,11 @@ bool SrsHlsMuxer::is_segment_overflow()
 {
     srs_assert(current);
 
+    // to prevent very small segment.
+    if (current->duration * 1000 < 2 * SRS_AUTO_HLS_SEGMENT_MIN_DURATION_MS) {
+        return false;
+    }
+
     // use N% deviation, to smoother.
     double deviation = hls_ts_floor? SRS_HLS_FLOOR_REAP_PERCENT * deviation_ts * hls_fragment : 0.0;
     srs_info("hls: dur=%.2f, tar=%.2f, dev=%.2fms/%dp, frag=%.2f",
@@ -559,7 +564,18 @@ bool SrsHlsMuxer::is_segment_absolutely_overflow()
 {
     // @see https://github.com/winlinvip/simple-rtmp-server/issues/151#issuecomment-83553950
     srs_assert(current);
-    return current->duration >= hls_aof_ratio * hls_fragment;
+
+    // to prevent very small segment.
+    if (current->duration * 1000 < 2 * SRS_AUTO_HLS_SEGMENT_MIN_DURATION_MS) {
+        return false;
+    }
+
+    // use N% deviation, to smoother.
+    double deviation = hls_ts_floor? SRS_HLS_FLOOR_REAP_PERCENT * deviation_ts * hls_fragment : 0.0;
+    srs_info("hls: dur=%.2f, tar=%.2f, dev=%.2fms/%dp, frag=%.2f",
+        current->duration, hls_fragment + deviation, deviation, deviation_ts, hls_fragment);
+
+    return current->duration >= hls_aof_ratio * hls_fragment + deviation;
 }
 
 int SrsHlsMuxer::update_acodec(SrsCodecAudio ac)
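
For orientation, a minimal standalone sketch of the reap rule the two hunks above introduce, not the SRS implementation itself: a segment below twice the minimum duration is never reaped, and when floor-aligned naming (hls_ts_floor) is on, the target is relaxed by an N% deviation. kMinSegmentMs and kFloorReapPercent are illustrative stand-ins for the SRS constants; is_segment_absolutely_overflow() additionally scales the target by hls_aof_ratio.

#include <cstdio>

// Illustrative stand-ins for SRS_AUTO_HLS_SEGMENT_MIN_DURATION_MS and
// SRS_HLS_FLOOR_REAP_PERCENT; the real values live in the SRS headers.
static const double kMinSegmentMs = 100;
static const double kFloorReapPercent = 0.3;

// duration and fragment are in seconds; mirrors the shape of is_segment_overflow().
bool should_reap(double duration, double fragment, bool ts_floor, int deviation_ts)
{
    // never reap a segment below 2x the minimum duration.
    if (duration * 1000 < 2 * kMinSegmentMs) {
        return false;
    }

    // with floor-aligned ts naming, allow an N% deviation so the reap point
    // drifts toward the fragment grid instead of reaping exactly on target.
    double deviation = ts_floor ? kFloorReapPercent * deviation_ts * fragment : 0.0;
    return duration >= fragment + deviation;
}

int main()
{
    printf("overflow=%d\n", should_reap(10.5, 10.0, true, 0)); // 1: past target, no deviation yet
    printf("overflow=%d\n", should_reap(10.5, 10.0, true, 1)); // 0: target relaxed by 30% of one fragment
    return 0;
}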
@@ -968,7 +984,7 @@ int SrsHlsCache::write_audio(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
     // we use absolutely overflow of segment to make jwplayer/ffplay happy
     // @see https://github.com/winlinvip/simple-rtmp-server/issues/151#issuecomment-71155184
     if (cache->audio && muxer->is_segment_absolutely_overflow()) {
-        srs_warn("hls: absolute audio reap segment.");
+        srs_info("hls: absolute audio reap segment.");
         if ((ret = reap_segment("audio", muxer, cache->audio->pts)) != ERROR_SUCCESS) {
             return ret;
         }
@@ -991,7 +1007,7 @@ int SrsHlsCache::write_video(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
     // do reap ts if any of:
     //      a. wait keyframe and got keyframe.
     //      b. always reap when not wait keyframe.
-    if (!muxer->wait_keyframe() || sample->frame_type == SrsCodecVideoAVCFrameKeyFrame) {
+    if (!muxer->wait_keyframe()|| sample->frame_type == SrsCodecVideoAVCFrameKeyFrame) {
         // when wait keyframe, there must exists idr frame in sample.
         if (!sample->has_idr && muxer->wait_keyframe()) {
             srs_warn("hls: ts starts without IDR, first nalu=%d, idr=%d", sample->first_nalu_type, sample->has_idr);

@@ -78,6 +78,7 @@ SrsTsChannel::SrsTsChannel()
     stream = SrsTsStreamReserved;
     msg = NULL;
     continuity_counter = 0;
+    context = NULL;
 }
 
 SrsTsChannel::~SrsTsChannel()
@@ -196,6 +197,7 @@ ISrsTsHandler::~ISrsTsHandler()
 
 SrsTsContext::SrsTsContext()
 {
+    pure_audio = false;
     vcodec = SrsCodecVideoReserved;
     acodec = SrsCodecAudioReserved1;
 }
@@ -210,6 +212,24 @@ SrsTsContext::~SrsTsContext()
     pids.clear();
 }
 
+bool SrsTsContext::is_pure_audio()
+{
+    return pure_audio;
+}
+
+void SrsTsContext::on_pmt_parsed()
+{
+    pure_audio = true;
+
+    std::map<int, SrsTsChannel*>::iterator it;
+    for (it = pids.begin(); it != pids.end(); ++it) {
+        SrsTsChannel* channel = it->second;
+        if (channel->apply == SrsTsPidApplyVideo) {
+            pure_audio = false;
+        }
+    }
+}
+
 void SrsTsContext::reset()
 {
     vcodec = SrsCodecVideoReserved;
@@ -230,6 +250,7 @@ void SrsTsContext::set(int pid, SrsTsPidApply apply_pid, SrsTsStream stream)
 
     if (pids.find(pid) == pids.end()) {
         channel = new SrsTsChannel();
+        channel->context = this;
         pids[pid] = channel;
     } else {
         channel = pids[pid];
@@ -2302,6 +2323,7 @@ int SrsTsPayloadPAT::psi_decode(SrsStream* stream)
     // update the apply pid table.
     packet->context->set(packet->pid, SrsTsPidApplyPAT);
+    packet->context->on_pmt_parsed();
 
     return ret;
 }
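
As a toy illustration of the rule on_pmt_parsed() implements above, a standalone rewrite rather than the SRS types: a ts context is pure audio exactly when none of the pids registered through set() is applied as video.

#include <map>
#include <cstdio>

// Reduced stand-in for SrsTsPidApply: only the two cases that matter here.
enum PidApply { PidApplyAudio, PidApplyVideo };

// Mirrors on_pmt_parsed(): pure audio iff no registered pid carries video.
bool detect_pure_audio(const std::map<int, PidApply>& pids)
{
    bool pure_audio = true;
    std::map<int, PidApply>::const_iterator it;
    for (it = pids.begin(); it != pids.end(); ++it) {
        if (it->second == PidApplyVideo) {
            pure_audio = false;
        }
    }
    return pure_audio;
}

int main()
{
    std::map<int, PidApply> pids;
    pids[0x101] = PidApplyAudio;
    printf("pure_audio=%d\n", detect_pure_audio(pids)); // 1: only an audio pid registered
    pids[0x100] = PidApplyVideo;
    printf("pure_audio=%d\n", detect_pure_audio(pids)); // 0: a video pid appeared
    return 0;
}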

@@ -172,6 +172,7 @@ struct SrsTsChannel
     SrsTsPidApply apply;
     SrsTsStream stream;
     SrsTsMessage* msg;
+    SrsTsContext* context;
 
     // for encoder.
     u_int8_t continuity_counter;
@@ -343,6 +344,7 @@ class SrsTsContext
 // codec
 private:
     std::map<int, SrsTsChannel*> pids;
+    bool pure_audio;
 // encoder
 private:
     // when any codec changed, write the PAT/PMT.
@@ -352,6 +354,14 @@ public:
     SrsTsContext();
     virtual ~SrsTsContext();
 public:
+    /**
+    * whether the hls stream is pure audio stream.
+    */
+    virtual bool is_pure_audio();
+    /**
+    * when PMT table parsed, we know some info about stream.
+    */
+    virtual void on_pmt_parsed();
     /**
     * reset the context for a new ts segment start.
     */

@@ -561,7 +561,7 @@ void SrsIngestSrsInput::fetch_all_ts(bool fresh_m3u8)
         }
 
         // only wait for a duration of last piece.
-        if (i == pieces.size() - 1) {
+        if (i == (int)pieces.size() - 1) {
             next_connect_time = srs_update_system_time_ms() + (int)tp->duration * 1000;
         }
     }
@@ -657,7 +657,7 @@ public:
     SrsIngestSrsOutput(SrsHttpUri* rtmp) {
         out_rtmp = rtmp;
         disconnected = false;
-        raw_aac_dts = 0;
+        raw_aac_dts = srs_update_system_time_ms();
 
         req = NULL;
         io = NULL;
@@ -807,12 +807,14 @@ int SrsIngestSrsOutput::do_on_aac_frame(SrsStream* avs, double duration)
 {
     int ret = ERROR_SUCCESS;
 
+    u_int32_t duration_ms = (u_int32_t)(duration * 1000);
+
     // ts tbn to flv tbn.
     u_int32_t dts = (u_int32_t)raw_aac_dts;
-    raw_aac_dts += (int64_t)(duration * 1000);
+    raw_aac_dts += duration_ms;
 
     // got the next msg to calc the delta duration for each audio.
-    u_int32_t max_dts = dts + (u_int32_t)(duration * 1000);
+    u_int32_t max_dts = dts + duration_ms;
 
     // send each frame.
     while (!avs->empty()) {
@@ -852,7 +854,7 @@ int SrsIngestSrsOutput::do_on_aac_frame(SrsStream* avs, double duration)
         }
 
         // calc the delta of dts, when previous frame output.
-        u_int32_t delta = (duration * 1000) / (avs->size() / frame_size);
+        u_int32_t delta = duration_ms / (avs->size() / frame_size);
         dts = (u_int32_t)(srs_min(max_dts, dts + delta));
     }
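
The do_on_aac_frame() hunks above only cache duration * 1000 once as duration_ms; the timing model is unchanged: the piece's duration is spread evenly over its AAC frames and each frame's dts is clamped so it never passes the start of the next piece. A toy sketch of that arithmetic (fixed frame count for brevity, whereas the real loop derives the remaining frame count from the buffer on every iteration):

#include <algorithm>
#include <cstdio>
#include <stdint.h>

int main()
{
    uint32_t duration_ms = 1000;  // duration of the ts piece, in milliseconds
    int nb_frames = 43;           // roughly the number of 1024/44100s AAC frames in one second

    uint32_t dts = 0;                          // dts of the first frame of this piece
    uint32_t max_dts = dts + duration_ms;      // never run past the next piece
    uint32_t delta = duration_ms / nb_frames;  // evenly spread the duration

    for (int i = 0; i < nb_frames; i++) {
        if (i < 3 || i == nb_frames - 1) {
            printf("frame %2d dts=%u\n", i, (unsigned)dts);
        }
        dts = std::min<uint32_t>(max_dts, dts + delta);
    }
    return 0;
}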
@@ -863,28 +865,36 @@ int SrsIngestSrsOutput::parse_message_queue()
 {
     int ret = ERROR_SUCCESS;
 
+    if (queue.empty()) {
+        return ret;
+    }
+
+    SrsTsMessage* first_ts_msg = queue.begin()->second;
+    SrsTsContext* context = first_ts_msg->channel->context;
+    bool cpa = context->is_pure_audio();
+
     int nb_videos = 0;
-    int nb_audios = 0;
-    std::multimap<int64_t, SrsTsMessage*>::iterator it;
-    for (it = queue.begin(); it != queue.end(); ++it) {
-        SrsTsMessage* msg = it->second;
-        
-        // publish audio or video.
-        if (msg->channel->stream == SrsTsStreamVideoH264) {
-            nb_videos++;
-        } else {
-            nb_audios++;
+    if (!cpa) {
+        std::multimap<int64_t, SrsTsMessage*>::iterator it;
+        for (it = queue.begin(); it != queue.end(); ++it) {
+            SrsTsMessage* msg = it->second;
+            
+            // publish audio or video.
+            if (msg->channel->stream == SrsTsStreamVideoH264) {
+                nb_videos++;
+            }
+        }
+        
+        // always wait 2+ videos, to left one video in the queue.
+        // TODO: FIXME: support pure audio hls.
+        if (nb_videos <= 1) {
+            return ret;
         }
     }
 
-    // always wait 2+ videos, to left one video in the queue.
-    // TODO: FIXME: support pure audio hls.
-    if (nb_videos <= 1) {
-        return ret;
-    }
-
     // parse messages util the last video.
-    while (nb_videos > 1 && queue.size() > 0) {
+    while ((cpa && queue.size() > 1) || nb_videos > 1) {
+        srs_assert(!queue.empty());
         std::multimap<int64_t, SrsTsMessage*>::iterator it = queue.begin();
         SrsTsMessage* msg = it->second;
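
A toy model of the new drain condition in the hunk above, with simplified stand-in types rather than the SRS ones, and without the surrounding flush logic of the ingest tool: for pure-audio input the queue is drained down to a single message, otherwise it is drained only while more than one video remains, so one video is always left queued.

#include <map>
#include <cstdio>
#include <stdint.h>

// Simplified stand-in for a demuxed ts message keyed by dts.
struct Msg { bool is_video; };

void drain(std::multimap<int64_t, Msg>& queue, bool pure_audio)
{
    int nb_videos = 0;
    if (!pure_audio) {
        std::multimap<int64_t, Msg>::iterator it;
        for (it = queue.begin(); it != queue.end(); ++it) {
            if (it->second.is_video) nb_videos++;
        }
        // always wait for 2+ videos, so one video stays in the queue.
        if (nb_videos <= 1) return;
    }

    while ((pure_audio && queue.size() > 1) || nb_videos > 1) {
        std::multimap<int64_t, Msg>::iterator it = queue.begin();
        if (it->second.is_video) nb_videos--;
        printf("flush %s dts=%lld\n", it->second.is_video ? "video" : "audio", (long long)it->first);
        queue.erase(it);
    }
}

int main()
{
    std::multimap<int64_t, Msg> q;
    Msg a = { false };
    q.insert(std::make_pair((int64_t)0, a));
    q.insert(std::make_pair((int64_t)23, a));
    q.insert(std::make_pair((int64_t)46, a));
    drain(q, true);                         // pure audio: drain down to a single message
    printf("left=%d\n", (int)q.size());     // 1
    return 0;
}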
