diff --git a/src/zm_ffmpeg_camera.cpp b/src/zm_ffmpeg_camera.cpp index d5d48b054..c02259561 100644 --- a/src/zm_ffmpeg_camera.cpp +++ b/src/zm_ffmpeg_camera.cpp @@ -760,8 +760,6 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event dumpPacket(mFormatContext->streams[packet.stream_index], &packet, "Captured Packet"); if ( packet.dts == AV_NOPTS_VALUE ) { packet.dts = packet.pts; - //} else if ( packet.pts == AV_NOPTS_VALUE ) { - //packet.pts = packet.dts; } // Video recording @@ -781,19 +779,10 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event // Also don't know how much it matters for audio. if ( packet.stream_index == mVideoStreamId ) { //Write the packet to our video store - AVPacket out_packet; - av_init_packet(&out_packet); - if ( zm_av_packet_ref(&out_packet, &packet) < 0 ) { - Error("error refing packet"); - } - out_packet.pts = av_rescale_q(out_packet.pts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q); - out_packet.dts = av_rescale_q(out_packet.dts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q); - out_packet.duration = av_rescale_q(out_packet.duration, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q); - int ret = videoStore->writeVideoFramePacket(&out_packet); + int ret = videoStore->writeVideoFramePacket(&packet); if ( ret < 0 ) { //Less than zero and we skipped a frame Warning("Error writing last packet to videostore."); } - zm_av_packet_unref(&out_packet); } // end if video delete videoStore; @@ -888,7 +877,6 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event } } // end if recording or not - // Buffer video packets, since we are not recording. 
// All audio packets are keyframes, so only if it's a video keyframe if ( packet.stream_index == mVideoStreamId ) { @@ -901,18 +889,17 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event } packetqueue->clearQueue(monitor->GetPreEventCount(), mVideoStreamId); - - packetqueue->queuePacket(&packet, mFormatContext->streams[packet.stream_index]); + packetqueue->queuePacket(&packet); } else if ( packetqueue->size() ) { // it's a keyframe or we already have something in the queue - packetqueue->queuePacket(&packet, mFormatContext->streams[packet.stream_index]); + packetqueue->queuePacket(&packet); } } else if ( packet.stream_index == mAudioStreamId ) { // The following lines should ensure that the queue always begins with a video keyframe //Debug(2, "Have audio packet, reocrd_audio is (%d) and packetqueue.size is (%d)", record_audio, packetqueue.size() ); if ( record_audio && packetqueue->size() ) { // if it's audio, and we are doing audio, and there is already something in the queue - packetqueue->queuePacket(&packet, mFormatContext->streams[packet.stream_index]); + packetqueue->queuePacket(&packet); } } // end if packet type @@ -921,19 +908,9 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event if ( have_video_keyframe || keyframe ) { if ( videoStore ) { - AVPacket out_packet; - av_init_packet(&out_packet); - if ( zm_av_packet_ref( &out_packet, &packet ) < 0 ) { - Error("error refing packet"); - } - out_packet.pts = av_rescale_q(out_packet.pts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q); - out_packet.dts = av_rescale_q(out_packet.dts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q); - out_packet.duration = av_rescale_q(out_packet.duration, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q); - //Write the packet to our video store - int ret = videoStore->writeVideoFramePacket(&out_packet); - zm_av_packet_unref(&out_packet); + int ret 
= videoStore->writeVideoFramePacket(&packet); if ( ret < 0 ) { //Less than zero and we skipped a frame zm_av_packet_unref(&packet); return 0; @@ -1033,26 +1010,15 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event if ( videoStore ) { if ( record_audio ) { if ( have_video_keyframe ) { - - AVPacket out_packet; - av_init_packet(&out_packet); - if ( zm_av_packet_ref( &out_packet, &packet ) < 0 ) { - Error("error refing packet"); - } - out_packet.pts = av_rescale_q(out_packet.pts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q); - out_packet.dts = av_rescale_q(out_packet.dts, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q); - out_packet.duration = av_rescale_q(out_packet.duration, mFormatContext->streams[packet.stream_index]->time_base, AV_TIME_BASE_Q); - - Debug(3, "Recording audio packet streamindex(%d) packetstreamindex(%d)", mAudioStreamId, packet.stream_index); - //Write the packet to our video store - //FIXME no relevance of last key frame - int ret = videoStore->writeAudioFramePacket(&out_packet); - zm_av_packet_unref(&out_packet); - if ( ret < 0 ) {//Less than zero and we skipped a frame - Warning("Failure to write audio packet."); - zm_av_packet_unref( &packet ); - return 0; - } + Debug(3, "Recording audio packet streamindex(%d) packetstreamindex(%d)", mAudioStreamId, packet.stream_index ); + //Write the packet to our video store + //FIXME no relevance of last key frame + int ret = videoStore->writeAudioFramePacket( &packet ); + if ( ret < 0 ) {//Less than zero and we skipped a frame + Warning("Failure to write audio packet."); + zm_av_packet_unref( &packet ); + return 0; + } } else { Debug(3, "Not recording audio yet because we don't have a video keyframe yet"); } diff --git a/src/zm_packet.cpp b/src/zm_packet.cpp index a89a557d2..a04282d26 100644 --- a/src/zm_packet.cpp +++ b/src/zm_packet.cpp @@ -24,29 +24,23 @@ using namespace std; -ZMPacket::ZMPacket( AVPacket *p, AVStream 
*stream ) { +ZMPacket::ZMPacket( AVPacket *p ) { frame = NULL; image = NULL; av_init_packet( &packet ); if ( zm_av_packet_ref( &packet, p ) < 0 ) { Error("error refing packet"); } - packet.pts = av_rescale_q(packet.pts, stream->time_base, AV_TIME_BASE_Q); - packet.dts = av_rescale_q(packet.dts, stream->time_base, AV_TIME_BASE_Q); - packet.duration = av_rescale_q(packet.duration, stream->time_base, AV_TIME_BASE_Q); gettimeofday( ×tamp, NULL ); } -ZMPacket::ZMPacket( AVPacket *p, AVStream *stream, struct timeval *t ) { +ZMPacket::ZMPacket( AVPacket *p, struct timeval *t ) { frame = NULL; image = NULL; av_init_packet( &packet ); if ( zm_av_packet_ref( &packet, p ) < 0 ) { Error("error refing packet"); } - packet.pts = av_rescale_q(packet.pts, stream->time_base, AV_TIME_BASE_Q); - packet.dts = av_rescale_q(packet.pts, stream->time_base, AV_TIME_BASE_Q); - packet.duration = av_rescale_q(packet.pts, stream->time_base, AV_TIME_BASE_Q); timestamp = *t; } diff --git a/src/zm_packet.h b/src/zm_packet.h index 608d1b1e9..bdb67cb57 100644 --- a/src/zm_packet.h +++ b/src/zm_packet.h @@ -38,8 +38,8 @@ class ZMPacket { struct timeval timestamp; public: AVPacket *av_packet() { return &packet; } - ZMPacket( AVPacket *packet, AVStream *stream, struct timeval *timestamp ); - explicit ZMPacket( AVPacket *packet, AVStream * ); + ZMPacket( AVPacket *packet, struct timeval *timestamp ); + explicit ZMPacket( AVPacket *packet ); ~ZMPacket(); }; diff --git a/src/zm_packetqueue.cpp b/src/zm_packetqueue.cpp index b39b612cf..11f24c2a2 100644 --- a/src/zm_packetqueue.cpp +++ b/src/zm_packetqueue.cpp @@ -20,6 +20,7 @@ #include "zm_packetqueue.h" #include "zm_ffmpeg.h" #include +#include "zm_time.h" zm_packetqueue::zm_packetqueue( int p_max_stream_id ) { max_stream_id = p_max_stream_id; @@ -48,6 +49,7 @@ bool zm_packetqueue::queuePacket(ZMPacket* zm_packet) { return true; } +#if 0 std::list::reverse_iterator it = pktQueue.rbegin(); // Scan through the queue looking for a packet for our stream with 
a dts <= ours. @@ -56,28 +58,31 @@ bool zm_packetqueue::queuePacket(ZMPacket* zm_packet) { Debug(2, "Looking at packet with stream index (%d) with dts %" PRId64, av_packet->stream_index, av_packet->dts); - if ( - //( av_packet->stream_index == zm_packet->packet.stream_index ) - //&& - ( av_packet->dts != AV_NOPTS_VALUE ) - && - ( av_packet->dts <= zm_packet->packet.dts) - ) { - Debug(2, "break packet with stream index (%d) with dts %" PRId64, - (*it)->packet.stream_index, (*it)->packet.dts); - break; + if ( av_packet->stream_index == zm_packet->packet.stream_index ) { + if ( + ( av_packet->dts != AV_NOPTS_VALUE ) + && + ( av_packet->dts <= zm_packet->packet.dts) + ) { + Debug(2, "break packet with stream index (%d) with dts %" PRId64, + (*it)->packet.stream_index, (*it)->packet.dts); + break; + } + } else { // Not same stream, compare timestamps + if ( tvDiffUsec(((*it)->timestamp, zm_packet->timestamp) ) <= 0 ) { + Debug(2, "break packet with stream index (%d) with dts %" PRId64, + (*it)->packet.stream_index, (*it)->packet.dts); + break; + } } it++; } // end while not the end of the queue if ( it != pktQueue.rend() ) { - Debug(2, "Found packet with stream index (%d) with dts %" PRId64, - (*it)->packet.stream_index, (*it)->packet.dts); - //it --; - //Debug(2, "Found packet with stream index (%d) with dts %" PRId64, - //(*it)->packet.stream_index, (*it)->packet.dts); + Debug(2, "Found packet with stream index (%d) with dts %" PRId64 " <= %" PRId64, + (*it)->packet.stream_index, (*it)->packet.dts, zm_packet->packet.dts); if ( it == pktQueue.rbegin() ) { - Debug(2, "Inserting packet with dts %" PRId64 " at end", zm_packet->packet.dts); + Debug(2,"Inserting packet with dts %" PRId64 " at end", zm_packet->packet.dts); // No dts value, can't so much with it pktQueue.push_back(zm_packet); packet_counts[zm_packet->packet.stream_index] += 1; @@ -85,31 +90,25 @@ bool zm_packetqueue::queuePacket(ZMPacket* zm_packet) { } // Convert to a forward iterator so that we can insert 
at end std::list::iterator f_it = it.base(); - Debug(2, "Insert packet with stream index (%d) with dts %" PRId64 " for dts %" PRId64, - (*f_it)->packet.stream_index, (*f_it)->packet.dts, zm_packet->packet.dts); - if ( f_it == pktQueue.end() ) { - Debug(2, "Pushing to end"); - pktQueue.push_back(zm_packet); - } else { - Debug(2, "Insert packet with stream index (%d) with dts %" PRId64 " for dts %" PRId64, - (*f_it)->packet.stream_index, (*f_it)->packet.dts, zm_packet->packet.dts); - pktQueue.insert(f_it, zm_packet); - } + Debug(2, "Insert packet before packet with stream index (%d) with dts %" PRId64 " for dts %" PRId64, + (*f_it)->packet.stream_index, (*f_it)->packet.dts, zm_packet->packet.dts); + + pktQueue.insert(f_it, zm_packet); packet_counts[zm_packet->packet.stream_index] += 1; return true; } - Debug(1,"Unable to find a spot for stream %d with dts %" PRId64 ". Sticking on front", + Debug(1,"Unable to insert packet for stream %d with dts %" PRId64 " into queue.", zm_packet->packet.stream_index, zm_packet->packet.dts); - // Must be before any packets in the queue. 
Stick it at the beginning - pktQueue.push_front(zm_packet); +#endif + pktQueue.push_back(zm_packet); packet_counts[zm_packet->packet.stream_index] += 1; return true; } // end bool zm_packetqueue::queuePacket(ZMPacket* zm_packet) -bool zm_packetqueue::queuePacket(AVPacket* av_packet, AVStream *stream) { - ZMPacket *zm_packet = new ZMPacket(av_packet, stream); +bool zm_packetqueue::queuePacket(AVPacket* av_packet) { + ZMPacket *zm_packet = new ZMPacket(av_packet); return queuePacket(zm_packet); } diff --git a/src/zm_packetqueue.h b/src/zm_packetqueue.h index 6e4b83b02..a02a51ade 100644 --- a/src/zm_packetqueue.h +++ b/src/zm_packetqueue.h @@ -33,9 +33,9 @@ class zm_packetqueue { public: zm_packetqueue(int max_stream_id); virtual ~zm_packetqueue(); - bool queuePacket(AVPacket* packet, AVStream *stream, struct timeval *timestamp); + bool queuePacket(AVPacket* packet, struct timeval *timestamp); bool queuePacket(ZMPacket* packet); - bool queuePacket(AVPacket* packet, AVStream *stream); + bool queuePacket(AVPacket* packet); ZMPacket * popPacket(); bool popVideoPacket(ZMPacket* packet); bool popAudioPacket(ZMPacket* packet); diff --git a/src/zm_videostore.cpp b/src/zm_videostore.cpp index 6f2bb3afc..1482884ab 100644 --- a/src/zm_videostore.cpp +++ b/src/zm_videostore.cpp @@ -285,8 +285,10 @@ VideoStore::VideoStore( video_last_pts = 0; video_last_dts = 0; video_first_pts = 0; video_first_dts = 0; + audio_first_pts = 0; + audio_first_dts = 0; audio_next_pts = 0; audio_next_dts = 0; @@ -862,23 +862,20 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) { if ( ipkt->duration != AV_NOPTS_VALUE ) { duration = av_rescale_q( ipkt->duration, - AV_TIME_BASE_Q, + video_in_stream->time_base, video_out_stream->time_base); - Debug(1, "duration from ipkt: pts(%" PRId64 ") - last_pts(%" PRId64 ") = (%" PRId64 ") => (%" PRId64 ") (%d/%d) (%d/%d)", - ipkt->pts, - video_last_pts, + Debug(1, "duration from ipkt: %" PRId64 ") => (%" PRId64 ") (%d/%d) (%d/%d)", ipkt->duration,
duration, - 1, - AV_TIME_BASE, + video_in_stream->time_base.num, + video_in_stream->time_base.den, video_out_stream->time_base.num, video_out_stream->time_base.den ); } else { duration = av_rescale_q( ipkt->pts - video_last_pts, - AV_TIME_BASE_Q, - //video_in_stream->time_base, + video_in_stream->time_base, video_out_stream->time_base); Debug(1, "duration calc: pts(%" PRId64 ") - last_pts(%" PRId64 ") = (%" PRId64 ") => (%" PRId64 ")", ipkt->pts, @@ -888,7 +885,7 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) { ); if ( duration <= 0 ) { // Why are we setting the duration to 1? - duration = ipkt->duration ? ipkt->duration : av_rescale_q(1, AV_TIME_BASE_Q, video_out_stream->time_base); + duration = ipkt->duration ? ipkt->duration : av_rescale_q(1,video_in_stream->time_base, video_out_stream->time_base); } } opkt.duration = duration; @@ -905,8 +902,7 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) { } else { opkt.pts = av_rescale_q( ipkt->pts - video_first_pts, - AV_TIME_BASE_Q, - //video_in_stream->time_base, + video_in_stream->time_base, video_out_stream->time_base ); } @@ -933,8 +929,7 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) { } else { opkt.dts = av_rescale_q( ipkt->dts - video_first_dts, - AV_TIME_BASE_Q, - //video_in_stream->time_base, + video_in_stream->time_base, video_out_stream->time_base ); Debug(3, "opkt.dts = %" PRId64 " from ipkt->dts(%" PRId64 ") - first_pts(%" PRId64 ")", @@ -952,11 +947,11 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) { opkt.dts = video_out_stream->cur_dts; } -# if 0 - if ( opkt.dts <= video_out_stream->cur_dts ) { +# if 1 + if ( opkt.dts < video_out_stream->cur_dts ) { Warning("Fixing non-monotonic dts/pts dts %" PRId64 " pts %" PRId64 " stream %" PRId64, opkt.dts, opkt.pts, video_out_stream->cur_dts); - opkt.dts = video_out_stream->cur_dts + 1; + opkt.dts = video_out_stream->cur_dts; if ( opkt.dts > opkt.pts ) { opkt.pts = opkt.dts; } @@ -1019,12 +1014,12 @@ int 
VideoStore::writeAudioFramePacket(AVPacket *ipkt) { zm_dump_frame(out_frame, "Out frame after resample"); // out_frame pts is in the input pkt pts... needs to be adjusted before sending to the encoder if ( out_frame->pts != AV_NOPTS_VALUE ) { - if ( !video_first_pts ) { - video_first_pts = out_frame->pts; - Debug(1, "No video_first_pts setting to %" PRId64, video_first_pts); + if ( !audio_first_pts ) { + audio_first_pts = out_frame->pts; + Debug(1, "No audio_first_pts, setting to %" PRId64, audio_first_pts); out_frame->pts = 0; } else { - out_frame->pts = out_frame->pts - video_first_pts; + out_frame->pts = out_frame->pts - audio_first_pts; zm_dump_frame(out_frame, "Out frame after pts adjustment"); } // @@ -1032,7 +1027,6 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) { // sending AV_NOPTS_VALUE doesn't really work but we seem to get it in ffmpeg 2.8 out_frame->pts = audio_next_pts; } - // We need to keep track of this due to resampling audio_next_pts = out_frame->pts + out_frame->nb_samples; av_init_packet(&opkt); @@ -1093,24 +1087,22 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) { if ( ipkt->duration && (ipkt->duration != AV_NOPTS_VALUE) ) { opkt.duration = av_rescale_q( ipkt->duration, - AV_TIME_BASE_Q, - //audio_in_stream->time_base, + audio_in_stream->time_base, audio_out_stream->time_base); } // Scale the PTS of the outgoing packet to be the correct time base if ( ipkt->pts != AV_NOPTS_VALUE ) { - if ( !video_first_pts ) { + if ( !audio_first_pts ) { opkt.pts = 0; - video_first_pts = ipkt->pts; + audio_first_pts = ipkt->pts; Debug(1, "No video_first_pts"); } else { opkt.pts = av_rescale_q( - ipkt->pts - video_first_pts, - AV_TIME_BASE_Q, - //audio_in_stream->time_base, + ipkt->pts - audio_first_pts, + audio_in_stream->time_base, audio_out_stream->time_base); Debug(2, "audio opkt.pts = %" PRId64 " from ipkt->pts(%" PRId64 ") - first_pts(%" PRId64 ")", - opkt.pts, ipkt->pts, video_first_pts); + opkt.pts, ipkt->pts, audio_first_pts); } }
else { Debug(2, "opkt.pts = undef"); @@ -1128,17 +1120,16 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) { opkt.dts = audio_next_dts + av_rescale_q( audio_in_stream->cur_dts - audio_last_dts, AV_TIME_BASE_Q, audio_out_stream->time_base); } #endif - if ( !video_first_dts ) { + if ( !audio_first_dts ) { opkt.dts = 0; - video_first_dts = ipkt->dts; + audio_first_dts = ipkt->dts; } else { opkt.dts = av_rescale_q( - ipkt->dts - video_first_dts, - AV_TIME_BASE_Q, - //audio_in_stream->time_base, + ipkt->dts - audio_first_dts, + audio_in_stream->time_base, audio_out_stream->time_base); Debug(2, "opkt.dts = %" PRId64 " from ipkt.dts(%" PRId64 ") - first_dts(%" PRId64 ")", - opkt.dts, ipkt->dts, video_first_dts); + opkt.dts, ipkt->dts, audio_first_dts); } audio_last_dts = ipkt->dts; } else { @@ -1185,7 +1176,6 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) { int VideoStore::resample_audio() { // Resample the in_frame into the audioSampleBuffer until we process the whole // decoded data. Note: pts does not survive resampling or converting - // if we ask for less samples than we input, convert_frame will buffer the remainder apparently #if defined(HAVE_LIBSWRESAMPLE) || defined(HAVE_LIBAVRESAMPLE) #if defined(HAVE_LIBSWRESAMPLE) Debug(2, "Converting %d to %d samples using swresample", @@ -1217,7 +1207,7 @@ int VideoStore::resample_audio() { audio_next_pts = out_frame->pts + out_frame->nb_samples; #endif - if ( (ret = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + out_frame->nb_samples)) < 0 ) { + if ((ret = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + out_frame->nb_samples)) < 0) { Error("Could not reallocate FIFO"); return 0; } @@ -1234,7 +1224,6 @@ int VideoStore::resample_audio() { // AAC requires 1024 samples per encode. Our input tends to be 160, so need to buffer them. 
if ( frame_size > av_audio_fifo_size(fifo) ) { - Debug(1, "Not enough samples in the fifo"); return 0; } @@ -1244,18 +1233,15 @@ int VideoStore::resample_audio() { } out_frame->nb_samples = frame_size; // resampling changes the duration because the timebase is 1/samples - out_frame->pkt_duration = out_frame->nb_samples; - // out_frame->sample_rate; if ( in_frame->pts != AV_NOPTS_VALUE ) { - //out_frame->pkt_duration = av_rescale_q( - //in_frame->pkt_duration, - //audio_in_stream->time_base, - //audio_out_stream->time_base); + out_frame->pkt_duration = av_rescale_q( + in_frame->pkt_duration, + audio_in_stream->time_base, + audio_out_stream->time_base); out_frame->pts = av_rescale_q( in_frame->pts, - AV_TIME_BASE_Q, - //audio_in_ctx->time_base, - audio_out_ctx->time_base); + audio_in_stream->time_base, + audio_out_stream->time_base); } #else #if defined(HAVE_LIBAVRESAMPLE)