clean out old hwdecode stuff. refactor common code out

This commit is contained in:
Isaac Connor
2019-06-25 14:11:59 -04:00
parent bfb7238edb
commit 2cbcaeebbc
4 changed files with 161 additions and 412 deletions

View File

@@ -505,7 +505,7 @@ bool is_audio_context( AVCodecContext *codec_context ) {
#endif
}
int zm_receive_frame( AVCodecContext *context, AVFrame *frame, AVPacket &packet ) {
int zm_receive_frame(AVCodecContext *context, AVFrame *frame, AVPacket &packet) {
int ret;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
if ( (ret = avcodec_send_packet(context, &packet)) < 0 ) {
@@ -514,28 +514,10 @@ int zm_receive_frame( AVCodecContext *context, AVFrame *frame, AVPacket &packet
return 0;
}
#if HAVE_AVUTIL_HWCONTEXT_H
if ( hwaccel ) {
if ( (ret = avcodec_receive_frame(context, hwFrame)) < 0 ) {
Error( "Unable to receive frame %d: %s, continuing", streams[packet.stream_index].frame_count,
av_make_error_string(ret).c_str() );
return 0;
}
if ( (ret = av_hwframe_transfer_data(frame, hwFrame, 0)) < 0 ) {
Error( "Unable to transfer frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count,
av_make_error_string(ret).c_str() );
return 0;
}
} else {
#endif
if ( (ret = avcodec_receive_frame(context, frame)) < 0 ) {
Error( "Unable to send packet %s, continuing", av_make_error_string(ret).c_str() );
return 0;
}
#if HAVE_AVUTIL_HWCONTEXT_H
if ( (ret = avcodec_receive_frame(context, frame)) < 0 ) {
Error( "Unable to send packet %s, continuing", av_make_error_string(ret).c_str() );
return 0;
}
#endif
# else
int frameComplete = 0;
while ( !frameComplete ) {
@@ -551,7 +533,7 @@ int zm_receive_frame( AVCodecContext *context, AVFrame *frame, AVPacket &packet
} // end while !frameComplete
#endif
return 1;
} // end int zm_receive_frame( AVCodecContext *context, AVFrame *frame, AVPacket &packet )
} // end int zm_receive_frame(AVCodecContext *context, AVFrame *frame, AVPacket &packet)
void dumpPacket(AVStream *stream, AVPacket *pkt, const char *text) {
char b[10240];

View File

@@ -26,7 +26,7 @@
extern "C" {
#include "libavutil/time.h"
#if HAVE_AVUTIL_HWCONTEXT_H
#if HAVE_LIBAVUTIL_HWCONTEXT_H
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#endif
@@ -42,6 +42,7 @@ extern "C" {
#endif
#if HAVE_LIBAVUTIL_HWCONTEXT_H
static enum AVPixelFormat hw_pix_fmt;
static enum AVPixelFormat get_hw_format(
AVCodecContext *ctx,
@@ -57,45 +58,6 @@ static enum AVPixelFormat get_hw_format(
Error("Failed to get HW surface format.");
return AV_PIX_FMT_NONE;
}
#if HAVE_AVUTIL_HWCONTEXT_H
static AVPixelFormat get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts) {
while (*pix_fmts != AV_PIX_FMT_NONE) {
if (*pix_fmts == AV_PIX_FMT_QSV) {
DecodeContext *decode = (DecodeContext *)avctx->opaque;
AVHWFramesContext *frames_ctx;
AVQSVFramesContext *frames_hwctx;
int ret;
/* create a pool of surfaces to be used by the decoder */
avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
if (!avctx->hw_frames_ctx)
return AV_PIX_FMT_NONE;
frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
frames_hwctx = (AVQSVFramesContext*)frames_ctx->hwctx;
frames_ctx->format = AV_PIX_FMT_QSV;
frames_ctx->sw_format = avctx->sw_pix_fmt;
frames_ctx->width = FFALIGN(avctx->coded_width, 32);
frames_ctx->height = FFALIGN(avctx->coded_height, 32);
frames_ctx->initial_pool_size = 32;
frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
if (ret < 0)
return AV_PIX_FMT_NONE;
return AV_PIX_FMT_QSV;
}
pix_fmts++;
}
Error("The QSV pixel format not offered in get_format()");
return AV_PIX_FMT_NONE;
}
#endif
FfmpegCamera::FfmpegCamera(
@@ -136,9 +98,6 @@ FfmpegCamera::FfmpegCamera(
}
hwaccel = false;
#if HAVE_AVUTIL_HWCONTEXT_H
decode = { NULL };
#endif
hwFrame = NULL;
mFormatContext = NULL;
@@ -154,7 +113,6 @@ FfmpegCamera::FfmpegCamera(
startTime = 0;
mCanCapture = false;
videoStore = NULL;
video_last_pts = 0;
have_video_keyframe = false;
packetqueue = NULL;
error_count = 0;
@@ -175,7 +133,7 @@ FfmpegCamera::FfmpegCamera(
} else {
Panic("Unexpected colours: %d",colours);
}
}
} // FfmpegCamera::FfmpegCamera
FfmpegCamera::~FfmpegCamera() {
@@ -214,8 +172,8 @@ int FfmpegCamera::PreCapture() {
return 0;
}
int FfmpegCamera::Capture( Image &image ) {
if ( ! mCanCapture ) {
int FfmpegCamera::Capture(Image &image) {
if ( !mCanCapture ) {
return -1;
}
@@ -233,112 +191,46 @@ int FfmpegCamera::Capture( Image &image ) {
// Check for Connection failure.
(avResult == -110)
) {
Info("Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, avResult, errbuf);
Info("Unable to read packet from stream %d: error %d \"%s\".",
packet.stream_index, avResult, errbuf);
} else {
Error("Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, avResult, errbuf);
Error("Unable to read packet from stream %d: error %d \"%s\".",
packet.stream_index, avResult, errbuf);
}
return -1;
}
bytes += packet.size;
int keyframe = packet.flags & AV_PKT_FLAG_KEY;
if ( keyframe )
have_video_keyframe = true;
Debug( 5, "Got packet from stream %d dts (%d) pts(%d)", packet.stream_index, packet.pts, packet.dts );
Debug(5, "Got packet from stream %d dts (%d) pts(%d)",
packet.stream_index, packet.pts, packet.dts);
// What about audio stream? Maybe someday we could do sound detection...
if ( ( packet.stream_index == mVideoStreamId ) && ( keyframe || have_video_keyframe ) ) {
int ret;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_send_packet( mVideoCodecContext, &packet );
int ret;
ret = zm_receive_frame(mVideoCodecContext, mRawFrame, packet);
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to send packet at frame %d: %s, continuing", frameCount, errbuf );
zm_av_packet_unref( &packet );
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error("Unable to get frame at frame %d: %s, continuing", frameCount, errbuf);
zm_av_packet_unref(&packet);
continue;
}
#if HAVE_AVUTIL_HWCONTEXT_H
if ( hwaccel ) {
ret = avcodec_receive_frame( mVideoCodecContext, hwFrame );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to send packet at frame %d: %s, continuing", frameCount, errbuf );
zm_av_packet_unref( &packet );
continue;
}
Debug(1, "transfering from hardware");
ret = av_hwframe_transfer_data(mRawFrame, hwFrame, 0);
if (ret < 0) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to transfer frame at frame %d: %s, continuing", frameCount, errbuf );
zm_av_packet_unref( &packet );
continue;
}
} else {
#endif
ret = avcodec_receive_frame( mVideoCodecContext, mRawFrame );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to send packet at frame %d: %s, continuing", frameCount, errbuf );
zm_av_packet_unref( &packet );
continue;
}
#if HAVE_AVUTIL_HWCONTEXT_H
}
#endif
frameComplete = 1;
# else
ret = zm_avcodec_decode_video( mVideoCodecContext, mRawFrame, &frameComplete, &packet );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to decode frame at frame %d: %s, continuing", frameCount, errbuf );
zm_av_packet_unref( &packet );
continue;
Debug(4, "Decoded video packet at frame %d", frameCount);
if ( transfer_to_image(image, mFrame, mRawFrame) < 0 ) {
zm_av_packet_unref(&packet);
return -1;
}
#endif
Debug( 4, "Decoded video packet at frame %d", frameCount );
if ( frameComplete ) {
Debug( 4, "Got frame %d", frameCount );
uint8_t* directbuffer;
/* Request a writeable buffer of the target image */
directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
if ( directbuffer == NULL ) {
Error("Failed requesting writeable buffer for the captured image.");
return -1;
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(mFrame->data, mFrame->linesize,
directbuffer, imagePixFormat, width, height, 1);
#else
avpicture_fill( (AVPicture *)mFrame, directbuffer,
imagePixFormat, width, height);
#endif
#if HAVE_LIBSWSCALE
if ( sws_scale(mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mVideoCodecContext->height, mFrame->data, mFrame->linesize) <= 0 ) {
Error("Unable to convert raw format %u to target format %u at frame %d", mVideoCodecContext->pix_fmt, imagePixFormat, frameCount);
return -1;
}
#else // HAVE_LIBSWSCALE
Fatal("You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras");
#endif // HAVE_LIBSWSCALE
frameCount++;
} // end if frameComplete
frameCount++;
} else {
Debug( 4, "Different stream_index %d", packet.stream_index );
Debug(4, "Different stream_index %d", packet.stream_index);
} // end if packet.stream_index == mVideoStreamId
bytes += packet.size;
zm_av_packet_unref( &packet );
zm_av_packet_unref(&packet);
} // end while ! frameComplete
return frameComplete ? 1 : 0;
} // FfmpegCamera::Capture
@@ -498,44 +390,6 @@ int FfmpegCamera::OpenFfmpeg() {
mVideoCodecContext->flags2 |= CODEC_FLAG2_FAST | CODEC_FLAG_LOW_DELAY;
#endif
#if HAVE_AVUTIL_HWCONTEXT_H
if ( mVideoCodecContext->codec_id == AV_CODEC_ID_H264 ) {
//vaapi_decoder = new VAAPIDecoder();
//mVideoCodecContext->opaque = vaapi_decoder;
//mVideoCodec = vaapi_decoder->openCodec( mVideoCodecContext );
if ( ! mVideoCodec ) {
// Try to open an hwaccel codec.
if ( (mVideoCodec = avcodec_find_decoder_by_name("h264_vaapi")) == NULL ) {
Debug(1, "Failed to find decoder (h264_vaapi)" );
} else {
Debug(1, "Success finding decoder (h264_vaapi)" );
}
}
if ( ! mVideoCodec ) {
// Try to open an hwaccel codec.
if ( (mVideoCodec = avcodec_find_decoder_by_name("h264_qsv")) == NULL ) {
Debug(1, "Failed to find decoder (h264_qsv)" );
} else {
Debug(1, "Success finding decoder (h264_qsv)" );
/* open the hardware device */
ret = av_hwdevice_ctx_create(&decode.hw_device_ref, AV_HWDEVICE_TYPE_QSV,
"auto", NULL, 0);
if (ret < 0) {
Error("Failed to open the hardware device");
mVideoCodec = NULL;
} else {
mVideoCodecContext->opaque = &decode;
mVideoCodecContext->get_format = get_format;
hwaccel = true;
hwFrame = zm_av_frame_alloc();
}
}
}
} // end if h264
#endif
AVHWAccel *first_hwaccel = av_hwaccel_next(NULL);
AVHWAccel *temp_hwaccel = first_hwaccel;
AVHWAccel *h264 = NULL;
@@ -988,7 +842,6 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
if ( have_video_keyframe || keyframe ) {
if ( videoStore ) {
//Write the packet to our video store
int ret = videoStore->writeVideoFramePacket(&packet);
if ( ret < 0 ) { //Less than zero and we skipped a frame
@@ -1001,174 +854,140 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
Debug(4, "about to decode video");
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_send_packet(mVideoCodecContext, &packet);
ret = zm_receive_frame(mVideoCodecContext, mRawFrame, packet);
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error("Unable to send packet at frame %d: %s, continuing", frameCount, errbuf);
Warning("Unable to receive frame %d: %s, continuing. error count is %s",
frameCount, errbuf, error_count);
error_count += 1;
if ( error_count > 100 ) {
Error("Error count over 100, going to close and re-open stream");
return -1;
}
zm_av_packet_unref(&packet);
continue;
}
#if HAVE_AVUTIL_HWCONTEXT_H
if ( hwaccel ) {
Debug(1, "Using hwaccel to decode");
ret = avcodec_receive_frame(mVideoCodecContext, hwFrame);
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error("Unable to send packet at frame %d: %s, continuing", frameCount, errbuf);
zm_av_packet_unref(&packet);
continue;
}
ret = av_hwframe_transfer_data(mRawFrame, hwFrame, 0);
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error("Unable to transfer frame at frame %d: %s, continuing", frameCount, errbuf);
zm_av_packet_unref(&packet);
continue;
}
} else {
#endif
Debug(1, "Decodingaccel to decode");
ret = avcodec_receive_frame(mVideoCodecContext, mRawFrame);
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Warning("Unable to receive frame %d: %s, continuing. error count is %s",
frameCount, errbuf, error_count);
error_count += 1;
if ( error_count > 100 ) {
Error("Error count over 100, going to close and re-open stream");
return -1;
}
zm_av_packet_unref(&packet);
continue;
}
if ( error_count > 0 ) error_count --;
zm_dump_video_frame(mRawFrame);
if ( mRawFrame->format == hw_pix_fmt ) {
/* retrieve data from GPU to CPU */
ret = av_hwframe_transfer_data(hwFrame, mRawFrame, 0);
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error("Unable to transfer frame at frame %d: %s, continuing", frameCount, errbuf);
zm_av_packet_unref(&packet);
continue;
}
Debug(1,"Success transfering");
zm_dump_video_frame(hwFrame);
hwFrame->pts = mRawFrame->pts;
input_frame = hwFrame;
} else {
input_frame = mRawFrame;
}
#if HAVE_AVUTIL_HWCONTEXT_H
}
#endif
frameComplete = 1;
# else
ret = zm_avcodec_decode_video(mVideoCodecContext, input_frame, &frameComplete, &packet);
if ( error_count > 0 ) error_count --;
zm_dump_video_frame(mRawFrame);
#if HAVE_LIBAVUTIL_HWCONTEXT_H
if ( mRawFrame->format == hw_pix_fmt ) {
/* retrieve data from GPU to CPU */
ret = av_hwframe_transfer_data(hwFrame, mRawFrame, 0);
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error("Unable to decode frame at frame %d: %s, continuing", frameCount, errbuf);
zm_av_packet_unref( &packet );
Error("Unable to transfer frame at frame %d: %s, continuing", frameCount, errbuf);
zm_av_packet_unref(&packet);
continue;
}
}
zm_dump_video_frame(hwFrame, "After hwtransfer");
hwFrame->pts = mRawFrame->pts;
input_frame = hwFrame;
} else {
#endif
input_frame = mRawFrame;
#if HAVE_LIBAVUTIL_HWCONTEXT_H
}
#endif
if ( frameComplete ) {
Debug(4, "Got frame %d", frameCount);
Debug(4, "Got frame %d", frameCount);
if ( transfer_to_image(image, mFrame, input_frame) < 0 ) {
zm_av_packet_unref(&packet);
return -1;
}
uint8_t* directbuffer;
/* Request a writeable buffer of the target image */
directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
if ( directbuffer == NULL ) {
Error("Failed requesting writeable buffer for the captured image.");
zm_av_packet_unref(&packet);
return -1;
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(mFrame->data, mFrame->linesize, directbuffer, imagePixFormat, width, height, 1);
#else
avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);
#endif
Debug(1,"swscale target format: %c%c%c%c %c%c%c%c",
(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff),
(mVideoCodecContext->pix_fmt)&0xff,
((mVideoCodecContext->pix_fmt>>8)&0xff),
((mVideoCodecContext->pix_fmt>>16)&0xff),
((mVideoCodecContext->pix_fmt>>24)&0xff)
);
if ( ! mConvertContext ) {
mConvertContext = sws_getContext(
input_frame->width,
input_frame->height,
(AVPixelFormat)input_frame->format,
width, height,
imagePixFormat, SWS_BICUBIC, NULL,
NULL, NULL);
if ( mConvertContext == NULL ) {
Error( "Unable to create conversion context for %s", mPath.c_str() );
return -1;
}
}
if ( sws_scale(mConvertContext, input_frame->data, input_frame->linesize,
0, mVideoCodecContext->height, mFrame->data, mFrame->linesize) <= 0 ) {
Error("Unable to convert raw format %u to target format %u at frame %d codec %u ",
input_frame->format,
imagePixFormat, frameCount,
mVideoCodecContext->pix_fmt
);
return -1;
}
frameCount++;
} else {
Debug( 3, "Not framecomplete after av_read_frame" );
} // end if frameComplete
frameComplete = 1;
frameCount++;
} else if ( packet.stream_index == mAudioStreamId ) { //FIXME best way to copy all other streams
frameComplete = 1;
if ( videoStore ) {
if ( record_audio ) {
if ( have_video_keyframe ) {
Debug(3, "Recording audio packet streamindex(%d) packetstreamindex(%d)", mAudioStreamId, packet.stream_index );
Debug(3, "Recording audio packet streamindex(%d) packetstreamindex(%d)",
mAudioStreamId, packet.stream_index);
//Write the packet to our video store
//FIXME no relevance of last key frame
int ret = videoStore->writeAudioFramePacket( &packet );
if ( ret < 0 ) {//Less than zero and we skipped a frame
Warning("Failure to write audio packet.");
zm_av_packet_unref( &packet );
zm_av_packet_unref(&packet);
return 0;
}
} else {
Debug(3, "Not recording audio yet because we don't have a video keyframe yet");
}
} else {
Debug(4, "Not doing recording of audio packet" );
Debug(4, "Not doing recording of audio packet");
}
} else {
Debug(4, "Have audio packet, but not recording atm" );
Debug(4, "Have audio packet, but not recording atm");
}
zm_av_packet_unref( &packet );
zm_av_packet_unref(&packet);
return 0;
} else {
#if LIBAVUTIL_VERSION_CHECK(56, 23, 0, 23, 0)
Debug( 3, "Some other stream index %d, %s", packet.stream_index, av_get_media_type_string( mFormatContext->streams[packet.stream_index]->codecpar->codec_type) );
Debug(3, "Some other stream index %d, %s",
packet.stream_index,
av_get_media_type_string(mFormatContext->streams[packet.stream_index]->codecpar->codec_type)
);
#else
Debug( 3, "Some other stream index %d", packet.stream_index );
Debug(3, "Some other stream index %d", packet.stream_index);
#endif
} // end if is video or audio or something else
// the packet contents are ref counted... when queuing, we allocate another packet and reference it with that one, so we should always need to unref here, which should not affect the queued version.
zm_av_packet_unref( &packet );
zm_av_packet_unref(&packet);
} // end while ! frameComplete
return frameCount;
} // end FfmpegCamera::CaptureAndRecord
// Convert a decoded AVFrame (input_frame) into the target Image's pixel
// format, writing directly into the Image's buffer via output_frame.
// Returns 0 on success, -1 on any failure (buffer request, context
// creation, or scaling). The sws conversion context is created lazily
// on first use and cached in mConvertContext.
// NOTE(review): sws_scale is passed mVideoCodecContext->height rather
// than input_frame->height — presumably always equal, but for a
// hw-transferred frame this may differ; confirm against callers.
int FfmpegCamera::transfer_to_image(Image &image, AVFrame *output_frame, AVFrame *input_frame) {
uint8_t* directbuffer;
/* Request a writeable buffer of the target image */
directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
if ( directbuffer == NULL ) {
Error("Failed requesting writeable buffer for the captured image.");
return -1;
}
// Point output_frame's plane pointers at the Image's own buffer so the
// scaled result lands directly in the Image with no extra copy.
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(output_frame->data, output_frame->linesize,
directbuffer, imagePixFormat, width, height, 1);
#else
// avpicture_fill is the pre-54.6 equivalent of av_image_fill_arrays.
avpicture_fill((AVPicture *)output_frame, directbuffer,
imagePixFormat, width, height);
#endif
#if HAVE_LIBSWSCALE
// Lazily create the conversion context; input geometry/format are taken
// from the frame itself, so this is only valid while those stay constant.
if ( ! mConvertContext ) {
mConvertContext = sws_getContext(
input_frame->width,
input_frame->height,
(AVPixelFormat)input_frame->format,
width, height,
imagePixFormat, SWS_BICUBIC, NULL,
NULL, NULL);
if ( mConvertContext == NULL ) {
Error("Unable to create conversion context for %s", mPath.c_str());
return -1;
}
}
// sws_scale returns the height of the output slice; <= 0 means failure.
if ( sws_scale(mConvertContext, input_frame->data, input_frame->linesize,
0, mVideoCodecContext->height, output_frame->data, output_frame->linesize) <= 0 ) {
Error("Unable to convert raw format %u to target format %u at frame %d codec %u ",
input_frame->format,
imagePixFormat, frameCount,
mVideoCodecContext->pix_fmt
);
return -1;
}
#else // HAVE_LIBSWSCALE
Fatal("You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras");
#endif // HAVE_LIBSWSCALE
return 0;
} // end int FfmpegCamera::transfer_to_image(Image &i, AVFrame *output_frame, AVFrame input_frame)
int FfmpegCamera::FfmpegInterruptCallback(void *ctx) {
//FfmpegCamera* camera = reinterpret_cast<FfmpegCamera*>(ctx);
//Debug(4, "FfmpegInterruptCallback");

View File

@@ -27,7 +27,7 @@
#include "zm_videostore.h"
#include "zm_packetqueue.h"
#if HAVE_AVUTIL_HWCONTEXT_H
#if HAVE_LIBAVUTIL_HWCONTEXT_H
typedef struct DecodeContext {
AVBufferRef *hw_device_ref;
} DecodeContext;
@@ -59,17 +59,11 @@ class FfmpegCamera : public Camera {
bool hwaccel;
AVFrame *hwFrame;
#if HAVE_AVUTIL_HWCONTEXT_H
#if HAVE_LIBAVUTIL_HWCONTEXT_H
DecodeContext decode;
#endif
AVBufferRef *hw_device_ctx = NULL;
// Need to keep track of these because apparently the stream can start with values for pts/dts and then subsequent packets start at zero.
int64_t audio_last_pts;
int64_t audio_last_dts;
int64_t video_last_pts;
int64_t video_last_dts;
// Used to store the incoming packet, it will get copied when queued.
// We only ever need one at a time, so instead of constantly allocating
// and freeing this structure, we will just make it a member of the object.
@@ -110,5 +104,6 @@ class FfmpegCamera : public Camera {
int PostCapture();
private:
static int FfmpegInterruptCallback(void*ctx);
int transfer_to_image(Image &i, AVFrame *output_frame, AVFrame *input_frame);
};
#endif // ZM_FFMPEG_CAMERA_H

View File

@@ -20,18 +20,18 @@ FFmpeg_Input::~FFmpeg_Input() {
}
}
int FFmpeg_Input::Open( const char *filepath ) {
int FFmpeg_Input::Open(const char *filepath) {
int error;
/** Open the input file to read from it. */
if ( (error = avformat_open_input( &input_format_context, filepath, NULL, NULL)) < 0 ) {
error = avformat_open_input(&input_format_context, filepath, NULL, NULL);
if ( error < 0 ) {
Error("Could not open input file '%s' (error '%s')\n",
filepath, av_make_error_string(error).c_str() );
input_format_context = NULL;
return error;
}
}
/** Get information on the input file (number of streams etc.). */
if ( (error = avformat_find_stream_info(input_format_context, NULL)) < 0 ) {
@@ -44,23 +44,23 @@ int FFmpeg_Input::Open( const char *filepath ) {
}
streams = new stream[input_format_context->nb_streams];
Debug(2,"Have %d streams", input_format_context->nb_streams);
Debug(2, "Have %d streams", input_format_context->nb_streams);
for ( unsigned int i = 0; i < input_format_context->nb_streams; i += 1 ) {
if ( is_video_stream( input_format_context->streams[i] ) ) {
if ( is_video_stream(input_format_context->streams[i]) ) {
zm_dump_stream_format(input_format_context, i, 0, 0);
if ( video_stream_id == -1 ) {
video_stream_id = i;
// if we break, then we won't find the audio stream
} else {
Warning( "Have another video stream." );
Warning("Have another video stream.");
}
} else if ( is_audio_stream( input_format_context->streams[i] ) ) {
} else if ( is_audio_stream(input_format_context->streams[i]) ) {
if ( audio_stream_id == -1 ) {
Debug(2,"Audio stream is %d", i);
Debug(2, "Audio stream is %d", i);
audio_stream_id = i;
} else {
Warning( "Have another audio stream." );
Warning("Have another audio stream.");
}
} else {
Warning("Unknown stream type");
@@ -68,25 +68,26 @@ int FFmpeg_Input::Open( const char *filepath ) {
streams[i].frame_count = 0;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
streams[i].context = avcodec_alloc_context3( NULL );
avcodec_parameters_to_context( streams[i].context, input_format_context->streams[i]->codecpar );
streams[i].context = avcodec_alloc_context3(NULL);
avcodec_parameters_to_context(streams[i].context, input_format_context->streams[i]->codecpar);
#else
streams[i].context = input_format_context->streams[i]->codec;
#endif
if ( !(streams[i].codec = avcodec_find_decoder(streams[i].context->codec_id)) ) {
Error( "Could not find input codec\n");
Error("Could not find input codec");
avformat_close_input(&input_format_context);
return AVERROR_EXIT;
} else {
Debug(1, "Using codec (%s) for stream %d", streams[i].codec->name, i );
Debug(1, "Using codec (%s) for stream %d", streams[i].codec->name, i);
}
if ((error = avcodec_open2( streams[i].context, streams[i].codec, NULL)) < 0) {
Error( "Could not open input codec (error '%s')\n",
av_make_error_string(error).c_str() );
error = avcodec_open2(streams[i].context, streams[i].codec, NULL);
if ( error < 0 ) {
Error("Could not open input codec (error '%s')",
av_make_error_string(error).c_str());
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
avcodec_free_context( &streams[i].context );
avcodec_free_context(&streams[i].context);
#endif
avformat_close_input(&input_format_context);
return error;
@@ -94,14 +95,14 @@ int FFmpeg_Input::Open( const char *filepath ) {
} // end foreach stream
if ( video_stream_id == -1 )
Error( "Unable to locate video stream in %s", filepath );
Error("Unable to locate video stream in %s", filepath);
if ( audio_stream_id == -1 )
Debug( 3, "Unable to locate audio stream in %s", filepath );
Debug(3, "Unable to locate audio stream in %s", filepath);
return 0;
} // end int FFmpeg_Input::Open( const char * filepath )
AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
AVFrame *FFmpeg_Input::get_frame(int stream_id) {
Debug(1, "Getting frame from stream %d", stream_id);
int frameComplete = false;
@@ -119,85 +120,38 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
// Check for Connection failure.
(ret == -110)
) {
Info( "av_read_frame returned %s.", errbuf );
Info("av_read_frame returned %s.", errbuf);
return NULL;
}
Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, ret, errbuf );
Error("Unable to read packet from stream %d: error %d \"%s\".",
packet.stream_index, ret, errbuf);
return NULL;
}
dumpPacket(input_format_context->streams[packet.stream_index], &packet, "Received packet");
if ( (stream_id < 0) || (packet.stream_index == stream_id) ) {
Debug(3,"Packet is for our stream (%d)", packet.stream_index );
Debug(3, "Packet is for our stream (%d)", packet.stream_index);
AVCodecContext *context = streams[packet.stream_index].context;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_send_packet(context, &packet);
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error("Unable to send packet at frame %d: %s, continuing",
streams[packet.stream_index].frame_count, errbuf);
zm_av_packet_unref(&packet);
continue;
}
#if HAVE_AVUTIL_HWCONTEXT_H
if ( hwaccel ) {
ret = avcodec_receive_frame( context, hwFrame );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to receive frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
continue;
}
ret = av_hwframe_transfer_data(frame, hwFrame, 0);
if (ret < 0) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to transfer frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref(&packet);
continue;
}
} else {
#endif
if ( frame ) {
av_frame_free(&frame);
frame = zm_av_frame_alloc();
} else {
frame = zm_av_frame_alloc();
}
//Debug(1,"Getting frame %d", streams[packet.stream_index].frame_count);
ret = avcodec_receive_frame(context, frame);
ret = zm_receive_frame(context, frame, packet);
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to send packet at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error("Unable to decode frame at frame %d: %s, continuing",
streams[packet.stream_index].frame_count, errbuf);
zm_av_packet_unref( &packet );
av_frame_free(&frame);
continue;
}
#if HAVE_AVUTIL_HWCONTEXT_H
}
#endif
frameComplete = 1;
# else
if ( frame ) {
av_frame_free(&frame);
frame = zm_av_frame_alloc();
} else {
frame = zm_av_frame_alloc();
}
ret = zm_avcodec_decode_video(context, frame, &frameComplete, &packet);
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error( "Unable to decode frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
av_frame_free(&frame);
continue;
}
#endif
} // end if it's the right stream
frameComplete = 1;
} // end if it's the right stream
zm_av_packet_unref(&packet);
@@ -206,7 +160,7 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
} // end AVFrame *FFmpeg_Input::get_frame
AVFrame *FFmpeg_Input::get_frame( int stream_id, double at ) {
AVFrame *FFmpeg_Input::get_frame(int stream_id, double at) {
Debug(1, "Getting frame from stream %d at %f", stream_id, at);
int64_t seek_target = (int64_t)(at * AV_TIME_BASE);
@@ -218,9 +172,8 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id, double at ) {
if ( !frame ) {
// Don't have a frame yet, so get a keyframe before the timestamp
if ( ( ret = av_seek_frame(
input_format_context, stream_id, seek_target, AVSEEK_FLAG_FRAME
) < 0 ) ) {
ret = av_seek_frame(input_format_context, stream_id, seek_target, AVSEEK_FLAG_FRAME);
if ( ret < 0 ) {
Error("Unable to seek in stream");
return NULL;
}
@@ -246,7 +199,7 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id, double at ) {
if ( frame->pts <= seek_target ) {
zm_dump_frame(frame, "pts <= seek_target");
while ( frame && (frame->pts < seek_target) ) {
if ( ! get_frame(stream_id) )
if ( !get_frame(stream_id) )
return frame;
}
return frame;