diff --git a/.cirrus.yml b/.cirrus.yml index 3f2182f9c..4b9f1b055 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -2,7 +2,7 @@ task: name: freebsd-build freebsd_instance: matrix: - - image_family: freebsd-12-2 + - image_family: freebsd-12-3 - image_family: freebsd-13-0 prepare_script: diff --git a/.github/docker/centos7-gcc8-zm/Dockerfile b/.github/docker/centos7-gcc8-zm/Dockerfile new file mode 100644 index 000000000..80210ebb9 --- /dev/null +++ b/.github/docker/centos7-gcc8-zm/Dockerfile @@ -0,0 +1,11 @@ +FROM centos:7 + +LABEL name="centos7-gcc8-zm" \ + version="1" + +RUN yum -y install https://mirrors.rpmfusion.org/free/el/rpmfusion-free-release-7.noarch.rpm https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ + yum -y install https://repo.ius.io/ius-release-el7.rpm && yum -y install git236 && \ + yum -y update && yum -y install make cmake3 gcc-c++ mariadb-devel ffmpeg-devel libcurl-devel vlc-devel libvncserver-devel libjpeg-turbo-devel "perl(Date::Manip)" "perl(DBD::mysql)" "perl(ExtUtils::MakeMaker)" "perl(Sys::Mmap)" "perl(Sys::Syslog)" "perl(LWP::UserAgent)" polkit-devel libjwt-devel && \ + yum -y install centos-release-scl-rh && \ + INSTALL_PKGS="devtoolset-8-gcc devtoolset-8-gcc-c++" && \ + yum -y install --setopt=tsflags=nodocs $INSTALL_PKGS diff --git a/.github/workflows/ci-centos-7.yml b/.github/workflows/ci-centos-7.yml index 1c1dad8e1..39973d05a 100644 --- a/.github/workflows/ci-centos-7.yml +++ b/.github/workflows/ci-centos-7.yml @@ -9,6 +9,7 @@ on: permissions: contents: read + packages: read jobs: build: @@ -22,21 +23,15 @@ jobs: - crypto_backend: gnutls jwt_backend: libjwt runs-on: ubuntu-latest - container: centos:7 + container: ghcr.io/dougnazar/centos7-gcc8-zm:latest steps: - - name: Enable RPMFusion and EPEL - run: yum -y install https://mirrors.rpmfusion.org/free/el/rpmfusion-free-release-7.noarch.rpm https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - - name: Install git - run: yum -y install 
https://repo.ius.io/ius-release-el7.rpm && yum -y install git224 - uses: actions/checkout@v3 with: submodules: recursive - - name: Install dependencies - run: yum -y update && yum -y install make cmake3 gcc-c++ mariadb-devel ffmpeg-devel libcurl-devel vlc-devel libvncserver-devel libjpeg-turbo-devel "perl(Date::Manip)" "perl(DBD::mysql)" "perl(ExtUtils::MakeMaker)" "perl(Sys::Mmap)" "perl(Sys::Syslog)" "perl(LWP::UserAgent)" polkit-devel libjwt-devel - name: Prepare run: mkdir build - name: Configure - run: cd build && cmake3 --version && cmake3 .. -DBUILD_MAN=0 -DENABLE_WERROR=1 -DZM_CRYPTO_BACKEND=${{ matrix.crypto_backend }} -DZM_JWT_BACKEND=${{ matrix.jwt_backend }} + run: source /usr/bin/scl_source enable devtoolset-8 && cd build && cmake3 --version && cmake3 .. -DBUILD_MAN=0 -DENABLE_WERROR=1 -DZM_CRYPTO_BACKEND=${{ matrix.crypto_backend }} -DZM_JWT_BACKEND=${{ matrix.jwt_backend }} - name: Build - run: cd build && make -j3 | grep --line-buffered -Ev '^(cp lib\/|Installing.+\.pm)' && (exit ${PIPESTATUS[0]}) + run: source /usr/bin/scl_source enable devtoolset-8 && cd build && make -j3 | grep --line-buffered -Ev '^(cp lib\/|Installing.+\.pm)' && (exit ${PIPESTATUS[0]}) diff --git a/.github/workflows/ci-stretch.yml b/.github/workflows/ci-stretch.yml deleted file mode 100644 index cb192074d..000000000 --- a/.github/workflows/ci-stretch.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: CI Debian Stretch - -on: - push: - branches: - - '*' - pull_request: - branches: [ master ] - -permissions: - contents: read - -jobs: - build: - defaults: - run: - shell: bash - runs-on: ubuntu-latest - container: debian:stretch-backports - - steps: - - name: Update packages - run: apt-get -qq update && apt-get -qq upgrade - - name: Install git - run: apt-get -qq install git/stretch-backports git-man/stretch-backports - - uses: actions/checkout@v3 - with: - submodules: recursive - - name: Install dependencies - run: > - apt-get -qq install make cmake g++ - default-libmysqlclient-dev - 
libavcodec-dev libavformat-dev libavutil-dev libswresample-dev libswscale-dev libavdevice-dev - libcurl4-gnutls-dev libvlc-dev libvncserver-dev - libdate-manip-perl libdbd-mysql-perl libsys-mmap-perl libwww-perl - libpolkit-gobject-1-dev - libssl-dev - - name: Prepare - run: mkdir build - - name: Configure - run: cd build && cmake --version && cmake .. -DBUILD_MAN=0 -DENABLE_WERROR=1 - - name: Build - run: cd build && make -j3 | grep --line-buffered -Ev '^(cp lib\/|Installing.+\.pm)' && (exit ${PIPESTATUS[0]}) diff --git a/.github/workflows/create-packages.yml b/.github/workflows/create-packages.yml index f0d6083f3..5e7754e2d 100644 --- a/.github/workflows/create-packages.yml +++ b/.github/workflows/create-packages.yml @@ -3,8 +3,6 @@ name: Create packages on: push: branches: [ master ] - pull_request: - branches: [ master ] jobs: package: diff --git a/CMakeLists.txt b/CMakeLists.txt index 6a3c123f2..97dc6ae9a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -43,6 +43,8 @@ set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/") set (CMAKE_CXX_STANDARD 17) set (CMAKE_CXX_STANDARD_REQUIRED ON) +add_compile_options(-D_FILE_OFFSET_BITS=64) + include(ConfigureBaseTargets) include(CheckPlatform) @@ -88,6 +90,7 @@ mark_as_advanced( ZM_PATH_ARP_SCAN ZM_CONFIG_DIR ZM_CONFIG_SUBDIR + ZM_DETECT_SYSTEMD ZM_SYSTEMD ZM_MANPAGE_DEST_PREFIX) @@ -188,6 +191,8 @@ set(ZM_PERL_SEARCH_PATH "" CACHE PATH installed outside Perl's default search path.") set(ZM_TARGET_DISTRO "" CACHE STRING "Build ZoneMinder for a specific distribution. Currently, valid names are: fc27, fc26, el7, OS13, FreeBSD") +set(ZM_DETECT_SYSTEMD "ON" CACHE BOOL + "Set to OFF to disable detection of systemd. default: ON") set(ZM_SYSTEMD "OFF" CACHE BOOL "Set to ON to force building ZM with systemd support. 
default: OFF") set(ZM_MANPAGE_DEST_PREFIX "share/man" CACHE PATH @@ -270,7 +275,7 @@ set(CMAKE_EXTRA_INCLUDE_FILES ${CMAKE_EXTRA_INCLUDE_FILES} stdio.h stdlib.h math set_property(GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS ON) # Set the systemd flag if systemd is autodetected or ZM_SYSTEMD has been set -if(ZM_SYSTEMD OR (IS_DIRECTORY /usr/lib/systemd/system) OR (IS_DIRECTORY /lib/systemd/system)) +if(ZM_SYSTEMD OR (ZM_DETECT_SYSTEMD AND ((IS_DIRECTORY /usr/lib/systemd/system) OR (IS_DIRECTORY /lib/systemd/system)))) set(WITH_SYSTEMD 1) endif() diff --git a/db/monitors_dbupdate.sql b/db/monitors_dbupdate.sql new file mode 100644 index 000000000..ee76af60e --- /dev/null +++ b/db/monitors_dbupdate.sql @@ -0,0 +1 @@ +ALTER TABLE zm.Monitors ADD ONVIF_Alarm_Text varchar(30) DEFAULT 'MotionAlarm' NULL; diff --git a/db/zm_create.sql.in b/db/zm_create.sql.in index 30bf6db9f..da9544989 100644 --- a/db/zm_create.sql.in +++ b/db/zm_create.sql.in @@ -474,6 +474,7 @@ CREATE TABLE `Monitors` ( `ONVIF_Password` VARCHAR(64) NOT NULL DEFAULT '', `ONVIF_Options` VARCHAR(64) NOT NULL DEFAULT '', `ONVIF_Event_Listener` BOOLEAN NOT NULL DEFAULT FALSE, + `ONVIF_Alarm_Text` VARCHAR(30) DEFAULT 'MotionAlarm', `use_Amcrest_API` BOOLEAN NOT NULL DEFAULT FALSE, `Device` tinytext NOT NULL default '', `Channel` tinyint(3) unsigned NOT NULL default '0', diff --git a/db/zm_update-1.37.19.sql b/db/zm_update-1.37.19.sql new file mode 100644 index 000000000..ec8c61a8c --- /dev/null +++ b/db/zm_update-1.37.19.sql @@ -0,0 +1,18 @@ +-- +-- Update Monitors table to have ONVIF_Alarm_Text +-- + +SELECT 'Checking for ONVIF_Alarm_Text in Monitors'; +SET @s = (SELECT IF( + (SELECT COUNT(*) + FROM INFORMATION_SCHEMA.COLUMNS + WHERE table_name = 'Monitors' + AND table_schema = DATABASE() + AND column_name = 'ONVIF_Alarm_Text' + ) > 0, +"SELECT 'Column ONVIF_Alarm_Text already exists in Monitors'", +"ALTER TABLE Monitors ADD ONVIF_Alarm_Text varchar(30) DEFAULT 'MotionAlarm' AFTER `ONVIF_Event_Listener`" +)); 
+ +PREPARE stmt FROM @s; +EXECUTE stmt; diff --git a/dep/RtspServer b/dep/RtspServer index eab328514..1b40f1661 160000 --- a/dep/RtspServer +++ b/dep/RtspServer @@ -1 +1 @@ -Subproject commit eab32851421ffe54fec0229c3efc44c642bc8d46 +Subproject commit 1b40f1661f93f50fd5805f239d1e466a3bcf888f diff --git a/distros/redhat/zoneminder.spec b/distros/redhat/zoneminder.spec index 98b9bb581..b7a0ef948 100644 --- a/distros/redhat/zoneminder.spec +++ b/distros/redhat/zoneminder.spec @@ -37,7 +37,7 @@ %global _hardened_build 1 Name: zoneminder -Version: 1.37.18 +Version: 1.37.19 Release: 1%{?dist} Summary: A camera monitoring and analysis tool Group: System Environment/Daemons diff --git a/scripts/ZoneMinder/lib/ZoneMinder/Memory.pm.in b/scripts/ZoneMinder/lib/ZoneMinder/Memory.pm.in index 70960d36e..59e94fffa 100644 --- a/scripts/ZoneMinder/lib/ZoneMinder/Memory.pm.in +++ b/scripts/ZoneMinder/lib/ZoneMinder/Memory.pm.in @@ -165,12 +165,14 @@ our %mem_data = ( recording => { type=>'uint8', seq=>$mem_seq++ }, signal => { type=>'uint8', seq=>$mem_seq++ }, format => { type=>'uint8', seq=>$mem_seq++ }, + reserved1 => { type=>'uint8', seq=>$mem_seq++ }, + reserved2 => { type=>'uint8', seq=>$mem_seq++ }, imagesize => { type=>'uint32', seq=>$mem_seq++ }, last_frame_score => { type=>'uint32', seq=>$mem_seq++ }, audio_frequency => { type=>'uint32', seq=>$mem_seq++ }, audio_channels => { type=>'uint32', seq=>$mem_seq++ }, startup_time => { type=>'time_t64', seq=>$mem_seq++ }, - zmc_heartbeat_time => { type=>'time_t64', seq=>$mem_seq++ }, + heartbeat_time => { type=>'time_t64', seq=>$mem_seq++ }, last_write_time => { type=>'time_t64', seq=>$mem_seq++ }, last_read_time => { type=>'time_t64', seq=>$mem_seq++ }, last_viewed_time => { type=>'time_t64', seq=>$mem_seq++ }, diff --git a/scripts/zmwatch.pl.in b/scripts/zmwatch.pl.in index f585dd65e..bf5120e41 100644 --- a/scripts/zmwatch.pl.in +++ b/scripts/zmwatch.pl.in @@ -101,6 +101,17 @@ while (!$zm_terminate) { 
$monitor->control('restart'); next; } + + my $heartbeat_time = zmMemRead($monitor, 'shared_data:heartbeat_time'); + my $heartbeat_elapsed = $now-$heartbeat_time; + if ($heartbeat_elapsed > $Config{ZM_WATCH_MAX_DELAY}) { + Info("Restarting capture daemon for $monitor->{Id} $monitor->{Name}, $now - heartbeat time $heartbeat_time $heartbeat_elapsed > $Config{ZM_WATCH_MAX_DELAY}"); + $monitor->control('restart'); + next; + } else { + Debug("Monitor $monitor->{Id} $monitor->{Name}, heartbeat time $now - $heartbeat_time $heartbeat_elapsed < $Config{ZM_WATCH_MAX_DELAY}"); + } + next if $monitor->{Capturing} eq 'Ondemand'; next if $monitor->{Decoding} eq 'None' or $monitor->{Decoding} eq 'Ondemand'; diff --git a/src/zm_curl_camera.cpp b/src/zm_curl_camera.cpp index 27a213ed1..ddd7e52cb 100644 --- a/src/zm_curl_camera.cpp +++ b/src/zm_curl_camera.cpp @@ -323,7 +323,7 @@ int cURLCamera::Capture(std::shared_ptr &zm_packet) { } zm_packet->keyframe = 1; zm_packet->codec_type = AVMEDIA_TYPE_VIDEO; - zm_packet->packet.stream_index = mVideoStreamId; + zm_packet->packet->stream_index = mVideoStreamId; zm_packet->stream = mVideoStream; zm_packet->image->DecodeJpeg(databuffer.extract(frame_content_length), frame_content_length, colours, subpixelorder); frameComplete = true; @@ -351,7 +351,7 @@ int cURLCamera::Capture(std::shared_ptr &zm_packet) { } zm_packet->keyframe = 1; zm_packet->codec_type = AVMEDIA_TYPE_VIDEO; - zm_packet->packet.stream_index = mVideoStreamId; + zm_packet->packet->stream_index = mVideoStreamId; zm_packet->stream = mVideoStream; zm_packet->image->DecodeJpeg(databuffer.extract(single_offsets.front()), single_offsets.front(), colours, subpixelorder); single_offsets.pop_front(); diff --git a/src/zm_eventstream.cpp b/src/zm_eventstream.cpp index bec939187..9dafae3cd 100644 --- a/src/zm_eventstream.cpp +++ b/src/zm_eventstream.cpp @@ -734,9 +734,8 @@ bool EventStream::sendFrame(Microseconds delta_us) { FrameData *frame_data = &event_data->frames[curr_frame_id-1]; 
AVFrame *frame = ffmpeg_input->get_frame(ffmpeg_input->get_video_stream_id(), FPSeconds(frame_data->offset).count()); - if ( frame ) { + if (frame) { image = new Image(frame); - //av_frame_free(&frame); } else { Error("Failed getting a frame."); return false; @@ -1097,7 +1096,7 @@ bool EventStream::send_file(const std::string &filepath) { Info("File size is zero. Unable to send raw frame %d: %s", curr_frame_id, strerror(errno)); return false; } - if (0 > fprintf(stdout, "Content-Length: %jd\r\n\r\n", filestat.st_size)) { + if (0 > fprintf(stdout, "Content-Length: %jd\r\n\r\n", static_cast(filestat.st_size))) { fclose(fdj); /* Close the file handle */ Info("Unable to send raw frame %d: %s", curr_frame_id, strerror(errno)); return false; diff --git a/src/zm_ffmpeg.h b/src/zm_ffmpeg.h index 18090098f..cbb661e59 100644 --- a/src/zm_ffmpeg.h +++ b/src/zm_ffmpeg.h @@ -23,6 +23,8 @@ #include "zm_config.h" #include "zm_define.h" +#include + extern "C" { #include @@ -177,34 +179,34 @@ void zm_dump_codecpar(const AVCodecParameters *par); Debug(2, "%s: pts: %" PRId64 ", dts: %" PRId64 \ ", size: %d, stream_index: %d, flags: %04x, keyframe(%d) pos: %" PRId64 ", duration: %" AV_PACKET_DURATION_FMT, \ text,\ - pkt.pts,\ - pkt.dts,\ - pkt.size,\ - pkt.stream_index,\ - pkt.flags,\ - pkt.flags & AV_PKT_FLAG_KEY,\ - pkt.pos,\ - pkt.duration) + pkt->pts,\ + pkt->dts,\ + pkt->size,\ + pkt->stream_index,\ + pkt->flags,\ + pkt->flags & AV_PKT_FLAG_KEY,\ + pkt->pos,\ + pkt->duration) # define ZM_DUMP_STREAM_PACKET(stream, pkt, text) \ if (logDebugging()) { \ - double pts_time = static_cast(av_rescale_q(pkt.pts, stream->time_base, AV_TIME_BASE_Q)) / AV_TIME_BASE; \ + double pts_time = static_cast(av_rescale_q(pkt->pts, stream->time_base, AV_TIME_BASE_Q)) / AV_TIME_BASE; \ \ Debug(2, "%s: pts: %" PRId64 " * %u/%u=%f, dts: %" PRId64 \ ", size: %d, stream_index: %d, %s flags: %04x, keyframe(%d) pos: %" PRId64", duration: %" AV_PACKET_DURATION_FMT, \ text, \ - pkt.pts, \ + pkt->pts, \ 
stream->time_base.num, \ stream->time_base.den, \ pts_time, \ - pkt.dts, \ - pkt.size, \ - pkt.stream_index, \ + pkt->dts, \ + pkt->size, \ + pkt->stream_index, \ av_get_media_type_string(CODEC_TYPE(stream)), \ - pkt.flags, \ - pkt.flags & AV_PKT_FLAG_KEY, \ - pkt.pos, \ - pkt.duration); \ + pkt->flags, \ + pkt->flags & AV_PKT_FLAG_KEY, \ + pkt->pos, \ + pkt->duration); \ } #else @@ -238,4 +240,58 @@ int zm_resample_get_delay(SwrContext *resample_ctx, int time_base); int zm_add_samples_to_fifo(AVAudioFifo *fifo, AVFrame *frame); int zm_get_samples_from_fifo(AVAudioFifo *fifo, AVFrame *frame); +struct zm_free_av_packet +{ + void operator()(AVPacket *pkt) const + { + av_packet_free(&pkt); + } +}; + +using av_packet_ptr = std::unique_ptr; + +struct av_packet_guard +{ + av_packet_guard() : packet{nullptr} + { + } + explicit av_packet_guard(const av_packet_ptr& p) : packet{p.get()} + { + } + explicit av_packet_guard(AVPacket *p) : packet{p} + { + } + ~av_packet_guard() + { + if (packet) + av_packet_unref(packet); + } + + void acquire(const av_packet_ptr& p) + { + packet = p.get(); + } + void acquire(AVPacket *p) + { + packet = p; + } + void release() + { + packet = nullptr; + } + +private: + AVPacket *packet; +}; + +struct zm_free_av_frame +{ + void operator()(AVFrame *frame) const + { + av_frame_free(&frame); + } +}; + +using av_frame_ptr = std::unique_ptr; + #endif // ZM_FFMPEG_H diff --git a/src/zm_ffmpeg_camera.cpp b/src/zm_ffmpeg_camera.cpp index 693fa13a4..b8e284951 100644 --- a/src/zm_ffmpeg_camera.cpp +++ b/src/zm_ffmpeg_camera.cpp @@ -152,6 +152,7 @@ FfmpegCamera::FfmpegCamera( Panic("Unexpected colours: %d", colours); } + packet = av_packet_ptr{av_packet_alloc()}; } // FfmpegCamera::FfmpegCamera FfmpegCamera::~FfmpegCamera() { @@ -204,7 +205,7 @@ int FfmpegCamera::Capture(std::shared_ptr &zm_packet) { ); } - if ((ret = av_read_frame(formatContextPtr, &packet)) < 0) { + if ((ret = av_read_frame(formatContextPtr, packet.get())) < 0) { if ( // Check if EOF. 
(ret == AVERROR_EOF || (formatContextPtr->pb && formatContextPtr->pb->eof_reached)) || @@ -212,37 +213,38 @@ int FfmpegCamera::Capture(std::shared_ptr &zm_packet) { (ret == -110) ) { Info("Unable to read packet from stream %d: error %d \"%s\".", - packet.stream_index, ret, av_make_error_string(ret).c_str()); + packet->stream_index, ret, av_make_error_string(ret).c_str()); } else { Error("Unable to read packet from stream %d: error %d \"%s\".", - packet.stream_index, ret, av_make_error_string(ret).c_str()); + packet->stream_index, ret, av_make_error_string(ret).c_str()); } return -1; } - AVStream *stream = formatContextPtr->streams[packet.stream_index]; + av_packet_guard pkt_guard{packet}; + + AVStream *stream = formatContextPtr->streams[packet->stream_index]; ZM_DUMP_STREAM_PACKET(stream, packet, "ffmpeg_camera in"); zm_packet->codec_type = stream->codecpar->codec_type; - bytes += packet.size; - zm_packet->set_packet(&packet); + bytes += packet->size; + zm_packet->set_packet(packet.get()); zm_packet->stream = stream; - zm_packet->pts = av_rescale_q(packet.pts, stream->time_base, AV_TIME_BASE_Q); - if (packet.pts != AV_NOPTS_VALUE) { + zm_packet->pts = av_rescale_q(packet->pts, stream->time_base, AV_TIME_BASE_Q); + if (packet->pts != AV_NOPTS_VALUE) { if (stream == mVideoStream) { if (mFirstVideoPTS == AV_NOPTS_VALUE) - mFirstVideoPTS = packet.pts; + mFirstVideoPTS = packet->pts; - mLastVideoPTS = packet.pts - mFirstVideoPTS; + mLastVideoPTS = packet->pts - mFirstVideoPTS; } else { if (mFirstAudioPTS == AV_NOPTS_VALUE) - mFirstAudioPTS = packet.pts; + mFirstAudioPTS = packet->pts; - mLastAudioPTS = packet.pts - mFirstAudioPTS; + mLastAudioPTS = packet->pts - mFirstAudioPTS; } } - zm_av_packet_unref(&packet); return 1; } // FfmpegCamera::Capture diff --git a/src/zm_ffmpeg_camera.h b/src/zm_ffmpeg_camera.h index 42b716b03..289695b36 100644 --- a/src/zm_ffmpeg_camera.h +++ b/src/zm_ffmpeg_camera.h @@ -58,7 +58,7 @@ class FfmpegCamera : public Camera { // Used to store 
the incoming packet, it will get copied when queued. // We only ever need one at a time, so instead of constantly allocating // and freeing this structure, we will just make it a member of the object. - AVPacket packet; + av_packet_ptr packet; int OpenFfmpeg(); int Close() override; diff --git a/src/zm_ffmpeg_input.cpp b/src/zm_ffmpeg_input.cpp index 16da59f03..ab2429adb 100644 --- a/src/zm_ffmpeg_input.cpp +++ b/src/zm_ffmpeg_input.cpp @@ -9,7 +9,6 @@ FFmpeg_Input::FFmpeg_Input() { audio_stream_id = -1; FFMPEGInit(); streams = nullptr; - frame = nullptr; last_seek_request = -1; } @@ -17,10 +16,6 @@ FFmpeg_Input::~FFmpeg_Input() { if ( input_format_context ) { Close(); } - if ( frame ) { - av_frame_free(&frame); - frame = nullptr; - } } // end ~FFmpeg_Input() /* Takes streams provided from elsewhere. They might not come from the same source @@ -137,11 +132,15 @@ int FFmpeg_Input::Close( ) { AVFrame *FFmpeg_Input::get_frame(int stream_id) { int frameComplete = false; - AVPacket packet; - av_init_packet(&packet); + av_packet_ptr packet{av_packet_alloc()}; + + if (!packet) { + Error("Unable to allocate packet."); + return nullptr; + } while ( !frameComplete ) { - int ret = av_read_frame(input_format_context, &packet); + int ret = av_read_frame(input_format_context, packet.get()); if ( ret < 0 ) { if ( // Check if EOF. 
@@ -153,36 +152,36 @@ AVFrame *FFmpeg_Input::get_frame(int stream_id) { return nullptr; } Error("Unable to read packet from stream %d: error %d \"%s\".", - packet.stream_index, ret, av_make_error_string(ret).c_str()); + packet->stream_index, ret, av_make_error_string(ret).c_str()); return nullptr; } - ZM_DUMP_STREAM_PACKET(input_format_context->streams[packet.stream_index], packet, "Received packet"); + ZM_DUMP_STREAM_PACKET(input_format_context->streams[packet->stream_index], packet, "Received packet"); - if ( (stream_id >= 0) && (packet.stream_index != stream_id) ) { - Debug(1,"Packet is not for our stream (%d)", packet.stream_index ); + av_packet_guard pkt_guard{packet}; + + if ( (stream_id >= 0) && (packet->stream_index != stream_id) ) { + Debug(1,"Packet is not for our stream (%d)", packet->stream_index ); continue; } - AVCodecContext *context = streams[packet.stream_index].context; + AVCodecContext *context = streams[packet->stream_index].context; - if ( frame ) { - av_frame_free(&frame); - frame = zm_av_frame_alloc(); - } else { - frame = zm_av_frame_alloc(); + frame = av_frame_ptr{zm_av_frame_alloc()}; + if (!frame) { + Error("Unable to allocate frame."); + return nullptr; } - ret = zm_send_packet_receive_frame(context, frame, packet); + ret = zm_send_packet_receive_frame(context, frame.get(), *packet); if ( ret < 0 ) { Error("Unable to decode frame at frame %d: %d %s, continuing", - streams[packet.stream_index].frame_count, ret, av_make_error_string(ret).c_str()); - zm_av_packet_unref(&packet); - av_frame_free(&frame); + streams[packet->stream_index].frame_count, ret, av_make_error_string(ret).c_str()); + frame = nullptr; continue; } else { - if ( is_video_stream(input_format_context->streams[packet.stream_index]) ) { - zm_dump_video_frame(frame, "resulting video frame"); + if ( is_video_stream(input_format_context->streams[packet->stream_index]) ) { + zm_dump_video_frame(frame.get(), "resulting video frame"); } else { - zm_dump_frame(frame, "resulting 
frame"); + zm_dump_frame(frame.get(), "resulting frame"); } } @@ -194,9 +193,8 @@ AVFrame *FFmpeg_Input::get_frame(int stream_id) { input_format_context->streams[stream_id]->time_base ); - zm_av_packet_unref(&packet); } // end while !frameComplete - return frame; + return frame.get(); } // end AVFrame *FFmpeg_Input::get_frame AVFrame *FFmpeg_Input::get_frame(int stream_id, double at) { @@ -249,7 +247,7 @@ AVFrame *FFmpeg_Input::get_frame(int stream_id, double at) { } } else if ( last_seek_request == seek_target ) { // paused case, sending keepalives - return frame; + return frame.get(); } // end if frame->pts > seek_target last_seek_request = seek_target; @@ -275,11 +273,11 @@ AVFrame *FFmpeg_Input::get_frame(int stream_id, double at) { while ( frame && (frame->pts < seek_target) ) { if ( !get_frame(stream_id) ) { Warning("Got no frame. returning nothing"); - return frame; + return frame.get(); } } zm_dump_frame(frame, "frame->pts <= seek_target, got"); - return frame; + return frame.get(); } return get_frame(stream_id); diff --git a/src/zm_ffmpeg_input.h b/src/zm_ffmpeg_input.h index 3f2e5b7e9..d2143ee27 100644 --- a/src/zm_ffmpeg_input.h +++ b/src/zm_ffmpeg_input.h @@ -2,6 +2,7 @@ #define ZM_FFMPEG_INPUT_H #include "zm_define.h" +#include "zm_ffmpeg.h" extern "C" { #include @@ -49,7 +50,7 @@ class FFmpeg_Input { int video_stream_id; int audio_stream_id; AVFormatContext *input_format_context; - AVFrame *frame; + av_frame_ptr frame; int64_t last_seek_request; }; diff --git a/src/zm_ffmpeg_output.cpp b/src/zm_ffmpeg_output.cpp index ec677f265..2d7a7edc9 100644 --- a/src/zm_ffmpeg_output.cpp +++ b/src/zm_ffmpeg_output.cpp @@ -84,13 +84,22 @@ AVFrame *FFmpeg_Output::get_frame( int stream_id ) { Debug(1, "Getting frame from stream %d", stream_id ); int frameComplete = false; - AVPacket packet; - av_init_packet( &packet ); - AVFrame *frame = zm_av_frame_alloc(); + av_packet_ptr packet{av_packet_alloc()}; char errbuf[AV_ERROR_MAX_STRING_SIZE]; + if (!packet) { + 
Error("Unable to allocate packet."); + return nullptr; + } + + frame = av_frame_ptr{zm_av_frame_alloc()}; + if (!frame) { + Error("Unable to allocate frame."); + return nullptr; + } + while ( !frameComplete ) { - int ret = av_read_frame( input_format_context, &packet ); + int ret = av_read_frame( input_format_context, packet.get() ); if ( ret < 0 ) { av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE); if ( @@ -102,20 +111,21 @@ AVFrame *FFmpeg_Output::get_frame( int stream_id ) { Info( "av_read_frame returned %s.", errbuf ); return NULL; } - Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, ret, errbuf ); + Error( "Unable to read packet from stream %d: error %d \"%s\".", packet->stream_index, ret, errbuf ); return NULL; } - if ( (stream_id < 0 ) || ( packet.stream_index == stream_id ) ) { - Debug(1,"Packet is for our stream (%d)", packet.stream_index ); + av_packet_guard pkt_guard{packet}; - AVCodecContext *context = streams[packet.stream_index].context; + if ( (stream_id < 0 ) || ( packet->stream_index == stream_id ) ) { + Debug(1,"Packet is for our stream (%d)", packet->stream_index ); - ret = avcodec_send_packet( context, &packet ); + AVCodecContext *context = streams[packet->stream_index].context; + + ret = avcodec_send_packet( context, packet.get() ); if ( ret < 0 ) { av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE ); - Error( "Unable to send packet at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf ); - zm_av_packet_unref( &packet ); + Error( "Unable to send packet at frame %d: %s, continuing", streams[packet->stream_index].frame_count, errbuf ); continue; } else { Debug(1, "Success getting a packet"); @@ -126,25 +136,22 @@ AVFrame *FFmpeg_Output::get_frame( int stream_id ) { ret = avcodec_receive_frame( context, hwFrame ); if ( ret < 0 ) { av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE ); - Error( "Unable to receive frame %d: %s, continuing", streams[packet.stream_index].frame_count, 
errbuf ); - zm_av_packet_unref( &packet ); + Error( "Unable to receive frame %d: %s, continuing", streams[packet->stream_index].frame_count, errbuf ); continue; } ret = av_hwframe_transfer_data(frame, hwFrame, 0); if (ret < 0) { av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE ); - Error( "Unable to transfer frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf ); - zm_av_packet_unref( &packet ); + Error( "Unable to transfer frame at frame %d: %s, continuing", streams[packet->stream_index].frame_count, errbuf ); continue; } } else { #endif Debug(1,"Getting a frame?"); - ret = avcodec_receive_frame( context, frame ); + ret = avcodec_receive_frame( context, frame.get() ); if ( ret < 0 ) { av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE ); - Error( "Unable to send packet at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf ); - zm_av_packet_unref( &packet ); + Error( "Unable to send packet at frame %d: %s, continuing", streams[packet->stream_index].frame_count, errbuf ); continue; } @@ -155,9 +162,7 @@ AVFrame *FFmpeg_Output::get_frame( int stream_id ) { frameComplete = 1; } // end if it's the right stream - zm_av_packet_unref( &packet ); - } // end while ! 
frameComplete - return frame; + return frame.get(); } // end AVFrame *FFmpeg_Output::get_frame diff --git a/src/zm_ffmpeg_output.h b/src/zm_ffmpeg_output.h index 9ab4ea403..abeffa942 100644 --- a/src/zm_ffmpeg_output.h +++ b/src/zm_ffmpeg_output.h @@ -35,6 +35,7 @@ class FFmpeg_Output { int video_stream_id; int audio_stream_id; AVFormatContext *input_format_context; + av_frame_ptr frame; }; #endif diff --git a/src/zm_fifo.cpp b/src/zm_fifo.cpp index c7a4a608f..acdcbad10 100644 --- a/src/zm_fifo.cpp +++ b/src/zm_fifo.cpp @@ -99,9 +99,9 @@ bool Fifo::close() { bool Fifo::writePacket(const ZMPacket &packet) { if (!(outfile or open())) return false; - Debug(2, "Writing header ZM %u %" PRId64, packet.packet.size, packet.pts); + Debug(2, "Writing header ZM %u %" PRId64, packet.packet->size, packet.pts); // Going to write a brief header - if (fprintf(outfile, "ZM %u %" PRId64 "\n", packet.packet.size, packet.pts) < 0) { + if (fprintf(outfile, "ZM %u %" PRId64 "\n", packet.packet->size, packet.pts) < 0) { if (errno != EAGAIN) { Error("Problem during writing: %s", strerror(errno)); } else { @@ -110,7 +110,7 @@ bool Fifo::writePacket(const ZMPacket &packet) { return false; } - if (fwrite(packet.packet.data, packet.packet.size, 1, outfile) != 1) { + if (fwrite(packet.packet->data, packet.packet->size, 1, outfile) != 1) { Debug(1, "Unable to write to '%s': %s", path.c_str(), strerror(errno)); return false; } @@ -129,8 +129,8 @@ bool Fifo::writePacket(const std::string &filename, const ZMPacket &packet) { return false; } - Debug(4, "Writing packet of size %d pts %" PRId64, packet.packet.size, packet.pts); - if (fwrite(packet.packet.data, packet.packet.size, 1, outfile) != 1) { + Debug(4, "Writing packet of size %d pts %" PRId64, packet.packet->size, packet.pts); + if (fwrite(packet.packet->data, packet.packet->size, 1, outfile) != 1) { Debug(1, "Unable to write to '%s': %s", filename.c_str(), strerror(errno)); fclose(outfile); return false; diff --git a/src/zm_image.cpp 
b/src/zm_image.cpp index ab68de90d..65aec46f8 100644 --- a/src/zm_image.cpp +++ b/src/zm_image.cpp @@ -289,7 +289,11 @@ bool Image::Assign(const AVFrame *frame) { // Desired format AVPixelFormat format = (AVPixelFormat)AVPixFormat(); - AVFrame *dest_frame = zm_av_frame_alloc(); + av_frame_ptr dest_frame{zm_av_frame_alloc()}; + if (!dest_frame) { + Error("Unable to allocate destination frame"); + return false; + } sws_convert_context = sws_getCachedContext( sws_convert_context, frame->width, frame->height, (AVPixelFormat)frame->format, @@ -301,8 +305,7 @@ bool Image::Assign(const AVFrame *frame) { Error("Unable to create conversion context"); return false; } - bool result = Assign(frame, sws_convert_context, dest_frame); - av_frame_free(&dest_frame); + bool result = Assign(frame, sws_convert_context, dest_frame.get()); update_function_pointers(); return result; } // end Image::Assign(const AVFrame *frame) @@ -914,7 +917,7 @@ bool Image::ReadRaw(const char *filename) { if ( (unsigned int)statbuf.st_size != size ) { fclose(infile); - Error("Raw file size mismatch, expected %d bytes, found %ld", size, statbuf.st_size); + Error("Raw file size mismatch, expected %d bytes, found %jd", size, static_cast(statbuf.st_size)); return false; } diff --git a/src/zm_libvlc_camera.cpp b/src/zm_libvlc_camera.cpp index 95284074f..c1278df12 100644 --- a/src/zm_libvlc_camera.cpp +++ b/src/zm_libvlc_camera.cpp @@ -288,7 +288,7 @@ int LibvlcCamera::Capture(std::shared_ptr &zm_packet) { mLibvlcData.mutex.lock(); zm_packet->image->Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp); - zm_packet->packet.stream_index = mVideoStreamId; + zm_packet->packet->stream_index = mVideoStreamId; zm_packet->stream = mVideoStream; mLibvlcData.mutex.unlock(); diff --git a/src/zm_libvnc_camera.cpp b/src/zm_libvnc_camera.cpp index 3ff7804b6..9e63183bc 100644 --- a/src/zm_libvnc_camera.cpp +++ b/src/zm_libvnc_camera.cpp @@ -214,7 +214,7 @@ int 
VncCamera::Capture(std::shared_ptr &zm_packet) { } zm_packet->keyframe = 1; zm_packet->codec_type = AVMEDIA_TYPE_VIDEO; - zm_packet->packet.stream_index = mVideoStreamId; + zm_packet->packet->stream_index = mVideoStreamId; zm_packet->stream = mVideoStream; uint8_t *directbuffer = zm_packet->image->WriteBuffer(width, height, colours, subpixelorder); diff --git a/src/zm_local_camera.cpp b/src/zm_local_camera.cpp index 2bcfac905..5b31a89fc 100644 --- a/src/zm_local_camera.cpp +++ b/src/zm_local_camera.cpp @@ -219,7 +219,7 @@ int LocalCamera::vid_fd = -1; int LocalCamera::v4l_version = 0; LocalCamera::V4L2Data LocalCamera::v4l2_data; -AVFrame **LocalCamera::capturePictures = nullptr; +av_frame_ptr *LocalCamera::capturePictures; LocalCamera *LocalCamera::last_camera = nullptr; @@ -436,7 +436,7 @@ LocalCamera::LocalCamera( /* Initialize swscale stuff */ if (capture and (conversion_type == 1)) { - tmpPicture = av_frame_alloc(); + tmpPicture = av_frame_ptr{zm_av_frame_alloc()}; if (!tmpPicture) Fatal("Could not allocate temporary picture"); @@ -456,7 +456,6 @@ LocalCamera::LocalCamera( Fatal("Unable to initialise image scaling context"); } } else { - tmpPicture = nullptr; imgConversionContext = nullptr; } // end if capture and conversion_tye == swscale if (capture and device_prime) @@ -471,8 +470,6 @@ LocalCamera::~LocalCamera() { if (capture && (conversion_type == 1)) { sws_freeContext(imgConversionContext); imgConversionContext = nullptr; - - av_frame_free(&tmpPicture); } } // end LocalCamera::~LocalCamera @@ -658,7 +655,7 @@ void LocalCamera::Initialise() { channel_count, v4l_multi_buffer, v4l2_data.reqbufs.count); v4l2_data.buffers = new V4L2MappedBuffer[v4l2_data.reqbufs.count]; - capturePictures = new AVFrame *[v4l2_data.reqbufs.count]; + capturePictures = new av_frame_ptr[v4l2_data.reqbufs.count]; for (unsigned int i = 0; i < v4l2_data.reqbufs.count; i++) { struct v4l2_buffer vid_buf; @@ -681,7 +678,7 @@ void LocalCamera::Initialise() { Fatal("Can't map video buffer 
%u (%u bytes) to memory: %s(%d)", i, vid_buf.length, strerror(errno), errno); - capturePictures[i] = av_frame_alloc(); + capturePictures[i] = av_frame_ptr{zm_av_frame_alloc()}; if (!capturePictures[i]) Fatal("Could not allocate picture"); @@ -738,7 +735,7 @@ void LocalCamera::Terminate() { Debug(3, "Unmapping video buffers"); for ( unsigned int i = 0; i < v4l2_data.reqbufs.count; i++ ) { - av_frame_free(&capturePictures[i]); + capturePictures[i] = nullptr; if ( munmap(v4l2_data.buffers[i].start, v4l2_data.buffers[i].length) < 0 ) Error("Failed to munmap buffer %d: %s", i, strerror(errno)); @@ -1376,7 +1373,7 @@ int LocalCamera::Capture(std::shared_ptr &zm_packet) { zm_packet->image->Assign(width, height, colours, subpixelorder, buffer, imagesize); } // end if doing conversion or not - zm_packet->packet.stream_index = mVideoStreamId; + zm_packet->packet->stream_index = mVideoStreamId; zm_packet->stream = mVideoStream; zm_packet->codec_type = AVMEDIA_TYPE_VIDEO; zm_packet->keyframe = 1; diff --git a/src/zm_local_camera.h b/src/zm_local_camera.h index f702b816f..16ebe8b4b 100644 --- a/src/zm_local_camera.h +++ b/src/zm_local_camera.h @@ -73,11 +73,11 @@ protected: static V4L2Data v4l2_data; - static AVFrame **capturePictures; + static av_frame_ptr *capturePictures; _AVPIXELFORMAT imagePixFormat; _AVPIXELFORMAT capturePixFormat; struct SwsContext *imgConversionContext; - AVFrame *tmpPicture; + av_frame_ptr tmpPicture; static LocalCamera *last_camera; diff --git a/src/zm_logger.cpp b/src/zm_logger.cpp index f43ea6f30..4db535a78 100644 --- a/src/zm_logger.cpp +++ b/src/zm_logger.cpp @@ -82,7 +82,8 @@ Logger::Logger() : smSyslogPriorities[PANIC] = LOG_ERR; char code[4] = ""; - for (int i = DEBUG1; i <= DEBUG9; i++) { + // Extra comparison against DEBUG1 to ensure GCC knows we are printing a single byte. 
+ for (int i = DEBUG1; i>=DEBUG1 && i <= DEBUG9; i++) { snprintf(code, sizeof(code), "DB%d", i); smCodes[i] = code; smSyslogPriorities[i] = LOG_DEBUG; diff --git a/src/zm_monitor.cpp b/src/zm_monitor.cpp index 5a9a96bb9..420551071 100644 --- a/src/zm_monitor.cpp +++ b/src/zm_monitor.cpp @@ -95,10 +95,11 @@ std::string load_monitor_sql = "`ImageBufferCount`, `MaxImageBufferCount`, `WarmupCount`, `PreEventCount`, `PostEventCount`, `StreamReplayBuffer`, `AlarmFrameCount`, " "`SectionLength`, `MinSectionLength`, `FrameSkip`, `MotionFrameSkip`, " "`FPSReportInterval`, `RefBlendPerc`, `AlarmRefBlendPerc`, `TrackMotion`, `Exif`," -"`RTSPServer`, `RTSPStreamName`," +"`RTSPServer`, `RTSPStreamName`, `ONVIF_Alarm_Text`," "`ONVIF_URL`, `ONVIF_Username`, `ONVIF_Password`, `ONVIF_Options`, `ONVIF_Event_Listener`, `use_Amcrest_API`, " "`SignalCheckPoints`, `SignalCheckColour`, `Importance`-1, ZoneCount FROM `Monitors`"; + std::string CameraType_Strings[] = { "Unknown", "Local", @@ -229,6 +230,7 @@ Monitor::Monitor() embed_exif(false), rtsp_server(false), rtsp_streamname(""), + onvif_alarm_txt(""), importance(0), zone_count(0), capture_max_fps(0), @@ -273,7 +275,6 @@ Monitor::Monitor() analysis_thread(nullptr), decoder_it(nullptr), decoder(nullptr), - dest_frame(nullptr), convert_context(nullptr), //zones(nullptr), privacy_bitmask(nullptr), @@ -494,6 +495,9 @@ void Monitor::Load(MYSQL_ROW dbrow, bool load_zones=true, Purpose p = QUERY) { /* "`RTSPServer`,`RTSPStreamName`, */ rtsp_server = (*dbrow[col] != '0'); col++; rtsp_streamname = dbrow[col]; col++; +// get alarm text from table. + onvif_alarm_txt = std::string(dbrow[col] ? dbrow[col] : ""); col++; + /* "`ONVIF_URL`, `ONVIF_Username`, `ONVIF_Password`, `ONVIF_Options`, `ONVIF_Event_Listener`, `use_Amcrest_API`, " */ onvif_url = std::string(dbrow[col] ? 
dbrow[col] : ""); col++; @@ -771,7 +775,6 @@ std::shared_ptr Monitor::Load(unsigned int p_id, bool load_zones, Purpo } bool Monitor::connect() { - ReloadLinkedMonitors(); ReloadZones(); if (zones.size() != zone_count) { Warning("Monitor %d has incorrect zone_count %d != %zu", id, zone_count, zones.size()); @@ -802,7 +805,7 @@ bool Monitor::connect() { image_buffer_count, image_size, (image_buffer_count * image_size), - mem_size); + static_cast(mem_size)); #if ZM_MEM_MAPPED mem_file = stringtf("%s/zm.mmap.%u", staticConfig.PATH_MAP.c_str(), id); if (purpose != CAPTURE) { @@ -831,25 +834,25 @@ bool Monitor::connect() { if (purpose == CAPTURE) { // Allocate the size if (ftruncate(map_fd, mem_size) < 0) { - Error("Can't extend memory map file %s to %jd bytes: %s", mem_file.c_str(), mem_size, strerror(errno)); + Error("Can't extend memory map file %s to %jd bytes: %s", mem_file.c_str(), static_cast(mem_size), strerror(errno)); close(map_fd); map_fd = -1; return false; } } else if (map_stat.st_size == 0) { - Error("Got empty memory map file size %ld, is the zmc process for this monitor running?", map_stat.st_size); + Error("Got empty memory map file size %jd, is the zmc process for this monitor running?", static_cast(map_stat.st_size)); close(map_fd); map_fd = -1; return false; } else { - Error("Got unexpected memory map file size %ld, expected %jd", map_stat.st_size, static_cast(mem_size)); + Error("Got unexpected memory map file size %jd, expected %jd", static_cast(map_stat.st_size), static_cast(mem_size)); close(map_fd); map_fd = -1; return false; } } // end if map_stat.st_size != mem_size - Debug(3, "MMap file size is %ld", map_stat.st_size); + Debug(3, "MMap file size is %jd", static_cast(map_stat.st_size)); #ifdef MAP_LOCKED mem_ptr = (unsigned char *)mmap(nullptr, mem_size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_LOCKED, map_fd, 0); if (mem_ptr == MAP_FAILED) { @@ -911,7 +914,7 @@ bool Monitor::connect() { ); alarm_image.HoldBuffer(true); /* Don't release the 
internal buffer or replace it with another */ if (alarm_image.Buffer() + image_size > mem_ptr + mem_size) { - Warning("We will exceed memsize by %ld bytes!", alarm_image.Buffer() + image_size - (mem_ptr + mem_size)); + Warning("We will exceed memsize by %td bytes!", (alarm_image.Buffer() + image_size) - (mem_ptr + mem_size)); } if (purpose == CAPTURE) { @@ -956,6 +959,7 @@ bool Monitor::connect() { usedsubpixorder = camera->SubpixelOrder(); // Used in CheckSignal shared_data->valid = true; + ReloadLinkedMonitors(); //ONVIF and Amcrest Setup //For now, only support one event type per camera, so share some state. @@ -1103,8 +1107,6 @@ Monitor::~Monitor() { if (video_fifo) delete video_fifo; if (audio_fifo) delete audio_fifo; Debug(1, "Don fifo"); - if (dest_frame) av_frame_free(&dest_frame); - Debug(1, "Don fifo"); if (convert_context) { Debug(1, "Don fifo"); sws_freeContext(convert_context); @@ -1705,7 +1707,7 @@ bool Monitor::Poll() { Debug(1, "Got Good Response! %i", result); for (auto msg : tev__PullMessagesResponse.wsnt__NotificationMessage) { if (msg->Topic->__any.text != NULL && - std::strstr(msg->Topic->__any.text, "MotionAlarm") && + std::strstr(msg->Topic->__any.text, onvif_alarm_txt.c_str()) && msg->Message.__any.elts != NULL && msg->Message.__any.elts->next != NULL && msg->Message.__any.elts->next->elts != NULL && @@ -2018,7 +2020,7 @@ bool Monitor::Analyse() { ref_image.Blend(y_image, ( state==ALARM ? alarm_ref_blend_perc : ref_blend_perc )); } else if (snap->image) { Debug(1, "Blending full colour image because analysis_image = %d, in_frame=%p and format %d != %d, %d", - analysis_image, snap->in_frame, + analysis_image, snap->in_frame.get(), (snap->in_frame ? 
snap->in_frame->format : -1), AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P @@ -2065,7 +2067,7 @@ bool Monitor::Analyse() { (event_close_mode == CLOSE_ALARM)); } if ((!pre_event_count) || (Event::PreAlarmCount() >= alarm_frame_count-1)) { - Info("%s: %03d - Gone into alarm state PreAlarmCount: %u > AlarmFrameCount:%u Cause:%s", + Info("%s: %03d - ExtAlm - Gone into alarm state PreAlarmCount: %u > AlarmFrameCount:%u Cause:%s", name.c_str(), snap->image_index, Event::PreAlarmCount(), alarm_frame_count, cause.c_str()); shared_data->state = state = ALARM; @@ -2078,7 +2080,7 @@ bool Monitor::Analyse() { Debug(1, "%s: %03d - Alarmed frame while in alert state. Consecutive alarmed frames left to return to alarm state: %03d", name.c_str(), analysis_image_count, alert_to_alarm_frame_count); if (alert_to_alarm_frame_count == 0) { - Info("%s: %03d - Gone back into alarm state", name.c_str(), analysis_image_count); + Info("%s: %03d - ExtAlm - Gone back into alarm state Cause:ONVIF", name.c_str(), analysis_image_count); shared_data->state = state = ALARM; } } else if (state == TAPE) { @@ -2250,8 +2252,7 @@ bool Monitor::Analyse() { } } // Free up the decoded frame as well, we won't be using it for anything at this time. 
- if (snap->out_frame) av_frame_free(&snap->out_frame); - if (snap->buffer) av_freep(&snap->buffer); + snap->out_frame = nullptr; delete packet_lock; } @@ -2433,7 +2434,7 @@ int Monitor::Capture() { std::shared_ptr packet = std::make_shared(); packet->image_index = image_count; packet->timestamp = std::chrono::system_clock::now(); - shared_data->zmc_heartbeat_time = std::chrono::system_clock::to_time_t(packet->timestamp); + shared_data->heartbeat_time = std::chrono::system_clock::to_time_t(packet->timestamp); int captureResult = camera->Capture(packet); Debug(4, "Back from capture result=%d image count %d", captureResult, image_count); @@ -2464,10 +2465,10 @@ int Monitor::Capture() { shared_data->last_write_time = std::chrono::system_clock::to_time_t(packet->timestamp); } Debug(2, "Have packet stream_index:%d ?= videostream_id: %d q.vpktcount %d event? %d image_count %d", - packet->packet.stream_index, video_stream_id, packetqueue.packet_count(video_stream_id), ( event ? 1 : 0 ), image_count); + packet->packet->stream_index, video_stream_id, packetqueue.packet_count(video_stream_id), ( event ? 
1 : 0 ), image_count); if (packet->codec_type == AVMEDIA_TYPE_VIDEO) { - packet->packet.stream_index = video_stream_id; // Convert to packetQueue's index + packet->packet->stream_index = video_stream_id; // Convert to packetQueue's index if (video_fifo) { if (packet->keyframe) { // avcodec strips out important nals that describe the stream and @@ -2488,7 +2489,7 @@ int Monitor::Capture() { if (record_audio and (packetqueue.packet_count(video_stream_id) or event)) { packet->image_index=-1; Debug(2, "Queueing audio packet"); - packet->packet.stream_index = audio_stream_id; // Convert to packetQueue's index + packet->packet->stream_index = audio_stream_id; // Convert to packetQueue's index packetqueue.queuePacket(packet); } else { Debug(4, "Not Queueing audio packet"); @@ -2596,7 +2597,7 @@ bool Monitor::Decode() { return true; // Don't need decode } - if ((!packet->image) and packet->packet.size and !packet->in_frame) { + if ((!packet->image) and packet->packet->size and !packet->in_frame) { if ((decoding == DECODING_ALWAYS) or ((decoding == DECODING_ONDEMAND) and this->hasViewers() ) @@ -2615,19 +2616,19 @@ bool Monitor::Decode() { if (packet->in_frame and !packet->image) { packet->image = new Image(camera_width, camera_height, camera->Colours(), camera->SubpixelOrder()); - if (convert_context || this->setupConvertContext(packet->in_frame, packet->image)) { - if (!packet->image->Assign(packet->in_frame, convert_context, dest_frame)) { + if (convert_context || this->setupConvertContext(packet->in_frame.get(), packet->image)) { + if (!packet->image->Assign(packet->in_frame.get(), convert_context, dest_frame.get())) { delete packet->image; packet->image = nullptr; } - av_frame_unref(dest_frame); + av_frame_unref(dest_frame.get()); } else { delete packet->image; packet->image = nullptr; } // end if have convert_context } // end if need transfer to image } else { - Debug(1, "No packet.size(%d) or packet->in_frame(%p). 
Not decoding", packet->packet.size, packet->in_frame); + Debug(1, "No packet.size(%d) or packet->in_frame(%p). Not decoding", packet->packet->size, packet->in_frame.get()); } } else { Debug(1, "Not Decoding ? %s", Decoding_Strings[decoding].c_str()); @@ -3129,7 +3130,7 @@ int Monitor::PrimeCapture() { } if (decoding != DECODING_NONE) { - if (!dest_frame) dest_frame = zm_av_frame_alloc(); + if (!dest_frame) dest_frame = av_frame_ptr{zm_av_frame_alloc()}; if (!decoder_it) decoder_it = packetqueue.get_video_it(false); if (!decoder) { Debug(1, "Creating decoder thread"); @@ -3172,13 +3173,13 @@ int Monitor::Close() { } if (analysis_thread) { analysis_thread->Stop(); - Debug(1, "Analysi stopped"); + Debug(1, "Analysis stopped"); } //ONVIF Teardown if (Poller) { Poller->Stop(); - Debug(1, "Polleri stopped"); + Debug(1, "Poller stopped"); } #ifdef WITH_GSOAP if (onvif_event_listener && (soap != nullptr)) { @@ -3193,8 +3194,9 @@ int Monitor::Close() { } //End ONVIF #endif //Janus Teardown - if (janus_enabled && (purpose == CAPTURE)) { + if (janus_enabled and (purpose == CAPTURE) and Janus_Manager) { delete Janus_Manager; + Janus_Manager = nullptr; } if (audio_fifo) { @@ -3262,7 +3264,7 @@ void Monitor::get_ref_image() { std::shared_ptr snap = snap_lock->packet_; Debug(1, "get_ref_image: packet.stream %d ?= video_stream %d, packet image id %d packet image %p", - snap->packet.stream_index, video_stream_id, snap->image_index, snap->image ); + snap->packet->stream_index, video_stream_id, snap->image_index, snap->image ); // Might not have been decoded yet FIXME if (snap->image) { ref_image.Assign(width, height, camera->Colours(), diff --git a/src/zm_monitor.h b/src/zm_monitor.h index 68038936c..29d80ec77 100644 --- a/src/zm_monitor.h +++ b/src/zm_monitor.h @@ -192,37 +192,40 @@ protected: uint8_t recording; /* +55 */ uint8_t signal; /* +56 */ uint8_t format; /* +57 */ - uint32_t imagesize; /* +58 */ - uint32_t last_frame_score; /* +62 */ - uint32_t audio_frequency; /* +66 */ - 
uint32_t audio_channels; /* +70 */ + uint8_t reserved1; /* +58 */ + uint8_t reserved2; /* +59 */ + uint32_t imagesize; /* +60 */ + uint32_t last_frame_score; /* +64 */ + uint32_t audio_frequency; /* +68 */ + uint32_t audio_channels; /* +72 */ + uint32_t reserved3; /* +76 */ /* ** This keeps 32bit time_t and 64bit time_t identical and compatible as long as time is before 2038. ** Shared memory layout should be identical for both 32bit and 64bit and is multiples of 16. ** Because startup_time is 64bit it may be aligned to a 64bit boundary. So it's offset SHOULD be a multiple ** of 8. Add or delete epadding's to achieve this. */ - union { /* +72 */ + union { /* +80 */ time_t startup_time; /* When the zmc process started. zmwatch uses this to see how long the process has been running without getting any images */ uint64_t extrapad1; }; - union { /* +80 */ - time_t zmc_heartbeat_time; /* Constantly updated by zmc. Used to determine if the process is alive or hung or dead */ + union { /* +88 */ + time_t heartbeat_time; /* Constantly updated by zmc. Used to determine if the process is alive or hung or dead */ uint64_t extrapad2; }; - union { /* +88 */ + union { /* +96 */ time_t last_write_time; uint64_t extrapad3; }; - union { /* +96 */ + union { /* +104 */ time_t last_read_time; uint64_t extrapad4; }; - union { /* +104 */ + union { /* +112 */ time_t last_viewed_time; uint64_t extrapad5; }; - uint8_t control_state[256]; /* +112 */ + uint8_t control_state[256]; /* +120 */ char alarm_cause[256]; char video_fifo_path[64]; @@ -457,6 +460,7 @@ protected: bool embed_exif; // Whether to embed Exif data into each image frame or not bool rtsp_server; // Whether to include this monitor as an rtsp server stream std::string rtsp_streamname; // path in the rtsp url for this monitor + std::string onvif_alarm_txt; // def onvif_alarm_txt int importance; // Importance of this monitor, affects Connection logging errors. 
unsigned int zone_count; @@ -519,7 +523,7 @@ protected: std::unique_ptr analysis_thread; packetqueue_iterator *decoder_it; std::unique_ptr decoder; - AVFrame *dest_frame; // Used by decoding thread doing colorspace conversions + av_frame_ptr dest_frame; // Used by decoding thread doing colorspace conversions SwsContext *convert_context; std::thread close_event_thread; @@ -589,11 +593,11 @@ public: gettimeofday(&now, nullptr); Debug(3, "Shared data is valid, checking heartbeat %" PRIi64 " - %" PRIi64 " = %" PRIi64" < %f", static_cast(now.tv_sec), - static_cast(shared_data->zmc_heartbeat_time), - static_cast(now.tv_sec - shared_data->zmc_heartbeat_time), + static_cast(shared_data->heartbeat_time), + static_cast(now.tv_sec - shared_data->heartbeat_time), config.watch_max_delay); - if ((now.tv_sec - shared_data->zmc_heartbeat_time) < config.watch_max_delay) + if ((now.tv_sec - shared_data->heartbeat_time) < config.watch_max_delay) return true; } return false; @@ -752,7 +756,7 @@ public: SystemTimePoint GetStartupTime() const { return std::chrono::system_clock::from_time_t(shared_data->startup_time); } void SetStartupTime(SystemTimePoint time) { shared_data->startup_time = std::chrono::system_clock::to_time_t(time); } void SetHeartbeatTime(SystemTimePoint time) { - shared_data->zmc_heartbeat_time = std::chrono::system_clock::to_time_t(time); + shared_data->heartbeat_time = std::chrono::system_clock::to_time_t(time); } void get_ref_image(); diff --git a/src/zm_monitor_janus.cpp b/src/zm_monitor_janus.cpp index d46ce4ce1..0994b014f 100644 --- a/src/zm_monitor_janus.cpp +++ b/src/zm_monitor_janus.cpp @@ -27,15 +27,20 @@ Monitor::JanusManager::JanusManager(Monitor *parent_) : //constructor takes care of init and calls add_to parent = parent_; if ((config.janus_path != nullptr) && (config.janus_path[0] != '\0')) { - janus_endpoint = config.janus_path; //TODO: strip trailing / + janus_endpoint = config.janus_path; + //remove the trailing slash if present + if 
(janus_endpoint.back() == '/') janus_endpoint.pop_back(); } else { janus_endpoint = "127.0.0.1:8088/janus"; } - if (janus_endpoint.back() == '/') janus_endpoint.pop_back(); //remove the trailing slash if present std::size_t at_pos = parent->path.find("@", 7); - if (at_pos != std::string::npos) { //If we find an @ symbol, we have a username/password. Otherwise, passwordless login. + if (at_pos != std::string::npos) { + //If we find an @ symbol, we have a username/password. Otherwise, passwordless login. std::size_t colon_pos = parent->path.find(":", 7); //Search for the colon, but only after the rtsp:// text. - if (colon_pos == std::string::npos) throw std::runtime_error("Cannot Parse URL for Janus."); //Looks like an invalid url + if (colon_pos == std::string::npos) { + //Looks like an invalid url + throw std::runtime_error("Cannot Parse URL for Janus."); + } rtsp_username = parent->path.substr(7, colon_pos-7); rtsp_password = parent->path.substr(colon_pos+1, at_pos - colon_pos - 1); rtsp_path = "rtsp://"; @@ -161,12 +166,12 @@ int Monitor::JanusManager::add_to_janus() { CURLcode res; std::string response; - curl_easy_setopt(curl, CURLOPT_URL,endpoint.c_str()); + curl_easy_setopt(curl, CURLOPT_URL, endpoint.c_str()); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteCallback); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response); curl_easy_setopt(curl, CURLOPT_POSTFIELDS, postData.c_str()); res = curl_easy_perform(curl); - curl_easy_cleanup(curl); + curl_easy_cleanup(curl); if (res != CURLE_OK) { Error("Failed to curl_easy_perform adding rtsp stream"); @@ -192,8 +197,8 @@ int Monitor::JanusManager::add_to_janus() { size_t Monitor::JanusManager::WriteCallback(void *contents, size_t size, size_t nmemb, void *userp) { - ((std::string*)userp)->append((char*)contents, size * nmemb); - return size * nmemb; + ((std::string*)userp)->append((char*)contents, size * nmemb); + return size * nmemb; } /* @@ -216,11 +221,10 @@ void Monitor::JanusManager::generateKey() } */ - int 
Monitor::JanusManager::get_janus_session() { janus_session = ""; curl = curl_easy_init(); - if(!curl) return -1; + if (!curl) return -1; std::string endpoint = janus_endpoint; std::string response; @@ -249,15 +253,14 @@ int Monitor::JanusManager::get_janus_session() { int Monitor::JanusManager::get_janus_handle() { curl = curl_easy_init(); - if(!curl) return -1; - + if (!curl) return -1; CURLcode res; std::string response = ""; std::string endpoint = janus_endpoint+"/"+janus_session; std::string postData = "{\"janus\" : \"attach\", \"plugin\" : \"janus.plugin.streaming\", \"transaction\" : \"randomString\"}"; - curl_easy_setopt(curl, CURLOPT_URL,endpoint.c_str()); + curl_easy_setopt(curl, CURLOPT_URL, endpoint.c_str()); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteCallback); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response); curl_easy_setopt(curl, CURLOPT_POSTFIELDS, postData.c_str()); diff --git a/src/zm_monitor_monitorlink.cpp b/src/zm_monitor_monitorlink.cpp index ffd48d351..8fb485888 100644 --- a/src/zm_monitor_monitorlink.cpp +++ b/src/zm_monitor_monitorlink.cpp @@ -115,7 +115,7 @@ bool Monitor::MonitorLink::connect() { disconnect(); return false; } else if (map_stat.st_size < mem_size) { - Error("Got unexpected memory map file size %ld, expected %jd", map_stat.st_size, static_cast(mem_size)); + Error("Got unexpected memory map file size %jd, expected %jd", static_cast(map_stat.st_size), static_cast(mem_size)); disconnect(); return false; } diff --git a/src/zm_mpeg.cpp b/src/zm_mpeg.cpp index b962883a4..0d8fbad7d 100644 --- a/src/zm_mpeg.cpp +++ b/src/zm_mpeg.cpp @@ -213,8 +213,8 @@ bool VideoStream::OpenStream( ) { Debug( 1, "Opened codec" ); /* allocate the encoded raw picture */ - opicture = zm_av_frame_alloc( ); - if ( !opicture ) { + opicture = av_frame_ptr{zm_av_frame_alloc()}; + if (!opicture) { Error("Could not allocate opicture"); return false; } @@ -224,36 +224,33 @@ bool VideoStream::OpenStream( ) { int size = 
av_image_get_buffer_size(codec_context->pix_fmt, codec_context->width, codec_context->height, 1); - uint8_t *opicture_buf = (uint8_t *)av_malloc(size); - if ( !opicture_buf ) { - av_frame_free( &opicture ); - Error( "Could not allocate opicture_buf" ); + opicture->buf[0] = av_buffer_alloc(size); + if (!opicture->buf[0]) { + Error( "Could not allocate opicture buffer" ); return false; } av_image_fill_arrays(opicture->data, opicture->linesize, - opicture_buf, codec_context->pix_fmt, codec_context->width, codec_context->height, 1); + opicture->buf[0]->data, codec_context->pix_fmt, codec_context->width, codec_context->height, 1); /* if the output format is not identical to the input format, then a temporary picture is needed too. It is then converted to the required output format */ - tmp_opicture = nullptr; if ( codec_context->pix_fmt != pf ) { - tmp_opicture = av_frame_alloc(); + tmp_opicture = av_frame_ptr{av_frame_alloc()}; - if ( !tmp_opicture ) { - Error( "Could not allocate tmp_opicture" ); + if (!tmp_opicture) { + Error("Could not allocate tmp_opicture"); return false; } size = av_image_get_buffer_size(pf, codec_context->width, codec_context->height, 1); - uint8_t *tmp_opicture_buf = (uint8_t *)av_malloc( size ); - if ( !tmp_opicture_buf ) { - av_frame_free( &tmp_opicture ); - Error( "Could not allocate tmp_opicture_buf" ); + tmp_opicture->buf[0] = av_buffer_alloc(size); + if (!tmp_opicture->buf[0]) { + Error( "Could not allocate tmp_opicture buffer" ); return false; } av_image_fill_arrays(tmp_opicture->data, - tmp_opicture->linesize, tmp_opicture_buf, pf, + tmp_opicture->linesize, tmp_opicture->buf[0]->data, pf, codec_context->width, codec_context->height, 1); } } // end if ost @@ -300,8 +297,6 @@ bool VideoStream::OpenStream( ) { VideoStream::VideoStream( const char *in_filename, const char *in_format, int bitrate, double frame_rate, int colours, int subpixelorder, int width, int height ) : filename(in_filename), format(in_format), - opicture(nullptr), - 
tmp_opicture(nullptr), video_outbuf(nullptr), video_outbuf_size(0), last_pts( -1 ), @@ -338,10 +333,8 @@ VideoStream::VideoStream( const char *in_filename, const char *in_format, int bi SetParameters( ); // Allocate buffered packets. - packet_buffers = new AVPacket*[2]; - packet_buffers[0] = new AVPacket(); - packet_buffers[1] = new AVPacket(); - packet_index = 0; + for (auto &pkt : packet_buffers) + pkt = av_packet_ptr{av_packet_alloc()}; // Initialize mutex used by streaming thread. if ( pthread_mutex_init( buffer_copy_lock, nullptr ) != 0 ) { @@ -375,21 +368,9 @@ VideoStream::~VideoStream( ) { delete buffer_copy_lock; } - if (packet_buffers) { - delete packet_buffers[0]; - delete packet_buffers[1]; - delete[] packet_buffers; - } - /* close each codec */ if ( ost ) { avcodec_close( codec_context ); - av_free( opicture->data[0] ); - av_frame_free( &opicture ); - if ( tmp_opicture ) { - av_free( tmp_opicture->data[0] ); - av_frame_free( &tmp_opicture ); - } av_free( video_outbuf ); } @@ -472,10 +453,10 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size, } else { memcpy( opicture->data[0], buffer, buffer_size ); } - AVFrame *opicture_ptr = opicture; - AVPacket *pkt = packet_buffers[packet_index]; - av_init_packet( pkt ); + AVFrame *opicture_ptr = opicture.get(); + AVPacket *pkt = packet_buffers[packet_index].get(); + if (codec_context->codec_type == AVMEDIA_TYPE_VIDEO && codec_context->codec_id == AV_CODEC_ID_RAWVIDEO) { pkt->flags |= AV_PKT_FLAG_KEY; @@ -550,7 +531,7 @@ void *VideoStream::StreamingThreadCallback(void *ctx) { // Since this lag is not constant the client may skip frames. // Get the last rendered packet. 
- AVPacket *packet = videoStream->packet_buffers[videoStream->packet_index]; + AVPacket *packet = videoStream->packet_buffers[videoStream->packet_index].get(); if (packet->size) { videoStream->SendPacket(packet); } diff --git a/src/zm_mpeg.h b/src/zm_mpeg.h index 231821f2d..20195b2df 100644 --- a/src/zm_mpeg.h +++ b/src/zm_mpeg.h @@ -22,6 +22,7 @@ #include "zm_ffmpeg.h" #include +#include class VideoStream { protected: @@ -45,8 +46,8 @@ protected: AVStream *ost; AVCodecContext *codec_context; const AVCodec *codec; - AVFrame *opicture; - AVFrame *tmp_opicture; + av_frame_ptr opicture; + av_frame_ptr tmp_opicture; uint8_t *video_outbuf; int video_outbuf_size; double last_pts; @@ -59,7 +60,7 @@ protected: pthread_mutex_t *buffer_copy_lock; int buffer_copy_size; int buffer_copy_used; - AVPacket** packet_buffers; + std::array packet_buffers; int packet_index; int SendPacket(AVPacket *packet); static void* StreamingThreadCallback(void *ctx); diff --git a/src/zm_packet.cpp b/src/zm_packet.cpp index 0ffe262ca..f584b5b0c 100644 --- a/src/zm_packet.cpp +++ b/src/zm_packet.cpp @@ -28,9 +28,6 @@ AVPixelFormat target_format = AV_PIX_FMT_NONE; ZMPacket::ZMPacket() : keyframe(0), stream(nullptr), - in_frame(nullptr), - out_frame(nullptr), - buffer(nullptr), image(nullptr), analysis_image(nullptr), score(-1), @@ -40,17 +37,13 @@ ZMPacket::ZMPacket() : pts(0), decoded(false) { - av_init_packet(&packet); - packet.size = 0; // So we can detect whether it has been filled. + packet = av_packet_ptr{av_packet_alloc()}; } ZMPacket::ZMPacket(Image *i, SystemTimePoint tv) : keyframe(0), stream(nullptr), - in_frame(nullptr), - out_frame(nullptr), timestamp(tv), - buffer(nullptr), image(i), analysis_image(nullptr), score(-1), @@ -60,17 +53,13 @@ ZMPacket::ZMPacket(Image *i, SystemTimePoint tv) : pts(0), decoded(false) { - av_init_packet(&packet); - packet.size = 0; // So we can detect whether it has been filled. 
+ packet = av_packet_ptr{av_packet_alloc()}; } ZMPacket::ZMPacket(ZMPacket &p) : keyframe(0), stream(nullptr), - in_frame(nullptr), - out_frame(nullptr), timestamp(p.timestamp), - buffer(nullptr), image(nullptr), analysis_image(nullptr), score(-1), @@ -80,25 +69,20 @@ ZMPacket::ZMPacket(ZMPacket &p) : pts(0), decoded(false) { - av_init_packet(&packet); - packet.size = 0; - packet.data = nullptr; - if (zm_av_packet_ref(&packet, &p.packet) < 0) { + packet = av_packet_ptr{av_packet_alloc()}; + + if (zm_av_packet_ref(packet.get(), p.packet.get()) < 0) { Error("error refing packet"); } } ZMPacket::~ZMPacket() { - zm_av_packet_unref(&packet); - if (in_frame) av_frame_free(&in_frame); - if (out_frame) av_frame_free(&out_frame); - if (buffer) av_freep(&buffer); delete analysis_image; delete image; } ssize_t ZMPacket::ram() { - return packet.size + + return packet->size + (in_frame ? in_frame->linesize[0] * in_frame->height : 0) + (out_frame ? out_frame->linesize[0] * out_frame->height : 0) + (image ? 
image->Size() : 0) + @@ -116,24 +100,24 @@ int ZMPacket::decode(AVCodecContext *ctx) { if (in_frame) { Error("Already have a frame?"); } else { - in_frame = zm_av_frame_alloc(); + in_frame = av_frame_ptr{zm_av_frame_alloc()}; } // packets are always stored in AV_TIME_BASE_Q so need to convert to codec time base //av_packet_rescale_ts(&packet, AV_TIME_BASE_Q, ctx->time_base); - int ret = zm_send_packet_receive_frame(ctx, in_frame, packet); + int ret = zm_send_packet_receive_frame(ctx, in_frame.get(), *packet); if (ret < 0) { if (AVERROR(EAGAIN) != ret) { Warning("Unable to receive frame : code %d %s.", ret, av_make_error_string(ret).c_str()); } - av_frame_free(&in_frame); + in_frame = nullptr; return 0; } int bytes_consumed = ret; if (ret > 0) { - zm_dump_video_frame(in_frame, "got frame"); + zm_dump_video_frame(in_frame.get(), "got frame"); #if HAVE_LIBAVUTIL_HWCONTEXT_H #if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0) @@ -172,7 +156,7 @@ int ZMPacket::decode(AVCodecContext *ctx) { } // end if target_format not set #endif - AVFrame *new_frame = zm_av_frame_alloc(); + av_frame_ptr new_frame{zm_av_frame_alloc()}; #if 0 if ( target_format == AV_PIX_FMT_RGB0 ) { if ( image ) { @@ -187,30 +171,28 @@ int ZMPacket::decode(AVCodecContext *ctx) { } #endif /* retrieve data from GPU to CPU */ - zm_dump_video_frame(in_frame, "Before hwtransfer"); - ret = av_hwframe_transfer_data(new_frame, in_frame, 0); + zm_dump_video_frame(in_frame.get(), "Before hwtransfer"); + ret = av_hwframe_transfer_data(new_frame.get(), in_frame.get(), 0); if (ret < 0) { Error("Unable to transfer frame: %s, continuing", av_make_error_string(ret).c_str()); - av_frame_free(&in_frame); - av_frame_free(&new_frame); + in_frame = nullptr; return 0; } - ret = av_frame_copy_props(new_frame, in_frame); + ret = av_frame_copy_props(new_frame.get(), in_frame.get()); if (ret < 0) { Error("Unable to copy props: %s, continuing", av_make_error_string(ret).c_str()); } - zm_dump_video_frame(new_frame, "After hwtransfer"); 
+ zm_dump_video_frame(new_frame.get(), "After hwtransfer"); #if 0 if ( new_frame->format == AV_PIX_FMT_RGB0 ) { new_frame->format = AV_PIX_FMT_RGBA; zm_dump_video_frame(new_frame, "After hwtransfer setting to rgba"); } #endif - av_frame_free(&in_frame); - in_frame = new_frame; + in_frame = std::move(new_frame); } else #endif #endif @@ -239,7 +221,7 @@ Image *ZMPacket::get_image(Image *i) { } image = i; } - image->Assign(in_frame); + image->Assign(in_frame.get()); return image; } @@ -249,18 +231,18 @@ Image *ZMPacket::set_image(Image *i) { } AVPacket *ZMPacket::set_packet(AVPacket *p) { - if (zm_av_packet_ref(&packet, p) < 0) { + if (zm_av_packet_ref(packet.get(), p) < 0) { Error("error refing packet"); } timestamp = std::chrono::system_clock::now(); keyframe = p->flags & AV_PKT_FLAG_KEY; - return &packet; + return packet.get(); } AVFrame *ZMPacket::get_out_frame(int width, int height, AVPixelFormat format) { if (!out_frame) { - out_frame = zm_av_frame_alloc(); + out_frame = av_frame_ptr{zm_av_frame_alloc()}; if (!out_frame) { Error("Unable to allocate a frame"); return nullptr; @@ -272,18 +254,23 @@ AVFrame *ZMPacket::get_out_frame(int width, int height, AVPixelFormat format) { codec_imgsize = av_image_get_buffer_size( format, width, height, alignment); Debug(1, "buffer size %u from %s %dx%d", codec_imgsize, av_get_pix_fmt_name(format), width, height); - buffer = (uint8_t *)av_malloc(codec_imgsize); + out_frame->buf[0] = av_buffer_alloc(codec_imgsize); + if (!out_frame->buf[0]) { + Error("Unable to allocate a frame buffer"); + out_frame = nullptr; + return nullptr; + } int ret; if ((ret=av_image_fill_arrays( out_frame->data, out_frame->linesize, - buffer, + out_frame->buf[0]->data, format, width, height, alignment))<0) { Error("Failed to fill_arrays %s", av_make_error_string(ret).c_str()); - av_frame_free(&out_frame); + out_frame = nullptr; return nullptr; } @@ -291,5 +278,5 @@ AVFrame *ZMPacket::get_out_frame(int width, int height, AVPixelFormat format) { 
out_frame->height = height; out_frame->format = format; } - return out_frame; + return out_frame.get(); } // end AVFrame *ZMPacket::get_out_frame( AVCodecContext *ctx ); diff --git a/src/zm_packet.h b/src/zm_packet.h index 23a593b22..eb802831e 100644 --- a/src/zm_packet.h +++ b/src/zm_packet.h @@ -20,6 +20,7 @@ #ifndef ZM_PACKET_H #define ZM_PACKET_H +#include "zm_ffmpeg.h" #include "zm_logger.h" #include "zm_time.h" #include "zm_zone.h" @@ -43,11 +44,10 @@ class ZMPacket { int keyframe; AVStream *stream; // Input stream - AVPacket packet; // Input packet, undecoded - AVFrame *in_frame; // Input image, decoded Theoretically only filled if needed. - AVFrame *out_frame; // output image, Only filled if needed. + av_packet_ptr packet; // Input packet, undecoded + av_frame_ptr in_frame; // Input image, decoded Theoretically only filled if needed. + av_frame_ptr out_frame; // output image, Only filled if needed. SystemTimePoint timestamp; - uint8_t *buffer; // buffer used in image Image *image; Image *analysis_image; int score; @@ -60,9 +60,9 @@ class ZMPacket { std::string alarm_cause; public: - AVPacket *av_packet() { return &packet; } + AVPacket *av_packet() { return packet.get(); } AVPacket *set_packet(AVPacket *p) ; - AVFrame *av_frame() { return out_frame; } + AVFrame *av_frame() { return out_frame.get(); } Image *get_image(Image *i=nullptr); Image *set_image(Image *); ssize_t ram(); @@ -77,6 +77,9 @@ class ZMPacket { //AVFrame *get_out_frame(const AVCodecContext *ctx); AVFrame *get_out_frame(int width, int height, AVPixelFormat format); int get_codec_imgsize() { return codec_imgsize; }; + void notify_all() { + this->condition_.notify_all(); + } }; class ZMLockedPacket { @@ -119,6 +122,10 @@ class ZMLockedPacket { Debug(4, "packet %d waiting", packet_->image_index); packet_->condition_.wait(lck_); } + void notify_all() { + packet_->notify_all(); + } + }; #endif /* ZM_PACKET_H */ diff --git a/src/zm_packetqueue.cpp b/src/zm_packetqueue.cpp index 21ab89349..de545d166 
100644 --- a/src/zm_packetqueue.cpp +++ b/src/zm_packetqueue.cpp @@ -99,13 +99,13 @@ bool PacketQueue::queuePacket(std::shared_ptr add_packet) { } } // end foreach iterator - packet_counts[add_packet->packet.stream_index] += 1; + packet_counts[add_packet->packet->stream_index] += 1; Debug(2, "packet counts for %d is %d", - add_packet->packet.stream_index, - packet_counts[add_packet->packet.stream_index]); + add_packet->packet->stream_index, + packet_counts[add_packet->packet->stream_index]); if ( - (add_packet->packet.stream_index == video_stream_id) + (add_packet->packet->stream_index == video_stream_id) and (max_video_packet_count > 0) and @@ -130,7 +130,7 @@ bool PacketQueue::queuePacket(std::shared_ptr add_packet) { ZMLockedPacket lp(zm_packet); if (!lp.trylock()) { - if (warned_count <2) { + if (warned_count < 2) { warned_count++; // Can't delete a locked packet, but can delete one after it. Warning("Found locked packet when trying to free up video packets. This basically means that decoding is not keeping up."); @@ -153,20 +153,20 @@ bool PacketQueue::queuePacket(std::shared_ptr add_packet) { } // end foreach iterator it = pktQueue.erase(it); - packet_counts[zm_packet->packet.stream_index] -= 1; + packet_counts[zm_packet->packet->stream_index] -= 1; Debug(1, "Deleting a packet with stream index:%d image_index:%d with keyframe:%d, video frames in queue:%d max: %d, queuesize:%zu", - zm_packet->packet.stream_index, + zm_packet->packet->stream_index, zm_packet->image_index, zm_packet->keyframe, packet_counts[video_stream_id], max_video_packet_count, pktQueue.size()); - if (zm_packet->packet.stream_index == video_stream_id) + if (zm_packet->packet->stream_index == video_stream_id) break; } // end while - } else { + } else if (warned_count > 0) { warned_count--; } // end if not able catch up } // end lock scope @@ -192,7 +192,7 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { if (deleting) return; if (keep_keyframes and ! 
( - add_packet->packet.stream_index == video_stream_id + add_packet->packet->stream_index == video_stream_id and add_packet->keyframe and @@ -202,7 +202,7 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { ) ) { Debug(3, "stream index %d ?= video_stream_id %d, keyframe %d, keep_keyframes %d, counts %d > pre_event_count %d at begin %d", - add_packet->packet.stream_index, video_stream_id, add_packet->keyframe, keep_keyframes, packet_counts[video_stream_id], pre_event_video_packet_count, + add_packet->packet->stream_index, video_stream_id, add_packet->keyframe, keep_keyframes, packet_counts[video_stream_id], pre_event_video_packet_count, ( *(pktQueue.begin()) != add_packet ) ); return; @@ -215,7 +215,7 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { packetqueue_iterator it = pktQueue.end(); --it; while (*it != add_packet) { - if ((*it)->packet.stream_index == video_stream_id) + if ((*it)->packet->stream_index == video_stream_id) ++tail_count; --it; } @@ -239,10 +239,10 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { } pktQueue.pop_front(); - packet_counts[zm_packet->packet.stream_index] -= 1; + packet_counts[zm_packet->packet->stream_index] -= 1; Debug(1, "Deleting a packet with stream index:%d image_index:%d with keyframe:%d, video frames in queue:%d max: %d, queuesize:%zu", - zm_packet->packet.stream_index, + zm_packet->packet->stream_index, zm_packet->image_index, zm_packet->keyframe, packet_counts[video_stream_id], @@ -262,6 +262,7 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { return; } + int keyframe_interval = 1; ZMLockedPacket *lp = new ZMLockedPacket(zm_packet); if (!lp->trylock()) { Debug(4, "Failed getting lock on first packet"); @@ -269,7 +270,6 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { return; } // end if first packet not locked - int keyframe_interval = 1; int video_packets_to_delete = 0; // This is a count of how many packets we will 
delete so we know when to stop looking ++it; delete lp; @@ -295,7 +295,7 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { } #endif - if (zm_packet->packet.stream_index == video_stream_id) { + if (zm_packet->packet->stream_index == video_stream_id) { if (zm_packet->keyframe) { Debug(4, "Have a video keyframe so setting next front to it. Keyframe interval so far is %d", keyframe_interval); keyframe_interval = 1; @@ -313,9 +313,15 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { ++it; } // end while - Debug(1, "Resulting it pointing at latest packet? %d, next front points to begin? %d", + if ((keyframe_interval == 1) and max_video_packet_count) { + Warning("Did not find a second keyframe in the packet queue. It may be that" + " the Max Image Buffer setting is lower than the keyframe interval. We" + " need it to be greater than the keyframe interval."); + } + Debug(1, "Resulting it pointing at latest packet? %d, next front points to begin? %d, Keyframe interval %d", ( *it == add_packet ), - ( next_front == pktQueue.begin() ) + ( next_front == pktQueue.begin() ), + keyframe_interval ); if (next_front != pktQueue.begin()) { while (pktQueue.begin() != next_front) { @@ -327,14 +333,14 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { Debug(1, "Deleting a packet with stream index:%d image_index:%d with keyframe:%d, video frames in queue:%d max: %d, queuesize:%zu", - zm_packet->packet.stream_index, + zm_packet->packet->stream_index, zm_packet->image_index, zm_packet->keyframe, packet_counts[video_stream_id], pre_event_video_packet_count, pktQueue.size()); pktQueue.pop_front(); - packet_counts[zm_packet->packet.stream_index] -= 1; + packet_counts[zm_packet->packet->stream_index] -= 1; } } // end if have at least max_video_packet_count video packets remaining @@ -344,6 +350,7 @@ void PacketQueue::clearPackets(const std::shared_ptr &add_packet) { void PacketQueue::stop() { deleting = true; condition.notify_all(); 
+ for (const auto p : pktQueue) p->notify_all(); } void PacketQueue::clear() { @@ -361,13 +368,13 @@ void PacketQueue::clear() { lp->lock(); Debug(1, "Deleting a packet with stream index:%d image_index:%d with keyframe:%d, video frames in queue:%d max: %d, queuesize:%zu", - packet->packet.stream_index, + packet->packet->stream_index, packet->image_index, packet->keyframe, packet_counts[video_stream_id], pre_event_video_packet_count, pktQueue.size()); - packet_counts[packet->packet.stream_index] -= 1; + packet_counts[packet->packet->stream_index] -= 1; pktQueue.pop_front(); delete lp; } @@ -525,7 +532,7 @@ bool PacketQueue::increment_it(packetqueue_iterator *it, int stream_id) { std::unique_lock lck(mutex); do { ++(*it); - } while ( (*it != pktQueue.end()) and ( (*(*it))->packet.stream_index != stream_id) ); + } while ( (*it != pktQueue.end()) and ( (*(*it))->packet->stream_index != stream_id) ); if ( *it != pktQueue.end() ) { Debug(2, "Incrementing %p, still not at end, so incrementing", it); @@ -554,10 +561,10 @@ packetqueue_iterator *PacketQueue::get_event_start_packet_it( packet = *(*it); /* Debug(1, "Previous packet pre_event_count %d stream_index %d keyframe %d score %d", - pre_event_count, packet->packet.stream_index, packet->keyframe, packet->score); + pre_event_count, packet->packet->stream_index, packet->keyframe, packet->score); ZM_DUMP_PACKET(packet->packet, ""); */ - if (packet->packet.stream_index == video_stream_id) { + if (packet->packet->stream_index == video_stream_id) { pre_event_count --; if (!pre_event_count) break; @@ -585,7 +592,7 @@ packetqueue_iterator *PacketQueue::get_event_start_packet_it( while ((*it) != pktQueue.begin()) { packet = *(*it); //ZM_DUMP_PACKET(packet->packet, "No keyframe"); - if ((packet->packet.stream_index == video_stream_id) and packet->keyframe) + if ((packet->packet->stream_index == video_stream_id) and packet->keyframe) return it; // Success --(*it); } @@ -634,8 +641,8 @@ packetqueue_iterator * 
PacketQueue::get_video_it(bool wait) { return nullptr; } Debug(1, "Packet keyframe %d for stream %d, so returning the it to it", - zm_packet->keyframe, zm_packet->packet.stream_index); - if (zm_packet->keyframe and ( zm_packet->packet.stream_index == video_stream_id )) { + zm_packet->keyframe, zm_packet->packet->stream_index); + if (zm_packet->keyframe and ( zm_packet->packet->stream_index == video_stream_id )) { Debug(1, "Found a keyframe for stream %d, so returning the it to it", video_stream_id); return it; } diff --git a/src/zm_remote_camera_http.cpp b/src/zm_remote_camera_http.cpp index 598afcd61..0d561073b 100644 --- a/src/zm_remote_camera_http.cpp +++ b/src/zm_remote_camera_http.cpp @@ -1099,7 +1099,7 @@ int RemoteCameraHttp::Capture(std::shared_ptr &packet) { Image *image = packet->image; packet->keyframe = 1; packet->codec_type = AVMEDIA_TYPE_VIDEO; - packet->packet.stream_index = mVideoStreamId; + packet->packet->stream_index = mVideoStreamId; packet->stream = mVideoStream; switch (format) { diff --git a/src/zm_remote_camera_rtsp.cpp b/src/zm_remote_camera_rtsp.cpp index b8a533fae..b6dec6dd4 100644 --- a/src/zm_remote_camera_rtsp.cpp +++ b/src/zm_remote_camera_rtsp.cpp @@ -216,7 +216,7 @@ int RemoteCameraRtsp::PreCapture() { int RemoteCameraRtsp::Capture(std::shared_ptr &zm_packet) { int frameComplete = false; - AVPacket *packet = &zm_packet->packet; + AVPacket *packet = zm_packet->packet.get(); while (!frameComplete) { buffer.clear(); diff --git a/src/zm_rtsp_server_device_source.cpp b/src/zm_rtsp_server_device_source.cpp index ba648b2f4..59df44d31 100644 --- a/src/zm_rtsp_server_device_source.cpp +++ b/src/zm_rtsp_server_device_source.cpp @@ -141,7 +141,7 @@ int ZoneMinderDeviceSource::getNextFrame() { m_packetqueue_it = m_packetqueue->get_video_it(true); } ZMPacket *zm_packet = m_packetqueue->get_packet(m_packetqueue_it); - while ( zm_packet and (zm_packet->packet.stream_index != m_stream->index) ) { + while ( zm_packet and 
(zm_packet->packet->stream_index != m_stream->index) ) { zm_packet->unlock(); // We want our stream to start at the same it as the video // but if this is an audio stream we need to increment past that first packet diff --git a/src/zm_rtsp_server_fifo_source.cpp b/src/zm_rtsp_server_fifo_source.cpp index d067395ae..fc1ee9caa 100644 --- a/src/zm_rtsp_server_fifo_source.cpp +++ b/src/zm_rtsp_server_fifo_source.cpp @@ -215,7 +215,7 @@ int ZoneMinderFifoSource::getNextFrame() { return 0; } if (header_start != m_buffer) { - Debug(4, "ZM Packet didn't start at beginning of buffer %ld. %c%c", + Debug(4, "ZM Packet didn't start at beginning of buffer %td. %c%c", header_start - m_buffer.head(), m_buffer[0], m_buffer[1]); } diff --git a/src/zm_sdp.cpp b/src/zm_sdp.cpp index bdbc9e1d1..1593b1640 100644 --- a/src/zm_sdp.cpp +++ b/src/zm_sdp.cpp @@ -290,7 +290,8 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const { #if (LIBAVFORMAT_VERSION_CHECK(58, 12, 0, 0, 100)) formatContext->url = av_strdup(mUrl.c_str()); #else - strncpy(formatContext->filename, mUrl.c_str(), sizeof(formatContext->filename)); + strncpy(formatContext->filename, mUrl.c_str(), sizeof(formatContext->filename) - 1); + formatContext->filename[sizeof(formatContext->filename) - 1] = '\0'; #endif /* if ( mName.length() ) diff --git a/src/zm_sendfile.h b/src/zm_sendfile.h index 26d6011df..a4f703efc 100644 --- a/src/zm_sendfile.h +++ b/src/zm_sendfile.h @@ -22,10 +22,12 @@ ssize_t zm_sendfile(int out_fd, int in_fd, off_t *offset, size_t size) { return err; #elif HAVE_SENDFILE7_SUPPORT - ssize_t err = sendfile(in_fd, out_fd, offset, size, nullptr, &size, 0); + off_t sbytes = 0; + off_t ofs = offset ? 
*offset : 0; + ssize_t err = sendfile(in_fd, out_fd, ofs, size, nullptr, &sbytes, 0); if (err && errno != EAGAIN) return -errno; - return size; + return sbytes; #else uint8_t buffer[size]; ssize_t err = read(in_fd, buffer, size); diff --git a/src/zm_swscale.cpp b/src/zm_swscale.cpp index 68e7af4ef..a5cf67fda 100644 --- a/src/zm_swscale.cpp +++ b/src/zm_swscale.cpp @@ -25,8 +25,6 @@ SWScale::SWScale() : gotdefaults(false), swscale_ctx(nullptr), - input_avframe(nullptr), - output_avframe(nullptr), default_width(0), default_height(0) { @@ -34,13 +32,13 @@ SWScale::SWScale() : } bool SWScale::init() { - input_avframe = av_frame_alloc(); + input_avframe = av_frame_ptr{zm_av_frame_alloc()}; if (!input_avframe) { Error("Failed allocating AVFrame for the input"); return false; } - output_avframe = av_frame_alloc(); + output_avframe = av_frame_ptr{zm_av_frame_alloc()}; if (!output_avframe) { Error("Failed allocating AVFrame for the output"); return false; @@ -51,12 +49,6 @@ bool SWScale::init() { SWScale::~SWScale() { /* Free up everything */ - if ( input_avframe ) - av_frame_free(&input_avframe); - - if ( output_avframe ) - av_frame_free(&output_avframe); - if ( swscale_ctx ) { sws_freeContext(swscale_ctx); swscale_ctx = nullptr; diff --git a/src/zm_swscale.h b/src/zm_swscale.h index c0d1e7120..32ce70217 100644 --- a/src/zm_swscale.h +++ b/src/zm_swscale.h @@ -24,8 +24,8 @@ class SWScale { protected: bool gotdefaults; struct SwsContext* swscale_ctx; - AVFrame* input_avframe; - AVFrame* output_avframe; + av_frame_ptr input_avframe; + av_frame_ptr output_avframe; enum _AVPIXELFORMAT default_input_pf; enum _AVPIXELFORMAT default_output_pf; unsigned int default_width; diff --git a/src/zm_time.cpp b/src/zm_time.cpp index 041962003..828e7b0f3 100644 --- a/src/zm_time.cpp +++ b/src/zm_time.cpp @@ -36,8 +36,9 @@ std::string SystemTimePointToString(SystemTimePoint tp) { } std::string TimePointToString(TimePoint tp) { + const auto tp_dur = std::chrono::duration_cast(tp - 
std::chrono::steady_clock::now()); time_t tp_sec = std::chrono::system_clock::to_time_t( - std::chrono::system_clock::now() + (tp - std::chrono::steady_clock::now())); + std::chrono::system_clock::now() + tp_dur); Microseconds now_frac = std::chrono::duration_cast( tp.time_since_epoch() - std::chrono::duration_cast(tp.time_since_epoch())); diff --git a/src/zm_videostore.cpp b/src/zm_videostore.cpp index 2599c903b..fe771160c 100644 --- a/src/zm_videostore.cpp +++ b/src/zm_videostore.cpp @@ -85,10 +85,6 @@ VideoStore::VideoStore( audio_in_ctx(p_audio_in_ctx), audio_out_codec(nullptr), audio_out_ctx(nullptr), - video_in_frame(nullptr), - in_frame(nullptr), - out_frame(nullptr), - hw_frame(nullptr), packets_written(0), frame_count(0), hw_device_ctx(nullptr), @@ -110,7 +106,7 @@ VideoStore::VideoStore( { FFMPEGInit(); swscale.init(); - opkt = new AVPacket; + opkt = av_packet_ptr{av_packet_alloc()}; } // VideoStore::VideoStore bool VideoStore::open() { @@ -575,21 +571,22 @@ bool VideoStore::open() { void VideoStore::flush_codecs() { // The codec queues data. We need to send a flush command and out // whatever we get. Failures are not fatal. - AVPacket pkt; - // Without these we seg fault becuse av_init_packet doesn't init them - pkt.data = nullptr; - pkt.size = 0; - av_init_packet(&pkt); + av_packet_ptr pkt{av_packet_alloc()}; + + if (!pkt) { + Error("Unable to allocate packet."); + return; + } // I got crashes if the codec didn't do DELAY, so let's test for it. 
if (video_out_ctx && video_out_ctx->codec && (video_out_ctx->codec->capabilities & AV_CODEC_CAP_DELAY)) { // Put encoder into flushing mode - while ((zm_send_frame_receive_packet(video_out_ctx, nullptr, pkt)) > 0) { - av_packet_rescale_ts(&pkt, + while ((zm_send_frame_receive_packet(video_out_ctx, nullptr, *pkt)) > 0) { + av_packet_guard pkt_guard{pkt}; + av_packet_rescale_ts(pkt.get(), video_out_ctx->time_base, video_out_stream->time_base); - write_packet(&pkt, video_out_stream); - zm_av_packet_unref(&pkt); + write_packet(pkt.get(), video_out_stream); } // while have buffered frames Debug(1, "Done writing buffered video."); } // end if have delay capability @@ -603,17 +600,17 @@ void VideoStore::flush_codecs() { * At the end of the file, we pass the remaining samples to * the encoder. */ while (zm_resample_get_delay(resample_ctx, audio_out_ctx->sample_rate)) { - zm_resample_audio(resample_ctx, nullptr, out_frame); + zm_resample_audio(resample_ctx, nullptr, out_frame.get()); - if (zm_add_samples_to_fifo(fifo, out_frame)) { + if (zm_add_samples_to_fifo(fifo, out_frame.get())) { // Should probably set the frame size to what is reported FIXME - if (zm_get_samples_from_fifo(fifo, out_frame)) { - if (zm_send_frame_receive_packet(audio_out_ctx, out_frame, pkt) > 0) { - av_packet_rescale_ts(&pkt, + if (zm_get_samples_from_fifo(fifo, out_frame.get())) { + if (zm_send_frame_receive_packet(audio_out_ctx, out_frame.get(), *pkt) > 0) { + av_packet_guard pkt_guard{pkt}; + av_packet_rescale_ts(pkt.get(), audio_out_ctx->time_base, audio_out_stream->time_base); - write_packet(&pkt, audio_out_stream); - zm_av_packet_unref(&pkt); + write_packet(pkt.get(), audio_out_stream); } } // end if data returned from fifo } @@ -629,14 +626,14 @@ void VideoStore::flush_codecs() { // SHould probably set the frame size to what is reported FIXME if (av_audio_fifo_read(fifo, (void **)out_frame->data, frame_size)) { - if (zm_send_frame_receive_packet(audio_out_ctx, out_frame, pkt)) { - 
pkt.stream_index = audio_out_stream->index; + if (zm_send_frame_receive_packet(audio_out_ctx, out_frame.get(), *pkt)) { + av_packet_guard pkt_guard{pkt}; + pkt->stream_index = audio_out_stream->index; - av_packet_rescale_ts(&pkt, + av_packet_rescale_ts(pkt.get(), audio_out_ctx->time_base, audio_out_stream->time_base); - write_packet(&pkt, audio_out_stream); - zm_av_packet_unref(&pkt); + write_packet(pkt.get(), audio_out_stream); } } // end if data returned from fifo } // end while still data in the fifo @@ -645,16 +642,16 @@ void VideoStore::flush_codecs() { avcodec_send_frame(audio_out_ctx, nullptr); while (true) { - if (0 >= zm_receive_packet(audio_out_ctx, pkt)) { + if (0 >= zm_receive_packet(audio_out_ctx, *pkt)) { Debug(1, "No more packets"); break; } + av_packet_guard pkt_guard{pkt}; ZM_DUMP_PACKET(pkt, "raw from encoder"); - av_packet_rescale_ts(&pkt, audio_out_ctx->time_base, audio_out_stream->time_base); + av_packet_rescale_ts(pkt.get(), audio_out_ctx->time_base, audio_out_stream->time_base); ZM_DUMP_STREAM_PACKET(audio_out_stream, pkt, "writing flushed packet"); - write_packet(&pkt, audio_out_stream); - zm_av_packet_unref(&pkt); + write_packet(pkt.get(), audio_out_stream); } // while have buffered frames } // end if audio_out_codec } // end flush_codecs @@ -739,14 +736,6 @@ VideoStore::~VideoStore() { } swr_free(&resample_ctx); } - if (in_frame) { - av_frame_free(&in_frame); - in_frame = nullptr; - } - if (out_frame) { - av_frame_free(&out_frame); - out_frame = nullptr; - } if (converted_in_samples) { av_free(converted_in_samples); converted_in_samples = nullptr; @@ -882,16 +871,15 @@ bool VideoStore::setup_resampler() { /** Create a new frame to store the audio samples. */ if (!in_frame) { - if (!(in_frame = zm_av_frame_alloc())) { + if (!(in_frame = av_frame_ptr{zm_av_frame_alloc()})) { Error("Could not allocate in frame"); return false; } } /** Create a new frame to store the audio samples. 
*/ - if (!(out_frame = zm_av_frame_alloc())) { + if (!(out_frame = av_frame_ptr{zm_av_frame_alloc()})) { Error("Could not allocate out frame"); - av_frame_free(&in_frame); return false; } out_frame->sample_rate = audio_out_ctx->sample_rate; @@ -912,14 +900,10 @@ bool VideoStore::setup_resampler() { 0, nullptr); if (!resample_ctx) { Error("Could not allocate resample context"); - av_frame_free(&in_frame); - av_frame_free(&out_frame); return false; } if ((ret = swr_init(resample_ctx)) < 0) { Error("Could not open resampler %d", ret); - av_frame_free(&in_frame); - av_frame_free(&out_frame); swr_free(&resample_ctx); return false; } @@ -948,7 +932,7 @@ bool VideoStore::setup_resampler() { // Setup the data pointers in the AVFrame if (avcodec_fill_audio_frame( - out_frame, audio_out_ctx->channels, + out_frame.get(), audio_out_ctx->channels, audio_out_ctx->sample_fmt, (const uint8_t *)converted_in_samples, audioSampleBuffer_size, 0) < 0) { @@ -964,6 +948,11 @@ int VideoStore::writePacket(const std::shared_ptr &zm_pkt) { if (zm_pkt->codec_type == AVMEDIA_TYPE_VIDEO) { stream_index = video_out_stream->index; } else if (zm_pkt->codec_type == AVMEDIA_TYPE_AUDIO) { + if (!audio_out_stream) { + Debug(1, "Called writeAudioFramePacket when no audio_out_stream"); + return 0; + // FIXME -ve return codes do not free packet in ffmpeg_camera at the moment + } stream_index = audio_out_stream->index; } else { Error("Unknown stream type in packet (%d)", zm_pkt->codec_type); @@ -972,13 +961,13 @@ int VideoStore::writePacket(const std::shared_ptr &zm_pkt) { auto &queue = reorder_queues[stream_index]; Debug(1, "Queue size for %d is %zu", stream_index, queue.size()); - AVPacket *av_pkt = &zm_pkt->packet; + AVPacket *av_pkt = zm_pkt->packet.get(); // queue the packet bool have_out_of_order = false; auto rit = queue.rbegin(); // Find the previous packet for the stream, and check dts while (rit != queue.rend()) { - AVPacket *p = &((*rit)->packet); + AVPacket *p = ((*rit)->packet).get(); if 
(p->dts <= av_pkt->dts) { Debug(1, "Found in order packet"); // packets are in order, everything is fine @@ -1014,6 +1003,11 @@ int VideoStore::writePacket(const std::shared_ptr &zm_pkt) { } int VideoStore::writeVideoFramePacket(const std::shared_ptr &zm_packet) { + av_packet_guard pkt_guard; +#if HAVE_LIBAVUTIL_HWCONTEXT_H + av_frame_ptr hw_frame; +#endif + frame_count += 1; // if we have to transcode @@ -1038,7 +1032,7 @@ int VideoStore::writeVideoFramePacket(const std::shared_ptr &zm_packet //Go straight to out frame swscale.Convert( zm_packet->image, - zm_packet->buffer, + zm_packet->out_frame->buf[0]->data, zm_packet->codec_imgsize, zm_packet->image->AVPixFormat(), chosen_codec_data->sw_pix_fmt, @@ -1047,14 +1041,14 @@ int VideoStore::writeVideoFramePacket(const std::shared_ptr &zm_packet ); } else if (!zm_packet->in_frame) { Debug(4, "Have no in_frame"); - if (zm_packet->packet.size and !zm_packet->decoded) { + if (zm_packet->packet->size and !zm_packet->decoded) { Debug(4, "Decoding"); if (!zm_packet->decode(video_in_ctx)) { Debug(2, "unable to decode yet."); return 0; } // Go straight to out frame - swscale.Convert(zm_packet->in_frame, out_frame); + swscale.Convert(zm_packet->in_frame.get(), out_frame); } else { Error("Have neither in_frame or image in packet %d!", zm_packet->image_index); @@ -1062,36 +1056,33 @@ int VideoStore::writeVideoFramePacket(const std::shared_ptr &zm_packet } // end if has packet or image } else { // Have in_frame.... 
may need to convert it to out_frame - swscale.Convert(zm_packet->in_frame, zm_packet->out_frame); + swscale.Convert(zm_packet->in_frame.get(), zm_packet->out_frame.get()); } // end if no in_frame } // end if no out_frame - AVFrame *frame = zm_packet->out_frame; + AVFrame *frame = zm_packet->out_frame.get(); #if HAVE_LIBAVUTIL_HWCONTEXT_H if (video_out_ctx->hw_frames_ctx) { int ret; - if (!(hw_frame = av_frame_alloc())) { - ret = AVERROR(ENOMEM); - return ret; + hw_frame = av_frame_ptr{zm_av_frame_alloc()}; + if (!hw_frame) { + return AVERROR(ENOMEM); } - if ((ret = av_hwframe_get_buffer(video_out_ctx->hw_frames_ctx, hw_frame, 0)) < 0) { + if ((ret = av_hwframe_get_buffer(video_out_ctx->hw_frames_ctx, hw_frame.get(), 0)) < 0) { Error("Error code: %s", av_err2str(ret)); - av_frame_free(&hw_frame); return ret; } if (!hw_frame->hw_frames_ctx) { Error("Outof ram!"); - av_frame_free(&hw_frame); return 0; } - if ((ret = av_hwframe_transfer_data(hw_frame, zm_packet->out_frame, 0)) < 0) { + if ((ret = av_hwframe_transfer_data(hw_frame.get(), zm_packet->out_frame.get(), 0)) < 0) { Error("Error while transferring frame data to surface: %s.", av_err2str(ret)); - av_frame_free(&hw_frame); return ret; } - frame = hw_frame; + frame = hw_frame.get(); } // end if hwaccel #endif @@ -1126,10 +1117,6 @@ int VideoStore::writeVideoFramePacket(const std::shared_ptr &zm_packet video_out_ctx->time_base.den); } - av_init_packet(opkt); - opkt->data = nullptr; - opkt->size = 0; - int ret = zm_send_frame_receive_packet(video_out_ctx, frame, *opkt); if (ret <= 0) { if (ret < 0) { @@ -1137,7 +1124,8 @@ int VideoStore::writeVideoFramePacket(const std::shared_ptr &zm_packet } return ret; } - ZM_DUMP_PACKET((*opkt), "packet returned by codec"); + pkt_guard.acquire(opkt); + ZM_DUMP_PACKET(opkt, "packet returned by codec"); // Need to adjust pts/dts values from codec time to stream time if (opkt->pts != AV_NOPTS_VALUE) @@ -1188,15 +1176,11 @@ int VideoStore::writeVideoFramePacket(const 
std::shared_ptr &zm_packet } // end if in_frmae opkt->duration = duration; } else { // Passthrough - AVPacket *ipkt = &zm_packet->packet; - ZM_DUMP_STREAM_PACKET(video_in_stream, (*ipkt), "Doing passthrough, just copy packet"); + AVPacket *ipkt = zm_packet->packet.get(); + ZM_DUMP_STREAM_PACKET(video_in_stream, ipkt, "Doing passthrough, just copy packet"); // Just copy it because the codec is the same - av_init_packet(opkt); - opkt->data = ipkt->data; - opkt->size = ipkt->size; - opkt->flags = ipkt->flags; - opkt->duration = ipkt->duration; - av_packet_ref(opkt, ipkt); + av_packet_ref(opkt.get(), ipkt); + pkt_guard.acquire(opkt); if (ipkt->dts != AV_NOPTS_VALUE) { if (video_first_dts == AV_NOPTS_VALUE) { @@ -1212,25 +1196,17 @@ int VideoStore::writeVideoFramePacket(const std::shared_ptr &zm_packet opkt->pts = ipkt->pts - video_first_dts; } - av_packet_rescale_ts(opkt, video_in_stream->time_base, video_out_stream->time_base); + av_packet_rescale_ts(opkt.get(), video_in_stream->time_base, video_out_stream->time_base); } // end if codec matches - write_packet(opkt, video_out_stream); - zm_av_packet_unref(opkt); - if (hw_frame) av_frame_free(&hw_frame); + write_packet(opkt.get(), video_out_stream); return 1; } // end int VideoStore::writeVideoFramePacket( AVPacket *ipkt ) int VideoStore::writeAudioFramePacket(const std::shared_ptr &zm_packet) { - if (!audio_out_stream) { - Debug(1, "Called writeAudioFramePacket when no audio_out_stream"); - return 0; - // FIXME -ve return codes do not free packet in ffmpeg_camera at the moment - } - - AVPacket *ipkt = &zm_packet->packet; - ZM_DUMP_STREAM_PACKET(audio_in_stream, (*ipkt), "input packet"); + AVPacket *ipkt = zm_packet->packet.get(); + ZM_DUMP_STREAM_PACKET(audio_in_stream, ipkt, "input packet"); if (audio_first_dts == AV_NOPTS_VALUE) { audio_first_dts = ipkt->dts; @@ -1242,24 +1218,24 @@ int VideoStore::writeAudioFramePacket(const std::shared_ptr &zm_packet if (audio_out_codec) { // I wonder if we can get multiple frames 
per packet? Probably - int ret = zm_send_packet_receive_frame(audio_in_ctx, in_frame, *ipkt); + int ret = zm_send_packet_receive_frame(audio_in_ctx, in_frame.get(), *ipkt); if (ret < 0) { Debug(3, "failed to receive frame code: %d", ret); return 0; } zm_dump_frame(in_frame, "In frame from decode"); - AVFrame *input_frame = in_frame; + AVFrame *input_frame = in_frame.get(); - while (zm_resample_audio(resample_ctx, input_frame, out_frame)) { + while (zm_resample_audio(resample_ctx, input_frame, out_frame.get())) { //out_frame->pkt_duration = in_frame->pkt_duration; // resampling doesn't alter duration - if (zm_add_samples_to_fifo(fifo, out_frame) <= 0) + if (zm_add_samples_to_fifo(fifo, out_frame.get()) <= 0) break; // We put the samples into the fifo so we are basically resetting the frame out_frame->nb_samples = audio_out_ctx->frame_size; - if (zm_get_samples_from_fifo(fifo, out_frame) <= 0) + if (zm_get_samples_from_fifo(fifo, out_frame.get()) <= 0) break; out_frame->pts = audio_next_pts; @@ -1267,17 +1243,16 @@ int VideoStore::writeAudioFramePacket(const std::shared_ptr &zm_packet zm_dump_frame(out_frame, "Out frame after resample"); - av_init_packet(opkt); - if (zm_send_frame_receive_packet(audio_out_ctx, out_frame, *opkt) <= 0) + if (zm_send_frame_receive_packet(audio_out_ctx, out_frame.get(), *opkt) <= 0) break; // Scale the PTS of the outgoing packet to be the correct time base - av_packet_rescale_ts(opkt, + av_packet_rescale_ts(opkt.get(), audio_out_ctx->time_base, audio_out_stream->time_base); - write_packet(opkt, audio_out_stream); - zm_av_packet_unref(opkt); + write_packet(opkt.get(), audio_out_stream); + zm_av_packet_unref(opkt.get()); if (zm_resample_get_delay(resample_ctx, out_frame->sample_rate) < out_frame->nb_samples) break; @@ -1285,7 +1260,6 @@ int VideoStore::writeAudioFramePacket(const std::shared_ptr &zm_packet input_frame = nullptr; } // end while there is data in the resampler } else { - av_init_packet(opkt); opkt->data = ipkt->data; 
opkt->size = ipkt->size; opkt->flags = ipkt->flags; @@ -1298,12 +1272,12 @@ int VideoStore::writeAudioFramePacket(const std::shared_ptr &zm_packet opkt->dts = ipkt->dts; } - ZM_DUMP_STREAM_PACKET(audio_in_stream, (*ipkt), "after pts adjustment"); - av_packet_rescale_ts(opkt, audio_in_stream->time_base, audio_out_stream->time_base); - ZM_DUMP_STREAM_PACKET(audio_out_stream, (*opkt), "after stream pts adjustment"); - write_packet(opkt, audio_out_stream); + ZM_DUMP_STREAM_PACKET(audio_in_stream, ipkt, "after pts adjustment"); + av_packet_rescale_ts(opkt.get(), audio_in_stream->time_base, audio_out_stream->time_base); + ZM_DUMP_STREAM_PACKET(audio_out_stream, opkt, "after stream pts adjustment"); + write_packet(opkt.get(), audio_out_stream); - zm_av_packet_unref(opkt); + zm_av_packet_unref(opkt.get()); } // end if encoding or copying return 0; @@ -1321,8 +1295,8 @@ int VideoStore::write_packet(AVPacket *pkt, AVStream *stream) { pkt->dts = last_dts[stream->index]; } else { if ((last_dts[stream->index] != AV_NOPTS_VALUE) and (pkt->dts < last_dts[stream->index])) { - Warning("non increasing dts, fixing. our dts %" PRId64 " stream %d last_dts %" PRId64, - pkt->dts, stream->index, last_dts[stream->index]); + Warning("non increasing dts, fixing. our dts %" PRId64 " stream %d last_dts %" PRId64 ". 
reorder_queue_size=%zu", + pkt->dts, stream->index, last_dts[stream->index], reorder_queue_size); pkt->dts = last_dts[stream->index]; } next_dts[stream->index] = pkt->dts + pkt->duration; @@ -1342,7 +1316,7 @@ int VideoStore::write_packet(AVPacket *pkt, AVStream *stream) { pkt->pts = pkt->dts; } - ZM_DUMP_STREAM_PACKET(stream, (*pkt), "finished pkt"); + ZM_DUMP_STREAM_PACKET(stream, pkt, "finished pkt"); Debug(3, "next_dts for stream %d has become %" PRId64 " last_dts %" PRId64, stream->index, next_dts[stream->index], last_dts[stream->index]); diff --git a/src/zm_videostore.h b/src/zm_videostore.h index a5b0f20df..b04550225 100644 --- a/src/zm_videostore.h +++ b/src/zm_videostore.h @@ -57,12 +57,10 @@ class VideoStore { const AVCodec *audio_out_codec; AVCodecContext *audio_out_ctx; // Move this into the object so that we aren't constantly allocating/deallocating it on the stack - AVPacket *opkt; + av_packet_ptr opkt; - AVFrame *video_in_frame; - AVFrame *in_frame; - AVFrame *out_frame; - AVFrame *hw_frame; + av_frame_ptr in_frame; + av_frame_ptr out_frame; SWScale swscale; unsigned int packets_written; diff --git a/src/zmc.cpp b/src/zmc.cpp index 9e4506447..ae2f5de9c 100644 --- a/src/zmc.cpp +++ b/src/zmc.cpp @@ -65,6 +65,7 @@ possible, this should run at more or less constant speed. #include #include +#include void Usage() { fprintf(stderr, "zmc -d or -r -H -P -p or -f or -m \n"); @@ -252,36 +253,31 @@ int main(int argc, char *argv[]) { monitor->Id()); zmDbDo(sql); - if (monitor->Capturing() == Monitor::CAPTURING_ONDEMAND) { while (!zm_terminate and !monitor->hasViewers()) { Debug(1, "ONDEMAND and no Viewers. 
Sleeping"); std::this_thread::sleep_for(Seconds(1)); + monitor->SetHeartbeatTime(std::chrono::system_clock::now()); } } Seconds sleep_time = Seconds(0); - while (monitor->PrimeCapture() <= 0) { + while ((monitor->PrimeCapture() <= 0) and !zm_terminate) { if (prime_capture_log_count % 60) { - logPrintf(Logger::ERROR + monitor->Importance(), - "Failed to prime capture of initial monitor"); + logPrintf(Logger::ERROR + monitor->Importance(), "Failed to prime capture of initial monitor"); } else { Debug(1, "Failed to prime capture of initial monitor"); } prime_capture_log_count++; - if (zm_terminate) { - break; - } - if (sleep_time < Seconds(60)) { + if (sleep_time < Seconds(ZM_WATCH_MAX_DELAY)) { sleep_time++; } std::this_thread::sleep_for(sleep_time); + monitor->SetHeartbeatTime(std::chrono::system_clock::now()); } - if (zm_terminate) { - break; - } + if (zm_terminate) break; sql = stringtf( "INSERT INTO Monitor_Status (MonitorId,Status) VALUES (%u, 'Connected') ON DUPLICATE KEY UPDATE Status='Connected'", @@ -289,9 +285,7 @@ int main(int argc, char *argv[]) { zmDbDo(sql); } // end foreach monitor - if (zm_terminate) { - break; - } + if (zm_terminate) break; std::vector last_capture_times = std::vector(monitors.size()); Microseconds sleep_time = Microseconds(0); @@ -326,11 +320,13 @@ int main(int argc, char *argv[]) { } monitors[i]->UpdateFPS(); + SystemTimePoint now = std::chrono::system_clock::now(); + monitors[i]->SetHeartbeatTime(now); + // capture_delay is the amount of time we should sleep in useconds to achieve the desired framerate. Microseconds delay = (monitors[i]->GetState() == Monitor::ALARM) ? 
monitors[i]->GetAlarmCaptureDelay() : monitors[i]->GetCaptureDelay(); if (delay != Seconds(0)) { - SystemTimePoint now = std::chrono::system_clock::now(); if (last_capture_times[i].time_since_epoch() != Seconds(0)) { Microseconds delta_time = std::chrono::duration_cast(now - last_capture_times[i]); diff --git a/src/zmu.cpp b/src/zmu.cpp index 99af22e37..b081673a1 100644 --- a/src/zmu.cpp +++ b/src/zmu.cpp @@ -750,33 +750,35 @@ int main(int argc, char *argv[]) { exit_zmu(-1); #endif // ZM_HAS_V4L2 } + } // end if monitor id or not - if ( function & ZMU_LIST ) { - std::string sql = "SELECT `Id`, `Capturing`+0, `Analysing`+0, `Recording`+0 FROM `Monitors`"; - if (!verbose) { - sql += " WHERE `Capturing` != 'None'"; - } - sql += " ORDER BY Id ASC"; + if (function & ZMU_LIST) { + std::string sql = "SELECT `Id`, `Capturing`+0, `Analysing`+0, `Recording`+0 FROM `Monitors`"; + if (!verbose) { + sql += " WHERE `Capturing` != 'None'"; + } + sql += " ORDER BY Id ASC"; - MYSQL_RES *result = zmDbFetch(sql); - if (!result) { - exit_zmu(-1); - } - Debug(1, "Got %" PRIu64 " monitors", static_cast(mysql_num_rows(result))); + MYSQL_RES *result = zmDbFetch(sql); + if (!result) { + exit_zmu(-1); + } + Debug(1, "Got %" PRIu64 " monitors", static_cast(mysql_num_rows(result))); - printf("%4s %9s %9s %9s %5s %8s %13s %5s %5s %9s %9s\n", - "Id", "Capturing", "Analysing", "Recording", "State", "TrgState", - "LastImageTime", "RdIdx", "WrIdx", "LastEvent", "FrameRate"); - for ( int i = 0; MYSQL_ROW dbrow = mysql_fetch_row(result); i++ ) { - int monitor_id = atoi(dbrow[0]); + printf("%4s %9s %9s %9s %5s %8s %13s %5s %5s %9s %9s\n", + "Id", "Capturing", "Analysing", "Recording", "State", "TrgState", + "LastImageTime", "RdIdx", "WrIdx", "LastEvent", "FrameRate"); + for (int i=0; MYSQL_ROW dbrow = mysql_fetch_row(result); i++) { + int monitor_id = atoi(dbrow[0]); + if (mon_id and (monitor_id != mon_id)) continue; + if (!user || user->canAccess(monitor_id)) { int monitor_capturing = 
atoi(dbrow[1]); - if ( !user || user->canAccess(monitor_id) ) { - if (monitor_capturing > Monitor::CAPTURING_NONE) { - std::shared_ptr monitor = Monitor::Load(monitor_id, false, Monitor::QUERY); - if ( monitor && monitor->connect() ) { - SystemTimePoint timestamp = monitor->GetTimestamp(); + if (monitor_capturing > Monitor::CAPTURING_NONE) { + std::shared_ptr monitor = Monitor::Load(monitor_id, false, Monitor::QUERY); + if (monitor && monitor->connect()) { + SystemTimePoint timestamp = monitor->GetTimestamp(); - printf("%4d %9d %9d %9d %5d %8d %13.2f %5d %5d %9" PRIu64 "%10.2f\n", + printf("%4d %9d %9d %9d %5d %8d %13.2f %5d %5d %9" PRIu64 "%10.2f\n", monitor->Id(), monitor->Capturing(), monitor->Analysing(), @@ -788,11 +790,11 @@ int main(int argc, char *argv[]) { monitor->GetLastWriteIndex(), monitor->GetLastEventId(), monitor->GetFPS() - ); - } - } else { - printf("%4d%5d%6d%9d%11ld.%02ld%6d%6d%8d%8.2f\n", - mon_id, + ); + } + } else { + printf("%4d%5d%6d%9d%11ld.%02ld%6d%6d%8d%8.2f\n", + monitor_id, function, 0, 0, @@ -801,13 +803,12 @@ int main(int argc, char *argv[]) { 0, 0, 0.0 - ); - } // end if function filter - } // endif !user || canAccess(mon_id) - } // end foreach row - mysql_free_result(result); - } // end if function && ZMU_LIST - } // end if monitor id or not + ); + } // end if function filter + } // endif !user || canAccess(mon_id) + } // end foreach row + mysql_free_result(result); + } // end if function && ZMU_LIST delete user; exit_zmu(0); diff --git a/utils/packpack/startpackpack.sh b/utils/packpack/startpackpack.sh index 9737c794a..8cee2fe20 100755 --- a/utils/packpack/startpackpack.sh +++ b/utils/packpack/startpackpack.sh @@ -369,7 +369,7 @@ elif [ "${OS}" == "debian" ] || [ "${OS}" == "ubuntu" ] || [ "${OS}" == "raspbia setdebpkgname movecrud - if [ "${DIST}" == "bionic" ] || [ "${DIST}" == "focal" ] || [ "${DIST}" == "hirsute" ] || [ "${DIST}" == "impish" ] || [ "${DIST}" == "buster" ] || [ "${DIST}" == "bullseye" ]; then + if [ "${DIST}" 
== "bionic" ] || [ "${DIST}" == "focal" ] || [ "${DIST}" == "hirsute" ] || [ "${DIST}" == "impish" ] || [ "${DIST}" == "jammy" ] || [ "${DIST}" == "buster" ] || [ "${DIST}" == "bullseye" ]; then ln -sfT distros/ubuntu2004 debian elif [ "${DIST}" == "beowulf" ]; then ln -sfT distros/beowulf debian diff --git a/version b/version index 05f79ff79..b49b97a4b 100644 --- a/version +++ b/version @@ -1 +1 @@ -1.37.18 +1.37.19 diff --git a/web/ajax/events.php b/web/ajax/events.php index 3ee83eb1f..1bb4581e7 100644 --- a/web/ajax/events.php +++ b/web/ajax/events.php @@ -154,8 +154,7 @@ function queryRequest($filter, $search, $advsearch, $sort, $offset, $order, $lim 'updated' => $dateTimeFormatter->format(time()) ); - $failed = !$filter->test_pre_sql_conditions(); - if ($failed) { + if (!$filter->test_pre_sql_conditions()) { ZM\Debug('Pre conditions failed, not doing sql'); return $data; } @@ -193,7 +192,7 @@ function queryRequest($filter, $search, $advsearch, $sort, $offset, $order, $lim $col_str = 'E.*, M.Name AS Monitor'; $sql = 'SELECT ' .$col_str. ' FROM `Events` AS E INNER JOIN Monitors AS M ON E.MonitorId = M.Id'.$where.($sort?' ORDER BY '.$sort.' 
'.$order:''); - if ($filter->limit() and !count($filter->pre_sql_conditions()) and !count($filter->post_sql_conditions())) { + if ($filter->limit() and !count($filter->post_sql_conditions())) { $sql .= ' LIMIT '.$filter->limit(); } diff --git a/web/ajax/stream.php b/web/ajax/stream.php index fe2bd52fc..492d8d8bd 100644 --- a/web/ajax/stream.php +++ b/web/ajax/stream.php @@ -158,7 +158,7 @@ if ($have_semaphore !== false) { ajaxResponse(array('status'=>$data)); break; case MSG_DATA_EVENT : - if ( version_compare( phpversion(), '5.6.0', '<') ) { + if ( PHP_INT_SIZE===4 || version_compare( phpversion(), '5.6.0', '<') ) { ZM\Debug('Using old unpack methods to handle 64bit event id'); $data = unpack('ltype/ieventlow/ieventhigh/dduration/dprogress/irate/izoom/Cpaused', $msg); $data['event'] = $data['eventhigh'] << 32 | $data['eventlow']; diff --git a/web/includes/Filter.php b/web/includes/Filter.php index 1b29e66bf..5be6c252a 100644 --- a/web/includes/Filter.php +++ b/web/includes/Filter.php @@ -311,33 +311,21 @@ class Filter extends ZM_Object { } public function test_pre_sql_conditions() { - if ( !count($this->pre_sql_conditions()) ) { - return true; - } # end if pre_sql_conditions - - $failed = false; - foreach ( $this->pre_sql_conditions() as $term ) { - if ( !$term->test() ) { - $failed = true; - break; + if (count($this->pre_sql_conditions())) { + foreach ($this->pre_sql_conditions() as $term) { + if (!$term->test()) return false; } - } - return $failed; + } # end if pre_sql_conditions + return true; } public function test_post_sql_conditions($event) { - if ( !count($this->post_sql_conditions()) ) { - return true; - } # end if pre_sql_conditions - - $failed = true; - foreach ( $this->post_sql_conditions() as $term ) { - if ( !$term->test($event) ) { - $failed = false; - break; + if (count($this->post_sql_conditions())) { + foreach ($this->post_sql_conditions() as $term) { + if (!$term->test($event)) return false; } - } - return $failed; + } # end if 
pre_sql_conditions + return true; } function tree() { diff --git a/web/includes/FilterTerm.php b/web/includes/FilterTerm.php index 2e90d969e..bd09b5647 100644 --- a/web/includes/FilterTerm.php +++ b/web/includes/FilterTerm.php @@ -77,7 +77,7 @@ class FilterTerm { switch ( $this->attr ) { case 'AlarmedZoneId': - $value = '(SELECT * FROM Stats WHERE EventId=E.Id AND ZoneId='.$value.')'; + $value = '(SELECT * FROM Stats WHERE EventId=E.Id AND ZoneId='.$value.' AND Score > 0)'; break; case 'ExistsInFileSystem': $value = ''; diff --git a/web/includes/Monitor.php b/web/includes/Monitor.php index d583c0525..2deac9be5 100644 --- a/web/includes/Monitor.php +++ b/web/includes/Monitor.php @@ -154,6 +154,7 @@ public static function getStatuses() { 'ONVIF_Username' => '', 'ONVIF_Password' => '', 'ONVIF_Options' => '', + 'ONVIF_Alarm_Text' => '', 'ONVIF_Event_Listener' => '0', 'use_Amcrest_API' => '0', 'Device' => '', diff --git a/web/includes/actions/monitor.php b/web/includes/actions/monitor.php index edc9ab1bd..b27ff4443 100644 --- a/web/includes/actions/monitor.php +++ b/web/includes/actions/monitor.php @@ -109,7 +109,7 @@ if ($action == 'save') { } } # end foreach type - if ($newMonitor['ServerId'] == 'auto') { + if (isset($newMonitor['ServerId']) and ($newMonitor['ServerId'] == 'auto')) { $newMonitor['ServerId'] = dbFetchOne( 'SELECT Id FROM Servers WHERE Status=\'Running\' ORDER BY FreeMem DESC, CpuLoad ASC LIMIT 1', 'Id'); ZM\Debug('Auto selecting server: Got ' . 
$newMonitor['ServerId']); diff --git a/web/lang/es_la.php b/web/lang/es_la.php index 63c83cef0..3ce632091 100644 --- a/web/lang/es_la.php +++ b/web/lang/es_la.php @@ -907,7 +907,7 @@ $SLANG = array( 'Showing Analysis' => 'Mostrar Analisis', 'ConfirmDeleteTitle' => 'Borrar Seleccionados', 'Continuous' => 'Continuo', - + 'ONVIF_Alarm_Text' => 'Texto Alarma ONVIF', //added 18/07/2022 ); diff --git a/web/skins/classic/views/js/event.js b/web/skins/classic/views/js/event.js index e50ea65f3..81155b25e 100644 --- a/web/skins/classic/views/js/event.js +++ b/web/skins/classic/views/js/event.js @@ -922,7 +922,7 @@ function initPage() { vid.on('volumechange', function() { setCookie('volume', vid.volume(), 3600); }); - var cookie = getCookie('volume'); + const cookie = getCookie('volume'); if (cookie) vid.volume(cookie); vid.on('timeupdate', function() { @@ -930,7 +930,6 @@ function initPage() { }); vid.on('ratechange', function() { rate = vid.playbackRate() * 100; - console.log("rate change " + rate); $j('select[name="rate"]').val(rate); setCookie('zmEventRate', rate, 3600); }); @@ -947,7 +946,7 @@ function initPage() { if (!$j('#videoFeed')) { console.log('No element with id tag videoFeed found.'); } else { - var streamImg = $j('#videoFeed img'); + let streamImg = $j('#videoFeed img'); if (!streamImg) { streamImg = $j('#videoFeed object'); } diff --git a/web/skins/classic/views/js/events.js b/web/skins/classic/views/js/events.js index af5c67863..f1b76ff14 100644 --- a/web/skins/classic/views/js/events.js +++ b/web/skins/classic/views/js/events.js @@ -123,7 +123,11 @@ function manageDelConfirmModalBtns() { evt.preventDefault(); const selections = getIdSelections(); - deleteEvents(selections); + if (!selections.length) { + alert('Please select events to delete.'); + } else { + deleteEvents(selections); + } }); // Manage the CANCEL modal button diff --git a/web/skins/classic/views/js/monitor.js b/web/skins/classic/views/js/monitor.js index ebfd7625f..bf5f5b7ef 100644 --- 
a/web/skins/classic/views/js/monitor.js +++ b/web/skins/classic/views/js/monitor.js @@ -294,7 +294,7 @@ function initPage() { } }); - if ( ZM_OPT_USE_GEOLOCATION ) { + if ( parseInt(ZM_OPT_USE_GEOLOCATION) ) { if ( window.L ) { const form = document.getElementById('contentForm'); const latitude = form.elements['newMonitor[Latitude]'].value; diff --git a/web/skins/classic/views/monitor.php b/web/skins/classic/views/monitor.php index 34f55e3c8..01a231c18 100644 --- a/web/skins/classic/views/monitor.php +++ b/web/skins/classic/views/monitor.php @@ -578,6 +578,10 @@ switch ($name) { + + + + translate('Enabled'), '0'=>translate('Disabled')), $monitor->ONVIF_Event_Listener()); ?>