diff --git a/CMakeLists.txt b/CMakeLists.txt index f340a2c6..bcf7b861 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,9 @@ project(motion C) cmake_minimum_required(VERSION 2.8) + +unset(SQLITE3_FOUND CACHE ) +unset(FFMPEG_FOUND CACHE ) + include(CheckIncludeFiles) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99 -Wall") @@ -24,7 +28,7 @@ option(WITH_MMAL "enable MMAL (Multi-Media Abstraction Layer API) support for Ra option(WITH_MYSQL "enable MySQL database support" ${MYSQL_FOUND}) option(WITH_PGSQL "enable PostgreSQL database support" ${PostgreSQL_FOUND}) option(WITH_SQLITE3 "enable SQLite database support" ${SQLITE3_FOUND}) -option(WITH_V4L "enable Video 4 Linux (2) webcam support" ON) +option(WITH_V4L2 "enable Video 4 Linux (2) webcam support" ON) option(WITH_PWCBSD "enable PWC webcam support (BSD only)" OFF) set(HAVE_FFMPEG ${WITH_FFMPEG}) @@ -32,15 +36,30 @@ set(HAVE_MMAL ${WITH_MMAL}) set(HAVE_MYSQL ${WITH_MYSQL}) set(HAVE_PGSQL ${WITH_PGSQL}) set(HAVE_SQLITE3 ${WITH_SQLITE3}) -check_include_files("linux/videodev.h" HAVE_LINUX_VIDEODEV_H) -check_include_files("linux/videodev2.h" HAVE_LINUX_VIDEODEV2_H) -check_include_files("sys/videoio.h" HAVE_SYS_VIDEOIO_H) -if(${WITH_V4L}) - set(WITHOUT_V4L OFF) -else(${WITH_V4L}) - set(WITHOUT_V4L ON) -endif(${WITH_V4L}) -set(MOTION_V4L2 ${HAVE_LINUX_VIDEODEV2_H}) + +set(WITHOUT_V4L2 ON) + if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT(WITH_PWCBSD)) + check_include_files("dev/bktr/ioctl_bt848.h" HAVE_FREEBSD_BT848) + check_include_files("dev/bktr/ioctl_meteor.h" HAVE_FREEBSD_METEOR) + if(HAVE_FREEBSD_BT848 AND HAVE_FREEBSD_METEOR AND WITH_V4L2 ) + set(WITHOUT_V4L2 OFF ) + endif(HAVE_FREEBSD_BT848 AND HAVE_FREEBSD_METEOR AND WITH_V4L2 ) + endif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT(WITH_PWCBSD)) + + if(CMAKE_SYSTEM_NAME MATCHES "NetBSD" OR CMAKE_SYSTEM_NAME MATCHES "OpenBSD") + check_include_files("dev/ic/bt8xx.h" HAVE_NETOPEN_BT8XX) + if(HAVE_NETOPEN_BT8XX AND WITH_V4L2) + set(WITHOUT_V4L2 OFF ) + 
endif(HAVE_NETOPEN_BT8XX AND WITH_V4L2) + endif(CMAKE_SYSTEM_NAME MATCHES "NetBSD" OR CMAKE_SYSTEM_NAME MATCHES "OpenBSD") + + if(CMAKE_SYSTEM_NAME MATCHES "Linux") + check_include_files("linux/videodev2.h" HAVE_LINUX_VIDEODEV2_H) + if(HAVE_LINUX_VIDEODEV2_H AND WITH_V4L2) + set(WITHOUT_V4L2 OFF ) + endif(HAVE_LINUX_VIDEODEV2_H AND WITH_V4L2) + endif(CMAKE_SYSTEM_NAME MATCHES "Linux") + set(PWCBSD WITH_PWCBSD) configure_file(config.h.in "${CMAKE_CURRENT_SOURCE_DIR}/config.h") @@ -58,11 +77,11 @@ configure_file(motion.init-FreeBSD.sh.in motion.init-FreeBSD.sh) list(APPEND SRC_FILES conf.c motion.c alg.c draw.c event.c ffmpeg.c jpegutils.c logger.c md5.c netcam.c netcam_ftp.c netcam_jpeg.c netcam_rtsp.c netcam_wget.c - picture.c rotate.c stream.c track.c vloopback_motion.c webhttpd.c) + picture.c rotate.c stream.c track.c vloopback_motion2.c webhttpd.c) if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT(WITH_PWCBSD)) list(APPEND SRC_FILES video_freebsd.c) else() - list(APPEND SRC_FILES video2.c video.c video_common.c) + list(APPEND SRC_FILES video2.c video_common.c) endif() include_directories(${JPEG_INCLUDE_DIR}) @@ -95,6 +114,23 @@ if(WITH_MMAL) list(APPEND SRC_FILES mmalcam.c) endif(WITH_MMAL) +message("-- Configuration: " ) +message("-- Package Name: " ${PACKAGE_NAME}) +message("-- Package Version: " ${PACKAGE_VERSION}) +message("-- Build System: " ${CMAKE_SYSTEM_NAME}) +message("-- Config Dir: " ${sysconfdir}) +message("-- Bin Dir: " ${BIN_PATH}) +message("-- JPEG Include Dir: " ${JPEG_INCLUDE_DIR}) +message("-- WITH_V4L2: " ${WITH_V4L2}) +message("-- Without V4L2: " ${WITHOUT_V4L2}) +message("-- With PWC BSD: " ${WITH_PWCBSD}) +message("-- MYSQL: " ${WITH_MYSQL} " Include Dir: " ${MYSQL_INCLUDE_DIRS} ) +message("-- SQLITE3: " ${WITH_SQLITE3} " Include Dir: " ${SQLITE3_INCLUDE_DIRS} ) +message("-- PGSQL: " ${WITH_PGSQL} " Include Dir: " ${PostgreSQL_INCLUDE_DIRS} ) +message("-- FFMPEG: " ${WITH_FFMPEG} " Include Dir: " ${FFMPEG_INCLUDE_DIRS} ) +message("-- MMAL: " 
${WITH_MMAL} ) +message("-- CFLAGS: " ${CMAKE_C_FLAGS} ) + add_executable(motion ${SRC_FILES}) target_link_libraries(motion ${LINK_LIBRARIES}) @@ -105,3 +141,4 @@ install(FILES motion.service motion.spec motion.init-Debian motion.init-FreeBSD. DESTINATION "lib/${PROJECT_NAME}/examples" COMPONENT examples) install(FILES CHANGELOG COPYING CREDITS README.md motion_guide.html mask1.png normal.jpg outputmotion1.jpg outputnormal1.jpg DESTINATION "lib/${PROJECT_NAME}/doc" COMPONENT doc) + diff --git a/Makefile.in b/Makefile.in index 523dbafb..7c2a242f 100644 --- a/Makefile.in +++ b/Makefile.in @@ -35,11 +35,12 @@ CFLAGS = @CFLAGS@ -Wall -DVERSION=\"@PACKAGE_VERSION@\" -Dsysconfdir=\"$(s LDFLAGS = @LDFLAGS@ LIBS = @LIBS@ @MMAL_LIBS@ @FFMPEG_LIBS@ VIDEO_OBJ = @VIDEO@ -OBJ = motion.o logger.o conf.o draw.o jpegutils.o vloopback_motion.o $(VIDEO_OBJ) \ +OBJ = motion.o logger.o conf.o draw.o jpegutils.o \ + vloopback_motion2.o $(VIDEO_OBJ) \ netcam.o netcam_ftp.o netcam_jpeg.o netcam_wget.o track.o \ alg.o event.o picture.o rotate.o webhttpd.o \ - stream.o md5.o netcam_rtsp.o \ - @FFMPEG_OBJ@ @MMAL_OBJ@ + stream.o md5.o netcam_rtsp.o ffmpeg.o \ + @MMAL_OBJ@ SRC = $(OBJ:.o=.c) DOC = CHANGELOG COPYING CREDITS README.md motion_guide.html mask1.png normal.jpg outputmotion1.jpg outputnormal1.jpg EXAMPLES = *.conf motion.service diff --git a/conf.c b/conf.c index d5021488..71f85216 100644 --- a/conf.c +++ b/conf.c @@ -32,7 +32,7 @@ #if (defined(__FreeBSD__) && !defined(PWCBSD)) #include "video_freebsd.h" #else -#include "video.h" +#include "video2.h" #endif #define EXTENSION ".conf" @@ -116,6 +116,7 @@ struct config conf_template = { .on_event_start = NULL, .on_event_end = NULL, .mask_file = NULL, + .mask_privacy = NULL, .smart_mask_speed = 0, #if defined(HAVE_MYSQL) || defined(HAVE_PGSQL) || defined(HAVE_SQLITE3) .sql_log_image = 1, @@ -292,6 +293,9 @@ config_param config_params[] = { "# V4L2_PIX_FMT_YUYV : 15 'YUYV'\n" "# V4L2_PIX_FMT_YUV422P : 16 '422P'\n" "# V4L2_PIX_FMT_YUV420 
: 17 'YU12'\n" + "# V4L2_PIX_FMT_Y10 : 18 'Y10'\n" + "# V4L2_PIX_FMT_Y12 : 19 'Y12'\n" + "# V4L2_PIX_FMT_GREY : 20 'GREY'\n" "#", 0, CONF_OFFSET(v4l2_palette), @@ -620,6 +624,15 @@ config_param config_params[] = { print_string }, { + "mask_privacy", + "# PGM file to completely mask out an area of the image.\n" + "# Full path name to. (Default: not defined)", + 0, + CONF_OFFSET(mask_privacy), + copy_string, + print_string + }, + { "smart_mask_speed", "# Dynamically create a mask file during operation (default: 0)\n" "# Adjust speed of mask changes from 0 (off) to 10 (fast)", @@ -750,7 +763,6 @@ config_param config_params[] = { copy_string, print_string }, -#ifdef HAVE_FFMPEG { "ffmpeg_output_movies", "\n############################################################\n" @@ -843,7 +855,6 @@ config_param config_params[] = { copy_bool, print_bool }, -#endif /* HAVE_FFMPEG */ { "use_extpipe", "\n############################################################\n" @@ -1024,7 +1035,6 @@ config_param config_params[] = { copy_string, print_string }, -#ifdef HAVE_FFMPEG { "movie_filename", "# File path for motion triggered ffmpeg films (movies) relative to target_dir\n" @@ -1050,7 +1060,6 @@ config_param config_params[] = { copy_string, print_string }, -#endif /* HAVE_FFMPEG */ { "ipv6_enabled", "\n############################################################\n" diff --git a/conf.h b/conf.h index 60ab991a..fefc436b 100644 --- a/conf.h +++ b/conf.h @@ -100,6 +100,7 @@ struct config { char *on_event_start; char *on_event_end; const char *mask_file; + const char *mask_privacy; int smart_mask_speed; int sql_log_image; int sql_log_snapshot; diff --git a/config.h.in b/config.h.in index dd3f8d32..5b2dae15 100644 --- a/config.h.in +++ b/config.h.in @@ -9,10 +9,8 @@ #cmakedefine HAVE_PGSQL #cmakedefine HAVE_SQLITE3 #cmakedefine PWCBSD -#cmakedefine MOTION_V4L2 -#cmakedefine WITHOUT_V4L +#cmakedefine WITHOUT_V4L2 /* Optional headers */ -#cmakedefine HAVE_LINUX_VIDEODEV_H #cmakedefine 
HAVE_LINUX_VIDEODEV2_H #cmakedefine HAVE_SYS_VIDEOIO_H diff --git a/configure.ac b/configure.ac index bcfd794a..cc33cddc 100644 --- a/configure.ac +++ b/configure.ac @@ -40,7 +40,7 @@ if test "${Darwin}" = ""; then FreeBSD=`uname -a | grep "FreeBSD"` if test "${FreeBSD}" = ""; then AC_MSG_RESULT(no) - VIDEO="video.o video2.o video_common.o" + VIDEO="video2.o video_common.o" else AC_MSG_RESULT(yes) if test "${LINUXTHREADS}" = "no"; then @@ -52,7 +52,7 @@ if test "${Darwin}" = ""; then fi if test "${PWCBSD}" != "no"; then - VIDEO="video.o video2.o video_common.o" + VIDEO="video2.o video_common.o" TEMP_CFLAGS="${CFLAGS} -I/usr/local/include -DPWCBSD" else VIDEO="video_freebsd.o" @@ -71,7 +71,7 @@ else VIDEO="video_freebsd.o" FINK_LIB="-L/sw/lib" Darwin="yes" - V4L="no" + V4L2="no" AC_MSG_RESULT($Darwin) fi @@ -94,43 +94,43 @@ if test "${FreeBSD}" != "" && test "${PWCBSD}" = "no"; then else AC_MSG_RESULT(no) fi -# -# Check to Exclude BKTR -# -BKTR="yes" -AC_ARG_WITH(bktr, -[ --without-bktr Exclude to use bktr subsystem , that usually useful - for devices as network cameras ( ONLY used in *BSD). - ] - , -BKTR="$withval" -) - - if test "${BKTR}" = "no"; then - TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L" - fi - + # + # Check to Exclude BKTR + # + BKTR="yes" + AC_ARG_WITH(bktr, + [ --without-bktr Exclude to use bktr subsystem , that usually useful + for devices as network cameras ( ONLY used in *BSD). + ] , + BKTR="$withval") + if test "${BKTR}" = "no"; then + TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L2" + fi else + # + # Check to Exclude V4L2 + # + V4L2="yes" + AC_ARG_WITH(v4l2, + [ --without-v4l2 Exclude using v4l2 (video4linux2) subsystem. + Makes Motion so it only supports network cameras. + ], + V4L2="$withval" ) -# -# Check to Exclude V4L -# -V4L="yes" -AC_ARG_WITH(v4l, -[ --without-v4l Exclude using v4l (video4linux) subsystem. - Makes Motion so it only supports network cameras. 
- ], -V4L="$withval" -) - + if test "${V4L2}" = "no"; then + TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L2" + else + AC_CHECK_HEADERS(linux/videodev2.h,[V4L2="yes"],[V4L2="no"]) + AC_MSG_CHECKING(for V4L2 support) + if test "${V4L2}" = "no"; then + AC_MSG_RESULT(no) + TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L2" + else + AC_MSG_RESULT(yes) + fi + fi fi - -if test "${V4L}" = "no"; then - TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L" -fi - - if test "${FreeBSD}" != "" && test "${LINUXTHREADS}" != "no" ; then AC_MSG_CHECKING(for linuxthreads) @@ -740,34 +740,7 @@ fi #Checks for header files. AC_HEADER_STDC -AC_CHECK_HEADERS(stdio.h unistd.h stdint.h fcntl.h time.h signal.h sys/ioctl.h sys/mman.h linux/videodev.h linux/videodev2.h sys/param.h sys/types.h sys/videoio.h) - -# Check if v4l2 is available -SUPPORTED_V4L2=false - -if test "${V4L}" = "no"; then - AC_MSG_CHECKING(for V42L support) - AC_MSG_RESULT(skipping) -else - AC_CHECK_TYPE([struct v4l2_buffer], - [SUPPORTED_V4L2=true], - [SUPPORTED_V4L2=false], - [#include - #ifdef HAVE_LINUX_VIDEODEV2_H - #include - #elif HAVE_LINUX_VIDEODEV_H - #include - #elif HAVE_SYS_VIDEOIO_H - #include - #endif]) - AC_MSG_CHECKING(for V42L support) - if test x$SUPPORTED_V4L2 = xtrue; then - AC_MSG_RESULT(yes) - TEMP_CFLAGS="${TEMP_CFLAGS} -DMOTION_V4L2" - else - AC_MSG_RESULT(no) - fi -fi +AC_CHECK_HEADERS(stdio.h unistd.h stdint.h fcntl.h time.h signal.h sys/ioctl.h sys/mman.h linux/videodev2.h sys/param.h sys/types.h) OPTIMIZECPU="yes" @@ -1121,13 +1094,7 @@ if test "${FreeBSD}" != ""; then fi else - if test "${V4L}" = "yes"; then - echo "V4L support: Yes" - else - echo "V4L support: No" - fi - - if test x$SUPPORTED_V4L2 = xtrue; then + if test "$V4L2" = "yes"; then echo "V4L2 support: Yes" else echo "V4L2 support: No" diff --git a/event.c b/event.c index 20906598..e367684c 100644 --- a/event.c +++ b/event.c @@ -12,7 +12,7 @@ #include "picture.h" /* already includes motion.h */ #include "event.h" #if (!defined(__FreeBSD__)) -#include 
"video.h" +#include "video2.h" #endif /* Various functions (most doing the actual action) */ @@ -65,7 +65,7 @@ static const char *eventToString(motion_event e) static void exec_command(struct context *cnt, char *command, char *filename, int filetype) { char stamp[PATH_MAX]; - mystrftime(cnt, stamp, sizeof(stamp), command, &cnt->current_image->timestamp_tm, filename, filetype); + mystrftime(cnt, stamp, sizeof(stamp), command, &cnt->current_image->timestamp_tv, filename, filetype); if (!fork()) { int i; @@ -100,7 +100,7 @@ static void exec_command(struct context *cnt, char *command, char *filename, int static void event_newfile(struct context *cnt ATTRIBUTE_UNUSED, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy ATTRIBUTE_UNUSED, char *filename, void *ftype, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: File of type %ld saved to: %s", (unsigned long)ftype, filename); @@ -111,7 +111,7 @@ static void event_beep(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy ATTRIBUTE_UNUSED, char *filename ATTRIBUTE_UNUSED, void *ftype ATTRIBUTE_UNUSED, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { if (!cnt->conf.quiet) printf("\a"); @@ -128,7 +128,7 @@ static void event_beep(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, static void on_picture_save_command(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy ATTRIBUTE_UNUSED, - char *filename, void *arg, struct tm *tm ATTRIBUTE_UNUSED) + char *filename, void *arg, struct timeval *tv1 ATTRIBUTE_UNUSED) { int filetype = (unsigned long)arg; @@ -143,7 +143,7 @@ static void on_motion_detected_command(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy1 ATTRIBUTE_UNUSED, char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { if (cnt->conf.on_motion_detected) 
exec_command(cnt, cnt->conf.on_motion_detected, NULL, 0); @@ -154,7 +154,7 @@ static void on_motion_detected_command(struct context *cnt, static void event_sqlnewfile(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy ATTRIBUTE_UNUSED, - char *filename, void *arg, struct tm *tm ATTRIBUTE_UNUSED) + char *filename, void *arg, struct timeval *tv1 ATTRIBUTE_UNUSED) { int sqltype = (unsigned long)arg; @@ -170,7 +170,7 @@ static void event_sqlnewfile(struct context *cnt, char sqlquery[PATH_MAX]; mystrftime(cnt, sqlquery, sizeof(sqlquery), cnt->conf.sql_query, - &cnt->current_image->timestamp_tm, filename, sqltype); + &cnt->current_image->timestamp_tv, filename, sqltype); #ifdef HAVE_MYSQL if (!strcmp(cnt->conf.database_type, "mysql")) { @@ -260,7 +260,7 @@ static void on_area_command(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy1 ATTRIBUTE_UNUSED, char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { if (cnt->conf.on_area_detected) exec_command(cnt, cnt->conf.on_area_detected, NULL, 0); @@ -270,7 +270,7 @@ static void on_event_start_command(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy1 ATTRIBUTE_UNUSED, char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { if (cnt->conf.on_event_start) exec_command(cnt, cnt->conf.on_event_start, NULL, 0); @@ -280,7 +280,7 @@ static void on_event_end_command(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy1 ATTRIBUTE_UNUSED, char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { if (cnt->conf.on_event_end) exec_command(cnt, cnt->conf.on_event_end, NULL, 0); @@ -290,7 +290,7 @@ static void event_stop_stream(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned 
char *dummy1 ATTRIBUTE_UNUSED, char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { if ((cnt->conf.stream_port) && (cnt->stream.socket != -1)) stream_stop(cnt); @@ -299,25 +299,25 @@ static void event_stop_stream(struct context *cnt, static void event_stream_put(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED) { if (cnt->conf.stream_port) stream_put(cnt, img); } -#if defined(HAVE_LINUX_VIDEODEV_H) && !defined(WITHOUT_V4L) && !defined(__FreeBSD__) +#if !defined(WITHOUT_V4L2) && !defined(__FreeBSD__) static void event_vid_putpipe(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *img, char *dummy ATTRIBUTE_UNUSED, void *devpipe, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { if (*(int *)devpipe >= 0) { if (vid_putpipe(*(int *)devpipe, img, cnt->imgs.size) == -1) MOTION_LOG(ERR, TYPE_EVENTS, SHOW_ERRNO, "%s: Failed to put image into video pipe"); } } -#endif /* !WITHOUT_V4L && !__FreeBSD__ */ +#endif /* !WITHOUT_V4L2 && !__FreeBSD__ */ const char *imageext(struct context *cnt) { @@ -330,7 +330,7 @@ const char *imageext(struct context *cnt) static void event_image_detect(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *newimg, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv) { char fullfilename[PATH_MAX]; char filename[PATH_MAX]; @@ -347,7 +347,7 @@ static void event_image_detect(struct context *cnt, else imagepath = DEF_IMAGEPATH; - mystrftime(cnt, filename, sizeof(filename), imagepath, currenttime_tm, NULL, 0); + mystrftime(cnt, filename, sizeof(filename), imagepath, currenttime_tv, NULL, 0); snprintf(fullfilename, 
PATH_MAX, "%s/%s.%s", cnt->conf.filepath, filename, imageext(cnt)); put_picture(cnt, fullfilename, newimg, FTYPE_IMAGE); @@ -357,7 +357,7 @@ static void event_image_detect(struct context *cnt, static void event_imagem_detect(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *newimg ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv) { struct config *conf = &cnt->conf; char fullfilenamem[PATH_MAX]; @@ -376,7 +376,7 @@ static void event_imagem_detect(struct context *cnt, else imagepath = DEF_IMAGEPATH; - mystrftime(cnt, filename, sizeof(filename), imagepath, currenttime_tm, NULL, 0); + mystrftime(cnt, filename, sizeof(filename), imagepath, currenttime_tv, NULL, 0); /* motion images gets same name as normal images plus an appended 'm' */ snprintf(filenamem, PATH_MAX, "%sm", filename); snprintf(fullfilenamem, PATH_MAX, "%s/%s.%s", cnt->conf.filepath, filenamem, imageext(cnt)); @@ -388,7 +388,7 @@ static void event_imagem_detect(struct context *cnt, static void event_image_snapshot(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv) { char fullfilename[PATH_MAX]; char filename[PATH_MAX]; @@ -411,7 +411,7 @@ static void event_image_snapshot(struct context *cnt, else snappath = DEF_SNAPPATH; - mystrftime(cnt, filepath, sizeof(filepath), snappath, currenttime_tm, NULL, 0); + mystrftime(cnt, filepath, sizeof(filepath), snappath, currenttime_tv, NULL, 0); snprintf(filename, PATH_MAX, "%s.%s", filepath, imageext(cnt)); snprintf(fullfilename, PATH_MAX, "%s/%s", cnt->conf.filepath, filename); put_picture(cnt, fullfilename, img, FTYPE_IMAGE_SNAPSHOT); @@ -429,7 +429,7 @@ static void event_image_snapshot(struct context *cnt, return; } } else { - mystrftime(cnt, 
filepath, sizeof(filepath), cnt->conf.snappath, currenttime_tm, NULL, 0); + mystrftime(cnt, filepath, sizeof(filepath), cnt->conf.snappath, currenttime_tv, NULL, 0); snprintf(filename, PATH_MAX, "%s.%s", filepath, imageext(cnt)); snprintf(fullfilename, PATH_MAX, "%s/%s", cnt->conf.filepath, filename); remove(fullfilename); @@ -442,7 +442,7 @@ static void event_image_snapshot(struct context *cnt, static void event_camera_lost(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *img ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm ATTRIBUTE_UNUSED) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED) { if (cnt->conf.on_camera_lost) exec_command(cnt, cnt->conf.on_camera_lost, NULL, 0); @@ -451,7 +451,7 @@ static void event_camera_lost(struct context *cnt, static void on_movie_end_command(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy ATTRIBUTE_UNUSED, char *filename, - void *arg, struct tm *tm ATTRIBUTE_UNUSED) + void *arg, struct timeval *tv1 ATTRIBUTE_UNUSED) { int filetype = (unsigned long) arg; @@ -462,7 +462,7 @@ static void on_movie_end_command(struct context *cnt, static void event_extpipe_end(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED) { if (cnt->extpipe_open) { cnt->extpipe_open = 0; @@ -478,7 +478,7 @@ static void event_extpipe_end(struct context *cnt, static void event_create_extpipe(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv) { if ((cnt->conf.useextpipe) && (cnt->conf.extpipe)) { char stamp[PATH_MAX] = ""; @@ 
-497,7 +497,7 @@ static void event_create_extpipe(struct context *cnt, moviepath); } - mystrftime(cnt, stamp, sizeof(stamp), moviepath, currenttime_tm, NULL, 0); + mystrftime(cnt, stamp, sizeof(stamp), moviepath, currenttime_tv, NULL, 0); snprintf(cnt->extpipefilename, PATH_MAX - 4, "%s/%s", cnt->conf.filepath, stamp); /* Open a dummy file to check if path is correct */ @@ -522,12 +522,11 @@ static void event_create_extpipe(struct context *cnt, myfclose(fd_dummy); unlink(cnt->extpipefilename); - mystrftime(cnt, stamp, sizeof(stamp), cnt->conf.extpipe, currenttime_tm, cnt->extpipefilename, 0); + mystrftime(cnt, stamp, sizeof(stamp), cnt->conf.extpipe, currenttime_tv, cnt->extpipefilename, 0); - MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: pipe: %s", - stamp); - MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: cnt->moviefps: %d", - cnt->movie_fps); + MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: pipe: %s", stamp); + + MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: cnt->moviefps: %d", cnt->movie_fps); event(cnt, EVENT_FILECREATE, NULL, cnt->extpipefilename, (void *)FTYPE_MPEG, NULL); cnt->extpipe = popen(stamp, "w"); @@ -545,7 +544,7 @@ static void event_create_extpipe(struct context *cnt, static void event_extpipe_put(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED) { /* Check use_extpipe enabled and ext_pipe not NULL */ if ((cnt->conf.useextpipe) && (cnt->extpipe != NULL)) { @@ -567,7 +566,7 @@ static void event_extpipe_put(struct context *cnt, static void event_new_video(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED) { cnt->movie_last_shot = -1; @@ -578,7 +577,6 @@ static 
void event_new_video(struct context *cnt, if (cnt->movie_fps < 2) cnt->movie_fps = 2; } -#ifdef HAVE_FFMPEG static void grey2yuv420p(unsigned char *u, unsigned char *v, int width, int height) { @@ -590,7 +588,7 @@ static void grey2yuv420p(unsigned char *u, unsigned char *v, int width, int heig static void event_ffmpeg_newfile(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv) { int width = cnt->imgs.width; int height = cnt->imgs.height; @@ -612,7 +610,7 @@ static void event_ffmpeg_newfile(struct context *cnt, else moviepath = DEF_MOVIEPATH; - mystrftime(cnt, stamp, sizeof(stamp), moviepath, currenttime_tm, NULL, 0); + mystrftime(cnt, stamp, sizeof(stamp), moviepath, currenttime_tv, NULL, 0); /* * motion movies get the same name as normal movies plus an appended 'm' @@ -691,7 +689,7 @@ static void event_ffmpeg_newfile(struct context *cnt, if ((cnt->ffmpeg_output = ffmpeg_open(codec, cnt->newfilename, y, u, v, cnt->imgs.width, cnt->imgs.height, cnt->movie_fps, cnt->conf.ffmpeg_bps, - cnt->conf.ffmpeg_vbr,TIMELAPSE_NONE)) == NULL) { + cnt->conf.ffmpeg_vbr,TIMELAPSE_NONE, currenttime_tv)) == NULL) { MOTION_LOG(ERR, TYPE_EVENTS, SHOW_ERRNO, "%s: ffopen_open error creating (new) file [%s]", cnt->newfilename); cnt->finish = 1; @@ -719,7 +717,7 @@ static void event_ffmpeg_newfile(struct context *cnt, if ((cnt->ffmpeg_output_debug = ffmpeg_open(codec, cnt->motionfilename, y, u, v, cnt->imgs.width, cnt->imgs.height, cnt->movie_fps, cnt->conf.ffmpeg_bps, - cnt->conf.ffmpeg_vbr,TIMELAPSE_NONE)) == NULL) { + cnt->conf.ffmpeg_vbr,TIMELAPSE_NONE,currenttime_tv)) == NULL) { MOTION_LOG(ERR, TYPE_EVENTS, SHOW_ERRNO, "%s: ffopen_open error creating (motion) file [%s]", cnt->motionfilename); cnt->finish = 1; @@ -734,7 +732,7 @@ static void event_ffmpeg_newfile(struct context *cnt, static void 
event_ffmpeg_timelapse(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED, void *dummy2 ATTRIBUTE_UNUSED, - struct tm *currenttime_tm) + struct timeval *currenttime_tv) { int width = cnt->imgs.width; int height = cnt->imgs.height; @@ -755,7 +753,7 @@ static void event_ffmpeg_timelapse(struct context *cnt, else timepath = DEF_TIMEPATH; - mystrftime(cnt, tmp, sizeof(tmp), timepath, currenttime_tm, NULL, 0); + mystrftime(cnt, tmp, sizeof(tmp), timepath, currenttime_tv, NULL, 0); /* PATH_MAX - 4 to allow for .mpg to be appended without overflow */ snprintf(cnt->timelapsefilename, PATH_MAX - 4, "%s/%s", cnt->conf.filepath, tmp); @@ -786,14 +784,14 @@ static void event_ffmpeg_timelapse(struct context *cnt, cnt->ffmpeg_timelapse = ffmpeg_open(codec_mpg,cnt->timelapsefilename, y, u, v ,cnt->imgs.width, cnt->imgs.height, cnt->conf.frame_limit - ,cnt->conf.ffmpeg_bps,cnt->conf.ffmpeg_vbr,TIMELAPSE_APPEND); + ,cnt->conf.ffmpeg_bps,cnt->conf.ffmpeg_vbr,TIMELAPSE_APPEND,currenttime_tv); } else { MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: Timelapse using mpeg4 codec."); MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: Events will be trigger new files"); cnt->ffmpeg_timelapse = ffmpeg_open(codec_mpeg ,cnt->timelapsefilename, y, u, v ,cnt->imgs.width, cnt->imgs.height, cnt->conf.frame_limit - ,cnt->conf.ffmpeg_bps,cnt->conf.ffmpeg_vbr,TIMELAPSE_NEW); + ,cnt->conf.ffmpeg_bps,cnt->conf.ffmpeg_vbr,TIMELAPSE_NEW,currenttime_tv); } if (cnt->ffmpeg_timelapse == NULL){ @@ -816,7 +814,7 @@ static void event_ffmpeg_timelapse(struct context *cnt, v = u + (width * height) / 4; - if (ffmpeg_put_other_image(cnt->ffmpeg_timelapse, y, u, v) == -1) { + if (ffmpeg_put_other_image(cnt->ffmpeg_timelapse, y, u, v,currenttime_tv) == -1) { cnt->finish = 1; cnt->restart = 0; } @@ -826,7 +824,7 @@ static void event_ffmpeg_timelapse(struct context *cnt, static void event_ffmpeg_put(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char 
*img, char *dummy1 ATTRIBUTE_UNUSED, - void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED) + void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv) { if (cnt->ffmpeg_output) { int width = cnt->imgs.width; @@ -841,14 +839,14 @@ static void event_ffmpeg_put(struct context *cnt, v = u + (width * height) / 4; - if (ffmpeg_put_other_image(cnt->ffmpeg_output, y, u, v) == -1) { + if (ffmpeg_put_other_image(cnt->ffmpeg_output, y, u, v, currenttime_tv) == -1) { cnt->finish = 1; cnt->restart = 0; } } if (cnt->ffmpeg_output_debug) { - if (ffmpeg_put_image(cnt->ffmpeg_output_debug) == -1) { + if (ffmpeg_put_image(cnt->ffmpeg_output_debug, currenttime_tv) == -1) { cnt->finish = 1; cnt->restart = 0; } @@ -859,7 +857,7 @@ static void event_ffmpeg_closefile(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy1 ATTRIBUTE_UNUSED, char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { if (cnt->ffmpeg_output) { @@ -885,7 +883,7 @@ static void event_ffmpeg_timelapseend(struct context *cnt, motion_event type ATTRIBUTE_UNUSED, unsigned char *dummy1 ATTRIBUTE_UNUSED, char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED, - struct tm *tm ATTRIBUTE_UNUSED) + struct timeval *tv1 ATTRIBUTE_UNUSED) { if (cnt->ffmpeg_timelapse) { free(cnt->ffmpeg_timelapse->udata); @@ -897,7 +895,6 @@ static void event_ffmpeg_timelapseend(struct context *cnt, } } -#endif /* HAVE_FFMPEG */ /* @@ -956,7 +953,7 @@ struct event_handlers event_handlers[] = { EVENT_IMAGE_SNAPSHOT, event_image_snapshot }, -#if defined(HAVE_LINUX_VIDEODEV_H) && !defined(WITHOUT_V4L) && !defined(__FreeBSD__) +#if !defined(WITHOUT_V4L2) && !defined(__FreeBSD__) { EVENT_IMAGE, event_vid_putpipe @@ -965,7 +962,7 @@ struct event_handlers event_handlers[] = { EVENT_IMAGEM, event_vid_putpipe }, -#endif /* !WITHOUT_V4L && !__FreeBSD__ */ +#endif /* !WITHOUT_V4L2 && !__FreeBSD__ */ { EVENT_STREAM, 
event_stream_put @@ -974,7 +971,6 @@ struct event_handlers event_handlers[] = { EVENT_FIRSTMOTION, event_new_video }, -#ifdef HAVE_FFMPEG { EVENT_FIRSTMOTION, event_ffmpeg_newfile @@ -999,7 +995,6 @@ struct event_handlers event_handlers[] = { EVENT_TIMELAPSEEND, event_ffmpeg_timelapseend }, -#endif /* HAVE_FFMPEG */ { EVENT_FILECLOSE, on_movie_end_command @@ -1045,13 +1040,12 @@ struct event_handlers event_handlers[] = { * as a code reading friendly solution to avoid a stream of compiler warnings in gcc 4.0. */ void event(struct context *cnt, motion_event type, unsigned char *image, - char *filename, void *eventdata, struct tm *tm) + char *filename, void *eventdata, struct timeval *tv1) { int i=-1; while (event_handlers[++i].handler) { if (type == event_handlers[i].type) - event_handlers[i].handler(cnt, type, image, filename, eventdata, - tm); + event_handlers[i].handler(cnt, type, image, filename, eventdata, tv1); } } diff --git a/event.h b/event.h index 71b97ac6..2595cae4 100644 --- a/event.h +++ b/event.h @@ -34,12 +34,10 @@ typedef enum { EVENT_LAST, } motion_event; - typedef void(* event_handler)(struct context *, motion_event, unsigned char *, - char *, void *, struct tm *); + char *, void *, struct timeval *); -void event(struct context *, motion_event, unsigned char *, char *, void *, - struct tm *); +void event(struct context *, motion_event, unsigned char *, char *, void *, struct timeval *); const char * imageext(struct context *); #endif /* _INCLUDE_EVENT_H_ */ diff --git a/ffmpeg.c b/ffmpeg.c index e7cab9fd..ba590bce 100644 --- a/ffmpeg.c +++ b/ffmpeg.c @@ -23,14 +23,12 @@ #include "config.h" - -#ifdef HAVE_FFMPEG - #include "ffmpeg.h" #include "motion.h" -#define AVSTREAM_CODEC_PTR(avs_ptr) (avs_ptr->codec) +#ifdef HAVE_FFMPEG +#define AVSTREAM_CODEC_PTR(avs_ptr) (avs_ptr->codec) /**************************************************************************** * The section below is the "my" section of functions. 
@@ -169,6 +167,7 @@ static int timelapse_exists(const char *fname){ } return 0; } + static int timelapse_append(struct ffmpeg *ffmpeg, AVPacket pkt){ FILE *file; @@ -219,39 +218,6 @@ static int ffmpeg_lockmgr_cb(void **arg, enum AVLockOp op) return 1; } -/** - * ffmpeg_init - * Initializes for libavformat. - * - * Returns - * Function returns nothing. - */ -void ffmpeg_init(void){ - int ret; - - MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO, - "%s: ffmpeg libavcodec version %d.%d.%d" - " libavformat version %d.%d.%d" - , LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, LIBAVCODEC_VERSION_MICRO - , LIBAVFORMAT_VERSION_MAJOR, LIBAVFORMAT_VERSION_MINOR, LIBAVFORMAT_VERSION_MICRO); - - av_register_all(); - avcodec_register_all(); - avformat_network_init(); - av_log_set_callback((void *)ffmpeg_avcodec_log); - - ret = av_lockmgr_register(ffmpeg_lockmgr_cb); - if (ret < 0) - { - MOTION_LOG(EMG, TYPE_ALL, SHOW_ERRNO, "%s: av_lockmgr_register failed (%d)", ret); - exit(1); - } -} - -void ffmpeg_finalise(void) { - avformat_network_deinit(); -} - /** * get_oformat * Obtains the output format used for the specified codec. For mpeg4 codecs, @@ -323,6 +289,294 @@ static AVOutputFormat *get_oformat(const char *codec, char *filename){ return of; } + +/** + * ffmpeg_cleanups + * Clean up ffmpeg struct if something was wrong. + * + * Returns + * Function returns nothing. + */ +void ffmpeg_cleanups(struct ffmpeg *ffmpeg){ + + /* Close each codec */ + if (ffmpeg->video_st) { + avcodec_close(AVSTREAM_CODEC_PTR(ffmpeg->video_st)); + } + free(ffmpeg->video_outbuf); + av_freep(&ffmpeg->picture); + avformat_free_context(ffmpeg->oc); + free(ffmpeg); +} + +/** + * ffmpeg_put_frame + * Encodes and writes a video frame using the av_write_frame API. This is + * a helper function for ffmpeg_put_image and ffmpeg_put_other_image. + * + * Returns + * Number of bytes written or -1 if any error happens. 
+ */ +int ffmpeg_put_frame(struct ffmpeg *ffmpeg, AVFrame *pic, const struct timeval *tv1){ +/** + * Since the logic,return values and conditions changed so + * dramatically between versions, the encoding of the frame + * is 100% blocked based upon Libav/FFMpeg version + */ +#if (LIBAVFORMAT_VERSION_MAJOR >= 55) || ((LIBAVFORMAT_VERSION_MAJOR == 54) && (LIBAVFORMAT_VERSION_MINOR > 6)) + int retcd; + int got_packet_ptr; + AVPacket pkt; + char errstr[128]; + int64_t pts_interval; + + + av_init_packet(&pkt); /* Init static structure. */ + if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) { + pkt.stream_index = ffmpeg->video_st->index; + pkt.flags |= AV_PKT_FLAG_KEY; + pkt.data = (uint8_t *)pic; + pkt.size = sizeof(AVPicture); + } else { + pkt.data = NULL; + pkt.size = 0; + retcd = avcodec_encode_video2(AVSTREAM_CODEC_PTR(ffmpeg->video_st), + &pkt, pic, &got_packet_ptr); + if (retcd < 0 ){ + av_strerror(retcd, errstr, sizeof(errstr)); + MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video:%s",errstr); + //Packet is freed upon failure of encoding + return -1; + } + if (got_packet_ptr == 0){ + //Buffered packet. Throw special return code + my_packet_unref(pkt); + return -2; + } + } + if (ffmpeg->tlapse == TIMELAPSE_APPEND) { + retcd = timelapse_append(ffmpeg, pkt); + } else if (ffmpeg->tlapse == TIMELAPSE_NEW) { + retcd = av_write_frame(ffmpeg->oc, &pkt); + } else { + pts_interval = ((1000000L * (tv1->tv_sec - ffmpeg->start_time.tv_sec)) + tv1->tv_usec - ffmpeg->start_time.tv_usec) + 10000; + +// MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: interval:%d img_sec:%d img_usec:%d strt_sec:%d strt_usec:%d " +// ,pts_interval,tv1->tv_sec,tv1->tv_usec,ffmpeg->start_time.tv_sec,ffmpeg->start_time.tv_usec); + + if (pts_interval < 0){ + /* This can occur when we have pre-capture frames. Reset start time of video. 
*/ + ffmpeg->start_time.tv_sec = tv1->tv_sec ; + ffmpeg->start_time.tv_usec = tv1->tv_usec ; + pts_interval = 1; + } + pkt.pts = av_rescale_q(pts_interval,(AVRational){1, 1000000L},ffmpeg->video_st->time_base); + if (pkt.pts <= ffmpeg->last_pts) pkt.pts = ffmpeg->last_pts + 1; + pkt.dts = pkt.pts; + retcd = av_write_frame(ffmpeg->oc, &pkt); + ffmpeg->last_pts = pkt.pts; + } + // MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: pts:%d dts:%d stream:%d interval %d",pkt.pts,pkt.dts,ffmpeg->video_st->time_base.den,pts_interval); + my_packet_unref(pkt); + + if (retcd != 0) { + MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing video frame"); + ffmpeg_cleanups(ffmpeg); + return -1; + } + + return retcd; + +#else // Old versions of Libav/FFmpeg + int retcd; + AVPacket pkt; + + av_init_packet(&pkt); /* Init static structure. */ + pkt.stream_index = ffmpeg->video_st->index; + if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) { + // Raw video case. + pkt.size = sizeof(AVPicture); + pkt.data = (uint8_t *)pic; + pkt.flags |= AV_PKT_FLAG_KEY; + } else { + retcd = avcodec_encode_video(AVSTREAM_CODEC_PTR(ffmpeg->video_st), + ffmpeg->video_outbuf, + ffmpeg->video_outbuf_size, pic); + if (retcd < 0 ){ + MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video"); + my_packet_unref(pkt); + return -1; + } + if (retcd == 0 ){ + // No bytes encoded => buffered=>special handling + my_packet_unref(pkt); + return -2; + } + + pkt.size = retcd; + pkt.data = ffmpeg->video_outbuf; + pkt.pts = AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->pts; + if (AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->key_frame) + pkt.flags |= AV_PKT_FLAG_KEY; + } + if (ffmpeg->tlapse == TIMELAPSE_APPEND) { + retcd = timelapse_append(ffmpeg, pkt); + } else { + retcd = av_write_frame(ffmpeg->oc, &pkt); + } + my_packet_unref(pkt); + + if (retcd != 0) { + MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing video frame"); + ffmpeg_cleanups(ffmpeg); + return -1; + } + + 
return retcd; + +#endif +} + +/** + * ffmpeg_prepare_frame + * Allocates and prepares a picture frame by setting up the U, Y and V pointers in + * the frame according to the passed pointers. + * + * Returns + * NULL If the allocation fails. + * + * The returned AVFrame pointer must be freed after use. + */ +AVFrame *ffmpeg_prepare_frame(struct ffmpeg *ffmpeg, unsigned char *y, + unsigned char *u, unsigned char *v) +{ + AVFrame *picture; + + picture = my_frame_alloc(); + + if (!picture) { + MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Could not alloc frame"); + return NULL; + } + + /* Take care of variable bitrate setting. */ + if (ffmpeg->vbr) + picture->quality = ffmpeg->vbr; + + + /* Setup pointers and line widths. */ + picture->data[0] = y; + picture->data[1] = u; + picture->data[2] = v; + picture->linesize[0] = ffmpeg->c->width; + picture->linesize[1] = ffmpeg->c->width / 2; + picture->linesize[2] = ffmpeg->c->width / 2; + + picture->format = ffmpeg->c->pix_fmt; + picture->width = ffmpeg->c->width; + picture->height = ffmpeg->c->height; + + return picture; +} +/** + * ffmpeg_avcodec_log + * Handle any logging output from the ffmpeg library avcodec. + * + * Parameters + * *ignoreme A pointer we will ignore + * errno_flag The error number value + * fmt Text message to be used for log entry in printf() format. + * ap List of variables to be used in formatted message text. + * + * Returns + * Function returns nothing. + */ +void ffmpeg_avcodec_log(void *ignoreme ATTRIBUTE_UNUSED, int errno_flag, const char *fmt, va_list vl) +{ + char buf[1024]; + char *end; + + /* Flatten the message coming in from avcodec. */ + vsnprintf(buf, sizeof(buf), fmt, vl); + end = buf + strlen(buf); + if (end > buf && end[-1] == '\n') + { + *--end = 0; + } + + /* If the debug_level is correct then send the message to the motion logging routine. + * While it is not really desired to look for specific text in the message, there does + * not seem another option. 
The specific messages indicated are lost camera which we + * have our own message and UE golomb is not something that is possible for us to fix. + * It is caused by the stream sent from the source camera + */ + if(strstr(buf, "No route to host") == NULL){ + if (strstr(buf, "Invalid UE golomb") != NULL) { + MOTION_LOG(DBG, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf); + } else if (errno_flag <= AV_LOG_ERROR) { + MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf); + } else if (errno_flag <= AV_LOG_WARNING) { + MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf); + } else if (errno_flag < AV_LOG_DEBUG){ + MOTION_LOG(INF, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf); + } + } +} + +#endif /* HAVE_FFMPEG */ + +/**************************************************************************** + **************************************************************************** + ****************************************************************************/ + +/** + * ffmpeg_init + * Initializes for libavformat. + * + * Returns + * Function returns nothing. 
+ */ +void ffmpeg_init(void){ +#ifdef HAVE_FFMPEG + int ret; + + MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO, + "%s: ffmpeg libavcodec version %d.%d.%d" + " libavformat version %d.%d.%d" + , LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, LIBAVCODEC_VERSION_MICRO + , LIBAVFORMAT_VERSION_MAJOR, LIBAVFORMAT_VERSION_MINOR, LIBAVFORMAT_VERSION_MICRO); + + av_register_all(); + avcodec_register_all(); + avformat_network_init(); + av_log_set_callback((void *)ffmpeg_avcodec_log); + + ret = av_lockmgr_register(ffmpeg_lockmgr_cb); + if (ret < 0) + { + MOTION_LOG(EMG, TYPE_ALL, SHOW_ERRNO, "%s: av_lockmgr_register failed (%d)", ret); + exit(1); + } + +#else /* No FFMPEG */ + + MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,"%s: No ffmpeg functionality included"); + +#endif /* HAVE_FFMPEG */ +} + +void ffmpeg_finalise(void) { +#ifdef HAVE_FFMPEG + + avformat_network_deinit(); + +#else /* No FFMPEG */ + + MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,"%s: No ffmpeg functionality included"); + +#endif /* HAVE_FFMPEG */ +} + /** * ffmpeg_open * Opens an mpeg file using the new libavformat method. 
Both mpeg1 @@ -335,8 +589,11 @@ static AVOutputFormat *get_oformat(const char *codec, char *filename){ */ struct ffmpeg *ffmpeg_open(const char *ffmpeg_video_codec, char *filename, unsigned char *y, unsigned char *u, unsigned char *v, - int width, int height, int rate, int bps, int vbr, int tlapse) + int width, int height, int rate, int bps, int vbr, int tlapse, + const struct timeval *tv1) { +#ifdef HAVE_FFMPEG + AVCodecContext *c; AVCodec *codec; struct ffmpeg *ffmpeg; @@ -536,7 +793,10 @@ struct ffmpeg *ffmpeg_open(const char *ffmpeg_video_codec, char *filename, } } } - gettimeofday(&ffmpeg->start_time, NULL); + + ffmpeg->start_time.tv_sec = tv1->tv_sec; + ffmpeg->start_time.tv_usec= tv1->tv_usec; + /* Write the stream header, For the TIMELAPSE_APPEND * we write the data via standard file I/O so we close the @@ -556,25 +816,33 @@ struct ffmpeg *ffmpeg_open(const char *ffmpeg_video_codec, char *filename, } return ffmpeg; -} -/** - * ffmpeg_cleanups - * Clean up ffmpeg struct if something was wrong. - * - * Returns - * Function returns nothing. - */ -void ffmpeg_cleanups(struct ffmpeg *ffmpeg){ - /* Close each codec */ - if (ffmpeg->video_st) { - avcodec_close(AVSTREAM_CODEC_PTR(ffmpeg->video_st)); - } - free(ffmpeg->video_outbuf); - av_freep(&ffmpeg->picture); - avformat_free_context(ffmpeg->oc); - free(ffmpeg); +#else /* No FFMPEG */ + + MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,"%s: No ffmpeg functionality included"); + + struct ffmpeg *ffmpeg; + ffmpeg = mymalloc(sizeof(struct ffmpeg)); + + ffmpeg_video_codec = ffmpeg_video_codec; + filename = filename; + y = y; + u = u; + v = v; + width = width; + height = height; + rate = rate; + bps = bps; + vbr = vbr; + tlapse = tlapse; + ffmpeg->dummy = 0; + tv1 = tv1; + return ffmpeg; + +#endif /* HAVE_FFMPEG */ + } + /** * ffmpeg_close * Closes a video file. @@ -583,6 +851,7 @@ void ffmpeg_cleanups(struct ffmpeg *ffmpeg){ * Function returns nothing. 
*/ void ffmpeg_close(struct ffmpeg *ffmpeg){ +#ifdef HAVE_FFMPEG if (ffmpeg->tlapse != TIMELAPSE_APPEND) { av_write_trailer(ffmpeg->oc); @@ -600,43 +869,12 @@ void ffmpeg_close(struct ffmpeg *ffmpeg){ } } avformat_free_context(ffmpeg->oc); + +#endif // HAVE_FFMPEG + free(ffmpeg); - } -/** - * ffmpeg_put_image - * Puts the image pointed to by ffmpeg->picture. - * - * Returns - * value returned by ffmpeg_put_frame call. - */ -int ffmpeg_put_image(struct ffmpeg *ffmpeg){ - /* A return code of -2 is thrown by the put_frame - * when a image is buffered. For timelapse, we absolutely - * never want a frame buffered so we keep sending back the - * the same pic until it flushes or fails in a different way - */ - int retcd; - int cnt = 0; - - retcd = ffmpeg_put_frame(ffmpeg, ffmpeg->picture); - while ((retcd == -2) && (ffmpeg->tlapse != TIMELAPSE_NONE)) { - retcd = ffmpeg_put_frame(ffmpeg, ffmpeg->picture); - cnt++; - if (cnt > 50){ - MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Excessive attempts to clear buffered packet"); - retcd = -1; - } - } - //non timelapse buffered is ok - if (retcd == -2){ - retcd = 0; - MOTION_LOG(DBG, TYPE_ENCODER, NO_ERRNO, "%s: Buffered packet"); - } - - return retcd; -} /** * ffmpeg_put_other_image * Puts an arbitrary picture defined by y, u and v. @@ -647,7 +885,8 @@ int ffmpeg_put_image(struct ffmpeg *ffmpeg){ * 0 if error allocating picture. 
*/ int ffmpeg_put_other_image(struct ffmpeg *ffmpeg, unsigned char *y, - unsigned char *u, unsigned char *v){ + unsigned char *u, unsigned char *v, const struct timeval *tv1){ +#ifdef HAVE_FFMPEG AVFrame *picture; int retcd = 0; int cnt = 0; @@ -661,9 +900,9 @@ int ffmpeg_put_other_image(struct ffmpeg *ffmpeg, unsigned char *y, * never want a frame buffered so we keep sending back the * the same pic until it flushes or fails in a different way */ - retcd = ffmpeg_put_frame(ffmpeg, picture); + retcd = ffmpeg_put_frame(ffmpeg, picture, tv1); while ((retcd == -2) && (ffmpeg->tlapse != TIMELAPSE_NONE)) { - retcd = ffmpeg_put_frame(ffmpeg, picture); + retcd = ffmpeg_put_frame(ffmpeg, picture, tv1); cnt++; if (cnt > 50){ MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Excessive attempts to clear buffered packet"); @@ -677,213 +916,58 @@ int ffmpeg_put_other_image(struct ffmpeg *ffmpeg, unsigned char *y, } av_free(picture); } - - return retcd; -} -/** - * ffmpeg_put_frame - * Encodes and writes a video frame using the av_write_frame API. This is - * a helper function for ffmpeg_put_image and ffmpeg_put_other_image. - * - * Returns - * Number of bytes written or -1 if any error happens. - */ -int ffmpeg_put_frame(struct ffmpeg *ffmpeg, AVFrame *pic){ -/** - * Since the logic,return values and conditions changed so - * dramatically between versions, the encoding of the frame - * is 100% blocked based upon Libav/FFMpeg version - */ -#if (LIBAVFORMAT_VERSION_MAJOR >= 55) || ((LIBAVFORMAT_VERSION_MAJOR == 54) && (LIBAVFORMAT_VERSION_MINOR > 6)) - int retcd; - int got_packet_ptr; - AVPacket pkt; - char errstr[128]; - struct timeval tv1; - int64_t pts_interval; - - gettimeofday(&tv1, NULL); - - av_init_packet(&pkt); /* Init static structure. 
*/ - if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) { - pkt.stream_index = ffmpeg->video_st->index; - pkt.flags |= AV_PKT_FLAG_KEY; - pkt.data = (uint8_t *)pic; - pkt.size = sizeof(AVPicture); - } else { - pkt.data = NULL; - pkt.size = 0; - retcd = avcodec_encode_video2(AVSTREAM_CODEC_PTR(ffmpeg->video_st), - &pkt, pic, &got_packet_ptr); - if (retcd < 0 ){ - av_strerror(retcd, errstr, sizeof(errstr)); - MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video:%s",errstr); - //Packet is freed upon failure of encoding - return -1; - } - if (got_packet_ptr == 0){ - //Buffered packet. Throw special return code - my_packet_unref(pkt); - return -2; - } - } - if (ffmpeg->tlapse == TIMELAPSE_APPEND) { - retcd = timelapse_append(ffmpeg, pkt); - } else if (ffmpeg->tlapse == TIMELAPSE_NEW) { - retcd = av_write_frame(ffmpeg->oc, &pkt); - } else { - pts_interval = ((1000000L * (tv1.tv_sec - ffmpeg->start_time.tv_sec)) + tv1.tv_usec - ffmpeg->start_time.tv_usec) + 10000; - pkt.pts = av_rescale_q(pts_interval,(AVRational){1, 1000000L},ffmpeg->video_st->time_base); - if (pkt.pts <= ffmpeg->last_pts) pkt.pts = ffmpeg->last_pts + 1; - pkt.dts = pkt.pts; - retcd = av_write_frame(ffmpeg->oc, &pkt); - ffmpeg->last_pts = pkt.pts; - } -// MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: pts:%d dts:%d stream:%d interval %d",pkt.pts,pkt.dts,ffmpeg->video_st->time_base.den,pts_interval); - my_packet_unref(pkt); - - if (retcd != 0) { - MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing video frame"); - ffmpeg_cleanups(ffmpeg); - return -1; - } - return retcd; -#else // Old versions of Libav/FFmpeg - int retcd; - AVPacket pkt; +#else - av_init_packet(&pkt); /* Init static structure. */ - pkt.stream_index = ffmpeg->video_st->index; - if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) { - // Raw video case. 
- pkt.size = sizeof(AVPicture); - pkt.data = (uint8_t *)pic; - pkt.flags |= AV_PKT_FLAG_KEY; - } else { - retcd = avcodec_encode_video(AVSTREAM_CODEC_PTR(ffmpeg->video_st), - ffmpeg->video_outbuf, - ffmpeg->video_outbuf_size, pic); - if (retcd < 0 ){ - MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video"); - my_packet_unref(pkt); - return -1; - } - if (retcd == 0 ){ - // No bytes encoded => buffered=>special handling - my_packet_unref(pkt); - return -2; - } + ffmpeg = ffmpeg; + y = y; + u = u; + v = v; + tv1 = tv1; + return 0; - pkt.size = retcd; - pkt.data = ffmpeg->video_outbuf; - pkt.pts = AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->pts; - if (AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->key_frame) - pkt.flags |= AV_PKT_FLAG_KEY; - } - if (ffmpeg->tlapse == TIMELAPSE_APPEND) { - retcd = timelapse_append(ffmpeg, pkt); - } else { - retcd = av_write_frame(ffmpeg->oc, &pkt); - } - my_packet_unref(pkt); - - if (retcd != 0) { - MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing video frame"); - ffmpeg_cleanups(ffmpeg); - return -1; - } - - return retcd; - -#endif +#endif // HAVE_FFMPEG } /** - * ffmpeg_prepare_frame - * Allocates and prepares a picture frame by setting up the U, Y and V pointers in - * the frame according to the passed pointers. + * ffmpeg_put_image + * Puts the image pointed to by ffmpeg->picture. * * Returns - * NULL If the allocation fails. - * - * The returned AVFrame pointer must be freed after use. + * value returned by ffmpeg_put_frame call. */ -AVFrame *ffmpeg_prepare_frame(struct ffmpeg *ffmpeg, unsigned char *y, - unsigned char *u, unsigned char *v) -{ - AVFrame *picture; +int ffmpeg_put_image(struct ffmpeg *ffmpeg, const struct timeval *tv1){ - picture = my_frame_alloc(); - - if (!picture) { - MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Could not alloc frame"); - return NULL; - } - - /* Take care of variable bitrate setting. 
*/ - if (ffmpeg->vbr) - picture->quality = ffmpeg->vbr; - - - /* Setup pointers and line widths. */ - picture->data[0] = y; - picture->data[1] = u; - picture->data[2] = v; - picture->linesize[0] = ffmpeg->c->width; - picture->linesize[1] = ffmpeg->c->width / 2; - picture->linesize[2] = ffmpeg->c->width / 2; - - picture->format = ffmpeg->c->pix_fmt; - picture->width = ffmpeg->c->width; - picture->height = ffmpeg->c->height; - - return picture; -} -/** - * ffmpeg_avcodec_log - * Handle any logging output from the ffmpeg library avcodec. - * - * Parameters - * *ignoreme A pointer we will ignore - * errno_flag The error number value - * fmt Text message to be used for log entry in printf() format. - * ap List of variables to be used in formatted message text. - * - * Returns - * Function returns nothing. - */ -void ffmpeg_avcodec_log(void *ignoreme ATTRIBUTE_UNUSED, int errno_flag, const char *fmt, va_list vl) -{ - char buf[1024]; - char *end; - - /* Flatten the message coming in from avcodec. */ - vsnprintf(buf, sizeof(buf), fmt, vl); - end = buf + strlen(buf); - if (end > buf && end[-1] == '\n') - { - *--end = 0; - } - - /* If the debug_level is correct then send the message to the motion logging routine. - * While it is not really desired to look for specific text in the message, there does - * not seem another option. The specific messages indicated are lost camera which we - * have our own message and UE golomb is not something that is possible for us to fix. - * It is caused by the stream sent from the source camera +#ifdef HAVE_FFMPEG + /* A return code of -2 is thrown by the put_frame + * when a image is buffered. 
For timelapse, we absolutely + * never want a frame buffered so we keep sending back the + * the same pic until it flushes or fails in a different way */ - if(strstr(buf, "No route to host") == NULL){ - if (strstr(buf, "Invalid UE golomb") != NULL) { - MOTION_LOG(DBG, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf); - } else if (errno_flag <= AV_LOG_ERROR) { - MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf); - } else if (errno_flag <= AV_LOG_WARNING) { - MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf); - } else if (errno_flag < AV_LOG_DEBUG){ - MOTION_LOG(INF, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf); + int retcd; + int cnt = 0; + + retcd = ffmpeg_put_frame(ffmpeg, ffmpeg->picture, tv1); + while ((retcd == -2) && (ffmpeg->tlapse != TIMELAPSE_NONE)) { + retcd = ffmpeg_put_frame(ffmpeg, ffmpeg->picture, tv1); + cnt++; + if (cnt > 50){ + MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Excessive attempts to clear buffered packet"); + retcd = -1; } } + //non timelapse buffered is ok + if (retcd == -2){ + retcd = 0; + MOTION_LOG(DBG, TYPE_ENCODER, NO_ERRNO, "%s: Buffered packet"); + } + + return retcd; +#else + ffmpeg = ffmpeg; + tv1 = tv1; + return 0; +#endif // HAVE_FFMPEG } -#endif /* HAVE_FFMPEG */ diff --git a/ffmpeg.h b/ffmpeg.h index ccdc0f1f..9d3276e0 100644 --- a/ffmpeg.h +++ b/ffmpeg.h @@ -7,6 +7,10 @@ #include "config.h" +#define TIMELAPSE_NONE 0 /* No timelapse, regular processing */ +#define TIMELAPSE_APPEND 1 /* Use append version of timelapse */ +#define TIMELAPSE_NEW 2 /* Use create new file version of timelapse */ + #ifdef HAVE_FFMPEG #include @@ -28,14 +32,7 @@ #endif -#endif /* HAVE_FFMPEG */ - -#define TIMELAPSE_NONE 0 /* No timelapse, regular processing */ -#define TIMELAPSE_APPEND 1 /* Use append version of timelapse */ -#define TIMELAPSE_NEW 2 /* Use create new file version of timelapse */ - struct ffmpeg { -#ifdef HAVE_FFMPEG AVFormatContext *oc; AVStream *video_st; AVCodecContext *c; @@ -50,16 +47,33 @@ struct ffmpeg { int tlapse; int64_t 
last_pts; struct timeval start_time; -#else - int dummy; -#endif }; -/* Initialize FFmpeg stuff. Needs to be called before ffmpeg_open. */ -void ffmpeg_init(void); -/** Finalise ffmpeg; call only after all threads have finished */ -void ffmpeg_finalise(void); +AVFrame *my_frame_alloc(void); +void my_frame_free(AVFrame *frame); +int ffmpeg_put_frame(struct ffmpeg *, AVFrame *, const struct timeval *tv1); +void ffmpeg_cleanups(struct ffmpeg *); +AVFrame *ffmpeg_prepare_frame(struct ffmpeg *, unsigned char *, + unsigned char *, unsigned char *); +int my_image_get_buffer_size(enum MyPixelFormat pix_fmt, int width, int height); +int my_image_copy_to_buffer(AVFrame *frame,uint8_t *buffer_ptr,enum MyPixelFormat pix_fmt,int width,int height,int dest_size); +int my_image_fill_arrays(AVFrame *frame,uint8_t *buffer_ptr,enum MyPixelFormat pix_fmt,int width,int height); +void my_packet_unref(AVPacket pkt); + +#else /* No FFMPEG */ + +struct ffmpeg { + void *udata; + int dummy; + struct timeval start_time; +}; + +#endif /* HAVE_FFMPEG */ + +/* Now the functions that are ok for both situations */ +void ffmpeg_init(void); +void ffmpeg_finalise(void); struct ffmpeg *ffmpeg_open( const char *ffmpeg_video_codec, char *filename, @@ -71,38 +85,19 @@ struct ffmpeg *ffmpeg_open( int rate, /* framerate, fps */ int bps, /* bitrate; bits per second */ int vbr, /* variable bitrate */ - int tlapse + int tlapse, + const struct timeval *tv1 ); - -/* Puts the image pointed to by the picture member of struct ffmpeg. */ -int ffmpeg_put_image(struct ffmpeg *); - -/* Puts the image defined by u, y and v (YUV420 format). */ +int ffmpeg_put_image(struct ffmpeg *, const struct timeval *tv1); int ffmpeg_put_other_image( struct ffmpeg *ffmpeg, unsigned char *y, unsigned char *u, - unsigned char *v + unsigned char *v, + const struct timeval *tv1 ); - -/* Closes the mpeg file. */ void ffmpeg_close(struct ffmpeg *); - -/* Setup an avcodec log handler. 
*/ void ffmpeg_avcodec_log(void *, int, const char *, va_list); -#ifdef HAVE_FFMPEG -AVFrame *my_frame_alloc(void); -void my_frame_free(AVFrame *frame); -int ffmpeg_put_frame(struct ffmpeg *, AVFrame *); -void ffmpeg_cleanups(struct ffmpeg *); -AVFrame *ffmpeg_prepare_frame(struct ffmpeg *, unsigned char *, - unsigned char *, unsigned char *); -int my_image_get_buffer_size(enum MyPixelFormat pix_fmt, int width, int height); -int my_image_copy_to_buffer(AVFrame *frame,uint8_t *buffer_ptr,enum MyPixelFormat pix_fmt,int width,int height,int dest_size); -int my_image_fill_arrays(AVFrame *frame,uint8_t *buffer_ptr,enum MyPixelFormat pix_fmt,int width,int height); -void my_packet_unref(AVPacket pkt); - -#endif #endif /* _INCLUDE_FFMPEG_H_ */ diff --git a/motion-dist.conf.in b/motion-dist.conf.in index e3ba8893..0d78ed1b 100644 --- a/motion-dist.conf.in +++ b/motion-dist.conf.in @@ -66,6 +66,7 @@ videodevice /dev/video0 # V4L2_PIX_FMT_YUV420 : 17 'YU12' # V4L2_PIX_FMT_Y10 : 18 'Y10' # V4L2_PIX_FMT_Y12 : 19 'Y12' +# V4L2_PIX_FMT_GREY : 20 'GREY' # v4l2_palette 17 @@ -86,7 +87,7 @@ norm 0 frequency 0 # Override the power line frequency for the webcam. (normally not necessary) -# Values: +# Values: # -1 : Do not modify device setting # 0 : Power line frequency Disabled # 1 : 50hz @@ -223,6 +224,10 @@ despeckle_filter EedDl # Full path name to. (Default: not defined) ; mask_file value +# PGM file to completely mask out a area of image. +# Full path name to. 
(Default: not defined) +# mask_privacy value + # Dynamically create a mask file during operation (default: 0) # Adjust speed of mask changes from 0 (off) to 10 (fast) smart_mask_speed 0 @@ -337,7 +342,7 @@ ffmpeg_variable_bitrate 0 # hevc - H.265 / HEVC (High Efficiency Video Coding) ffmpeg_video_codec mpeg4 -# When creating videos, should frames be duplicated in order +# When creating videos, should frames be duplicated in order # to keep up with the requested frames per second # (default: true) ffmpeg_duplicate_frames true @@ -732,11 +737,13 @@ quiet on ############################################################ # Output images to a video4linux loopback device -# The value '-' means next available (default: not defined) +# Specify the device associated with the loopback device +# For example /dev/video1 (default: not defined) ; video_pipe value # Output motion images to a video4linux loopback device -# The value '-' means next available (default: not defined) +# Specify the device associated with the loopback device +# For example /dev/video1 (default: not defined) ; motion_video_pipe value diff --git a/motion.1 b/motion.1 index 7dbac4c1..c89a5e11 100644 --- a/motion.1 +++ b/motion.1 @@ -1805,7 +1805,7 @@ Default: Not Defined Description: .fi .RS -Output images to a video4linux loopback device. The value '-' means next available +Output images to a video4linux loopback device. .RE .RE @@ -1818,7 +1818,7 @@ Default: Not Defined Description: .fi .RS -Output motion images to a video4linux loopback device. The value '-' means next available +Output motion images to a video4linux loopback device. 
.RE .RE diff --git a/motion.c b/motion.c index b80cf4ee..33eb9b62 100644 --- a/motion.c +++ b/motion.c @@ -12,7 +12,7 @@ #if (defined(__FreeBSD__) && !defined(PWCBSD)) #include "video_freebsd.h" #else -#include "video.h" +#include "video2.h" #endif #include "conf.h" @@ -22,11 +22,7 @@ #include "picture.h" #include "rotate.h" -/* Forward declarations */ -static int motion_init(struct context *cnt); -static void motion_cleanup(struct context *cnt); -static void setup_signals(struct sigaction *, struct sigaction *); - +#define IMAGE_BUFFER_FLUSH ((unsigned int)-1) /** * tls_key_threadnr @@ -379,6 +375,62 @@ static void sigchild_handler(int signo ATTRIBUTE_UNUSED) return; } +/** + * setup_signals + * + * Attaches handlers to a number of signals that Motion need to catch. + * + * Parameters: sigaction structs for signals in general and SIGCHLD. + * + * Returns: nothing + */ +static void setup_signals(struct sigaction *sig_handler_action, struct sigaction *sigchild_action) +{ +#ifdef SA_NOCLDWAIT + sigchild_action->sa_flags = SA_NOCLDWAIT; +#else + sigchild_action->sa_flags = 0; +#endif + sigchild_action->sa_handler = sigchild_handler; + sigemptyset(&sigchild_action->sa_mask); +#ifdef SA_RESTART + sig_handler_action->sa_flags = SA_RESTART; +#else + sig_handler_action->sa_flags = 0; +#endif + sig_handler_action->sa_handler = sig_handler; + sigemptyset(&sig_handler_action->sa_mask); + + /* Enable automatic zombie reaping */ + sigaction(SIGCHLD, sigchild_action, NULL); + sigaction(SIGPIPE, sigchild_action, NULL); + sigaction(SIGALRM, sig_handler_action, NULL); + sigaction(SIGHUP, sig_handler_action, NULL); + sigaction(SIGINT, sig_handler_action, NULL); + sigaction(SIGQUIT, sig_handler_action, NULL); + sigaction(SIGTERM, sig_handler_action, NULL); + sigaction(SIGUSR1, sig_handler_action, NULL); + + /* use SIGVTALRM as a way to break out of the ioctl, don't restart */ + sig_handler_action->sa_flags = 0; + sigaction(SIGVTALRM, sig_handler_action, NULL); +} + +static void 
setup_signals_BSD(struct context *cnt){ +#ifdef __OpenBSD__ + /* + * FIXMARK + * Fixes zombie issue on OpenBSD 4.6 + */ + struct sigaction sig_handler_action; + struct sigaction sigchild_action; + setup_signals(&sig_handler_action, &sigchild_action); +#else + /* Kill compiler warnings */ + cnt->log_level = cnt->log_level; +#endif +} + /** * motion_remove_pid * @@ -457,7 +509,7 @@ static void motion_detected(struct context *cnt, int dev, struct image_data *img * in both time_t and struct tm format. */ cnt->prev_event = cnt->event_nr; - cnt->eventtime = img->timestamp; + cnt->eventtime = img->timestamp_tv.tv_sec; localtime_r(&cnt->eventtime, cnt->eventtime_tm); /* @@ -466,10 +518,11 @@ static void motion_detected(struct context *cnt, int dev, struct image_data *img * on_motion_detected_commend so it must be done now. */ mystrftime(cnt, cnt->text_event_string, sizeof(cnt->text_event_string), - cnt->conf.text_event, cnt->eventtime_tm, NULL, 0); + cnt->conf.text_event, &img->timestamp_tv, NULL, 0); /* EVENT_FIRSTMOTION triggers on_event_start_command and event_ffmpeg_newfile */ - event(cnt, EVENT_FIRSTMOTION, img->image, NULL, NULL, &img->timestamp_tm); + event(cnt, EVENT_FIRSTMOTION, img->image, NULL, NULL, + &cnt->imgs.image_ring[cnt->imgs.image_ring_out].timestamp_tv); MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Motion detected - starting event %d", cnt->event_nr); @@ -481,7 +534,7 @@ static void motion_detected(struct context *cnt, int dev, struct image_data *img } /* EVENT_MOTION triggers event_beep and on_motion_detected_command */ - event(cnt, EVENT_MOTION, NULL, NULL, NULL, &img->timestamp_tm); + event(cnt, EVENT_MOTION, NULL, NULL, NULL, &img->timestamp_tv); } /* Limit framerate */ @@ -493,14 +546,14 @@ static void motion_detected(struct context *cnt, int dev, struct image_data *img * We also disable this in setup_mode. 
*/ if (conf->stream_motion && !conf->setup_mode && img->shot != 1) - event(cnt, EVENT_STREAM, img->image, NULL, NULL, &img->timestamp_tm); + event(cnt, EVENT_STREAM, img->image, NULL, NULL, &img->timestamp_tv); /* * Save motion jpeg, if configured * Output the image_out (motion) picture. */ if (conf->motion_img) - event(cnt, EVENT_IMAGEM_DETECTED, NULL, NULL, NULL, &img->timestamp_tm); + event(cnt, EVENT_IMAGEM_DETECTED, NULL, NULL, NULL, &img->timestamp_tv); } /* if track enabled and auto track on */ @@ -520,7 +573,7 @@ static void motion_detected(struct context *cnt, int dev, struct image_data *img * max_images - Max number of images to process * Set to IMAGE_BUFFER_FLUSH to send/save all images in buffer */ -#define IMAGE_BUFFER_FLUSH ((unsigned int)-1) + static void process_image_ring(struct context *cnt, unsigned int max_images) { /* @@ -556,7 +609,7 @@ static void process_image_ring(struct context *cnt, unsigned int max_images) t = "Other"; mystrftime(cnt, tmp, sizeof(tmp), "%H%M%S-%q", - &cnt->imgs.image_ring[cnt->imgs.image_ring_out].timestamp_tm, NULL, 0); + &cnt->imgs.image_ring[cnt->imgs.image_ring_out].timestamp_tv, NULL, 0); draw_text(cnt->imgs.image_ring[cnt->imgs.image_ring_out].image, 10, 20, cnt->imgs.width, tmp, cnt->conf.text_double); draw_text(cnt->imgs.image_ring[cnt->imgs.image_ring_out].image, 10, 30, @@ -566,7 +619,7 @@ static void process_image_ring(struct context *cnt, unsigned int max_images) /* Output the picture to jpegs and ffmpeg */ event(cnt, EVENT_IMAGE_DETECTED, cnt->imgs.image_ring[cnt->imgs.image_ring_out].image, NULL, NULL, - &cnt->imgs.image_ring[cnt->imgs.image_ring_out].timestamp_tm); + &cnt->imgs.image_ring[cnt->imgs.image_ring_out].timestamp_tv); /* * Check if we must add any "filler" frames into movie to keep up fps @@ -578,11 +631,7 @@ static void process_image_ring(struct context *cnt, unsigned int max_images) if (!cnt->conf.ffmpeg_duplicate_frames) { /* don't duplicate frames */ } else if 
((cnt->imgs.image_ring[cnt->imgs.image_ring_out].shot == 0) && -#ifdef HAVE_FFMPEG (cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe))) { -#else - (cnt->conf.useextpipe && cnt->extpipe)) { -#endif /* * movie_last_shoot is -1 when file is created, * we don't know how many frames there is in first sec @@ -604,7 +653,7 @@ static void process_image_ring(struct context *cnt, unsigned int max_images) /* Add a filler frame into encoder */ event(cnt, EVENT_FFMPEG_PUT, cnt->imgs.image_ring[cnt->imgs.image_ring_out].image, NULL, NULL, - &cnt->imgs.image_ring[cnt->imgs.image_ring_out].timestamp_tm); + &cnt->imgs.image_ring[cnt->imgs.image_ring_out].timestamp_tv); cnt->movie_last_shot++; } @@ -660,6 +709,54 @@ static void process_image_ring(struct context *cnt, unsigned int max_images) cnt->current_image = saved_current_image; } +static void init_mask_privacy(struct context *cnt){ + + int indxrow; + int indxcol; + FILE *picture; + + /* Load the privacy file if any */ + if (cnt->conf.mask_privacy) { + if ((picture = myfopen(cnt->conf.mask_privacy, "r"))) { + /* + * NOTE: The mask is expected to have the output dimensions. I.e., the mask + * applies to the already rotated image, not the capture image. Thus, use + * width and height from imgs. + */ + cnt->imgs.mask_privacy = get_pgm(picture, cnt->imgs.width, cnt->imgs.height); + myfclose(picture); + } else { + MOTION_LOG(ERR, TYPE_ALL, SHOW_ERRNO, "%s: Error opening mask file %s", + cnt->conf.mask_privacy); + /* + * Try to write an empty mask file to make it easier + * for the user to edit it + */ + put_fixed_mask(cnt, cnt->conf.mask_privacy); + } + + if (!cnt->imgs.mask_privacy) { + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Failed to read mask privacy image. 
Mask privacy feature disabled."); + } else { + MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: Mask privacy file \"%s\" loaded.", cnt->conf.mask_privacy); + //swap black vs white for efficient processing + for (indxrow = 0; indxrow < cnt->imgs.height; indxrow++) { + for (indxcol = 0; indxcol < cnt->imgs.width; indxcol++) { + if ( cnt->imgs.mask_privacy[indxcol + (indxrow * cnt->imgs.width)] == 0xff) { + cnt->imgs.mask_privacy[indxcol + (indxrow * cnt->imgs.width)] = 0x00; + } else{ + cnt->imgs.mask_privacy[indxcol + (indxrow * cnt->imgs.width)] = 0xff; + } + } + } + } + + } else { + cnt->imgs.mask_privacy = NULL; + } + +} + /** * motion_init * @@ -678,6 +775,14 @@ static void process_image_ring(struct context *cnt, unsigned int max_images) static int motion_init(struct context *cnt) { FILE *picture; + int indx; + + char tname[16]; + snprintf(tname, sizeof(tname), "ml%d%s%s", + cnt->threadnr, + cnt->conf.camera_name ? ":" : "", + cnt->conf.camera_name ? cnt->conf.camera_name : ""); + MOTION_PTHREAD_SETNAME(tname); /* Store thread number in TLS. 
*/ pthread_setspecific(tls_key_threadnr, (void *)((unsigned long)cnt->threadnr)); @@ -789,13 +894,13 @@ static int motion_init(struct context *cnt) /* create a reference frame */ alg_update_reference_frame(cnt, RESET_REF_FRAME); -#if defined(HAVE_LINUX_VIDEODEV_H) && !defined(WITHOUT_V4L) && !defined(__FreeBSD__) +#if !defined(WITHOUT_V4L2) && !defined(__FreeBSD__) /* open video loopback devices if enabled */ if (cnt->conf.vidpipe) { MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Opening video loopback device for normal pictures"); /* vid_startpipe should get the output dimensions */ - cnt->pipe = vid_startpipe(cnt->conf.vidpipe, cnt->imgs.width, cnt->imgs.height, cnt->imgs.type); + cnt->pipe = vid_startpipe(cnt->conf.vidpipe, cnt->imgs.width, cnt->imgs.height, V4L2_PIX_FMT_YUV420); if (cnt->pipe < 0) { MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Failed to open video loopback for normal pictures"); @@ -807,14 +912,14 @@ static int motion_init(struct context *cnt) MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Opening video loopback device for motion pictures"); /* vid_startpipe should get the output dimensions */ - cnt->mpipe = vid_startpipe(cnt->conf.motionvidpipe, cnt->imgs.width, cnt->imgs.height, cnt->imgs.type); + cnt->mpipe = vid_startpipe(cnt->conf.motionvidpipe, cnt->imgs.width, cnt->imgs.height, V4L2_PIX_FMT_YUV420); if (cnt->mpipe < 0) { MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Failed to open video loopback for motion pictures"); return -1; } } -#endif /* !WITHOUT_V4L && !__FreeBSD__ */ +#endif /* !WITHOUT_V4L2 && !__FreeBSD__ */ #if defined(HAVE_MYSQL) || defined(HAVE_PGSQL) || defined(HAVE_SQLITE3) if (cnt->conf.database_type) { @@ -933,6 +1038,8 @@ static int motion_init(struct context *cnt) cnt->imgs.mask = NULL; } + init_mask_privacy(cnt); + /* Always initialize smart_mask - someone could turn it on later... 
*/ memset(cnt->imgs.smartmask, 0, cnt->imgs.motionsize); memset(cnt->imgs.smartmask_final, 255, cnt->imgs.motionsize); @@ -961,6 +1068,83 @@ static int motion_init(struct context *cnt) /* 2 sec startup delay so FPS is calculated correct */ cnt->startup_frames = cnt->conf.frame_limit * 2; + /* Initialize the double sized characters if needed. */ + if (cnt->conf.text_double) + cnt->text_size_factor = 2; + else + cnt->text_size_factor = 1; + + + /* Work out expected frame rate based on config setting */ + if (cnt->conf.frame_limit < 2) + cnt->conf.frame_limit = 2; + + cnt->required_frame_time = 1000000L / cnt->conf.frame_limit; + + cnt->frame_delay = cnt->required_frame_time; + + /* + * Reserve enough space for a 10 second timing history buffer. Note that, + * if there is any problem on the allocation, mymalloc does not return. + */ + cnt->rolling_average_data = NULL; + cnt->rolling_average_limit = 10 * cnt->conf.frame_limit; + cnt->rolling_average_data = mymalloc(sizeof(cnt->rolling_average_data) * cnt->rolling_average_limit); + + /* Preset history buffer with expected frame rate */ + for (indx = 0; indx < cnt->rolling_average_limit; indx++) + cnt->rolling_average_data[indx] = cnt->required_frame_time; + + + if (cnt->track.type) + cnt->moved = track_center(cnt, cnt->video_dev, 0, 0, 0); + + setup_signals_BSD(cnt); + + /* Initialize area detection */ + cnt->area_minx[0] = cnt->area_minx[3] = cnt->area_minx[6] = 0; + cnt->area_miny[0] = cnt->area_miny[1] = cnt->area_miny[2] = 0; + + cnt->area_minx[1] = cnt->area_minx[4] = cnt->area_minx[7] = cnt->imgs.width / 3; + cnt->area_maxx[0] = cnt->area_maxx[3] = cnt->area_maxx[6] = cnt->imgs.width / 3; + + cnt->area_minx[2] = cnt->area_minx[5] = cnt->area_minx[8] = cnt->imgs.width / 3 * 2; + cnt->area_maxx[1] = cnt->area_maxx[4] = cnt->area_maxx[7] = cnt->imgs.width / 3 * 2; + + cnt->area_miny[3] = cnt->area_miny[4] = cnt->area_miny[5] = cnt->imgs.height / 3; + cnt->area_maxy[0] = cnt->area_maxy[1] = cnt->area_maxy[2] = 
cnt->imgs.height / 3; + + cnt->area_miny[6] = cnt->area_miny[7] = cnt->area_miny[8] = cnt->imgs.height / 3 * 2; + cnt->area_maxy[3] = cnt->area_maxy[4] = cnt->area_maxy[5] = cnt->imgs.height / 3 * 2; + + cnt->area_maxx[2] = cnt->area_maxx[5] = cnt->area_maxx[8] = cnt->imgs.width; + cnt->area_maxy[6] = cnt->area_maxy[7] = cnt->area_maxy[8] = cnt->imgs.height; + + cnt->areadetect_eventnbr = 0; + + cnt->timenow = 0; + cnt->timebefore = 0; + cnt->rate_limit = 0; + cnt->lastframetime = 0; + cnt->minimum_frame_time_downcounter = cnt->conf.minimum_frame_time; + cnt->get_image = 1; + + cnt->olddiffs = 0; + cnt->smartmask_ratio = 0; + cnt->smartmask_count = 20; + + cnt->previous_diffs = 0; + cnt->previous_location_x = 0; + cnt->previous_location_y = 0; + + cnt->time_last_frame = 1; + cnt->time_current_frame = 0; + + cnt->smartmask_lastrate = 0; + + cnt->passflag = 0; //only purpose to flag first frame + cnt->rolling_frame = 0; + return 0; } @@ -981,10 +1165,8 @@ static void motion_cleanup(struct context *cnt) /* Stop stream */ event(cnt, EVENT_STOP, NULL, NULL, NULL, NULL); -#ifdef HAVE_FFMPEG event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, NULL); event(cnt, EVENT_ENDMOTION, NULL, NULL, NULL, NULL); -#endif // HAVE_FFMPEG if (cnt->video_dev >= 0) { MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: Calling vid_close() from motion_cleanup"); @@ -1021,6 +1203,9 @@ static void motion_cleanup(struct context *cnt) if (cnt->imgs.mask) free(cnt->imgs.mask); cnt->imgs.mask = NULL; + if (cnt->imgs.mask_privacy) free(cnt->imgs.mask_privacy); + cnt->imgs.mask_privacy = NULL; + free(cnt->imgs.common_buffer); cnt->imgs.common_buffer = NULL; @@ -1071,6 +1256,1110 @@ static void motion_cleanup(struct context *cnt) } } +static void mlp_mask_privacy(struct context *cnt){ + + /* We do a bitwise OR of the image with the mask file. + * The value for black in the mask file is 0x00 so when + * it is bitwised OR with image, it will leave the original + * value. 
For this reason, we inverted the mask in the init + * function. The file is read in as black=blockout, white=keep + * this function works as black=keep, white=blockout. This also + * results with the blockout section being white on the result. + * This is done strictly for processing efficiency to lower cpu + * since this function is called for every single image. + * If user wants blockout in black instead of white, that means more cpu...... + */ + + int indxrow; + int indxcol; + + if (cnt->imgs.mask_privacy != NULL){ + for (indxrow = 0; indxrow < cnt->imgs.height; indxrow++) { + for (indxcol = 0; indxcol < cnt->imgs.width; indxcol++) { + cnt->current_image->image[indxcol + (indxrow*cnt->imgs.width)] |= cnt->imgs.mask_privacy[indxcol + (indxrow*cnt->imgs.width)]; + } + } + } + + +} + +static void mlp_areadetect(struct context *cnt){ + int i, j, z = 0; + /* + * Simple hack to recognize motion in a specific area + * Do we need a new coversion specifier as well?? + */ + if ((cnt->conf.area_detect) && + (cnt->event_nr != cnt->areadetect_eventnbr) && + (cnt->current_image->flags & IMAGE_TRIGGER)) { + j = strlen(cnt->conf.area_detect); + for (i = 0; i < j; i++) { + z = cnt->conf.area_detect[i] - 49; /* characters are stored as ascii 48-57 (0-9) */ + if ((z >= 0) && (z < 9)) { + if (cnt->current_image->location.x > cnt->area_minx[z] && + cnt->current_image->location.x < cnt->area_maxx[z] && + cnt->current_image->location.y > cnt->area_miny[z] && + cnt->current_image->location.y < cnt->area_maxy[z]) { + event(cnt, EVENT_AREA_DETECTED, NULL, NULL, NULL, &cnt->current_image->timestamp_tv); + cnt->areadetect_eventnbr = cnt->event_nr; /* Fire script only once per event */ + MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: Motion in area %d detected.", z + 1); + break; + } + } + } + } + +} + +static void mlp_prepare(struct context *cnt){ + + int frame_buffer_size; + struct timeval tv1; + + /***** MOTION LOOP - PREPARE FOR NEW FRAME SECTION *****/ + cnt->watchdog = WATCHDOG_TMO; + + /* 
Get current time and preserver last time for frame interval calc. */ + + /* This may be better at the end of the loop or moving the part in + * the end doing elapsed time calc in here + */ + cnt->timebefore = cnt->timenow; + gettimeofday(&tv1, NULL); + cnt->timenow = tv1.tv_usec + 1000000L * tv1.tv_sec; + + /* + * Calculate detection rate limit. Above 5fps we limit the detection + * rate to 3fps to reduce load at higher framerates. + */ + cnt->process_thisframe = 0; + cnt->rate_limit++; + if (cnt->rate_limit >= (cnt->lastrate / 3)) { + cnt->rate_limit = 0; + cnt->process_thisframe = 1; + } + + /* + * Since we don't have sanity checks done when options are set, + * this sanity check must go in the main loop :(, before pre_captures + * are attempted. + */ + if (cnt->conf.minimum_motion_frames < 1) + cnt->conf.minimum_motion_frames = 1; + + if (cnt->conf.pre_capture < 0) + cnt->conf.pre_capture = 0; + + /* + * Check if our buffer is still the right size + * If pre_capture or minimum_motion_frames has been changed + * via the http remote control we need to re-size the ring buffer + */ + frame_buffer_size = cnt->conf.pre_capture + cnt->conf.minimum_motion_frames; + + if (cnt->imgs.image_ring_size != frame_buffer_size) + image_ring_resize(cnt, frame_buffer_size); + + /* Get time for current frame */ + cnt->currenttime = time(NULL); + + /* + * localtime returns static data and is not threadsafe + * so we use localtime_r which is reentrant and threadsafe + */ + localtime_r(&cnt->currenttime, cnt->currenttime_tm); + + /* + * If we have started on a new second we reset the shots variable + * lastrate is updated to be the number of the last frame. last rate + * is used as the ffmpeg framerate when motion is detected. 
+ */ + if (cnt->lastframetime != cnt->currenttime) { + cnt->lastrate = cnt->shots + 1; + cnt->shots = -1; + cnt->lastframetime = cnt->currenttime; + + if (cnt->conf.minimum_frame_time) { + cnt->minimum_frame_time_downcounter--; + if (cnt->minimum_frame_time_downcounter == 0) + cnt->get_image = 1; + } else { + cnt->get_image = 1; + } + } + + + /* Increase the shots variable for each frame captured within this second */ + cnt->shots++; + + if (cnt->startup_frames > 0) + cnt->startup_frames--; + + +} + +static void mlp_resetimages(struct context *cnt){ + + struct image_data *old_image; + + if (cnt->conf.minimum_frame_time) { + cnt->minimum_frame_time_downcounter = cnt->conf.minimum_frame_time; + cnt->get_image = 0; + } + + /* ring_buffer_in is pointing to current pos, update before put in a new image */ + if (++cnt->imgs.image_ring_in >= cnt->imgs.image_ring_size) + cnt->imgs.image_ring_in = 0; + + /* Check if we have filled the ring buffer, throw away last image */ + if (cnt->imgs.image_ring_in == cnt->imgs.image_ring_out) { + if (++cnt->imgs.image_ring_out >= cnt->imgs.image_ring_size) + cnt->imgs.image_ring_out = 0; + } + + /* cnt->current_image points to position in ring where to store image, diffs etc. 
*/ + old_image = cnt->current_image; + cnt->current_image = &cnt->imgs.image_ring[cnt->imgs.image_ring_in]; + + /* Init/clear current_image */ + if (cnt->process_thisframe) { + /* set diffs to 0 now, will be written after we calculated diffs in new image */ + cnt->current_image->diffs = 0; + + /* Set flags to 0 */ + cnt->current_image->flags = 0; + cnt->current_image->cent_dist = 0; + + /* Clear location data */ + memset(&cnt->current_image->location, 0, sizeof(cnt->current_image->location)); + cnt->current_image->total_labels = 0; + } else if (cnt->current_image && old_image) { + /* not processing this frame: save some important values for next image */ + cnt->current_image->diffs = old_image->diffs; + cnt->current_image->timestamp_tv = old_image->timestamp_tv; + cnt->current_image->shot = old_image->shot; + cnt->current_image->cent_dist = old_image->cent_dist; + cnt->current_image->flags = old_image->flags & (~IMAGE_SAVED); + cnt->current_image->location = old_image->location; + cnt->current_image->total_labels = old_image->total_labels; + } + + /* Store time with pre_captured image */ + gettimeofday(&cnt->current_image->timestamp_tv, NULL); + + /* Store shot number with pre_captured image */ + cnt->current_image->shot = cnt->shots; + +} + +static int mlp_retry(struct context *cnt){ + + /***** MOTION LOOP - RETRY INITIALIZING SECTION *****/ + /* + * If a camera is not available we keep on retrying every 10 seconds + * until it shows up. 
+ */ + if (cnt->video_dev < 0 && + cnt->currenttime % 10 == 0 && cnt->shots == 0) { + MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO, + "%s: Retrying until successful connection with camera"); + cnt->video_dev = vid_start(cnt); + /* + * If the netcam has different dimensions than in the config file + * we need to restart Motion to re-allocate all the buffers + */ + if (cnt->imgs.width != cnt->conf.width || cnt->imgs.height != cnt->conf.height) { + MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Camera has finally become available\n" + "Camera image has different width and height" + "from what is in the config file. You should fix that\n" + "Restarting Motion thread to reinitialize all " + "image buffers to new picture dimensions"); + cnt->conf.width = cnt->imgs.width; + cnt->conf.height = cnt->imgs.height; + /* + * Break out of main loop terminating thread + * watchdog will start us again + */ + return 1; + } + } + return 0; +} + +static int mlp_capture(struct context *cnt){ + + const char *tmpin; + char tmpout[80]; + int vid_return_code = 0; /* Return code used when calling vid_next */ + struct timeval tv1; + + /***** MOTION LOOP - IMAGE CAPTURE SECTION *****/ + /* + * Fetch next frame from camera + * If vid_next returns 0 all is well and we got a new picture + * Any non zero value is an error. 
+ * 0 = OK, valid picture + * <0 = fatal error - leave the thread by breaking out of the main loop + * >0 = non fatal error - copy last image or show grey image with message + */ + if (cnt->video_dev >= 0) + vid_return_code = vid_next(cnt, cnt->current_image->image); + else + vid_return_code = 1; /* Non fatal error */ + + // VALID PICTURE + if (vid_return_code == 0) { + cnt->lost_connection = 0; + cnt->connectionlosttime = 0; + + /* If all is well reset missing_frame_counter */ + if (cnt->missing_frame_counter >= MISSING_FRAMES_TIMEOUT * cnt->conf.frame_limit) { + /* If we previously logged starting a grey image, now log video re-start */ + MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Video signal re-acquired"); + // event for re-acquired video signal can be called here + } + cnt->missing_frame_counter = 0; + + /* + * Save the newly captured still virgin image to a buffer + * which we will not alter with text and location graphics + */ + memcpy(cnt->imgs.image_virgin, cnt->current_image->image, cnt->imgs.size); + + mlp_mask_privacy(cnt); + + /* + * If the camera is a netcam we let the camera decide the pace. + * Otherwise we will keep on adding duplicate frames. + * By resetting the timer the framerate becomes maximum the rate + * of the Netcam. 
+ */ + if (cnt->conf.netcam_url) { + gettimeofday(&tv1, NULL); + cnt->timenow = tv1.tv_usec + 1000000L * tv1.tv_sec; + } + // FATAL ERROR - leave the thread by breaking out of the main loop + } else if (vid_return_code < 0) { + /* Fatal error - Close video device */ + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Video device fatal error - Closing video device"); + vid_close(cnt); + /* + * Use virgin image, if we are not able to open it again next loop + * a gray image with message is applied + * flag lost_connection + */ + memcpy(cnt->current_image->image, cnt->imgs.image_virgin, cnt->imgs.size); + cnt->lost_connection = 1; + /* NO FATAL ERROR - + * copy last image or show grey image with message + * flag on lost_connection if : + * vid_return_code == NETCAM_RESTART_ERROR + * cnt->video_dev < 0 + * cnt->missing_frame_counter > (MISSING_FRAMES_TIMEOUT * cnt->conf.frame_limit) + */ + } else { + + MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: vid_return_code %d",vid_return_code); + /* + * Netcams that change dimensions while Motion is running will + * require that Motion restarts to reinitialize all the many + * buffers inside Motion. 
It will be a mess to try and recover any + * other way + */ + if (vid_return_code == NETCAM_RESTART_ERROR) { + MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Restarting Motion thread to reinitialize all " + "image buffers"); + /* + * Break out of main loop terminating thread + * watchdog will start us again + * Set lost_connection flag on + */ + cnt->lost_connection = 1; + return 1; + } + + /* + * First missed frame - store timestamp + * Don't reset time when thread restarts + */ + if (cnt->connectionlosttime == 0) + cnt->connectionlosttime = cnt->currenttime; + + /* + * Increase missing_frame_counter + * The first MISSING_FRAMES_TIMEOUT seconds we copy previous virgin image + * After MISSING_FRAMES_TIMEOUT seconds we put a grey error image in the buffer + * If we still have not yet received the initial image from a camera + * we go straight for the grey error image. + */ + ++cnt->missing_frame_counter; + + if (cnt->video_dev >= 0 && + cnt->missing_frame_counter < (MISSING_FRAMES_TIMEOUT * cnt->conf.frame_limit)) { + memcpy(cnt->current_image->image, cnt->imgs.image_virgin, cnt->imgs.size); + } else { + cnt->lost_connection = 1; + + if (cnt->video_dev >= 0) + tmpin = "CONNECTION TO CAMERA LOST\\nSINCE %Y-%m-%d %T"; + else + tmpin = "UNABLE TO OPEN VIDEO DEVICE\\nSINCE %Y-%m-%d %T"; + + tv1.tv_sec=cnt->connectionlosttime; + tv1.tv_usec = 0; + memset(cnt->current_image->image, 0x80, cnt->imgs.size); + mystrftime(cnt, tmpout, sizeof(tmpout), tmpin, &tv1, NULL, 0); + draw_text(cnt->current_image->image, 10, 20 * cnt->text_size_factor, cnt->imgs.width, + tmpout, cnt->conf.text_double); + + /* Write error message only once */ + if (cnt->missing_frame_counter == MISSING_FRAMES_TIMEOUT * cnt->conf.frame_limit) { + MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Video signal lost - Adding grey image"); + // Event for lost video signal can be called from here + event(cnt, EVENT_CAMERA_LOST, NULL, NULL, NULL, &tv1); + } + + /* + * If we don't get a valid frame for a long time, try to 
close/reopen device + * Only try this when a device is open + */ + if ((cnt->video_dev > 0) && + (cnt->missing_frame_counter == (MISSING_FRAMES_TIMEOUT * 4) * cnt->conf.frame_limit)) { + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Video signal still lost - " + "Trying to close video device"); + vid_close(cnt); + } + } + } + return 0; + +} + +static void mlp_detection(struct context *cnt){ + + + /***** MOTION LOOP - MOTION DETECTION SECTION *****/ + /* + * The actual motion detection takes place in the following + * diffs is the number of pixels detected as changed + * Make a differences picture in image_out + * + * alg_diff_standard is the slower full feature motion detection algorithm + * alg_diff first calls a fast detection algorithm which only looks at a + * fraction of the pixels. If this detects possible motion alg_diff_standard + * is called. + */ + if (cnt->process_thisframe) { + if (cnt->threshold && !cnt->pause) { + /* + * If we've already detected motion and we want to see if there's + * still motion, don't bother trying the fast one first. IF there's + * motion, the alg_diff will trigger alg_diff_standard + * anyway + */ + if (cnt->detecting_motion || cnt->conf.setup_mode) + cnt->current_image->diffs = alg_diff_standard(cnt, cnt->imgs.image_virgin); + else + cnt->current_image->diffs = alg_diff(cnt, cnt->imgs.image_virgin); + + /* Lightswitch feature - has light intensity changed? + * This can happen due to change of light conditions or due to a sudden change of the camera + * sensitivity. If alg_lightswitch detects lightswitch we suspend motion detection the next + * 5 frames to allow the camera to settle. 
+ * Don't check if we have lost connection, we detect "Lost signal" frame as lightswitch + */ + if (cnt->conf.lightswitch > 1 && !cnt->lost_connection) { + if (alg_lightswitch(cnt, cnt->current_image->diffs)) { + MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: Lightswitch detected"); + + if (cnt->moved < 5) + cnt->moved = 5; + + cnt->current_image->diffs = 0; + alg_update_reference_frame(cnt, RESET_REF_FRAME); + } + } + + /* + * Switchfilter feature tries to detect a change in the video signal + * from one camera to the next. This is normally used in the Round + * Robin feature. The algorithm is not very safe. + * The algorithm takes a little time so we only call it when needed + * ie. when feature is enabled and diffs>threshold. + * We do not suspend motion detection like we did for lightswitch + * because with Round Robin this is controlled by roundrobin_skip. + */ + if (cnt->conf.switchfilter && cnt->current_image->diffs > cnt->threshold) { + cnt->current_image->diffs = alg_switchfilter(cnt, cnt->current_image->diffs, + cnt->current_image->image); + + if (cnt->current_image->diffs <= cnt->threshold) { + cnt->current_image->diffs = 0; + + MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: Switchfilter detected"); + } + } + + /* + * Despeckle feature + * First we run (as given by the despeckle_filter option iterations + * of erode and dilate algorithms. + * Finally we run the labelling feature. + * All this is done in the alg_despeckle code. 
+ */ + cnt->current_image->total_labels = 0; + cnt->imgs.largest_label = 0; + cnt->olddiffs = 0; + + if (cnt->conf.despeckle_filter && cnt->current_image->diffs > 0) { + cnt->olddiffs = cnt->current_image->diffs; + cnt->current_image->diffs = alg_despeckle(cnt, cnt->olddiffs); + } else if (cnt->imgs.labelsize_max) { + cnt->imgs.labelsize_max = 0; /* Disable labeling if enabled */ + } + + } else if (!cnt->conf.setup_mode) { + cnt->current_image->diffs = 0; + } + } + + //TODO: This section needs investigation for purpose, cause and effect + /* Manipulate smart_mask sensitivity (only every smartmask_ratio seconds) */ + if ((cnt->smartmask_speed && (cnt->event_nr != cnt->prev_event)) && + (!--cnt->smartmask_count)) { + alg_tune_smartmask(cnt); + cnt->smartmask_count = cnt->smartmask_ratio; + } + + /* + * cnt->moved is set by the tracking code when camera has been asked to move. + * When camera is moving we do not want motion to detect motion or we will + * get our camera chasing itself like crazy and we will get motion detected + * which is not really motion. So we pretend there is no motion by setting + * cnt->diffs = 0. + * We also pretend to have a moving camera when we start Motion and when light + * switch has been detected to allow camera to settle. + */ + if (cnt->moved) { + cnt->moved--; + cnt->current_image->diffs = 0; + } + +} + +static void mlp_tuning(struct context *cnt){ + + /***** MOTION LOOP - TUNING SECTION *****/ + + /* + * If noise tuning was selected, do it now. but only when + * no frames have been recorded and only once per second + */ + if ((cnt->conf.noise_tune && cnt->shots == 0) && + (!cnt->detecting_motion && (cnt->current_image->diffs <= cnt->threshold))) + alg_noise_tune(cnt, cnt->imgs.image_virgin); + + + /* + * If we are not noise tuning lets make sure that remote controlled + * changes of noise_level are used. 
+ */ + if (cnt->process_thisframe) { + if (!cnt->conf.noise_tune) + cnt->noise = cnt->conf.noise; + + /* + * threshold tuning if enabled + * if we are not threshold tuning lets make sure that remote controlled + * changes of threshold are used. + */ + if (cnt->conf.threshold_tune) + alg_threshold_tune(cnt, cnt->current_image->diffs, cnt->detecting_motion); + else + cnt->threshold = cnt->conf.max_changes; + + /* + * If motion is detected (cnt->current_image->diffs > cnt->threshold) and before we add text to the pictures + * we find the center and size coordinates of the motion to be used for text overlays and later + * for adding the locate rectangle + */ + if (cnt->current_image->diffs > cnt->threshold) + alg_locate_center_size(&cnt->imgs, cnt->imgs.width, cnt->imgs.height, &cnt->current_image->location); + + /* + * Update reference frame. + * micro-lighswitch: trying to auto-detect lightswitch events. + * frontdoor illumination. Updates are rate-limited to 3 per second at + * framerates above 5fps to save CPU resources and to keep sensitivity + * at a constant level. + */ + + if ((cnt->current_image->diffs > cnt->threshold) && (cnt->conf.lightswitch == 1) && + (cnt->lightswitch_framecounter < (cnt->lastrate * 2)) && /* two seconds window only */ + /* number of changed pixels almost the same in two consecutive frames and */ + ((abs(cnt->previous_diffs - cnt->current_image->diffs)) < (cnt->previous_diffs / 15)) && + /* center of motion in about the same place ? 
*/ + ((abs(cnt->current_image->location.x - cnt->previous_location_x)) <= (cnt->imgs.width / 150)) && + ((abs(cnt->current_image->location.y - cnt->previous_location_y)) <= (cnt->imgs.height / 150))) { + alg_update_reference_frame(cnt, RESET_REF_FRAME); + cnt->current_image->diffs = 0; + cnt->lightswitch_framecounter = 0; + + MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: micro-lightswitch!"); + } else { + alg_update_reference_frame(cnt, UPDATE_REF_FRAME); + } + cnt->previous_diffs = cnt->current_image->diffs; + cnt->previous_location_x = cnt->current_image->location.x; + cnt->previous_location_y = cnt->current_image->location.y; + } + + +} + +static void mlp_overlay(struct context *cnt){ + + char tmp[PATH_MAX]; + /***** MOTION LOOP - TEXT AND GRAPHICS OVERLAY SECTION *****/ + /* + * Some overlays on top of the motion image + * Note that these now modifies the cnt->imgs.out so this buffer + * can no longer be used for motion detection features until next + * picture frame is captured. + */ + + /* Smartmask overlay */ + if (cnt->smartmask_speed && (cnt->conf.motion_img || cnt->conf.ffmpeg_output_debug || + cnt->conf.setup_mode)) + overlay_smartmask(cnt, cnt->imgs.out); + + /* Largest labels overlay */ + if (cnt->imgs.largest_label && (cnt->conf.motion_img || cnt->conf.ffmpeg_output_debug || + cnt->conf.setup_mode)) + overlay_largest_label(cnt, cnt->imgs.out); + + /* Fixed mask overlay */ + if (cnt->imgs.mask && (cnt->conf.motion_img || cnt->conf.ffmpeg_output_debug || + cnt->conf.setup_mode)) + overlay_fixed_mask(cnt, cnt->imgs.out); + + /* Initialize the double sized characters if needed. */ + if (cnt->conf.text_double && cnt->text_size_factor == 1) { + cnt->text_size_factor = 2; + /* If text_double is set to off, then reset the scaling text_size_factor. 
*/ + } else if (!cnt->conf.text_double && cnt->text_size_factor == 2) { + cnt->text_size_factor = 1; + } + + /* Add changed pixels in upper right corner of the pictures */ + if (cnt->conf.text_changes) { + if (!cnt->pause) + sprintf(tmp, "%d", cnt->current_image->diffs); + else + sprintf(tmp, "-"); + + draw_text(cnt->current_image->image, cnt->imgs.width - 10, 10, + cnt->imgs.width, tmp, cnt->conf.text_double); + } + + /* + * Add changed pixels to motion-images (for stream) in setup_mode + * and always overlay smartmask (not only when motion is detected) + */ + if (cnt->conf.setup_mode) { + sprintf(tmp, "D:%5d L:%3d N:%3d", cnt->current_image->diffs, + cnt->current_image->total_labels, cnt->noise); + draw_text(cnt->imgs.out, cnt->imgs.width - 10, cnt->imgs.height - 30 * cnt->text_size_factor, + cnt->imgs.width, tmp, cnt->conf.text_double); + sprintf(tmp, "THREAD %d SETUP", cnt->threadnr); + draw_text(cnt->imgs.out, cnt->imgs.width - 10, cnt->imgs.height - 10 * cnt->text_size_factor, + cnt->imgs.width, tmp, cnt->conf.text_double); + } + + /* Add text in lower left corner of the pictures */ + if (cnt->conf.text_left) { + mystrftime(cnt, tmp, sizeof(tmp), cnt->conf.text_left, + &cnt->current_image->timestamp_tv, NULL, 0); + draw_text(cnt->current_image->image, 10, cnt->imgs.height - 10 * cnt->text_size_factor, + cnt->imgs.width, tmp, cnt->conf.text_double); + } + + /* Add text in lower right corner of the pictures */ + if (cnt->conf.text_right) { + mystrftime(cnt, tmp, sizeof(tmp), cnt->conf.text_right, + &cnt->current_image->timestamp_tv, NULL, 0); + draw_text(cnt->current_image->image, cnt->imgs.width - 10, + cnt->imgs.height - 10 * cnt->text_size_factor, + cnt->imgs.width, tmp, cnt->conf.text_double); + } + +} + +static void mlp_actions(struct context *cnt){ + + int indx; + + /***** MOTION LOOP - ACTIONS AND EVENT CONTROL SECTION *****/ + + if (cnt->current_image->diffs > cnt->threshold) { + /* flag this image, it have motion */ + cnt->current_image->flags |= 
IMAGE_MOTION; + cnt->lightswitch_framecounter++; /* micro lightswitch */ + } else { + cnt->lightswitch_framecounter = 0; + } + + /* + * If motion has been detected we take action and start saving + * pictures and movies etc by calling motion_detected(). + * Is emulate_motion enabled we always call motion_detected() + * If post_capture is enabled we also take care of this in the this + * code section. + */ + if (cnt->conf.emulate_motion && (cnt->startup_frames == 0)) { + cnt->detecting_motion = 1; + MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: Emulating motion"); + if (cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe)) { + /* Setup the postcap counter */ + cnt->postcap = cnt->conf.post_capture; + MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: (Em) Init post capture %d", + cnt->postcap); + } + + cnt->current_image->flags |= (IMAGE_TRIGGER | IMAGE_SAVE); + motion_detected(cnt, cnt->video_dev, cnt->current_image); + } else if ((cnt->current_image->flags & IMAGE_MOTION) && (cnt->startup_frames == 0)) { + /* + * Did we detect motion (like the cat just walked in :) )? + * If so, ensure the motion is sustained if minimum_motion_frames + */ + + /* Count how many frames with motion there is in the last minimum_motion_frames in precap buffer */ + int frame_count = 0; + int pos = cnt->imgs.image_ring_in; + + for (indx = 0; indx < cnt->conf.minimum_motion_frames; indx++) { + if (cnt->imgs.image_ring[pos].flags & IMAGE_MOTION) + frame_count++; + + if (pos == 0) + pos = cnt->imgs.image_ring_size-1; + else + pos--; + } + + if (frame_count >= cnt->conf.minimum_motion_frames) { + + cnt->current_image->flags |= (IMAGE_TRIGGER | IMAGE_SAVE); + /* If we were previously detecting motion, started a movie, then got + * no motion then we reset the start movie time so that we do not + * get a pause in the movie. 
+ */ + if ( (cnt->detecting_motion == 0) && (cnt->ffmpeg_output != NULL) ) { + cnt->ffmpeg_output->start_time.tv_sec=cnt->current_image->timestamp_tv.tv_sec; + cnt->ffmpeg_output->start_time.tv_usec=cnt->current_image->timestamp_tv.tv_usec; + } + + cnt->detecting_motion = 1; + + /* Setup the postcap counter */ + cnt->postcap = cnt->conf.post_capture; + MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: Setup post capture %d", + cnt->postcap); + + /* Mark all images in image_ring to be saved */ + for (indx = 0; indx < cnt->imgs.image_ring_size; indx++) + cnt->imgs.image_ring[indx].flags |= IMAGE_SAVE; + + } else if ((cnt->postcap) && + (cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe))) { + /* we have motion in this frame, but not enought frames for trigger. Check postcap */ + cnt->current_image->flags |= (IMAGE_POSTCAP | IMAGE_SAVE); + cnt->postcap--; + MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: post capture %d", + cnt->postcap); + } else { + cnt->current_image->flags |= IMAGE_PRECAP; + } + + /* Always call motion_detected when we have a motion image */ + motion_detected(cnt, cnt->video_dev, cnt->current_image); + } else if ((cnt->postcap) && + (cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe))) { + /* No motion, doing postcap */ + cnt->current_image->flags |= (IMAGE_POSTCAP | IMAGE_SAVE); + cnt->postcap--; + MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: post capture %d", + cnt->postcap); + } else { + /* Done with postcap, so just have the image in the precap buffer */ + cnt->current_image->flags |= IMAGE_PRECAP; + /* gapless movie feature */ + if ((cnt->conf.event_gap == 0) && (cnt->detecting_motion == 1)) + cnt->makemovie = 1; + cnt->detecting_motion = 0; + } + + /* Update last frame saved time, so we can end event after gap time */ + if (cnt->current_image->flags & IMAGE_SAVE) + cnt->lasttime = cnt->current_image->timestamp_tv.tv_sec; + + + mlp_areadetect(cnt); + + /* + * Is the movie too long? 
Then make movies + * First test for max_movie_time + */ + if ((cnt->conf.max_movie_time && cnt->event_nr == cnt->prev_event) && + (cnt->currenttime - cnt->eventtime >= cnt->conf.max_movie_time)) + cnt->makemovie = 1; + + /* + * Now test for quiet longer than 'gap' OR make movie as decided in + * previous statement. + */ + if (((cnt->currenttime - cnt->lasttime >= cnt->conf.event_gap) && cnt->conf.event_gap > 0) || + cnt->makemovie) { + if (cnt->event_nr == cnt->prev_event || cnt->makemovie) { + + /* Flush image buffer */ + process_image_ring(cnt, IMAGE_BUFFER_FLUSH); + + /* Save preview_shot here at the end of event */ + if (cnt->imgs.preview_image.diffs) { + preview_save(cnt); + cnt->imgs.preview_image.diffs = 0; + } + + event(cnt, EVENT_ENDMOTION, NULL, NULL, NULL, &cnt->current_image->timestamp_tv); + + /* + * If tracking is enabled we center our camera so it does not + * point to a place where it will miss the next action + */ + if (cnt->track.type) + cnt->moved = track_center(cnt, cnt->video_dev, 0, 0, 0); + + MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: End of event %d", + cnt->event_nr); + + cnt->makemovie = 0; + /* Reset post capture */ + cnt->postcap = 0; + + /* Finally we increase the event number */ + cnt->event_nr++; + cnt->lightswitch_framecounter = 0; + + /* + * And we unset the text_event_string to avoid that buffered + * images get a timestamp from previous event. 
+ */ + cnt->text_event_string[0] = '\0'; + } + } + + /* Save/send to movie some images */ + process_image_ring(cnt, 2); + + +} + +static void mlp_setupmode(struct context *cnt){ +/***** MOTION LOOP - SETUP MODE CONSOLE OUTPUT SECTION *****/ + + /* If CAMERA_VERBOSE enabled output some numbers to console */ + if (cnt->conf.setup_mode) { + char msg[1024] = "\0"; + char part[100]; + + if (cnt->conf.despeckle_filter) { + snprintf(part, 99, "Raw changes: %5d - changes after '%s': %5d", + cnt->olddiffs, cnt->conf.despeckle_filter, cnt->current_image->diffs); + strcat(msg, part); + if (strchr(cnt->conf.despeckle_filter, 'l')) { + sprintf(part, " - labels: %3d", cnt->current_image->total_labels); + strcat(msg, part); + } + } else { + sprintf(part, "Changes: %5d", cnt->current_image->diffs); + strcat(msg, part); + } + + if (cnt->conf.noise_tune) { + sprintf(part, " - noise level: %2d", cnt->noise); + strcat(msg, part); + } + + if (cnt->conf.threshold_tune) { + sprintf(part, " - threshold: %d", cnt->threshold); + strcat(msg, part); + } + + MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: %s", msg); + } + +} + +static void mlp_snapshot(struct context *cnt){ + /***** MOTION LOOP - SNAPSHOT FEATURE SECTION *****/ + /* + * Did we get triggered to make a snapshot from control http? Then shoot a snap + * If snapshot_interval is not zero and time since epoch MOD snapshot_interval = 0 then snap + * We actually allow the time to run over the interval in case we have a delay + * from slow camera. + * Note: Negative value means SIGALRM snaps are enabled + * httpd-control snaps are always enabled. 
+ */ + + /* time_current_frame is used both for snapshot and timelapse features */ + cnt->time_current_frame = cnt->currenttime; + + if ((cnt->conf.snapshot_interval > 0 && cnt->shots == 0 && + cnt->time_current_frame % cnt->conf.snapshot_interval <= cnt->time_last_frame % cnt->conf.snapshot_interval) || + cnt->snapshot) { + event(cnt, EVENT_IMAGE_SNAPSHOT, cnt->current_image->image, NULL, NULL, &cnt->current_image->timestamp_tv); + cnt->snapshot = 0; + } + +} + +static void mlp_timelapse(struct context *cnt){ + struct tm timestamp_tm; + + /***** MOTION LOOP - TIMELAPSE FEATURE SECTION *****/ + + if (cnt->conf.timelapse) { + localtime_r(&cnt->current_image->timestamp_tv.tv_sec, &timestamp_tm); + + /* + * Check to see if we should start a new timelapse file. We start one when + * we are on the first shot, and the seconds are zero. We must use the seconds + * to prevent the timelapse file from getting reset multiple times during the minute. + */ + if (timestamp_tm.tm_min == 0 && + (cnt->time_current_frame % 60 < cnt->time_last_frame % 60) && + cnt->shots == 0) { + + if (strcasecmp(cnt->conf.timelapse_mode, "manual") == 0) { + ;/* No action */ + + /* If we are daily, raise timelapseend event at midnight */ + } else if (strcasecmp(cnt->conf.timelapse_mode, "daily") == 0) { + if (timestamp_tm.tm_hour == 0) + event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, &cnt->current_image->timestamp_tv); + + /* handle the hourly case */ + } else if (strcasecmp(cnt->conf.timelapse_mode, "hourly") == 0) { + event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, &cnt->current_image->timestamp_tv); + + /* If we are weekly-sunday, raise timelapseend event at midnight on sunday */ + } else if (strcasecmp(cnt->conf.timelapse_mode, "weekly-sunday") == 0) { + if (timestamp_tm.tm_wday == 0 && + timestamp_tm.tm_hour == 0) + event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, &cnt->current_image->timestamp_tv); + /* If we are weekly-monday, raise timelapseend event at midnight on monday */ + } else if 
(strcasecmp(cnt->conf.timelapse_mode, "weekly-monday") == 0) { + if (timestamp_tm.tm_wday == 1 && + timestamp_tm.tm_hour == 0) + event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, &cnt->current_image->timestamp_tv); + /* If we are monthly, raise timelapseend event at midnight on first day of month */ + } else if (strcasecmp(cnt->conf.timelapse_mode, "monthly") == 0) { + if (timestamp_tm.tm_mday == 1 && + timestamp_tm.tm_hour == 0) + event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, &cnt->current_image->timestamp_tv); + /* If invalid we report in syslog once and continue in manual mode */ + } else { + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Invalid timelapse_mode argument '%s'", + cnt->conf.timelapse_mode); + MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO, "%:s Defaulting to manual timelapse mode"); + conf_cmdparse(&cnt, (char *)"ffmpeg_timelapse_mode",(char *)"manual"); + } + } + + /* + * If ffmpeg timelapse is enabled and time since epoch MOD ffmpeg_timelaps = 0 + * add a timelapse frame to the timelapse movie. + */ + if (cnt->shots == 0 && cnt->time_current_frame % cnt->conf.timelapse <= + cnt->time_last_frame % cnt->conf.timelapse) + event(cnt, EVENT_TIMELAPSE, cnt->current_image->image, NULL, NULL, + &cnt->current_image->timestamp_tv); + } else if (cnt->ffmpeg_timelapse) { + /* + * If timelapse movie is in progress but conf.timelapse is zero then close timelapse file + * This is an important feature that allows manual roll-over of timelapse file using the http + * remote control via a cron job. + */ + event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, &cnt->current_image->timestamp_tv); + } + + cnt->time_last_frame = cnt->time_current_frame; + + +} + +static void mlp_loopback(struct context *cnt){ + /* + * Feed last image and motion image to video device pipes and the stream clients + * In setup mode we send the special setup mode image to both stream and vloopback pipe + * In normal mode we feed the latest image to vloopback device and we send + * the image to the stream. 
We always send the first image in a second to the stream. + * Other image are sent only when the config option stream_motion is off + * The result is that with stream_motion on the stream stream is normally at the minimal + * 1 frame per second but the minute motion is detected the motion_detected() function + * sends all detected pictures to the stream except the 1st per second which is already sent. + */ + if (cnt->conf.setup_mode) { + event(cnt, EVENT_IMAGE, cnt->imgs.out, NULL, &cnt->pipe, &cnt->current_image->timestamp_tv); + event(cnt, EVENT_STREAM, cnt->imgs.out, NULL, NULL, &cnt->current_image->timestamp_tv); + } else { + event(cnt, EVENT_IMAGE, cnt->current_image->image, NULL, + &cnt->pipe, &cnt->current_image->timestamp_tv); + + if (!cnt->conf.stream_motion || cnt->shots == 1) + event(cnt, EVENT_STREAM, cnt->current_image->image, NULL, NULL, + &cnt->current_image->timestamp_tv); + } + + event(cnt, EVENT_IMAGEM, cnt->imgs.out, NULL, &cnt->mpipe, &cnt->current_image->timestamp_tv); + +} + +static void mlp_parmsupdate(struct context *cnt){ + /***** MOTION LOOP - ONCE PER SECOND PARAMETER UPDATE SECTION *****/ + + /* Check for some config parameter changes but only every second */ + if (cnt->shots == 0) { + if (strcasecmp(cnt->conf.output_pictures, "on") == 0) + cnt->new_img = NEWIMG_ON; + else if (strcasecmp(cnt->conf.output_pictures, "first") == 0) + cnt->new_img = NEWIMG_FIRST; + else if (strcasecmp(cnt->conf.output_pictures, "best") == 0) + cnt->new_img = NEWIMG_BEST; + else if (strcasecmp(cnt->conf.output_pictures, "center") == 0) + cnt->new_img = NEWIMG_CENTER; + else + cnt->new_img = NEWIMG_OFF; + + if (strcasecmp(cnt->conf.locate_motion_mode, "on") == 0) + cnt->locate_motion_mode = LOCATE_ON; + else if (strcasecmp(cnt->conf.locate_motion_mode, "preview") == 0) + cnt->locate_motion_mode = LOCATE_PREVIEW; + else + cnt->locate_motion_mode = LOCATE_OFF; + + if (strcasecmp(cnt->conf.locate_motion_style, "box") == 0) + cnt->locate_motion_style = LOCATE_BOX; 
+ else if (strcasecmp(cnt->conf.locate_motion_style, "redbox") == 0) + cnt->locate_motion_style = LOCATE_REDBOX; + else if (strcasecmp(cnt->conf.locate_motion_style, "cross") == 0) + cnt->locate_motion_style = LOCATE_CROSS; + else if (strcasecmp(cnt->conf.locate_motion_style, "redcross") == 0) + cnt->locate_motion_style = LOCATE_REDCROSS; + else + cnt->locate_motion_style = LOCATE_BOX; + + /* Sanity check for smart_mask_speed, silly value disables smart mask */ + if (cnt->conf.smart_mask_speed < 0 || cnt->conf.smart_mask_speed > 10) + cnt->conf.smart_mask_speed = 0; + + /* Has someone changed smart_mask_speed or framerate? */ + if (cnt->conf.smart_mask_speed != cnt->smartmask_speed || + cnt->smartmask_lastrate != cnt->lastrate) { + if (cnt->conf.smart_mask_speed == 0) { + memset(cnt->imgs.smartmask, 0, cnt->imgs.motionsize); + memset(cnt->imgs.smartmask_final, 255, cnt->imgs.motionsize); + } + + cnt->smartmask_lastrate = cnt->lastrate; + cnt->smartmask_speed = cnt->conf.smart_mask_speed; + /* + * Decay delay - based on smart_mask_speed (framerate independent) + * This is always 5*smartmask_speed seconds + */ + cnt->smartmask_ratio = 5 * cnt->lastrate * (11 - cnt->smartmask_speed); + } + +#if defined(HAVE_MYSQL) || defined(HAVE_PGSQL) || defined(HAVE_SQLITE3) + + /* + * Set the sql mask file according to the SQL config options + * We update it for every frame in case the config was updated + * via remote control. + */ + cnt->sql_mask = cnt->conf.sql_log_image * (FTYPE_IMAGE + FTYPE_IMAGE_MOTION) + + cnt->conf.sql_log_snapshot * FTYPE_IMAGE_SNAPSHOT + + cnt->conf.sql_log_movie * (FTYPE_MPEG + FTYPE_MPEG_MOTION) + + cnt->conf.sql_log_timelapse * FTYPE_MPEG_TIMELAPSE; +#endif /* defined(HAVE_MYSQL) || defined(HAVE_PGSQL) || defined(HAVE_SQLITE3) */ + + } + +} + +static void mlp_frametiming(struct context *cnt){ + + int indx; + struct timeval tv2; + unsigned long int elapsedtime; //TODO: Need to evaluate logic for needing this. 
+ long int delay_time_nsec; + + /***** MOTION LOOP - FRAMERATE TIMING AND SLEEPING SECTION *****/ + /* + * Work out expected frame rate based on config setting which may + * have changed from http-control + */ + if (cnt->conf.frame_limit) + cnt->required_frame_time = 1000000L / cnt->conf.frame_limit; + else + cnt->required_frame_time = 0; + + /* Get latest time to calculate time taken to process video data */ + gettimeofday(&tv2, NULL); + elapsedtime = (tv2.tv_usec + 1000000L * tv2.tv_sec) - cnt->timenow; + + /* + * Update history buffer but ignore first pass as timebefore + * variable will be inaccurate + */ + if (cnt->passflag) + cnt->rolling_average_data[cnt->rolling_frame] = cnt->timenow - cnt->timebefore; + else + cnt->passflag = 1; + + cnt->rolling_frame++; + if (cnt->rolling_frame >= cnt->rolling_average_limit) + cnt->rolling_frame = 0; + + /* Calculate 10 second average and use deviation in delay calculation */ + cnt->rolling_average = 0L; + + for (indx = 0; indx < cnt->rolling_average_limit; indx++) + cnt->rolling_average += cnt->rolling_average_data[indx]; + + cnt->rolling_average /= cnt->rolling_average_limit; + cnt->frame_delay = cnt->required_frame_time - elapsedtime - (cnt->rolling_average - cnt->required_frame_time); + + if (cnt->frame_delay > 0) { + /* Apply delay to meet frame time */ + if (cnt->frame_delay > cnt->required_frame_time) + cnt->frame_delay = cnt->required_frame_time; + + /* Delay time in nanoseconds for SLEEP */ + delay_time_nsec = cnt->frame_delay * 1000; + + if (delay_time_nsec > 999999999) + delay_time_nsec = 999999999; + + /* SLEEP as defined in motion.h A safe sleep using nanosleep */ + SLEEP(0, delay_time_nsec); + } + +} + /** * motion_loop * @@ -1080,1153 +2369,30 @@ static void motion_cleanup(struct context *cnt) static void *motion_loop(void *arg) { struct context *cnt = arg; - int i, j, z = 0; - time_t lastframetime = 0; - int frame_buffer_size; - unsigned int rate_limit = 0; - int area_once = 0; - int area_minx[9], 
area_miny[9], area_maxx[9], area_maxy[9]; - int smartmask_ratio = 0; - int smartmask_count = 20; - unsigned int smartmask_lastrate = 0; - int olddiffs = 0; - int previous_diffs = 0, previous_location_x = 0, previous_location_y = 0; - unsigned int text_size_factor; - unsigned int passflag = 0; - long int *rolling_average_data = NULL; - long int rolling_average_limit, required_frame_time, frame_delay, delay_time_nsec; - int rolling_frame = 0; - struct timeval tv1, tv2; - unsigned long int rolling_average, elapsedtime; - unsigned long long int timenow = 0, timebefore = 0; - int vid_return_code = 0; /* Return code used when calling vid_next */ - int minimum_frame_time_downcounter = cnt->conf.minimum_frame_time; /* time in seconds to skip between capturing images */ - unsigned int get_image = 1; /* Flag used to signal that we capture new image when we run the loop */ - struct image_data *old_image; - { - char tname[16]; - snprintf(tname, sizeof(tname), "ml%d%s%s", - cnt->threadnr, - cnt->conf.camera_name ? ":" : "", - cnt->conf.camera_name ? cnt->conf.camera_name : ""); - MOTION_PTHREAD_SETNAME(tname); - } - - /* - * Next two variables are used for snapshot and timelapse feature - * time_last_frame is set to 1 so that first coming timelapse or second = 0 - * is acted upon. - */ - unsigned long int time_last_frame = 1, time_current_frame; - - if (motion_init(cnt) < 0) - goto err; - - - /* Initialize the double sized characters if needed. 
*/ - if (cnt->conf.text_double) - text_size_factor = 2; - else - text_size_factor = 1; - - /* Initialize area detection */ - area_minx[0] = area_minx[3] = area_minx[6] = 0; - area_miny[0] = area_miny[1] = area_miny[2] = 0; - - area_minx[1] = area_minx[4] = area_minx[7] = cnt->imgs.width / 3; - area_maxx[0] = area_maxx[3] = area_maxx[6] = cnt->imgs.width / 3; - - area_minx[2] = area_minx[5] = area_minx[8] = cnt->imgs.width / 3 * 2; - area_maxx[1] = area_maxx[4] = area_maxx[7] = cnt->imgs.width / 3 * 2; - - area_miny[3] = area_miny[4] = area_miny[5] = cnt->imgs.height / 3; - area_maxy[0] = area_maxy[1] = area_maxy[2] = cnt->imgs.height / 3; - - area_miny[6] = area_miny[7] = area_miny[8] = cnt->imgs.height / 3 * 2; - area_maxy[3] = area_maxy[4] = area_maxy[5] = cnt->imgs.height / 3 * 2; - - area_maxx[2] = area_maxx[5] = area_maxx[8] = cnt->imgs.width; - area_maxy[6] = area_maxy[7] = area_maxy[8] = cnt->imgs.height; - - /* Work out expected frame rate based on config setting */ - if (cnt->conf.frame_limit < 2) - cnt->conf.frame_limit = 2; - - required_frame_time = 1000000L / cnt->conf.frame_limit; - - frame_delay = required_frame_time; - - /* - * Reserve enough space for a 10 second timing history buffer. Note that, - * if there is any problem on the allocation, mymalloc does not return. - */ - rolling_average_limit = 10 * cnt->conf.frame_limit; - rolling_average_data = mymalloc(sizeof(rolling_average_data) * rolling_average_limit); - - /* Preset history buffer with expected frame rate */ - for (j = 0; j < rolling_average_limit; j++) - rolling_average_data[j] = required_frame_time; - - - if (cnt->track.type) - cnt->moved = track_center(cnt, cnt->video_dev, 0, 0, 0); - -#ifdef __OpenBSD__ - /* - * FIXMARK - * Fixes zombie issue on OpenBSD 4.6 - */ - struct sigaction sig_handler_action; - struct sigaction sigchild_action; - setup_signals(&sig_handler_action, &sigchild_action); -#endif - - /* - * MAIN MOTION LOOP BEGINS HERE - * Should go on forever... 
unless you bought vaporware :) - */ + if (motion_init(cnt) < 0) goto err; while (!cnt->finish || cnt->makemovie) { - - /***** MOTION LOOP - PREPARE FOR NEW FRAME SECTION *****/ - cnt->watchdog = WATCHDOG_TMO; - - /* Get current time and preserver last time for frame interval calc. */ - timebefore = timenow; - gettimeofday(&tv1, NULL); - timenow = tv1.tv_usec + 1000000L * tv1.tv_sec; - - /* - * Calculate detection rate limit. Above 5fps we limit the detection - * rate to 3fps to reduce load at higher framerates. - */ - cnt->process_thisframe = 0; - rate_limit++; - if (rate_limit >= (cnt->lastrate / 3)) { - rate_limit = 0; - cnt->process_thisframe = 1; - } - - /* - * Since we don't have sanity checks done when options are set, - * this sanity check must go in the main loop :(, before pre_captures - * are attempted. - */ - if (cnt->conf.minimum_motion_frames < 1) - cnt->conf.minimum_motion_frames = 1; - - if (cnt->conf.pre_capture < 0) - cnt->conf.pre_capture = 0; - - /* - * Check if our buffer is still the right size - * If pre_capture or minimum_motion_frames has been changed - * via the http remote control we need to re-size the ring buffer - */ - frame_buffer_size = cnt->conf.pre_capture + cnt->conf.minimum_motion_frames; - - if (cnt->imgs.image_ring_size != frame_buffer_size) - image_ring_resize(cnt, frame_buffer_size); - - /* Get time for current frame */ - cnt->currenttime = time(NULL); - - /* - * localtime returns static data and is not threadsafe - * so we use localtime_r which is reentrant and threadsafe - */ - localtime_r(&cnt->currenttime, cnt->currenttime_tm); - - /* - * If we have started on a new second we reset the shots variable - * lastrate is updated to be the number of the last frame. last rate - * is used as the ffmpeg framerate when motion is detected. 
- */ - if (lastframetime != cnt->currenttime) { - cnt->lastrate = cnt->shots + 1; - cnt->shots = -1; - lastframetime = cnt->currenttime; - - if (cnt->conf.minimum_frame_time) { - minimum_frame_time_downcounter--; - if (minimum_frame_time_downcounter == 0) - get_image = 1; - } else { - get_image = 1; - } - } - - - /* Increase the shots variable for each frame captured within this second */ - cnt->shots++; - - if (cnt->startup_frames > 0) - cnt->startup_frames--; - - if (get_image) { - if (cnt->conf.minimum_frame_time) { - minimum_frame_time_downcounter = cnt->conf.minimum_frame_time; - get_image = 0; - } - - /* ring_buffer_in is pointing to current pos, update before put in a new image */ - if (++cnt->imgs.image_ring_in >= cnt->imgs.image_ring_size) - cnt->imgs.image_ring_in = 0; - - /* Check if we have filled the ring buffer, throw away last image */ - if (cnt->imgs.image_ring_in == cnt->imgs.image_ring_out) { - if (++cnt->imgs.image_ring_out >= cnt->imgs.image_ring_size) - cnt->imgs.image_ring_out = 0; - } - - /* cnt->current_image points to position in ring where to store image, diffs etc. 
*/ - old_image = cnt->current_image; - cnt->current_image = &cnt->imgs.image_ring[cnt->imgs.image_ring_in]; - - /* Init/clear current_image */ - if (cnt->process_thisframe) { - /* set diffs to 0 now, will be written after we calculated diffs in new image */ - cnt->current_image->diffs = 0; - - /* Set flags to 0 */ - cnt->current_image->flags = 0; - cnt->current_image->cent_dist = 0; - - /* Clear location data */ - memset(&cnt->current_image->location, 0, sizeof(cnt->current_image->location)); - cnt->current_image->total_labels = 0; - } else if (cnt->current_image && old_image) { - /* not processing this frame: save some important values for next image */ - cnt->current_image->diffs = old_image->diffs; - cnt->current_image->timestamp = old_image->timestamp; - cnt->current_image->timestamp_tm = old_image->timestamp_tm; - cnt->current_image->shot = old_image->shot; - cnt->current_image->cent_dist = old_image->cent_dist; - cnt->current_image->flags = old_image->flags & (~IMAGE_SAVED); - cnt->current_image->location = old_image->location; - cnt->current_image->total_labels = old_image->total_labels; - } - - /* Store time with pre_captured image */ - cnt->current_image->timestamp = cnt->currenttime; - localtime_r(&cnt->current_image->timestamp, &cnt->current_image->timestamp_tm); - - /* Store shot number with pre_captured image */ - cnt->current_image->shot = cnt->shots; - - /***** MOTION LOOP - RETRY INITIALIZING SECTION *****/ - /* - * If a camera is not available we keep on retrying every 10 seconds - * until it shows up. 
- */ - if (cnt->video_dev < 0 && - cnt->currenttime % 10 == 0 && cnt->shots == 0) { - MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO, - "%s: Retrying until successful connection with camera"); - cnt->video_dev = vid_start(cnt); - - /* - * If the netcam has different dimensions than in the config file - * we need to restart Motion to re-allocate all the buffers - */ - if (cnt->imgs.width != cnt->conf.width || cnt->imgs.height != cnt->conf.height) { - MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Camera has finally become available\n" - "Camera image has different width and height" - "from what is in the config file. You should fix that\n" - "Restarting Motion thread to reinitialize all " - "image buffers to new picture dimensions"); - cnt->conf.width = cnt->imgs.width; - cnt->conf.height = cnt->imgs.height; - /* - * Break out of main loop terminating thread - * watchdog will start us again - */ - break; - } - } - - - /***** MOTION LOOP - IMAGE CAPTURE SECTION *****/ - - /* - * Fetch next frame from camera - * If vid_next returns 0 all is well and we got a new picture - * Any non zero value is an error. 
- * 0 = OK, valid picture - * <0 = fatal error - leave the thread by breaking out of the main loop - * >0 = non fatal error - copy last image or show grey image with message - */ - if (cnt->video_dev >= 0) - vid_return_code = vid_next(cnt, cnt->current_image->image); - else - vid_return_code = 1; /* Non fatal error */ - - // VALID PICTURE - if (vid_return_code == 0) { - cnt->lost_connection = 0; - cnt->connectionlosttime = 0; - - /* If all is well reset missing_frame_counter */ - if (cnt->missing_frame_counter >= MISSING_FRAMES_TIMEOUT * cnt->conf.frame_limit) { - /* If we previously logged starting a grey image, now log video re-start */ - MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Video signal re-acquired"); - // event for re-acquired video signal can be called here - } - cnt->missing_frame_counter = 0; - - /* - * Save the newly captured still virgin image to a buffer - * which we will not alter with text and location graphics - */ - memcpy(cnt->imgs.image_virgin, cnt->current_image->image, cnt->imgs.size); - - /* - * If the camera is a netcam we let the camera decide the pace. - * Otherwise we will keep on adding duplicate frames. - * By resetting the timer the framerate becomes maximum the rate - * of the Netcam. 
- */ - if (cnt->conf.netcam_url) { - gettimeofday(&tv1, NULL); - timenow = tv1.tv_usec + 1000000L * tv1.tv_sec; - } - // FATAL ERROR - leave the thread by breaking out of the main loop - } else if (vid_return_code < 0) { - /* Fatal error - Close video device */ - MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Video device fatal error - Closing video device"); - vid_close(cnt); - /* - * Use virgin image, if we are not able to open it again next loop - * a gray image with message is applied - * flag lost_connection - */ - memcpy(cnt->current_image->image, cnt->imgs.image_virgin, cnt->imgs.size); - cnt->lost_connection = 1; - /* NO FATAL ERROR - - * copy last image or show grey image with message - * flag on lost_connection if : - * vid_return_code == NETCAM_RESTART_ERROR - * cnt->video_dev < 0 - * cnt->missing_frame_counter > (MISSING_FRAMES_TIMEOUT * cnt->conf.frame_limit) - */ - } else { - - MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: vid_return_code %d", - vid_return_code); - - /* - * Netcams that change dimensions while Motion is running will - * require that Motion restarts to reinitialize all the many - * buffers inside Motion. 
It will be a mess to try and recover any - * other way - */ - if (vid_return_code == NETCAM_RESTART_ERROR) { - MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Restarting Motion thread to reinitialize all " - "image buffers"); - /* - * Break out of main loop terminating thread - * watchdog will start us again - * Set lost_connection flag on - */ - - cnt->lost_connection = 1; - break; - } - - /* - * First missed frame - store timestamp - * Don't reset time when thread restarts - */ - if (cnt->connectionlosttime == 0) - cnt->connectionlosttime = cnt->currenttime; - - /* - * Increase missing_frame_counter - * The first MISSING_FRAMES_TIMEOUT seconds we copy previous virgin image - * After MISSING_FRAMES_TIMEOUT seconds we put a grey error image in the buffer - * If we still have not yet received the initial image from a camera - * we go straight for the grey error image. - */ - ++cnt->missing_frame_counter; - - if (cnt->video_dev >= 0 && - cnt->missing_frame_counter < (MISSING_FRAMES_TIMEOUT * cnt->conf.frame_limit)) { - memcpy(cnt->current_image->image, cnt->imgs.image_virgin, cnt->imgs.size); - } else { - const char *tmpin; - char tmpout[80]; - struct tm tmptime; - cnt->lost_connection = 1; - - if (cnt->video_dev >= 0) - tmpin = "CONNECTION TO CAMERA LOST\\nSINCE %Y-%m-%d %T"; - else - tmpin = "UNABLE TO OPEN VIDEO DEVICE\\nSINCE %Y-%m-%d %T"; - - localtime_r(&cnt->connectionlosttime, &tmptime); - memset(cnt->current_image->image, 0x80, cnt->imgs.size); - mystrftime(cnt, tmpout, sizeof(tmpout), tmpin, &tmptime, NULL, 0); - draw_text(cnt->current_image->image, 10, 20 * text_size_factor, cnt->imgs.width, - tmpout, cnt->conf.text_double); - - /* Write error message only once */ - if (cnt->missing_frame_counter == MISSING_FRAMES_TIMEOUT * cnt->conf.frame_limit) { - MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Video signal lost - Adding grey image"); - // Event for lost video signal can be called from here - event(cnt, EVENT_CAMERA_LOST, NULL, NULL, - NULL, cnt->currenttime_tm); - } 
- - /* - * If we don't get a valid frame for a long time, try to close/reopen device - * Only try this when a device is open - */ - if ((cnt->video_dev > 0) && - (cnt->missing_frame_counter == (MISSING_FRAMES_TIMEOUT * 4) * cnt->conf.frame_limit)) { - MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Video signal still lost - " - "Trying to close video device"); - vid_close(cnt); - } - } - } - - /***** MOTION LOOP - MOTION DETECTION SECTION *****/ - - /* - * The actual motion detection takes place in the following - * diffs is the number of pixels detected as changed - * Make a differences picture in image_out - * - * alg_diff_standard is the slower full feature motion detection algorithm - * alg_diff first calls a fast detection algorithm which only looks at a - * fraction of the pixels. If this detects possible motion alg_diff_standard - * is called. - */ - if (cnt->process_thisframe) { - if (cnt->threshold && !cnt->pause) { - /* - * If we've already detected motion and we want to see if there's - * still motion, don't bother trying the fast one first. IF there's - * motion, the alg_diff will trigger alg_diff_standard - * anyway - */ - if (cnt->detecting_motion || cnt->conf.setup_mode) - cnt->current_image->diffs = alg_diff_standard(cnt, cnt->imgs.image_virgin); - else - cnt->current_image->diffs = alg_diff(cnt, cnt->imgs.image_virgin); - - /* Lightswitch feature - has light intensity changed? - * This can happen due to change of light conditions or due to a sudden change of the camera - * sensitivity. If alg_lightswitch detects lightswitch we suspend motion detection the next - * 5 frames to allow the camera to settle. 
- * Don't check if we have lost connection, we detect "Lost signal" frame as lightswitch - */ - if (cnt->conf.lightswitch > 1 && !cnt->lost_connection) { - if (alg_lightswitch(cnt, cnt->current_image->diffs)) { - MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: Lightswitch detected"); - - if (cnt->moved < 5) - cnt->moved = 5; - - cnt->current_image->diffs = 0; - alg_update_reference_frame(cnt, RESET_REF_FRAME); - } - } - - /* - * Switchfilter feature tries to detect a change in the video signal - * from one camera to the next. This is normally used in the Round - * Robin feature. The algorithm is not very safe. - * The algorithm takes a little time so we only call it when needed - * ie. when feature is enabled and diffs>threshold. - * We do not suspend motion detection like we did for lightswitch - * because with Round Robin this is controlled by roundrobin_skip. - */ - if (cnt->conf.switchfilter && cnt->current_image->diffs > cnt->threshold) { - cnt->current_image->diffs = alg_switchfilter(cnt, cnt->current_image->diffs, - cnt->current_image->image); - - if (cnt->current_image->diffs <= cnt->threshold) { - cnt->current_image->diffs = 0; - - MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: Switchfilter detected"); - } - } - - /* - * Despeckle feature - * First we run (as given by the despeckle_filter option iterations - * of erode and dilate algorithms. - * Finally we run the labelling feature. - * All this is done in the alg_despeckle code. 
- */ - cnt->current_image->total_labels = 0; - cnt->imgs.largest_label = 0; - olddiffs = 0; - - if (cnt->conf.despeckle_filter && cnt->current_image->diffs > 0) { - olddiffs = cnt->current_image->diffs; - cnt->current_image->diffs = alg_despeckle(cnt, olddiffs); - } else if (cnt->imgs.labelsize_max) { - cnt->imgs.labelsize_max = 0; /* Disable labeling if enabled */ - } - - } else if (!cnt->conf.setup_mode) { - cnt->current_image->diffs = 0; - } - } - - /* Manipulate smart_mask sensitivity (only every smartmask_ratio seconds) */ - if ((cnt->smartmask_speed && (cnt->event_nr != cnt->prev_event)) && - (!--smartmask_count)) { - alg_tune_smartmask(cnt); - smartmask_count = smartmask_ratio; - } - - /* - * cnt->moved is set by the tracking code when camera has been asked to move. - * When camera is moving we do not want motion to detect motion or we will - * get our camera chasing itself like crazy and we will get motion detected - * which is not really motion. So we pretend there is no motion by setting - * cnt->diffs = 0. - * We also pretend to have a moving camera when we start Motion and when light - * switch has been detected to allow camera to settle. - */ - if (cnt->moved) { - cnt->moved--; - cnt->current_image->diffs = 0; - } - - /***** MOTION LOOP - TUNING SECTION *****/ - - /* - * If noise tuning was selected, do it now. but only when - * no frames have been recorded and only once per second - */ - if ((cnt->conf.noise_tune && cnt->shots == 0) && - (!cnt->detecting_motion && (cnt->current_image->diffs <= cnt->threshold))) - alg_noise_tune(cnt, cnt->imgs.image_virgin); - - - /* - * If we are not noise tuning lets make sure that remote controlled - * changes of noise_level are used. - */ - if (cnt->process_thisframe) { - if (!cnt->conf.noise_tune) - cnt->noise = cnt->conf.noise; - - /* - * threshold tuning if enabled - * if we are not threshold tuning lets make sure that remote controlled - * changes of threshold are used. 
- */ - if (cnt->conf.threshold_tune) - alg_threshold_tune(cnt, cnt->current_image->diffs, cnt->detecting_motion); - else - cnt->threshold = cnt->conf.max_changes; - - /* - * If motion is detected (cnt->current_image->diffs > cnt->threshold) and before we add text to the pictures - * we find the center and size coordinates of the motion to be used for text overlays and later - * for adding the locate rectangle - */ - if (cnt->current_image->diffs > cnt->threshold) - alg_locate_center_size(&cnt->imgs, cnt->imgs.width, cnt->imgs.height, &cnt->current_image->location); - - /* - * Update reference frame. - * micro-lighswitch: trying to auto-detect lightswitch events. - * frontdoor illumination. Updates are rate-limited to 3 per second at - * framerates above 5fps to save CPU resources and to keep sensitivity - * at a constant level. - */ - - if ((cnt->current_image->diffs > cnt->threshold) && (cnt->conf.lightswitch == 1) && - (cnt->lightswitch_framecounter < (cnt->lastrate * 2)) && /* two seconds window only */ - /* number of changed pixels almost the same in two consecutive frames and */ - ((abs(previous_diffs - cnt->current_image->diffs)) < (previous_diffs / 15)) && - /* center of motion in about the same place ? 
*/ - ((abs(cnt->current_image->location.x - previous_location_x)) <= (cnt->imgs.width / 150)) && - ((abs(cnt->current_image->location.y - previous_location_y)) <= (cnt->imgs.height / 150))) { - alg_update_reference_frame(cnt, RESET_REF_FRAME); - cnt->current_image->diffs = 0; - cnt->lightswitch_framecounter = 0; - - MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: micro-lightswitch!"); - } else { - alg_update_reference_frame(cnt, UPDATE_REF_FRAME); - } - - previous_diffs = cnt->current_image->diffs; - previous_location_x = cnt->current_image->location.x; - previous_location_y = cnt->current_image->location.y; - } - - /***** MOTION LOOP - TEXT AND GRAPHICS OVERLAY SECTION *****/ - - /* - * Some overlays on top of the motion image - * Note that these now modifies the cnt->imgs.out so this buffer - * can no longer be used for motion detection features until next - * picture frame is captured. - */ - - /* Smartmask overlay */ - if (cnt->smartmask_speed && (cnt->conf.motion_img || cnt->conf.ffmpeg_output_debug || - cnt->conf.setup_mode)) - overlay_smartmask(cnt, cnt->imgs.out); - - /* Largest labels overlay */ - if (cnt->imgs.largest_label && (cnt->conf.motion_img || cnt->conf.ffmpeg_output_debug || - cnt->conf.setup_mode)) - overlay_largest_label(cnt, cnt->imgs.out); - - /* Fixed mask overlay */ - if (cnt->imgs.mask && (cnt->conf.motion_img || cnt->conf.ffmpeg_output_debug || - cnt->conf.setup_mode)) - overlay_fixed_mask(cnt, cnt->imgs.out); - - /* Initialize the double sized characters if needed. */ - if (cnt->conf.text_double && text_size_factor == 1) { - text_size_factor = 2; - /* If text_double is set to off, then reset the scaling text_size_factor. 
*/ - } else if (!cnt->conf.text_double && text_size_factor == 2) { - text_size_factor = 1; - } - - /* Add changed pixels in upper right corner of the pictures */ - if (cnt->conf.text_changes) { - char tmp[25]; - - if (!cnt->pause) - sprintf(tmp, "%d", cnt->current_image->diffs); - else - sprintf(tmp, "-"); - - draw_text(cnt->current_image->image, cnt->imgs.width - 10, 10, - cnt->imgs.width, tmp, cnt->conf.text_double); - } - - /* - * Add changed pixels to motion-images (for stream) in setup_mode - * and always overlay smartmask (not only when motion is detected) - */ - if (cnt->conf.setup_mode) { - char tmp[PATH_MAX]; - sprintf(tmp, "D:%5d L:%3d N:%3d", cnt->current_image->diffs, - cnt->current_image->total_labels, cnt->noise); - draw_text(cnt->imgs.out, cnt->imgs.width - 10, cnt->imgs.height - 30 * text_size_factor, - cnt->imgs.width, tmp, cnt->conf.text_double); - sprintf(tmp, "THREAD %d SETUP", cnt->threadnr); - draw_text(cnt->imgs.out, cnt->imgs.width - 10, cnt->imgs.height - 10 * text_size_factor, - cnt->imgs.width, tmp, cnt->conf.text_double); - } - - /* Add text in lower left corner of the pictures */ - if (cnt->conf.text_left) { - char tmp[PATH_MAX]; - mystrftime(cnt, tmp, sizeof(tmp), cnt->conf.text_left, - &cnt->current_image->timestamp_tm, NULL, 0); - draw_text(cnt->current_image->image, 10, cnt->imgs.height - 10 * text_size_factor, - cnt->imgs.width, tmp, cnt->conf.text_double); - } - - /* Add text in lower right corner of the pictures */ - if (cnt->conf.text_right) { - char tmp[PATH_MAX]; - mystrftime(cnt, tmp, sizeof(tmp), cnt->conf.text_right, - &cnt->current_image->timestamp_tm, NULL, 0); - draw_text(cnt->current_image->image, cnt->imgs.width - 10, - cnt->imgs.height - 10 * text_size_factor, - cnt->imgs.width, tmp, cnt->conf.text_double); - } - - - /***** MOTION LOOP - ACTIONS AND EVENT CONTROL SECTION *****/ - - if (cnt->current_image->diffs > cnt->threshold) { - /* flag this image, it have motion */ - cnt->current_image->flags |= IMAGE_MOTION; - 
cnt->lightswitch_framecounter++; /* micro lightswitch */ - } else { - cnt->lightswitch_framecounter = 0; - } - - /* - * If motion has been detected we take action and start saving - * pictures and movies etc by calling motion_detected(). - * Is emulate_motion enabled we always call motion_detected() - * If post_capture is enabled we also take care of this in the this - * code section. - */ - if (cnt->conf.emulate_motion && (cnt->startup_frames == 0)) { - cnt->detecting_motion = 1; - MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: Emulating motion"); -#ifdef HAVE_FFMPEG - if (cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe)) { -#else - if (cnt->conf.useextpipe && cnt->extpipe) { -#endif - /* Setup the postcap counter */ - cnt->postcap = cnt->conf.post_capture; - MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: (Em) Init post capture %d", - cnt->postcap); - } - - cnt->current_image->flags |= (IMAGE_TRIGGER | IMAGE_SAVE); - motion_detected(cnt, cnt->video_dev, cnt->current_image); - } else if ((cnt->current_image->flags & IMAGE_MOTION) && (cnt->startup_frames == 0)) { - /* - * Did we detect motion (like the cat just walked in :) )? 
- * If so, ensure the motion is sustained if minimum_motion_frames - */ - - /* Count how many frames with motion there is in the last minimum_motion_frames in precap buffer */ - int frame_count = 0; - int pos = cnt->imgs.image_ring_in; - - for (i = 0; i < cnt->conf.minimum_motion_frames; i++) { - - if (cnt->imgs.image_ring[pos].flags & IMAGE_MOTION) - frame_count++; - - if (pos == 0) - pos = cnt->imgs.image_ring_size-1; - else - pos--; - } - - if (frame_count >= cnt->conf.minimum_motion_frames) { - - cnt->current_image->flags |= (IMAGE_TRIGGER | IMAGE_SAVE); - cnt->detecting_motion = 1; - - /* Setup the postcap counter */ - cnt->postcap = cnt->conf.post_capture; - MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: Setup post capture %d", - cnt->postcap); - - /* Mark all images in image_ring to be saved */ - for (i = 0; i < cnt->imgs.image_ring_size; i++) - cnt->imgs.image_ring[i].flags |= IMAGE_SAVE; - - } else if ((cnt->postcap) && -#ifdef HAVE_FFMPEG - (cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe))) { -#else - (cnt->conf.useextpipe && cnt->extpipe)) { -#endif - /* we have motion in this frame, but not enought frames for trigger. 
Check postcap */ - cnt->current_image->flags |= (IMAGE_POSTCAP | IMAGE_SAVE); - cnt->postcap--; - MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: post capture %d", - cnt->postcap); - } else { - cnt->current_image->flags |= IMAGE_PRECAP; - } - - /* Always call motion_detected when we have a motion image */ - motion_detected(cnt, cnt->video_dev, cnt->current_image); - } else if ((cnt->postcap) && -#ifdef HAVE_FFMPEG - (cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe))) { -#else - (cnt->conf.useextpipe && cnt->extpipe)) { -#endif - /* No motion, doing postcap */ - cnt->current_image->flags |= (IMAGE_POSTCAP | IMAGE_SAVE); - cnt->postcap--; - MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: post capture %d", - cnt->postcap); - } else { - /* Done with postcap, so just have the image in the precap buffer */ - cnt->current_image->flags |= IMAGE_PRECAP; - /* gapless movie feature */ - if ((cnt->conf.event_gap == 0) && (cnt->detecting_motion == 1)) - cnt->makemovie = 1; - cnt->detecting_motion = 0; - } - - /* Update last frame saved time, so we can end event after gap time */ - if (cnt->current_image->flags & IMAGE_SAVE) - cnt->lasttime = cnt->current_image->timestamp; - - - /* - * Simple hack to recognize motion in a specific area - * Do we need a new coversion specifier as well?? 
- */ - if ((cnt->conf.area_detect) && (cnt->event_nr != area_once) && - (cnt->current_image->flags & IMAGE_TRIGGER)) { - j = strlen(cnt->conf.area_detect); - - for (i = 0; i < j; i++) { - z = cnt->conf.area_detect[i] - 49; /* 1 becomes 0 */ - if ((z >= 0) && (z < 9)) { - if (cnt->current_image->location.x > area_minx[z] && - cnt->current_image->location.x < area_maxx[z] && - cnt->current_image->location.y > area_miny[z] && - cnt->current_image->location.y < area_maxy[z]) { - event(cnt, EVENT_AREA_DETECTED, NULL, NULL, - NULL, cnt->currenttime_tm); - area_once = cnt->event_nr; /* Fire script only once per event */ - - MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%s: Motion in area %d detected.", - z + 1); - break; - } - } - } - } - - /* - * Is the movie too long? Then make movies - * First test for max_movie_time - */ - if ((cnt->conf.max_movie_time && cnt->event_nr == cnt->prev_event) && - (cnt->currenttime - cnt->eventtime >= cnt->conf.max_movie_time)) - cnt->makemovie = 1; - - /* - * Now test for quiet longer than 'gap' OR make movie as decided in - * previous statement. 
- */ - if (((cnt->currenttime - cnt->lasttime >= cnt->conf.event_gap) && cnt->conf.event_gap > 0) || - cnt->makemovie) { - if (cnt->event_nr == cnt->prev_event || cnt->makemovie) { - - /* Flush image buffer */ - process_image_ring(cnt, IMAGE_BUFFER_FLUSH); - - /* Save preview_shot here at the end of event */ - if (cnt->imgs.preview_image.diffs) { - preview_save(cnt); - cnt->imgs.preview_image.diffs = 0; - } - - event(cnt, EVENT_ENDMOTION, NULL, NULL, NULL, cnt->currenttime_tm); - - /* - * If tracking is enabled we center our camera so it does not - * point to a place where it will miss the next action - */ - if (cnt->track.type) - cnt->moved = track_center(cnt, cnt->video_dev, 0, 0, 0); - - MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: End of event %d", - cnt->event_nr); - - cnt->makemovie = 0; - /* Reset post capture */ - cnt->postcap = 0; - - /* Finally we increase the event number */ - cnt->event_nr++; - cnt->lightswitch_framecounter = 0; - - /* - * And we unset the text_event_string to avoid that buffered - * images get a timestamp from previous event. 
- */ - cnt->text_event_string[0] = '\0'; - } - } - - /* Save/send to movie some images */ - process_image_ring(cnt, 2); - - /***** MOTION LOOP - SETUP MODE CONSOLE OUTPUT SECTION *****/ - - /* If CAMERA_VERBOSE enabled output some numbers to console */ - if (cnt->conf.setup_mode) { - char msg[1024] = "\0"; - char part[100]; - - if (cnt->conf.despeckle_filter) { - snprintf(part, 99, "Raw changes: %5d - changes after '%s': %5d", - olddiffs, cnt->conf.despeckle_filter, cnt->current_image->diffs); - strcat(msg, part); - if (strchr(cnt->conf.despeckle_filter, 'l')) { - sprintf(part, " - labels: %3d", cnt->current_image->total_labels); - strcat(msg, part); - } - } else { - sprintf(part, "Changes: %5d", cnt->current_image->diffs); - strcat(msg, part); - } - - if (cnt->conf.noise_tune) { - sprintf(part, " - noise level: %2d", cnt->noise); - strcat(msg, part); - } - - if (cnt->conf.threshold_tune) { - sprintf(part, " - threshold: %d", cnt->threshold); - strcat(msg, part); - } - - MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, "%s: %s", msg); - } - - } /* get_image end */ - - /***** MOTION LOOP - SNAPSHOT FEATURE SECTION *****/ - - /* - * Did we get triggered to make a snapshot from control http? Then shoot a snap - * If snapshot_interval is not zero and time since epoch MOD snapshot_interval = 0 then snap - * We actually allow the time to run over the interval in case we have a delay - * from slow camera. - * Note: Negative value means SIGALRM snaps are enabled - * httpd-control snaps are always enabled. 
- */ - - /* time_current_frame is used both for snapshot and timelapse features */ - time_current_frame = cnt->currenttime; - - if ((cnt->conf.snapshot_interval > 0 && cnt->shots == 0 && - time_current_frame % cnt->conf.snapshot_interval <= time_last_frame % cnt->conf.snapshot_interval) || - cnt->snapshot) { - event(cnt, EVENT_IMAGE_SNAPSHOT, cnt->current_image->image, NULL, NULL, &cnt->current_image->timestamp_tm); - cnt->snapshot = 0; - } - - - /***** MOTION LOOP - TIMELAPSE FEATURE SECTION *****/ - -#ifdef HAVE_FFMPEG - - if (cnt->conf.timelapse) { - - /* - * Check to see if we should start a new timelapse file. We start one when - * we are on the first shot, and and the seconds are zero. We must use the seconds - * to prevent the timelapse file from getting reset multiple times during the minute. - */ - if (cnt->current_image->timestamp_tm.tm_min == 0 && - (time_current_frame % 60 < time_last_frame % 60) && - cnt->shots == 0) { - - if (strcasecmp(cnt->conf.timelapse_mode, "manual") == 0) { - ;/* No action */ - - /* If we are daily, raise timelapseend event at midnight */ - } else if (strcasecmp(cnt->conf.timelapse_mode, "daily") == 0) { - if (cnt->current_image->timestamp_tm.tm_hour == 0) - event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, &cnt->current_image->timestamp_tm); - - /* handle the hourly case */ - } else if (strcasecmp(cnt->conf.timelapse_mode, "hourly") == 0) { - event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, &cnt->current_image->timestamp_tm); - - /* If we are weekly-sunday, raise timelapseend event at midnight on sunday */ - } else if (strcasecmp(cnt->conf.timelapse_mode, "weekly-sunday") == 0) { - if (cnt->current_image->timestamp_tm.tm_wday == 0 && - cnt->current_image->timestamp_tm.tm_hour == 0) - event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, - &cnt->current_image->timestamp_tm); - /* If we are weekly-monday, raise timelapseend event at midnight on monday */ - } else if (strcasecmp(cnt->conf.timelapse_mode, "weekly-monday") == 0) { - if 
(cnt->current_image->timestamp_tm.tm_wday == 1 && - cnt->current_image->timestamp_tm.tm_hour == 0) - event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, - &cnt->current_image->timestamp_tm); - /* If we are monthly, raise timelapseend event at midnight on first day of month */ - } else if (strcasecmp(cnt->conf.timelapse_mode, "monthly") == 0) { - if (cnt->current_image->timestamp_tm.tm_mday == 1 && - cnt->current_image->timestamp_tm.tm_hour == 0) - event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, - &cnt->current_image->timestamp_tm); - /* If invalid we report in syslog once and continue in manual mode */ - } else { - MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Invalid timelapse_mode argument '%s'", - cnt->conf.timelapse_mode); - MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO, "%:s Defaulting to manual timelapse mode"); - conf_cmdparse(&cnt, (char *)"ffmpeg_timelapse_mode",(char *)"manual"); - } - } - - /* - * If ffmpeg timelapse is enabled and time since epoch MOD ffmpeg_timelaps = 0 - * add a timelapse frame to the timelapse movie. - */ - if (cnt->shots == 0 && time_current_frame % cnt->conf.timelapse <= - time_last_frame % cnt->conf.timelapse) - event(cnt, EVENT_TIMELAPSE, cnt->current_image->image, NULL, NULL, - &cnt->current_image->timestamp_tm); - } else if (cnt->ffmpeg_timelapse) { - /* - * If timelapse movie is in progress but conf.timelapse is zero then close timelapse file - * This is an important feature that allows manual roll-over of timelapse file using the http - * remote control via a cron job. 
- */ - event(cnt, EVENT_TIMELAPSEEND, NULL, NULL, NULL, cnt->currenttime_tm); - } - -#endif /* HAVE_FFMPEG */ - - time_last_frame = time_current_frame; - - - /***** MOTION LOOP - VIDEO LOOPBACK SECTION *****/ - - /* - * Feed last image and motion image to video device pipes and the stream clients - * In setup mode we send the special setup mode image to both stream and vloopback pipe - * In normal mode we feed the latest image to vloopback device and we send - * the image to the stream. We always send the first image in a second to the stream. - * Other image are sent only when the config option stream_motion is off - * The result is that with stream_motion on the stream stream is normally at the minimal - * 1 frame per second but the minute motion is detected the motion_detected() function - * sends all detected pictures to the stream except the 1st per second which is already sent. - */ - if (cnt->conf.setup_mode) { - event(cnt, EVENT_IMAGE, cnt->imgs.out, NULL, &cnt->pipe, cnt->currenttime_tm); - event(cnt, EVENT_STREAM, cnt->imgs.out, NULL, NULL, cnt->currenttime_tm); - } else { - event(cnt, EVENT_IMAGE, cnt->current_image->image, NULL, - &cnt->pipe, &cnt->current_image->timestamp_tm); - - if (!cnt->conf.stream_motion || cnt->shots == 1) - event(cnt, EVENT_STREAM, cnt->current_image->image, NULL, NULL, - &cnt->current_image->timestamp_tm); - } - - event(cnt, EVENT_IMAGEM, cnt->imgs.out, NULL, &cnt->mpipe, cnt->currenttime_tm); - - - /***** MOTION LOOP - ONCE PER SECOND PARAMETER UPDATE SECTION *****/ - - /* Check for some config parameter changes but only every second */ - if (cnt->shots == 0) { - if (strcasecmp(cnt->conf.output_pictures, "on") == 0) - cnt->new_img = NEWIMG_ON; - else if (strcasecmp(cnt->conf.output_pictures, "first") == 0) - cnt->new_img = NEWIMG_FIRST; - else if (strcasecmp(cnt->conf.output_pictures, "best") == 0) - cnt->new_img = NEWIMG_BEST; - else if (strcasecmp(cnt->conf.output_pictures, "center") == 0) - cnt->new_img = NEWIMG_CENTER; - 
else - cnt->new_img = NEWIMG_OFF; - - if (strcasecmp(cnt->conf.locate_motion_mode, "on") == 0) - cnt->locate_motion_mode = LOCATE_ON; - else if (strcasecmp(cnt->conf.locate_motion_mode, "preview") == 0) - cnt->locate_motion_mode = LOCATE_PREVIEW; - else - cnt->locate_motion_mode = LOCATE_OFF; - - if (strcasecmp(cnt->conf.locate_motion_style, "box") == 0) - cnt->locate_motion_style = LOCATE_BOX; - else if (strcasecmp(cnt->conf.locate_motion_style, "redbox") == 0) - cnt->locate_motion_style = LOCATE_REDBOX; - else if (strcasecmp(cnt->conf.locate_motion_style, "cross") == 0) - cnt->locate_motion_style = LOCATE_CROSS; - else if (strcasecmp(cnt->conf.locate_motion_style, "redcross") == 0) - cnt->locate_motion_style = LOCATE_REDCROSS; - else - cnt->locate_motion_style = LOCATE_BOX; - - /* Sanity check for smart_mask_speed, silly value disables smart mask */ - if (cnt->conf.smart_mask_speed < 0 || cnt->conf.smart_mask_speed > 10) - cnt->conf.smart_mask_speed = 0; - - /* Has someone changed smart_mask_speed or framerate? */ - if (cnt->conf.smart_mask_speed != cnt->smartmask_speed || - smartmask_lastrate != cnt->lastrate) { - if (cnt->conf.smart_mask_speed == 0) { - memset(cnt->imgs.smartmask, 0, cnt->imgs.motionsize); - memset(cnt->imgs.smartmask_final, 255, cnt->imgs.motionsize); - } - - smartmask_lastrate = cnt->lastrate; - cnt->smartmask_speed = cnt->conf.smart_mask_speed; - /* - * Decay delay - based on smart_mask_speed (framerate independent) - * This is always 5*smartmask_speed seconds - */ - smartmask_ratio = 5 * cnt->lastrate * (11 - cnt->smartmask_speed); - } - -#if defined(HAVE_MYSQL) || defined(HAVE_PGSQL) || defined(HAVE_SQLITE3) - - /* - * Set the sql mask file according to the SQL config options - * We update it for every frame in case the config was updated - * via remote control. 
- */ - cnt->sql_mask = cnt->conf.sql_log_image * (FTYPE_IMAGE + FTYPE_IMAGE_MOTION) + - cnt->conf.sql_log_snapshot * FTYPE_IMAGE_SNAPSHOT + - cnt->conf.sql_log_movie * (FTYPE_MPEG + FTYPE_MPEG_MOTION) + - cnt->conf.sql_log_timelapse * FTYPE_MPEG_TIMELAPSE; -#endif /* defined(HAVE_MYSQL) || defined(HAVE_PGSQL) || defined(HAVE_SQLITE3) */ - - } - - - /***** MOTION LOOP - FRAMERATE TIMING AND SLEEPING SECTION *****/ - - - /* - * Work out expected frame rate based on config setting which may - * have changed from http-control - */ - if (cnt->conf.frame_limit) - required_frame_time = 1000000L / cnt->conf.frame_limit; - else - required_frame_time = 0; - - /* Get latest time to calculate time taken to process video data */ - gettimeofday(&tv2, NULL); - elapsedtime = (tv2.tv_usec + 1000000L * tv2.tv_sec) - timenow; - - /* - * Update history buffer but ignore first pass as timebefore - * variable will be inaccurate - */ - if (passflag) - rolling_average_data[rolling_frame] = timenow-timebefore; - else - passflag = 1; - - rolling_frame++; - if (rolling_frame >= rolling_average_limit) - rolling_frame = 0; - - /* Calculate 10 second average and use deviation in delay calculation */ - rolling_average = 0L; - - for (j = 0; j < rolling_average_limit; j++) - rolling_average += rolling_average_data[j]; - - rolling_average /= rolling_average_limit; - frame_delay = required_frame_time-elapsedtime - (rolling_average - required_frame_time); - - if (frame_delay > 0) { - /* Apply delay to meet frame time */ - if (frame_delay > required_frame_time) - frame_delay = required_frame_time; - - /* Delay time in nanoseconds for SLEEP */ - delay_time_nsec = frame_delay * 1000; - - if (delay_time_nsec > 999999999) - delay_time_nsec = 999999999; - - /* SLEEP as defined in motion.h A safe sleep using nanosleep */ - SLEEP(0, delay_time_nsec); + mlp_prepare(cnt); + if (cnt->get_image) { + mlp_resetimages(cnt); + if (mlp_retry(cnt) == 1) break; + if (mlp_capture(cnt) == 1) break; + mlp_detection(cnt); 
+ mlp_tuning(cnt); + mlp_overlay(cnt); + mlp_actions(cnt); + mlp_setupmode(cnt); } + mlp_snapshot(cnt); + mlp_timelapse(cnt); + mlp_loopback(cnt); + mlp_parmsupdate(cnt); + mlp_frametiming(cnt); } - /* - * END OF MOTION MAIN LOOP - * If code continues here it is because the thread is exiting or restarting - */ err: - free(rolling_average_data); + free(cnt->rolling_average_data); cnt->lost_connection = 1; MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "%s: Thread exiting"); @@ -2413,7 +2579,7 @@ static void motion_shutdown(void) free(cnt_list); cnt_list = NULL; -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 vid_cleanup(); #endif } @@ -2504,52 +2670,11 @@ static void motion_startup(int daemonize, int argc, char *argv[]) } } -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 vid_init(); #endif } -/** - * setup_signals - * - * Attaches handlers to a number of signals that Motion need to catch. - * - * Parameters: sigaction structs for signals in general and SIGCHLD. - * - * Returns: nothing - */ -static void setup_signals(struct sigaction *sig_handler_action, struct sigaction *sigchild_action) -{ -#ifdef SA_NOCLDWAIT - sigchild_action->sa_flags = SA_NOCLDWAIT; -#else - sigchild_action->sa_flags = 0; -#endif - sigchild_action->sa_handler = sigchild_handler; - sigemptyset(&sigchild_action->sa_mask); -#ifdef SA_RESTART - sig_handler_action->sa_flags = SA_RESTART; -#else - sig_handler_action->sa_flags = 0; -#endif - sig_handler_action->sa_handler = sig_handler; - sigemptyset(&sig_handler_action->sa_mask); - - /* Enable automatic zombie reaping */ - sigaction(SIGCHLD, sigchild_action, NULL); - sigaction(SIGPIPE, sigchild_action, NULL); - sigaction(SIGALRM, sig_handler_action, NULL); - sigaction(SIGHUP, sig_handler_action, NULL); - sigaction(SIGINT, sig_handler_action, NULL); - sigaction(SIGQUIT, sig_handler_action, NULL); - sigaction(SIGTERM, sig_handler_action, NULL); - sigaction(SIGUSR1, sig_handler_action, NULL); - - /* use SIGVTALRM as a way to break out of the ioctl, don't restart */ - 
sig_handler_action->sa_flags = 0; - sigaction(SIGVTALRM, sig_handler_action, NULL); -} - /** * start_motion_thread * @@ -2666,13 +2791,8 @@ int main (int argc, char **argv) motion_startup(1, argc, argv); -#ifdef HAVE_FFMPEG - /* - * FFMpeg initialization is only performed if FFMpeg support was found - * and not disabled during the configure phase. - */ ffmpeg_init(); -#endif /* HAVE_FFMPEG */ + #ifdef HAVE_MYSQL if (mysql_library_init(0, NULL, NULL)) { fprintf(stderr, "could not initialize MySQL library\n"); @@ -2737,7 +2857,7 @@ int main (int argc, char **argv) MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO, "%s: Restarting motion."); motion_shutdown(); restart = 0; /* only one reset for now */ -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 SLEEP(5, 0); // maybe some cameras needs less time #endif motion_startup(0, argc, argv); /* 0 = skip daemon init */ @@ -2893,9 +3013,7 @@ int main (int argc, char **argv) } while (restart); /* loop if we're supposed to restart */ -#ifdef HAVE_FFMPEG ffmpeg_finalise(); -#endif /* HAVE_FFMPEG */ // Be sure that http control exits fine cnt_list[0]->webcontrol_finish = 1; @@ -3116,13 +3234,16 @@ int myfclose(FILE* fh) * Returns: number of bytes written to the string s */ size_t mystrftime(const struct context *cnt, char *s, size_t max, const char *userformat, - const struct tm *tm, const char *filename, int sqltype) + const struct timeval *tv1, const char *filename, int sqltype) { char formatstring[PATH_MAX] = ""; char tempstring[PATH_MAX] = ""; char *format, *tempstr; const char *pos_userformat; int width; + struct tm timestamp_tm; + + localtime_r(&tv1->tv_sec, ×tamp_tm); format = formatstring; @@ -3269,6 +3390,6 @@ size_t mystrftime(const struct context *cnt, char *s, size_t max, const char *us *format = '\0'; format = formatstring; - return strftime(s, max, format, tm); + return strftime(s, max, format, ×tamp_tm); } diff --git a/motion.h b/motion.h index 98f6123d..7e96104e 100644 --- a/motion.h +++ b/motion.h @@ -82,7 +82,7 @@ #endif -/* +/* * 
The macro below defines a version of sleep using nanosleep * If a signal such as SIG_CHLD interrupts the sleep we just continue sleeping */ @@ -91,7 +91,7 @@ tv.tv_sec = (seconds); \ tv.tv_nsec = (nanoseconds); \ while (nanosleep(&tv, &tv) == -1); \ - } + } #define CLEAR(x) memset(&(x), 0, sizeof(x)) @@ -211,7 +211,7 @@ struct images; #include "mmalcam.h" #endif -/* +/* * Structure to hold images information * The idea is that this should have all information about a picture e.g. diffs, timestamp etc. * The exception is the label information, it uses a lot of memory @@ -230,13 +230,12 @@ struct images; struct image_data { unsigned char *image; int diffs; - time_t timestamp; /* Timestamp when image was captured */ - struct tm timestamp_tm; + struct timeval timestamp_tv; int shot; /* Sub second timestamp count */ - /* - * Movement center to img center distance - * Note: Dist is calculated distX*distX + distY*distY + /* + * Movement center to img center distance + * Note: Dist is calculated distX*distX + distY*distY */ unsigned long cent_dist; @@ -247,24 +246,24 @@ struct image_data { int total_labels; }; -/* +/* * DIFFERENCES BETWEEN imgs.width, conf.width AND rotate_data.cap_width * (and the corresponding height values, of course) * =========================================================================== * Location Purpose - * + * * conf The values in conf reflect width and height set in the - * configuration file. These can be set via http remote control, + * configuration file. These can be set via http remote control, * but they are not used internally by Motion, so it won't break * anything. These values are transferred to imgs in vid_start. * * imgs The values in imgs are the actual output dimensions. Normally * the output dimensions are the same as the capture dimensions, - * but for 90 or 270 degrees rotation, they are not. E.g., if + * but for 90 or 270 degrees rotation, they are not. 
E.g., if * you capture at 320x240, and rotate 90 degrees, the output * dimensions are 240x320. - * These values are set from the conf values in vid_start, or - * from the first JPEG image in netcam_start. For 90 or 270 + * These values are set from the conf values in vid_start, or + * from the first JPEG image in netcam_start. For 90 or 270 * degrees rotation, they are swapped in rotate_init. * * rotate_data The values in rotate_data are named cap_width and cap_height, @@ -292,13 +291,16 @@ struct images { unsigned char *smartmask; unsigned char *smartmask_final; unsigned char *common_buffer; + + unsigned char *mask_privacy; /* Buffer for the privacy mask values */ + int *smartmask_buffer; int *labels; int *labelsize; int width; int height; int type; - int picture_type; /* Output picture type IMAGE_JPEG, IMAGE_PPM */ + int picture_type; /* Output picture type IMAGE_JPEG, IMAGE_PPM */ int size; int motionsize; int labelgroup_max; @@ -311,7 +313,7 @@ struct images { struct rotdata { /* Temporary buffer for 90 and 270 degrees rotation. */ unsigned char *temp_buf; - /* + /* * Degrees to rotate; copied from conf.rotate_deg. This is the value * that is actually used. The value of conf.rotate_deg cannot be used * because it can be changed by motion-control, and changing rotation @@ -319,8 +321,8 @@ struct rotdata { */ int degrees; /* - * Capture width and height - different from output width and height if - * rotating 90 or 270 degrees. + * Capture width and height - different from output width and height if + * rotating 90 or 270 degrees. 
*/ int cap_width; int cap_height; @@ -398,7 +400,7 @@ struct context { unsigned int moved; unsigned int pause; int missing_frame_counter; /* counts failed attempts to fetch picture frame from camera */ - unsigned int lost_connection; + unsigned int lost_connection; int video_dev; int pipe; @@ -406,7 +408,7 @@ struct context { struct stream stream; int stream_count; - + #if defined(HAVE_MYSQL) || defined(HAVE_PGSQL) || defined(HAVE_SQLITE3) int sql_mask; #endif @@ -428,14 +430,42 @@ struct context { char extpipefilename[PATH_MAX]; int movie_last_shot; -#ifdef HAVE_FFMPEG struct ffmpeg *ffmpeg_output; struct ffmpeg *ffmpeg_output_debug; struct ffmpeg *ffmpeg_timelapse; struct ffmpeg *ffmpeg_smartmask; char timelapsefilename[PATH_MAX]; char motionfilename[PATH_MAX]; -#endif + + int area_minx[9], area_miny[9], area_maxx[9], area_maxy[9]; + int areadetect_eventnbr; + /* ToDo Determine why we need these...just put it all into prepare? */ + unsigned long long int timenow, timebefore; + + unsigned int rate_limit; + time_t lastframetime; + int minimum_frame_time_downcounter; + unsigned int get_image; /* Flag used to signal that we capture new image when we run the loop */ + + unsigned int text_size_factor; + long int required_frame_time, frame_delay; + + long int rolling_average_limit; + long int *rolling_average_data; + unsigned long int rolling_average; + + int olddiffs; //only need this in here for a printf later...do we need that printf? + int smartmask_ratio; + int smartmask_count; + + int previous_diffs, previous_location_x, previous_location_y; + unsigned long int time_last_frame, time_current_frame; + + unsigned int smartmask_lastrate; + + unsigned int passflag; //only purpose is to flag first frame vs all others..... 
+ int rolling_frame; + }; extern pthread_mutex_t global_lock; @@ -450,6 +480,6 @@ void * mymalloc(size_t); void * myrealloc(void *, size_t, const char *); FILE * myfopen(const char *, const char *); int myfclose(FILE *); -size_t mystrftime(const struct context *, char *, size_t, const char *, const struct tm *, const char *, int); +size_t mystrftime(const struct context *, char *, size_t, const char *, const struct timeval *, const char *, int); int create_path(const char *); #endif /* _INCLUDE_MOTION_H */ diff --git a/motion_guide.html b/motion_guide.html index 98134d5e..0bbc11c9 100644 --- a/motion_guide.html +++ b/motion_guide.html @@ -608,7 +608,7 @@ how Motion is built. --with-pwcbsd Use pwcbsd based webcams ( only BSD ) - This option allow to build motion to support V4L/V4L2 in BSD. + This option allow to build motion to support V4L2 in BSD. --without-bktr @@ -616,8 +616,8 @@ how Motion is built. ONLY used in *BSD - --without-v4l - Exclude using v4l (video4linux) subsystem. Makes Motion so it only supports network cameras. + --without-v4l2 + Exclude using v4l2 (video4linux2) subsystem. Makes Motion so it only supports network cameras. Can be used if you do not need support and maybe lack some of the libraries for it. @@ -1042,15 +1042,18 @@ Motion permits the use of video cards that have discreet input channels. Since the option input must be set to the value -1 for USB cameras.

-Network cameras are set up via the netcam_url parameter. +Network cameras are set up via the + netcam_url parameter. The latest versions of Motion support rtsp format which many cameras now stream. The URL connection string to enter is specific to the camera and is usually provided by the manufacturer. The connection string is the same as what would be used by other video playing software such as VLC. If the camera does not stream via RTSP and instead uses a MJPEG, then Motion -can also view that format. See the option netcam_url for additional options. +can also view that format. See the option netcam_url +for additional options.

-Raspberry Pi cameras are set up via the mmalcam_name parameter. +Raspberry Pi cameras are set up via the +mmalcam_name parameter. Note that name for this parameter derives from the MMAL/OpenMax software. The most common use of this option is to use the Raspberry PI camera.

@@ -1082,6 +1085,16 @@ option frequency. Otherwise set frequency to 0. Finally you need to set the TV norm. Values: 0 (PAL), 1 (NTSC), 2 (SECAM), 3 (PAL NC no colour). Default is 0 (PAL). If your camera is a PAL black and white you may get a better result with norm=3 (PAL no colour).

+ +Static files can also be processed with a bit of additional setup via a v4l2loopback. +Install the loopback software to create a /dev/videoX device and then use software such as ffmpeg to stream +the static file into the v4l2 device. e.g. ffmpeg -re -i -f v4l2 -pix_fmt gray /dev/video0 +As illustrated in the example, this method can also be used to reformat the content to a different pixel. This +can be helpful as a interim process where ffmpeg supports a particular format but that format is not yet supported +by Motion. +

+ +

@@ -1335,6 +1348,12 @@ Some configuration options are only used if Motion is built on a system that has mask_file mask_file + + + + mask_privacy + + max_mpeg_time max_movie_time @@ -1994,9 +2013,10 @@ Some configuration options are only used if Motion is built on a system that has noise_tune area_detect mask_file - smart_mask_speed + mask_privacy + smart_mask_speed lightswitch minimum_motion_frames event_gap @@ -2598,6 +2618,9 @@ format that Motion uses internally.
  • V4L2_PIX_FMT_YUYV : 15 'YUYV'
  • V4L2_PIX_FMT_YUV422P : 16 '422P'
  • V4L2_PIX_FMT_YUV420 : 17 'YU12'
  • +
  • V4L2_PIX_FMT_Y10 : 18 'YU10'
  • +
  • V4L2_PIX_FMT_Y12 : 19 'YU12'
  • +
  • V4L2_PIX_FMT_GREY : 20 'GREY'
  • @@ -3003,7 +3026,7 @@ Motion automatically swaps width and height if you rotate 90 or 270 degrees, so

    The width in pixels of each frame. Valid range is camera dependent. -Motion does not scale so should be set to the actual size of the v4l device. +Motion does not scale so should be set to the actual size of the v4l2 device. In case of a net camera motion sets the height to the height of the first image read except for rtsp streams which does rescale the network camera image to the requested dimensions. Note that this rescaling comes at a very high CPU cost so it @@ -3492,6 +3515,26 @@ Mask file (converted to png format so it can be shown by your web browser)

    +

    mask_privacy

    +

    +
      +
    • Type: String
    • +
    • Range / Valid values: Max 4095 characters
    • +
    • Default: Not defined
    • +
    +

    +The full path and filename for the privacy masking pgm file. This file works like the mask_file as +described above. The difference with this parameter is that while the mask_file excludes the section from +detecting motion, this file excludes the section of the image completely. Excluded areas will appear as +white on all images and movies. +

    +mask_privacy is applied before detection so no motion will ever be detected in the excluded area. +This parameter could however still be used with the mask_file. e.g. This file could exclude the +neighbors yard and the mask_file would exclude the blowing tree from motion detection. The resulting +pictures/movies would show solid white in place of the neighbors yard but the tree would still be in +the pictures/movies. +

    +

    smart_mask_speed

      @@ -4278,109 +4321,21 @@ Camstream is "fooled" to think it is looking at a real camera.

      Installing

      -Installing the video loopback device is not difficult. At least not when you have this document available. -

      -First you must prepare your system for more video devices. You will need two extra devices for each video -pipe that you want. -

      -For example if you have 4 cameras they will probably run at -/dev/video0, /dev/video1, /dev/video2, and /dev/video3. So you will need additional 8 video devices. -This is easy to do. -

      -
      -mknod /dev/video4 c 81 4
      -mknod /dev/video5 c 81 5
      -mknod /dev/video6 c 81 6
      -mknod /dev/video7 c 81 7
      -mknod /dev/video8 c 81 8
      -mknod /dev/video9 c 81 9
      -mknod /dev/video10 c 81 10
      -mknod /dev/video11 c 81 11
      -
      -

      -Note that the video device number is the same as the last parameter given on each line. -

      -You may need to set the ownership and permissions (chown and chmod) to be the same as the video devices -that were already there. -

      -Now you need to install the video loopback device. -

      -Download the latest via the apt packages and place the file in a place of your own choice. -

      -Untar and uncompress the file to the place you want the program installed. Editor recommends /usr/local/vloopback. -

      -cd /usr/local -

      -tar -xvzf /path/to/vloopback-1.1-rc1.tar.gz -

      -You now have a directory called vloopback-1.1-rc1. You can rename it to vloopback (mv vloopback-1.1-rc1 vloopback). -I recommend creating a symbolic link to the current version. This way you can more easily experiment with different -versions simply by changing the link. -

      -ln -s vloopback-1.1-rc1 vloopback -

      -Now change to the new directory -

      -cd vloopback -

      -Build the code -

      -make -

      -There is a good chance that the make will not work and give you a long list of errors. -To run make the following must be available on you machine.
        -
      • The kernel source files must be installed. -
      • The source files must be available at /usr/src/linux.
        E.g. -the new Red Hat 7.3 does not have a link to the sources called linux. Instead there is a link -called linux-2.4. This is easy to fix. Just create a link to the real source tree. Do not rename! -Add a link using this command (replacing the kernel version number with the one you have on your -machine)
        ln -s /usr/src/linux-2.4.18-4 /usr/src/linux -
      • Alternatively you can change the vloopback makefile so that the "LINUXSRC=/usr/src/linux" line is -changed to the actual path. I recommend the link solution since this may solve other similar problems that you -can get when installing other software. -
      -

      -When compiling on a newer Linux distribution you may get a warning about a header file malloc.h. -To remove this warning simply change the header reference as suggested by the warning. -

      -In vloopback.c you replace the line -

      -#include <linux/malloc.h> -

      -with the line -

      -#include <linux/slab.h> -

      -Install the code you built as a Kernel module. There are two options: -pipes should be set to the number of video loopbacks that you want. Probably one for each camera. -The dev_offset defines which video device number will be the first. If dev_offset is not defined the -vloopback module will install itself from the first available video device. If you want the cameras to be -assigned to the lower video device numbers you must either load vloopback after loading the video device -modules OR use the dev_offset option when loading vloopback. Vloopback then installs itself in the sequence -input 0, output 0, input 1, output 1, input 2, output 2 etc. Here is shown the command for our example of 4 -cameras and 4 loopback devices and the first loopback device offset to /dev/video4. -

      -/sbin/insmod /usr/local/vloopback/vloopback.o pipes=4 dev_offset=4 -

      -When you run the command you may get a warning about tainting the Kernel. Just ignore this. -You can choose to copy the vloopback.o file into a directory in the /lib/modules tree where the insmod/modprobe programs are already looking for modules. Then the command gets simpler (/sbin/insmod vloopback pipes=.....). -

      -If you want the loopback device to load during boot, you can place the call in one of the bootup scripts such as /etc/rc.d/rc.local. Vloopback should be loaded before you start motion. +The video loopback device can be added installed via apt in many distributions. The package tested +with Motion is v4l2loopback-dkms. Once the package is installed, you just need to run +sudo modprobe v4l2loopback. This will add a new video device that you +can use for the loopback. It is believed that there are additional options associated with the +v4l2loopback that allows for adding more than one device. See the documentation of the v4l2loopback +project for additional details.

      To activate the vloopback device in motion set the 'video_pipe' option in the motion.conf file. You can also view the special motion pictures where you see the changed pixels by setting the option 'motion_video_pipe' in motion.conf. When setting the video_pipe and/or motion_video_pipe options either -specify the input device as e.g. /dev/video4. You can also set the parameter to '-' which means that motion -will find the first vacant video loopback device input. If you have more than one camera you may want to -control which loopback device each camera uses. Then you need to define the specific device name in motion.conf -for the first camera and in each camera config file for the other cameras. If you set the video_pipe parameter -to '-' in the motion.conf file and not setting it in the camera config files, motion automatically assign video -devices in the same sequence as the camera config files are loaded. You can combine both video_pipe and motion_video_pipe -but then naturally you will need twice as many pipes. +specify the input device as e.g. /dev/video4.

      De-activating should be done with this command

      -/sbin/modprobe -r vloopback +sudo modprobe -r v4l2loopback

      @@ -4394,10 +4349,9 @@ De-activating should be done with this command
    • Default:

    -if a dash '-' is given motion will use /proc/video/vloopback/vloopbacks to locate a free pipe. Default: not set -The video4linux video loopback input device for normal images. If a particular pipe is to be used then use the -device filename of this pipe. If a dash '-' is given motion will use /proc/video/vloopback/vloopbacks to locate -a free pipe. +Default: not set +The video4linux video loopback input device for normal images. The device would be specified +in the format like /dev/video1

    @@ -4408,9 +4362,8 @@ a free pipe.
  • Default: Not defined
  • -The video4linux video loopback input device for motion images. -If a particular pipe is to be used then use the device filename of this pipe, -if a dash '-' is given motion will use /proc/video/vloopback/vloopbacks to locate a free pipe. Default: not set +The video4linux video loopback input device for motion images. The device would be specified +in the format like /dev/video1

    diff --git a/netcam_rtsp.c b/netcam_rtsp.c index 60fabc92..21bf3791 100644 --- a/netcam_rtsp.c +++ b/netcam_rtsp.c @@ -728,9 +728,8 @@ int netcam_connect_rtsp(netcam_context_ptr netcam){ return 0; #else /* No FFmpeg/Libav */ - netcam->rtsp->status = RTSP_NOTCONNECTED; - netcam->rtsp->format_context = NULL; - MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support"); + if (netcam) + MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support"); return -1; #endif /* End #ifdef HAVE_FFMPEG */ } @@ -757,7 +756,7 @@ void netcam_shutdown_rtsp(netcam_context_ptr netcam){ netcam_rtsp_close_context(netcam); MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO,"%s: netcam shut down"); } - + free(netcam->rtsp->path); free(netcam->rtsp->user); free(netcam->rtsp->pass); @@ -767,8 +766,8 @@ void netcam_shutdown_rtsp(netcam_context_ptr netcam){ #else /* No FFmpeg/Libav */ /* Stop compiler warnings */ - netcam->rtsp->status = RTSP_NOTCONNECTED; - MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support"); + if (netcam) + MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support"); #endif /* End #ifdef HAVE_FFMPEG */ } @@ -892,8 +891,8 @@ int netcam_setup_rtsp(netcam_context_ptr netcam, struct url_t *url){ #else /* No FFmpeg/Libav */ /* Stop compiler warnings */ - if (url->port == url->port) netcam->rtsp->status = RTSP_NOTCONNECTED; - MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support"); + if ((url) || (netcam)) + MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support"); return -1; #endif /* End #ifdef HAVE_FFMPEG */ } @@ -922,7 +921,7 @@ int netcam_next_rtsp(unsigned char *image , netcam_context_ptr netcam){ * or call anything else without taking care of thread safety. 
* The netcam mutex *only* protects netcam->latest, it cannot be * used to safely call other netcam functions. */ - + pthread_mutex_lock(&netcam->mutex); memcpy(image, netcam->latest->ptr, netcam->latest->used); pthread_mutex_unlock(&netcam->mutex); diff --git a/picture.c b/picture.c index bd7266af..cd20a8d3 100644 --- a/picture.c +++ b/picture.c @@ -222,7 +222,7 @@ static void put_subjectarea(struct tiff_writing *into, const struct coord *box) */ static void put_jpeg_exif(j_compress_ptr cinfo, const struct context *cnt, - const struct tm *timestamp, + const struct timeval *tv1, const struct coord *box) { /* description, datetime, and subtime are the values that are actually @@ -230,16 +230,18 @@ static void put_jpeg_exif(j_compress_ptr cinfo, */ char *description, *datetime, *subtime; char datetime_buf[22]; + struct tm timestamp_tm; - if (timestamp) { + if (tv1->tv_sec) { + localtime_r(&tv1->tv_sec, ×tamp_tm); /* Exif requires this exact format */ snprintf(datetime_buf, 21, "%04d:%02d:%02d %02d:%02d:%02d", - timestamp->tm_year + 1900, - timestamp->tm_mon + 1, - timestamp->tm_mday, - timestamp->tm_hour, - timestamp->tm_min, - timestamp->tm_sec); + timestamp_tm.tm_year + 1900, + timestamp_tm.tm_mon + 1, + timestamp_tm.tm_mday, + timestamp_tm.tm_hour, + timestamp_tm.tm_min, + timestamp_tm.tm_sec); datetime = datetime_buf; } else { datetime = NULL; @@ -253,7 +255,7 @@ static void put_jpeg_exif(j_compress_ptr cinfo, description = malloc(PATH_MAX); mystrftime(cnt, description, PATH_MAX-1, cnt->conf.exif_text, - timestamp, NULL, 0); + tv1, NULL, 0); } else { description = NULL; } @@ -351,7 +353,7 @@ static void put_jpeg_exif(j_compress_ptr cinfo, if (datetime) { memcpy(writing.buf, exif_tzoffset_tag, 12); - put_sint16(writing.buf+8, timestamp->tm_gmtoff / 3600); + put_sint16(writing.buf+8, timestamp_tm.tm_gmtoff / 3600); writing.buf += 12; } @@ -413,7 +415,7 @@ static void put_jpeg_exif(j_compress_ptr cinfo, */ static int put_jpeg_yuv420p_memory(unsigned char *dest_image, 
int image_size, unsigned char *input_image, int width, int height, int quality, - struct context *cnt, struct tm *tm, struct coord *box) + struct context *cnt, struct timeval *tv1, struct coord *box) { int i, j, jpeg_image_size; @@ -451,14 +453,14 @@ static int put_jpeg_yuv420p_memory(unsigned char *dest_image, int image_size, jpeg_set_quality(&cinfo, quality, TRUE); cinfo.dct_method = JDCT_FASTEST; - + _jpeg_mem_dest(&cinfo, dest_image, image_size); // Data written to mem - + jpeg_start_compress(&cinfo, TRUE); - put_jpeg_exif(&cinfo, cnt, tm, box); - + put_jpeg_exif(&cinfo, cnt, tv1, box); + /* If the image is not a multiple of 16, this overruns the buffers * we'll just pad those last bytes with zeros */ @@ -468,13 +470,13 @@ static int put_jpeg_yuv420p_memory(unsigned char *dest_image, int image_size, y[i] = input_image + width * (i + j); if (i % 2 == 0) { cb[i / 2] = input_image + width * height + width / 2 * ((i + j) /2); - cr[i / 2] = input_image + width * height + width * height / 4 + width / 2 * ((i + j) / 2); + cr[i / 2] = input_image + width * height + width * height / 4 + width / 2 * ((i + j) / 2); } } else { y[i] = 0x00; cb[i] = 0x00; cr[i] = 0x00; - } + } } jpeg_write_raw_data(&cinfo, data, 16); } @@ -557,7 +559,7 @@ static int put_jpeg_grey_memory(unsigned char *dest_image, int image_size, unsig static void put_jpeg_yuv420p_file(FILE *fp, unsigned char *image, int width, int height, int quality, - struct context *cnt, struct tm *tm, struct coord *box) + struct context *cnt, struct timeval *tv1, struct coord *box) { int i, j; @@ -598,7 +600,7 @@ static void put_jpeg_yuv420p_file(FILE *fp, jpeg_stdio_dest(&cinfo, fp); // Data written to file jpeg_start_compress(&cinfo, TRUE); - put_jpeg_exif(&cinfo, cnt, tm, box); + put_jpeg_exif(&cinfo, cnt, tv1, box); for (j = 0; j < height; j += 16) { for (i = 0; i < 16; i++) { @@ -612,8 +614,8 @@ static void put_jpeg_yuv420p_file(FILE *fp, y[i] = 0x00; cb[i] = 0x00; cr[i] = 0x00; - } - } + } + } 
jpeg_write_raw_data(&cinfo, data, 16); } @@ -897,7 +899,7 @@ int put_picture_memory(struct context *cnt, unsigned char* dest_image, int image switch (cnt->imgs.type) { case VIDEO_PALETTE_YUV420P: return put_jpeg_yuv420p_memory(dest_image, image_size, image, - cnt->imgs.width, cnt->imgs.height, quality, cnt, &(cnt->current_image->timestamp_tm), &(cnt->current_image->location)); + cnt->imgs.width, cnt->imgs.height, quality, cnt, &(cnt->current_image->timestamp_tv), &(cnt->current_image->location)); case VIDEO_PALETTE_GREY: return put_jpeg_grey_memory(dest_image, image_size, image, cnt->imgs.width, cnt->imgs.height, quality); @@ -916,7 +918,7 @@ void put_picture_fd(struct context *cnt, FILE *picture, unsigned char *image, in } else { switch (cnt->imgs.type) { case VIDEO_PALETTE_YUV420P: - put_jpeg_yuv420p_file(picture, image, cnt->imgs.width, cnt->imgs.height, quality, cnt, &(cnt->current_image->timestamp_tm), &(cnt->current_image->location)); + put_jpeg_yuv420p_file(picture, image, cnt->imgs.width, cnt->imgs.height, quality, cnt, &(cnt->current_image->timestamp_tv), &(cnt->current_image->location)); break; case VIDEO_PALETTE_GREY: put_jpeg_grey_file(picture, image, cnt->imgs.width, cnt->imgs.height, quality); @@ -1026,7 +1028,7 @@ unsigned char *get_pgm(FILE *picture, int width, int height) for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { resized_image[y * width + x] = image[ - (mask_height - 1) * y / (height - 1) * mask_width + + (mask_height - 1) * y / (height - 1) * mask_width + (mask_width - 1) * x / (width - 1)]; } } @@ -1106,12 +1108,7 @@ void preview_save(struct context *cnt) /* Use filename of movie i.o. jpeg_filename when set to 'preview'. 
*/ use_imagepath = strcmp(cnt->conf.imagepath, "preview"); -#ifdef HAVE_FFMPEG - if ((cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe)) - && !use_imagepath) { -#else - if ((cnt->conf.useextpipe && cnt->extpipe) && !use_imagepath) { -#endif + if ((cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe)) && !use_imagepath) { if (cnt->conf.useextpipe && cnt->extpipe) { basename_len = strlen(cnt->extpipefilename) + 1; strncpy(previewname, cnt->extpipefilename, basename_len); @@ -1140,7 +1137,7 @@ void preview_save(struct context *cnt) else imagepath = (char *)DEF_IMAGEPATH; - mystrftime(cnt, filename, sizeof(filename), imagepath, &cnt->imgs.preview_image.timestamp_tm, NULL, 0); + mystrftime(cnt, filename, sizeof(filename), imagepath, &cnt->imgs.preview_image.timestamp_tv, NULL, 0); snprintf(previewname, PATH_MAX, "%s/%s.%s", cnt->conf.filepath, filename, imageext(cnt)); put_picture(cnt, previewname, cnt->imgs.preview_image.image, FTYPE_IMAGE); diff --git a/pwc-ioctl.h b/pwc-ioctl.h index 5f44c082..4117d122 100644 --- a/pwc-ioctl.h +++ b/pwc-ioctl.h @@ -103,7 +103,7 @@ struct pwc_serial { char serial[30]; /* String with serial number. Contains terminating 0 */ }; - + /* pwc_whitebalance.mode values */ #define PWC_WB_INDOOR 0 #define PWC_WB_OUTDOOR 1 @@ -111,14 +111,14 @@ struct pwc_serial #define PWC_WB_MANUAL 3 #define PWC_WB_AUTO 4 -/* Used with VIDIOCPWC[SG]AWB (Auto White Balance). +/* Used with VIDIOCPWC[SG]AWB (Auto White Balance). Set mode to one of the PWC_WB_* values above. - *red and *blue are the respective gains of these colour components inside + *red and *blue are the respective gains of these colour components inside the camera; range 0..65535 - When 'mode' == PWC_WB_MANUAL, 'manual_red' and 'manual_blue' are set or read; + When 'mode' == PWC_WB_MANUAL, 'manual_red' and 'manual_blue' are set or read; otherwise undefined. 'read_red' and 'read_blue' are read-only. 
-*/ +*/ struct pwc_whitebalance { int mode; @@ -126,9 +126,9 @@ struct pwc_whitebalance int read_red, read_blue; /* R/O */ }; -/* +/* 'control_speed' and 'control_delay' are used in automatic whitebalance mode, - and tell the camera how fast it should react to changes in lighting, and + and tell the camera how fast it should react to changes in lighting, and with how much delay. Valid values are 0..65535. */ struct pwc_wb_speed @@ -157,11 +157,11 @@ struct pwc_imagesize #define PWC_MPT_TILT 0x02 #define PWC_MPT_TIMEOUT 0x04 /* for status */ -/* Set angles; when absolute != 0, the angle is absolute and the +/* Set angles; when absolute != 0, the angle is absolute and the driver calculates the relative offset for you. This can only be used with VIDIOCPWCSANGLE; VIDIOCPWCGANGLE always returns absolute angles. - */ + */ struct pwc_mpt_angles { int absolute; /* write-only */ @@ -188,7 +188,7 @@ struct pwc_mpt_status /* This is used for out-of-kernel decompression. With it, you can get all the necessary information to initialize and use the decompressor routines in standalone applications. - */ + */ struct pwc_video_command { int type; /* camera type (645, 675, 730, etc.) 
*/ @@ -273,7 +273,7 @@ struct pwc_video_command /* Flickerless mode; = 0 off, otherwise on */ #define VIDIOCPWCSFLICKER _IOW('v', 208, int) -#define VIDIOCPWCGFLICKER _IOR('v', 208, int) +#define VIDIOCPWCGFLICKER _IOR('v', 208, int) /* Dynamic noise reduction; 0 off, 3 = high noise reduction */ #define VIDIOCPWCSDYNNOISE _IOW('v', 209, int) @@ -282,7 +282,7 @@ struct pwc_video_command /* Real image size as used by the camera; tells you whether or not there's a gray border around the image */ #define VIDIOCPWCGREALSIZE _IOR('v', 210, struct pwc_imagesize) - /* Motorized pan & tilt functions */ + /* Motorized pan & tilt functions */ #define VIDIOCPWCMPTRESET _IOW('v', 211, int) #define VIDIOCPWCMPTGRANGE _IOR('v', 211, struct pwc_mpt_range) #define VIDIOCPWCMPTSANGLE _IOW('v', 212, struct pwc_mpt_angles) @@ -299,11 +299,11 @@ struct pwc_table_init_buffer { /* * This is private command used when communicating with v4l2. - * In the future all private ioctl will be remove/replace to + * In the future all private ioctl will be remove/replace to * use interface offer by v4l2. 
*/ -#if (defined(MOTION_V4L2)) && defined(__linux__) +#if (!defined(WITHOUT_V4L2)) && defined(__linux__) #define V4L2_CID_PRIVATE_SAVE_USER (V4L2_CID_PRIVATE_BASE + 0) #define V4L2_CID_PRIVATE_RESTORE_USER (V4L2_CID_PRIVATE_BASE + 1) @@ -323,6 +323,6 @@ struct pwc_raw_frame { __u8 rawframe[0]; /* frame_size = H/4*vbandlength */ } __attribute__ ((packed)); -#endif /* MOTION_V4L2 && __linux__ */ +#endif /* !WITHOUT_V4L2 && __linux__ */ #endif diff --git a/track.c b/track.c index 6e19ddcd..1679306c 100644 --- a/track.c +++ b/track.c @@ -8,7 +8,7 @@ #include #include "motion.h" -#ifdef MOTION_V4L2 +#ifndef WITHOUT_V4L2 #include #include "pwc-ioctl.h" #endif @@ -52,14 +52,14 @@ static unsigned int servo_move(struct context *cnt, struct coord *cent, struct images *imgs, unsigned int manual); static unsigned int iomojo_move(struct context *cnt, int dev, struct coord *cent, struct images *imgs); -#ifdef MOTION_V4L2 +#ifndef WITHOUT_V4L2 static unsigned int lqos_center(struct context *cnt, int dev, int xoff, int yoff); static unsigned int lqos_move(struct context *cnt, int dev, struct coord *cent, struct images *imgs, unsigned int manual); static unsigned int uvc_center(struct context *cnt, int dev, int xoff, int yoff); static unsigned int uvc_move(struct context *cnt, int dev, struct coord *cent, struct images *imgs, unsigned int manual); -#endif /* MOTION_V4L2 */ +#endif /* WITHOUT_V4L2 */ /* Add a call to your functions here: */ unsigned int track_center(struct context *cnt, int dev ATTRIBUTE_UNUSED, @@ -79,7 +79,7 @@ unsigned int track_center(struct context *cnt, int dev ATTRIBUTE_UNUSED, } else if (cnt->track.type == TRACK_TYPE_SERVO) { return servo_center(cnt, xoff, yoff); } -#ifdef MOTION_V4L2 +#ifndef WITHOUT_V4L2 else if (cnt->track.type == TRACK_TYPE_PWC) return lqos_center(cnt, dev, xoff, yoff); else if (cnt->track.type == TRACK_TYPE_UVC) @@ -108,7 +108,7 @@ unsigned int track_move(struct context *cnt, int dev, struct coord *cent, struct return stepper_move(cnt, 
cent, imgs); else if (cnt->track.type == TRACK_TYPE_SERVO) return servo_move(cnt, cent, imgs, manual); -#ifdef MOTION_V4L2 +#ifndef WITHOUT_V4L2 else if (cnt->track.type == TRACK_TYPE_PWC) return lqos_move(cnt, dev, cent, imgs, manual); else if (cnt->track.type == TRACK_TYPE_UVC) @@ -780,7 +780,7 @@ static unsigned int iomojo_move(struct context *cnt, int dev, struct coord *cent Logitech QuickCam Orbit camera tracking code by folkert@vanheusden.com ******************************************************************************/ -#ifdef MOTION_V4L2 +#ifndef WITHOUT_V4L2 static unsigned int lqos_center(struct context *cnt, int dev, int x_angle, int y_angle) { int reset = 3; @@ -1219,4 +1219,4 @@ static unsigned int uvc_move(struct context *cnt, int dev, struct coord *cent, return cnt->track.move_wait; } -#endif /* MOTION_V4L2 */ +#endif /* WITHOUT_V4L2 */ diff --git a/track.h b/track.h index 0e44ae6b..9477eec6 100644 --- a/track.h +++ b/track.h @@ -117,7 +117,7 @@ unsigned int track_move(struct context *, int, struct coord *, struct images *, #define SERVO_BAUDRATE B9600 -#define SERVO_COMMAND_STATUS 0 +#define SERVO_COMMAND_STATUS 0 #define SERVO_COMMAND_LEFT_N 1 #define SERVO_COMMAND_RIGHT_N 2 #define SERVO_COMMAND_LEFT 3 @@ -155,7 +155,7 @@ unsigned int track_move(struct context *, int, struct coord *, struct images *, #define IOMOJO_DIRECTION_DOWN 0x04 #define IOMOJO_DIRECTION_UP 0x08 -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 /* * Defines for the Logitech QuickCam Orbit/Sphere USB webcam @@ -165,11 +165,9 @@ unsigned int track_move(struct context *, int, struct coord *, struct images *, #define LQOS_HORIZONAL_DEGREES 120 /* - * UVC + * UVC */ -#ifdef MOTION_V4L2 - #ifndef V4L2_CID_PAN_RELATIVE #define V4L2_CID_PAN_RELATIVE (V4L2_CID_PRIVATE_BASE+7) #endif @@ -183,9 +181,8 @@ unsigned int track_move(struct context *, int, struct coord *, struct images *, #endif #define INCPANTILT 64 // 1 degree -#endif /* MOTION_V4L2 */ -#endif /* WITHOUT_V4L */ +#endif /* 
WITHOUT_V4L2 */ #endif /* _INCLUDE_TRACK_H */ diff --git a/video.c b/video.c deleted file mode 100644 index 76f0ffee..00000000 --- a/video.c +++ /dev/null @@ -1,453 +0,0 @@ -/* - * video.c - * - * Video stream functions for motion. - * Copyright 2000 by Jeroen Vreeken (pe1rxq@amsat.org) - * This software is distributed under the GNU public license version 2 - * See also the file 'COPYING'. - * - */ - -#include "motion.h" -#include "video.h" -#include "rotate.h" - -#if defined(HAVE_LINUX_VIDEODEV_H) && !defined(WITHOUT_V4L) - -/** - * v4l_picture_controls - */ -static void v4l_picture_controls(struct context *cnt, struct video_dev *viddev) -{ - int dev = viddev->fd; - struct video_picture vid_pic; - int make_change = 0; - - if (cnt->conf.contrast && cnt->conf.contrast != viddev->contrast) { - - if (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1) - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)"); - - make_change = 1; - vid_pic.contrast = cnt->conf.contrast * 256; - viddev->contrast = cnt->conf.contrast; - } - - if (cnt->conf.saturation && cnt->conf.saturation != viddev->saturation) { - - if (!make_change) { - if (ioctl(dev, VIDIOCGPICT, &vid_pic)==-1) - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)"); - } - - make_change = 1; - vid_pic.colour = cnt->conf.saturation * 256; - viddev->saturation = cnt->conf.saturation; - } - - if (cnt->conf.hue && cnt->conf.hue != viddev->hue) { - - if (!make_change) { - if (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1) - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)"); - } - - make_change = 1; - vid_pic.hue = cnt->conf.hue * 256; - viddev->hue = cnt->conf.hue; - } - -/* Only tested with PWCBSD in FreeBSD */ -#if defined(PWCBSD) - if (cnt->conf.frame_limit != viddev->fps) { - struct video_window vw; - int fps; - - if (ioctl(dev, VIDIOCGWIN, &vw) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl VIDIOCGWIN"); - } else { - fps = vw.flags >> PWC_FPS_SHIFT; - MOTION_LOG(INF, 
TYPE_VIDEO, NO_ERRNO, "%s: Get Current framerate %d .. trying %d", - fps, cnt->conf.frame_limit); - } - - fps = cnt->conf.frame_limit; - vw.flags = fps << PWC_FPS_SHIFT; - - if (ioctl(dev, VIDIOCSWIN, &vw) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl VIDIOCSWIN"); - } else if (ioctl(dev, VIDIOCGWIN, &vw) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl VIDIOCGWIN"); - } else { - fps = vw.flags >> PWC_FPS_SHIFT; - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set new framerate %d", fps); - } - - viddev->fps = fps; - } -#endif - - if (cnt->conf.autobright) { - - if (vid_do_autobright(cnt, viddev)) { - /* If we already read the VIDIOGPICT - we should not do it again. */ - if (!make_change) { - if (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1) - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)"); - } - - vid_pic.brightness = viddev->brightness * 256; - make_change = 1; - } - - } else if (cnt->conf.brightness && cnt->conf.brightness != viddev->brightness) { - - if ((!make_change) && (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1)) - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)"); - - make_change = 1; - vid_pic.brightness = cnt->conf.brightness * 256; - viddev->brightness = cnt->conf.brightness; - } - - if (make_change) { - if (ioctl(dev, VIDIOCSPICT, &vid_pic) == -1) - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSPICT)"); - } -} - -/******************************************************************************* - Video4linux capture routines -********************************************************************************/ - -/** - * v4l_start - * Initialize video device to start capturing and allocates memory map - * for video device. - * - * Returns mmapped buffer for video device or NULL if any error happens. 
- * - */ -unsigned char *v4l_start(struct video_dev *viddev, int width, int height,int input, - int norm, unsigned long freq, int tuner_number) -{ - int dev = viddev->fd; - struct video_capability vid_caps; - struct video_channel vid_chnl; - struct video_tuner vid_tuner; - struct video_mbuf vid_buf; - struct video_mmap vid_mmap; - void *map; - - if (ioctl (dev, VIDIOCGCAP, &vid_caps) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGCAP)"); - return NULL; - } - - if (vid_caps.type & VID_TYPE_MONOCHROME) - viddev->v4l_fmt = VIDEO_PALETTE_GREY; - - if (input != IN_DEFAULT) { - memset(&vid_chnl, 0, sizeof(struct video_channel)); - vid_chnl.channel = input; - - if (ioctl (dev, VIDIOCGCHAN, &vid_chnl) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGCHAN) Input %d", - input); - } else { - vid_chnl.channel = input; - vid_chnl.norm = norm; - if (ioctl (dev, VIDIOCSCHAN, &vid_chnl) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSCHAN) Input %d" - " Standard method %d", input, norm); - return NULL; - } - } - } - - if (freq) { - memset(&vid_tuner, 0, sizeof(struct video_tuner)); - vid_tuner.tuner = tuner_number; - if (ioctl (dev, VIDIOCGTUNER, &vid_tuner) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGTUNER) tuner %d", - tuner_number); - } else { - if (vid_tuner.flags & VIDEO_TUNER_LOW) - freq = freq * 16; /* steps of 1/16 KHz */ - else - freq = freq * 10 / 625; - - if (ioctl(dev, VIDIOCSFREQ, &freq) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSFREQ)" - " Frequency %ul", freq); - return NULL; - } - - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set Tuner to %d Frequency set to %ul", - tuner_number, freq); - } - } - - if (ioctl (dev, VIDIOCGMBUF, &vid_buf) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, NO_ERRNO, "%s: ioctl(VIDIOCGMBUF) - Error device" - " does not support memory map\n V4L capturing using read is deprecated!\n" - "Motion only supports mmap."); - return NULL; - 
} else { - map = mmap(0, vid_buf.size, PROT_READ|PROT_WRITE, MAP_SHARED, dev, 0); - viddev->size_map = vid_buf.size; - - if (vid_buf.frames > 1) { - viddev->v4l_maxbuffer = 2; - viddev->v4l_buffers[0] = map; - viddev->v4l_buffers[1] = (unsigned char *)map + vid_buf.offsets[1]; - } else { - viddev->v4l_buffers[0] = map; - viddev->v4l_maxbuffer = 1; - } - - if (MAP_FAILED == map) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: MAP_FAILED"); - return NULL; - } - - viddev->v4l_curbuffer = 0; - vid_mmap.format = viddev->v4l_fmt; - vid_mmap.frame = viddev->v4l_curbuffer; - vid_mmap.width = width; - vid_mmap.height = height; - - if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) { - MOTION_LOG(WRN, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with YUV420P, " - "trying YUV422 palette"); - viddev->v4l_fmt = VIDEO_PALETTE_YUV422; - vid_mmap.format = viddev->v4l_fmt; - /* Try again... */ - if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) { - MOTION_LOG(WRN, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with YUV422," - " trying YUYV palette"); - viddev->v4l_fmt = VIDEO_PALETTE_YUYV; - vid_mmap.format = viddev->v4l_fmt; - - if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) { - MOTION_LOG(WRN, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with YUYV, trying RGB24 palette"); - viddev->v4l_fmt = VIDEO_PALETTE_RGB24; - vid_mmap.format = viddev->v4l_fmt; - /* Try again... */ - - if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) { - MOTION_LOG(WRN, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with RGB24, trying" - "GREYSCALE palette"); - viddev->v4l_fmt = VIDEO_PALETTE_GREY; - vid_mmap.format = viddev->v4l_fmt; - - /* Try one last time... 
*/ - if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) { - MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with all supported palettes " - "- giving up"); - return NULL; - } - } - } - } - } - } - - switch (viddev->v4l_fmt) { - case VIDEO_PALETTE_YUV420P: - viddev->v4l_bufsize = (width * height * 3) / 2; - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_YUV420P palette"); - break; - case VIDEO_PALETTE_YUV422: - viddev->v4l_bufsize = (width * height * 2); - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_YUV422 palette"); - break; - case VIDEO_PALETTE_YUYV: - viddev->v4l_bufsize = (width * height * 2); - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_YUYV palette"); - break; - case VIDEO_PALETTE_RGB24: - viddev->v4l_bufsize = (width * height * 3); - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_RGB24 palette"); - break; - case VIDEO_PALETTE_GREY: - viddev->v4l_bufsize = width * height; - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_GREY palette"); - break; - } - - return map; -} - - -/** - * v4l_next - * Fetches a video frame from a v4l device - * - * Parameters: - * viddev Pointer to struct containing video device handle amd device parameters - * map Pointer to the buffer in which the function puts the new image - * width Width of image in pixels - * height Height of image in pixels - * - * Returns - * 0 Success - * V4L_FATAL_ERROR Fatal error - * Positive with bit 0 set and bit 1 unset - * Non fatal error (not implemented) - */ -int v4l_next(struct video_dev *viddev, unsigned char *map, int width, int height) -{ - int dev = viddev->fd; - int frame = viddev->v4l_curbuffer; - struct video_mmap vid_mmap; - unsigned char *cap_map; - - sigset_t set, old; - - /* MMAP method is used */ - vid_mmap.format = viddev->v4l_fmt; - vid_mmap.width = width; - vid_mmap.height = height; - - /* Block signals during IOCTL */ - sigemptyset(&set); - sigaddset(&set, SIGCHLD); - sigaddset(&set, SIGALRM); - 
sigaddset(&set, SIGUSR1); - sigaddset(&set, SIGTERM); - sigaddset(&set, SIGHUP); - pthread_sigmask (SIG_BLOCK, &set, &old); - - cap_map = viddev->v4l_buffers[viddev->v4l_curbuffer]; - viddev->v4l_curbuffer++; - - if (viddev->v4l_curbuffer >= viddev->v4l_maxbuffer) - viddev->v4l_curbuffer = 0; - - vid_mmap.frame = viddev->v4l_curbuffer; - - if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) { - MOTION_LOG(ALR, TYPE_VIDEO, SHOW_ERRNO, "%s: mcapture error in proc %d", - getpid()); - sigprocmask (SIG_UNBLOCK, &old, NULL); - return V4L_FATAL_ERROR; - } - - vid_mmap.frame = frame; - - if (ioctl(dev, VIDIOCSYNC, &vid_mmap.frame) == -1) { - MOTION_LOG(ALR, TYPE_VIDEO, SHOW_ERRNO, "%s: sync error in proc %d", - getpid()); - sigprocmask (SIG_UNBLOCK, &old, NULL); - } - - pthread_sigmask (SIG_UNBLOCK, &old, NULL); /*undo the signal blocking*/ - - switch (viddev->v4l_fmt) { - case VIDEO_PALETTE_RGB24: - conv_rgb24toyuv420p(map, cap_map, width, height); - break; - case VIDEO_PALETTE_YUYV: - case VIDEO_PALETTE_YUV422: - conv_yuv422to420p(map, cap_map, width, height); - break; - default: - memcpy(map, cap_map, viddev->v4l_bufsize); - } - - return 0; -} - -/** - * v4l_set_input - * Sets input for video device, adjust picture controls. - * If needed skip frames for round robin. 
- * - * Parameters: - * cnt Pointer to context struct - * viddev Pointer to struct containing video device handle amd device parameters - * map Pointer to the buffer in which the function puts the new image - * width Width of image in pixels - * height Height of image in pixels - * conf Pointer to config struct - * - * Returns nothing - */ -void v4l_set_input(struct context *cnt, struct video_dev *viddev, unsigned char *map, - int width, int height, struct config *conf) -{ - int dev = viddev->fd; - struct video_channel vid_chnl; - struct video_tuner vid_tuner; - unsigned long frequnits , freq; - int input = conf->input; - int norm = conf->norm; - int tuner_number = conf->tuner_number; - - frequnits = freq = conf->frequency; - - if (input != viddev->input || width != viddev->width || height != viddev->height || - freq != viddev->freq || tuner_number != viddev->tuner_number || norm != viddev->norm) { - unsigned int skip = conf->roundrobin_skip, i; - - if (freq) { - memset(&vid_tuner, 0, sizeof(struct video_tuner)); - vid_tuner.tuner = tuner_number; - - if (ioctl (dev, VIDIOCGTUNER, &vid_tuner) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGTUNER) tuner number %d", - tuner_number); - } else { - if (vid_tuner.flags & VIDEO_TUNER_LOW) - frequnits = freq * 16; /* steps of 1/16 KHz */ - else - frequnits = (freq * 10) / 625; - - if (ioctl(dev, VIDIOCSFREQ, &frequnits) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSFREQ) Frequency %ul", - frequnits); - return; - } - - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set Tuner to %d Frequency to %ul", - tuner_number, frequnits); - } - } - - memset(&vid_chnl, 0, sizeof(struct video_channel)); - vid_chnl.channel = input; - - if (ioctl (dev, VIDIOCGCHAN, &vid_chnl) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGCHAN) Input %d", - input); - } else { - vid_chnl.channel = input; - vid_chnl.norm = norm; - - if (ioctl (dev, VIDIOCSCHAN, &vid_chnl) == -1) { - 
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSCHAN) Input %d" - " Standard method %d", input, norm); - return; - } - - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set Input to %d Standard method to %d", - input, norm); - } - - v4l_picture_controls(cnt, viddev); - conf->input = viddev->input = input; - conf->width = viddev->width = width; - conf->height = viddev->height = height; - conf->frequency = viddev->freq = freq; - conf->tuner_number = viddev->tuner_number = tuner_number; - conf->norm = viddev->norm = norm; - /* skip a few frames if needed */ - for (i = 0; i < skip; i++) - v4l_next(viddev, map, width, height); - } else { - /* No round robin - we only adjust picture controls */ - v4l_picture_controls(cnt, viddev); - } -} -#endif /* !WITHOUT_V4L */ diff --git a/video2.c b/video2.c index e828f563..26a625b4 100644 --- a/video2.c +++ b/video2.c @@ -69,9 +69,9 @@ */ #include "motion.h" -#include "video.h" +#include "video2.h" -#if !defined(WITHOUT_V4L) && defined(MOTION_V4L2) +#ifndef WITHOUT_V4L2 #define u8 unsigned char #define u16 unsigned short @@ -138,6 +138,10 @@ #define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */ #endif +#ifndef V4L2_PIX_FMT_GREY +#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ +#endif + #define ZC301_V4L2_CID_DAC_MAGN V4L2_CID_PRIVATE_BASE #define ZC301_V4L2_CID_GREEN_BALANCE (V4L2_CID_PRIVATE_BASE+1) @@ -271,7 +275,7 @@ static int v4l2_select_input(struct config *conf, struct video_dev *viddev, if (xioctl(vid_source, VIDIOC_ENUMINPUT, &input) == -1) { MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to query input %d." 
- " VIDIOC_ENUMINPUT, if you use a WEBCAM change input value in conf by -1", + " VIDIOC_ENUMINPUT, if you use a WEBCAM change input value in conf by -1", input.index); return -1; } @@ -485,7 +489,8 @@ static int v4l2_set_pix_format(struct context *cnt, src_v4l2_t * vid_source, V4L2_PIX_FMT_YUV422P, V4L2_PIX_FMT_YUV420, /* most efficient for motion */ V4L2_PIX_FMT_Y10, - V4L2_PIX_FMT_Y12 + V4L2_PIX_FMT_Y12, + V4L2_PIX_FMT_GREY }; int array_size = sizeof(supported_formats) / sizeof(supported_formats[0]); @@ -1101,6 +1106,10 @@ int v4l2_next(struct context *cnt, struct video_dev *viddev, unsigned char *map, y10torgb24(cnt->imgs.common_buffer, the_buffer->ptr, width, height, shift); conv_rgb24toyuv420p(map, cnt->imgs.common_buffer, width, height); return 0; + case V4L2_PIX_FMT_GREY: + conv_greytoyuv420p(map, the_buffer->ptr, width, height); + return 0; + } } @@ -1144,4 +1153,4 @@ void v4l2_cleanup(struct video_dev *viddev) free(vid_source); viddev->v4l2_private = NULL; } -#endif /* !WITHOUT_V4L && MOTION_V4L2 */ +#endif /* !WITHOUT_V4L2 */ diff --git a/video.h b/video2.h similarity index 82% rename from video.h rename to video2.h index a39cd2c5..bca49b96 100644 --- a/video.h +++ b/video2.h @@ -1,4 +1,4 @@ -/* video.h +/* video2.h * * Include file for video.c * Copyright 2000 by Jeroen Vreeken (pe1rxq@amsat.org) @@ -12,16 +12,12 @@ #include -#if !defined(WITHOUT_V4L) -#if defined(HAVE_LINUX_VIDEODEV2_H) +#ifndef WITHOUT_V4L2 + #include -#elif defined(HAVE_LINUX_VIDEODEV_H) -#include -#elif defined(HAVE_SYS_VIDEOIO_H) -#include -#endif -#include "vloopback_motion.h" +#include "vloopback_motion2.h" #include "pwc-ioctl.h" + #endif /* video4linux stuff */ @@ -76,11 +72,11 @@ struct video_dev { int frames; /* Device type specific stuff: */ -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 /* v4l */ int v4l2; void *v4l2_private; - + int size_map; int v4l_fmt; unsigned char *v4l_buffers[2]; @@ -104,14 +100,9 @@ void bayer2rgb24(unsigned char *dst, unsigned char *src, long int width, 
long in int vid_do_autobright(struct context *cnt, struct video_dev *viddev); int mjpegtoyuv420p(unsigned char *map, unsigned char *cap_map, int width, int height, unsigned int size); void y10torgb24(unsigned char *map, unsigned char *cap_map, int width, int height, int shift); +void conv_greytoyuv420p(unsigned char *map, unsigned char *cap_map, int width, int height); -#ifndef WITHOUT_V4L -/* video functions, video.c */ -unsigned char *v4l_start(struct video_dev *viddev, int width, int height, - int input, int norm, unsigned long freq, int tuner_number); -void v4l_set_input(struct context *cnt, struct video_dev *viddev, unsigned char *map, int width, int height, - struct config *conf); -int v4l_next(struct video_dev *viddev, unsigned char *map, int width, int height); +#ifndef WITHOUT_V4L2 /* video2.c */ unsigned char *v4l2_start(struct context *cnt, struct video_dev *viddev, int width, int height, @@ -121,6 +112,6 @@ void v4l2_set_input(struct context *cnt, struct video_dev *viddev, unsigned char int v4l2_next(struct context *cnt, struct video_dev *viddev, unsigned char *map, int width, int height); void v4l2_close(struct video_dev *viddev); void v4l2_cleanup(struct video_dev *viddev); -#endif /* WITHOUT_V4L */ +#endif /* WITHOUT_V4L2 */ #endif /* _INCLUDE_VIDEO_H */ diff --git a/video_common.c b/video_common.c index d3b8a85b..97e8a650 100644 --- a/video_common.c +++ b/video_common.c @@ -11,7 +11,7 @@ /* For rotation */ #include "rotate.h" /* Already includes motion.h */ -#include "video.h" +#include "video2.h" #include "jpegutils.h" typedef unsigned char uint8_t; @@ -459,6 +459,55 @@ void y10torgb24(unsigned char *map, unsigned char *cap_map, int width, int heigh } } +/** + * conv_greytoyuv420p + * + * + */ +void conv_greytoyuv420p(unsigned char *map, unsigned char *cap_map, int width, int height) +{ + /* This is a adaptation of the rgb to yuv. 
+ * For grey, we use just a single color + */ + + unsigned char *y, *u, *v; + unsigned char *r; + int i, loop; + + r = cap_map; + + y = map; + u = y + width * height; + v = u + (width * height) / 4; + memset(u, 0, width * height / 4); + memset(v, 0, width * height / 4); + + for (loop = 0; loop < height; loop++) { + for (i = 0; i < width; i += 2) { + *y++ = (9796 ** r + 19235 ** r + 3736 ** r) >> 15; + *u += ((-4784 ** r - 9437 ** r + 14221 ** r) >> 17) + 32; + *v += ((20218 ** r - 16941 ** r - 3277 ** r) >> 17) + 32; + r++; + + *y++ = (9796 ** r + 19235 ** r + 3736 ** r) >> 15; + *u += ((-4784 ** r - 9437 ** r + 14221 ** r) >> 17) + 32; + *v += ((20218 ** r - 16941 ** r - 3277 ** r) >> 17) + 32; + r ++; + + u++; + v++; + } + + if ((loop & 1) == 0) { + u -= width / 2; + v -= width / 2; + } + } + + +} + + #define MAX2(x, y) ((x) > (y) ? (x) : (y)) #define MIN2(x, y) ((x) < (y) ? (x) : (y)) @@ -535,7 +584,7 @@ int vid_do_autobright(struct context *cnt, struct video_dev *viddev) Wrappers calling the actual capture routines *****************************************************************************/ -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 /* * Big lock for vid_start to ensure exclusive access to viddevs while adding * devices during initialization of each thread. 
@@ -569,7 +618,7 @@ void vid_cleanup(void) pthread_mutex_destroy(&vid_mutex); } -#endif /* WITHOUT_V4L */ +#endif /* WITHOUT_V4L2 */ /** * vid_close @@ -578,10 +627,10 @@ void vid_cleanup(void) */ void vid_close(struct context *cnt) { -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 struct video_dev *dev = viddevs; struct video_dev *prev = NULL; -#endif /* WITHOUT_V4L */ +#endif /* WITHOUT_V4L2 */ /* Cleanup the netcam part */ #ifdef HAVE_MMAL @@ -600,9 +649,9 @@ void vid_close(struct context *cnt) return; } -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 - /* Cleanup the v4l part */ + /* Cleanup the v4l2 part */ pthread_mutex_lock(&vid_mutex); while (dev) { if (dev->fd == cnt->video_dev) @@ -623,17 +672,14 @@ void vid_close(struct context *cnt) if (--dev->usage_count == 0) { MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Closing video device %s", dev->video_device); -#ifdef MOTION_V4L2 if (dev->v4l2) { v4l2_close(dev); v4l2_cleanup(dev); } else { -#endif close(dev->fd); munmap(viddevs->v4l_buffers[0], dev->size_map); -#ifdef MOTION_V4L2 } -#endif + dev->fd = -1; pthread_mutex_lock(&vid_mutex); /* Remove from list */ @@ -659,10 +705,10 @@ void vid_close(struct context *cnt) pthread_mutex_unlock(&dev->mutex); } } -#endif /* !WITHOUT_V4L */ +#endif /* !WITHOUT_V4L2 */ } -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 /** * vid_v4lx_start @@ -806,11 +852,8 @@ static int vid_v4lx_start(struct context *cnt) dev->owner = -1; dev->v4l_fmt = VIDEO_PALETTE_YUV420P; dev->fps = 0; -#ifdef MOTION_V4L2 - /* First lets try V4L2 and if it's not supported V4L1. 
*/ dev->v4l2 = 1; - if (!v4l2_start(cnt, dev, width, height, input, norm, frequency, tuner_number)) { /* * Restore width & height before test with v4l @@ -818,24 +861,9 @@ static int vid_v4lx_start(struct context *cnt) */ dev->width = width; dev->height = height; -#endif - -#if defined(HAVE_LINUX_VIDEODEV_H) && (!defined(WITHOUT_V4L)) - if (!v4l_start(dev, width, height, input, norm, frequency, tuner_number)) { - close(dev->fd); - pthread_mutexattr_destroy(&dev->attr); - pthread_mutex_destroy(&dev->mutex); - free(dev); - - pthread_mutex_unlock(&vid_mutex); - return -1; - } -#endif - -#ifdef MOTION_V4L2 dev->v4l2 = 0; } -#endif + if (dev->v4l2 == 0) { MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using V4L1"); } else { @@ -872,7 +900,7 @@ static int vid_v4lx_start(struct context *cnt) return fd; } -#endif /* !WITHOUT_V4L */ +#endif /* !WITHOUT_V4L2 */ /** * vid_start @@ -918,13 +946,13 @@ int vid_start(struct context *cnt) cnt->netcam = NULL; } } -#ifdef WITHOUT_V4L +#ifdef WITHOUT_V4L2 else MOTION_LOG(CRT, TYPE_VIDEO, NO_ERRNO, "%s: You must setup netcam_url"); #else else dev = vid_v4lx_start(cnt); -#endif /*WITHOUT_V4L */ +#endif /*WITHOUT_V4L2 */ return dev; } @@ -969,7 +997,7 @@ int vid_next(struct context *cnt, unsigned char *map) return netcam_next(cnt, map); } -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 /* * We start a new block so we can make declarations without breaking * gcc 2.95 or older. 
@@ -999,19 +1027,12 @@ int vid_next(struct context *cnt, unsigned char *map) dev->owner = cnt->threadnr; dev->frames = conf->roundrobin_frames; } -#ifdef MOTION_V4L2 + if (dev->v4l2) { v4l2_set_input(cnt, dev, map, width, height, conf); ret = v4l2_next(cnt, dev, map, width, height); - } else { -#endif -#if defined(HAVE_LINUX_VIDEODEV_H) && (!defined(WITHOUT_V4L)) - v4l_set_input(cnt, dev, map, width, height, conf); - ret = v4l_next(dev, map, width, height); -#endif -#ifdef MOTION_V4L2 } -#endif + if (--dev->frames <= 0) { dev->owner = -1; dev->frames = 0; @@ -1023,6 +1044,6 @@ int vid_next(struct context *cnt, unsigned char *map) rotate_map(cnt, map); } -#endif /*WITHOUT_V4L */ +#endif /*WITHOUT_V4L2 */ return ret; } diff --git a/video_freebsd.c b/video_freebsd.c index 66a01656..65090656 100644 --- a/video_freebsd.c +++ b/video_freebsd.c @@ -11,7 +11,7 @@ #include "rotate.h" /* Already includes motion.h */ #include "video_freebsd.h" -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 /* For the v4l stuff: */ #include @@ -950,7 +950,7 @@ void vid_cleanup(void) pthread_mutex_destroy(&vid_mutex); } -#endif /*WITHOUT_V4L*/ +#endif /*WITHOUT_V4L2*/ /** * vid_close @@ -959,7 +959,7 @@ void vid_cleanup(void) */ void vid_close(struct context *cnt) { -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 struct video_dev *dev = viddevs; struct video_dev *prev = NULL; #endif @@ -971,7 +971,7 @@ void vid_close(struct context *cnt) return; } -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 /* Cleanup the v4l part */ pthread_mutex_lock(&vid_mutex); @@ -1041,7 +1041,7 @@ void vid_close(struct context *cnt) pthread_mutex_unlock(&dev->mutex); } } -#endif /* !WITHOUT_V4L */ +#endif /* !WITHOUT_V4L2 */ } @@ -1061,7 +1061,7 @@ int vid_start(struct context *cnt) cnt->netcam = NULL; } } -#ifdef WITHOUT_V4L +#ifdef WITHOUT_V4L2 else MOTION_LOG(CRT, TYPE_VIDEO, NO_ERRNO, "%s: You must setup netcam_url"); #else @@ -1249,7 +1249,7 @@ int vid_start(struct context *cnt) pthread_mutex_unlock(&vid_mutex); } -#endif /* 
!WITHOUT_V4L */ +#endif /* !WITHOUT_V4L2 */ /* FIXME needed tuner device ?! */ return fd_bktr; @@ -1282,7 +1282,7 @@ int vid_next(struct context *cnt, unsigned char *map) return ret; } -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 struct video_dev *dev; int width, height; @@ -1329,6 +1329,6 @@ int vid_next(struct context *cnt, unsigned char *map) rotate_map(cnt, map); -#endif /* !WITHOUT_V4L */ +#endif /* !WITHOUT_V4L2 */ return ret; } diff --git a/video_freebsd.h b/video_freebsd.h index eb1e064c..1cd17836 100644 --- a/video_freebsd.h +++ b/video_freebsd.h @@ -11,7 +11,7 @@ #ifndef _INCLUDE_VIDEO_FREEBSD_H #define _INCLUDE_VIDEO_FREEBSD_H -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 #if defined(__NetBSD__) || defined(__OpenBSD__) #include @@ -20,7 +20,7 @@ #include #endif -#endif /* !WITHOUT_V4L */ +#endif /* !WITHOUT_V4L2 */ /* bktr (video4linux) stuff FIXME more modes not only these */ @@ -109,7 +109,7 @@ struct video_dev { int frames; /* Device type specific stuff: */ -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 int capture_method; int v4l_fmt; unsigned char *v4l_buffers[2]; @@ -124,7 +124,7 @@ int vid_start(struct context *); int vid_next(struct context *, unsigned char *); void vid_close(struct context *); -#ifndef WITHOUT_V4L +#ifndef WITHOUT_V4L2 void vid_init(void); void vid_cleanup(void); #endif diff --git a/vloopback_motion.c b/vloopback_motion.c deleted file mode 100644 index 36f8cf2f..00000000 --- a/vloopback_motion.c +++ /dev/null @@ -1,252 +0,0 @@ -/* - * vloopback_motion.c - * - * Video loopback functions for motion. - * Copyright 2000 by Jeroen Vreeken (pe1rxq@amsat.org) - * Copyright 2008 by Angel Carpintero (motiondevelop@gmail.com) - * This software is distributed under the GNU public license version 2 - * See also the file 'COPYING'. 
- * - */ -#include "vloopback_motion.h" -#if defined(HAVE_LINUX_VIDEODEV_H) && (!defined(WITHOUT_V4L)) && (!defined(BSD)) -#include -#include - -/** - * v4l_open_vidpipe - * - */ -static int v4l_open_vidpipe(void) -{ - int pipe_fd = -1; - char pipepath[255]; - char buffer[255]; - char *major; - char *minor; - struct utsname uts; - - if (uname(&uts) < 0) { - MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to execute uname"); - return -1; - } - - major = strtok(uts.release, "."); - minor = strtok(NULL, "."); - - if ((major == NULL) || (minor == NULL) || (strcmp(major, "2"))) { - MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to decipher OS version"); - return -1; - } - - if (strcmp(minor, "5") < 0) { - FILE *vloopbacks; - char *input; - char *istatus; - char *output; - - vloopbacks = fopen("/proc/video/vloopback/vloopbacks", "r"); - - if (!vloopbacks) { - MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed to open " - "'/proc/video/vloopback/vloopbacks'"); - return -1; - } - - /* Read vloopback version*/ - if (!fgets(buffer, sizeof(buffer), vloopbacks)) { - MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to read vloopback version"); - myfclose(vloopbacks); - return -1; - } - - fprintf(stderr, "\t%s", buffer); - - /* Read explanation line */ - - if (!fgets(buffer, sizeof(buffer), vloopbacks)) { - MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to read vloopback" - " explanation line"); - myfclose(vloopbacks); - return -1; - } - - while (fgets(buffer, sizeof(buffer), vloopbacks)) { - if (strlen(buffer) > 1) { - buffer[strlen(buffer)-1] = 0; - input = strtok(NULL, "\t"); - istatus = strtok(NULL, "\t"); - output = strtok(NULL, "\t"); - - if (istatus[0] == '-') { - snprintf(pipepath, sizeof(pipepath), "/dev/%s", input); - pipe_fd = open(pipepath, O_RDWR); - - if (pipe_fd >= 0) { - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: \tInput: /dev/%s " - "\tOutput: /dev/%s", input, output); - break; - } - } - } - } - - myfclose(vloopbacks); - } else { - DIR *dir; - 
struct dirent *dirp; - const char prefix[] = "/sys/class/video4linux/"; - char *ptr, *io; - int fd; - int low = 9999; - int tfd; - int tnum; - - if ((dir = opendir(prefix)) == NULL) { - MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed to open '%s'", - prefix); - return -1; - } - - while ((dirp = readdir(dir)) != NULL) { - if (!strncmp(dirp->d_name, "video", 5)) { - strncpy(buffer, prefix, sizeof(buffer)); - strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer)); - strncat(buffer, "/name", sizeof(buffer) - strlen(buffer)); - - if ((fd = open(buffer, O_RDONLY)) >= 0) { - if ((read(fd, buffer, sizeof(buffer)-1)) < 0) { - close(fd); - continue; - } - - ptr = strtok(buffer, " "); - - if (strcmp(ptr, "Video")) { - close(fd); - continue; - } - - major = strtok(NULL, " "); - minor = strtok(NULL, " "); - io = strtok(NULL, " \n"); - - if (strcmp(major, "loopback") || strcmp(io, "input")) { - close(fd); - continue; - } - - if ((ptr = strtok(buffer, " ")) == NULL) { - close(fd); - continue; - } - - tnum = atoi(minor); - - if (tnum < low) { - mystrcpy(buffer, "/dev/"); - strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer)); - if ((tfd = open(buffer, O_RDWR)) >= 0) { - strncpy(pipepath, buffer, sizeof(pipepath)); - - if (pipe_fd >= 0) - close(pipe_fd); - - pipe_fd = tfd; - low = tnum; - } - } - close(fd); - } - } - } - - closedir(dir); - - if (pipe_fd >= 0) - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Opened %s as input", - pipepath); - } - - return pipe_fd; -} - -/** - * v4l_startpipe - * - */ -static int v4l_startpipe(const char *dev_name, int width, int height, int type) -{ - int dev; - struct video_picture vid_pic; - struct video_window vid_win; - - if (!strcmp(dev_name, "-")) { - dev = v4l_open_vidpipe(); - } else { - dev = open(dev_name, O_RDWR); - MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Opened %s as input", - dev_name); - } - - if (dev < 0) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: Opening %s as input failed", - dev_name); - return -1; - } - 
- if (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)"); - return -1; - } - - vid_pic.palette = type; - - if (ioctl(dev, VIDIOCSPICT, &vid_pic) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSPICT)"); - return -1; - } - - if (ioctl(dev, VIDIOCGWIN, &vid_win) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGWIN)"); - return -1; - } - - vid_win.height = height; - vid_win.width = width; - - if (ioctl(dev, VIDIOCSWIN, &vid_win) == -1) { - MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSWIN)"); - return -1; - } - - return dev; -} - -/** - * v4l_putpipe - * - */ -static int v4l_putpipe(int dev, unsigned char *image, int size) -{ - return write(dev, image, size); -} - -/** - * vid_startpipe - * - */ -int vid_startpipe(const char *dev_name, int width, int height, int type) -{ - return v4l_startpipe(dev_name, width, height, type); -} - -/** - * vid_putpipe - * - */ -int vid_putpipe (int dev, unsigned char *image, int size) -{ - return v4l_putpipe(dev, image, size); -} -#endif /* !WITHOUT_V4L && !BSD */ diff --git a/vloopback_motion2.c b/vloopback_motion2.c new file mode 100644 index 00000000..0476a22a --- /dev/null +++ b/vloopback_motion2.c @@ -0,0 +1,225 @@ +/* + * vloopback_motion.c + * + * Video loopback functions for motion. + * Copyright 2000 by Jeroen Vreeken (pe1rxq@amsat.org) + * Copyright 2008 by Angel Carpintero (motiondevelop@gmail.com) + * This software is distributed under the GNU public license version 2 + * See also the file 'COPYING'. 
+ * + */ +#include "vloopback_motion2.h" +#if (!defined(WITHOUT_V4L2)) && (!defined(BSD)) +#include +#include +#include + +/** + * v4l2_open_vidpipe + * + */ +static int v4l2_open_vidpipe(void) +{ + int pipe_fd = -1; + char pipepath[255]; + char buffer[255]; + DIR *dir; + struct dirent *dirp; + const char prefix[] = "/sys/class/video4linux/"; + int fd,tfd; + int len,min; + + if ((dir = opendir(prefix)) == NULL) { + MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed to open '%s'", prefix); + return -1; + } + + while ((dirp = readdir(dir)) != NULL) { + if (!strncmp(dirp->d_name, "video", 5)) { + strncpy(buffer, prefix, sizeof(buffer)); + strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer)); + strncat(buffer, "/name", sizeof(buffer) - strlen(buffer)); + MOTION_LOG(NTC, TYPE_VIDEO, SHOW_ERRNO, "%s: Opening buffer: %s",buffer); + if ((fd = open(buffer, O_RDONLY)) >= 0) { + if ((len = read(fd, buffer, sizeof(buffer)-1)) < 0) { + close(fd); + continue; + } + buffer[len]=0; + MOTION_LOG(NTC, TYPE_VIDEO, SHOW_ERRNO, "%s: Read buffer: %s",buffer); + if (strncmp(buffer, "Loopback video device",21)) { /* weird stuff after minor */ + close(fd); + continue; + } + min = atoi(&buffer[21]); + strcpy(buffer, "/dev/"); + strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer)); + MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO,"%s: found video device '%s' %d", buffer,min); + if ((tfd = open(buffer, O_RDWR)) >= 0) { + strncpy(pipepath, buffer, sizeof(pipepath)); + if (pipe_fd >= 0) close(pipe_fd); + pipe_fd = tfd; + break; + } + } + close(fd); + } + } + + closedir(dir); + + if (pipe_fd >= 0) + MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Opened %s as pipe output", pipepath); + + return pipe_fd; +} + +typedef struct capent {const char *cap; int code;} capentT; + capentT cap_list[] ={ + {"V4L2_CAP_VIDEO_CAPTURE" ,0x00000001 }, + {"V4L2_CAP_VIDEO_CAPTURE_MPLANE" ,0x00001000 }, + {"V4L2_CAP_VIDEO_OUTPUT" ,0x00000002 }, + {"V4L2_CAP_VIDEO_OUTPUT_MPLANE" ,0x00002000 }, + 
{"V4L2_CAP_VIDEO_M2M" ,0x00004000 }, + {"V4L2_CAP_VIDEO_M2M_MPLANE" ,0x00008000 }, + {"V4L2_CAP_VIDEO_OVERLAY" ,0x00000004 }, + {"V4L2_CAP_VBI_CAPTURE" ,0x00000010 }, + {"V4L2_CAP_VBI_OUTPUT" ,0x00000020 }, + {"V4L2_CAP_SLICED_VBI_CAPTURE" ,0x00000040 }, + {"V4L2_CAP_SLICED_VBI_OUTPUT" ,0x00000080 }, + {"V4L2_CAP_RDS_CAPTURE" ,0x00000100 }, + {"V4L2_CAP_VIDEO_OUTPUT_OVERLAY" ,0x00000200 }, + {"V4L2_CAP_HW_FREQ_SEEK" ,0x00000400 }, + {"V4L2_CAP_RDS_OUTPUT" ,0x00000800 }, + {"V4L2_CAP_TUNER" ,0x00010000 }, + {"V4L2_CAP_AUDIO" ,0x00020000 }, + {"V4L2_CAP_RADIO" ,0x00040000 }, + {"V4L2_CAP_MODULATOR" ,0x00080000 }, + {"V4L2_CAP_SDR_CAPTURE" ,0x00100000 }, + {"V4L2_CAP_EXT_PIX_FORMAT" ,0x00200000 }, + {"V4L2_CAP_SDR_OUTPUT" ,0x00400000 }, + {"V4L2_CAP_READWRITE" ,0x01000000 }, + {"V4L2_CAP_ASYNCIO" ,0x02000000 }, + {"V4L2_CAP_STREAMING" ,0x04000000 }, + {"V4L2_CAP_DEVICE_CAPS" ,0x80000000 }, + {"Last",0} +}; + +static void show_vcap(struct v4l2_capability *cap) { + unsigned int vers = cap->version; + unsigned int c = cap->capabilities; + int i; + + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Pipe Device"); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: cap.driver: %s",cap->driver); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: cap.card: %s",cap->card); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: cap.bus_info: %s",cap->bus_info); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: cap.card: %u.%u.%u",(vers >> 16) & 0xFF,(vers >> 8) & 0xFF,vers & 0xFF); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Device capabilities"); + for (i=0;cap_list[i].code;i++) + if (c & cap_list[i].code) + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: %s",cap_list[i].cap); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: ------------------------"); +} + +static void show_vfmt(struct v4l2_format *v) { + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: type: type: %d",v->type); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.width: %d",v->fmt.pix.width); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.height: 
%d",v->fmt.pix.height); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.pixelformat: %d",v->fmt.pix.pixelformat); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.sizeimage: %d",v->fmt.pix.sizeimage); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.field: %d",v->fmt.pix.field); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.bytesperline: %d",v->fmt.pix.bytesperline); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.colorspace: %d",v->fmt.pix.colorspace); + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: ------------------------"); +} + +/** + * v4l2_startpipe + * + */ +static int v4l2_startpipe(const char *dev_name, int width, int height, int type) +{ + int dev; + struct v4l2_format v; + struct v4l2_capability vc; + + if (!strcmp(dev_name, "-")) { + dev = v4l2_open_vidpipe(); + } else { + dev = open(dev_name, O_RDWR); + MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Opened %s as pipe output", dev_name); + } + + if (dev < 0) { + MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: Opening %s as pipe output failed", dev_name); + return -1; + } + + if (ioctl(dev, VIDIOC_QUERYCAP, &vc) == -1) { + MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOC_QUERYCAP)"); + return -1; + } + + show_vcap(&vc); + + memset(&v, 0, sizeof(v)); + + v.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + if (ioctl(dev, VIDIOC_G_FMT, &v) == -1) { + MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOC_G_FMT)"); + return -1; + } + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Original pipe specifications"); + show_vfmt(&v); + + v.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + v.fmt.pix.width = width; + v.fmt.pix.height = height; + v.fmt.pix.pixelformat = type; + v.fmt.pix.sizeimage = 3 * width * height / 2; + v.fmt.pix.bytesperline = width; + v.fmt.pix.field = V4L2_FIELD_NONE; + v.fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Proposed pipe specifications"); + show_vfmt(&v); + + if (ioctl(dev,VIDIOC_S_FMT, &v) == -1) { + MOTION_LOG(ERR, TYPE_VIDEO, 
SHOW_ERRNO, "%s: ioctl (VIDIOC_S_FMT)"); + return -1; + } + + MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Final pipe specifications"); + show_vfmt(&v); + + return dev; +} + +/** + * v4l2_putpipe + * + */ +static int v4l2_putpipe(int dev, unsigned char *image, int size) +{ + return write(dev, image, size); +} + +/** + * vid_startpipe + * + */ +int vid_startpipe(const char *dev_name, int width, int height, int type) +{ + return v4l2_startpipe(dev_name, width, height, type); +} + +/** + * vid_putpipe + * + */ +int vid_putpipe (int dev, unsigned char *image, int size) +{ + return v4l2_putpipe(dev, image, size); +} +#endif /* !WITHOUT_V4L2 && !BSD */ diff --git a/vloopback_motion.h b/vloopback_motion2.h similarity index 100% rename from vloopback_motion.h rename to vloopback_motion2.h