Mirror of https://github.com/Motion-Project/motion.git (synced 2026-02-06 13:01:38 -05:00)
@@ -1,5 +1,9 @@
project(motion C)
cmake_minimum_required(VERSION 2.8)
unset(SQLITE3_FOUND CACHE )
unset(FFMPEG_FOUND CACHE )
include(CheckIncludeFiles)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99 -Wall")
@@ -24,7 +28,7 @@ option(WITH_MMAL "enable MMAL (Multi-Media Abstraction Layer API) support for Ra
option(WITH_MYSQL "enable MySQL database support" ${MYSQL_FOUND})
option(WITH_PGSQL "enable PostgreSQL database support" ${PostgreSQL_FOUND})
option(WITH_SQLITE3 "enable SQLite database support" ${SQLITE3_FOUND})
option(WITH_V4L "enable Video 4 Linux (2) webcam support" ON)
option(WITH_V4L2 "enable Video 4 Linux (2) webcam support" ON)
option(WITH_PWCBSD "enable PWC webcam support (BSD only)" OFF)
set(HAVE_FFMPEG ${WITH_FFMPEG})
@@ -32,15 +36,30 @@ set(HAVE_MMAL ${WITH_MMAL})
set(HAVE_MYSQL ${WITH_MYSQL})
set(HAVE_PGSQL ${WITH_PGSQL})
set(HAVE_SQLITE3 ${WITH_SQLITE3})
check_include_files("linux/videodev.h" HAVE_LINUX_VIDEODEV_H)
check_include_files("linux/videodev2.h" HAVE_LINUX_VIDEODEV2_H)
check_include_files("sys/videoio.h" HAVE_SYS_VIDEOIO_H)
if(${WITH_V4L})
set(WITHOUT_V4L OFF)
else(${WITH_V4L})
set(WITHOUT_V4L ON)
endif(${WITH_V4L})
set(MOTION_V4L2 ${HAVE_LINUX_VIDEODEV2_H})
set(WITHOUT_V4L2 ON)
if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT(WITH_PWCBSD))
check_include_files("dev/bktr/ioctl_bt848.h" HAVE_FREEBSD_BT848)
check_include_files("dev/bktr/ioctl_meteor.h" HAVE_FREEBSD_METEOR)
if(HAVE_FREEBSD_BT848 AND HAVE_FREEBSD_METEOR AND WITH_V4L2 )
set(WITHOUT_V4L2 OFF )
endif(HAVE_FREEBSD_BT848 AND HAVE_FREEBSD_METEOR AND WITH_V4L2 )
endif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT(WITH_PWCBSD))
if(CMAKE_SYSTEM_NAME MATCHES "NetBSD" OR CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
check_include_files("dev/ic/bt8xx.h" HAVE_NETOPEN_BT8XX)
if(HAVE_NETOPEN_BT8XX AND WITH_V4L2)
set(WITHOUT_V4L2 OFF )
endif(HAVE_NETOPEN_BT8XX AND WITH_V4L2)
endif(CMAKE_SYSTEM_NAME MATCHES "NetBSD" OR CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
check_include_files("linux/videodev2.h" HAVE_LINUX_VIDEODEV2_H)
if(HAVE_LINUX_VIDEODEV2_H AND WITH_V4L2)
set(WITHOUT_V4L2 OFF )
endif(HAVE_LINUX_VIDEODEV2_H AND WITH_V4L2)
endif(CMAKE_SYSTEM_NAME MATCHES "Linux")
set(PWCBSD WITH_PWCBSD)
configure_file(config.h.in "${CMAKE_CURRENT_SOURCE_DIR}/config.h")
@@ -58,11 +77,11 @@ configure_file(motion.init-FreeBSD.sh.in motion.init-FreeBSD.sh)
list(APPEND SRC_FILES
conf.c motion.c alg.c draw.c event.c ffmpeg.c jpegutils.c logger.c md5.c
netcam.c netcam_ftp.c netcam_jpeg.c netcam_rtsp.c netcam_wget.c
picture.c rotate.c stream.c track.c vloopback_motion.c webhttpd.c)
picture.c rotate.c stream.c track.c vloopback_motion2.c webhttpd.c)
if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT(WITH_PWCBSD))
list(APPEND SRC_FILES video_freebsd.c)
else()
list(APPEND SRC_FILES video2.c video.c video_common.c)
list(APPEND SRC_FILES video2.c video_common.c)
endif()
include_directories(${JPEG_INCLUDE_DIR})
@@ -95,6 +114,23 @@ if(WITH_MMAL)
list(APPEND SRC_FILES mmalcam.c)
endif(WITH_MMAL)
message("-- Configuration: " )
message("-- Package Name: " ${PACKAGE_NAME})
message("-- Package Version: " ${PACKAGE_VERSION})
message("-- Build System: " ${CMAKE_SYSTEM_NAME})
message("-- Config Dir: " ${sysconfdir})
message("-- Bin Dir: " ${BIN_PATH})
message("-- JPEG Include Dir: " ${JPEG_INCLUDE_DIR})
message("-- WITH_V4L2: " ${WITH_V4L2})
message("-- Without V4L2: " ${WITHOUT_V4L2})
message("-- With PWC BSD: " ${WITH_PWCBSD})
message("-- MYSQL: " ${WITH_MYSQL} " Include Dir: " ${MYSQL_INCLUDE_DIRS} )
message("-- SQLITE3: " ${WITH_SQLITE3} " Include Dir: " ${SQLITE3_INCLUDE_DIRS} )
message("-- PGSQL: " ${WITH_PGSQL} " Include Dir: " ${PostgreSQL_INCLUDE_DIRS} )
message("-- FFMPEG: " ${WITH_FFMPEG} " Include Dir: " ${FFMPEG_INCLUDE_DIRS} )
message("-- MMAL: " ${WITH_MMAL} )
message("-- CFLAGS: " ${CMAKE_C_FLAGS} )
add_executable(motion ${SRC_FILES})
target_link_libraries(motion ${LINK_LIBRARIES})
@@ -105,3 +141,4 @@ install(FILES motion.service motion.spec motion.init-Debian motion.init-FreeBSD.
DESTINATION "lib/${PROJECT_NAME}/examples" COMPONENT examples)
install(FILES CHANGELOG COPYING CREDITS README.md motion_guide.html mask1.png normal.jpg outputmotion1.jpg outputnormal1.jpg
DESTINATION "lib/${PROJECT_NAME}/doc" COMPONENT doc)

@@ -35,11 +35,12 @@ CFLAGS = @CFLAGS@ -Wall -DVERSION=\"@PACKAGE_VERSION@\" -Dsysconfdir=\"$(s
LDFLAGS = @LDFLAGS@
LIBS = @LIBS@ @MMAL_LIBS@ @FFMPEG_LIBS@
VIDEO_OBJ = @VIDEO@
OBJ = motion.o logger.o conf.o draw.o jpegutils.o vloopback_motion.o $(VIDEO_OBJ) \
OBJ = motion.o logger.o conf.o draw.o jpegutils.o \
vloopback_motion2.o $(VIDEO_OBJ) \
netcam.o netcam_ftp.o netcam_jpeg.o netcam_wget.o track.o \
alg.o event.o picture.o rotate.o webhttpd.o \
stream.o md5.o netcam_rtsp.o \
@FFMPEG_OBJ@ @MMAL_OBJ@
stream.o md5.o netcam_rtsp.o ffmpeg.o \
@MMAL_OBJ@
SRC = $(OBJ:.o=.c)
DOC = CHANGELOG COPYING CREDITS README.md motion_guide.html mask1.png normal.jpg outputmotion1.jpg outputnormal1.jpg
EXAMPLES = *.conf motion.service
19 conf.c
@@ -32,7 +32,7 @@
#if (defined(__FreeBSD__) && !defined(PWCBSD))
#include "video_freebsd.h"
#else
#include "video.h"
#include "video2.h"
#endif
#define EXTENSION ".conf"
@@ -116,6 +116,7 @@ struct config conf_template = {
.on_event_start = NULL,
.on_event_end = NULL,
.mask_file = NULL,
.mask_privacy = NULL,
.smart_mask_speed = 0,
#if defined(HAVE_MYSQL) || defined(HAVE_PGSQL) || defined(HAVE_SQLITE3)
.sql_log_image = 1,
@@ -292,6 +293,9 @@ config_param config_params[] = {
"# V4L2_PIX_FMT_YUYV : 15 'YUYV'\n"
"# V4L2_PIX_FMT_YUV422P : 16 '422P'\n"
"# V4L2_PIX_FMT_YUV420 : 17 'YU12'\n"
"# V4L2_PIX_FMT_Y10 : 18 'Y10'\n"
"# V4L2_PIX_FMT_Y12 : 19 'Y12'\n"
"# V4L2_PIX_FMT_GREY : 20 'GREY'\n"
"#",
0,
CONF_OFFSET(v4l2_palette),
@@ -620,6 +624,15 @@ config_param config_params[] = {
print_string
},
{
"mask_privacy",
"# PGM file to completely mask out an area of the image.\n"
"# Full path name to. (Default: not defined)",
0,
CONF_OFFSET(mask_privacy),
copy_string,
print_string
},
{
"smart_mask_speed",
"# Dynamically create a mask file during operation (default: 0)\n"
"# Adjust speed of mask changes from 0 (off) to 10 (fast)",
@@ -750,7 +763,6 @@ config_param config_params[] = {
copy_string,
print_string
},
#ifdef HAVE_FFMPEG
{
"ffmpeg_output_movies",
"\n############################################################\n"
@@ -843,7 +855,6 @@ config_param config_params[] = {
copy_bool,
print_bool
},
#endif /* HAVE_FFMPEG */
{
"use_extpipe",
"\n############################################################\n"
@@ -1024,7 +1035,6 @@ config_param config_params[] = {
copy_string,
print_string
},
#ifdef HAVE_FFMPEG
{
"movie_filename",
"# File path for motion triggered ffmpeg films (movies) relative to target_dir\n"
@@ -1050,7 +1060,6 @@ config_param config_params[] = {
copy_string,
print_string
},
#endif /* HAVE_FFMPEG */
{
"ipv6_enabled",
"\n############################################################\n"
1 conf.h
@@ -100,6 +100,7 @@ struct config {
char *on_event_start;
char *on_event_end;
const char *mask_file;
const char *mask_privacy;
int smart_mask_speed;
int sql_log_image;
int sql_log_snapshot;

@@ -9,10 +9,8 @@
#cmakedefine HAVE_PGSQL
#cmakedefine HAVE_SQLITE3
#cmakedefine PWCBSD
#cmakedefine MOTION_V4L2
#cmakedefine WITHOUT_V4L
#cmakedefine WITHOUT_V4L2
/* Optional headers */
#cmakedefine HAVE_LINUX_VIDEODEV_H
#cmakedefine HAVE_LINUX_VIDEODEV2_H
#cmakedefine HAVE_SYS_VIDEOIO_H
109 configure.ac
@@ -40,7 +40,7 @@ if test "${Darwin}" = ""; then
FreeBSD=`uname -a | grep "FreeBSD"`
if test "${FreeBSD}" = ""; then
AC_MSG_RESULT(no)
VIDEO="video.o video2.o video_common.o"
VIDEO="video2.o video_common.o"
else
AC_MSG_RESULT(yes)
if test "${LINUXTHREADS}" = "no"; then
@@ -52,7 +52,7 @@ if test "${Darwin}" = ""; then
fi
if test "${PWCBSD}" != "no"; then
VIDEO="video.o video2.o video_common.o"
VIDEO="video2.o video_common.o"
TEMP_CFLAGS="${CFLAGS} -I/usr/local/include -DPWCBSD"
else
VIDEO="video_freebsd.o"
@@ -71,7 +71,7 @@ else
VIDEO="video_freebsd.o"
FINK_LIB="-L/sw/lib"
Darwin="yes"
V4L="no"
V4L2="no"
AC_MSG_RESULT($Darwin)
fi
@@ -94,43 +94,43 @@ if test "${FreeBSD}" != "" && test "${PWCBSD}" = "no"; then
else
AC_MSG_RESULT(no)
fi
#
# Check to Exclude BKTR
#
BKTR="yes"
AC_ARG_WITH(bktr,
[ --without-bktr Exclude to use bktr subsystem , that usually useful
for devices as network cameras ( ONLY used in *BSD).
]
,
BKTR="$withval"
)
if test "${BKTR}" = "no"; then
TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L"
fi
#
# Check to Exclude BKTR
#
BKTR="yes"
AC_ARG_WITH(bktr,
[ --without-bktr Exclude to use bktr subsystem , that usually useful
for devices as network cameras ( ONLY used in *BSD).
] ,
BKTR="$withval")
if test "${BKTR}" = "no"; then
TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L2"
fi
else
#
# Check to Exclude V4L2
#
V4L2="yes"
AC_ARG_WITH(v4l2,
[ --without-v4l2 Exclude using v4l2 (video4linux2) subsystem.
Makes Motion so it only supports network cameras.
],
V4L2="$withval" )
#
# Check to Exclude V4L
#
V4L="yes"
AC_ARG_WITH(v4l,
[ --without-v4l Exclude using v4l (video4linux) subsystem.
Makes Motion so it only supports network cameras.
],
V4L="$withval"
)
if test "${V4L2}" = "no"; then
TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L2"
else
AC_CHECK_HEADERS(linux/videodev2.h,[V4L2="yes"],[V4L2="no"])
AC_MSG_CHECKING(for V4L2 support)
if test "${V4L2}" = "no"; then
AC_MSG_RESULT(no)
TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L2"
else
AC_MSG_RESULT(yes)
fi
fi
fi
if test "${V4L}" = "no"; then
TEMP_CFLAGS="${TEMP_CFLAGS} -DWITHOUT_V4L"
fi
if test "${FreeBSD}" != "" && test "${LINUXTHREADS}" != "no" ; then
AC_MSG_CHECKING(for linuxthreads)
@@ -740,34 +740,7 @@ fi
#Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS(stdio.h unistd.h stdint.h fcntl.h time.h signal.h sys/ioctl.h sys/mman.h linux/videodev.h linux/videodev2.h sys/param.h sys/types.h sys/videoio.h)
# Check if v4l2 is available
SUPPORTED_V4L2=false
if test "${V4L}" = "no"; then
AC_MSG_CHECKING(for V42L support)
AC_MSG_RESULT(skipping)
else
AC_CHECK_TYPE([struct v4l2_buffer],
[SUPPORTED_V4L2=true],
[SUPPORTED_V4L2=false],
[#include <sys/time.h>
#ifdef HAVE_LINUX_VIDEODEV2_H
#include <linux/videodev2.h>
#elif HAVE_LINUX_VIDEODEV_H
#include <linux/videodev.h>
#elif HAVE_SYS_VIDEOIO_H
#include <sys/videoio.h>
#endif])
AC_MSG_CHECKING(for V42L support)
if test x$SUPPORTED_V4L2 = xtrue; then
AC_MSG_RESULT(yes)
TEMP_CFLAGS="${TEMP_CFLAGS} -DMOTION_V4L2"
else
AC_MSG_RESULT(no)
fi
fi
AC_CHECK_HEADERS(stdio.h unistd.h stdint.h fcntl.h time.h signal.h sys/ioctl.h sys/mman.h linux/videodev2.h sys/param.h sys/types.h)
OPTIMIZECPU="yes"
@@ -1121,13 +1094,7 @@ if test "${FreeBSD}" != ""; then
fi
else
if test "${V4L}" = "yes"; then
echo "V4L support: Yes"
else
echo "V4L support: No"
fi
if test x$SUPPORTED_V4L2 = xtrue; then
if test "$V4L2" = "yes"; then
echo "V4L2 support: Yes"
else
echo "V4L2 support: No"
110 event.c
@@ -12,7 +12,7 @@
#include "picture.h" /* already includes motion.h */
#include "event.h"
#if (!defined(__FreeBSD__))
#include "video.h"
#include "video2.h"
#endif
/* Various functions (most doing the actual action) */
@@ -65,7 +65,7 @@ static const char *eventToString(motion_event e)
static void exec_command(struct context *cnt, char *command, char *filename, int filetype)
{
char stamp[PATH_MAX];
mystrftime(cnt, stamp, sizeof(stamp), command, &cnt->current_image->timestamp_tm, filename, filetype);
mystrftime(cnt, stamp, sizeof(stamp), command, &cnt->current_image->timestamp_tv, filename, filetype);
if (!fork()) {
int i;
@@ -100,7 +100,7 @@ static void exec_command(struct context *cnt, char *command, char *filename, int
static void event_newfile(struct context *cnt ATTRIBUTE_UNUSED,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy ATTRIBUTE_UNUSED, char *filename, void *ftype,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: File of type %ld saved to: %s",
(unsigned long)ftype, filename);
@@ -111,7 +111,7 @@ static void event_beep(struct context *cnt, motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy ATTRIBUTE_UNUSED,
char *filename ATTRIBUTE_UNUSED,
void *ftype ATTRIBUTE_UNUSED,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (!cnt->conf.quiet)
printf("\a");
@@ -128,7 +128,7 @@ static void event_beep(struct context *cnt, motion_event type ATTRIBUTE_UNUSED,
static void on_picture_save_command(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy ATTRIBUTE_UNUSED,
char *filename, void *arg, struct tm *tm ATTRIBUTE_UNUSED)
char *filename, void *arg, struct timeval *tv1 ATTRIBUTE_UNUSED)
{
int filetype = (unsigned long)arg;
@@ -143,7 +143,7 @@ static void on_motion_detected_command(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy1 ATTRIBUTE_UNUSED,
char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (cnt->conf.on_motion_detected)
exec_command(cnt, cnt->conf.on_motion_detected, NULL, 0);
@@ -154,7 +154,7 @@ static void on_motion_detected_command(struct context *cnt,
static void event_sqlnewfile(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy ATTRIBUTE_UNUSED,
char *filename, void *arg, struct tm *tm ATTRIBUTE_UNUSED)
char *filename, void *arg, struct timeval *tv1 ATTRIBUTE_UNUSED)
{
int sqltype = (unsigned long)arg;
@@ -170,7 +170,7 @@ static void event_sqlnewfile(struct context *cnt,
char sqlquery[PATH_MAX];
mystrftime(cnt, sqlquery, sizeof(sqlquery), cnt->conf.sql_query,
&cnt->current_image->timestamp_tm, filename, sqltype);
&cnt->current_image->timestamp_tv, filename, sqltype);
#ifdef HAVE_MYSQL
if (!strcmp(cnt->conf.database_type, "mysql")) {
@@ -260,7 +260,7 @@ static void on_area_command(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy1 ATTRIBUTE_UNUSED,
char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (cnt->conf.on_area_detected)
exec_command(cnt, cnt->conf.on_area_detected, NULL, 0);
@@ -270,7 +270,7 @@ static void on_event_start_command(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy1 ATTRIBUTE_UNUSED,
char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (cnt->conf.on_event_start)
exec_command(cnt, cnt->conf.on_event_start, NULL, 0);
@@ -280,7 +280,7 @@ static void on_event_end_command(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy1 ATTRIBUTE_UNUSED,
char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (cnt->conf.on_event_end)
exec_command(cnt, cnt->conf.on_event_end, NULL, 0);
@@ -290,7 +290,7 @@ static void event_stop_stream(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy1 ATTRIBUTE_UNUSED,
char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if ((cnt->conf.stream_port) && (cnt->stream.socket != -1))
stream_stop(cnt);
@@ -299,25 +299,25 @@ static void event_stop_stream(struct context *cnt,
static void event_stream_put(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (cnt->conf.stream_port)
stream_put(cnt, img);
}
#if defined(HAVE_LINUX_VIDEODEV_H) && !defined(WITHOUT_V4L) && !defined(__FreeBSD__)
#if !defined(WITHOUT_V4L2) && !defined(__FreeBSD__)
static void event_vid_putpipe(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *img, char *dummy ATTRIBUTE_UNUSED, void *devpipe,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (*(int *)devpipe >= 0) {
if (vid_putpipe(*(int *)devpipe, img, cnt->imgs.size) == -1)
MOTION_LOG(ERR, TYPE_EVENTS, SHOW_ERRNO, "%s: Failed to put image into video pipe");
}
}
#endif /* !WITHOUT_V4L && !__FreeBSD__ */
#endif /* !WITHOUT_V4L2 && !__FreeBSD__ */
const char *imageext(struct context *cnt)
{
@@ -330,7 +330,7 @@ const char *imageext(struct context *cnt)
static void event_image_detect(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *newimg, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv)
{
char fullfilename[PATH_MAX];
char filename[PATH_MAX];
@@ -347,7 +347,7 @@ static void event_image_detect(struct context *cnt,
else
imagepath = DEF_IMAGEPATH;
mystrftime(cnt, filename, sizeof(filename), imagepath, currenttime_tm, NULL, 0);
mystrftime(cnt, filename, sizeof(filename), imagepath, currenttime_tv, NULL, 0);
snprintf(fullfilename, PATH_MAX, "%s/%s.%s", cnt->conf.filepath, filename, imageext(cnt));
put_picture(cnt, fullfilename, newimg, FTYPE_IMAGE);
@@ -357,7 +357,7 @@ static void event_image_detect(struct context *cnt,
static void event_imagem_detect(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *newimg ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv)
{
struct config *conf = &cnt->conf;
char fullfilenamem[PATH_MAX];
@@ -376,7 +376,7 @@ static void event_imagem_detect(struct context *cnt,
else
imagepath = DEF_IMAGEPATH;
mystrftime(cnt, filename, sizeof(filename), imagepath, currenttime_tm, NULL, 0);
mystrftime(cnt, filename, sizeof(filename), imagepath, currenttime_tv, NULL, 0);
/* motion images gets same name as normal images plus an appended 'm' */
snprintf(filenamem, PATH_MAX, "%sm", filename);
snprintf(fullfilenamem, PATH_MAX, "%s/%s.%s", cnt->conf.filepath, filenamem, imageext(cnt));
@@ -388,7 +388,7 @@ static void event_imagem_detect(struct context *cnt,
static void event_image_snapshot(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv)
{
char fullfilename[PATH_MAX];
char filename[PATH_MAX];
@@ -411,7 +411,7 @@ static void event_image_snapshot(struct context *cnt,
else
snappath = DEF_SNAPPATH;
mystrftime(cnt, filepath, sizeof(filepath), snappath, currenttime_tm, NULL, 0);
mystrftime(cnt, filepath, sizeof(filepath), snappath, currenttime_tv, NULL, 0);
snprintf(filename, PATH_MAX, "%s.%s", filepath, imageext(cnt));
snprintf(fullfilename, PATH_MAX, "%s/%s", cnt->conf.filepath, filename);
put_picture(cnt, fullfilename, img, FTYPE_IMAGE_SNAPSHOT);
@@ -429,7 +429,7 @@ static void event_image_snapshot(struct context *cnt,
return;
}
} else {
mystrftime(cnt, filepath, sizeof(filepath), cnt->conf.snappath, currenttime_tm, NULL, 0);
mystrftime(cnt, filepath, sizeof(filepath), cnt->conf.snappath, currenttime_tv, NULL, 0);
snprintf(filename, PATH_MAX, "%s.%s", filepath, imageext(cnt));
snprintf(fullfilename, PATH_MAX, "%s/%s", cnt->conf.filepath, filename);
remove(fullfilename);
@@ -442,7 +442,7 @@ static void event_image_snapshot(struct context *cnt,
static void event_camera_lost(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *img ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm ATTRIBUTE_UNUSED)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (cnt->conf.on_camera_lost)
exec_command(cnt, cnt->conf.on_camera_lost, NULL, 0);
@@ -451,7 +451,7 @@ static void event_camera_lost(struct context *cnt,
static void on_movie_end_command(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy ATTRIBUTE_UNUSED, char *filename,
void *arg, struct tm *tm ATTRIBUTE_UNUSED)
void *arg, struct timeval *tv1 ATTRIBUTE_UNUSED)
{
int filetype = (unsigned long) arg;
@@ -462,7 +462,7 @@ static void on_movie_end_command(struct context *cnt,
static void event_extpipe_end(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (cnt->extpipe_open) {
cnt->extpipe_open = 0;
@@ -478,7 +478,7 @@ static void event_extpipe_end(struct context *cnt,
static void event_create_extpipe(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv)
{
if ((cnt->conf.useextpipe) && (cnt->conf.extpipe)) {
char stamp[PATH_MAX] = "";
@@ -497,7 +497,7 @@ static void event_create_extpipe(struct context *cnt,
moviepath);
}
mystrftime(cnt, stamp, sizeof(stamp), moviepath, currenttime_tm, NULL, 0);
mystrftime(cnt, stamp, sizeof(stamp), moviepath, currenttime_tv, NULL, 0);
snprintf(cnt->extpipefilename, PATH_MAX - 4, "%s/%s", cnt->conf.filepath, stamp);
/* Open a dummy file to check if path is correct */
@@ -522,12 +522,11 @@ static void event_create_extpipe(struct context *cnt,
myfclose(fd_dummy);
unlink(cnt->extpipefilename);
mystrftime(cnt, stamp, sizeof(stamp), cnt->conf.extpipe, currenttime_tm, cnt->extpipefilename, 0);
mystrftime(cnt, stamp, sizeof(stamp), cnt->conf.extpipe, currenttime_tv, cnt->extpipefilename, 0);
MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: pipe: %s",
stamp);
MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: cnt->moviefps: %d",
cnt->movie_fps);
MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: pipe: %s", stamp);
MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: cnt->moviefps: %d", cnt->movie_fps);
event(cnt, EVENT_FILECREATE, NULL, cnt->extpipefilename, (void *)FTYPE_MPEG, NULL);
cnt->extpipe = popen(stamp, "w");
@@ -545,7 +544,7 @@ static void event_create_extpipe(struct context *cnt,
static void event_extpipe_put(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED)
{
/* Check use_extpipe enabled and ext_pipe not NULL */
if ((cnt->conf.useextpipe) && (cnt->extpipe != NULL)) {
@@ -567,7 +566,7 @@ static void event_extpipe_put(struct context *cnt,
static void event_new_video(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy ATTRIBUTE_UNUSED, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *tv1 ATTRIBUTE_UNUSED)
{
cnt->movie_last_shot = -1;
@@ -578,7 +577,6 @@ static void event_new_video(struct context *cnt,
if (cnt->movie_fps < 2) cnt->movie_fps = 2;
}
#ifdef HAVE_FFMPEG
static void grey2yuv420p(unsigned char *u, unsigned char *v, int width, int height)
{
@@ -590,7 +588,7 @@ static void grey2yuv420p(unsigned char *u, unsigned char *v, int width, int heig
static void event_ffmpeg_newfile(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *currenttime_tm)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv)
{
int width = cnt->imgs.width;
int height = cnt->imgs.height;
@@ -612,7 +610,7 @@ static void event_ffmpeg_newfile(struct context *cnt,
else
moviepath = DEF_MOVIEPATH;
mystrftime(cnt, stamp, sizeof(stamp), moviepath, currenttime_tm, NULL, 0);
mystrftime(cnt, stamp, sizeof(stamp), moviepath, currenttime_tv, NULL, 0);
/*
* motion movies get the same name as normal movies plus an appended 'm'
@@ -691,7 +689,7 @@ static void event_ffmpeg_newfile(struct context *cnt,
if ((cnt->ffmpeg_output =
ffmpeg_open(codec, cnt->newfilename, y, u, v,
cnt->imgs.width, cnt->imgs.height, cnt->movie_fps, cnt->conf.ffmpeg_bps,
cnt->conf.ffmpeg_vbr,TIMELAPSE_NONE)) == NULL) {
cnt->conf.ffmpeg_vbr,TIMELAPSE_NONE, currenttime_tv)) == NULL) {
MOTION_LOG(ERR, TYPE_EVENTS, SHOW_ERRNO, "%s: ffopen_open error creating (new) file [%s]",
cnt->newfilename);
cnt->finish = 1;
@@ -719,7 +717,7 @@ static void event_ffmpeg_newfile(struct context *cnt,
if ((cnt->ffmpeg_output_debug =
ffmpeg_open(codec, cnt->motionfilename, y, u, v,
cnt->imgs.width, cnt->imgs.height, cnt->movie_fps, cnt->conf.ffmpeg_bps,
cnt->conf.ffmpeg_vbr,TIMELAPSE_NONE)) == NULL) {
cnt->conf.ffmpeg_vbr,TIMELAPSE_NONE,currenttime_tv)) == NULL) {
MOTION_LOG(ERR, TYPE_EVENTS, SHOW_ERRNO, "%s: ffopen_open error creating (motion) file [%s]",
cnt->motionfilename);
cnt->finish = 1;
@@ -734,7 +732,7 @@ static void event_ffmpeg_newfile(struct context *cnt,
static void event_ffmpeg_timelapse(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED, unsigned char *img,
char *dummy1 ATTRIBUTE_UNUSED, void *dummy2 ATTRIBUTE_UNUSED,
struct tm *currenttime_tm)
struct timeval *currenttime_tv)
{
int width = cnt->imgs.width;
int height = cnt->imgs.height;
@@ -755,7 +753,7 @@ static void event_ffmpeg_timelapse(struct context *cnt,
else
timepath = DEF_TIMEPATH;
mystrftime(cnt, tmp, sizeof(tmp), timepath, currenttime_tm, NULL, 0);
mystrftime(cnt, tmp, sizeof(tmp), timepath, currenttime_tv, NULL, 0);
/* PATH_MAX - 4 to allow for .mpg to be appended without overflow */
snprintf(cnt->timelapsefilename, PATH_MAX - 4, "%s/%s", cnt->conf.filepath, tmp);
@@ -786,14 +784,14 @@ static void event_ffmpeg_timelapse(struct context *cnt,
cnt->ffmpeg_timelapse =
ffmpeg_open(codec_mpg,cnt->timelapsefilename, y, u, v
,cnt->imgs.width, cnt->imgs.height, cnt->conf.frame_limit
,cnt->conf.ffmpeg_bps,cnt->conf.ffmpeg_vbr,TIMELAPSE_APPEND);
,cnt->conf.ffmpeg_bps,cnt->conf.ffmpeg_vbr,TIMELAPSE_APPEND,currenttime_tv);
} else {
MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: Timelapse using mpeg4 codec.");
MOTION_LOG(NTC, TYPE_EVENTS, NO_ERRNO, "%s: Events will be trigger new files");
cnt->ffmpeg_timelapse =
ffmpeg_open(codec_mpeg ,cnt->timelapsefilename, y, u, v
,cnt->imgs.width, cnt->imgs.height, cnt->conf.frame_limit
,cnt->conf.ffmpeg_bps,cnt->conf.ffmpeg_vbr,TIMELAPSE_NEW);
,cnt->conf.ffmpeg_bps,cnt->conf.ffmpeg_vbr,TIMELAPSE_NEW,currenttime_tv);
}
if (cnt->ffmpeg_timelapse == NULL){
@@ -816,7 +814,7 @@ static void event_ffmpeg_timelapse(struct context *cnt,
v = u + (width * height) / 4;
if (ffmpeg_put_other_image(cnt->ffmpeg_timelapse, y, u, v) == -1) {
if (ffmpeg_put_other_image(cnt->ffmpeg_timelapse, y, u, v,currenttime_tv) == -1) {
cnt->finish = 1;
cnt->restart = 0;
}
@@ -826,7 +824,7 @@ static void event_ffmpeg_timelapse(struct context *cnt,
static void event_ffmpeg_put(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *img, char *dummy1 ATTRIBUTE_UNUSED,
void *dummy2 ATTRIBUTE_UNUSED, struct tm *tm ATTRIBUTE_UNUSED)
void *dummy2 ATTRIBUTE_UNUSED, struct timeval *currenttime_tv)
{
if (cnt->ffmpeg_output) {
int width = cnt->imgs.width;
@@ -841,14 +839,14 @@ static void event_ffmpeg_put(struct context *cnt,
v = u + (width * height) / 4;
if (ffmpeg_put_other_image(cnt->ffmpeg_output, y, u, v) == -1) {
if (ffmpeg_put_other_image(cnt->ffmpeg_output, y, u, v, currenttime_tv) == -1) {
cnt->finish = 1;
cnt->restart = 0;
}
}
if (cnt->ffmpeg_output_debug) {
if (ffmpeg_put_image(cnt->ffmpeg_output_debug) == -1) {
if (ffmpeg_put_image(cnt->ffmpeg_output_debug, currenttime_tv) == -1) {
cnt->finish = 1;
cnt->restart = 0;
}
@@ -859,7 +857,7 @@ static void event_ffmpeg_closefile(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy1 ATTRIBUTE_UNUSED,
char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (cnt->ffmpeg_output) {
@@ -885,7 +883,7 @@ static void event_ffmpeg_timelapseend(struct context *cnt,
motion_event type ATTRIBUTE_UNUSED,
unsigned char *dummy1 ATTRIBUTE_UNUSED,
char *dummy2 ATTRIBUTE_UNUSED, void *dummy3 ATTRIBUTE_UNUSED,
struct tm *tm ATTRIBUTE_UNUSED)
struct timeval *tv1 ATTRIBUTE_UNUSED)
{
if (cnt->ffmpeg_timelapse) {
free(cnt->ffmpeg_timelapse->udata);
@@ -897,7 +895,6 @@ static void event_ffmpeg_timelapseend(struct context *cnt,
}
}
#endif /* HAVE_FFMPEG */
/*
@@ -956,7 +953,7 @@ struct event_handlers event_handlers[] = {
EVENT_IMAGE_SNAPSHOT,
event_image_snapshot
},
#if defined(HAVE_LINUX_VIDEODEV_H) && !defined(WITHOUT_V4L) && !defined(__FreeBSD__)
#if !defined(WITHOUT_V4L2) && !defined(__FreeBSD__)
{
EVENT_IMAGE,
event_vid_putpipe
@@ -965,7 +962,7 @@ struct event_handlers event_handlers[] = {
EVENT_IMAGEM,
event_vid_putpipe
},
#endif /* !WITHOUT_V4L && !__FreeBSD__ */
#endif /* !WITHOUT_V4L2 && !__FreeBSD__ */
{
EVENT_STREAM,
event_stream_put
@@ -974,7 +971,6 @@ struct event_handlers event_handlers[] = {
EVENT_FIRSTMOTION,
event_new_video
},
#ifdef HAVE_FFMPEG
{
EVENT_FIRSTMOTION,
event_ffmpeg_newfile
@@ -999,7 +995,6 @@ struct event_handlers event_handlers[] = {
EVENT_TIMELAPSEEND,
event_ffmpeg_timelapseend
},
#endif /* HAVE_FFMPEG */
{
EVENT_FILECLOSE,
on_movie_end_command
@@ -1045,13 +1040,12 @@ struct event_handlers event_handlers[] = {
* as a code reading friendly solution to avoid a stream of compiler warnings in gcc 4.0.
*/
void event(struct context *cnt, motion_event type, unsigned char *image,
char *filename, void *eventdata, struct tm *tm)
char *filename, void *eventdata, struct timeval *tv1)
{
int i=-1;
while (event_handlers[++i].handler) {
if (type == event_handlers[i].type)
event_handlers[i].handler(cnt, type, image, filename, eventdata,
tm);
event_handlers[i].handler(cnt, type, image, filename, eventdata, tv1);
}
}
6 event.h
@@ -34,12 +34,10 @@ typedef enum {
EVENT_LAST,
} motion_event;
typedef void(* event_handler)(struct context *, motion_event, unsigned char *,
char *, void *, struct tm *);
char *, void *, struct timeval *);
void event(struct context *, motion_event, unsigned char *, char *, void *,
struct tm *);
void event(struct context *, motion_event, unsigned char *, char *, void *, struct timeval *);
const char * imageext(struct context *);
#endif /* _INCLUDE_EVENT_H_ */
658 ffmpeg.c
@@ -23,14 +23,12 @@
#include "config.h"
#ifdef HAVE_FFMPEG
#include "ffmpeg.h"
#include "motion.h"
#define AVSTREAM_CODEC_PTR(avs_ptr) (avs_ptr->codec)
#ifdef HAVE_FFMPEG
#define AVSTREAM_CODEC_PTR(avs_ptr) (avs_ptr->codec)
/****************************************************************************
 * The section below is the "my" section of functions.
@@ -169,6 +167,7 @@ static int timelapse_exists(const char *fname){
}
return 0;
}
static int timelapse_append(struct ffmpeg *ffmpeg, AVPacket pkt){
FILE *file;
@@ -219,39 +218,6 @@ static int ffmpeg_lockmgr_cb(void **arg, enum AVLockOp op)
return 1;
}
/**
 * ffmpeg_init
 * Initializes for libavformat.
 *
 * Returns
 * Function returns nothing.
 */
void ffmpeg_init(void){
int ret;
MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,
"%s: ffmpeg libavcodec version %d.%d.%d"
" libavformat version %d.%d.%d"
, LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, LIBAVCODEC_VERSION_MICRO
, LIBAVFORMAT_VERSION_MAJOR, LIBAVFORMAT_VERSION_MINOR, LIBAVFORMAT_VERSION_MICRO);
av_register_all();
avcodec_register_all();
avformat_network_init();
av_log_set_callback((void *)ffmpeg_avcodec_log);
ret = av_lockmgr_register(ffmpeg_lockmgr_cb);
if (ret < 0)
{
MOTION_LOG(EMG, TYPE_ALL, SHOW_ERRNO, "%s: av_lockmgr_register failed (%d)", ret);
exit(1);
}
}
void ffmpeg_finalise(void) {
avformat_network_deinit();
}
/**
 * get_oformat
 * Obtains the output format used for the specified codec. For mpeg4 codecs,
@@ -323,6 +289,294 @@ static AVOutputFormat *get_oformat(const char *codec, char *filename){
return of;
}
/**
 * ffmpeg_cleanups
 * Clean up ffmpeg struct if something was wrong.
 *
 * Returns
 * Function returns nothing.
 */
void ffmpeg_cleanups(struct ffmpeg *ffmpeg){
/* Close each codec */
if (ffmpeg->video_st) {
avcodec_close(AVSTREAM_CODEC_PTR(ffmpeg->video_st));
}
free(ffmpeg->video_outbuf);
av_freep(&ffmpeg->picture);
avformat_free_context(ffmpeg->oc);
free(ffmpeg);
}
/**
 * ffmpeg_put_frame
 * Encodes and writes a video frame using the av_write_frame API. This is
 * a helper function for ffmpeg_put_image and ffmpeg_put_other_image.
 *
 * Returns
 * Number of bytes written or -1 if any error happens.
 */
int ffmpeg_put_frame(struct ffmpeg *ffmpeg, AVFrame *pic, const struct timeval *tv1){
/**
 * Since the logic,return values and conditions changed so
 * dramatically between versions, the encoding of the frame
 * is 100% blocked based upon Libav/FFMpeg version
 */
#if (LIBAVFORMAT_VERSION_MAJOR >= 55) || ((LIBAVFORMAT_VERSION_MAJOR == 54) && (LIBAVFORMAT_VERSION_MINOR > 6))
int retcd;
int got_packet_ptr;
AVPacket pkt;
char errstr[128];
int64_t pts_interval;
av_init_packet(&pkt); /* Init static structure. */
if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
pkt.stream_index = ffmpeg->video_st->index;
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.data = (uint8_t *)pic;
pkt.size = sizeof(AVPicture);
} else {
pkt.data = NULL;
pkt.size = 0;
retcd = avcodec_encode_video2(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
&pkt, pic, &got_packet_ptr);
if (retcd < 0 ){
av_strerror(retcd, errstr, sizeof(errstr));
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video:%s",errstr);
//Packet is freed upon failure of encoding
return -1;
}
if (got_packet_ptr == 0){
//Buffered packet. Throw special return code
my_packet_unref(pkt);
return -2;
}
}
if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
retcd = timelapse_append(ffmpeg, pkt);
} else if (ffmpeg->tlapse == TIMELAPSE_NEW) {
retcd = av_write_frame(ffmpeg->oc, &pkt);
} else {
pts_interval = ((1000000L * (tv1->tv_sec - ffmpeg->start_time.tv_sec)) + tv1->tv_usec - ffmpeg->start_time.tv_usec) + 10000;
// MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: interval:%d img_sec:%d img_usec:%d strt_sec:%d strt_usec:%d "
// ,pts_interval,tv1->tv_sec,tv1->tv_usec,ffmpeg->start_time.tv_sec,ffmpeg->start_time.tv_usec);
if (pts_interval < 0){
/* This can occur when we have pre-capture frames. Reset start time of video. */
ffmpeg->start_time.tv_sec = tv1->tv_sec ;
ffmpeg->start_time.tv_usec = tv1->tv_usec ;
pts_interval = 1;
}
pkt.pts = av_rescale_q(pts_interval,(AVRational){1, 1000000L},ffmpeg->video_st->time_base);
if (pkt.pts <= ffmpeg->last_pts) pkt.pts = ffmpeg->last_pts + 1;
pkt.dts = pkt.pts;
retcd = av_write_frame(ffmpeg->oc, &pkt);
ffmpeg->last_pts = pkt.pts;
}
// MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: pts:%d dts:%d stream:%d interval %d",pkt.pts,pkt.dts,ffmpeg->video_st->time_base.den,pts_interval);
my_packet_unref(pkt);
if (retcd != 0) {
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing video frame");
ffmpeg_cleanups(ffmpeg);
return -1;
}
return retcd;
#else // Old versions of Libav/FFmpeg
int retcd;
AVPacket pkt;
av_init_packet(&pkt); /* Init static structure. */
pkt.stream_index = ffmpeg->video_st->index;
if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
// Raw video case.
pkt.size = sizeof(AVPicture);
pkt.data = (uint8_t *)pic;
pkt.flags |= AV_PKT_FLAG_KEY;
} else {
retcd = avcodec_encode_video(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
ffmpeg->video_outbuf,
ffmpeg->video_outbuf_size, pic);
if (retcd < 0 ){
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video");
my_packet_unref(pkt);
return -1;
}
if (retcd == 0 ){
// No bytes encoded => buffered=>special handling
my_packet_unref(pkt);
return -2;
}
pkt.size = retcd;
pkt.data = ffmpeg->video_outbuf;
pkt.pts = AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->pts;
if (AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
}
if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
retcd = timelapse_append(ffmpeg, pkt);
} else {
retcd = av_write_frame(ffmpeg->oc, &pkt);
}
my_packet_unref(pkt);
if (retcd != 0) {
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing video frame");
ffmpeg_cleanups(ffmpeg);
return -1;
}
return retcd;
#endif
}
/**
 * ffmpeg_prepare_frame
 * Allocates and prepares a picture frame by setting up the U, Y and V pointers in
 * the frame according to the passed pointers.
 *
 * Returns
 * NULL If the allocation fails.
 *
 * The returned AVFrame pointer must be freed after use.
 */
AVFrame *ffmpeg_prepare_frame(struct ffmpeg *ffmpeg, unsigned char *y,
unsigned char *u, unsigned char *v)
{
AVFrame *picture;
picture = my_frame_alloc();
if (!picture) {
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Could not alloc frame");
return NULL;
}
/* Take care of variable bitrate setting. */
if (ffmpeg->vbr)
picture->quality = ffmpeg->vbr;
/* Setup pointers and line widths. */
picture->data[0] = y;
picture->data[1] = u;
picture->data[2] = v;
picture->linesize[0] = ffmpeg->c->width;
picture->linesize[1] = ffmpeg->c->width / 2;
picture->linesize[2] = ffmpeg->c->width / 2;
picture->format = ffmpeg->c->pix_fmt;
picture->width = ffmpeg->c->width;
picture->height = ffmpeg->c->height;
return picture;
}
/**
 * ffmpeg_avcodec_log
 * Handle any logging output from the ffmpeg library avcodec.
 *
 * Parameters
 * *ignoreme A pointer we will ignore
 * errno_flag The error number value
 * fmt Text message to be used for log entry in printf() format.
 * ap List of variables to be used in formatted message text.
 *
 * Returns
 * Function returns nothing.
 */
void ffmpeg_avcodec_log(void *ignoreme ATTRIBUTE_UNUSED, int errno_flag, const char *fmt, va_list vl)
{
char buf[1024];
char *end;
/* Flatten the message coming in from avcodec. */
vsnprintf(buf, sizeof(buf), fmt, vl);
end = buf + strlen(buf);
if (end > buf && end[-1] == '\n')
{
*--end = 0;
}
/* If the debug_level is correct then send the message to the motion logging routine.
 * While it is not really desired to look for specific text in the message, there does
 * not seem another option. The specific messages indicated are lost camera which we
 * have our own message and UE golomb is not something that is possible for us to fix.
 * It is caused by the stream sent from the source camera
 */
if(strstr(buf, "No route to host") == NULL){
if (strstr(buf, "Invalid UE golomb") != NULL) {
MOTION_LOG(DBG, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf);
} else if (errno_flag <= AV_LOG_ERROR) {
MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf);
} else if (errno_flag <= AV_LOG_WARNING) {
MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf);
} else if (errno_flag < AV_LOG_DEBUG){
MOTION_LOG(INF, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf);
}
}
}
#endif /* HAVE_FFMPEG */
/****************************************************************************
 ****************************************************************************
 ****************************************************************************/
/**
 * ffmpeg_init
 * Initializes for libavformat.
 *
 * Returns
 * Function returns nothing.
 */
void ffmpeg_init(void){
#ifdef HAVE_FFMPEG
int ret;
MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,
"%s: ffmpeg libavcodec version %d.%d.%d"
" libavformat version %d.%d.%d"
, LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, LIBAVCODEC_VERSION_MICRO
, LIBAVFORMAT_VERSION_MAJOR, LIBAVFORMAT_VERSION_MINOR, LIBAVFORMAT_VERSION_MICRO);
av_register_all();
avcodec_register_all();
avformat_network_init();
av_log_set_callback((void *)ffmpeg_avcodec_log);
ret = av_lockmgr_register(ffmpeg_lockmgr_cb);
if (ret < 0)
{
MOTION_LOG(EMG, TYPE_ALL, SHOW_ERRNO, "%s: av_lockmgr_register failed (%d)", ret);
exit(1);
}
#else /* No FFMPEG */
MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,"%s: No ffmpeg functionality included");
#endif /* HAVE_FFMPEG */
}
void ffmpeg_finalise(void) {
#ifdef HAVE_FFMPEG
avformat_network_deinit();
#else /* No FFMPEG */
MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,"%s: No ffmpeg functionality included");
#endif /* HAVE_FFMPEG */
}
/**
 * ffmpeg_open
 * Opens an mpeg file using the new libavformat method. Both mpeg1
@@ -335,8 +589,11 @@ static AVOutputFormat *get_oformat(const char *codec, char *filename){
 */
struct ffmpeg *ffmpeg_open(const char *ffmpeg_video_codec, char *filename,
unsigned char *y, unsigned char *u, unsigned char *v,
int width, int height, int rate, int bps, int vbr, int tlapse)
int width, int height, int rate, int bps, int vbr, int tlapse,
const struct timeval *tv1)
{
#ifdef HAVE_FFMPEG
AVCodecContext *c;
AVCodec *codec;
struct ffmpeg *ffmpeg;
@@ -536,7 +793,10 @@ struct ffmpeg *ffmpeg_open(const char *ffmpeg_video_codec, char *filename,
}
}
}
gettimeofday(&ffmpeg->start_time, NULL);
ffmpeg->start_time.tv_sec = tv1->tv_sec;
ffmpeg->start_time.tv_usec= tv1->tv_usec;
/* Write the stream header, For the TIMELAPSE_APPEND
 * we write the data via standard file I/O so we close the
@@ -556,25 +816,33 @@ struct ffmpeg *ffmpeg_open(const char *ffmpeg_video_codec, char *filename,
}
return ffmpeg;
}
/**
 * ffmpeg_cleanups
 * Clean up ffmpeg struct if something was wrong.
 *
 * Returns
 * Function returns nothing.
 */
void ffmpeg_cleanups(struct ffmpeg *ffmpeg){
/* Close each codec */
if (ffmpeg->video_st) {
avcodec_close(AVSTREAM_CODEC_PTR(ffmpeg->video_st));
}
free(ffmpeg->video_outbuf);
av_freep(&ffmpeg->picture);
avformat_free_context(ffmpeg->oc);
free(ffmpeg);
#else /* No FFMPEG */
MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,"%s: No ffmpeg functionality included");
struct ffmpeg *ffmpeg;
ffmpeg = mymalloc(sizeof(struct ffmpeg));
ffmpeg_video_codec = ffmpeg_video_codec;
filename = filename;
y = y;
u = u;
v = v;
width = width;
height = height;
rate = rate;
bps = bps;
vbr = vbr;
tlapse = tlapse;
ffmpeg->dummy = 0;
tv1 = tv1;
return ffmpeg;
#endif /* HAVE_FFMPEG */
}
/**
 * ffmpeg_close
 * Closes a video file.
@@ -583,6 +851,7 @@ void ffmpeg_cleanups(struct ffmpeg *ffmpeg){
 * Function returns nothing.
 */
void ffmpeg_close(struct ffmpeg *ffmpeg){
#ifdef HAVE_FFMPEG
if (ffmpeg->tlapse != TIMELAPSE_APPEND) {
av_write_trailer(ffmpeg->oc);
@@ -600,43 +869,12 @@ void ffmpeg_close(struct ffmpeg *ffmpeg){
}
}
avformat_free_context(ffmpeg->oc);
#endif // HAVE_FFMPEG
free(ffmpeg);
}
/**
 * ffmpeg_put_image
 * Puts the image pointed to by ffmpeg->picture.
 *
 * Returns
 * value returned by ffmpeg_put_frame call.
 */
int ffmpeg_put_image(struct ffmpeg *ffmpeg){
/* A return code of -2 is thrown by the put_frame
 * when a image is buffered. For timelapse, we absolutely
 * never want a frame buffered so we keep sending back the
 * the same pic until it flushes or fails in a different way
 */
int retcd;
int cnt = 0;
retcd = ffmpeg_put_frame(ffmpeg, ffmpeg->picture);
while ((retcd == -2) && (ffmpeg->tlapse != TIMELAPSE_NONE)) {
retcd = ffmpeg_put_frame(ffmpeg, ffmpeg->picture);
cnt++;
if (cnt > 50){
MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Excessive attempts to clear buffered packet");
retcd = -1;
}
}
//non timelapse buffered is ok
if (retcd == -2){
retcd = 0;
MOTION_LOG(DBG, TYPE_ENCODER, NO_ERRNO, "%s: Buffered packet");
}
return retcd;
}
/**
 * ffmpeg_put_other_image
 * Puts an arbitrary picture defined by y, u and v.
@@ -647,7 +885,8 @@ int ffmpeg_put_image(struct ffmpeg *ffmpeg){
 * 0 if error allocating picture.
 */
int ffmpeg_put_other_image(struct ffmpeg *ffmpeg, unsigned char *y,
unsigned char *u, unsigned char *v){
unsigned char *u, unsigned char *v, const struct timeval *tv1){
#ifdef HAVE_FFMPEG
AVFrame *picture;
int retcd = 0;
int cnt = 0;
@@ -661,9 +900,9 @@ int ffmpeg_put_other_image(struct ffmpeg *ffmpeg, unsigned char *y,
 * never want a frame buffered so we keep sending back the
 * the same pic until it flushes or fails in a different way
 */
retcd = ffmpeg_put_frame(ffmpeg, picture);
retcd = ffmpeg_put_frame(ffmpeg, picture, tv1);
while ((retcd == -2) && (ffmpeg->tlapse != TIMELAPSE_NONE)) {
retcd = ffmpeg_put_frame(ffmpeg, picture);
retcd = ffmpeg_put_frame(ffmpeg, picture, tv1);
cnt++;
if (cnt > 50){
MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Excessive attempts to clear buffered packet");
@@ -677,213 +916,58 @@ int ffmpeg_put_other_image(struct ffmpeg *ffmpeg, unsigned char *y,
|
||||
}
|
||||
av_free(picture);
|
||||
}
|
||||
|
||||
return retcd;
|
||||
}
|
||||
/**
|
||||
* ffmpeg_put_frame
|
||||
* Encodes and writes a video frame using the av_write_frame API. This is
|
||||
* a helper function for ffmpeg_put_image and ffmpeg_put_other_image.
|
||||
*
|
||||
* Returns
|
||||
* Number of bytes written or -1 if any error happens.
|
||||
*/
|
||||
int ffmpeg_put_frame(struct ffmpeg *ffmpeg, AVFrame *pic){
|
||||
/**
|
||||
* Since the logic,return values and conditions changed so
|
||||
* dramatically between versions, the encoding of the frame
|
||||
* is 100% blocked based upon Libav/FFMpeg version
|
||||
*/
|
||||
#if (LIBAVFORMAT_VERSION_MAJOR >= 55) || ((LIBAVFORMAT_VERSION_MAJOR == 54) && (LIBAVFORMAT_VERSION_MINOR > 6))
|
||||
int retcd;
|
||||
int got_packet_ptr;
|
||||
AVPacket pkt;
|
||||
char errstr[128];
|
||||
struct timeval tv1;
|
||||
int64_t pts_interval;
|
||||
|
||||
gettimeofday(&tv1, NULL);
|
||||
|
||||
av_init_packet(&pkt); /* Init static structure. */
|
||||
if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
pkt.stream_index = ffmpeg->video_st->index;
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.data = (uint8_t *)pic;
|
||||
pkt.size = sizeof(AVPicture);
|
||||
} else {
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
retcd = avcodec_encode_video2(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
|
||||
&pkt, pic, &got_packet_ptr);
|
||||
if (retcd < 0 ){
|
||||
av_strerror(retcd, errstr, sizeof(errstr));
|
||||
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video:%s",errstr);
|
||||
//Packet is freed upon failure of encoding
|
||||
return -1;
|
||||
}
|
||||
if (got_packet_ptr == 0){
|
||||
//Buffered packet. Throw special return code
|
||||
my_packet_unref(pkt);
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
|
||||
retcd = timelapse_append(ffmpeg, pkt);
|
||||
} else if (ffmpeg->tlapse == TIMELAPSE_NEW) {
|
||||
retcd = av_write_frame(ffmpeg->oc, &pkt);
|
||||
} else {
|
||||
pts_interval = ((1000000L * (tv1.tv_sec - ffmpeg->start_time.tv_sec)) + tv1.tv_usec - ffmpeg->start_time.tv_usec) + 10000;
|
||||
pkt.pts = av_rescale_q(pts_interval,(AVRational){1, 1000000L},ffmpeg->video_st->time_base);
|
||||
if (pkt.pts <= ffmpeg->last_pts) pkt.pts = ffmpeg->last_pts + 1;
|
||||
pkt.dts = pkt.pts;
|
||||
retcd = av_write_frame(ffmpeg->oc, &pkt);
|
||||
ffmpeg->last_pts = pkt.pts;
|
||||
}
|
||||
// MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: pts:%d dts:%d stream:%d interval %d",pkt.pts,pkt.dts,ffmpeg->video_st->time_base.den,pts_interval);
|
||||
my_packet_unref(pkt);
|
||||
|
||||
if (retcd != 0) {
|
||||
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing video frame");
|
||||
ffmpeg_cleanups(ffmpeg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return retcd;
|
||||
|
||||
#else // Old versions of Libav/FFmpeg
|
||||
int retcd;
|
||||
AVPacket pkt;
|
||||
#else
|
||||
|
||||
av_init_packet(&pkt); /* Init static structure. */
|
||||
pkt.stream_index = ffmpeg->video_st->index;
|
||||
if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
// Raw video case.
|
||||
pkt.size = sizeof(AVPicture);
|
||||
pkt.data = (uint8_t *)pic;
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
} else {
|
||||
retcd = avcodec_encode_video(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
|
||||
ffmpeg->video_outbuf,
|
||||
ffmpeg->video_outbuf_size, pic);
|
||||
if (retcd < 0 ){
|
||||
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video");
|
||||
my_packet_unref(pkt);
|
||||
return -1;
|
||||
}
|
||||
if (retcd == 0 ){
|
||||
// No bytes encoded => buffered=>special handling
|
||||
my_packet_unref(pkt);
|
||||
return -2;
|
||||
}
|
||||
ffmpeg = ffmpeg;
|
||||
y = y;
|
||||
u = u;
|
||||
v = v;
|
||||
tv1 = tv1;
|
||||
return 0;
|
||||
|
||||
pkt.size = retcd;
|
||||
pkt.data = ffmpeg->video_outbuf;
|
||||
pkt.pts = AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->pts;
|
||||
if (AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->key_frame)
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
}
|
||||
if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
|
||||
retcd = timelapse_append(ffmpeg, pkt);
|
||||
} else {
|
||||
retcd = av_write_frame(ffmpeg->oc, &pkt);
|
||||
}
|
||||
my_packet_unref(pkt);
|
||||
|
||||
if (retcd != 0) {
|
||||
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing video frame");
|
||||
ffmpeg_cleanups(ffmpeg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return retcd;
|
||||
|
||||
#endif
|
||||
#endif // HAVE_FFMPEG
|
||||
}
|
||||
|
||||
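Editor's note: the non-timelapse branch above derives each packet's timestamp from wall-clock time rather than a frame counter. A minimal sketch of that arithmetic, pulled out into a standalone helper for clarity; the helper name and parameters are illustrative only and are not part of Motion's API.

#include <stdint.h>
#include <sys/time.h>
#include <libavutil/rational.h>
#include <libavutil/mathematics.h>

/* Illustrative helper: elapsed wall-clock microseconds since the movie was
 * opened are rescaled into the stream time base and forced to increase
 * monotonically, mirroring the pts_interval / av_rescale_q logic above. */
static int64_t sketch_next_pts(const struct timeval *start,
                               const struct timeval *now,
                               AVRational stream_time_base,
                               int64_t last_pts)
{
    int64_t usec = (1000000L * (now->tv_sec - start->tv_sec))
                   + (now->tv_usec - start->tv_usec) + 10000;
    int64_t pts = av_rescale_q(usec, (AVRational){1, 1000000L}, stream_time_base);
    if (pts <= last_pts)            /* av_write_frame() rejects non-increasing pts */
        pts = last_pts + 1;
    return pts;
}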
/**
* ffmpeg_prepare_frame
* Allocates and prepares a picture frame by setting up the U, Y and V pointers in
* the frame according to the passed pointers.
* ffmpeg_put_image
* Puts the image pointed to by ffmpeg->picture.
*
* Returns
* NULL If the allocation fails.
*
* The returned AVFrame pointer must be freed after use.
* value returned by ffmpeg_put_frame call.
*/
AVFrame *ffmpeg_prepare_frame(struct ffmpeg *ffmpeg, unsigned char *y,
unsigned char *u, unsigned char *v)
{
AVFrame *picture;
int ffmpeg_put_image(struct ffmpeg *ffmpeg, const struct timeval *tv1){

picture = my_frame_alloc();

if (!picture) {
MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Could not alloc frame");
return NULL;
}

/* Take care of variable bitrate setting. */
if (ffmpeg->vbr)
picture->quality = ffmpeg->vbr;


/* Setup pointers and line widths. */
picture->data[0] = y;
picture->data[1] = u;
picture->data[2] = v;
picture->linesize[0] = ffmpeg->c->width;
picture->linesize[1] = ffmpeg->c->width / 2;
picture->linesize[2] = ffmpeg->c->width / 2;

picture->format = ffmpeg->c->pix_fmt;
picture->width = ffmpeg->c->width;
picture->height = ffmpeg->c->height;

return picture;
}
/**
* ffmpeg_avcodec_log
* Handle any logging output from the ffmpeg library avcodec.
*
* Parameters
* *ignoreme A pointer we will ignore
* errno_flag The error number value
* fmt Text message to be used for log entry in printf() format.
* ap List of variables to be used in formatted message text.
*
* Returns
* Function returns nothing.
*/
void ffmpeg_avcodec_log(void *ignoreme ATTRIBUTE_UNUSED, int errno_flag, const char *fmt, va_list vl)
{
char buf[1024];
char *end;

/* Flatten the message coming in from avcodec. */
vsnprintf(buf, sizeof(buf), fmt, vl);
end = buf + strlen(buf);
if (end > buf && end[-1] == '\n')
{
*--end = 0;
}

/* If the debug_level is correct then send the message to the motion logging routine.
* While it is not really desired to look for specific text in the message, there does
* not seem another option. The specific messages indicated are lost camera which we
* have our own message and UE golomb is not something that is possible for us to fix.
* It is caused by the stream sent from the source camera
#ifdef HAVE_FFMPEG
/* A return code of -2 is thrown by the put_frame
* when a image is buffered. For timelapse, we absolutely
* never want a frame buffered so we keep sending back the
* the same pic until it flushes or fails in a different way
*/
if(strstr(buf, "No route to host") == NULL){
if (strstr(buf, "Invalid UE golomb") != NULL) {
MOTION_LOG(DBG, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf);
} else if (errno_flag <= AV_LOG_ERROR) {
MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf);
} else if (errno_flag <= AV_LOG_WARNING) {
MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf);
} else if (errno_flag < AV_LOG_DEBUG){
MOTION_LOG(INF, TYPE_ENCODER, NO_ERRNO, "%s: %s", buf);
int retcd;
int cnt = 0;

retcd = ffmpeg_put_frame(ffmpeg, ffmpeg->picture, tv1);
while ((retcd == -2) && (ffmpeg->tlapse != TIMELAPSE_NONE)) {
retcd = ffmpeg_put_frame(ffmpeg, ffmpeg->picture, tv1);
cnt++;
if (cnt > 50){
MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Excessive attempts to clear buffered packet");
retcd = -1;
}
}
//non timelapse buffered is ok
if (retcd == -2){
retcd = 0;
MOTION_LOG(DBG, TYPE_ENCODER, NO_ERRNO, "%s: Buffered packet");
}

return retcd;
#else
ffmpeg = ffmpeg;
tv1 = tv1;
return 0;
#endif // HAVE_FFMPEG
}

#endif /* HAVE_FFMPEG */

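Editor's note: ffmpeg_avcodec_log() only filters and forwards messages; it has no effect until it is handed to libavutil, and the hunks above do not show that call site. A sketch for orientation only, assuming the registration happens during ffmpeg_init(); av_log_set_callback() is the standard libavutil entry point for installing a log handler.

#include <libavutil/log.h>

/* Sketch only: route libav* logging through Motion's logger.
 * The actual call site is not part of this hunk. */
static void sketch_register_av_logging(void)
{
    av_log_set_callback(ffmpeg_avcodec_log);
}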
ffmpeg.h
@@ -7,6 +7,10 @@
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#define TIMELAPSE_NONE 0 /* No timelapse, regular processing */
|
||||
#define TIMELAPSE_APPEND 1 /* Use append version of timelapse */
|
||||
#define TIMELAPSE_NEW 2 /* Use create new file version of timelapse */
|
||||
|
||||
#ifdef HAVE_FFMPEG
|
||||
|
||||
#include <errno.h>
|
||||
@@ -28,14 +32,7 @@
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* HAVE_FFMPEG */
|
||||
|
||||
#define TIMELAPSE_NONE 0 /* No timelapse, regular processing */
|
||||
#define TIMELAPSE_APPEND 1 /* Use append version of timelapse */
|
||||
#define TIMELAPSE_NEW 2 /* Use create new file version of timelapse */
|
||||
|
||||
struct ffmpeg {
|
||||
#ifdef HAVE_FFMPEG
|
||||
AVFormatContext *oc;
|
||||
AVStream *video_st;
|
||||
AVCodecContext *c;
|
||||
@@ -50,16 +47,33 @@ struct ffmpeg {
|
||||
int tlapse;
|
||||
int64_t last_pts;
|
||||
struct timeval start_time;
|
||||
#else
|
||||
int dummy;
|
||||
#endif
|
||||
};
|
||||
|
||||
/* Initialize FFmpeg stuff. Needs to be called before ffmpeg_open. */
|
||||
void ffmpeg_init(void);
|
||||
/** Finalise ffmpeg; call only after all threads have finished */
|
||||
void ffmpeg_finalise(void);
|
||||
AVFrame *my_frame_alloc(void);
|
||||
void my_frame_free(AVFrame *frame);
|
||||
int ffmpeg_put_frame(struct ffmpeg *, AVFrame *, const struct timeval *tv1);
|
||||
void ffmpeg_cleanups(struct ffmpeg *);
|
||||
AVFrame *ffmpeg_prepare_frame(struct ffmpeg *, unsigned char *,
|
||||
unsigned char *, unsigned char *);
|
||||
int my_image_get_buffer_size(enum MyPixelFormat pix_fmt, int width, int height);
|
||||
int my_image_copy_to_buffer(AVFrame *frame,uint8_t *buffer_ptr,enum MyPixelFormat pix_fmt,int width,int height,int dest_size);
|
||||
int my_image_fill_arrays(AVFrame *frame,uint8_t *buffer_ptr,enum MyPixelFormat pix_fmt,int width,int height);
|
||||
void my_packet_unref(AVPacket pkt);
|
||||
|
||||
|
||||
#else /* No FFMPEG */
|
||||
|
||||
struct ffmpeg {
|
||||
void *udata;
|
||||
int dummy;
|
||||
struct timeval start_time;
|
||||
};
|
||||
|
||||
#endif /* HAVE_FFMPEG */
|
||||
|
||||
/* Now the functions that are ok for both situations */
|
||||
void ffmpeg_init(void);
|
||||
void ffmpeg_finalise(void);
|
||||
struct ffmpeg *ffmpeg_open(
|
||||
const char *ffmpeg_video_codec,
|
||||
char *filename,
|
||||
@@ -71,38 +85,19 @@ struct ffmpeg *ffmpeg_open(
|
||||
int rate, /* framerate, fps */
|
||||
int bps, /* bitrate; bits per second */
|
||||
int vbr, /* variable bitrate */
|
||||
int tlapse
|
||||
int tlapse,
|
||||
const struct timeval *tv1
|
||||
);
|
||||
|
||||
/* Puts the image pointed to by the picture member of struct ffmpeg. */
|
||||
int ffmpeg_put_image(struct ffmpeg *);
|
||||
|
||||
/* Puts the image defined by u, y and v (YUV420 format). */
|
||||
int ffmpeg_put_image(struct ffmpeg *, const struct timeval *tv1);
|
||||
int ffmpeg_put_other_image(
|
||||
struct ffmpeg *ffmpeg,
|
||||
unsigned char *y,
|
||||
unsigned char *u,
|
||||
unsigned char *v
|
||||
unsigned char *v,
|
||||
const struct timeval *tv1
|
||||
);
|
||||
|
||||
/* Closes the mpeg file. */
|
||||
void ffmpeg_close(struct ffmpeg *);
|
||||
|
||||
/* Setup an avcodec log handler. */
|
||||
void ffmpeg_avcodec_log(void *, int, const char *, va_list);
|
||||
|
||||
#ifdef HAVE_FFMPEG
|
||||
AVFrame *my_frame_alloc(void);
|
||||
void my_frame_free(AVFrame *frame);
|
||||
int ffmpeg_put_frame(struct ffmpeg *, AVFrame *);
|
||||
void ffmpeg_cleanups(struct ffmpeg *);
|
||||
AVFrame *ffmpeg_prepare_frame(struct ffmpeg *, unsigned char *,
|
||||
unsigned char *, unsigned char *);
|
||||
int my_image_get_buffer_size(enum MyPixelFormat pix_fmt, int width, int height);
|
||||
int my_image_copy_to_buffer(AVFrame *frame,uint8_t *buffer_ptr,enum MyPixelFormat pix_fmt,int width,int height,int dest_size);
|
||||
int my_image_fill_arrays(AVFrame *frame,uint8_t *buffer_ptr,enum MyPixelFormat pix_fmt,int width,int height);
|
||||
void my_packet_unref(AVPacket pkt);
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _INCLUDE_FFMPEG_H_ */
|
||||
|
||||
@@ -66,6 +66,7 @@ videodevice /dev/video0
|
||||
# V4L2_PIX_FMT_YUV420 : 17 'YU12'
|
||||
# V4L2_PIX_FMT_Y10 : 18 'Y10'
|
||||
# V4L2_PIX_FMT_Y12 : 19 'Y12'
|
||||
# V4L2_PIX_FMT_GREY : 20 'GREY'
|
||||
#
|
||||
v4l2_palette 17
|
||||
|
||||
@@ -86,7 +87,7 @@ norm 0
|
||||
frequency 0
|
||||
|
||||
# Override the power line frequency for the webcam. (normally not necessary)
|
||||
# Values:
|
||||
# Values:
|
||||
# -1 : Do not modify device setting
|
||||
# 0 : Power line frequency Disabled
|
||||
# 1 : 50hz
|
||||
@@ -223,6 +224,10 @@ despeckle_filter EedDl
|
||||
# Full path name to. (Default: not defined)
|
||||
; mask_file value
|
||||
|
||||
# PGM file to completely mask out a area of image.
|
||||
# Full path name to. (Default: not defined)
|
||||
# mask_privacy value
|
||||
|
||||
# Dynamically create a mask file during operation (default: 0)
|
||||
# Adjust speed of mask changes from 0 (off) to 10 (fast)
|
||||
smart_mask_speed 0
|
||||
@@ -337,7 +342,7 @@ ffmpeg_variable_bitrate 0
|
||||
# hevc - H.265 / HEVC (High Efficiency Video Coding)
|
||||
ffmpeg_video_codec mpeg4
|
||||
|
||||
# When creating videos, should frames be duplicated in order
|
||||
# When creating videos, should frames be duplicated in order
|
||||
# to keep up with the requested frames per second
|
||||
# (default: true)
|
||||
ffmpeg_duplicate_frames true
|
||||
@@ -732,11 +737,13 @@ quiet on
|
||||
############################################################
|
||||
|
||||
# Output images to a video4linux loopback device
|
||||
# The value '-' means next available (default: not defined)
|
||||
# Specify the device associated with the loopback device
|
||||
# For example /dev/video1 (default: not defined)
|
||||
; video_pipe value
|
||||
|
||||
# Output motion images to a video4linux loopback device
|
||||
# The value '-' means next available (default: not defined)
|
||||
# Specify the device associated with the loopback device
|
||||
# For example /dev/video1 (default: not defined)
|
||||
; motion_video_pipe value
|
||||
|
||||
|
||||
|
||||
motion.1
@@ -1805,7 +1805,7 @@ Default: Not Defined
Description:
.fi
.RS
Output images to a video4linux loopback device. The value '-' means next available
Output images to a video4linux loopback device.
.RE
.RE

@@ -1818,7 +1818,7 @@ Default: Not Defined
Description:
.fi
.RS
Output motion images to a video4linux loopback device. The value '-' means next available
Output motion images to a video4linux loopback device.
.RE
.RE


motion.h
@@ -82,7 +82,7 @@
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
/*
|
||||
* The macro below defines a version of sleep using nanosleep
|
||||
* If a signal such as SIG_CHLD interrupts the sleep we just continue sleeping
|
||||
*/
|
||||
@@ -91,7 +91,7 @@
|
||||
tv.tv_sec = (seconds); \
|
||||
tv.tv_nsec = (nanoseconds); \
|
||||
while (nanosleep(&tv, &tv) == -1); \
|
||||
}
|
||||
}
|
||||
|
||||
#define CLEAR(x) memset(&(x), 0, sizeof(x))
|
||||
|
||||
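Editor's note: the SLEEP() macro touched in the hunk above loops on nanosleep() so that a signal-interrupted sleep resumes with the remaining time instead of returning early. An equivalent standalone form, with an illustrative name, is shown here for reference.

#include <time.h>

/* Equivalent of the SLEEP(seconds, nanoseconds) macro above: nanosleep()
 * writes the unslept remainder back into tv, so a sleep interrupted by a
 * signal such as SIGCHLD simply continues where it left off. */
static void sketch_sleep(time_t seconds, long nanoseconds)
{
    struct timespec tv;
    tv.tv_sec = seconds;
    tv.tv_nsec = nanoseconds;
    while (nanosleep(&tv, &tv) == -1)
        ;
}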
@@ -211,7 +211,7 @@ struct images;
|
||||
#include "mmalcam.h"
|
||||
#endif
|
||||
|
||||
/*
|
||||
/*
|
||||
* Structure to hold images information
|
||||
* The idea is that this should have all information about a picture e.g. diffs, timestamp etc.
|
||||
* The exception is the label information, it uses a lot of memory
|
||||
@@ -230,13 +230,12 @@ struct images;
|
||||
struct image_data {
|
||||
unsigned char *image;
|
||||
int diffs;
|
||||
time_t timestamp; /* Timestamp when image was captured */
|
||||
struct tm timestamp_tm;
|
||||
struct timeval timestamp_tv;
|
||||
int shot; /* Sub second timestamp count */
|
||||
|
||||
/*
|
||||
* Movement center to img center distance
|
||||
* Note: Dist is calculated distX*distX + distY*distY
|
||||
/*
|
||||
* Movement center to img center distance
|
||||
* Note: Dist is calculated distX*distX + distY*distY
|
||||
*/
|
||||
unsigned long cent_dist;
|
||||
|
||||
@@ -247,24 +246,24 @@ struct image_data {
|
||||
int total_labels;
|
||||
};
|
||||
|
||||
/*
|
||||
/*
|
||||
* DIFFERENCES BETWEEN imgs.width, conf.width AND rotate_data.cap_width
|
||||
* (and the corresponding height values, of course)
|
||||
* ===========================================================================
|
||||
* Location Purpose
|
||||
*
|
||||
*
|
||||
* conf The values in conf reflect width and height set in the
|
||||
* configuration file. These can be set via http remote control,
|
||||
* configuration file. These can be set via http remote control,
|
||||
* but they are not used internally by Motion, so it won't break
|
||||
* anything. These values are transferred to imgs in vid_start.
|
||||
*
|
||||
* imgs The values in imgs are the actual output dimensions. Normally
|
||||
* the output dimensions are the same as the capture dimensions,
|
||||
* but for 90 or 270 degrees rotation, they are not. E.g., if
|
||||
* but for 90 or 270 degrees rotation, they are not. E.g., if
|
||||
* you capture at 320x240, and rotate 90 degrees, the output
|
||||
* dimensions are 240x320.
|
||||
* These values are set from the conf values in vid_start, or
|
||||
* from the first JPEG image in netcam_start. For 90 or 270
|
||||
* These values are set from the conf values in vid_start, or
|
||||
* from the first JPEG image in netcam_start. For 90 or 270
|
||||
* degrees rotation, they are swapped in rotate_init.
|
||||
*
|
||||
* rotate_data The values in rotate_data are named cap_width and cap_height,
|
||||
@@ -292,13 +291,16 @@ struct images {
|
||||
unsigned char *smartmask;
|
||||
unsigned char *smartmask_final;
|
||||
unsigned char *common_buffer;
|
||||
|
||||
unsigned char *mask_privacy; /* Buffer for the privacy mask values */
|
||||
|
||||
int *smartmask_buffer;
|
||||
int *labels;
|
||||
int *labelsize;
|
||||
int width;
|
||||
int height;
|
||||
int type;
|
||||
int picture_type; /* Output picture type IMAGE_JPEG, IMAGE_PPM */
|
||||
int picture_type; /* Output picture type IMAGE_JPEG, IMAGE_PPM */
|
||||
int size;
|
||||
int motionsize;
|
||||
int labelgroup_max;
|
||||
@@ -311,7 +313,7 @@ struct images {
|
||||
struct rotdata {
|
||||
/* Temporary buffer for 90 and 270 degrees rotation. */
|
||||
unsigned char *temp_buf;
|
||||
/*
|
||||
/*
|
||||
* Degrees to rotate; copied from conf.rotate_deg. This is the value
|
||||
* that is actually used. The value of conf.rotate_deg cannot be used
|
||||
* because it can be changed by motion-control, and changing rotation
|
||||
@@ -319,8 +321,8 @@ struct rotdata {
|
||||
*/
|
||||
int degrees;
|
||||
/*
|
||||
* Capture width and height - different from output width and height if
|
||||
* rotating 90 or 270 degrees.
|
||||
* Capture width and height - different from output width and height if
|
||||
* rotating 90 or 270 degrees.
|
||||
*/
|
||||
int cap_width;
|
||||
int cap_height;
|
||||
@@ -398,7 +400,7 @@ struct context {
|
||||
unsigned int moved;
|
||||
unsigned int pause;
|
||||
int missing_frame_counter; /* counts failed attempts to fetch picture frame from camera */
|
||||
unsigned int lost_connection;
|
||||
unsigned int lost_connection;
|
||||
|
||||
int video_dev;
|
||||
int pipe;
|
||||
@@ -406,7 +408,7 @@ struct context {
|
||||
|
||||
struct stream stream;
|
||||
int stream_count;
|
||||
|
||||
|
||||
#if defined(HAVE_MYSQL) || defined(HAVE_PGSQL) || defined(HAVE_SQLITE3)
|
||||
int sql_mask;
|
||||
#endif
|
||||
@@ -428,14 +430,42 @@ struct context {
|
||||
char extpipefilename[PATH_MAX];
|
||||
int movie_last_shot;
|
||||
|
||||
#ifdef HAVE_FFMPEG
|
||||
struct ffmpeg *ffmpeg_output;
|
||||
struct ffmpeg *ffmpeg_output_debug;
|
||||
struct ffmpeg *ffmpeg_timelapse;
|
||||
struct ffmpeg *ffmpeg_smartmask;
|
||||
char timelapsefilename[PATH_MAX];
|
||||
char motionfilename[PATH_MAX];
|
||||
#endif
|
||||
|
||||
int area_minx[9], area_miny[9], area_maxx[9], area_maxy[9];
|
||||
int areadetect_eventnbr;
|
||||
/* ToDo Determine why we need these...just put it all into prepare? */
|
||||
unsigned long long int timenow, timebefore;
|
||||
|
||||
unsigned int rate_limit;
|
||||
time_t lastframetime;
|
||||
int minimum_frame_time_downcounter;
|
||||
unsigned int get_image; /* Flag used to signal that we capture new image when we run the loop */
|
||||
|
||||
unsigned int text_size_factor;
|
||||
long int required_frame_time, frame_delay;
|
||||
|
||||
long int rolling_average_limit;
|
||||
long int *rolling_average_data;
|
||||
unsigned long int rolling_average;
|
||||
|
||||
int olddiffs; //only need this in here for a printf later...do we need that printf?
|
||||
int smartmask_ratio;
|
||||
int smartmask_count;
|
||||
|
||||
int previous_diffs, previous_location_x, previous_location_y;
|
||||
unsigned long int time_last_frame, time_current_frame;
|
||||
|
||||
unsigned int smartmask_lastrate;
|
||||
|
||||
unsigned int passflag; //only purpose is to flag first frame vs all others.....
|
||||
int rolling_frame;
|
||||
|
||||
};
|
||||
|
||||
extern pthread_mutex_t global_lock;
|
||||
@@ -450,6 +480,6 @@ void * mymalloc(size_t);
|
||||
void * myrealloc(void *, size_t, const char *);
|
||||
FILE * myfopen(const char *, const char *);
|
||||
int myfclose(FILE *);
|
||||
size_t mystrftime(const struct context *, char *, size_t, const char *, const struct tm *, const char *, int);
|
||||
size_t mystrftime(const struct context *, char *, size_t, const char *, const struct timeval *, const char *, int);
|
||||
int create_path(const char *);
|
||||
#endif /* _INCLUDE_MOTION_H */
|
||||
|
||||
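Editor's note: the last hunk above changes mystrftime() to take a struct timeval instead of a struct tm, matching the new timestamp_tv field in struct image_data. A hedged sketch of the conversion this implies; the helper name and format string are illustrative only.

#include <stdio.h>
#include <time.h>
#include <sys/time.h>

/* Illustration of deriving both the broken-down time and a sub-second
 * component from a single struct timeval. */
static void sketch_format_timestamp(const struct timeval *tv, char *out, size_t outlen)
{
    struct tm tm_buf;
    char base[64];

    localtime_r(&tv->tv_sec, &tm_buf);                  /* whole seconds */
    strftime(base, sizeof(base), "%Y%m%d-%H%M%S", &tm_buf);
    snprintf(out, outlen, "%s.%06ld", base, (long)tv->tv_usec);  /* microseconds */
}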
@@ -608,7 +608,7 @@ how Motion is built.
|
||||
<tr>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > --with-pwcbsd </td>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > Use pwcbsd based webcams ( only BSD ) </td>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > This option allow to build motion to support V4L/V4L2 in BSD. </td>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > This option allow to build motion to support V4L2 in BSD. </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > --without-bktr </td>
|
||||
@@ -616,8 +616,8 @@ how Motion is built.
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > ONLY used in *BSD </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > --without-v4l </td>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > Exclude using v4l (video4linux) subsystem. Makes Motion so it only supports network cameras. </td>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > --without-v4l2 </td>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > Exclude using v4l2 (video4linux2) subsystem. Makes Motion so it only supports network cameras. </td>
|
||||
<td bgcolor="#edf4f9" word-wrap:break-word > Can be used if you do not need support and maybe lack some of the libraries for it. </td>
|
||||
</tr>
|
||||
<tr>
|
||||
@@ -1042,15 +1042,18 @@ Motion permits the use of video cards that have discreet input channels. Since
|
||||
the option <code>input</code> must be set to the value -1 for USB cameras.
|
||||
<p></p>
|
||||
|
||||
<strong>Network cameras</strong> are set up via the <code><strong>netcam_url</strong></code> parameter.
|
||||
<strong>Network cameras</strong> are set up via the
|
||||
<code><strong> <a href="#netcam_url" >netcam_url</a> </code></strong> parameter.
|
||||
The latest versions of Motion support rtsp format which many cameras now stream.
|
||||
The URL connection string to enter is specific to the camera and is
|
||||
usually provided by the manufacturer. The connection string is the same as what would be used by other
|
||||
video playing software such as VLC. If the camera does not stream via RTSP and instead uses a MJPEG, then Motion
|
||||
can also view that format. See the option <code><strong>netcam_url</strong></code> for additional options.
|
||||
can also view that format. See the option <code><strong> <a href="#netcam_url" >netcam_url</a> </code></strong>
|
||||
for additional options.
|
||||
<p></p>
|
||||
|
||||
<strong>Raspberry Pi cameras</strong> are set up via the <code><strong>mmalcam_name</strong></code> parameter.
|
||||
<strong>Raspberry Pi cameras</strong> are set up via the <code><strong>
|
||||
<a href="#mmalcam_name" >mmalcam_name</a> </code></strong> parameter.
|
||||
Note that name for this parameter derives from the MMAL/OpenMax software. The most common use of this
|
||||
option is to use the Raspberry PI camera.
|
||||
<p></p>
|
||||
@@ -1082,6 +1085,16 @@ option <code>frequency</code>. Otherwise set <code>frequency</code> to 0.
|
||||
Finally you need to set the TV norm. Values: 0 (PAL), 1 (NTSC), 2 (SECAM), 3 (PAL NC no colour). Default is 0 (PAL).
|
||||
If your camera is a PAL black and white you may get a better result with norm=3 (PAL no colour).
|
||||
<p></p>
|
||||
|
||||
<strong>Static files</strong> can also be processed with a bit of additional setup via a v4l2loopback.
|
||||
Install the loopback software to create a /dev/videoX device and then use software such as ffmpeg to stream
|
||||
the static file into the v4l2 device. e.g. <code><strong> ffmpeg -re -i <mymovie.mp4> -f v4l2 -pix_fmt gray /dev/video0 </strong></code>
|
||||
As illustrated in the example, this method can also be used to reformat the content to a different pixel. This
|
||||
can be helpful as a interim process where ffmpeg supports a particular format but that format is not yet supported
|
||||
by Motion.
|
||||
<p></p>
|
||||
|
||||
|
||||
<p></p>
|
||||
</ul>
|
||||
|
||||
@@ -1335,6 +1348,12 @@ Some configuration options are only used if Motion is built on a system that has
|
||||
<td align="left">mask_file</td>
|
||||
<td align="left"><a href="#mask_file" >mask_file</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td height="17" align="left"></td>
|
||||
<td align="left"></td>
|
||||
<td align="left"><a href="#mask_privacy" >mask_privacy</a></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td height="17" align="left">max_mpeg_time</td>
|
||||
<td align="left">max_movie_time</td>
|
||||
@@ -1994,9 +2013,10 @@ Some configuration options are only used if Motion is built on a system that has
|
||||
<td bgcolor="#edf4f9" ><a href="#noise_tune" >noise_tune</a> </td>
|
||||
<td bgcolor="#edf4f9" ><a href="#area_detect" >area_detect</a> </td>
|
||||
<td bgcolor="#edf4f9" ><a href="#mask_file" >mask_file</a> </td>
|
||||
<td bgcolor="#edf4f9" ><a href="#smart_mask_speed" >smart_mask_speed</a> </td>
|
||||
<td bgcolor="#edf4f9" ><a href="#mask_privacy" >mask_privacy</a> </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td bgcolor="#edf4f9" ><a href="#smart_mask_speed" >smart_mask_speed</a> </td>
|
||||
<td bgcolor="#edf4f9" ><a href="#lightswitch" >lightswitch</a> </td>
|
||||
<td bgcolor="#edf4f9" ><a href="#minimum_motion_frames" >minimum_motion_frames</a> </td>
|
||||
<td bgcolor="#edf4f9" ><a href="#event_gap" >event_gap</a> </td>
|
||||
@@ -2598,6 +2618,9 @@ format that Motion uses internally.
|
||||
<li> V4L2_PIX_FMT_YUYV : 15 'YUYV'</li>
|
||||
<li> V4L2_PIX_FMT_YUV422P : 16 '422P'</li>
|
||||
<li> V4L2_PIX_FMT_YUV420 : 17 'YU12'</li>
|
||||
<li> V4L2_PIX_FMT_Y10 : 18 'YU10'</li>
|
||||
<li> V4L2_PIX_FMT_Y12 : 19 'YU12'</li>
|
||||
<li> V4L2_PIX_FMT_GREY : 20 'GREY'</li>
|
||||
|
||||
</ul>
|
||||
|
||||
@@ -3003,7 +3026,7 @@ Motion automatically swaps width and height if you rotate 90 or 270 degrees, so
|
||||
</ul>
|
||||
<p></p>
|
||||
The width in pixels of each frame. Valid range is camera dependent.
|
||||
Motion does not scale so should be set to the actual size of the v4l device.
|
||||
Motion does not scale so should be set to the actual size of the v4l2 device.
|
||||
In case of a net camera motion sets the height to the height of the first image read
|
||||
except for rtsp streams which does rescale the network camera image to the
|
||||
requested dimensions. Note that this rescaling comes at a very high CPU cost so it
|
||||
@@ -3492,6 +3515,26 @@ Mask file (converted to png format so it can be shown by your web browser)

<p></p>

<h3><a name="mask_privacy"></a> mask_privacy </h3>
<p></p>
<ul>
<li> Type: String</li>
<li> Range / Valid values: Max 4095 characters</li>
<li> Default: Not defined</li>
</ul>
<p></p>
The full path and filename for the privacy masking pgm file. This file works like the mask_file as
described above. The difference with this parameter is that while the mask_file excludes the section from
detecting motion, this file excludes the section of the image completely. Excluded areas will appear as
white on all images and movies.
<p></p>
mask_privacy is applied before detection so no motion will ever be detected in the excluded area.
This parameter could however still be used with the mask_file. e.g. This file could exclude the
neighbors yard and the mask_file would exclude the blowing tree from motion detection. The resulting
pictures/movies would show solid white in place of the neighbors yard but the tree would still be in
the pictures/movies.
<p></p>

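Editor's note: the mask_privacy description above says excluded areas are rendered white in every image and movie. A minimal sketch of how such a mask could be applied to a YUV420P frame before detection runs; this is an illustration of the idea only, not Motion's actual implementation, and the names are hypothetical.

/* Hypothetical sketch: for every pixel where the privacy mask is zero,
 * overwrite luma with white and neutralize the chroma planes.
 * YUV420P layout: Y plane of width*height bytes, then U and V planes
 * at quarter size. */
static void sketch_apply_privacy_mask(unsigned char *img, const unsigned char *mask,
                                      int width, int height)
{
    int x, y;
    unsigned char *u_plane = img + width * height;
    unsigned char *v_plane = u_plane + (width / 2) * (height / 2);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            if (mask[y * width + x] == 0) {
                img[y * width + x] = 255;                         /* white luma */
                u_plane[(y / 2) * (width / 2) + (x / 2)] = 128;   /* neutral chroma */
                v_plane[(y / 2) * (width / 2) + (x / 2)] = 128;
            }
        }
    }
}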
<h3><a name="smart_mask_speed"></a> smart_mask_speed </h3>
|
||||
<p></p>
|
||||
<ul>
|
||||
@@ -4278,109 +4321,21 @@ Camstream is "fooled" to think it is looking at a real camera.
|
||||
<p></p>
|
||||
<strong>Installing</strong>
|
||||
<p></p>
|
||||
Installing the video loopback device is not difficult. At least not when you have this document available.
|
||||
<p></p>
|
||||
First you must prepare your system for more video devices. You will need two extra devices for each video
|
||||
pipe that you want.
|
||||
<p></p>
|
||||
For example if you have 4 cameras they will probably run at
|
||||
/dev/video0, /dev/video1, /dev/video2, and /dev/video3. So you will need additional 8 video devices.
|
||||
This is easy to do.
|
||||
<p></p>
|
||||
<pre>
|
||||
mknod /dev/video4 c 81 4
|
||||
mknod /dev/video5 c 81 5
|
||||
mknod /dev/video6 c 81 6
|
||||
mknod /dev/video7 c 81 7
|
||||
mknod /dev/video8 c 81 8
|
||||
mknod /dev/video9 c 81 9
|
||||
mknod /dev/video10 c 81 10
|
||||
mknod /dev/video11 c 81 11
|
||||
</pre>
|
||||
<p></p>
|
||||
Note that the video device number is the same as the last parameter given on each line.
|
||||
<p></p>
|
||||
You may need to set the ownership and permissions (chown and chmod) to be the same as the video devices
|
||||
that were already there.
|
||||
<p></p>
|
||||
Now you need to install the video loopback device.
|
||||
<p></p>
|
||||
Download the latest via the apt packages and place the file in a place of your own choice.
|
||||
<p></p>
|
||||
Untar and uncompress the file to the place you want the program installed. Editor recommends /usr/local/vloopback.
|
||||
<p></p>
|
||||
<code>cd /usr/local</code>
|
||||
<p></p>
|
||||
<code>tar -xvzf /path/to/vloopback-1.1-rc1.tar.gz</code>
|
||||
<p></p>
|
||||
You now have a directory called vloopback-1.1-rc1. You can rename it to vloopback (mv vloopback-1.1-rc1 vloopback).
|
||||
I recommend creating a symbolic link to the current version. This way you can more easily experiment with different
|
||||
versions simply by changing the link.
|
||||
<p></p>
|
||||
<code>ln -s vloopback-1.1-rc1 vloopback</code>
|
||||
<p></p>
|
||||
Now change to the new directory
|
||||
<p></p>
|
||||
<code>cd vloopback</code>
|
||||
<p></p>
|
||||
Build the code
|
||||
<p></p>
|
||||
<code>make</code>
|
||||
<p></p>
|
||||
There is a good chance that the make will not work and give you a long list of errors.
|
||||
To run make the following must be available on you machine. <ul>
|
||||
<li> The kernel source files must be installed.
|
||||
</li> <li> The source files must be available at /usr/src/linux.<br />E.g.
|
||||
the new Red Hat 7.3 does not have a link to the sources called linux. Instead there is a link
|
||||
called linux-2.4. This is easy to fix. Just create a link to the real source tree. Do not rename!
|
||||
Add a link using this command (replacing the kernel version number with the one you have on your
|
||||
machine)<br /> <code>ln -s /usr/src/linux-2.4.18-4 /usr/src/linux</code>
|
||||
</li> <li> Alternatively you can change the vloopback makefile so that the "LINUXSRC=/usr/src/linux" line is
|
||||
changed to the actual path. I recommend the link solution since this may solve other similar problems that you
|
||||
can get when installing other software.
|
||||
</li></ul>
|
||||
<p></p>
|
||||
When compiling on a newer Linux distribution you may get a warning about a header file malloc.h.
|
||||
To remove this warning simply change the header reference as suggested by the warning.
|
||||
<p></p>
|
||||
In vloopback.c you replace the line
|
||||
<p></p>
|
||||
<code>#include <linux/malloc.h></code>
|
||||
<p></p>
|
||||
with the line
|
||||
<p></p>
|
||||
<code>#include <linux/slab.h></code>
|
||||
<p></p>
|
||||
Install the code you built as a Kernel module. There are two options:
|
||||
pipes should be set to the number of video loopbacks that you want. Probably one for each camera.
|
||||
The dev_offset defines which video device number will be the first. If dev_offset is not defined the
|
||||
vloopback module will install itself from the first available video device. If you want the cameras to be
|
||||
assigned to the lower video device numbers you must either load vloopback after loading the video device
|
||||
modules OR use the dev_offset option when loading vloopback. Vloopback then installs itself in the sequence
|
||||
input 0, output 0, input 1, output 1, input 2, output 2 etc. Here is shown the command for our example of 4
|
||||
cameras and 4 loopback devices and the first loopback device offset to /dev/video4.
|
||||
<p></p>
|
||||
<code>/sbin/insmod /usr/local/vloopback/vloopback.o pipes=4 dev_offset=4</code>
|
||||
<p></p>
|
||||
When you run the command you may get a warning about tainting the Kernel. Just ignore this.
|
||||
You can choose to copy the vloopback.o file into a directory in the /lib/modules tree where the insmod/modprobe programs are already looking for modules. Then the command gets simpler (/sbin/insmod vloopback pipes=.....).
|
||||
<p></p>
|
||||
If you want the loopback device to load during boot, you can place the call in one of the bootup scripts such as /etc/rc.d/rc.local. Vloopback should be loaded before you start motion.
|
||||
The video loopback device can be added installed via apt in many distributions. The package tested
|
||||
with Motion is v4l2loopback-dkms. Once the package is installed, you just need to run
|
||||
<code><strong>sudo modprobe v4l2loopback</strong></code>. This will add a new video device that you
|
||||
can use for the loopback. It is believed that there are additional options associated with the
|
||||
v4l2loopback that allows for adding more than one device. See the documentation of the v4l2loopback
|
||||
project for additional details.
|
||||
<p></p>
|
||||
To activate the vloopback device in motion set the 'video_pipe' option in the motion.conf file.
|
||||
You can also view the special motion pictures where you see the changed pixels by setting the option
|
||||
'motion_video_pipe' in motion.conf. When setting the video_pipe and/or motion_video_pipe options either
|
||||
specify the input device as e.g. /dev/video4. You can also set the parameter to '-' which means that motion
|
||||
will find the first vacant video loopback device input. If you have more than one camera you may want to
|
||||
control which loopback device each camera uses. Then you need to define the specific device name in motion.conf
|
||||
for the first camera and in each camera config file for the other cameras. If you set the video_pipe parameter
|
||||
to '-' in the motion.conf file and not setting it in the camera config files, motion automatically assign video
|
||||
devices in the same sequence as the camera config files are loaded. You can combine both video_pipe and motion_video_pipe
|
||||
but then naturally you will need twice as many pipes.
|
||||
specify the input device as e.g. /dev/video4.
|
||||
<p></p>
|
||||
De-activating should be done with this command
|
||||
<p></p>
|
||||
<code>/sbin/modprobe -r vloopback</code>
|
||||
<code>sudo modprobe -r v4l2loopback</code>
|
||||
<p></p>
|
||||
<p></p>
|
||||
|
||||
@@ -4394,10 +4349,9 @@ De-activating should be done with this command
|
||||
<li> Default: </li>
|
||||
</ul>
|
||||
<p></p>
|
||||
if a dash '-' is given motion will use /proc/video/vloopback/vloopbacks to locate a free pipe. Default: not set
|
||||
The video4linux video loopback input device for normal images. If a particular pipe is to be used then use the
|
||||
device filename of this pipe. If a dash '-' is given motion will use /proc/video/vloopback/vloopbacks to locate
|
||||
a free pipe.
|
||||
Default: not set
|
||||
The video4linux video loopback input device for normal images. The device would be specified
|
||||
in the format like /dev/video1
|
||||
<p></p>
|
||||
<p></p>
|
||||
|
||||
@@ -4408,9 +4362,8 @@ a free pipe.
|
||||
</li> <li> Default: Not defined
|
||||
</li></ul>
|
||||
<p></p>
|
||||
The video4linux video loopback input device for motion images.
|
||||
If a particular pipe is to be used then use the device filename of this pipe,
|
||||
if a dash '-' is given motion will use /proc/video/vloopback/vloopbacks to locate a free pipe. Default: not set
|
||||
The video4linux video loopback input device for motion images. The device would be specified
|
||||
in the format like /dev/video1
|
||||
<p></p>
|
||||
<p></p>
|
||||
|
||||
|
||||
@@ -728,9 +728,8 @@ int netcam_connect_rtsp(netcam_context_ptr netcam){
|
||||
return 0;
|
||||
|
||||
#else /* No FFmpeg/Libav */
|
||||
netcam->rtsp->status = RTSP_NOTCONNECTED;
|
||||
netcam->rtsp->format_context = NULL;
|
||||
MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support");
|
||||
if (netcam)
|
||||
MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support");
|
||||
return -1;
|
||||
#endif /* End #ifdef HAVE_FFMPEG */
|
||||
}
|
||||
@@ -757,7 +756,7 @@ void netcam_shutdown_rtsp(netcam_context_ptr netcam){
|
||||
netcam_rtsp_close_context(netcam);
|
||||
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO,"%s: netcam shut down");
|
||||
}
|
||||
|
||||
|
||||
free(netcam->rtsp->path);
|
||||
free(netcam->rtsp->user);
|
||||
free(netcam->rtsp->pass);
|
||||
@@ -767,8 +766,8 @@ void netcam_shutdown_rtsp(netcam_context_ptr netcam){
|
||||
|
||||
#else /* No FFmpeg/Libav */
|
||||
/* Stop compiler warnings */
|
||||
netcam->rtsp->status = RTSP_NOTCONNECTED;
|
||||
MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support");
|
||||
if (netcam)
|
||||
MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support");
|
||||
#endif /* End #ifdef HAVE_FFMPEG */
|
||||
}
|
||||
|
||||
@@ -892,8 +891,8 @@ int netcam_setup_rtsp(netcam_context_ptr netcam, struct url_t *url){
|
||||
|
||||
#else /* No FFmpeg/Libav */
|
||||
/* Stop compiler warnings */
|
||||
if (url->port == url->port) netcam->rtsp->status = RTSP_NOTCONNECTED;
|
||||
MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support");
|
||||
if ((url) || (netcam))
|
||||
MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: FFmpeg/Libav not found on computer. No RTSP support");
|
||||
return -1;
|
||||
#endif /* End #ifdef HAVE_FFMPEG */
|
||||
}
|
||||
@@ -922,7 +921,7 @@ int netcam_next_rtsp(unsigned char *image , netcam_context_ptr netcam){
|
||||
* or call anything else without taking care of thread safety.
|
||||
* The netcam mutex *only* protects netcam->latest, it cannot be
|
||||
* used to safely call other netcam functions. */
|
||||
|
||||
|
||||
pthread_mutex_lock(&netcam->mutex);
|
||||
memcpy(image, netcam->latest->ptr, netcam->latest->used);
|
||||
pthread_mutex_unlock(&netcam->mutex);
|
||||
|
||||
picture.c
@@ -222,7 +222,7 @@ static void put_subjectarea(struct tiff_writing *into, const struct coord *box)
|
||||
*/
|
||||
static void put_jpeg_exif(j_compress_ptr cinfo,
|
||||
const struct context *cnt,
|
||||
const struct tm *timestamp,
|
||||
const struct timeval *tv1,
|
||||
const struct coord *box)
|
||||
{
|
||||
/* description, datetime, and subtime are the values that are actually
|
||||
@@ -230,16 +230,18 @@ static void put_jpeg_exif(j_compress_ptr cinfo,
|
||||
*/
|
||||
char *description, *datetime, *subtime;
|
||||
char datetime_buf[22];
|
||||
struct tm timestamp_tm;
|
||||
|
||||
if (timestamp) {
|
||||
if (tv1->tv_sec) {
|
||||
localtime_r(&tv1->tv_sec, ×tamp_tm);
|
||||
/* Exif requires this exact format */
|
||||
snprintf(datetime_buf, 21, "%04d:%02d:%02d %02d:%02d:%02d",
|
||||
timestamp->tm_year + 1900,
|
||||
timestamp->tm_mon + 1,
|
||||
timestamp->tm_mday,
|
||||
timestamp->tm_hour,
|
||||
timestamp->tm_min,
|
||||
timestamp->tm_sec);
|
||||
timestamp_tm.tm_year + 1900,
|
||||
timestamp_tm.tm_mon + 1,
|
||||
timestamp_tm.tm_mday,
|
||||
timestamp_tm.tm_hour,
|
||||
timestamp_tm.tm_min,
|
||||
timestamp_tm.tm_sec);
|
||||
datetime = datetime_buf;
|
||||
} else {
|
||||
datetime = NULL;
|
||||
@@ -253,7 +255,7 @@ static void put_jpeg_exif(j_compress_ptr cinfo,
|
||||
description = malloc(PATH_MAX);
|
||||
mystrftime(cnt, description, PATH_MAX-1,
|
||||
cnt->conf.exif_text,
|
||||
timestamp, NULL, 0);
|
||||
tv1, NULL, 0);
|
||||
} else {
|
||||
description = NULL;
|
||||
}
|
||||
@@ -351,7 +353,7 @@ static void put_jpeg_exif(j_compress_ptr cinfo,
|
||||
|
||||
if (datetime) {
|
||||
memcpy(writing.buf, exif_tzoffset_tag, 12);
|
||||
put_sint16(writing.buf+8, timestamp->tm_gmtoff / 3600);
|
||||
put_sint16(writing.buf+8, timestamp_tm.tm_gmtoff / 3600);
|
||||
writing.buf += 12;
|
||||
}
|
||||
|
||||
@@ -413,7 +415,7 @@ static void put_jpeg_exif(j_compress_ptr cinfo,
|
||||
*/
|
||||
static int put_jpeg_yuv420p_memory(unsigned char *dest_image, int image_size,
|
||||
unsigned char *input_image, int width, int height, int quality,
|
||||
struct context *cnt, struct tm *tm, struct coord *box)
|
||||
struct context *cnt, struct timeval *tv1, struct coord *box)
|
||||
|
||||
{
|
||||
int i, j, jpeg_image_size;
|
||||
@@ -451,14 +453,14 @@ static int put_jpeg_yuv420p_memory(unsigned char *dest_image, int image_size,
|
||||
|
||||
jpeg_set_quality(&cinfo, quality, TRUE);
|
||||
cinfo.dct_method = JDCT_FASTEST;
|
||||
|
||||
|
||||
_jpeg_mem_dest(&cinfo, dest_image, image_size); // Data written to mem
|
||||
|
||||
|
||||
|
||||
jpeg_start_compress(&cinfo, TRUE);
|
||||
|
||||
put_jpeg_exif(&cinfo, cnt, tm, box);
|
||||
|
||||
put_jpeg_exif(&cinfo, cnt, tv1, box);
|
||||
|
||||
/* If the image is not a multiple of 16, this overruns the buffers
|
||||
* we'll just pad those last bytes with zeros
|
||||
*/
|
||||
@@ -468,13 +470,13 @@ static int put_jpeg_yuv420p_memory(unsigned char *dest_image, int image_size,
|
||||
y[i] = input_image + width * (i + j);
|
||||
if (i % 2 == 0) {
|
||||
cb[i / 2] = input_image + width * height + width / 2 * ((i + j) /2);
|
||||
cr[i / 2] = input_image + width * height + width * height / 4 + width / 2 * ((i + j) / 2);
|
||||
cr[i / 2] = input_image + width * height + width * height / 4 + width / 2 * ((i + j) / 2);
|
||||
}
|
||||
} else {
|
||||
y[i] = 0x00;
|
||||
cb[i] = 0x00;
|
||||
cr[i] = 0x00;
|
||||
}
|
||||
}
|
||||
}
|
||||
jpeg_write_raw_data(&cinfo, data, 16);
|
||||
}
|
||||
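Editor's note: the cb[]/cr[] pointer arithmetic in the hunk above encodes the YUV420P plane layout used by put_jpeg_yuv420p_memory. Spelled out as an illustrative helper; the names are not part of picture.c.

#include <stddef.h>

/* Illustrative only: compute plane start pointers for a width x height
 * YUV420P buffer, matching the cb[]/cr[] offsets in the hunk above. */
static void sketch_yuv420p_planes(unsigned char *input_image, int width, int height,
                                  unsigned char **y_plane, unsigned char **u_plane,
                                  unsigned char **v_plane)
{
    *y_plane = input_image;                                    /* width*height bytes   */
    *u_plane = input_image + width * height;                   /* width*height/4 bytes */
    *v_plane = input_image + width * height + (width * height) / 4;
}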
@@ -557,7 +559,7 @@ static int put_jpeg_grey_memory(unsigned char *dest_image, int image_size, unsig
|
||||
static void put_jpeg_yuv420p_file(FILE *fp,
|
||||
unsigned char *image, int width, int height,
|
||||
int quality,
|
||||
struct context *cnt, struct tm *tm, struct coord *box)
|
||||
struct context *cnt, struct timeval *tv1, struct coord *box)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
@@ -598,7 +600,7 @@ static void put_jpeg_yuv420p_file(FILE *fp,
|
||||
jpeg_stdio_dest(&cinfo, fp); // Data written to file
|
||||
jpeg_start_compress(&cinfo, TRUE);
|
||||
|
||||
put_jpeg_exif(&cinfo, cnt, tm, box);
|
||||
put_jpeg_exif(&cinfo, cnt, tv1, box);
|
||||
|
||||
for (j = 0; j < height; j += 16) {
|
||||
for (i = 0; i < 16; i++) {
|
||||
@@ -612,8 +614,8 @@ static void put_jpeg_yuv420p_file(FILE *fp,
|
||||
y[i] = 0x00;
|
||||
cb[i] = 0x00;
|
||||
cr[i] = 0x00;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
jpeg_write_raw_data(&cinfo, data, 16);
|
||||
}
|
||||
|
||||
@@ -897,7 +899,7 @@ int put_picture_memory(struct context *cnt, unsigned char* dest_image, int image
|
||||
switch (cnt->imgs.type) {
|
||||
case VIDEO_PALETTE_YUV420P:
|
||||
return put_jpeg_yuv420p_memory(dest_image, image_size, image,
|
||||
cnt->imgs.width, cnt->imgs.height, quality, cnt, &(cnt->current_image->timestamp_tm), &(cnt->current_image->location));
|
||||
cnt->imgs.width, cnt->imgs.height, quality, cnt, &(cnt->current_image->timestamp_tv), &(cnt->current_image->location));
|
||||
case VIDEO_PALETTE_GREY:
|
||||
return put_jpeg_grey_memory(dest_image, image_size, image,
|
||||
cnt->imgs.width, cnt->imgs.height, quality);
|
||||
@@ -916,7 +918,7 @@ void put_picture_fd(struct context *cnt, FILE *picture, unsigned char *image, in
|
||||
} else {
|
||||
switch (cnt->imgs.type) {
|
||||
case VIDEO_PALETTE_YUV420P:
|
||||
put_jpeg_yuv420p_file(picture, image, cnt->imgs.width, cnt->imgs.height, quality, cnt, &(cnt->current_image->timestamp_tm), &(cnt->current_image->location));
|
||||
put_jpeg_yuv420p_file(picture, image, cnt->imgs.width, cnt->imgs.height, quality, cnt, &(cnt->current_image->timestamp_tv), &(cnt->current_image->location));
|
||||
break;
|
||||
case VIDEO_PALETTE_GREY:
|
||||
put_jpeg_grey_file(picture, image, cnt->imgs.width, cnt->imgs.height, quality);
|
||||
@@ -1026,7 +1028,7 @@ unsigned char *get_pgm(FILE *picture, int width, int height)
|
||||
for (y = 0; y < height; y++) {
|
||||
for (x = 0; x < width; x++) {
|
||||
resized_image[y * width + x] = image[
|
||||
(mask_height - 1) * y / (height - 1) * mask_width +
|
||||
(mask_height - 1) * y / (height - 1) * mask_width +
|
||||
(mask_width - 1) * x / (width - 1)];
|
||||
}
|
||||
}
|
||||
@@ -1106,12 +1108,7 @@ void preview_save(struct context *cnt)
|
||||
/* Use filename of movie i.o. jpeg_filename when set to 'preview'. */
|
||||
use_imagepath = strcmp(cnt->conf.imagepath, "preview");
|
||||
|
||||
#ifdef HAVE_FFMPEG
|
||||
if ((cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe))
|
||||
&& !use_imagepath) {
|
||||
#else
|
||||
if ((cnt->conf.useextpipe && cnt->extpipe) && !use_imagepath) {
|
||||
#endif
|
||||
if ((cnt->ffmpeg_output || (cnt->conf.useextpipe && cnt->extpipe)) && !use_imagepath) {
|
||||
if (cnt->conf.useextpipe && cnt->extpipe) {
|
||||
basename_len = strlen(cnt->extpipefilename) + 1;
|
||||
strncpy(previewname, cnt->extpipefilename, basename_len);
|
||||
@@ -1140,7 +1137,7 @@ void preview_save(struct context *cnt)
|
||||
else
|
||||
imagepath = (char *)DEF_IMAGEPATH;
|
||||
|
||||
mystrftime(cnt, filename, sizeof(filename), imagepath, &cnt->imgs.preview_image.timestamp_tm, NULL, 0);
|
||||
mystrftime(cnt, filename, sizeof(filename), imagepath, &cnt->imgs.preview_image.timestamp_tv, NULL, 0);
|
||||
snprintf(previewname, PATH_MAX, "%s/%s.%s", cnt->conf.filepath, filename, imageext(cnt));
|
||||
|
||||
put_picture(cnt, previewname, cnt->imgs.preview_image.image, FTYPE_IMAGE);
|
||||
|
||||
pwc-ioctl.h
@@ -103,7 +103,7 @@ struct pwc_serial
|
||||
{
|
||||
char serial[30]; /* String with serial number. Contains terminating 0 */
|
||||
};
|
||||
|
||||
|
||||
/* pwc_whitebalance.mode values */
|
||||
#define PWC_WB_INDOOR 0
|
||||
#define PWC_WB_OUTDOOR 1
|
||||
@@ -111,14 +111,14 @@ struct pwc_serial
|
||||
#define PWC_WB_MANUAL 3
|
||||
#define PWC_WB_AUTO 4
|
||||
|
||||
/* Used with VIDIOCPWC[SG]AWB (Auto White Balance).
|
||||
/* Used with VIDIOCPWC[SG]AWB (Auto White Balance).
|
||||
Set mode to one of the PWC_WB_* values above.
|
||||
*red and *blue are the respective gains of these colour components inside
|
||||
*red and *blue are the respective gains of these colour components inside
|
||||
the camera; range 0..65535
|
||||
When 'mode' == PWC_WB_MANUAL, 'manual_red' and 'manual_blue' are set or read;
|
||||
When 'mode' == PWC_WB_MANUAL, 'manual_red' and 'manual_blue' are set or read;
|
||||
otherwise undefined.
|
||||
'read_red' and 'read_blue' are read-only.
|
||||
*/
|
||||
*/
|
||||
struct pwc_whitebalance
|
||||
{
|
||||
int mode;
|
||||
@@ -126,9 +126,9 @@ struct pwc_whitebalance
|
||||
int read_red, read_blue; /* R/O */
|
||||
};
|
||||
|
||||
/*
|
||||
/*
|
||||
'control_speed' and 'control_delay' are used in automatic whitebalance mode,
|
||||
and tell the camera how fast it should react to changes in lighting, and
|
||||
and tell the camera how fast it should react to changes in lighting, and
|
||||
with how much delay. Valid values are 0..65535.
|
||||
*/
|
||||
struct pwc_wb_speed
|
||||
@@ -157,11 +157,11 @@ struct pwc_imagesize
|
||||
#define PWC_MPT_TILT 0x02
|
||||
#define PWC_MPT_TIMEOUT 0x04 /* for status */
|
||||
|
||||
/* Set angles; when absolute != 0, the angle is absolute and the
|
||||
/* Set angles; when absolute != 0, the angle is absolute and the
|
||||
driver calculates the relative offset for you. This can only
|
||||
be used with VIDIOCPWCSANGLE; VIDIOCPWCGANGLE always returns
|
||||
absolute angles.
|
||||
*/
|
||||
*/
|
||||
struct pwc_mpt_angles
|
||||
{
|
||||
int absolute; /* write-only */
|
||||
@@ -188,7 +188,7 @@ struct pwc_mpt_status
|
||||
/* This is used for out-of-kernel decompression. With it, you can get
|
||||
all the necessary information to initialize and use the decompressor
|
||||
routines in standalone applications.
|
||||
*/
|
||||
*/
|
||||
struct pwc_video_command
|
||||
{
|
||||
int type; /* camera type (645, 675, 730, etc.) */
|
||||
@@ -273,7 +273,7 @@ struct pwc_video_command
|
||||
|
||||
/* Flickerless mode; = 0 off, otherwise on */
|
||||
#define VIDIOCPWCSFLICKER _IOW('v', 208, int)
|
||||
#define VIDIOCPWCGFLICKER _IOR('v', 208, int)
|
||||
#define VIDIOCPWCGFLICKER _IOR('v', 208, int)
|
||||
|
||||
/* Dynamic noise reduction; 0 off, 3 = high noise reduction */
|
||||
#define VIDIOCPWCSDYNNOISE _IOW('v', 209, int)
|
||||
@@ -282,7 +282,7 @@ struct pwc_video_command
|
||||
/* Real image size as used by the camera; tells you whether or not there's a gray border around the image */
|
||||
#define VIDIOCPWCGREALSIZE _IOR('v', 210, struct pwc_imagesize)
|
||||
|
||||
/* Motorized pan & tilt functions */
|
||||
/* Motorized pan & tilt functions */
|
||||
#define VIDIOCPWCMPTRESET _IOW('v', 211, int)
|
||||
#define VIDIOCPWCMPTGRANGE _IOR('v', 211, struct pwc_mpt_range)
|
||||
#define VIDIOCPWCMPTSANGLE _IOW('v', 212, struct pwc_mpt_angles)
|
||||
@@ -299,11 +299,11 @@ struct pwc_table_init_buffer {
|
||||
|
||||
/*
|
||||
* This is private command used when communicating with v4l2.
|
||||
* In the future all private ioctl will be remove/replace to
|
||||
* In the future all private ioctl will be remove/replace to
|
||||
* use interface offer by v4l2.
|
||||
*/
|
||||
|
||||
#if (defined(MOTION_V4L2)) && defined(__linux__)
|
||||
#if (!defined(WITHOUT_V4L2)) && defined(__linux__)
|
||||
|
||||
#define V4L2_CID_PRIVATE_SAVE_USER (V4L2_CID_PRIVATE_BASE + 0)
|
||||
#define V4L2_CID_PRIVATE_RESTORE_USER (V4L2_CID_PRIVATE_BASE + 1)
|
||||
@@ -323,6 +323,6 @@ struct pwc_raw_frame {
|
||||
__u8 rawframe[0]; /* frame_size = H/4*vbandlength */
|
||||
} __attribute__ ((packed));
|
||||
|
||||
#endif /* MOTION_V4L2 && __linux__ */
|
||||
#endif /* !WITHOUT_V4L2 && __linux__ */
|
||||
|
||||
#endif
|
||||
|
||||
track.c
@@ -8,7 +8,7 @@
|
||||
#include <math.h>
|
||||
#include "motion.h"
|
||||
|
||||
#ifdef MOTION_V4L2
|
||||
#ifndef WITHOUT_V4L2
|
||||
#include <linux/videodev2.h>
|
||||
#include "pwc-ioctl.h"
|
||||
#endif
|
||||
@@ -52,14 +52,14 @@ static unsigned int servo_move(struct context *cnt, struct coord *cent,
|
||||
struct images *imgs, unsigned int manual);
|
||||
static unsigned int iomojo_move(struct context *cnt, int dev, struct coord *cent, struct images *imgs);
|
||||
|
||||
#ifdef MOTION_V4L2
|
||||
#ifndef WITHOUT_V4L2
|
||||
static unsigned int lqos_center(struct context *cnt, int dev, int xoff, int yoff);
|
||||
static unsigned int lqos_move(struct context *cnt, int dev, struct coord *cent,
|
||||
struct images *imgs, unsigned int manual);
|
||||
static unsigned int uvc_center(struct context *cnt, int dev, int xoff, int yoff);
|
||||
static unsigned int uvc_move(struct context *cnt, int dev, struct coord *cent,
|
||||
struct images *imgs, unsigned int manual);
|
||||
#endif /* MOTION_V4L2 */
|
||||
#endif /* WITHOUT_V4L2 */
|
||||
|
||||
/* Add a call to your functions here: */
|
||||
unsigned int track_center(struct context *cnt, int dev ATTRIBUTE_UNUSED,
|
||||
@@ -79,7 +79,7 @@ unsigned int track_center(struct context *cnt, int dev ATTRIBUTE_UNUSED,
|
||||
} else if (cnt->track.type == TRACK_TYPE_SERVO) {
|
||||
return servo_center(cnt, xoff, yoff);
|
||||
}
|
||||
#ifdef MOTION_V4L2
|
||||
#ifndef WITHOUT_V4L2
|
||||
else if (cnt->track.type == TRACK_TYPE_PWC)
|
||||
return lqos_center(cnt, dev, xoff, yoff);
|
||||
else if (cnt->track.type == TRACK_TYPE_UVC)
|
||||
@@ -108,7 +108,7 @@ unsigned int track_move(struct context *cnt, int dev, struct coord *cent, struct
|
||||
return stepper_move(cnt, cent, imgs);
|
||||
else if (cnt->track.type == TRACK_TYPE_SERVO)
|
||||
return servo_move(cnt, cent, imgs, manual);
|
||||
#ifdef MOTION_V4L2
|
||||
#ifndef WITHOUT_V4L2
|
||||
else if (cnt->track.type == TRACK_TYPE_PWC)
|
||||
return lqos_move(cnt, dev, cent, imgs, manual);
|
||||
else if (cnt->track.type == TRACK_TYPE_UVC)
|
||||
@@ -780,7 +780,7 @@ static unsigned int iomojo_move(struct context *cnt, int dev, struct coord *cent
|
||||
Logitech QuickCam Orbit camera tracking code by folkert@vanheusden.com
|
||||
|
||||
******************************************************************************/
|
||||
#ifdef MOTION_V4L2
|
||||
#ifndef WITHOUT_V4L2
|
||||
static unsigned int lqos_center(struct context *cnt, int dev, int x_angle, int y_angle)
|
||||
{
|
||||
int reset = 3;
|
||||
@@ -1219,4 +1219,4 @@ static unsigned int uvc_move(struct context *cnt, int dev, struct coord *cent,
|
||||
|
||||
return cnt->track.move_wait;
|
||||
}
|
||||
#endif /* MOTION_V4L2 */
|
||||
#endif /* WITHOUT_V4L2 */
|
||||
|
||||
track.h
@@ -117,7 +117,7 @@ unsigned int track_move(struct context *, int, struct coord *, struct images *,
|
||||
|
||||
#define SERVO_BAUDRATE B9600
|
||||
|
||||
#define SERVO_COMMAND_STATUS 0
|
||||
#define SERVO_COMMAND_STATUS 0
|
||||
#define SERVO_COMMAND_LEFT_N 1
|
||||
#define SERVO_COMMAND_RIGHT_N 2
|
||||
#define SERVO_COMMAND_LEFT 3
|
||||
@@ -155,7 +155,7 @@ unsigned int track_move(struct context *, int, struct coord *, struct images *,
|
||||
#define IOMOJO_DIRECTION_DOWN 0x04
|
||||
#define IOMOJO_DIRECTION_UP 0x08
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
/*
|
||||
* Defines for the Logitech QuickCam Orbit/Sphere USB webcam
|
||||
@@ -165,11 +165,9 @@ unsigned int track_move(struct context *, int, struct coord *, struct images *,
|
||||
#define LQOS_HORIZONAL_DEGREES 120
|
||||
|
||||
/*
|
||||
* UVC
|
||||
* UVC
|
||||
*/
|
||||
|
||||
#ifdef MOTION_V4L2
|
||||
|
||||
#ifndef V4L2_CID_PAN_RELATIVE
|
||||
#define V4L2_CID_PAN_RELATIVE (V4L2_CID_PRIVATE_BASE+7)
|
||||
#endif
|
||||
@@ -183,9 +181,8 @@ unsigned int track_move(struct context *, int, struct coord *, struct images *,
|
||||
#endif
|
||||
|
||||
#define INCPANTILT 64 // 1 degree
|
||||
#endif /* MOTION_V4L2 */
|
||||
|
||||
|
||||
#endif /* WITHOUT_V4L */
|
||||
#endif /* WITHOUT_V4L2 */
|
||||
|
||||
#endif /* _INCLUDE_TRACK_H */
|
||||
|
||||
video.c
@@ -1,453 +0,0 @@
|
||||
/*
|
||||
* video.c
|
||||
*
|
||||
* Video stream functions for motion.
|
||||
* Copyright 2000 by Jeroen Vreeken (pe1rxq@amsat.org)
|
||||
* This software is distributed under the GNU public license version 2
|
||||
* See also the file 'COPYING'.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "motion.h"
|
||||
#include "video.h"
|
||||
#include "rotate.h"
|
||||
|
||||
#if defined(HAVE_LINUX_VIDEODEV_H) && !defined(WITHOUT_V4L)
|
||||
|
||||
/**
|
||||
* v4l_picture_controls
|
||||
*/
|
||||
static void v4l_picture_controls(struct context *cnt, struct video_dev *viddev)
|
||||
{
|
||||
int dev = viddev->fd;
|
||||
struct video_picture vid_pic;
|
||||
int make_change = 0;
|
||||
|
||||
if (cnt->conf.contrast && cnt->conf.contrast != viddev->contrast) {
|
||||
|
||||
if (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1)
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)");
|
||||
|
||||
make_change = 1;
|
||||
vid_pic.contrast = cnt->conf.contrast * 256;
|
||||
viddev->contrast = cnt->conf.contrast;
|
||||
}
|
||||
|
||||
if (cnt->conf.saturation && cnt->conf.saturation != viddev->saturation) {
|
||||
|
||||
if (!make_change) {
|
||||
if (ioctl(dev, VIDIOCGPICT, &vid_pic)==-1)
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)");
|
||||
}
|
||||
|
||||
make_change = 1;
|
||||
vid_pic.colour = cnt->conf.saturation * 256;
|
||||
viddev->saturation = cnt->conf.saturation;
|
||||
}
|
||||
|
||||
if (cnt->conf.hue && cnt->conf.hue != viddev->hue) {
|
||||
|
||||
if (!make_change) {
|
||||
if (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1)
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)");
|
||||
}
|
||||
|
||||
make_change = 1;
|
||||
vid_pic.hue = cnt->conf.hue * 256;
|
||||
viddev->hue = cnt->conf.hue;
|
||||
}
|
||||
|
||||
/* Only tested with PWCBSD in FreeBSD */
|
||||
#if defined(PWCBSD)
|
||||
if (cnt->conf.frame_limit != viddev->fps) {
|
||||
struct video_window vw;
|
||||
int fps;
|
||||
|
||||
if (ioctl(dev, VIDIOCGWIN, &vw) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl VIDIOCGWIN");
|
||||
} else {
|
||||
fps = vw.flags >> PWC_FPS_SHIFT;
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Get Current framerate %d .. trying %d",
|
||||
fps, cnt->conf.frame_limit);
|
||||
}
|
||||
|
||||
fps = cnt->conf.frame_limit;
|
||||
vw.flags = fps << PWC_FPS_SHIFT;
|
||||
|
||||
if (ioctl(dev, VIDIOCSWIN, &vw) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl VIDIOCSWIN");
|
||||
} else if (ioctl(dev, VIDIOCGWIN, &vw) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl VIDIOCGWIN");
|
||||
} else {
|
||||
fps = vw.flags >> PWC_FPS_SHIFT;
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set new framerate %d", fps);
|
||||
}
|
||||
|
||||
viddev->fps = fps;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (cnt->conf.autobright) {
|
||||
|
||||
if (vid_do_autobright(cnt, viddev)) {
|
||||
/* If we already read the VIDIOGPICT - we should not do it again. */
|
||||
if (!make_change) {
|
||||
if (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1)
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)");
|
||||
}
|
||||
|
||||
vid_pic.brightness = viddev->brightness * 256;
|
||||
make_change = 1;
|
||||
}
|
||||
|
||||
} else if (cnt->conf.brightness && cnt->conf.brightness != viddev->brightness) {
|
||||
|
||||
if ((!make_change) && (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1))
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)");
|
||||
|
||||
make_change = 1;
|
||||
vid_pic.brightness = cnt->conf.brightness * 256;
|
||||
viddev->brightness = cnt->conf.brightness;
|
||||
}
|
||||
|
||||
if (make_change) {
|
||||
if (ioctl(dev, VIDIOCSPICT, &vid_pic) == -1)
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSPICT)");
|
||||
}
|
||||
}
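The *256 scaling above maps motion's configuration values (assumed here to use the usual 0-255 range) onto the 16-bit 0-65535 range of the V4L1 struct video_picture fields. A minimal sketch of that conversion, with a hypothetical conf value:

    int conf_contrast = 128;                           /* hypothetical conf value (0-255) */
    unsigned short v4l1_contrast = conf_contrast * 256; /* becomes 32768 in V4L1 units */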
|
||||
|
||||
/*******************************************************************************
|
||||
Video4linux capture routines
|
||||
********************************************************************************/
|
||||
|
||||
/**
|
||||
* v4l_start
|
||||
* Initializes the video device to start capturing and allocates a memory map
* for the video device.
|
||||
*
|
||||
* Returns mmapped buffer for video device or NULL if any error happens.
|
||||
*
|
||||
*/
|
||||
unsigned char *v4l_start(struct video_dev *viddev, int width, int height,int input,
|
||||
int norm, unsigned long freq, int tuner_number)
|
||||
{
|
||||
int dev = viddev->fd;
|
||||
struct video_capability vid_caps;
|
||||
struct video_channel vid_chnl;
|
||||
struct video_tuner vid_tuner;
|
||||
struct video_mbuf vid_buf;
|
||||
struct video_mmap vid_mmap;
|
||||
void *map;
|
||||
|
||||
if (ioctl (dev, VIDIOCGCAP, &vid_caps) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGCAP)");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (vid_caps.type & VID_TYPE_MONOCHROME)
|
||||
viddev->v4l_fmt = VIDEO_PALETTE_GREY;
|
||||
|
||||
if (input != IN_DEFAULT) {
|
||||
memset(&vid_chnl, 0, sizeof(struct video_channel));
|
||||
vid_chnl.channel = input;
|
||||
|
||||
if (ioctl (dev, VIDIOCGCHAN, &vid_chnl) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGCHAN) Input %d",
|
||||
input);
|
||||
} else {
|
||||
vid_chnl.channel = input;
|
||||
vid_chnl.norm = norm;
|
||||
if (ioctl (dev, VIDIOCSCHAN, &vid_chnl) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSCHAN) Input %d"
|
||||
" Standard method %d", input, norm);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (freq) {
|
||||
memset(&vid_tuner, 0, sizeof(struct video_tuner));
|
||||
vid_tuner.tuner = tuner_number;
|
||||
if (ioctl (dev, VIDIOCGTUNER, &vid_tuner) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGTUNER) tuner %d",
|
||||
tuner_number);
|
||||
} else {
|
||||
if (vid_tuner.flags & VIDEO_TUNER_LOW)
|
||||
freq = freq * 16; /* steps of 1/16 KHz */
|
||||
else
|
||||
freq = freq * 10 / 625;
|
||||
|
||||
if (ioctl(dev, VIDIOCSFREQ, &freq) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSFREQ)"
|
||||
" Frequency %ul", freq);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set Tuner to %d Frequency set to %ul",
|
||||
tuner_number, freq);
|
||||
}
|
||||
}
|
||||
|
||||
if (ioctl (dev, VIDIOCGMBUF, &vid_buf) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, NO_ERRNO, "%s: ioctl(VIDIOCGMBUF) - Error device"
|
||||
" does not support memory map\n V4L capturing using read is deprecated!\n"
|
||||
"Motion only supports mmap.");
|
||||
return NULL;
|
||||
} else {
|
||||
map = mmap(0, vid_buf.size, PROT_READ|PROT_WRITE, MAP_SHARED, dev, 0);
|
||||
viddev->size_map = vid_buf.size;
|
||||
|
||||
if (vid_buf.frames > 1) {
|
||||
viddev->v4l_maxbuffer = 2;
|
||||
viddev->v4l_buffers[0] = map;
|
||||
viddev->v4l_buffers[1] = (unsigned char *)map + vid_buf.offsets[1];
|
||||
} else {
|
||||
viddev->v4l_buffers[0] = map;
|
||||
viddev->v4l_maxbuffer = 1;
|
||||
}
|
||||
|
||||
if (MAP_FAILED == map) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: MAP_FAILED");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
viddev->v4l_curbuffer = 0;
|
||||
vid_mmap.format = viddev->v4l_fmt;
|
||||
vid_mmap.frame = viddev->v4l_curbuffer;
|
||||
vid_mmap.width = width;
|
||||
vid_mmap.height = height;
|
||||
|
||||
if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) {
|
||||
MOTION_LOG(WRN, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with YUV420P, "
|
||||
"trying YUV422 palette");
|
||||
viddev->v4l_fmt = VIDEO_PALETTE_YUV422;
|
||||
vid_mmap.format = viddev->v4l_fmt;
|
||||
/* Try again... */
|
||||
if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) {
|
||||
MOTION_LOG(WRN, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with YUV422,"
|
||||
" trying YUYV palette");
|
||||
viddev->v4l_fmt = VIDEO_PALETTE_YUYV;
|
||||
vid_mmap.format = viddev->v4l_fmt;
|
||||
|
||||
if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) {
|
||||
MOTION_LOG(WRN, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with YUYV, trying RGB24 palette");
|
||||
viddev->v4l_fmt = VIDEO_PALETTE_RGB24;
|
||||
vid_mmap.format = viddev->v4l_fmt;
|
||||
/* Try again... */
|
||||
|
||||
if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) {
|
||||
MOTION_LOG(WRN, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with RGB24, trying"
|
||||
"GREYSCALE palette");
|
||||
viddev->v4l_fmt = VIDEO_PALETTE_GREY;
|
||||
vid_mmap.format = viddev->v4l_fmt;
|
||||
|
||||
/* Try one last time... */
|
||||
if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) {
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed with all supported palettes "
|
||||
"- giving up");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch (viddev->v4l_fmt) {
|
||||
case VIDEO_PALETTE_YUV420P:
|
||||
viddev->v4l_bufsize = (width * height * 3) / 2;
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_YUV420P palette");
|
||||
break;
|
||||
case VIDEO_PALETTE_YUV422:
|
||||
viddev->v4l_bufsize = (width * height * 2);
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_YUV422 palette");
|
||||
break;
|
||||
case VIDEO_PALETTE_YUYV:
|
||||
viddev->v4l_bufsize = (width * height * 2);
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_YUYV palette");
|
||||
break;
|
||||
case VIDEO_PALETTE_RGB24:
|
||||
viddev->v4l_bufsize = (width * height * 3);
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_RGB24 palette");
|
||||
break;
|
||||
case VIDEO_PALETTE_GREY:
|
||||
viddev->v4l_bufsize = width * height;
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using VIDEO_PALETTE_GREY palette");
|
||||
break;
|
||||
}
|
||||
|
||||
return map;
|
||||
}
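A minimal, hypothetical call sequence for v4l_start as used from the V4L1 setup path; the device node, resolution and zeroed tuner values are illustrative placeholders, not taken from any real configuration, and motion.h/video.h are assumed to be included:

    struct video_dev dev;
    memset(&dev, 0, sizeof(dev));
    dev.fd = open("/dev/video0", O_RDWR);     /* assumed device node */
    dev.v4l_fmt = VIDEO_PALETTE_YUV420P;      /* preferred palette */
    unsigned char *map = v4l_start(&dev, 320, 240, IN_DEFAULT, 0, 0, 0);
    if (map == NULL)
        return -1;                            /* device could not be configured or mmapped */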
|
||||
|
||||
|
||||
/**
|
||||
* v4l_next
|
||||
* Fetches a video frame from a v4l device
|
||||
*
|
||||
* Parameters:
|
||||
* viddev Pointer to struct containing video device handle and device parameters
|
||||
* map Pointer to the buffer in which the function puts the new image
|
||||
* width Width of image in pixels
|
||||
* height Height of image in pixels
|
||||
*
|
||||
* Returns
|
||||
* 0 Success
|
||||
* V4L_FATAL_ERROR Fatal error
|
||||
* Positive with bit 0 set and bit 1 unset
|
||||
* Non fatal error (not implemented)
|
||||
*/
|
||||
int v4l_next(struct video_dev *viddev, unsigned char *map, int width, int height)
|
||||
{
|
||||
int dev = viddev->fd;
|
||||
int frame = viddev->v4l_curbuffer;
|
||||
struct video_mmap vid_mmap;
|
||||
unsigned char *cap_map;
|
||||
|
||||
sigset_t set, old;
|
||||
|
||||
/* MMAP method is used */
|
||||
vid_mmap.format = viddev->v4l_fmt;
|
||||
vid_mmap.width = width;
|
||||
vid_mmap.height = height;
|
||||
|
||||
/* Block signals during IOCTL */
|
||||
sigemptyset(&set);
|
||||
sigaddset(&set, SIGCHLD);
|
||||
sigaddset(&set, SIGALRM);
|
||||
sigaddset(&set, SIGUSR1);
|
||||
sigaddset(&set, SIGTERM);
|
||||
sigaddset(&set, SIGHUP);
|
||||
pthread_sigmask (SIG_BLOCK, &set, &old);
|
||||
|
||||
cap_map = viddev->v4l_buffers[viddev->v4l_curbuffer];
|
||||
viddev->v4l_curbuffer++;
|
||||
|
||||
if (viddev->v4l_curbuffer >= viddev->v4l_maxbuffer)
|
||||
viddev->v4l_curbuffer = 0;
|
||||
|
||||
vid_mmap.frame = viddev->v4l_curbuffer;
|
||||
|
||||
if (ioctl(dev, VIDIOCMCAPTURE, &vid_mmap) == -1) {
|
||||
MOTION_LOG(ALR, TYPE_VIDEO, SHOW_ERRNO, "%s: mcapture error in proc %d",
|
||||
getpid());
|
||||
sigprocmask (SIG_UNBLOCK, &old, NULL);
|
||||
return V4L_FATAL_ERROR;
|
||||
}
|
||||
|
||||
vid_mmap.frame = frame;
|
||||
|
||||
if (ioctl(dev, VIDIOCSYNC, &vid_mmap.frame) == -1) {
|
||||
MOTION_LOG(ALR, TYPE_VIDEO, SHOW_ERRNO, "%s: sync error in proc %d",
|
||||
getpid());
|
||||
sigprocmask (SIG_UNBLOCK, &old, NULL);
|
||||
}
|
||||
|
||||
pthread_sigmask (SIG_UNBLOCK, &old, NULL); /*undo the signal blocking*/
|
||||
|
||||
switch (viddev->v4l_fmt) {
|
||||
case VIDEO_PALETTE_RGB24:
|
||||
conv_rgb24toyuv420p(map, cap_map, width, height);
|
||||
break;
|
||||
case VIDEO_PALETTE_YUYV:
|
||||
case VIDEO_PALETTE_YUV422:
|
||||
conv_yuv422to420p(map, cap_map, width, height);
|
||||
break;
|
||||
default:
|
||||
memcpy(map, cap_map, viddev->v4l_bufsize);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
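The double-buffer handling above queues the next frame with VIDIOCMCAPTURE before syncing the previous one with VIDIOCSYNC, so grabbing one frame overlaps the capture of the next. A hedged sketch of the calling loop, reusing the dev configured in the sketch after v4l_start and an illustrative 320x240 buffer:

    unsigned char frame[320 * 240 * 3 / 2];    /* YUV420P: 1.5 bytes per pixel */
    int i;
    for (i = 0; i < 100; i++) {                /* capture 100 frames */
        if (v4l_next(&dev, frame, 320, 240) == V4L_FATAL_ERROR)
            break;                             /* device lost - give up */
        /* frame now holds one YUV420P image ready for motion detection */
    }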
|
||||
|
||||
/**
|
||||
* v4l_set_input
|
||||
* Sets the input for the video device and adjusts the picture controls.
* If needed, skips frames for round robin.
|
||||
*
|
||||
* Parameters:
|
||||
* cnt Pointer to context struct
|
||||
* viddev Pointer to struct containing video device handle and device parameters
|
||||
* map Pointer to the buffer in which the function puts the new image
|
||||
* width Width of image in pixels
|
||||
* height Height of image in pixels
|
||||
* conf Pointer to config struct
|
||||
*
|
||||
* Returns nothing
|
||||
*/
|
||||
void v4l_set_input(struct context *cnt, struct video_dev *viddev, unsigned char *map,
|
||||
int width, int height, struct config *conf)
|
||||
{
|
||||
int dev = viddev->fd;
|
||||
struct video_channel vid_chnl;
|
||||
struct video_tuner vid_tuner;
|
||||
unsigned long frequnits , freq;
|
||||
int input = conf->input;
|
||||
int norm = conf->norm;
|
||||
int tuner_number = conf->tuner_number;
|
||||
|
||||
frequnits = freq = conf->frequency;
|
||||
|
||||
if (input != viddev->input || width != viddev->width || height != viddev->height ||
|
||||
freq != viddev->freq || tuner_number != viddev->tuner_number || norm != viddev->norm) {
|
||||
unsigned int skip = conf->roundrobin_skip, i;
|
||||
|
||||
if (freq) {
|
||||
memset(&vid_tuner, 0, sizeof(struct video_tuner));
|
||||
vid_tuner.tuner = tuner_number;
|
||||
|
||||
if (ioctl (dev, VIDIOCGTUNER, &vid_tuner) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGTUNER) tuner number %d",
|
||||
tuner_number);
|
||||
} else {
|
||||
if (vid_tuner.flags & VIDEO_TUNER_LOW)
|
||||
frequnits = freq * 16; /* steps of 1/16 KHz */
|
||||
else
|
||||
frequnits = (freq * 10) / 625;
|
||||
|
||||
if (ioctl(dev, VIDIOCSFREQ, &frequnits) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSFREQ) Frequency %ul",
|
||||
frequnits);
|
||||
return;
|
||||
}
|
||||
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set Tuner to %d Frequency to %ul",
|
||||
tuner_number, frequnits);
|
||||
}
|
||||
}
|
||||
|
||||
memset(&vid_chnl, 0, sizeof(struct video_channel));
|
||||
vid_chnl.channel = input;
|
||||
|
||||
if (ioctl (dev, VIDIOCGCHAN, &vid_chnl) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGCHAN) Input %d",
|
||||
input);
|
||||
} else {
|
||||
vid_chnl.channel = input;
|
||||
vid_chnl.norm = norm;
|
||||
|
||||
if (ioctl (dev, VIDIOCSCHAN, &vid_chnl) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSCHAN) Input %d"
|
||||
" Standard method %d", input, norm);
|
||||
return;
|
||||
}
|
||||
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set Input to %d Standard method to %d",
|
||||
input, norm);
|
||||
}
|
||||
|
||||
v4l_picture_controls(cnt, viddev);
|
||||
conf->input = viddev->input = input;
|
||||
conf->width = viddev->width = width;
|
||||
conf->height = viddev->height = height;
|
||||
conf->frequency = viddev->freq = freq;
|
||||
conf->tuner_number = viddev->tuner_number = tuner_number;
|
||||
conf->norm = viddev->norm = norm;
|
||||
/* skip a few frames if needed */
|
||||
for (i = 0; i < skip; i++)
|
||||
v4l_next(viddev, map, width, height);
|
||||
} else {
|
||||
/* No round robin - we only adjust picture controls */
|
||||
v4l_picture_controls(cnt, viddev);
|
||||
}
|
||||
}
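When several inputs of one capture card are used round robin, the caller switches inputs and the function discards roundrobin_skip frames so the grabber settles. An illustrative sketch, assuming cnt, conf and the dev/map pair from the earlier sketches are in scope; the input number and skip count are placeholders:

    conf->input = 1;                 /* hypothetical second input on the card */
    conf->roundrobin_skip = 2;       /* let the hardware settle for 2 frames */
    v4l_set_input(cnt, &dev, map, 320, 240, conf);
    v4l_next(&dev, map, 320, 240);   /* first frame actually used for detection */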
|
||||
#endif /* !WITHOUT_V4L */

19 video2.c
@@ -69,9 +69,9 @@
|
||||
*/
|
||||
|
||||
#include "motion.h"
|
||||
#include "video.h"
|
||||
#include "video2.h"
|
||||
|
||||
#if !defined(WITHOUT_V4L) && defined(MOTION_V4L2)
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
#define u8 unsigned char
|
||||
#define u16 unsigned short
|
||||
@@ -138,6 +138,10 @@
|
||||
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
|
||||
#endif
|
||||
|
||||
#ifndef V4L2_PIX_FMT_GREY
|
||||
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
|
||||
#endif
|
||||
|
||||
#define ZC301_V4L2_CID_DAC_MAGN V4L2_CID_PRIVATE_BASE
|
||||
#define ZC301_V4L2_CID_GREEN_BALANCE (V4L2_CID_PRIVATE_BASE+1)
|
||||
|
||||
@@ -271,7 +275,7 @@ static int v4l2_select_input(struct config *conf, struct video_dev *viddev,
|
||||
|
||||
if (xioctl(vid_source, VIDIOC_ENUMINPUT, &input) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to query input %d."
|
||||
" VIDIOC_ENUMINPUT, if you use a WEBCAM change input value in conf by -1",
|
||||
" VIDIOC_ENUMINPUT, if you use a WEBCAM change input value in conf by -1",
|
||||
input.index);
|
||||
return -1;
|
||||
}
|
||||
@@ -485,7 +489,8 @@ static int v4l2_set_pix_format(struct context *cnt, src_v4l2_t * vid_source,
|
||||
V4L2_PIX_FMT_YUV422P,
|
||||
V4L2_PIX_FMT_YUV420, /* most efficient for motion */
|
||||
V4L2_PIX_FMT_Y10,
|
||||
V4L2_PIX_FMT_Y12
|
||||
V4L2_PIX_FMT_Y12,
|
||||
V4L2_PIX_FMT_GREY
|
||||
};
|
||||
|
||||
int array_size = sizeof(supported_formats) / sizeof(supported_formats[0]);
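The array above orders the palettes motion prefers, with YUV420 highest among the common ones; the surrounding v4l2_set_pix_format (not shown in this hunk) matches it against the formats the driver enumerates. A hedged sketch of that kind of preference matching, using the standard V4L2 VIDIOC_ENUM_FMT call and the vid_source/xioctl names already used in this file (the loop itself is illustrative, not the literal implementation):

    struct v4l2_fmtdesc fmtd;
    int i, chosen = -1;
    for (i = 0; i < array_size && chosen < 0; i++) {
        memset(&fmtd, 0, sizeof(fmtd));
        fmtd.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        for (fmtd.index = 0; xioctl(vid_source, VIDIOC_ENUM_FMT, &fmtd) != -1; fmtd.index++)
            if (fmtd.pixelformat == supported_formats[i])
                chosen = i;          /* highest-priority palette the driver offers */
    }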
|
||||
@@ -1101,6 +1106,10 @@ int v4l2_next(struct context *cnt, struct video_dev *viddev, unsigned char *map,
|
||||
y10torgb24(cnt->imgs.common_buffer, the_buffer->ptr, width, height, shift);
|
||||
conv_rgb24toyuv420p(map, cnt->imgs.common_buffer, width, height);
|
||||
return 0;
|
||||
case V4L2_PIX_FMT_GREY:
|
||||
conv_greytoyuv420p(map, the_buffer->ptr, width, height);
|
||||
return 0;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1144,4 +1153,4 @@ void v4l2_cleanup(struct video_dev *viddev)
|
||||
free(vid_source);
|
||||
viddev->v4l2_private = NULL;
|
||||
}
|
||||
#endif /* !WITHOUT_V4L && MOTION_V4L2 */
|
||||
#endif /* !WITHOUT_V4L2 */
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
/* video.h
|
||||
/* video2.h
|
||||
*
|
||||
* Include file for video.c
|
||||
* Copyright 2000 by Jeroen Vreeken (pe1rxq@amsat.org)
|
||||
@@ -12,16 +12,12 @@
|
||||
|
||||
#include <sys/mman.h>
|
||||
|
||||
#if !defined(WITHOUT_V4L)
|
||||
#if defined(HAVE_LINUX_VIDEODEV2_H)
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
#include <linux/videodev2.h>
|
||||
#elif defined(HAVE_LINUX_VIDEODEV_H)
|
||||
#include <linux/videodev.h>
|
||||
#elif defined(HAVE_SYS_VIDEOIO_H)
|
||||
#include <sys/videoio.h>
|
||||
#endif
|
||||
#include "vloopback_motion.h"
|
||||
#include "vloopback_motion2.h"
|
||||
#include "pwc-ioctl.h"
|
||||
|
||||
#endif
|
||||
|
||||
/* video4linux stuff */
|
||||
@@ -76,11 +72,11 @@ struct video_dev {
|
||||
int frames;
|
||||
|
||||
/* Device type specific stuff: */
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
/* v4l */
|
||||
int v4l2;
|
||||
void *v4l2_private;
|
||||
|
||||
|
||||
int size_map;
|
||||
int v4l_fmt;
|
||||
unsigned char *v4l_buffers[2];
|
||||
@@ -104,14 +100,9 @@ void bayer2rgb24(unsigned char *dst, unsigned char *src, long int width, long in
|
||||
int vid_do_autobright(struct context *cnt, struct video_dev *viddev);
|
||||
int mjpegtoyuv420p(unsigned char *map, unsigned char *cap_map, int width, int height, unsigned int size);
|
||||
void y10torgb24(unsigned char *map, unsigned char *cap_map, int width, int height, int shift);
|
||||
void conv_greytoyuv420p(unsigned char *map, unsigned char *cap_map, int width, int height);
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
/* video functions, video.c */
|
||||
unsigned char *v4l_start(struct video_dev *viddev, int width, int height,
|
||||
int input, int norm, unsigned long freq, int tuner_number);
|
||||
void v4l_set_input(struct context *cnt, struct video_dev *viddev, unsigned char *map, int width, int height,
|
||||
struct config *conf);
|
||||
int v4l_next(struct video_dev *viddev, unsigned char *map, int width, int height);
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
/* video2.c */
|
||||
unsigned char *v4l2_start(struct context *cnt, struct video_dev *viddev, int width, int height,
|
||||
@@ -121,6 +112,6 @@ void v4l2_set_input(struct context *cnt, struct video_dev *viddev, unsigned char
|
||||
int v4l2_next(struct context *cnt, struct video_dev *viddev, unsigned char *map, int width, int height);
|
||||
void v4l2_close(struct video_dev *viddev);
|
||||
void v4l2_cleanup(struct video_dev *viddev);
|
||||
#endif /* WITHOUT_V4L */
|
||||
#endif /* WITHOUT_V4L2 */
|
||||
|
||||
#endif /* _INCLUDE_VIDEO_H */

113 video_common.c
@@ -11,7 +11,7 @@
|
||||
|
||||
/* For rotation */
|
||||
#include "rotate.h" /* Already includes motion.h */
|
||||
#include "video.h"
|
||||
#include "video2.h"
|
||||
#include "jpegutils.h"
|
||||
|
||||
typedef unsigned char uint8_t;
|
||||
@@ -459,6 +459,55 @@ void y10torgb24(unsigned char *map, unsigned char *cap_map, int width, int heigh
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* conv_greytoyuv420p
|
||||
*
|
||||
*
|
||||
*/
|
||||
void conv_greytoyuv420p(unsigned char *map, unsigned char *cap_map, int width, int height)
|
||||
{
|
||||
/* This is an adaptation of the RGB-to-YUV conversion:
 * for grey input, the three color components share a single value.
 */
|
||||
|
||||
unsigned char *y, *u, *v;
|
||||
unsigned char *r;
|
||||
int i, loop;
|
||||
|
||||
r = cap_map;
|
||||
|
||||
y = map;
|
||||
u = y + width * height;
|
||||
v = u + (width * height) / 4;
|
||||
memset(u, 0, width * height / 4);
|
||||
memset(v, 0, width * height / 4);
|
||||
|
||||
for (loop = 0; loop < height; loop++) {
|
||||
for (i = 0; i < width; i += 2) {
|
||||
*y++ = (9796 * (*r) + 19235 * (*r) + 3736 * (*r)) >> 15;
*u += ((-4784 * (*r) - 9437 * (*r) + 14221 * (*r)) >> 17) + 32;
*v += ((20218 * (*r) - 16941 * (*r) - 3277 * (*r)) >> 17) + 32;
r++;

*y++ = (9796 * (*r) + 19235 * (*r) + 3736 * (*r)) >> 15;
*u += ((-4784 * (*r) - 9437 * (*r) + 14221 * (*r)) >> 17) + 32;
*v += ((20218 * (*r) - 16941 * (*r) - 3277 * (*r)) >> 17) + 32;
r++;
|
||||
|
||||
u++;
|
||||
v++;
|
||||
}
|
||||
|
||||
if ((loop & 1) == 0) {
|
||||
u -= width / 2;
|
||||
v -= width / 2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
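A quick check of the constants used above, with an illustrative grey value: the luma weights sum to 32767, so Y is essentially the grey value itself, while the chroma weights cancel and the four +32 accumulations per 2x2 block leave U = V = 128, i.e. neutral chroma:

    unsigned char g = 200;                                      /* hypothetical grey pixel */
    unsigned char y = (9796 * g + 19235 * g + 3736 * g) >> 15;  /* 32767*200 >> 15 == 199 */
    int u = 4 * (((-4784 * g - 9437 * g + 14221 * g) >> 17) + 32);  /* 4 * 32 == 128 */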
|
||||
|
||||
|
||||
#define MAX2(x, y) ((x) > (y) ? (x) : (y))
|
||||
#define MIN2(x, y) ((x) < (y) ? (x) : (y))
|
||||
|
||||
@@ -535,7 +584,7 @@ int vid_do_autobright(struct context *cnt, struct video_dev *viddev)
|
||||
Wrappers calling the actual capture routines
|
||||
*****************************************************************************/
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
/*
|
||||
* Big lock for vid_start to ensure exclusive access to viddevs while adding
|
||||
* devices during initialization of each thread.
|
||||
@@ -569,7 +618,7 @@ void vid_cleanup(void)
|
||||
pthread_mutex_destroy(&vid_mutex);
|
||||
}
|
||||
|
||||
#endif /* WITHOUT_V4L */
|
||||
#endif /* WITHOUT_V4L2 */
|
||||
|
||||
/**
|
||||
* vid_close
|
||||
@@ -578,10 +627,10 @@ void vid_cleanup(void)
|
||||
*/
|
||||
void vid_close(struct context *cnt)
|
||||
{
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
struct video_dev *dev = viddevs;
|
||||
struct video_dev *prev = NULL;
|
||||
#endif /* WITHOUT_V4L */
|
||||
#endif /* WITHOUT_V4L2 */
|
||||
|
||||
/* Cleanup the netcam part */
|
||||
#ifdef HAVE_MMAL
|
||||
@@ -600,9 +649,9 @@ void vid_close(struct context *cnt)
|
||||
return;
|
||||
}
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
/* Cleanup the v4l part */
|
||||
/* Cleanup the v4l2 part */
|
||||
pthread_mutex_lock(&vid_mutex);
|
||||
while (dev) {
|
||||
if (dev->fd == cnt->video_dev)
|
||||
@@ -623,17 +672,14 @@ void vid_close(struct context *cnt)
|
||||
if (--dev->usage_count == 0) {
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Closing video device %s",
|
||||
dev->video_device);
|
||||
#ifdef MOTION_V4L2
|
||||
if (dev->v4l2) {
|
||||
v4l2_close(dev);
|
||||
v4l2_cleanup(dev);
|
||||
} else {
|
||||
#endif
|
||||
close(dev->fd);
|
||||
munmap(viddevs->v4l_buffers[0], dev->size_map);
|
||||
#ifdef MOTION_V4L2
|
||||
}
|
||||
#endif
|
||||
|
||||
dev->fd = -1;
|
||||
pthread_mutex_lock(&vid_mutex);
|
||||
/* Remove from list */
|
||||
@@ -659,10 +705,10 @@ void vid_close(struct context *cnt)
|
||||
pthread_mutex_unlock(&dev->mutex);
|
||||
}
|
||||
}
|
||||
#endif /* !WITHOUT_V4L */
|
||||
#endif /* !WITHOUT_V4L2 */
|
||||
}
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
/**
|
||||
* vid_v4lx_start
|
||||
@@ -806,11 +852,8 @@ static int vid_v4lx_start(struct context *cnt)
|
||||
dev->owner = -1;
|
||||
dev->v4l_fmt = VIDEO_PALETTE_YUV420P;
|
||||
dev->fps = 0;
|
||||
#ifdef MOTION_V4L2
|
||||
/* First lets try V4L2 and if it's not supported V4L1. */
|
||||
|
||||
dev->v4l2 = 1;
|
||||
|
||||
if (!v4l2_start(cnt, dev, width, height, input, norm, frequency, tuner_number)) {
|
||||
/*
|
||||
* Restore width & height before test with v4l
|
||||
@@ -818,24 +861,9 @@ static int vid_v4lx_start(struct context *cnt)
|
||||
*/
|
||||
dev->width = width;
|
||||
dev->height = height;
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_LINUX_VIDEODEV_H) && (!defined(WITHOUT_V4L))
|
||||
if (!v4l_start(dev, width, height, input, norm, frequency, tuner_number)) {
|
||||
close(dev->fd);
|
||||
pthread_mutexattr_destroy(&dev->attr);
|
||||
pthread_mutex_destroy(&dev->mutex);
|
||||
free(dev);
|
||||
|
||||
pthread_mutex_unlock(&vid_mutex);
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef MOTION_V4L2
|
||||
dev->v4l2 = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (dev->v4l2 == 0) {
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Using V4L1");
|
||||
} else {
|
||||
@@ -872,7 +900,7 @@ static int vid_v4lx_start(struct context *cnt)
|
||||
|
||||
return fd;
|
||||
}
|
||||
#endif /* !WITHOUT_V4L */
|
||||
#endif /* !WITHOUT_V4L2 */
|
||||
|
||||
/**
|
||||
* vid_start
|
||||
@@ -918,13 +946,13 @@ int vid_start(struct context *cnt)
|
||||
cnt->netcam = NULL;
|
||||
}
|
||||
}
|
||||
#ifdef WITHOUT_V4L
|
||||
#ifdef WITHOUT_V4L2
|
||||
else
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, NO_ERRNO, "%s: You must setup netcam_url");
|
||||
#else
|
||||
else
|
||||
dev = vid_v4lx_start(cnt);
|
||||
#endif /*WITHOUT_V4L */
|
||||
#endif /*WITHOUT_V4L2 */
|
||||
|
||||
return dev;
|
||||
}
|
||||
@@ -969,7 +997,7 @@ int vid_next(struct context *cnt, unsigned char *map)
|
||||
|
||||
return netcam_next(cnt, map);
|
||||
}
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
/*
|
||||
* We start a new block so we can make declarations without breaking
|
||||
* gcc 2.95 or older.
|
||||
@@ -999,19 +1027,12 @@ int vid_next(struct context *cnt, unsigned char *map)
|
||||
dev->owner = cnt->threadnr;
|
||||
dev->frames = conf->roundrobin_frames;
|
||||
}
|
||||
#ifdef MOTION_V4L2
|
||||
|
||||
if (dev->v4l2) {
|
||||
v4l2_set_input(cnt, dev, map, width, height, conf);
|
||||
ret = v4l2_next(cnt, dev, map, width, height);
|
||||
} else {
|
||||
#endif
|
||||
#if defined(HAVE_LINUX_VIDEODEV_H) && (!defined(WITHOUT_V4L))
|
||||
v4l_set_input(cnt, dev, map, width, height, conf);
|
||||
ret = v4l_next(dev, map, width, height);
|
||||
#endif
|
||||
#ifdef MOTION_V4L2
|
||||
}
|
||||
#endif
|
||||
|
||||
if (--dev->frames <= 0) {
|
||||
dev->owner = -1;
|
||||
dev->frames = 0;
|
||||
@@ -1023,6 +1044,6 @@ int vid_next(struct context *cnt, unsigned char *map)
|
||||
rotate_map(cnt, map);
|
||||
|
||||
}
|
||||
#endif /*WITHOUT_V4L */
|
||||
#endif /*WITHOUT_V4L2 */
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
#include "rotate.h" /* Already includes motion.h */
|
||||
#include "video_freebsd.h"
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
/* For the v4l stuff: */
|
||||
#include <sys/mman.h>
|
||||
@@ -950,7 +950,7 @@ void vid_cleanup(void)
|
||||
pthread_mutex_destroy(&vid_mutex);
|
||||
}
|
||||
|
||||
#endif /*WITHOUT_V4L*/
|
||||
#endif /*WITHOUT_V4L2*/
|
||||
|
||||
/**
|
||||
* vid_close
|
||||
@@ -959,7 +959,7 @@ void vid_cleanup(void)
|
||||
*/
|
||||
void vid_close(struct context *cnt)
|
||||
{
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
struct video_dev *dev = viddevs;
|
||||
struct video_dev *prev = NULL;
|
||||
#endif
|
||||
@@ -971,7 +971,7 @@ void vid_close(struct context *cnt)
|
||||
return;
|
||||
}
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
/* Cleanup the v4l part */
|
||||
pthread_mutex_lock(&vid_mutex);
|
||||
@@ -1041,7 +1041,7 @@ void vid_close(struct context *cnt)
|
||||
pthread_mutex_unlock(&dev->mutex);
|
||||
}
|
||||
}
|
||||
#endif /* !WITHOUT_V4L */
|
||||
#endif /* !WITHOUT_V4L2 */
|
||||
}
|
||||
|
||||
|
||||
@@ -1061,7 +1061,7 @@ int vid_start(struct context *cnt)
|
||||
cnt->netcam = NULL;
|
||||
}
|
||||
}
|
||||
#ifdef WITHOUT_V4L
|
||||
#ifdef WITHOUT_V4L2
|
||||
else
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, NO_ERRNO, "%s: You must setup netcam_url");
|
||||
#else
|
||||
@@ -1249,7 +1249,7 @@ int vid_start(struct context *cnt)
|
||||
|
||||
pthread_mutex_unlock(&vid_mutex);
|
||||
}
|
||||
#endif /* !WITHOUT_V4L */
|
||||
#endif /* !WITHOUT_V4L2 */
|
||||
|
||||
/* FIXME needed tuner device ?! */
|
||||
return fd_bktr;
|
||||
@@ -1282,7 +1282,7 @@ int vid_next(struct context *cnt, unsigned char *map)
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
struct video_dev *dev;
|
||||
int width, height;
|
||||
@@ -1329,6 +1329,6 @@ int vid_next(struct context *cnt, unsigned char *map)
|
||||
rotate_map(cnt, map);
|
||||
|
||||
|
||||
#endif /* !WITHOUT_V4L */
|
||||
#endif /* !WITHOUT_V4L2 */
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
#ifndef _INCLUDE_VIDEO_FREEBSD_H
|
||||
#define _INCLUDE_VIDEO_FREEBSD_H
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
|
||||
#if defined(__NetBSD__) || defined(__OpenBSD__)
|
||||
#include <dev/ic/bt8xx.h>
|
||||
@@ -20,7 +20,7 @@
|
||||
#include <dev/bktr/ioctl_bt848.h>
|
||||
#endif
|
||||
|
||||
#endif /* !WITHOUT_V4L */
|
||||
#endif /* !WITHOUT_V4L2 */
|
||||
|
||||
/* bktr (video4linux) stuff FIXME more modes not only these */
|
||||
|
||||
@@ -109,7 +109,7 @@ struct video_dev {
|
||||
int frames;
|
||||
|
||||
/* Device type specific stuff: */
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
int capture_method;
|
||||
int v4l_fmt;
|
||||
unsigned char *v4l_buffers[2];
|
||||
@@ -124,7 +124,7 @@ int vid_start(struct context *);
|
||||
int vid_next(struct context *, unsigned char *);
|
||||
void vid_close(struct context *);
|
||||
|
||||
#ifndef WITHOUT_V4L
|
||||
#ifndef WITHOUT_V4L2
|
||||
void vid_init(void);
|
||||
void vid_cleanup(void);
|
||||
#endif
|
||||
|
||||
@@ -1,252 +0,0 @@
|
||||
/*
|
||||
* vloopback_motion.c
|
||||
*
|
||||
* Video loopback functions for motion.
|
||||
* Copyright 2000 by Jeroen Vreeken (pe1rxq@amsat.org)
|
||||
* Copyright 2008 by Angel Carpintero (motiondevelop@gmail.com)
|
||||
* This software is distributed under the GNU public license version 2
|
||||
* See also the file 'COPYING'.
|
||||
*
|
||||
*/
|
||||
#include "vloopback_motion.h"
|
||||
#if defined(HAVE_LINUX_VIDEODEV_H) && (!defined(WITHOUT_V4L)) && (!defined(BSD))
|
||||
#include <sys/utsname.h>
|
||||
#include <dirent.h>
|
||||
|
||||
/**
|
||||
* v4l_open_vidpipe
|
||||
*
|
||||
*/
|
||||
static int v4l_open_vidpipe(void)
|
||||
{
|
||||
int pipe_fd = -1;
|
||||
char pipepath[255];
|
||||
char buffer[255];
|
||||
char *major;
|
||||
char *minor;
|
||||
struct utsname uts;
|
||||
|
||||
if (uname(&uts) < 0) {
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to execute uname");
|
||||
return -1;
|
||||
}
|
||||
|
||||
major = strtok(uts.release, ".");
|
||||
minor = strtok(NULL, ".");
|
||||
|
||||
if ((major == NULL) || (minor == NULL) || (strcmp(major, "2"))) {
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to decipher OS version");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (strcmp(minor, "5") < 0) {
|
||||
FILE *vloopbacks;
|
||||
char *input;
|
||||
char *istatus;
|
||||
char *output;
|
||||
|
||||
vloopbacks = fopen("/proc/video/vloopback/vloopbacks", "r");
|
||||
|
||||
if (!vloopbacks) {
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed to open "
|
||||
"'/proc/video/vloopback/vloopbacks'");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Read vloopback version*/
|
||||
if (!fgets(buffer, sizeof(buffer), vloopbacks)) {
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to read vloopback version");
|
||||
myfclose(vloopbacks);
|
||||
return -1;
|
||||
}
|
||||
|
||||
fprintf(stderr, "\t%s", buffer);
|
||||
|
||||
/* Read explanation line */
|
||||
|
||||
if (!fgets(buffer, sizeof(buffer), vloopbacks)) {
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to read vloopback"
|
||||
" explanation line");
|
||||
myfclose(vloopbacks);
|
||||
return -1;
|
||||
}
|
||||
|
||||
while (fgets(buffer, sizeof(buffer), vloopbacks)) {
|
||||
if (strlen(buffer) > 1) {
|
||||
buffer[strlen(buffer)-1] = 0;
|
||||
input = strtok(NULL, "\t");
|
||||
istatus = strtok(NULL, "\t");
|
||||
output = strtok(NULL, "\t");
|
||||
|
||||
if (istatus[0] == '-') {
|
||||
snprintf(pipepath, sizeof(pipepath), "/dev/%s", input);
|
||||
pipe_fd = open(pipepath, O_RDWR);
|
||||
|
||||
if (pipe_fd >= 0) {
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: \tInput: /dev/%s "
|
||||
"\tOutput: /dev/%s", input, output);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
myfclose(vloopbacks);
|
||||
} else {
|
||||
DIR *dir;
|
||||
struct dirent *dirp;
|
||||
const char prefix[] = "/sys/class/video4linux/";
|
||||
char *ptr, *io;
|
||||
int fd;
|
||||
int low = 9999;
|
||||
int tfd;
|
||||
int tnum;
|
||||
|
||||
if ((dir = opendir(prefix)) == NULL) {
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed to open '%s'",
|
||||
prefix);
|
||||
return -1;
|
||||
}
|
||||
|
||||
while ((dirp = readdir(dir)) != NULL) {
|
||||
if (!strncmp(dirp->d_name, "video", 5)) {
|
||||
strncpy(buffer, prefix, sizeof(buffer));
|
||||
strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer));
|
||||
strncat(buffer, "/name", sizeof(buffer) - strlen(buffer));
|
||||
|
||||
if ((fd = open(buffer, O_RDONLY)) >= 0) {
|
||||
if ((read(fd, buffer, sizeof(buffer)-1)) < 0) {
|
||||
close(fd);
|
||||
continue;
|
||||
}
|
||||
|
||||
ptr = strtok(buffer, " ");
|
||||
|
||||
if (strcmp(ptr, "Video")) {
|
||||
close(fd);
|
||||
continue;
|
||||
}
|
||||
|
||||
major = strtok(NULL, " ");
|
||||
minor = strtok(NULL, " ");
|
||||
io = strtok(NULL, " \n");
|
||||
|
||||
if (strcmp(major, "loopback") || strcmp(io, "input")) {
|
||||
close(fd);
|
||||
continue;
|
||||
}
|
||||
|
||||
if ((ptr = strtok(buffer, " ")) == NULL) {
|
||||
close(fd);
|
||||
continue;
|
||||
}
|
||||
|
||||
tnum = atoi(minor);
|
||||
|
||||
if (tnum < low) {
|
||||
mystrcpy(buffer, "/dev/");
|
||||
strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer));
|
||||
if ((tfd = open(buffer, O_RDWR)) >= 0) {
|
||||
strncpy(pipepath, buffer, sizeof(pipepath));
|
||||
|
||||
if (pipe_fd >= 0)
|
||||
close(pipe_fd);
|
||||
|
||||
pipe_fd = tfd;
|
||||
low = tnum;
|
||||
}
|
||||
}
|
||||
close(fd);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
|
||||
if (pipe_fd >= 0)
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Opened %s as input",
|
||||
pipepath);
|
||||
}
|
||||
|
||||
return pipe_fd;
|
||||
}
|
||||
|
||||
/**
|
||||
* v4l_startpipe
|
||||
*
|
||||
*/
|
||||
static int v4l_startpipe(const char *dev_name, int width, int height, int type)
|
||||
{
|
||||
int dev;
|
||||
struct video_picture vid_pic;
|
||||
struct video_window vid_win;
|
||||
|
||||
if (!strcmp(dev_name, "-")) {
|
||||
dev = v4l_open_vidpipe();
|
||||
} else {
|
||||
dev = open(dev_name, O_RDWR);
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Opened %s as input",
|
||||
dev_name);
|
||||
}
|
||||
|
||||
if (dev < 0) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: Opening %s as input failed",
|
||||
dev_name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (ioctl(dev, VIDIOCGPICT, &vid_pic) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGPICT)");
|
||||
return -1;
|
||||
}
|
||||
|
||||
vid_pic.palette = type;
|
||||
|
||||
if (ioctl(dev, VIDIOCSPICT, &vid_pic) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSPICT)");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (ioctl(dev, VIDIOCGWIN, &vid_win) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCGWIN)");
|
||||
return -1;
|
||||
}
|
||||
|
||||
vid_win.height = height;
|
||||
vid_win.width = width;
|
||||
|
||||
if (ioctl(dev, VIDIOCSWIN, &vid_win) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOCSWIN)");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
||||
/**
|
||||
* v4l_putpipe
|
||||
*
|
||||
*/
|
||||
static int v4l_putpipe(int dev, unsigned char *image, int size)
|
||||
{
|
||||
return write(dev, image, size);
|
||||
}
|
||||
|
||||
/**
|
||||
* vid_startpipe
|
||||
*
|
||||
*/
|
||||
int vid_startpipe(const char *dev_name, int width, int height, int type)
|
||||
{
|
||||
return v4l_startpipe(dev_name, width, height, type);
|
||||
}
|
||||
|
||||
/**
|
||||
* vid_putpipe
|
||||
*
|
||||
*/
|
||||
int vid_putpipe (int dev, unsigned char *image, int size)
|
||||
{
|
||||
return v4l_putpipe(dev, image, size);
|
||||
}
|
||||
#endif /* !WITHOUT_V4L && !BSD */

225 vloopback_motion2.c Normal file
@@ -0,0 +1,225 @@
|
||||
/*
|
||||
* vloopback_motion2.c
|
||||
*
|
||||
* Video loopback functions for motion.
|
||||
* Copyright 2000 by Jeroen Vreeken (pe1rxq@amsat.org)
|
||||
* Copyright 2008 by Angel Carpintero (motiondevelop@gmail.com)
|
||||
* This software is distributed under the GNU public license version 2
|
||||
* See also the file 'COPYING'.
|
||||
*
|
||||
*/
|
||||
#include "vloopback_motion2.h"
|
||||
#if (!defined(WITHOUT_V4L2)) && (!defined(BSD))
|
||||
#include <dirent.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <linux/videodev2.h>
|
||||
|
||||
/**
|
||||
* v4l2_open_vidpipe
|
||||
*
|
||||
*/
|
||||
static int v4l2_open_vidpipe(void)
|
||||
{
|
||||
int pipe_fd = -1;
|
||||
char pipepath[255];
|
||||
char buffer[255];
|
||||
DIR *dir;
|
||||
struct dirent *dirp;
|
||||
const char prefix[] = "/sys/class/video4linux/";
|
||||
int fd,tfd;
|
||||
int len,min;
|
||||
|
||||
if ((dir = opendir(prefix)) == NULL) {
|
||||
MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed to open '%s'", prefix);
|
||||
return -1;
|
||||
}
|
||||
|
||||
while ((dirp = readdir(dir)) != NULL) {
|
||||
if (!strncmp(dirp->d_name, "video", 5)) {
|
||||
strncpy(buffer, prefix, sizeof(buffer));
|
||||
strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer));
|
||||
strncat(buffer, "/name", sizeof(buffer) - strlen(buffer));
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, SHOW_ERRNO, "%s: Opening buffer: %s",buffer);
|
||||
if ((fd = open(buffer, O_RDONLY)) >= 0) {
|
||||
if ((len = read(fd, buffer, sizeof(buffer)-1)) < 0) {
|
||||
close(fd);
|
||||
continue;
|
||||
}
|
||||
buffer[len]=0;
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, SHOW_ERRNO, "%s: Read buffer: %s",buffer);
|
||||
if (strncmp(buffer, "Loopback video device",21)) { /* weird stuff after minor */
|
||||
close(fd);
|
||||
continue;
|
||||
}
|
||||
min = atoi(&buffer[21]);
|
||||
strcpy(buffer, "/dev/");
|
||||
strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer));
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO,"%s: found video device '%s' %d", buffer,min);
|
||||
if ((tfd = open(buffer, O_RDWR)) >= 0) {
|
||||
strncpy(pipepath, buffer, sizeof(pipepath));
|
||||
if (pipe_fd >= 0) close(pipe_fd);
|
||||
pipe_fd = tfd;
|
||||
break;
|
||||
}
|
||||
}
|
||||
close(fd);
|
||||
}
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
|
||||
if (pipe_fd >= 0)
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Opened %s as pipe output", pipepath);
|
||||
|
||||
return pipe_fd;
|
||||
}
|
||||
|
||||
typedef struct capent {const char *cap; int code;} capentT;
|
||||
capentT cap_list[] ={
|
||||
{"V4L2_CAP_VIDEO_CAPTURE" ,0x00000001 },
|
||||
{"V4L2_CAP_VIDEO_CAPTURE_MPLANE" ,0x00001000 },
|
||||
{"V4L2_CAP_VIDEO_OUTPUT" ,0x00000002 },
|
||||
{"V4L2_CAP_VIDEO_OUTPUT_MPLANE" ,0x00002000 },
|
||||
{"V4L2_CAP_VIDEO_M2M" ,0x00004000 },
|
||||
{"V4L2_CAP_VIDEO_M2M_MPLANE" ,0x00008000 },
|
||||
{"V4L2_CAP_VIDEO_OVERLAY" ,0x00000004 },
|
||||
{"V4L2_CAP_VBI_CAPTURE" ,0x00000010 },
|
||||
{"V4L2_CAP_VBI_OUTPUT" ,0x00000020 },
|
||||
{"V4L2_CAP_SLICED_VBI_CAPTURE" ,0x00000040 },
|
||||
{"V4L2_CAP_SLICED_VBI_OUTPUT" ,0x00000080 },
|
||||
{"V4L2_CAP_RDS_CAPTURE" ,0x00000100 },
|
||||
{"V4L2_CAP_VIDEO_OUTPUT_OVERLAY" ,0x00000200 },
|
||||
{"V4L2_CAP_HW_FREQ_SEEK" ,0x00000400 },
|
||||
{"V4L2_CAP_RDS_OUTPUT" ,0x00000800 },
|
||||
{"V4L2_CAP_TUNER" ,0x00010000 },
|
||||
{"V4L2_CAP_AUDIO" ,0x00020000 },
|
||||
{"V4L2_CAP_RADIO" ,0x00040000 },
|
||||
{"V4L2_CAP_MODULATOR" ,0x00080000 },
|
||||
{"V4L2_CAP_SDR_CAPTURE" ,0x00100000 },
|
||||
{"V4L2_CAP_EXT_PIX_FORMAT" ,0x00200000 },
|
||||
{"V4L2_CAP_SDR_OUTPUT" ,0x00400000 },
|
||||
{"V4L2_CAP_READWRITE" ,0x01000000 },
|
||||
{"V4L2_CAP_ASYNCIO" ,0x02000000 },
|
||||
{"V4L2_CAP_STREAMING" ,0x04000000 },
|
||||
{"V4L2_CAP_DEVICE_CAPS" ,0x80000000 },
|
||||
{"Last",0}
|
||||
};
|
||||
|
||||
static void show_vcap(struct v4l2_capability *cap) {
|
||||
unsigned int vers = cap->version;
|
||||
unsigned int c = cap->capabilities;
|
||||
int i;
|
||||
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Pipe Device");
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: cap.driver: %s",cap->driver);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: cap.card: %s",cap->card);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: cap.bus_info: %s",cap->bus_info);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: cap.card: %u.%u.%u",(vers >> 16) & 0xFF,(vers >> 8) & 0xFF,vers & 0xFF);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Device capabilities");
|
||||
for (i=0;cap_list[i].code;i++)
|
||||
if (c & cap_list[i].code)
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: %s",cap_list[i].cap);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: ------------------------");
|
||||
}
|
||||
|
||||
static void show_vfmt(struct v4l2_format *v) {
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: type: type: %d",v->type);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.width: %d",v->fmt.pix.width);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.height: %d",v->fmt.pix.height);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.pixelformat: %d",v->fmt.pix.pixelformat);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.sizeimage: %d",v->fmt.pix.sizeimage);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.field: %d",v->fmt.pix.field);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.bytesperline: %d",v->fmt.pix.bytesperline);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: fmt.pix.colorspace: %d",v->fmt.pix.colorspace);
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: ------------------------");
|
||||
}
|
||||
|
||||
/**
|
||||
* v4l2_startpipe
|
||||
*
|
||||
*/
|
||||
static int v4l2_startpipe(const char *dev_name, int width, int height, int type)
|
||||
{
|
||||
int dev;
|
||||
struct v4l2_format v;
|
||||
struct v4l2_capability vc;
|
||||
|
||||
if (!strcmp(dev_name, "-")) {
|
||||
dev = v4l2_open_vidpipe();
|
||||
} else {
|
||||
dev = open(dev_name, O_RDWR);
|
||||
MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Opened %s as pipe output", dev_name);
|
||||
}
|
||||
|
||||
if (dev < 0) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: Opening %s as pipe output failed", dev_name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (ioctl(dev, VIDIOC_QUERYCAP, &vc) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOC_QUERYCAP)");
|
||||
return -1;
|
||||
}
|
||||
|
||||
show_vcap(&vc);
|
||||
|
||||
memset(&v, 0, sizeof(v));
|
||||
|
||||
v.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
|
||||
|
||||
if (ioctl(dev, VIDIOC_G_FMT, &v) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOC_G_FMT)");
|
||||
return -1;
|
||||
}
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Original pipe specifications");
|
||||
show_vfmt(&v);
|
||||
|
||||
v.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
|
||||
v.fmt.pix.width = width;
|
||||
v.fmt.pix.height = height;
|
||||
v.fmt.pix.pixelformat = type;
|
||||
v.fmt.pix.sizeimage = 3 * width * height / 2;
|
||||
v.fmt.pix.bytesperline = width;
|
||||
v.fmt.pix.field = V4L2_FIELD_NONE;
|
||||
v.fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Proposed pipe specifications");
|
||||
show_vfmt(&v);
|
||||
|
||||
if (ioctl(dev,VIDIOC_S_FMT, &v) == -1) {
|
||||
MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: ioctl (VIDIOC_S_FMT)");
|
||||
return -1;
|
||||
}
|
||||
|
||||
MOTION_LOG(INF, TYPE_VIDEO, NO_ERRNO, "%s: Final pipe specifications");
|
||||
show_vfmt(&v);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
||||
/**
|
||||
* v4l2_putpipe
|
||||
*
|
||||
*/
|
||||
static int v4l2_putpipe(int dev, unsigned char *image, int size)
|
||||
{
|
||||
return write(dev, image, size);
|
||||
}
|
||||
|
||||
/**
|
||||
* vid_startpipe
|
||||
*
|
||||
*/
|
||||
int vid_startpipe(const char *dev_name, int width, int height, int type)
|
||||
{
|
||||
return v4l2_startpipe(dev_name, width, height, type);
|
||||
}
|
||||
|
||||
/**
|
||||
* vid_putpipe
|
||||
*
|
||||
*/
|
||||
int vid_putpipe (int dev, unsigned char *image, int size)
|
||||
{
|
||||
return v4l2_putpipe(dev, image, size);
|
||||
}
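A hedged sketch of how these pipe helpers are driven; the device node and frame size are illustrative, and for YUV420 one frame is width*height*3/2 bytes, matching the sizeimage set in v4l2_startpipe above:

    int pipe_fd = vid_startpipe("/dev/video1", 320, 240, V4L2_PIX_FMT_YUV420);
    if (pipe_fd >= 0) {
        unsigned char image[320 * 240 * 3 / 2];  /* one YUV420 frame */
        memset(image, 0x80, sizeof(image));      /* dummy mid-grey frame */
        vid_putpipe(pipe_fd, image, sizeof(image));
        close(pipe_fd);
    }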
|
||||
#endif /* !WITHOUT_V4L2 && !BSD */