Add static file processing via netcam_url

1.  Add static movie processing via netcam_url
2.  Remove file netcam_url which only processed single image
3.  Remove experimental protocol of mjpg
Mr-DaveDev
2018-08-19 20:03:17 -06:00
committed by Mr-Dave
parent 5d863a19dc
commit 90763ba943
8 changed files with 83 additions and 427 deletions

View File

@@ -739,6 +739,7 @@ static int init_camera_type(struct context *cnt){
if (cnt->conf.netcam_url) {
if ((strncmp(cnt->conf.netcam_url,"mjpeg",5) == 0) ||
(strncmp(cnt->conf.netcam_url,"v4l2" ,4) == 0) ||
(strncmp(cnt->conf.netcam_url,"file" ,4) == 0) ||
(strncmp(cnt->conf.netcam_url,"rtmp" ,4) == 0) ||
(strncmp(cnt->conf.netcam_url,"rtsp" ,4) == 0)) {
cnt->camera_type = CAMERA_TYPE_RTSP;
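
To see the effect of this hunk in isolation: with "file" added to the prefix list, a netcam_url such as file:///home/user/video.mkv is now routed to the ffmpeg-backed CAMERA_TYPE_RTSP pipeline instead of the legacy HTTP netcam code. Below is a minimal, self-contained sketch of that routing decision, not Motion's own code; the classify_netcam_url helper and the enum values are illustrative only.

/*
 * Illustrative sketch (not Motion code) of the prefix test above:
 * a "file" prefix now selects the ffmpeg-backed handler.
 */
#include <stdio.h>
#include <string.h>

enum camera_type { CAMERA_TYPE_UNKNOWN, CAMERA_TYPE_NETCAM, CAMERA_TYPE_RTSP };

static enum camera_type classify_netcam_url(const char *url)
{
    if (url == NULL) return CAMERA_TYPE_UNKNOWN;
    if ((strncmp(url, "mjpeg", 5) == 0) ||
        (strncmp(url, "v4l2",  4) == 0) ||
        (strncmp(url, "file",  4) == 0) ||   /* new in this commit */
        (strncmp(url, "rtmp",  4) == 0) ||
        (strncmp(url, "rtsp",  4) == 0))
        return CAMERA_TYPE_RTSP;
    return CAMERA_TYPE_NETCAM;               /* http/ftp fall through */
}

int main(void)
{
    /* A file URL now selects the ffmpeg-backed handler, an http URL does not. */
    printf("%d\n", classify_netcam_url("file:///home/user/video.mkv") == CAMERA_TYPE_RTSP); /* 1 */
    printf("%d\n", classify_netcam_url("http://192.168.1.10/img.jpg") == CAMERA_TYPE_RTSP); /* 0 */
    return 0;
}

In Motion itself this check lives in init_camera_type (shown above) and determines whether the ffmpeg-based netcam_rtsp code handles the source.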

View File

@@ -182,8 +182,14 @@
<p></p>
<a name="Basic_Setup_Static"></a>
<strong>Static files</strong> can also be processed with a bit of additional setup via a v4l2loopback device.
Install the loopback software as described in the
<strong>Static files</strong> can also be processed by Motion in one of two ways. The first method
is to use the <a href="#netcam_url" >netcam_url</a> option with a prefix of file://. With
this method, Motion will process the file at the speed specified by
<a href="#framerate" >framerate</a>. This allows the user to either speed up or slow down the video
processing to suit the particular need.
<p></p>
The second option for processing a static file requires a bit of additional setup and uses a
v4l2loopback device. To set this up, first install the loopback software as described in the
<code><strong><a href="#OptDetail_Pipe" >Output - Pipe Options</a></strong></code>
section of this guide to create a /dev/videoX device and then use software such as ffmpeg to stream
the static file into the v4l2 device. e.g.
@@ -2732,15 +2738,6 @@
</ul>
<p></p>
<i>mjpg://</i>
<ul>
This prefix is experimental and when Motion sees this prefix it will replace the mjpg with http and
then process the network camera using the extremely experimental method of grabbing motion jpg images.
<p></p>
The functionality of this option is unknown and may be removed in future releases.
</ul>
<p></p>
<i>rtsp://</i>
<ul>
This prefix is the standard for all modern network cameras. It is recommended that users search the
@@ -2784,12 +2781,15 @@
<i>file://</i>
<ul>
This option allows for the processing of a static image. A process outside of Motion is required to
replace the image.
This option allows for the processing of an existing movie file. Motion will open the file and
process it at the framerate specified in the Motion configuration file. Note that since the
file may have been created at a different framerate than the one specified in the Motion config file,
it may be processed at either a faster or slower rate than real time.
<p></p>
A scenario for this would be processing a large number of previously saved images. The file
option would point to a single location such as <code>/home/user/test/currentimage.jpg</code> and a script
running external to Motion would loop through all the images and copy them into <code>currentimage.jpg</code>.
A sample format for the netcam_url would be <code>netcam_url file:///home/user/cam1/cam1_20180817084027.mkv</code>
<p></p>
This option may be useful for "reprocessing" a movie created by Motion to fine-tune the detection options.
</ul>
<p></p>
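
Since the file is decoded at the configured framerate rather than at its native rate, the wall-clock processing time is simply the number of frames divided by the framerate option. The following is a small illustrative calculation, plain arithmetic outside of Motion; the numbers are assumptions chosen for the example.

/*
 * Illustrative arithmetic only (not Motion code): how the configured
 * framerate changes wall-clock processing time for a recorded file.
 */
#include <stdio.h>

int main(void)
{
    double native_fps   = 30.0;          /* framerate the file was recorded at (assumed) */
    double motion_fps   = 15.0;          /* framerate option in motion.conf (assumed)    */
    double total_frames = 30.0 * 60.0;   /* a 60 second clip recorded at 30 fps          */

    double real_duration    = total_frames / native_fps;  /* 60 s  */
    double process_duration = total_frames / motion_fps;  /* 120 s */

    printf("real time: %.0f s, processed in about: %.0f s\n",
           real_duration, process_duration);
    return 0;
}

So a clip recorded at 30 fps but processed with a framerate of 15 takes roughly twice real time, which is often what is wanted when reprocessing a movie to fine-tune detection settings.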

View File

@@ -321,11 +321,6 @@ static void *netcam_handler_loop(void *arg)
,_("camera re-connected"));
open_error = 0;
}
} else if (netcam->caps.streaming == NCS_BLOCK) { /* MJPG-Block streaming */
/*
* Since we cannot move in the stream here, because we will read past the
* MJPG-block-header, error handling is done while reading MJPG blocks.
*/
}
}
@@ -735,14 +730,9 @@ int netcam_start(struct context *cnt){
} else if ((url.service) && (!strcmp(url.service, "ftp"))) {
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO,_("now calling netcam_setup_ftp"));
retval = netcam_setup_ftp(netcam, &url);
} else if ((url.service) && (!strcmp(url.service, "file"))) {
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO,_("now calling netcam_setup_file()"));
retval = netcam_setup_file(netcam, &url);
} else if ((url.service) && (!strcmp(url.service, "mjpg"))) {
retval = netcam_setup_mjpg(netcam, &url);
} else {
MOTION_LOG(CRT, TYPE_NETCAM, NO_ERRNO
,_("Invalid netcam service '%s' - must be http, ftp, mjpg, mjpeg, v4l2 or file.")
,_("Invalid netcam service '%s' - must be http or ftp")
, url.service);
retval = -1;
}

View File

@@ -63,7 +63,6 @@ typedef struct netcam_context *netcam_context_ptr;
#define NCS_UNSUPPORTED 0 /* streaming is not supported */
#define NCS_MULTIPART 1 /* streaming is done via multipart */
#define NCS_BLOCK 2 /* streaming is done via MJPG-block */
/*
* struct url_t is used when parsing the user-supplied URL, as well as
@@ -180,7 +179,6 @@ typedef struct netcam_context {
buffer for the HTTP data */
struct ftp_context *ftp; /* this structure contains the context for FTP connection */
struct file_context *file; /* this structure contains the context for FILE connection */
int (*get_image)(netcam_context_ptr);
/* Function to fetch the image from

View File

@@ -17,8 +17,6 @@
#define CONNECT_TIMEOUT 10 /* Timeout on remote connection attempt */
#define READ_TIMEOUT 5 /* Default timeout on recv requests */
#define POLLING_TIMEOUT READ_TIMEOUT /* File polling timeout [s] */
#define POLLING_TIME 500*1000*1000 /* File polling time quantum [ns] (500ms) */
#define MAX_HEADER_RETRIES 5 /* Max tries to find a header record */
#define MINVAL(x, y) ((x) < (y) ? (x) : (y))
@@ -40,10 +38,6 @@ static const char *connect_req_keepalive = "Connection: Keep-Alive\r\n";
static const char *connect_auth_req = "Authorization: Basic %s\r\n";
tfile_context *file_new_context(void);
void file_free_context(tfile_context* ctxt);
/**
* check_quote
*
@@ -1463,349 +1457,6 @@ int netcam_setup_html(netcam_context_ptr netcam, struct url_t *url)
return 0;
}
/**
* netcam_mjpg_buffer_refill
*
* This routing reads content from the MJPG-camera until the response
* buffer of the specified netcam_context is full. If the connection is
* lost during this operation, it tries to re-connect.
*
* Parameters:
* netcam Pointer to a netcam_context structure
*
* Returns: The number of read bytes,
* or -1 if an fatal connection error occurs.
*/
static int netcam_mjpg_buffer_refill(netcam_context_ptr netcam)
{
int retval;
if (netcam->response->buffer_left > 0)
return netcam->response->buffer_left;
while (1) {
retval = rbuf_read_bufferful(netcam);
if (retval <= 0) { /* If we got 0, we timeoutted. */
MOTION_LOG(ALR, TYPE_NETCAM, NO_ERRNO
,_("Read error, trying to reconnect.."));
/* We may have lost the connexion */
if (netcam_http_request(netcam) < 0) {
MOTION_LOG(CRT, TYPE_NETCAM, NO_ERRNO
,_("lost the cam."));
return -1; /* We REALLY lost the cam... bail out for now. */
}
}
if (retval > 0)
break;
}
netcam->response->buffer_left = retval;
netcam->response->buffer_pos = netcam->response->buffer;
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO
,_("Refilled buffer with [%d] bytes from the network."), retval);
return retval;
}
/**
* netcam_read_mjpg_jpeg
*
* This routine reads from a netcam using a MJPG-chunk based
* protocol, used by Linksys WVC200 for example.
* This implementation has been made by reverse-engineering
* the protocol, so it may contain bugs and should be considered as
* experimental.
*
* Protocol explanation:
*
* The stream consists of JPG pictures, spanned across multiple
* MJPG chunks (in general 3 chunks, altough that's not guaranteed).
*
* Each data chunk can range from 1 to 65535 bytes + a header, altough
* i have not seen anything bigger than 20000 bytes + a header.
*
* One MJPG chunk is constituted by a header plus the chunk data.
* The chunk header is of fixed size, and the following data size
* and position in the frame is specified in the chunk header.
*
* From what i have seen on WVC200 cameras, the stream always begins
* on JPG frame boundary, so you don't have to worry about beginning
* in the middle of a frame.
*
* See netcam.h for the mjpg_header structure and more details.
*
* Parameters:
* netcam Pointer to a netcam_context structure
*
* Returns: 0 if an image was obtained from the camera,
* or -1 if an error occurred.
*/
static int netcam_read_mjpg_jpeg(netcam_context_ptr netcam)
{
netcam_buff_ptr buffer;
mjpg_header mh;
size_t read_bytes;
int retval;
/*
* Initialisation - set our local pointers to the context
* information.
*/
buffer = netcam->receiving;
/* Assure the target buffer is empty. */
buffer->used = 0;
if (netcam_mjpg_buffer_refill(netcam) < 0)
return -1;
/* Loop until we have a complete JPG. */
while (1) {
read_bytes = 0;
while (read_bytes < sizeof(mh)) {
/* Transfer what we have in buffer in the header structure. */
retval = rbuf_flush(netcam, ((char *)&mh) + read_bytes, sizeof(mh) - read_bytes);
read_bytes += retval;
MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO
,_("Read [%d/%d] header bytes."), read_bytes, sizeof(mh));
/* If we don't have received a full header, refill our buffer. */
if (read_bytes < sizeof(mh)) {
if (netcam_mjpg_buffer_refill(netcam) < 0)
return -1;
}
}
/* Now check the validity of our header. */
if (strncmp(mh.mh_magic, MJPG_MH_MAGIC, MJPG_MH_MAGIC_SIZE)) {
MOTION_LOG(WRN, TYPE_NETCAM, NO_ERRNO
,_("Invalid header received, reconnecting"));
/*
* We shall reconnect to restart the stream, and get a chance
* to resync.
*/
if (netcam_http_request(netcam) < 0)
return -1; /* We lost the cam... bail out. */
/* Even there, we need to resync. */
buffer->used = 0;
continue ;
}
/* Make room for the chunk. */
netcam_check_buffsize(buffer, (int) mh.mh_chunksize);
read_bytes = 0;
while (read_bytes < mh.mh_chunksize) {
retval = rbuf_flush(netcam, buffer->ptr + buffer->used + read_bytes,
mh.mh_chunksize - read_bytes);
read_bytes += retval;
MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO
,_("Read [%d/%d] chunk bytes, [%d/%d] total")
,read_bytes, mh.mh_chunksize
,buffer->used + read_bytes, mh.mh_framesize);
if (retval < (int) (mh.mh_chunksize - read_bytes)) {
/* MOTION_LOG(EMG, TYPE_NETCAM, NO_ERRNO, "Chunk incomplete, going to refill."); */
if (netcam_mjpg_buffer_refill(netcam) < 0)
return -1;
}
}
buffer->used += read_bytes;
MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO
,_("Chunk complete, buffer used [%d] bytes."), buffer->used);
/* Is our JPG image complete ? */
if (mh.mh_framesize == buffer->used) {
MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO
,_("Image complete, buffer used [%d] bytes."), buffer->used);
break;
}
/* MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO, "Rlen now at [%d] bytes", rlen); */
}
netcam_image_read_complete(netcam);
return 0;
}
/**
* netcam_setup_mjpg
* This function will parse the netcam url, connect to the camera,
* set its type to MJPG-Streaming, and the get_image method accordingly.
*
* Parameters
*
* netcam Pointer to the netcam_context for the camera
* url Pointer to the url of the camera
*
* Returns: 0 on success (camera link ok) or -1 if an error occurred.
*
*/
int netcam_setup_mjpg(netcam_context_ptr netcam, struct url_t *url)
{
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO,_("now calling netcam_setup_mjpg()"));
netcam->timeout.tv_sec = READ_TIMEOUT;
strcpy(url->service, "http"); /* Put back a real URL service. */
/*
* This netcam is http-based, so build the required URL and
* structures, like the connection-string and so on.
*/
if (netcam_http_build_url(netcam, url) != 0)
return -1;
/* Then we will send our http request and get headers. */
if (netcam_http_request(netcam) < 0)
return -1;
/* We have a special type of streaming camera. */
netcam->caps.streaming = NCS_BLOCK;
/*
* We are positionned right just at the start of the first MJPG
* header, so don't move anymore, initialization complete.
*/
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO
,_("connected, going on to read and decode MJPG chunks."));
netcam->get_image = netcam_read_mjpg_jpeg;
return 0;
}
/**
* netcam_read_file_jpeg
*
* This routine reads local image file. ( netcam_url file:///path/image.jpg )
* The current implementation is still a little experimental,
* and needs some additional code for error detection and
* recovery.
*/
static int netcam_read_file_jpeg(netcam_context_ptr netcam)
{
int loop_counter = 0;
MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO,_("Begin"));
netcam_buff_ptr buffer;
int len;
struct stat statbuf;
/* Point to our working buffer. */
buffer = netcam->receiving;
buffer->used = 0;
/*int fstat(int filedes, struct stat *buf);*/
do {
if (stat(netcam->file->path, &statbuf)) {
MOTION_LOG(CRT, TYPE_NETCAM, SHOW_ERRNO
,_("stat(%s) error"), netcam->file->path);
return -1;
}
MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO
,_("statbuf.st_mtime[%d] != last_st_mtime[%d]")
, statbuf.st_mtime, netcam->file->last_st_mtime);
/* its waits POLLING_TIMEOUT */
if (loop_counter>((POLLING_TIMEOUT*1000*1000)/(POLLING_TIME/1000))) {
MOTION_LOG(CRT, TYPE_NETCAM, NO_ERRNO
,_("waiting new file image timeout"));
return -1;
}
MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO
,_("delay waiting new file image "));
//its waits 5seconds - READ_TIMEOUT
//SLEEP(netcam->timeout.tv_sec, netcam->timeout.tv_usec*1000);
SLEEP(0, POLLING_TIME); // its waits 500ms
/*return -1;*/
loop_counter++;
} while (statbuf.st_mtime == netcam->file->last_st_mtime);
netcam->file->last_st_mtime = statbuf.st_mtime;
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO
,_("processing new file image - st_mtime %d"), netcam->file->last_st_mtime);
/* Assure there's enough room in the buffer. */
while (buffer->size < (size_t)statbuf.st_size)
netcam_check_buffsize(buffer, statbuf.st_size);
/* Do the read */
netcam->file->control_file_desc = open(netcam->file->path, O_RDONLY|O_CLOEXEC);
if (netcam->file->control_file_desc < 0) {
MOTION_LOG(CRT, TYPE_NETCAM, NO_ERRNO
,_("open(%s) error: %d")
,netcam->file->path, netcam->file->control_file_desc);
return -1;
}
if ((len = read(netcam->file->control_file_desc,
buffer->ptr + buffer->used, statbuf.st_size)) < 0) {
MOTION_LOG(CRT, TYPE_NETCAM, NO_ERRNO
,_("read(%s) error: %d"), netcam->file->control_file_desc, len);
return -1;
}
buffer->used += len;
close(netcam->file->control_file_desc);
netcam_image_read_complete(netcam);
MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO,_("End"));
return 0;
}
tfile_context *file_new_context(void)
{
/* Note that mymalloc will exit on any problem. */
return mymalloc(sizeof(tfile_context));
}
void file_free_context(tfile_context* ctxt)
{
if (ctxt == NULL)
return;
free(ctxt->path);
free(ctxt);
}
int netcam_setup_file(netcam_context_ptr netcam, struct url_t *url)
{
if ((netcam->file = file_new_context()) == NULL)
return -1;
/*
* We copy the strings out of the url structure into the ftp_context
* structure. By setting url->{string} to NULL we effectively "take
* ownership" of the string away from the URL (i.e. it won't be freed
* when we cleanup the url structure later).
*/
netcam->file->path = url->path;
url->path = NULL;
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO
,_("netcam->file->path %s"), netcam->file->path);
netcam->get_image = netcam_read_file_jpeg;
return 0;
}
/**
* netcam_recv

View File

@@ -6,42 +6,10 @@
#include <netinet/in.h>
#include <sys/socket.h>
#define MJPG_MH_MAGIC "MJPG"
#define MJPG_MH_MAGIC_SIZE 4
typedef struct file_context {
char *path; /* the path within the URL */
int control_file_desc; /* file descriptor for the control socket */
time_t last_st_mtime; /* time this image was modified */
} tfile_context;
/*
* MJPG Chunk header for MJPG streaming.
* Little-endian data is read from the network.
*/
typedef struct {
char mh_magic[MJPG_MH_MAGIC_SIZE]; /* must contain the string MJP
not null-terminated. */
unsigned int mh_framesize; /* Total size of the current
frame in bytes (~45kb on WVC200) */
unsigned short mh_framewidth; /* Frame width in pixels */
unsigned short mh_frameheight; /* Frame height in pixels */
unsigned int mh_frameoffset; /* Offset of this chunk relative
to the beginning of frame. */
unsigned short mh_chunksize; /* The size of the chunk data
following this header. */
char mh_reserved[30]; /* Unknown data, seems to be
constant between all headers */
} mjpg_header;
void netcam_disconnect(netcam_context_ptr netcam);
int netcam_connect(netcam_context_ptr netcam, int err_flag);
int netcam_read_first_header(netcam_context_ptr netcam);
int netcam_setup_html(netcam_context_ptr netcam, struct url_t *url);
int netcam_setup_mjpg(netcam_context_ptr netcam, struct url_t *url);
int netcam_setup_file(netcam_context_ptr netcam, struct url_t *url);
int netcam_read_next_header(netcam_context_ptr netcam);
#endif // _INCLUDE_NETCAM_HTTP_H

View File

@@ -19,7 +19,7 @@
* it actually handles more camera types than just rtsp.
* Within its current construct, it could be set up to handle
* whatever types of capture devices that ffmpeg can use.
* As of this writing it includes rtsp, http and v4l2.
* As of this writing it includes rtsp, http, files and v4l2.
*
***********************************************************/
@@ -664,7 +664,7 @@ static int netcam_rtsp_read_image(struct rtsp_context *rtsp_data){
return 0;
}
static int netcam_rtsp_resize_ntc(struct rtsp_context *rtsp_data){
static int netcam_rtsp_ntc(struct rtsp_context *rtsp_data){
if ((rtsp_data->finish) || (!rtsp_data->first_image)) return 0;
@@ -672,7 +672,7 @@ static int netcam_rtsp_resize_ntc(struct rtsp_context *rtsp_data){
(rtsp_data->imgsize.height != rtsp_data->codec_context->height) ||
(netcam_rtsp_check_pixfmt(rtsp_data) != 0) ){
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO, "");
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO, "****************************************************************");
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO, "******************************************************");
if ((rtsp_data->imgsize.width != rtsp_data->codec_context->width) ||
(rtsp_data->imgsize.height != rtsp_data->codec_context->height)) {
if (netcam_rtsp_check_pixfmt(rtsp_data) != 0) {
@@ -699,7 +699,7 @@ static int netcam_rtsp_resize_ntc(struct rtsp_context *rtsp_data){
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO, _("trancoded to YUV420P. If possible change netcam "));
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO, _("picture format to YUV420P to possibly lower CPU usage."));
}
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO, "****************************************************************");
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO, "******************************************************");
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO, "");
}
@@ -794,6 +794,18 @@ static void netcam_rtsp_set_rtsp(struct rtsp_context *rtsp_data){
}
}
static void netcam_rtsp_set_file(struct rtsp_context *rtsp_data){
/* This is a placeholder for the moment. We will add into
* this function any options that must be set for ffmpeg to
* read a particular file. To date, it does not need any
* additional options and works fine with defaults.
*/
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO
,_("%s: Setting attributes to read file"),rtsp_data->cameratype);
}
static void netcam_rtsp_set_v4l2(struct rtsp_context *rtsp_data){
char optsize[10], optfmt[10], optfps[10];
@@ -886,10 +898,15 @@ static void netcam_rtsp_set_path (struct context *cnt, struct rtsp_context *rtsp
}
if (strcmp(url.service, "v4l2") == 0) {
rtsp_data->path = mymalloc(strlen(url.path));
rtsp_data->path = mymalloc(strlen(url.path) + 1);
sprintf(rtsp_data->path, "%s",url.path);
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO
,_("Setting up v4l2 via ffmpeg netcam"));
} else if (strcmp(url.service, "file") == 0) {
rtsp_data->path = mymalloc(strlen(url.path) + 1);
sprintf(rtsp_data->path, "%s",url.path);
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO
,_("Setting up file via ffmpeg netcam"));
} else {
if (!strcmp(url.service, "mjpeg")) {
sprintf(url.service, "%s","http");
@@ -948,6 +965,8 @@ static void netcam_rtsp_set_parms (struct context *cnt, struct rtsp_context *rts
rtsp_data->pktarray_index = -1;
rtsp_data->handler_finished = TRUE;
rtsp_data->first_image = TRUE;
rtsp_data->capture_frame = TRUE;
snprintf(rtsp_data->threadname, 15, "%s",_("Unknown"));
if (gettimeofday(&rtsp_data->interruptstarttime, NULL) < 0) {
@@ -1090,6 +1109,8 @@ static int netcam_rtsp_open_context(struct rtsp_context *rtsp_data){
netcam_rtsp_set_rtsp(rtsp_data);
} else if (strncmp(rtsp_data->service, "v4l2", 4) == 0 ){
netcam_rtsp_set_v4l2(rtsp_data);
} else if (strncmp(rtsp_data->service, "file", 4) == 0 ){
netcam_rtsp_set_file(rtsp_data);
} else {
av_dict_free(&rtsp_data->opts);
MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO
@@ -1220,7 +1241,7 @@ static int netcam_rtsp_connect(struct rtsp_context *rtsp_data){
if (netcam_rtsp_open_context(rtsp_data) < 0) return -1;
if (netcam_rtsp_resize_ntc(rtsp_data) < 0 ) return -1;
if (netcam_rtsp_ntc(rtsp_data) < 0 ) return -1;
if (netcam_rtsp_read_image(rtsp_data) < 0) return -1;
@@ -1265,6 +1286,7 @@ static void netcam_rtsp_shutdown(struct rtsp_context *rtsp_data){
static void *netcam_rtsp_handler(void *arg){
struct rtsp_context *rtsp_data = arg;
int capture_reset;
rtsp_data->handler_finished = FALSE;
@@ -1272,6 +1294,21 @@ static void *netcam_rtsp_handler(void *arg){
pthread_setspecific(tls_key_threadnr, (void *)((unsigned long)rtsp_data->threadnbr));
/* The rtsp_data->capture_frame is set to TRUE in the netcam_rtsp_next
* function that is running on the motion loop thread. When processing
* a file, we need to slow down this loop to only capture a new image
* after the motion loop has grabbed the picture. Once this loop has
* captured the picture, we set the capture_frame to FALSE. When processing
* other input sources, we let this loop run normally and continually set the
* capture_frame to TRUE. Note that if we slowed this loop for regular rtsp/rtmp
* cameras, the resulting images are corrupted.
*/
if (strncmp(rtsp_data->service, "file", 4) == 0 ){
capture_reset = FALSE;
} else {
capture_reset = TRUE;
}
MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO
,_("%s: Camera handler thread [%d] started")
,rtsp_data->cameratype, rtsp_data->threadnbr);
@@ -1287,19 +1324,28 @@ static void *netcam_rtsp_handler(void *arg){
netcam_rtsp_connect(rtsp_data);
continue;
} else { /* We think we are connected...*/
if (netcam_rtsp_read_image(rtsp_data) < 0) {
if (!rtsp_data->finish) {
/* Nope. We are not or got bad image. Reconnect*/
if ((rtsp_data->status == RTSP_CONNECTED) ||
(rtsp_data->status == RTSP_READINGIMAGE)){
MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO
,_("%s: Bad image. Reconnecting with camera....")
,rtsp_data->cameratype);
pthread_mutex_lock(&rtsp_data->mutex);
if (rtsp_data->capture_frame){
pthread_mutex_unlock(&rtsp_data->mutex);
if (netcam_rtsp_read_image(rtsp_data) < 0) {
if (!rtsp_data->finish) {
/* Nope. We are not or got bad image. Reconnect*/
if ((rtsp_data->status == RTSP_CONNECTED) ||
(rtsp_data->status == RTSP_READINGIMAGE)){
MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO
,_("%s: Bad image. Reconnecting with camera....")
,rtsp_data->cameratype);
}
rtsp_data->status = RTSP_RECONNECTING;
netcam_rtsp_connect(rtsp_data);
}
rtsp_data->status = RTSP_RECONNECTING;
netcam_rtsp_connect(rtsp_data);
continue;
}
continue;
pthread_mutex_lock(&rtsp_data->mutex);
rtsp_data->capture_frame = capture_reset;
pthread_mutex_unlock(&rtsp_data->mutex);
} else {
pthread_mutex_unlock(&rtsp_data->mutex);
}
}
}
@@ -1468,6 +1514,7 @@ int netcam_rtsp_next(struct context *cnt, struct image_data *img_data){
, cnt->rtsp->img_latest->ptr
, cnt->rtsp->img_latest->used);
img_data->idnbr_norm = cnt->rtsp->idnbr;
cnt->rtsp->capture_frame = TRUE;
pthread_mutex_unlock(&cnt->rtsp->mutex);
if (cnt->rtsp_high){

View File

@@ -73,6 +73,7 @@ struct rtsp_context {
int rtsp_uses_tcp; /* Flag from config for whether to use tcp transport */
int v4l2_palette; /* Palette from config for v4l2 devices */
int framerate; /* Frames per second from configuration file */
int capture_frame; /* Flag to tell the file processing when to capture a frame */
char threadname[16]; /* The thread name*/
int threadnbr; /* The thread number */
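
Taken together, the new capture_frame member above and the handler-loop changes in netcam_rtsp.c implement a simple pacing handshake for file sources: the decode thread only fetches a frame while the flag is TRUE, clears it after capturing (for files only), and the motion loop re-arms it in netcam_rtsp_next once it has copied the image. A stripped-down, self-contained sketch of that handshake follows; the struct, function names, and back-off delay are illustrative, not Motion's actual code.

/*
 * Illustrative sketch of the capture_frame pacing (not Motion code).
 * For a file source the decoder captures one frame, clears the flag,
 * and then idles until the consumer has taken the image and set the
 * flag again.  Live camera sources leave the flag TRUE so the decoder
 * runs at full speed.
 */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct pacing {
    pthread_mutex_t mutex;
    bool capture_frame;   /* consumer sets TRUE, decoder clears it for files  */
    bool is_file;         /* file sources are throttled, live cameras are not */
};

/* Decoder side: one iteration of the camera handler loop. */
static void decoder_iteration(struct pacing *p)
{
    pthread_mutex_lock(&p->mutex);
    if (!p->capture_frame) {              /* consumer has not taken the last image yet */
        pthread_mutex_unlock(&p->mutex);
        usleep(1000);                     /* back off briefly and try again later */
        return;
    }
    pthread_mutex_unlock(&p->mutex);

    /* ... read and decode one image here ... */

    pthread_mutex_lock(&p->mutex);
    p->capture_frame = p->is_file ? false : true;  /* throttle only for files */
    pthread_mutex_unlock(&p->mutex);
}

/* Consumer side: called by the motion loop after it copies the image. */
static void consumer_took_image(struct pacing *p)
{
    pthread_mutex_lock(&p->mutex);
    p->capture_frame = true;              /* allow the decoder to fetch the next frame */
    pthread_mutex_unlock(&p->mutex);
}

int main(void)
{
    struct pacing p = { PTHREAD_MUTEX_INITIALIZER, true, true };  /* a "file" source */

    decoder_iteration(&p);    /* captures a frame, clears the flag          */
    decoder_iteration(&p);    /* flag is FALSE, so this call only backs off */
    consumer_took_image(&p);  /* motion loop copied the image, re-arm       */
    decoder_iteration(&p);    /* next frame can now be captured             */
    return 0;
}

In the commit itself the flag lives in struct rtsp_context, the decoder side is netcam_rtsp_handler, and the consumer side is netcam_rtsp_next.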