From 6b99b88d45fd3938206b71ed97b61df523a33900 Mon Sep 17 00:00:00 2001 From: MrDave Date: Sat, 28 Dec 2019 08:51:14 -0700 Subject: [PATCH] Revise alg --- src/alg.cpp | 329 ++++++++++------- src/alg.hpp | 4 + src/motion.hpp | 2 + src/motion_loop.cpp | 872 +++++++++++++++++++++++--------------------- src/picture.cpp | 59 ++- 5 files changed, 717 insertions(+), 549 deletions(-) diff --git a/src/alg.cpp b/src/alg.cpp index 886bf1ad..038023f2 100644 --- a/src/alg.cpp +++ b/src/alg.cpp @@ -23,25 +23,12 @@ #define ACCEPT_STATIC_OBJECT_TIME 10 /* Seconds */ #define EXCLUDE_LEVEL_PERCENT 20 -#define PUSH(Y, XL, XR, DY) /* push new segment on stack */ \ - if (sp= 0 && Y+(DY) < height) \ - {sp->y = Y; sp->xl = XL; sp->xr = XR; sp->dy = DY; sp++;} - -#define POP(Y, XL, XR, DY) /* pop segment off stack */ \ - {sp--; Y = sp->y+(DY = sp->dy); XL = sp->xl; XR = sp->xr;} - typedef struct { short y, xl, xr, dy; } Segment; - -/** - * alg_locate_center_size - * Locates the center and size of the movement. - */ -void alg_locate_center_size(struct ctx_images *imgs, int width, int height, struct ctx_coord *cent) -{ +void alg_locate_center_size_label(struct ctx_images *imgs, int width, int height, struct ctx_coord *cent) { unsigned char *out = imgs->image_motion.image_norm; int *labels = imgs->labels; int x, y, centc = 0, xdist = 0, ydist = 0; @@ -53,31 +40,15 @@ void alg_locate_center_size(struct ctx_images *imgs, int width, int height, stru cent->minx = width; cent->miny = height; - /* If Labeling enabled - locate center of largest labelgroup. */ - if (imgs->labelsize_max) { - /* Locate largest labelgroup */ - for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) { - if (*(labels++) & 32768) { - cent->x += x; - cent->y += y; - centc++; - } + /* Locate largest labelgroup */ + for (y = 0; y < height; y++) { + for (x = 0; x < width; x++) { + if (*(labels++) & 32768) { + cent->x += x; + cent->y += y; + centc++; } } - - } else { - /* Locate movement */ - for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) { - if (*(out++)) { - cent->x += x; - cent->y += y; - centc++; - } - } - } - } if (centc) { @@ -93,44 +64,160 @@ void alg_locate_center_size(struct ctx_images *imgs, int width, int height, stru out = imgs->image_motion.image_norm; /* If Labeling then we find the area around largest labelgroup instead. */ + for (y = 0; y < height; y++) { + for (x = 0; x < width; x++) { + if (*(labels++) & 32768) { + if (x > cent->x) + xdist += x - cent->x; + else if (x < cent->x) + xdist += cent->x - x; + + if (y > cent->y) + ydist += y - cent->y; + else if (y < cent->y) + ydist += cent->y - y; + + centc++; + } + } + } + + if (centc) { + cent->minx = cent->x - xdist / centc * 2; + cent->maxx = cent->x + xdist / centc * 2; + /* + * Make the box a little bigger in y direction to make sure the + * heads fit in so we multiply by 3 instead of 2 which seems to + * to work well in practical. 
+ */ + cent->miny = cent->y - ydist / centc * 3; + cent->maxy = cent->y + ydist / centc * 2; + } + + if (cent->maxx > width - 1) + cent->maxx = width - 1; + else if (cent->maxx < 0) + cent->maxx = 0; + + if (cent->maxy > height - 1) + cent->maxy = height - 1; + else if (cent->maxy < 0) + cent->maxy = 0; + + if (cent->minx > width - 1) + cent->minx = width - 1; + else if (cent->minx < 0) + cent->minx = 0; + + if (cent->miny > height - 1) + cent->miny = height - 1; + else if (cent->miny < 0) + cent->miny = 0; + + /* Align for better locate box handling */ + cent->minx += cent->minx % 2; + cent->miny += cent->miny % 2; + cent->maxx -= cent->maxx % 2; + cent->maxy -= cent->maxy % 2; + + cent->width = cent->maxx - cent->minx; + cent->height = cent->maxy - cent->miny; + + /* + * We want to center Y coordinate to be the center of the action. + * The head of a person is important so we correct the cent.y coordinate + * to match the correction to include a persons head that we just did above. + */ + cent->y = (cent->miny + cent->maxy) / 2; + +} + + +/** Locates the center and size of the movement. */ +void alg_locate_center_size(struct ctx_images *imgs, int width, int height, struct ctx_coord *cent) { + unsigned char *out; + int *labels = imgs->labels; + int x, y, xdist = 0, ydist = 0; + int64_t wght_x, wght_y, centc; + if (imgs->labelsize_max) { - for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) { - if (*(labels++) & 32768) { - if (x > cent->x) - xdist += x - cent->x; - else if (x < cent->x) - xdist += cent->x - x; + alg_locate_center_size_label(imgs,width,height,cent); + return; + } - if (y > cent->y) - ydist += y - cent->y; - else if (y < cent->y) - ydist += cent->y - y; + cent->x = 0; + cent->y = 0; + cent->maxx = 0; + cent->maxy = 0; + cent->minx = width; + cent->miny = height; - centc++; - } + /* Locate movement */ + + /* + out = imgs->image_motion.image_norm; + for (y = 0; y < height; y++) { + for (x = 0; x < width; x++) { + if (*(out++) != 0xff) { + cent->x+=x; + cent->y+=y; + centc++; } } + } + if (centc){ + cent->x = cent->x / centc; + cent->y = cent->y / centc; + } + */ - } else { - for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) { - if (*(out++)) { - if (x > cent->x) - xdist += x - cent->x; - else if (x < cent->x) - xdist += cent->x - x; - if (y > cent->y) - ydist += y - cent->y; - else if (y < cent->y) - ydist += cent->y - y; - - centc++; - } + out = imgs->image_motion.image_norm; + centc = 0; + wght_x = 0; + wght_y = 0; + for (y = 0; y < height-3; y++) { + for (x = 0; x < width-3; x++) { + out++; + if ((*(out) != 0xff) && (*(out+1) != 0xff) && (*(out+2) != 0xff) && + (*(out+width) != 0xff) && (*(out+(width*2)) != 0xff)) { + wght_x=wght_x +(x * (255-(int)(*out))); + wght_y=wght_y +(y * (255-(int)(*out))); + centc =centc + (255-(int)(*out)); } } + out +=3; + } + if (centc){ + cent->x = wght_x / centc; + cent->y = wght_y / centc; + } + + + //MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, "Centx %d Centy %d",cent->x,cent->y); + + /* Find bounds of motion area*/ + centc = 0; + labels = imgs->labels; + out = imgs->image_motion.image_norm; + + for (y = 0; y < height; y++) { + for (x = 0; x < width; x++) { + if (*(out++) != 0xff) { + if (x > cent->x) + xdist += x - cent->x; + else if (x < cent->x) + xdist += cent->x - x; + + if (y > cent->y) + ydist += y - cent->y; + else if (y < cent->y) + ydist += cent->y - y; + + centc++; + } + } } if (centc) { @@ -270,10 +357,18 @@ void alg_threshold_tune(struct ctx_cam *cam, int diffs, int motion) * Parent segment was on line y - dy. 
dy = 1 or -1 */ -/** - * iflood - * +/* These macros are here since they are specific to the functions below + * rather than being generic macros that can be used anywhere. These + * can only be used in the iflood function */ +#define PUSH(Y, XL, XR, DY) /* push new segment on stack */ \ + if (sp= 0 && Y+(DY) < height) \ + {sp->y = Y; sp->xl = XL; sp->xr = XR; sp->dy = DY; sp++;} + +#define POP(Y, XL, XR, DY) /* pop segment off stack */ \ + {sp--; Y = sp->y+(DY = sp->dy); XL = sp->xl; XR = sp->xr;} + +/** iflood */ static int iflood(int x, int y, int width, int height, unsigned char *out, int *labels, int newvalue, int oldvalue) { @@ -330,10 +425,7 @@ static int iflood(int x, int y, int width, int height, return count; } -/** - * alg_labeling - * - */ +/** alg_labeling */ static int alg_labeling(struct ctx_cam *cam) { struct ctx_images *imgs = &cam->imgs; @@ -372,10 +464,6 @@ static int alg_labeling(struct ctx_cam *cam) labelsize = iflood(ix, iy, width, height, out, labels, current_label, 0); if (labelsize > 0) { - //MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "Label: %i (%i) Size: %i (%i,%i)", - // current_label, cam->current_image->total_labels, - // labelsize, ix, iy); - /* Label above threshold? Mark it again (add 32768 to labelnumber). */ if (labelsize > cam->threshold) { labelsize = iflood(ix, iy, width, height, out, labels, current_label + 32768, current_label); @@ -396,10 +484,6 @@ static int alg_labeling(struct ctx_cam *cam) pixelpos++; /* Compensate for ix < width - 1 */ } - //MOTION_LOG(DBG, TYPE_ALL, NO_ERRNO, "%i Labels found. Largest connected Area: %i Pixel(s). " - // "Largest Label: %i", imgs->largest_label, imgs->labelsize_max, - // cam->current_image->total_labels); - /* Return group of significant labels or if that's none, the next largest * group (which is under the threshold, but especially for setup gives an * idea how close it was). @@ -407,12 +491,8 @@ static int alg_labeling(struct ctx_cam *cam) return imgs->labelgroup_max ? imgs->labelgroup_max : max_under; } -/** - * dilate9 - * Dilates a 3x3 box. - */ -static int dilate9(unsigned char *img, int width, int height, void *buffer) -{ +/** Dilates a 3x3 box. */ +int alg_dilate9(unsigned char *img, int width, int height, void *buffer) { /* * - row1, row2 and row3 represent lines in the temporary buffer. * - Window is a sliding window containing max values of the columns @@ -494,12 +574,8 @@ static int dilate9(unsigned char *img, int width, int height, void *buffer) return sum; } -/** - * dilate5 - * Dilates a + shape. - */ -static int dilate5(unsigned char *img, int width, int height, void *buffer) -{ +/** Dilates a + shape. */ +int alg_dilate5(unsigned char *img, int width, int height, void *buffer) { /* * - row1, row2 and row3 represent lines in the temporary buffer. * - mem holds the max value of the overlapping part of two + shapes. @@ -565,12 +641,8 @@ static int dilate5(unsigned char *img, int width, int height, void *buffer) return sum; } -/** - * erode9 - * Erodes a 3x3 box. - */ -static int erode9(unsigned char *img, int width, int height, void *buffer, unsigned char flag) -{ +/** Erodes a 3x3 box. */ +int alg_erode9(unsigned char *img, int width, int height, void *buffer, unsigned char flag) { int y, i, sum = 0; char *Row1,*Row2,*Row3; @@ -609,12 +681,8 @@ static int erode9(unsigned char *img, int width, int height, void *buffer, unsig return sum; } -/** - * erode5 - * Erodes in a + shape. 
- */ -static int erode5(unsigned char *img, int width, int height, void *buffer, unsigned char flag) -{ +/** Erodes in a + shape. */ +int alg_erode5(unsigned char *img, int width, int height, void *buffer, unsigned char flag) { int y, i, sum = 0; char *Row1,*Row2,*Row3; @@ -672,21 +740,21 @@ void alg_despeckle(struct ctx_cam *cam) { for (i = 0; i < len; i++) { switch (cam->conf->despeckle_filter[i]) { case 'E': - if ((diffs = erode9(out, width, height, common_buffer, 0)) == 0) + if ((diffs = alg_erode9(out, width, height, common_buffer, 0)) == 0) i = len; done = 1; break; case 'e': - if ((diffs = erode5(out, width, height, common_buffer, 0)) == 0) + if ((diffs = alg_erode5(out, width, height, common_buffer, 0)) == 0) i = len; done = 1; break; case 'D': - diffs = dilate9(out, width, height, common_buffer); + diffs = alg_dilate9(out, width, height, common_buffer); done = 1; break; case 'd': - diffs = dilate5(out, width, height, common_buffer); + diffs = alg_dilate5(out, width, height, common_buffer); done = 1; break; /* No further despeckle after labeling! */ @@ -746,9 +814,9 @@ void alg_tune_smartmask(struct ctx_cam *cam) { smartmask_final[i] = 255; } /* Further expansion (here:erode due to inverted logic!) of the mask. */ - diff = erode9(smartmask_final, cam->imgs.width, cam->imgs.height, + diff = alg_erode9(smartmask_final, cam->imgs.width, cam->imgs.height, cam->imgs.common_buffer, 255); - diff = erode5(smartmask_final, cam->imgs.width, cam->imgs.height, + diff = alg_erode5(smartmask_final, cam->imgs.width, cam->imgs.height, cam->imgs.common_buffer, 255); cam->smartmask_count = cam->smartmask_ratio; } @@ -770,7 +838,7 @@ static int alg_diff_standard(struct ctx_cam *cam, unsigned char *new_var) { i = imgs->motionsize; memset(out + i, 128, i / 2); /* Motion pictures are now b/w i.o. green */ - memset(out, 0, i); + memset(out, 0xff, i); for (; i > 0; i--) { curdiff = (int)(abs(*ref - *new_var)); /* Using a temp variable is 12% faster. */ @@ -838,6 +906,10 @@ static char alg_diff_fast(struct ctx_cam *cam, int max_n_changes, unsigned char void alg_diff(struct ctx_cam *cam) { + return; + cam->current_image->diffs = alg_diff_standard(cam, cam->imgs.image_vprvcy); + return; + if (cam->detecting_motion || cam->motapp->setup_mode){ cam->current_image->diffs = alg_diff_standard(cam, cam->imgs.image_vprvcy); } else { @@ -923,46 +995,57 @@ void alg_switchfilter(struct ctx_cam *cam) { void alg_update_reference_frame(struct ctx_cam *cam, int action) { int accept_timer = cam->lastrate * ACCEPT_STATIC_OBJECT_TIME; - int i, threshold_ref; + int i, threshold_ref, diffs; int *ref_dyn = cam->imgs.ref_dyn; - unsigned char *image_virgin = cam->imgs.image_vprvcy; + unsigned char *image_vprvcy = cam->imgs.image_vprvcy; unsigned char *ref = cam->imgs.ref; unsigned char *smartmask = cam->imgs.smartmask_final; unsigned char *out = cam->imgs.image_motion.image_norm; if (cam->lastrate > 5) /* Match rate limit */ accept_timer /= (cam->lastrate / 3); +accept_timer = 5; + if (action == UPDATE_REF_FRAME) { /* Black&white only for better performance. */ - threshold_ref = cam->noise * EXCLUDE_LEVEL_PERCENT / 100; + //memset(out + cam->imgs.motionsize, 128, cam->imgs.motionsize / 2); /* Motion pictures are now b/w i.o. green */ + + threshold_ref = cam->noise * EXCLUDE_LEVEL_PERCENT / 100; + threshold_ref = cam->noise; + diffs = 0; for (i = cam->imgs.motionsize; i > 0; i--) { /* Exclude pixels from ref frame well below noise level. 
*/ - if (((int)(abs(*ref - *image_virgin)) > threshold_ref) && (*smartmask)) { + if ((int)(abs(*ref - *image_vprvcy)) > cam->noise) { if (*ref_dyn == 0) { /* Always give new pixels a chance. */ *ref_dyn = 1; } else if (*ref_dyn > accept_timer) { /* Include static Object after some time. */ *ref_dyn = 0; - *ref = *image_virgin; - } else if (*out) { - (*ref_dyn)++; /* Motionpixel? Keep excluding from ref frame. */ + *ref = *image_vprvcy; + //} else if (*out != 0xff) { + // (*ref_dyn)++; /* Motionpixel? Keep excluding from ref frame. */ } else { - *ref_dyn = 0; /* Nothing special - release pixel. */ - *ref = (*ref + *image_virgin) / 2; + // *ref_dyn = 0; /* Nothing special - release pixel. */ + (*ref_dyn)++; + // *ref = *image_vprvcy; + //*ref = (*ref + *image_vprvcy) / 2; } + *out = *image_vprvcy; + diffs++; } else { /* No motion: copy to ref frame. */ *ref_dyn = 0; /* Reset pixel */ - *ref = *image_virgin; + *ref = *image_vprvcy; + *out = 0xff; } ref++; - image_virgin++; + image_vprvcy++; smartmask++; ref_dyn++; out++; } /* end for i */ - + cam->current_image->diffs =diffs; } else { /* action == RESET_REF_FRAME - also used to initialize the frame at startup. */ /* Copy fresh image */ memcpy(cam->imgs.ref, cam->imgs.image_vprvcy, cam->imgs.size_norm); diff --git a/src/alg.hpp b/src/alg.hpp index d1419201..1bccd6bd 100644 --- a/src/alg.hpp +++ b/src/alg.hpp @@ -21,6 +21,10 @@ void alg_despeckle(struct ctx_cam *cam); void alg_tune_smartmask(struct ctx_cam *cam); void alg_update_reference_frame(struct ctx_cam *cam, int); + int alg_dilate9(unsigned char *img, int width, int height, void *buffer); + int alg_dilate5(unsigned char *img, int width, int height, void *buffer); + int alg_erode9(unsigned char *img, int width, int height, void *buffer, unsigned char flag); + int alg_erode5(unsigned char *img, int width, int height, void *buffer, unsigned char flag); void alg_new_update_frame(ctx_cam *cam); void alg_new_diff(ctx_cam *cam); diff --git a/src/motion.hpp b/src/motion.hpp index 093f1136..ba82d145 100644 --- a/src/motion.hpp +++ b/src/motion.hpp @@ -198,6 +198,7 @@ struct ctx_images { struct ctx_image_data image_preview; /* Picture buffer for best image when enables */ unsigned char *ref; /* The reference frame */ + unsigned char *ref_next; /* The reference frame */ unsigned char *mask; /* Buffer for the mask file */ unsigned char *smartmask; unsigned char *smartmask_final; @@ -317,6 +318,7 @@ struct ctx_cam { int postcap; /* downcounter, frames left to to send post event */ int shots; + int ref_lag; unsigned int detecting_motion; long frame_wait[AVGCNT]; /* Last wait times through motion loop*/ diff --git a/src/motion_loop.cpp b/src/motion_loop.cpp index daa63cb4..666553a4 100644 --- a/src/motion_loop.cpp +++ b/src/motion_loop.cpp @@ -240,351 +240,6 @@ static void mlp_detected(struct ctx_cam *cam, int dev, struct ctx_image_data *im } - -static int init_camera_type(struct ctx_cam *cam){ - - cam->camera_type = CAMERA_TYPE_UNKNOWN; - - if (cam->conf->mmalcam_name != "") { - cam->camera_type = CAMERA_TYPE_MMAL; - return 0; - } - - if (cam->conf->netcam_url != "") { - if ((cam->conf->netcam_url.compare(0,5,"mjpeg") == 0) || - (cam->conf->netcam_url.compare(0,4,"http") == 0) || - (cam->conf->netcam_url.compare(0,4,"v4l2") == 0) || - (cam->conf->netcam_url.compare(0,4,"file") == 0) || - (cam->conf->netcam_url.compare(0,4,"rtmp") == 0) || - (cam->conf->netcam_url.compare(0,4,"rtsp") == 0)) { - cam->camera_type = CAMERA_TYPE_NETCAM; - } - return 0; - } - - if (cam->conf->videodevice != "") { - 
cam->camera_type = CAMERA_TYPE_V4L2; - return 0; - } - - MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO - , _("Unable to determine camera type (MMAL, Netcam, V4L2)")); - return -1; - -} - -/** Get first images from camera at startup */ -static void mlp_init_firstimage(struct ctx_cam *cam) { - - int indx; - - cam->current_image = &cam->imgs.image_ring[cam->imgs.ring_in]; - - /* Capture first image, or we will get an alarm on start */ - if (cam->video_dev >= 0) { - for (indx = 0; indx < 5; indx++) { - if (vid_next(cam, cam->current_image) == 0) break; - SLEEP(2, 0); - } - - if (indx >= 5) { - memset(cam->imgs.image_virgin, 0x80, cam->imgs.size_norm); /* initialize to grey */ - draw_text(cam->imgs.image_virgin, cam->imgs.width, cam->imgs.height, - 10, 20, "Error capturing first image", cam->text_scale); - MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, _("Error capturing first image")); - } - } - cam->current_image = &cam->imgs.image_ring[cam->imgs.ring_in]; - - if (cam->conf->primary_method == 0){ - alg_update_reference_frame(cam, RESET_REF_FRAME); - } else if (cam->conf->primary_method == 1) { - alg_new_update_frame(cam); - } - - -} - -/** Check the image size to determine if modulo 8 and over 64 */ -static int mlp_check_szimg(struct ctx_cam *cam){ - - /* Revalidate we got a valid image size */ - if ((cam->imgs.width % 8) || (cam->imgs.height % 8)) { - MOTION_LOG(CRT, TYPE_NETCAM, NO_ERRNO - ,_("Image width (%d) or height(%d) requested is not modulo 8.") - ,cam->imgs.width, cam->imgs.height); - return -1; - } - if ((cam->imgs.width < 64) || (cam->imgs.height < 64)){ - MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO - ,_("Motion only supports width and height greater than or equal to 64 %dx%d") - ,cam->imgs.width, cam->imgs.height); - return -1; - } - /* Substream size notification*/ - if ((cam->imgs.width % 16) || (cam->imgs.height % 16)) { - MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO - ,_("Substream not available. 
Image sizes not modulo 16.")); - } - - return 0; - -} - -/** Set the items required for the area detect */ -static void mlp_init_areadetect(struct ctx_cam *cam){ - - /* Initialize area detection */ - cam->area_minx[0] = cam->area_minx[3] = cam->area_minx[6] = 0; - cam->area_miny[0] = cam->area_miny[1] = cam->area_miny[2] = 0; - - cam->area_minx[1] = cam->area_minx[4] = cam->area_minx[7] = cam->imgs.width / 3; - cam->area_maxx[0] = cam->area_maxx[3] = cam->area_maxx[6] = cam->imgs.width / 3; - - cam->area_minx[2] = cam->area_minx[5] = cam->area_minx[8] = cam->imgs.width / 3 * 2; - cam->area_maxx[1] = cam->area_maxx[4] = cam->area_maxx[7] = cam->imgs.width / 3 * 2; - - cam->area_miny[3] = cam->area_miny[4] = cam->area_miny[5] = cam->imgs.height / 3; - cam->area_maxy[0] = cam->area_maxy[1] = cam->area_maxy[2] = cam->imgs.height / 3; - - cam->area_miny[6] = cam->area_miny[7] = cam->area_miny[8] = cam->imgs.height / 3 * 2; - cam->area_maxy[3] = cam->area_maxy[4] = cam->area_maxy[5] = cam->imgs.height / 3 * 2; - - cam->area_maxx[2] = cam->area_maxx[5] = cam->area_maxx[8] = cam->imgs.width; - cam->area_maxy[6] = cam->area_maxy[7] = cam->area_maxy[8] = cam->imgs.height; - - cam->areadetect_eventnbr = 0; - -} - -/** Allocate the required buffers */ -static void mlp_init_buffers(struct ctx_cam *cam){ - - cam->imgs.ref =(unsigned char*) mymalloc(cam->imgs.size_norm); - cam->imgs.image_motion.image_norm = (unsigned char*)mymalloc(cam->imgs.size_norm); - cam->imgs.ref_dyn =(int*) mymalloc(cam->imgs.motionsize * sizeof(*cam->imgs.ref_dyn)); - cam->imgs.image_virgin =(unsigned char*) mymalloc(cam->imgs.size_norm); - cam->imgs.image_vprvcy = (unsigned char*)mymalloc(cam->imgs.size_norm); - cam->imgs.smartmask =(unsigned char*) mymalloc(cam->imgs.motionsize); - cam->imgs.smartmask_final =(unsigned char*) mymalloc(cam->imgs.motionsize); - cam->imgs.smartmask_buffer =(int*) mymalloc(cam->imgs.motionsize * sizeof(*cam->imgs.smartmask_buffer)); - cam->imgs.labels =(int*)mymalloc(cam->imgs.motionsize * sizeof(*cam->imgs.labels)); - cam->imgs.labelsize =(int*) mymalloc((cam->imgs.motionsize/2+1) * sizeof(*cam->imgs.labelsize)); - cam->imgs.image_preview.image_norm =(unsigned char*) mymalloc(cam->imgs.size_norm); - cam->imgs.common_buffer =(unsigned char*) mymalloc(3 * cam->imgs.width * cam->imgs.height); - cam->imgs.image_secondary =(unsigned char*) mymalloc(3 * cam->imgs.width * cam->imgs.height); - if (cam->imgs.size_high > 0){ - cam->imgs.image_preview.image_high =(unsigned char*) mymalloc(cam->imgs.size_high); - } - - memset(cam->imgs.smartmask, 0, cam->imgs.motionsize); - memset(cam->imgs.smartmask_final, 255, cam->imgs.motionsize); - memset(cam->imgs.smartmask_buffer, 0, cam->imgs.motionsize * sizeof(*cam->imgs.smartmask_buffer)); - -} - -static void mlp_init_values(struct ctx_cam *cam) { - - cam->event_nr=1; - - clock_gettime(CLOCK_REALTIME, &cam->frame_curr_ts); - clock_gettime(CLOCK_REALTIME, &cam->frame_last_ts); - - cam->noise = cam->conf->noise_level; - - cam->threshold = cam->conf->threshold; - if (cam->conf->threshold_maximum > cam->conf->threshold ){ - cam->threshold_maximum = cam->conf->threshold_maximum; - } else { - cam->threshold_maximum = (cam->imgs.height * cam->imgs.width * 3) / 2; - } - - cam->startup_frames = (cam->conf->framerate * 2) + cam->conf->pre_capture + cam->conf->minimum_motion_frames; - - cam->minimum_frame_time_downcounter = cam->conf->minimum_frame_time; - cam->get_image = 1; - - cam->movie_passthrough = cam->conf->movie_passthrough; - if ((cam->camera_type != 
CAMERA_TYPE_NETCAM) && - (cam->movie_passthrough)) { - MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO,_("Pass-through processing disabled.")); - cam->movie_passthrough = FALSE; - } - -} - -static int mlp_init_cam_start(struct ctx_cam *cam) { - - cam->video_dev = vid_start(cam); - - if (cam->video_dev == -1) { - MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO - ,_("Could not fetch initial image from camera ")); - MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO - ,_("Motion continues using width and height from config file(s)")); - cam->imgs.width = cam->conf->width; - cam->imgs.height = cam->conf->height; - cam->imgs.size_norm = cam->conf->width * cam->conf->height * 3 / 2; - cam->imgs.motionsize = cam->conf->width * cam->conf->height; - } else if (cam->video_dev == -2) { - MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO - ,_("Could not fetch initial image from camera ")); - MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO - ,_("Motion only supports width and height modulo 8")); - return -1; - } else { - cam->imgs.motionsize = (cam->imgs.width * cam->imgs.height); - cam->imgs.size_norm = (cam->imgs.width * cam->imgs.height * 3) / 2; - cam->imgs.size_high = (cam->imgs.width_high * cam->imgs.height_high * 3) / 2; - } - - return 0; - -} - -/** mlp_init */ -static int mlp_init(struct ctx_cam *cam) { - - mythreadname_set("ml",cam->threadnr,cam->conf->camera_name.c_str()); - - pthread_setspecific(tls_key_threadnr, (void *)((unsigned long)cam->threadnr)); - - if (init_camera_type(cam) != 0 ) return -1; - - mlp_init_values(cam); - - if (mlp_init_cam_start(cam) != 0) return -1; - - if (mlp_check_szimg(cam) != 0) return -1; - - mlp_ring_resize(cam, 1); /* Create a initial precapture ring buffer with 1 frame */ - - mlp_init_buffers(cam); - - webu_stream_init(cam); - - algsec_init(cam); - - rotate_init(cam); - - draw_init_scale(cam); - - mlp_init_firstimage(cam); - - vlp_init(cam); - - dbse_init(cam); - - pic_init_mask(cam); - - pic_init_privacy(cam); - - track_init(cam); - - mlp_init_areadetect(cam); - - MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO - ,_("Camera %d started: motion detection %s"), - cam->camera_id, cam->pause ? _("Disabled"):_("Enabled")); - - if (cam->conf->emulate_motion) { - MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, _("Emulating motion")); - } - - return 0; -} - -/** clean up all memory etc. 
from motion init */ -void mlp_cleanup(struct ctx_cam *cam) { - - event(cam, EVENT_TIMELAPSEEND, NULL, NULL, NULL, NULL); - event(cam, EVENT_ENDMOTION, NULL, NULL, NULL, NULL); - - webu_stream_deinit(cam); - - algsec_deinit(cam); - - track_deinit(cam); - - if (cam->video_dev >= 0) vid_close(cam); - - free(cam->imgs.image_motion.image_norm); - cam->imgs.image_motion.image_norm = NULL; - - free(cam->imgs.ref); - cam->imgs.ref = NULL; - - free(cam->imgs.ref_dyn); - cam->imgs.ref_dyn = NULL; - - free(cam->imgs.image_virgin); - cam->imgs.image_virgin = NULL; - - free(cam->imgs.image_vprvcy); - cam->imgs.image_vprvcy = NULL; - - free(cam->imgs.labels); - cam->imgs.labels = NULL; - - free(cam->imgs.labelsize); - cam->imgs.labelsize = NULL; - - free(cam->imgs.smartmask); - cam->imgs.smartmask = NULL; - - free(cam->imgs.smartmask_final); - cam->imgs.smartmask_final = NULL; - - free(cam->imgs.smartmask_buffer); - cam->imgs.smartmask_buffer = NULL; - - if (cam->imgs.mask) free(cam->imgs.mask); - cam->imgs.mask = NULL; - - if (cam->imgs.mask_privacy) free(cam->imgs.mask_privacy); - cam->imgs.mask_privacy = NULL; - - if (cam->imgs.mask_privacy_uv) free(cam->imgs.mask_privacy_uv); - cam->imgs.mask_privacy_uv = NULL; - - if (cam->imgs.mask_privacy_high) free(cam->imgs.mask_privacy_high); - cam->imgs.mask_privacy_high = NULL; - - if (cam->imgs.mask_privacy_high_uv) free(cam->imgs.mask_privacy_high_uv); - cam->imgs.mask_privacy_high_uv = NULL; - - free(cam->imgs.common_buffer); - cam->imgs.common_buffer = NULL; - - free(cam->imgs.image_secondary); - cam->imgs.image_secondary = NULL; - - free(cam->imgs.image_preview.image_norm); - cam->imgs.image_preview.image_norm = NULL; - - if (cam->imgs.size_high > 0){ - free(cam->imgs.image_preview.image_high); - cam->imgs.image_preview.image_high = NULL; - } - - mlp_ring_destroy(cam); /* Cleanup the precapture ring buffer */ - - rotate_deinit(cam); /* cleanup image rotation data */ - - if (cam->pipe != -1) { - close(cam->pipe); - cam->pipe = -1; - } - - if (cam->mpipe != -1) { - close(cam->mpipe); - cam->mpipe = -1; - } - - dbse_deinit(cam); - -} - static void mlp_mask_privacy(struct ctx_cam *cam){ if (cam->imgs.mask_privacy == NULL) return; @@ -661,6 +316,355 @@ static void mlp_mask_privacy(struct ctx_cam *cam){ } } +static int mlp_init_camera_type(struct ctx_cam *cam){ + + cam->camera_type = CAMERA_TYPE_UNKNOWN; + + if (cam->conf->mmalcam_name != "") { + cam->camera_type = CAMERA_TYPE_MMAL; + return 0; + } + + if (cam->conf->netcam_url != "") { + if ((cam->conf->netcam_url.compare(0,5,"mjpeg") == 0) || + (cam->conf->netcam_url.compare(0,4,"http") == 0) || + (cam->conf->netcam_url.compare(0,4,"v4l2") == 0) || + (cam->conf->netcam_url.compare(0,4,"file") == 0) || + (cam->conf->netcam_url.compare(0,4,"rtmp") == 0) || + (cam->conf->netcam_url.compare(0,4,"rtsp") == 0)) { + cam->camera_type = CAMERA_TYPE_NETCAM; + } + return 0; + } + + if (cam->conf->videodevice != "") { + cam->camera_type = CAMERA_TYPE_V4L2; + return 0; + } + + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO + , _("Unable to determine camera type (MMAL, Netcam, V4L2)")); + return -1; + +} + +/** Get first images from camera at startup */ +static void mlp_init_firstimage(struct ctx_cam *cam) { + + int indx; + + cam->current_image = &cam->imgs.image_ring[cam->imgs.ring_in]; + + /* Capture first image, or we will get an alarm on start */ + if (cam->video_dev >= 0) { + for (indx = 0; indx < 5; indx++) { + if (vid_next(cam, cam->current_image) == 0) break; + SLEEP(2, 0); + } + + if (indx >= 5) { + 
memset(cam->imgs.image_virgin, 0x80, cam->imgs.size_norm); /* initialize to grey */ + draw_text(cam->imgs.image_virgin, cam->imgs.width, cam->imgs.height, + 10, 20, "Error capturing first image", cam->text_scale); + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, _("Error capturing first image")); + } + } + + cam->current_image = &cam->imgs.image_ring[cam->imgs.ring_in]; + + mlp_mask_privacy(cam); + memcpy(cam->imgs.ref, cam->current_image->image_norm, cam->imgs.size_norm); + memcpy(cam->imgs.ref_next, cam->current_image->image_norm, cam->imgs.size_norm); + +} + +/** Check the image size to determine if modulo 8 and over 64 */ +static int mlp_check_szimg(struct ctx_cam *cam){ + + /* Revalidate we got a valid image size */ + if ((cam->imgs.width % 8) || (cam->imgs.height % 8)) { + MOTION_LOG(CRT, TYPE_NETCAM, NO_ERRNO + ,_("Image width (%d) or height(%d) requested is not modulo 8.") + ,cam->imgs.width, cam->imgs.height); + return -1; + } + if ((cam->imgs.width < 64) || (cam->imgs.height < 64)){ + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO + ,_("Motion only supports width and height greater than or equal to 64 %dx%d") + ,cam->imgs.width, cam->imgs.height); + return -1; + } + /* Substream size notification*/ + if ((cam->imgs.width % 16) || (cam->imgs.height % 16)) { + MOTION_LOG(NTC, TYPE_NETCAM, NO_ERRNO + ,_("Substream not available. Image sizes not modulo 16.")); + } + + return 0; + +} + +/** Set the items required for the area detect */ +static void mlp_init_areadetect(struct ctx_cam *cam){ + + /* Initialize area detection */ + cam->area_minx[0] = cam->area_minx[3] = cam->area_minx[6] = 0; + cam->area_miny[0] = cam->area_miny[1] = cam->area_miny[2] = 0; + + cam->area_minx[1] = cam->area_minx[4] = cam->area_minx[7] = cam->imgs.width / 3; + cam->area_maxx[0] = cam->area_maxx[3] = cam->area_maxx[6] = cam->imgs.width / 3; + + cam->area_minx[2] = cam->area_minx[5] = cam->area_minx[8] = cam->imgs.width / 3 * 2; + cam->area_maxx[1] = cam->area_maxx[4] = cam->area_maxx[7] = cam->imgs.width / 3 * 2; + + cam->area_miny[3] = cam->area_miny[4] = cam->area_miny[5] = cam->imgs.height / 3; + cam->area_maxy[0] = cam->area_maxy[1] = cam->area_maxy[2] = cam->imgs.height / 3; + + cam->area_miny[6] = cam->area_miny[7] = cam->area_miny[8] = cam->imgs.height / 3 * 2; + cam->area_maxy[3] = cam->area_maxy[4] = cam->area_maxy[5] = cam->imgs.height / 3 * 2; + + cam->area_maxx[2] = cam->area_maxx[5] = cam->area_maxx[8] = cam->imgs.width; + cam->area_maxy[6] = cam->area_maxy[7] = cam->area_maxy[8] = cam->imgs.height; + + cam->areadetect_eventnbr = 0; + +} + +/** Allocate the required buffers */ +static void mlp_init_buffers(struct ctx_cam *cam){ + + /* TODO: Determine why ref image is full size instead of just motionsize*/ + cam->imgs.ref =(unsigned char*) mymalloc(cam->imgs.size_norm); + cam->imgs.ref_next =(unsigned char*) mymalloc(cam->imgs.size_norm); + cam->imgs.ref_dyn =(int*) mymalloc(cam->imgs.motionsize * sizeof(*cam->imgs.ref_dyn)); + cam->imgs.image_motion.image_norm = (unsigned char*)mymalloc(cam->imgs.size_norm); + cam->imgs.image_virgin =(unsigned char*) mymalloc(cam->imgs.size_norm); + cam->imgs.image_vprvcy = (unsigned char*)mymalloc(cam->imgs.size_norm); + cam->imgs.smartmask =(unsigned char*) mymalloc(cam->imgs.motionsize); + cam->imgs.smartmask_final =(unsigned char*) mymalloc(cam->imgs.motionsize); + cam->imgs.smartmask_buffer =(int*) mymalloc(cam->imgs.motionsize * sizeof(*cam->imgs.smartmask_buffer)); + cam->imgs.labels =(int*)mymalloc(cam->imgs.motionsize * sizeof(*cam->imgs.labels)); + cam->imgs.labelsize 
=(int*) mymalloc((cam->imgs.motionsize/2+1) * sizeof(*cam->imgs.labelsize)); + cam->imgs.image_preview.image_norm =(unsigned char*) mymalloc(cam->imgs.size_norm); + cam->imgs.common_buffer =(unsigned char*) mymalloc(3 * cam->imgs.width * cam->imgs.height); + cam->imgs.image_secondary =(unsigned char*) mymalloc(3 * cam->imgs.width * cam->imgs.height); + if (cam->imgs.size_high > 0){ + cam->imgs.image_preview.image_high =(unsigned char*) mymalloc(cam->imgs.size_high); + } + + memset(cam->imgs.smartmask, 0, cam->imgs.motionsize); + memset(cam->imgs.smartmask_final, 255, cam->imgs.motionsize); + memset(cam->imgs.smartmask_buffer, 0, cam->imgs.motionsize * sizeof(*cam->imgs.smartmask_buffer)); + +} + +static void mlp_init_values(struct ctx_cam *cam) { + + cam->event_nr=1; + + clock_gettime(CLOCK_REALTIME, &cam->frame_curr_ts); + clock_gettime(CLOCK_REALTIME, &cam->frame_last_ts); + + cam->noise = cam->conf->noise_level; + + cam->threshold = cam->conf->threshold; + if (cam->conf->threshold_maximum > cam->conf->threshold ){ + cam->threshold_maximum = cam->conf->threshold_maximum; + } else { + cam->threshold_maximum = (cam->imgs.height * cam->imgs.width * 3) / 2; + } + + cam->startup_frames = (cam->conf->framerate * 2) + cam->conf->pre_capture + cam->conf->minimum_motion_frames; + + cam->minimum_frame_time_downcounter = cam->conf->minimum_frame_time; + cam->get_image = 1; + + cam->movie_passthrough = cam->conf->movie_passthrough; + if ((cam->camera_type != CAMERA_TYPE_NETCAM) && + (cam->movie_passthrough)) { + MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO,_("Pass-through processing disabled.")); + cam->movie_passthrough = FALSE; + } + + cam->ref_lag = 130; + +} + +static int mlp_init_cam_start(struct ctx_cam *cam) { + + cam->video_dev = vid_start(cam); + + if (cam->video_dev == -1) { + MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO + ,_("Could not fetch initial image from camera ")); + MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO + ,_("Motion continues using width and height from config file(s)")); + cam->imgs.width = cam->conf->width; + cam->imgs.height = cam->conf->height; + cam->imgs.size_norm = cam->conf->width * cam->conf->height * 3 / 2; + cam->imgs.motionsize = cam->conf->width * cam->conf->height; + } else if (cam->video_dev == -2) { + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO + ,_("Could not fetch initial image from camera ")); + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO + ,_("Motion only supports width and height modulo 8")); + return -1; + } else { + cam->imgs.motionsize = (cam->imgs.width * cam->imgs.height); + cam->imgs.size_norm = (cam->imgs.width * cam->imgs.height * 3) / 2; + cam->imgs.size_high = (cam->imgs.width_high * cam->imgs.height_high * 3) / 2; + } + + return 0; + +} + +/** mlp_init */ +static int mlp_init(struct ctx_cam *cam) { + + mythreadname_set("ml",cam->threadnr,cam->conf->camera_name.c_str()); + + pthread_setspecific(tls_key_threadnr, (void *)((unsigned long)cam->threadnr)); + + if (mlp_init_camera_type(cam) != 0 ) return -1; + + mlp_init_values(cam); + + if (mlp_init_cam_start(cam) != 0) return -1; + + if (mlp_check_szimg(cam) != 0) return -1; + + mlp_ring_resize(cam, 1); /* Create a initial precapture ring buffer with 1 frame */ + + mlp_init_buffers(cam); + + webu_stream_init(cam); + + algsec_init(cam); + + rotate_init(cam); + + draw_init_scale(cam); + + pic_init_mask(cam); + + pic_init_privacy(cam); + + mlp_init_firstimage(cam); + + vlp_init(cam); + + dbse_init(cam); + + track_init(cam); + + mlp_init_areadetect(cam); + + MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO + ,_("Camera %d started: motion detection %s"), + 
cam->camera_id, cam->pause ? _("Disabled"):_("Enabled")); + + if (cam->conf->emulate_motion) { + MOTION_LOG(INF, TYPE_ALL, NO_ERRNO, _("Emulating motion")); + } + + return 0; +} + +/** clean up all memory etc. from motion init */ +void mlp_cleanup(struct ctx_cam *cam) { + + event(cam, EVENT_TIMELAPSEEND, NULL, NULL, NULL, NULL); + event(cam, EVENT_ENDMOTION, NULL, NULL, NULL, NULL); + + webu_stream_deinit(cam); + + algsec_deinit(cam); + + track_deinit(cam); + + if (cam->video_dev >= 0) vid_close(cam); + + free(cam->imgs.image_motion.image_norm); + cam->imgs.image_motion.image_norm = NULL; + + free(cam->imgs.ref); + cam->imgs.ref = NULL; + + free(cam->imgs.ref_next); + cam->imgs.ref_next = NULL; + + free(cam->imgs.ref_dyn); + cam->imgs.ref_dyn = NULL; + + free(cam->imgs.image_virgin); + cam->imgs.image_virgin = NULL; + + free(cam->imgs.image_vprvcy); + cam->imgs.image_vprvcy = NULL; + + free(cam->imgs.labels); + cam->imgs.labels = NULL; + + free(cam->imgs.labelsize); + cam->imgs.labelsize = NULL; + + free(cam->imgs.smartmask); + cam->imgs.smartmask = NULL; + + free(cam->imgs.smartmask_final); + cam->imgs.smartmask_final = NULL; + + free(cam->imgs.smartmask_buffer); + cam->imgs.smartmask_buffer = NULL; + + if (cam->imgs.mask) free(cam->imgs.mask); + cam->imgs.mask = NULL; + + if (cam->imgs.mask_privacy) free(cam->imgs.mask_privacy); + cam->imgs.mask_privacy = NULL; + + if (cam->imgs.mask_privacy_uv) free(cam->imgs.mask_privacy_uv); + cam->imgs.mask_privacy_uv = NULL; + + if (cam->imgs.mask_privacy_high) free(cam->imgs.mask_privacy_high); + cam->imgs.mask_privacy_high = NULL; + + if (cam->imgs.mask_privacy_high_uv) free(cam->imgs.mask_privacy_high_uv); + cam->imgs.mask_privacy_high_uv = NULL; + + free(cam->imgs.common_buffer); + cam->imgs.common_buffer = NULL; + + free(cam->imgs.image_secondary); + cam->imgs.image_secondary = NULL; + + free(cam->imgs.image_preview.image_norm); + cam->imgs.image_preview.image_norm = NULL; + + if (cam->imgs.size_high > 0){ + free(cam->imgs.image_preview.image_high); + cam->imgs.image_preview.image_high = NULL; + } + + mlp_ring_destroy(cam); /* Cleanup the precapture ring buffer */ + + rotate_deinit(cam); /* cleanup image rotation data */ + + if (cam->pipe != -1) { + close(cam->pipe); + cam->pipe = -1; + } + + if (cam->mpipe != -1) { + close(cam->mpipe); + cam->mpipe = -1; + } + + dbse_deinit(cam); + +} + static void mlp_areadetect(struct ctx_cam *cam){ int i, j, z = 0; @@ -806,31 +810,104 @@ static int mlp_retry(struct ctx_cam *cam){ return 0; } -static int mlp_capture(struct ctx_cam *cam){ +static void mlp_capture_valid(struct ctx_cam *cam){ + + cam->lost_connection = 0; + cam->connectionlosttime = 0; + + memcpy(cam->imgs.image_virgin, cam->current_image->image_norm, cam->imgs.size_norm); + mlp_mask_privacy(cam); + memcpy(cam->imgs.image_vprvcy, cam->current_image->image_norm, cam->imgs.size_norm); + + if (cam->missing_frame_counter >= MISSING_FRAMES_TIMEOUT * cam->conf->framerate) { + MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, _("Video signal re-acquired")); + event(cam, EVENT_CAMERA_FOUND, NULL, NULL, NULL, NULL); + cam->ref_lag = 0; + memcpy(cam->imgs.ref_next, cam->imgs.image_vprvcy, cam->imgs.size_norm); + } + cam->missing_frame_counter = 0; + + alg_update_reference_frame(cam, UPDATE_REF_FRAME); + + cam->ref_lag--; + if ((cam->ref_lag >= 1000000) && (cam->detecting_motion == FALSE)) { + memcpy(cam->imgs.ref, cam->imgs.ref_next, cam->imgs.size_norm); + memcpy(cam->imgs.ref_next, cam->imgs.image_vprvcy, cam->imgs.size_norm); + cam->ref_lag = 130; + } + + +} + 
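/*
 * Illustrative sketch (not an added hunk of this patch): the ref / ref_next /
 * ref_lag fields used above implement a lagged, double-buffered reference
 * frame -- detection keeps comparing against a reference that is ref_lag
 * frames old while ref_next holds the pending replacement.  The struct and
 * helper below are simplified assumptions, not the patch code; in particular
 * the patch currently gates the swap with (ref_lag >= 1000000), which looks
 * like a temporary value, so the "counter reached zero" test here reflects
 * the apparent intent rather than the literal condition.
 */
#include <cstddef>
#include <cstring>
#include <vector>

static const int REF_LAG_FRAMES = 130;    /* matches cam->ref_lag in mlp_init_values */

struct LaggedReference {
    std::vector<unsigned char> ref;       /* reference the detector compares against */
    std::vector<unsigned char> ref_next;  /* pending replacement reference           */
    int lag_counter;                      /* frames left until the swap              */
};

/* Call once per captured frame with the privacy-masked current image;
 * both vectors are assumed to be pre-sized to the frame size. */
static void update_lagged_reference(LaggedReference &r,
                                    const unsigned char *current, std::size_t size,
                                    bool detecting_motion)
{
    if (--r.lag_counter <= 0 && !detecting_motion) {
        /* Promote the pending reference, then start collecting a new one. */
        std::memcpy(r.ref.data(), r.ref_next.data(), size);
        std::memcpy(r.ref_next.data(), current, size);
        r.lag_counter = REF_LAG_FRAMES;
    }
}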
+static int mlp_capture_nonfatal(struct ctx_cam *cam, int vid_return_code){ const char *tmpin; char tmpout[80]; - int vid_return_code = 0; /* Return code used when calling vid_next */ struct timespec ts1; - if (cam->video_dev >= 0) + if (vid_return_code == NETCAM_RESTART_ERROR) { + MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO + ,_("Restarting Motion thread to reinitialize all " + "image buffers")); + cam->lost_connection = 1; + return 1; + } + + if (cam->connectionlosttime == 0){ + cam->connectionlosttime = cam->frame_curr_ts.tv_sec; + } + + ++cam->missing_frame_counter; + + if (cam->video_dev >= 0 && + cam->missing_frame_counter < (MISSING_FRAMES_TIMEOUT * cam->conf->framerate)) { + memcpy(cam->current_image->image_norm, cam->imgs.image_vprvcy, cam->imgs.size_norm); + } else { + cam->lost_connection = 1; + + if (cam->video_dev >= 0) + tmpin = "CONNECTION TO CAMERA LOST\\nSINCE %Y-%m-%d %T"; + else + tmpin = "UNABLE TO OPEN VIDEO DEVICE\\nSINCE %Y-%m-%d %T"; + + ts1.tv_sec=cam->connectionlosttime; + ts1.tv_nsec = 0; + memset(cam->current_image->image_norm, 0x80, cam->imgs.size_norm); + mystrftime(cam, tmpout, sizeof(tmpout), tmpin, &ts1, NULL, 0); + draw_text(cam->current_image->image_norm, cam->imgs.width, cam->imgs.height, + 10, 20 * cam->text_scale, tmpout, cam->text_scale); + + /* Write error message only once */ + if (cam->missing_frame_counter == MISSING_FRAMES_TIMEOUT * cam->conf->framerate) { + MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO + ,_("Video signal lost - Adding grey image")); + event(cam, EVENT_CAMERA_LOST, NULL, NULL, NULL, &ts1); + } + + if ((cam->video_dev > 0) && + (cam->missing_frame_counter == (MISSING_FRAMES_TIMEOUT * 4) * cam->conf->framerate)) { + MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO + ,_("Video signal still lost - Trying to close video device")); + vid_close(cam); + } + } + + return 0; + +} + +static int mlp_capture(struct ctx_cam *cam){ + + int vid_return_code = 0; /* Return code used when calling vid_next */ + + if (cam->video_dev >= 0){ vid_return_code = vid_next(cam, cam->current_image); - else + } else { vid_return_code = 1; /* Non fatal error */ + } if (vid_return_code == 0) { - cam->lost_connection = 0; - cam->connectionlosttime = 0; - - if (cam->missing_frame_counter >= MISSING_FRAMES_TIMEOUT * cam->conf->framerate) { - MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO, _("Video signal re-acquired")); - event(cam, EVENT_CAMERA_FOUND, NULL, NULL, NULL, NULL); - } - cam->missing_frame_counter = 0; - memcpy(cam->imgs.image_virgin, cam->current_image->image_norm, cam->imgs.size_norm); - mlp_mask_privacy(cam); - memcpy(cam->imgs.image_vprvcy, cam->current_image->image_norm, cam->imgs.size_norm); - + mlp_capture_valid(cam); } else if (vid_return_code < 0) { MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO ,_("Video device fatal error - Closing video device")); @@ -838,53 +915,9 @@ static int mlp_capture(struct ctx_cam *cam){ memcpy(cam->current_image->image_norm, cam->imgs.image_virgin, cam->imgs.size_norm); cam->lost_connection = 1; } else { - if (vid_return_code == NETCAM_RESTART_ERROR) { - MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO - ,_("Restarting Motion thread to reinitialize all " - "image buffers")); - cam->lost_connection = 1; - return 1; - } - - if (cam->connectionlosttime == 0){ - cam->connectionlosttime = cam->frame_curr_ts.tv_sec; - } - - ++cam->missing_frame_counter; - - if (cam->video_dev >= 0 && - cam->missing_frame_counter < (MISSING_FRAMES_TIMEOUT * cam->conf->framerate)) { - memcpy(cam->current_image->image_norm, cam->imgs.image_vprvcy, cam->imgs.size_norm); - } else { - cam->lost_connection = 1; - - 
if (cam->video_dev >= 0) - tmpin = "CONNECTION TO CAMERA LOST\\nSINCE %Y-%m-%d %T"; - else - tmpin = "UNABLE TO OPEN VIDEO DEVICE\\nSINCE %Y-%m-%d %T"; - - ts1.tv_sec=cam->connectionlosttime; - ts1.tv_nsec = 0; - memset(cam->current_image->image_norm, 0x80, cam->imgs.size_norm); - mystrftime(cam, tmpout, sizeof(tmpout), tmpin, &ts1, NULL, 0); - draw_text(cam->current_image->image_norm, cam->imgs.width, cam->imgs.height, - 10, 20 * cam->text_scale, tmpout, cam->text_scale); - - /* Write error message only once */ - if (cam->missing_frame_counter == MISSING_FRAMES_TIMEOUT * cam->conf->framerate) { - MOTION_LOG(NTC, TYPE_ALL, NO_ERRNO - ,_("Video signal lost - Adding grey image")); - event(cam, EVENT_CAMERA_LOST, NULL, NULL, NULL, &ts1); - } - - if ((cam->video_dev > 0) && - (cam->missing_frame_counter == (MISSING_FRAMES_TIMEOUT * 4) * cam->conf->framerate)) { - MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO - ,_("Video signal still lost - Trying to close video device")); - vid_close(cam); - } - } + return mlp_capture_nonfatal(cam, vid_return_code); } + return 0; } @@ -915,9 +948,12 @@ static void mlp_detection(struct ctx_cam *cam){ static void mlp_tuning(struct ctx_cam *cam){ - if ((cam->conf->noise_tune && cam->shots == 0) && - (!cam->detecting_motion && (cam->current_image->diffs <= cam->threshold))) + if ((cam->conf->noise_tune == TRUE) && + (cam->shots == 0) && + (cam->detecting_motion == FALSE) && + (cam->current_image->diffs <= cam->threshold)) { alg_noise_tune(cam, cam->imgs.image_vprvcy); + } if (cam->conf->threshold_tune){ alg_threshold_tune(cam, cam->current_image->diffs, cam->detecting_motion); @@ -930,10 +966,6 @@ static void mlp_tuning(struct ctx_cam *cam){ , cam->imgs.width , cam->imgs.height , &cam->current_image->location); - } - - if (cam->conf->primary_method == 0){ - alg_update_reference_frame(cam, UPDATE_REF_FRAME); } cam->previous_diffs = cam->current_image->diffs; @@ -1055,8 +1087,7 @@ static void mlp_actions_motion(struct ctx_cam *cam){ } if (frame_count >= cam->conf->minimum_motion_frames) { - - cam->current_image->flags |= (IMAGE_TRIGGER | IMAGE_SAVE); + cam->current_image->flags |= (IMAGE_TRIGGER | IMAGE_SAVE); if ( (cam->detecting_motion == FALSE) && (cam->movie_norm != NULL) ){ movie_reset_start_time(cam->movie_norm, &cam->current_image->imgts); @@ -1122,7 +1153,7 @@ static void mlp_actions_event(struct ctx_cam *cam){ static void mlp_actions(struct ctx_cam *cam){ - if ((cam->current_image->diffs > cam->threshold) && + if ((cam->current_image->diffs > cam->threshold) && (cam->current_image->diffs < cam->threshold_maximum)) { cam->current_image->flags |= IMAGE_MOTION; } @@ -1144,17 +1175,14 @@ static void mlp_actions(struct ctx_cam *cam){ cam->lasttime = cam->current_image->imgts.tv_sec; } - if (cam->detecting_motion) algsec_detect(cam); + if (cam->detecting_motion){ + algsec_detect(cam); + } mlp_areadetect(cam); mlp_actions_event(cam); - /* Save/send to movie some images */ - /* But why? And why just two images from the ring? 
Didn't other functions flush already?*/ - mlp_ring_process(cam, 2); - - } static void mlp_setupmode(struct ctx_cam *cam){ diff --git a/src/picture.cpp b/src/picture.cpp index d6aeeb37..829d76ab 100644 --- a/src/picture.cpp +++ b/src/picture.cpp @@ -17,6 +17,7 @@ #include "event.hpp" #include "exif.hpp" #include "draw.hpp" +#include "alg.hpp" #ifdef HAVE_WEBP #include @@ -311,9 +312,10 @@ void pic_save_norm(struct ctx_cam *cam, char *file, unsigned char *image, int ft /* Saves image to a file in format requested */ void pic_save_roi(struct ctx_cam *cam, char *file, unsigned char *image) { FILE *picture; - int image_size, sz, indxh; + + int image_size, sz, indxh, indxw, indx; ctx_coord *bx; - unsigned char *buf, *img; + unsigned char *buf, *img, curdiff ; int width,height, x, y; bx = &cam->current_image->location; @@ -327,8 +329,23 @@ void pic_save_roi(struct ctx_cam *cam, char *file, unsigned char *image) { return; } - /* Lets make the box square */ + /* + char testname[4096]; + FILE *pictest; + memset(testname,0,4096); + memcpy(testname,file,strlen(file)-5); + memcpy(testname+strlen(file)-5,"s.jpg",5); + + pictest = myfopen(testname, "w"); + if (!pictest) { + MOTION_LOG(ERR, TYPE_ALL, SHOW_ERRNO + ,_("Can't write picture to file %s"), file); + return; + } + */ + + /* Lets make the box square */ width = bx->width; height= bx->height; @@ -354,10 +371,28 @@ void pic_save_roi(struct ctx_cam *cam, char *file, unsigned char *image) { buf =(unsigned char*) mymalloc(image_size); img =(unsigned char*) mymalloc(image_size); + /* Subtract the background */ for (indxh=y; indxhimgs.width)+x, width); + indx = (indxh*cam->imgs.width)+x; + memcpy(img+((indxh-y)*width), image+indx, width); + for(indxw=0; indxw imgs.ref+indx+indxw)))); + if (curdiff < cam->noise) img[((indxh-y)*width)+indxw] = 0xff; + } } + /* + sz = jpgutl_put_grey(buf, image_size, img + ,width, height + ,cam->conf->picture_quality, cam + ,&(cam->current_image->imgts), bx); + + fwrite(buf, sz, 1, pictest); + myfclose(pictest); + */ + //alg_dilate5(img,width,height,cam->imgs.common_buffer); + //alg_erode5(img,width,height,cam->imgs.common_buffer,0); + sz = jpgutl_put_grey(buf, image_size, img ,width, height ,cam->conf->picture_quality, cam @@ -365,10 +400,26 @@ void pic_save_roi(struct ctx_cam *cam, char *file, unsigned char *image) { fwrite(buf, sz, 1, picture); + /* + unsigned char *buf2, *img2; + image_size =cam->imgs.motionsize; + img2 =(unsigned char*) mymalloc(image_size); + buf2 =(unsigned char*) mymalloc(image_size); + memcpy(img2,cam->imgs.image_bground,image_size); + sz = jpgutl_put_grey(buf2, image_size, img2 + ,cam->imgs.width,cam->imgs.height + ,cam->conf->picture_quality, cam + ,&(cam->current_image->imgts), bx); + fwrite(buf2, sz, 1, picture); + free(buf2); + free(img2); + */ + free(buf); free(img); myfclose(picture); + } /** Get the pgm file used as fixed mask */
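
For reference, a minimal standalone sketch of the weighted-centroid technique that the revised alg_locate_center_size() applies to the motion image. It assumes a grayscale difference buffer in which 0xff marks "no difference" and darker values encode the difference magnitude (the convention the patch establishes in alg_diff_standard() and alg_update_reference_frame()); the type and function names are illustrative, and the plain single-pixel test stands in for the patch's stricter check that several neighbouring pixels also differ.

#include <cstddef>
#include <cstdint>

struct Center { int x; int y; };

/* Weighted centroid of a difference image: each differing pixel contributes
 * with weight (255 - value), so stronger differences pull the center harder. */
static Center weighted_motion_center(const unsigned char *diff_img, int width, int height)
{
    int64_t wght_x = 0, wght_y = 0, total = 0;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            unsigned char v = diff_img[(std::size_t)y * width + x];
            if (v != 0xff) {                 /* 0xff means "no difference" */
                int w = 255 - (int)v;        /* larger difference => larger weight */
                wght_x += (int64_t)x * w;
                wght_y += (int64_t)y * w;
                total  += w;
            }
        }
    }

    Center c = { width / 2, height / 2 };    /* fall back to the frame center */
    if (total > 0) {
        c.x = (int)(wght_x / total);
        c.y = (int)(wght_y / total);
    }
    return c;
}

The bounding box around that center is then found with a plain unweighted scan of the differing pixels, as in the patch's "Find bounds of motion area" loop; weighting the center biases it toward the strongest motion, while the box itself stays a simple pixel count.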