From 6a8db582ffb480caa4eaa47fd87201501aa4740a Mon Sep 17 00:00:00 2001
From: Isaac Connor
Date: Tue, 21 Jun 2016 12:21:18 -0400
Subject: [PATCH] whitespacing updates
---
src/zm_camera.cpp | 24 +-
src/zm_curl_camera.cpp | 758 +++++-----
src/zm_curl_camera.h | 76 +-
src/zm_ffmpeg.cpp | 526 +++----
src/zm_ffmpeg_camera.cpp | 958 ++++++-------
src/zm_ffmpeg_camera.h | 32 +-
src/zm_file_camera.cpp | 52 +-
src/zm_file_camera.h | 20 +-
src/zm_libvlc_camera.cpp | 300 ++--
src/zm_libvlc_camera.h | 56 +-
src/zm_local_camera.cpp | 1078 +++++++--------
src/zm_local_camera.h | 132 +-
src/zm_monitor.cpp | 78 +-
src/zm_monitor.h | 466 +++----
src/zm_remote_camera.h | 96 +-
src/zm_remote_camera_http.cpp | 2152 ++++++++++++++---------------
src/zm_remote_camera_http.h | 46 +-
src/zm_remote_camera_rtsp.cpp | 748 +++++-----
src/zm_remote_camera_rtsp.h | 74 +-
web/skins/classic/views/frame.php | 7 +-
20 files changed, 3841 insertions(+), 3838 deletions(-)
diff --git a/src/zm_camera.cpp b/src/zm_camera.cpp
index 4759ddc1b..f6c93c041 100644
--- a/src/zm_camera.cpp
+++ b/src/zm_camera.cpp
@@ -32,19 +32,19 @@ Camera::Camera( int p_id, SourceType p_type, int p_width, int p_height, int p_co
colour( p_colour ),
contrast( p_contrast ),
capture( p_capture ),
- record_audio( p_record_audio )
+ record_audio( p_record_audio )
{
- pixels = width * height;
- imagesize = pixels * colours;
-
- Debug(2,"New camera id: %d width: %d height: %d colours: %d subpixelorder: %d capture: %d",id,width,height,colours,subpixelorder,capture);
-
- /* Because many loops are unrolled and work on 16 colours/time or 4 pixels/time, we have to meet requirements */
- if((colours == ZM_COLOUR_GRAY8 || colours == ZM_COLOUR_RGB32) && (imagesize % 16) != 0) {
- Fatal("Image size is not multiples of 16");
- } else if(colours == ZM_COLOUR_RGB24 && ((imagesize % 16) != 0 || (imagesize % 12) != 0)) {
- Fatal("Image size is not multiples of 12 and 16");
- }
+ pixels = width * height;
+ imagesize = pixels * colours;
+
+ Debug(2,"New camera id: %d width: %d height: %d colours: %d subpixelorder: %d capture: %d",id,width,height,colours,subpixelorder,capture);
+
+ /* Because many loops are unrolled and work on 16 colours/time or 4 pixels/time, we have to meet requirements */
+ if((colours == ZM_COLOUR_GRAY8 || colours == ZM_COLOUR_RGB32) && (imagesize % 16) != 0) {
+ Fatal("Image size is not multiples of 16");
+ } else if(colours == ZM_COLOUR_RGB24 && ((imagesize % 16) != 0 || (imagesize % 12) != 0)) {
+ Fatal("Image size is not multiples of 12 and 16");
+ }
}
Camera::~Camera()
diff --git a/src/zm_curl_camera.cpp b/src/zm_curl_camera.cpp
index 303b53528..8c1c7625e 100644
--- a/src/zm_curl_camera.cpp
+++ b/src/zm_curl_camera.cpp
@@ -35,83 +35,83 @@ cURLCamera::cURLCamera( int p_id, const std::string &p_path, const std::string &
mPath( p_path ), mUser( p_user ), mPass ( p_pass ), bTerminate( false ), bReset( false ), mode ( MODE_UNSET )
{
- if ( capture )
- {
- Initialise();
- }
+ if ( capture )
+ {
+ Initialise();
+ }
}
cURLCamera::~cURLCamera()
{
- if ( capture )
- {
+ if ( capture )
+ {
- Terminate();
- }
+ Terminate();
+ }
}
void cURLCamera::Initialise()
{
- content_length_match_len = strlen(content_length_match);
- content_type_match_len = strlen(content_type_match);
+ content_length_match_len = strlen(content_length_match);
+ content_type_match_len = strlen(content_type_match);
- databuffer.expand(CURL_BUFFER_INITIAL_SIZE);
+ databuffer.expand(CURL_BUFFER_INITIAL_SIZE);
- /* cURL initialization */
- cRet = curl_global_init(CURL_GLOBAL_ALL);
- if(cRet != CURLE_OK) {
- Fatal("libcurl initialization failed: ", curl_easy_strerror(cRet));
- }
+ /* cURL initialization */
+ cRet = curl_global_init(CURL_GLOBAL_ALL);
+ if(cRet != CURLE_OK) {
+ Fatal("libcurl initialization failed: ", curl_easy_strerror(cRet));
+ }
- Debug(2,"libcurl version: %s",curl_version());
+ Debug(2,"libcurl version: %s",curl_version());
- /* Create the shared data mutex */
- nRet = pthread_mutex_init(&shareddata_mutex, NULL);
- if(nRet != 0) {
- Fatal("Shared data mutex creation failed: %s",strerror(nRet));
- }
- /* Create the data available condition variable */
- nRet = pthread_cond_init(&data_available_cond, NULL);
- if(nRet != 0) {
- Fatal("Data available condition variable creation failed: %s",strerror(nRet));
- }
- /* Create the request complete condition variable */
- nRet = pthread_cond_init(&request_complete_cond, NULL);
- if(nRet != 0) {
- Fatal("Request complete condition variable creation failed: %s",strerror(nRet));
- }
+ /* Create the shared data mutex */
+ nRet = pthread_mutex_init(&shareddata_mutex, NULL);
+ if(nRet != 0) {
+ Fatal("Shared data mutex creation failed: %s",strerror(nRet));
+ }
+ /* Create the data available condition variable */
+ nRet = pthread_cond_init(&data_available_cond, NULL);
+ if(nRet != 0) {
+ Fatal("Data available condition variable creation failed: %s",strerror(nRet));
+ }
+ /* Create the request complete condition variable */
+ nRet = pthread_cond_init(&request_complete_cond, NULL);
+ if(nRet != 0) {
+ Fatal("Request complete condition variable creation failed: %s",strerror(nRet));
+ }
- /* Create the thread */
- nRet = pthread_create(&thread, NULL, thread_func_dispatcher, this);
- if(nRet != 0) {
- Fatal("Thread creation failed: %s",strerror(nRet));
- }
+ /* Create the thread */
+ nRet = pthread_create(&thread, NULL, thread_func_dispatcher, this);
+ if(nRet != 0) {
+ Fatal("Thread creation failed: %s",strerror(nRet));
+ }
}
void cURLCamera::Terminate()
{
- /* Signal the thread to terminate */
- bTerminate = true;
+ /* Signal the thread to terminate */
+ bTerminate = true;
- /* Wait for thread termination */
- pthread_join(thread, NULL);
+ /* Wait for thread termination */
+ pthread_join(thread, NULL);
- /* Destroy condition variables */
- pthread_cond_destroy(&request_complete_cond);
- pthread_cond_destroy(&data_available_cond);
+ /* Destroy condition variables */
+ pthread_cond_destroy(&request_complete_cond);
+ pthread_cond_destroy(&data_available_cond);
- /* Destroy mutex */
- pthread_mutex_destroy(&shareddata_mutex);
+ /* Destroy mutex */
+ pthread_mutex_destroy(&shareddata_mutex);
- /* cURL cleanup */
- curl_global_cleanup();
+ /* cURL cleanup */
+ curl_global_cleanup();
}
int cURLCamera::PrimeCapture()
{
- //Info( "Priming capture from %s", mPath.c_str() );
- return 0;
+ //Info( "Priming capture from %s", mPath.c_str() );
+ return 0;
}
int cURLCamera::PreCapture()
@@ -122,187 +122,187 @@ int cURLCamera::PreCapture()
int cURLCamera::Capture( Image &image )
{
- bool frameComplete = false;
+ bool frameComplete = false;
- /* MODE_STREAM specific variables */
- bool SubHeadersParsingComplete = false;
- unsigned int frame_content_length = 0;
- std::string frame_content_type;
- bool need_more_data = false;
+ /* MODE_STREAM specific variables */
+ bool SubHeadersParsingComplete = false;
+ unsigned int frame_content_length = 0;
+ std::string frame_content_type;
+ bool need_more_data = false;
- /* Grab the mutex to ensure exclusive access to the shared data */
- lock();
+ /* Grab the mutex to ensure exclusive access to the shared data */
+ lock();
- while (!frameComplete) {
+ while (!frameComplete) {
- /* If the work thread did a reset, reset our local variables */
- if(bReset) {
- SubHeadersParsingComplete = false;
- frame_content_length = 0;
- frame_content_type.clear();
- need_more_data = false;
- bReset = false;
- }
+ /* If the work thread did a reset, reset our local variables */
+ if(bReset) {
+ SubHeadersParsingComplete = false;
+ frame_content_length = 0;
+ frame_content_type.clear();
+ need_more_data = false;
+ bReset = false;
+ }
- if(mode == MODE_UNSET) {
- /* Don't have a mode yet. Sleep while waiting for data */
- nRet = pthread_cond_wait(&data_available_cond,&shareddata_mutex);
- if(nRet != 0) {
- Error("Failed waiting for available data condition variable: %s",strerror(nRet));
- return -20;
- }
- }
+ if(mode == MODE_UNSET) {
+ /* Don't have a mode yet. Sleep while waiting for data */
+ nRet = pthread_cond_wait(&data_available_cond,&shareddata_mutex);
+ if(nRet != 0) {
+ Error("Failed waiting for available data condition variable: %s",strerror(nRet));
+ return -20;
+ }
+ }
- if(mode == MODE_STREAM) {
+ if(mode == MODE_STREAM) {
- /* Subheader parsing */
- while(!SubHeadersParsingComplete && !need_more_data) {
+ /* Subheader parsing */
+ while(!SubHeadersParsingComplete && !need_more_data) {
- size_t crlf_start, crlf_end, crlf_size;
- std::string subheader;
+ size_t crlf_start, crlf_end, crlf_size;
+ std::string subheader;
- /* Check if the buffer contains something */
- if(databuffer.empty()) {
- /* Empty buffer, wait for data */
- need_more_data = true;
- break;
- }
-
- /* Find crlf start */
- crlf_start = memcspn(databuffer,"\r\n",databuffer.size());
- if(crlf_start == databuffer.size()) {
- /* Not found, wait for more data */
- need_more_data = true;
- break;
- }
+ /* Check if the buffer contains something */
+ if(databuffer.empty()) {
+ /* Empty buffer, wait for data */
+ need_more_data = true;
+ break;
+ }
+
+ /* Find crlf start */
+ crlf_start = memcspn(databuffer,"\r\n",databuffer.size());
+ if(crlf_start == databuffer.size()) {
+ /* Not found, wait for more data */
+ need_more_data = true;
+ break;
+ }
- /* See if we have enough data for determining crlf length */
- if(databuffer.size() < crlf_start+5) {
- /* Need more data */
- need_more_data = true;
- break;
- }
+ /* See if we have enough data for determining crlf length */
+ if(databuffer.size() < crlf_start+5) {
+ /* Need more data */
+ need_more_data = true;
+ break;
+ }
- /* Find crlf end and calculate crlf size */
- crlf_end = memspn(((const char*)databuffer.head())+crlf_start,"\r\n",5);
- crlf_size = (crlf_start + crlf_end) - crlf_start;
+ /* Find crlf end and calculate crlf size */
+ crlf_end = memspn(((const char*)databuffer.head())+crlf_start,"\r\n",5);
+ crlf_size = (crlf_start + crlf_end) - crlf_start;
- /* Is this the end of a previous stream? (This is just before the boundary) */
- if(crlf_start == 0) {
- databuffer.consume(crlf_size);
- continue;
- }
+ /* Is this the end of a previous stream? (This is just before the boundary) */
+ if(crlf_start == 0) {
+ databuffer.consume(crlf_size);
+ continue;
+ }
- /* Check for invalid CRLF size */
- if(crlf_size > 4) {
- Error("Invalid CRLF length");
- }
+ /* Check for invalid CRLF size */
+ if(crlf_size > 4) {
+ Error("Invalid CRLF length");
+ }
- /* Check if the crlf is \n\n or \r\n\r\n (marks end of headers, this is the last header) */
- if( (crlf_size == 2 && memcmp(((const char*)databuffer.head())+crlf_start,"\n\n",2) == 0) || (crlf_size == 4 && memcmp(((const char*)databuffer.head())+crlf_start,"\r\n\r\n",4) == 0) ) {
- /* This is the last header */
- SubHeadersParsingComplete = true;
- }
+ /* Check if the crlf is \n\n or \r\n\r\n (marks end of headers, this is the last header) */
+ if( (crlf_size == 2 && memcmp(((const char*)databuffer.head())+crlf_start,"\n\n",2) == 0) || (crlf_size == 4 && memcmp(((const char*)databuffer.head())+crlf_start,"\r\n\r\n",4) == 0) ) {
+ /* This is the last header */
+ SubHeadersParsingComplete = true;
+ }
- /* Copy the subheader, excluding the crlf */
- subheader.assign(databuffer, crlf_start);
+ /* Copy the subheader, excluding the crlf */
+ subheader.assign(databuffer, crlf_start);
- /* Advance the buffer past this one */
- databuffer.consume(crlf_start+crlf_size);
+ /* Advance the buffer past this one */
+ databuffer.consume(crlf_start+crlf_size);
- Debug(7,"Got subheader: %s",subheader.c_str());
+ Debug(7,"Got subheader: %s",subheader.c_str());
- /* Find where the data in this header starts */
- size_t subheader_data_start = subheader.rfind(' ');
- if(subheader_data_start == std::string::npos) {
- subheader_data_start = subheader.find(':');
- }
+ /* Find where the data in this header starts */
+ size_t subheader_data_start = subheader.rfind(' ');
+ if(subheader_data_start == std::string::npos) {
+ subheader_data_start = subheader.find(':');
+ }
- /* Extract the data into a string */
- std::string subheader_data = subheader.substr(subheader_data_start+1, std::string::npos);
+ /* Extract the data into a string */
+ std::string subheader_data = subheader.substr(subheader_data_start+1, std::string::npos);
- Debug(8,"Got subheader data: %s",subheader_data.c_str());
+ Debug(8,"Got subheader data: %s",subheader_data.c_str());
- /* Check the header */
- if(strncasecmp(subheader.c_str(),content_length_match,content_length_match_len) == 0) {
- /* Found the content-length header */
- frame_content_length = atoi(subheader_data.c_str());
- Debug(6,"Got content-length subheader: %d",frame_content_length);
- } else if(strncasecmp(subheader.c_str(),content_type_match,content_type_match_len) == 0) {
- /* Found the content-type header */
- frame_content_type = subheader_data;
- Debug(6,"Got content-type subheader: %s",frame_content_type.c_str());
- }
+ /* Check the header */
+ if(strncasecmp(subheader.c_str(),content_length_match,content_length_match_len) == 0) {
+ /* Found the content-length header */
+ frame_content_length = atoi(subheader_data.c_str());
+ Debug(6,"Got content-length subheader: %d",frame_content_length);
+ } else if(strncasecmp(subheader.c_str(),content_type_match,content_type_match_len) == 0) {
+ /* Found the content-type header */
+ frame_content_type = subheader_data;
+ Debug(6,"Got content-type subheader: %s",frame_content_type.c_str());
+ }
- }
+ }
- /* Attempt to extract the frame */
- if(!need_more_data) {
- if(!SubHeadersParsingComplete) {
- /* We haven't parsed all headers yet */
- need_more_data = true;
- } else if(frame_content_length <= 0) {
- /* Invalid frame */
- Error("Invalid frame: invalid content length");
- } else if(frame_content_type != "image/jpeg") {
- /* Unsupported frame type */
- Error("Unsupported frame: %s",frame_content_type.c_str());
- } else if(frame_content_length > databuffer.size()) {
- /* Incomplete frame, wait for more data */
- need_more_data = true;
- } else {
- /* All good. decode the image */
- image.DecodeJpeg(databuffer.extract(frame_content_length), frame_content_length, colours, subpixelorder);
- frameComplete = true;
- }
- }
+ /* Attempt to extract the frame */
+ if(!need_more_data) {
+ if(!SubHeadersParsingComplete) {
+ /* We haven't parsed all headers yet */
+ need_more_data = true;
+ } else if(frame_content_length <= 0) {
+ /* Invalid frame */
+ Error("Invalid frame: invalid content length");
+ } else if(frame_content_type != "image/jpeg") {
+ /* Unsupported frame type */
+ Error("Unsupported frame: %s",frame_content_type.c_str());
+ } else if(frame_content_length > databuffer.size()) {
+ /* Incomplete frame, wait for more data */
+ need_more_data = true;
+ } else {
+ /* All good. decode the image */
+ image.DecodeJpeg(databuffer.extract(frame_content_length), frame_content_length, colours, subpixelorder);
+ frameComplete = true;
+ }
+ }
- /* Attempt to get more data */
- if(need_more_data) {
- nRet = pthread_cond_wait(&data_available_cond,&shareddata_mutex);
- if(nRet != 0) {
- Error("Failed waiting for available data condition variable: %s",strerror(nRet));
- return -18;
- }
- need_more_data = false;
- }
+ /* Attempt to get more data */
+ if(need_more_data) {
+ nRet = pthread_cond_wait(&data_available_cond,&shareddata_mutex);
+ if(nRet != 0) {
+ Error("Failed waiting for available data condition variable: %s",strerror(nRet));
+ return -18;
+ }
+ need_more_data = false;
+ }
- } else if(mode == MODE_SINGLE) {
- /* Check if we have anything */
- if (!single_offsets.empty()) {
- if( (single_offsets.front() > 0) && (databuffer.size() >= single_offsets.front()) ) {
- /* Extract frame */
- image.DecodeJpeg(databuffer.extract(single_offsets.front()), single_offsets.front(), colours, subpixelorder);
- single_offsets.pop_front();
- frameComplete = true;
- } else {
- /* This shouldn't happen */
- Error("Internal error. Attempting recovery");
- databuffer.consume(single_offsets.front());
- single_offsets.pop_front();
- }
- } else {
- /* Don't have a frame yet, wait for the request complete condition variable */
- nRet = pthread_cond_wait(&request_complete_cond,&shareddata_mutex);
- if(nRet != 0) {
- Error("Failed waiting for request complete condition variable: %s",strerror(nRet));
- return -19;
- }
- }
- } else {
- /* Failed to match content-type */
- Fatal("Unable to match Content-Type. Check URL, username and password");
- } /* mode */
+ } else if(mode == MODE_SINGLE) {
+ /* Check if we have anything */
+ if (!single_offsets.empty()) {
+ if( (single_offsets.front() > 0) && (databuffer.size() >= single_offsets.front()) ) {
+ /* Extract frame */
+ image.DecodeJpeg(databuffer.extract(single_offsets.front()), single_offsets.front(), colours, subpixelorder);
+ single_offsets.pop_front();
+ frameComplete = true;
+ } else {
+ /* This shouldn't happen */
+ Error("Internal error. Attempting recovery");
+ databuffer.consume(single_offsets.front());
+ single_offsets.pop_front();
+ }
+ } else {
+ /* Don't have a frame yet, wait for the request complete condition variable */
+ nRet = pthread_cond_wait(&request_complete_cond,&shareddata_mutex);
+ if(nRet != 0) {
+ Error("Failed waiting for request complete condition variable: %s",strerror(nRet));
+ return -19;
+ }
+ }
+ } else {
+ /* Failed to match content-type */
+ Fatal("Unable to match Content-Type. Check URL, username and password");
+ } /* mode */
- } /* frameComplete loop */
+ } /* frameComplete loop */
- /* Release the mutex */
- unlock();
+ /* Release the mutex */
+ unlock();
- if(!frameComplete)
- return -1;
+ if(!frameComplete)
+ return -1;
- return 0;
+ return 0;
}
int cURLCamera::PostCapture()
@@ -313,7 +313,7 @@ int cURLCamera::PostCapture()
int cURLCamera::CaptureAndRecord( Image &image, bool recording, char* event_directory )
{
- Error("Capture and Record not implemented for the cURL camera type");
+ Error("Capture and Record not implemented for the cURL camera type");
// Nothing to do here
return( 0 );
}
@@ -321,241 +321,241 @@ int cURLCamera::CaptureAndRecord( Image &image, bool recording, char* event_dire
size_t cURLCamera::data_callback(void *buffer, size_t size, size_t nmemb, void *userdata)
{
- lock();
+ lock();
- /* Append the data we just received to our buffer */
- databuffer.append((const char*)buffer, size*nmemb);
+ /* Append the data we just received to our buffer */
+ databuffer.append((const char*)buffer, size*nmemb);
- /* Signal data available */
- nRet = pthread_cond_signal(&data_available_cond);
- if(nRet != 0) {
- Error("Failed signaling data available condition variable: %s",strerror(nRet));
- return -16;
- }
+ /* Signal data available */
+ nRet = pthread_cond_signal(&data_available_cond);
+ if(nRet != 0) {
+ Error("Failed signaling data available condition variable: %s",strerror(nRet));
+ return -16;
+ }
- unlock();
+ unlock();
- /* Return bytes processed */
- return size*nmemb;
+ /* Return bytes processed */
+ return size*nmemb;
}
size_t cURLCamera::header_callback( void *buffer, size_t size, size_t nmemb, void *userdata)
{
- std::string header;
- header.assign((const char*)buffer, size*nmemb);
-
- Debug(4,"Got header: %s",header.c_str());
+ std::string header;
+ header.assign((const char*)buffer, size*nmemb);
+
+ Debug(4,"Got header: %s",header.c_str());
- /* Check Content-Type header */
- if(strncasecmp(header.c_str(),content_type_match,content_type_match_len) == 0) {
- size_t pos = header.find(';');
- if(pos != std::string::npos) {
- header.erase(pos, std::string::npos);
- }
+ /* Check Content-Type header */
+ if(strncasecmp(header.c_str(),content_type_match,content_type_match_len) == 0) {
+ size_t pos = header.find(';');
+ if(pos != std::string::npos) {
+ header.erase(pos, std::string::npos);
+ }
- pos = header.rfind(' ');
- if(pos == std::string::npos) {
- pos = header.find(':');
- }
+ pos = header.rfind(' ');
+ if(pos == std::string::npos) {
+ pos = header.find(':');
+ }
- std::string content_type = header.substr(pos+1, std::string::npos);
- Debug(6,"Content-Type is: %s",content_type.c_str());
+ std::string content_type = header.substr(pos+1, std::string::npos);
+ Debug(6,"Content-Type is: %s",content_type.c_str());
- lock();
+ lock();
- const char* multipart_match = "multipart/x-mixed-replace";
- const char* image_jpeg_match = "image/jpeg";
- if(strncasecmp(content_type.c_str(),multipart_match,strlen(multipart_match)) == 0) {
- Debug(7,"Content type matched as multipart/x-mixed-replace");
- mode = MODE_STREAM;
- } else if(strncasecmp(content_type.c_str(),image_jpeg_match,strlen(image_jpeg_match)) == 0) {
- Debug(7,"Content type matched as image/jpeg");
- mode = MODE_SINGLE;
- }
+ const char* multipart_match = "multipart/x-mixed-replace";
+ const char* image_jpeg_match = "image/jpeg";
+ if(strncasecmp(content_type.c_str(),multipart_match,strlen(multipart_match)) == 0) {
+ Debug(7,"Content type matched as multipart/x-mixed-replace");
+ mode = MODE_STREAM;
+ } else if(strncasecmp(content_type.c_str(),image_jpeg_match,strlen(image_jpeg_match)) == 0) {
+ Debug(7,"Content type matched as image/jpeg");
+ mode = MODE_SINGLE;
+ }
- unlock();
- }
-
- /* Return bytes processed */
- return size*nmemb;
+ unlock();
+ }
+
+ /* Return bytes processed */
+ return size*nmemb;
}
void* cURLCamera::thread_func()
{
- long tRet;
- double dSize;
+ long tRet;
+ double dSize;
- c = curl_easy_init();
- if(c == NULL) {
- Fatal("Failed getting easy handle from libcurl");
- }
+ c = curl_easy_init();
+ if(c == NULL) {
+ Fatal("Failed getting easy handle from libcurl");
+ }
- /* Set URL */
- cRet = curl_easy_setopt(c, CURLOPT_URL, mPath.c_str());
- if(cRet != CURLE_OK)
- Fatal("Failed setting libcurl URL: %s", curl_easy_strerror(cRet));
-
- /* Header callback */
- cRet = curl_easy_setopt(c, CURLOPT_HEADERFUNCTION, &header_callback_dispatcher);
- if(cRet != CURLE_OK)
- Fatal("Failed setting libcurl header callback function: %s", curl_easy_strerror(cRet));
- cRet = curl_easy_setopt(c, CURLOPT_HEADERDATA, this);
- if(cRet != CURLE_OK)
- Fatal("Failed setting libcurl header callback object: %s", curl_easy_strerror(cRet));
+ /* Set URL */
+ cRet = curl_easy_setopt(c, CURLOPT_URL, mPath.c_str());
+ if(cRet != CURLE_OK)
+ Fatal("Failed setting libcurl URL: %s", curl_easy_strerror(cRet));
+
+ /* Header callback */
+ cRet = curl_easy_setopt(c, CURLOPT_HEADERFUNCTION, &header_callback_dispatcher);
+ if(cRet != CURLE_OK)
+ Fatal("Failed setting libcurl header callback function: %s", curl_easy_strerror(cRet));
+ cRet = curl_easy_setopt(c, CURLOPT_HEADERDATA, this);
+ if(cRet != CURLE_OK)
+ Fatal("Failed setting libcurl header callback object: %s", curl_easy_strerror(cRet));
- /* Data callback */
- cRet = curl_easy_setopt(c, CURLOPT_WRITEFUNCTION, &data_callback_dispatcher);
- if(cRet != CURLE_OK)
- Fatal("Failed setting libcurl data callback function: %s", curl_easy_strerror(cRet));
- cRet = curl_easy_setopt(c, CURLOPT_WRITEDATA, this);
- if(cRet != CURLE_OK)
- Fatal("Failed setting libcurl data callback object: %s", curl_easy_strerror(cRet));
+ /* Data callback */
+ cRet = curl_easy_setopt(c, CURLOPT_WRITEFUNCTION, &data_callback_dispatcher);
+ if(cRet != CURLE_OK)
+ Fatal("Failed setting libcurl data callback function: %s", curl_easy_strerror(cRet));
+ cRet = curl_easy_setopt(c, CURLOPT_WRITEDATA, this);
+ if(cRet != CURLE_OK)
+ Fatal("Failed setting libcurl data callback object: %s", curl_easy_strerror(cRet));
- /* Progress callback */
- cRet = curl_easy_setopt(c, CURLOPT_NOPROGRESS, 0);
- if(cRet != CURLE_OK)
- Fatal("Failed enabling libcurl progress callback function: %s", curl_easy_strerror(cRet));
- cRet = curl_easy_setopt(c, CURLOPT_PROGRESSFUNCTION, &progress_callback_dispatcher);
- if(cRet != CURLE_OK)
- Fatal("Failed setting libcurl progress callback function: %s", curl_easy_strerror(cRet));
- cRet = curl_easy_setopt(c, CURLOPT_PROGRESSDATA, this);
- if(cRet != CURLE_OK)
- Fatal("Failed setting libcurl progress callback object: %s", curl_easy_strerror(cRet));
+ /* Progress callback */
+ cRet = curl_easy_setopt(c, CURLOPT_NOPROGRESS, 0);
+ if(cRet != CURLE_OK)
+ Fatal("Failed enabling libcurl progress callback function: %s", curl_easy_strerror(cRet));
+ cRet = curl_easy_setopt(c, CURLOPT_PROGRESSFUNCTION, &progress_callback_dispatcher);
+ if(cRet != CURLE_OK)
+ Fatal("Failed setting libcurl progress callback function: %s", curl_easy_strerror(cRet));
+ cRet = curl_easy_setopt(c, CURLOPT_PROGRESSDATA, this);
+ if(cRet != CURLE_OK)
+ Fatal("Failed setting libcurl progress callback object: %s", curl_easy_strerror(cRet));
- /* Set username and password */
- if(!mUser.empty()) {
- cRet = curl_easy_setopt(c, CURLOPT_USERNAME, mUser.c_str());
- if(cRet != CURLE_OK)
- Error("Failed setting username: %s", curl_easy_strerror(cRet));
- }
- if(!mPass.empty()) {
- cRet = curl_easy_setopt(c, CURLOPT_PASSWORD, mPass.c_str());
- if(cRet != CURLE_OK)
- Error("Failed setting password: %s", curl_easy_strerror(cRet));
- }
+ /* Set username and password */
+ if(!mUser.empty()) {
+ cRet = curl_easy_setopt(c, CURLOPT_USERNAME, mUser.c_str());
+ if(cRet != CURLE_OK)
+ Error("Failed setting username: %s", curl_easy_strerror(cRet));
+ }
+ if(!mPass.empty()) {
+ cRet = curl_easy_setopt(c, CURLOPT_PASSWORD, mPass.c_str());
+ if(cRet != CURLE_OK)
+ Error("Failed setting password: %s", curl_easy_strerror(cRet));
+ }
- /* Authenication preference */
- cRet = curl_easy_setopt(c, CURLOPT_HTTPAUTH, CURLAUTH_ANY);
- if(cRet != CURLE_OK)
- Warning("Failed setting libcurl acceptable http authenication methods: %s", curl_easy_strerror(cRet));
+ /* Authenication preference */
+ cRet = curl_easy_setopt(c, CURLOPT_HTTPAUTH, CURLAUTH_ANY);
+ if(cRet != CURLE_OK)
+ Warning("Failed setting libcurl acceptable http authenication methods: %s", curl_easy_strerror(cRet));
- /* Work loop */
- for(int attempt=1;attempt<=CURL_MAXRETRY;attempt++) {
- tRet = 0;
- while(!bTerminate) {
- /* Do the work */
- cRet = curl_easy_perform(c);
+ /* Work loop */
+ for(int attempt=1;attempt<=CURL_MAXRETRY;attempt++) {
+ tRet = 0;
+ while(!bTerminate) {
+ /* Do the work */
+ cRet = curl_easy_perform(c);
- if(mode == MODE_SINGLE) {
- if(cRet != CURLE_OK) {
- break;
- }
- /* Attempt to get the size of the file */
- cRet = curl_easy_getinfo(c, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &dSize);
- if(cRet != CURLE_OK) {
- break;
- }
- /* We need to lock for the offsets array and the condition variable */
- lock();
- /* Push the size into our offsets array */
- if(dSize > 0) {
- single_offsets.push_back(dSize);
- } else {
- Fatal("Unable to get the size of the image");
- }
- /* Signal the request complete condition variable */
- tRet = pthread_cond_signal(&request_complete_cond);
- if(tRet != 0) {
- Error("Failed signaling request completed condition variable: %s",strerror(tRet));
- }
- /* Unlock */
- unlock();
+ if(mode == MODE_SINGLE) {
+ if(cRet != CURLE_OK) {
+ break;
+ }
+ /* Attempt to get the size of the file */
+ cRet = curl_easy_getinfo(c, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &dSize);
+ if(cRet != CURLE_OK) {
+ break;
+ }
+ /* We need to lock for the offsets array and the condition variable */
+ lock();
+ /* Push the size into our offsets array */
+ if(dSize > 0) {
+ single_offsets.push_back(dSize);
+ } else {
+ Fatal("Unable to get the size of the image");
+ }
+ /* Signal the request complete condition variable */
+ tRet = pthread_cond_signal(&request_complete_cond);
+ if(tRet != 0) {
+ Error("Failed signaling request completed condition variable: %s",strerror(tRet));
+ }
+ /* Unlock */
+ unlock();
- } else if (mode == MODE_STREAM) {
- break;
- }
- }
+ } else if (mode == MODE_STREAM) {
+ break;
+ }
+ }
- /* Return value checking */
- if(cRet == CURLE_ABORTED_BY_CALLBACK || bTerminate) {
- /* Aborted */
- break;
- } else if (cRet != CURLE_OK) {
- /* Some error */
- Error("cURL Request failed: %s",curl_easy_strerror(cRet));
- if(attempt < CURL_MAXRETRY) {
- Error("Retrying.. Attempt %d of %d",attempt,CURL_MAXRETRY);
- /* Do a reset */
- lock();
- databuffer.clear();
- single_offsets.clear();
- mode = MODE_UNSET;
- bReset = true;
- unlock();
- }
- tRet = -50;
- }
- }
-
- /* Cleanup */
- curl_easy_cleanup(c);
- c = NULL;
-
- return (void*)tRet;
+ /* Return value checking */
+ if(cRet == CURLE_ABORTED_BY_CALLBACK || bTerminate) {
+ /* Aborted */
+ break;
+ } else if (cRet != CURLE_OK) {
+ /* Some error */
+ Error("cURL Request failed: %s",curl_easy_strerror(cRet));
+ if(attempt < CURL_MAXRETRY) {
+ Error("Retrying.. Attempt %d of %d",attempt,CURL_MAXRETRY);
+ /* Do a reset */
+ lock();
+ databuffer.clear();
+ single_offsets.clear();
+ mode = MODE_UNSET;
+ bReset = true;
+ unlock();
+ }
+ tRet = -50;
+ }
+ }
+
+ /* Cleanup */
+ curl_easy_cleanup(c);
+ c = NULL;
+
+ return (void*)tRet;
}
int cURLCamera::lock() {
- int nRet;
+ int nRet;
- /* Lock shared data */
- nRet = pthread_mutex_lock(&shareddata_mutex);
- if(nRet != 0) {
- Error("Failed locking shared data mutex: %s",strerror(nRet));
- }
- return nRet;
+ /* Lock shared data */
+ nRet = pthread_mutex_lock(&shareddata_mutex);
+ if(nRet != 0) {
+ Error("Failed locking shared data mutex: %s",strerror(nRet));
+ }
+ return nRet;
}
int cURLCamera::unlock() {
- int nRet;
+ int nRet;
- /* Unlock shared data */
- nRet = pthread_mutex_unlock(&shareddata_mutex);
- if(nRet != 0) {
- Error("Failed unlocking shared data mutex: %s",strerror(nRet));
- }
- return nRet;
+ /* Unlock shared data */
+ nRet = pthread_mutex_unlock(&shareddata_mutex);
+ if(nRet != 0) {
+ Error("Failed unlocking shared data mutex: %s",strerror(nRet));
+ }
+ return nRet;
}
int cURLCamera::progress_callback(void *userdata, double dltotal, double dlnow, double ultotal, double ulnow)
{
- /* Signal the curl thread to terminate */
- if(bTerminate)
- return -10;
-
- return 0;
+ /* Signal the curl thread to terminate */
+ if(bTerminate)
+ return -10;
+
+ return 0;
}
/* These functions call the functions in the class for the correct object */
size_t data_callback_dispatcher(void *buffer, size_t size, size_t nmemb, void *userdata)
{
- return ((cURLCamera*)userdata)->data_callback(buffer,size,nmemb,userdata);
+ return ((cURLCamera*)userdata)->data_callback(buffer,size,nmemb,userdata);
}
size_t header_callback_dispatcher(void *buffer, size_t size, size_t nmemb, void *userdata)
{
- return ((cURLCamera*)userdata)->header_callback(buffer,size,nmemb,userdata);
+ return ((cURLCamera*)userdata)->header_callback(buffer,size,nmemb,userdata);
}
int progress_callback_dispatcher(void *userdata, double dltotal, double dlnow, double ultotal, double ulnow)
{
- return ((cURLCamera*)userdata)->progress_callback(userdata,dltotal,dlnow,ultotal,ulnow);
+ return ((cURLCamera*)userdata)->progress_callback(userdata,dltotal,dlnow,ultotal,ulnow);
}
void* thread_func_dispatcher(void* object) {
- return ((cURLCamera*)object)->thread_func();
+ return ((cURLCamera*)object)->thread_func();
}
diff --git a/src/zm_curl_camera.h b/src/zm_curl_camera.h
index dcf39f94a..7dead3f66 100644
--- a/src/zm_curl_camera.h
+++ b/src/zm_curl_camera.h
@@ -42,57 +42,57 @@
class cURLCamera : public Camera
{
protected:
- typedef enum {MODE_UNSET, MODE_SINGLE, MODE_STREAM} mode_t;
+ typedef enum {MODE_UNSET, MODE_SINGLE, MODE_STREAM} mode_t;
- std::string mPath;
- std::string mUser;
- std::string mPass;
+ std::string mPath;
+ std::string mUser;
+ std::string mPass;
- /* cURL object(s) */
- CURL* c;
+ /* cURL object(s) */
+ CURL* c;
- /* Shared data */
- volatile bool bTerminate;
- volatile bool bReset;
- volatile mode_t mode;
- Buffer databuffer;
- std::deque single_offsets;
+ /* Shared data */
+ volatile bool bTerminate;
+ volatile bool bReset;
+ volatile mode_t mode;
+ Buffer databuffer;
+ std::deque single_offsets;
- /* pthread objects */
- pthread_t thread;
- pthread_mutex_t shareddata_mutex;
- pthread_cond_t data_available_cond;
- pthread_cond_t request_complete_cond;
+ /* pthread objects */
+ pthread_t thread;
+ pthread_mutex_t shareddata_mutex;
+ pthread_cond_t data_available_cond;
+ pthread_cond_t request_complete_cond;
public:
- cURLCamera( int p_id, const std::string &path, const std::string &username, const std::string &password, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
- ~cURLCamera();
+ cURLCamera( int p_id, const std::string &path, const std::string &username, const std::string &password, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
+ ~cURLCamera();
- const std::string &Path() const { return( mPath ); }
- const std::string &Username() const { return( mUser ); }
- const std::string &Password() const { return( mPass ); }
+ const std::string &Path() const { return( mPath ); }
+ const std::string &Username() const { return( mUser ); }
+ const std::string &Password() const { return( mPass ); }
- void Initialise();
- void Terminate();
+ void Initialise();
+ void Terminate();
- int PrimeCapture();
- int PreCapture();
- int Capture( Image &image );
- int PostCapture();
- int CaptureAndRecord( Image &image, bool recording, char* event_directory);
+ int PrimeCapture();
+ int PreCapture();
+ int Capture( Image &image );
+ int PostCapture();
+ int CaptureAndRecord( Image &image, bool recording, char* event_directory);
- size_t data_callback(void *buffer, size_t size, size_t nmemb, void *userdata);
- size_t header_callback(void *buffer, size_t size, size_t nmemb, void *userdata);
- int progress_callback(void *userdata, double dltotal, double dlnow, double ultotal, double ulnow);
- int debug_callback(CURL* handle, curl_infotype type, char* str, size_t strsize, void* data);
- void* thread_func();
- int lock();
- int unlock();
+ size_t data_callback(void *buffer, size_t size, size_t nmemb, void *userdata);
+ size_t header_callback(void *buffer, size_t size, size_t nmemb, void *userdata);
+ int progress_callback(void *userdata, double dltotal, double dlnow, double ultotal, double ulnow);
+ int debug_callback(CURL* handle, curl_infotype type, char* str, size_t strsize, void* data);
+ void* thread_func();
+ int lock();
+ int unlock();
private:
- int nRet;
- CURLcode cRet;
+ int nRet;
+ CURLcode cRet;
};
diff --git a/src/zm_ffmpeg.cpp b/src/zm_ffmpeg.cpp
index 10345dfc5..2c4356abc 100644
--- a/src/zm_ffmpeg.cpp
+++ b/src/zm_ffmpeg.cpp
@@ -15,7 +15,7 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-*/
+ */
#include "zm_ffmpeg.h"
#include "zm_image.h"
@@ -24,243 +24,243 @@
#if HAVE_LIBAVCODEC || HAVE_LIBAVUTIL || HAVE_LIBSWSCALE
void FFMPEGInit() {
- static bool bInit = false;
+ static bool bInit = false;
- if(!bInit) {
- av_register_all();
- av_log_set_level(AV_LOG_DEBUG);
- bInit = true;
- }
+ if(!bInit) {
+ av_register_all();
+ av_log_set_level(AV_LOG_DEBUG);
+ bInit = true;
+ }
}
#if HAVE_LIBAVUTIL
enum _AVPIXELFORMAT GetFFMPEGPixelFormat(unsigned int p_colours, unsigned p_subpixelorder) {
- enum _AVPIXELFORMAT pf;
+ enum _AVPIXELFORMAT pf;
- Debug(8,"Colours: %d SubpixelOrder: %d",p_colours,p_subpixelorder);
+ Debug(8,"Colours: %d SubpixelOrder: %d",p_colours,p_subpixelorder);
- switch(p_colours) {
- case ZM_COLOUR_RGB24:
- {
- if(p_subpixelorder == ZM_SUBPIX_ORDER_BGR) {
- /* BGR subpixel order */
- pf = AV_PIX_FMT_BGR24;
- } else {
- /* Assume RGB subpixel order */
- pf = AV_PIX_FMT_RGB24;
- }
- break;
- }
- case ZM_COLOUR_RGB32:
- {
- if(p_subpixelorder == ZM_SUBPIX_ORDER_ARGB) {
- /* ARGB subpixel order */
- pf = AV_PIX_FMT_ARGB;
- } else if(p_subpixelorder == ZM_SUBPIX_ORDER_ABGR) {
- /* ABGR subpixel order */
- pf = AV_PIX_FMT_ABGR;
- } else if(p_subpixelorder == ZM_SUBPIX_ORDER_BGRA) {
- /* BGRA subpixel order */
- pf = AV_PIX_FMT_BGRA;
- } else {
- /* Assume RGBA subpixel order */
- pf = AV_PIX_FMT_RGBA;
- }
- break;
- }
- case ZM_COLOUR_GRAY8:
- pf = AV_PIX_FMT_GRAY8;
- break;
- default:
- Panic("Unexpected colours: %d",p_colours);
- pf = AV_PIX_FMT_GRAY8; /* Just to shush gcc variable may be unused warning */
- break;
- }
+ switch(p_colours) {
+ case ZM_COLOUR_RGB24:
+ {
+ if(p_subpixelorder == ZM_SUBPIX_ORDER_BGR) {
+ /* BGR subpixel order */
+ pf = AV_PIX_FMT_BGR24;
+ } else {
+ /* Assume RGB subpixel order */
+ pf = AV_PIX_FMT_RGB24;
+ }
+ break;
+ }
+ case ZM_COLOUR_RGB32:
+ {
+ if(p_subpixelorder == ZM_SUBPIX_ORDER_ARGB) {
+ /* ARGB subpixel order */
+ pf = AV_PIX_FMT_ARGB;
+ } else if(p_subpixelorder == ZM_SUBPIX_ORDER_ABGR) {
+ /* ABGR subpixel order */
+ pf = AV_PIX_FMT_ABGR;
+ } else if(p_subpixelorder == ZM_SUBPIX_ORDER_BGRA) {
+ /* BGRA subpixel order */
+ pf = AV_PIX_FMT_BGRA;
+ } else {
+ /* Assume RGBA subpixel order */
+ pf = AV_PIX_FMT_RGBA;
+ }
+ break;
+ }
+ case ZM_COLOUR_GRAY8:
+ pf = AV_PIX_FMT_GRAY8;
+ break;
+ default:
+ Panic("Unexpected colours: %d",p_colours);
+ pf = AV_PIX_FMT_GRAY8; /* Just to shush gcc variable may be unused warning */
+ break;
+ }
- return pf;
+ return pf;
}
#endif // HAVE_LIBAVUTIL
#if HAVE_LIBSWSCALE && HAVE_LIBAVUTIL
SWScale::SWScale() : gotdefaults(false), swscale_ctx(NULL), input_avframe(NULL), output_avframe(NULL) {
- Debug(4,"SWScale object created");
+ Debug(4,"SWScale object created");
- /* Allocate AVFrame for the input */
+ /* Allocate AVFrame for the input */
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- input_avframe = av_frame_alloc();
+ input_avframe = av_frame_alloc();
#else
- input_avframe = avcodec_alloc_frame();
+ input_avframe = avcodec_alloc_frame();
#endif
- if(input_avframe == NULL) {
- Fatal("Failed allocating AVFrame for the input");
- }
+ if(input_avframe == NULL) {
+ Fatal("Failed allocating AVFrame for the input");
+ }
- /* Allocate AVFrame for the output */
+ /* Allocate AVFrame for the output */
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- output_avframe = av_frame_alloc();
+ output_avframe = av_frame_alloc();
#else
- output_avframe = avcodec_alloc_frame();
+ output_avframe = avcodec_alloc_frame();
#endif
- if(output_avframe == NULL) {
- Fatal("Failed allocating AVFrame for the output");
- }
+ if(output_avframe == NULL) {
+ Fatal("Failed allocating AVFrame for the output");
+ }
}
SWScale::~SWScale() {
- /* Free up everything */
+ /* Free up everything */
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- av_frame_free( &input_avframe );
+ av_frame_free( &input_avframe );
#else
- av_freep( &input_avframe );
+ av_freep( &input_avframe );
#endif
- //input_avframe = NULL;
+ //input_avframe = NULL;
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- av_frame_free( &output_avframe );
+ av_frame_free( &output_avframe );
#else
- av_freep( &output_avframe );
+ av_freep( &output_avframe );
#endif
- //output_avframe = NULL;
+ //output_avframe = NULL;
- if(swscale_ctx) {
- sws_freeContext(swscale_ctx);
- swscale_ctx = NULL;
- }
-
- Debug(4,"SWScale object destroyed");
+ if(swscale_ctx) {
+ sws_freeContext(swscale_ctx);
+ swscale_ctx = NULL;
+ }
+
+ Debug(4,"SWScale object destroyed");
}
int SWScale::SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
- /* Assign the defaults */
- default_input_pf = in_pf;
- default_output_pf = out_pf;
- default_width = width;
- default_height = height;
+ /* Assign the defaults */
+ default_input_pf = in_pf;
+ default_output_pf = out_pf;
+ default_width = width;
+ default_height = height;
- gotdefaults = true;
+ gotdefaults = true;
- return 0;
+ return 0;
}
int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
- /* Parameter checking */
- if(in_buffer == NULL || out_buffer == NULL) {
- Error("NULL Input or output buffer");
- return -1;
- }
+ /* Parameter checking */
+ if(in_buffer == NULL || out_buffer == NULL) {
+ Error("NULL Input or output buffer");
+ return -1;
+ }
- if(!width || !height) {
- Error("Invalid width or height");
- return -3;
- }
+ if(!width || !height) {
+ Error("Invalid width or height");
+ return -3;
+ }
#if LIBSWSCALE_VERSION_CHECK(0, 8, 0, 8, 0)
- /* Warn if the input or output pixelformat is not supported */
- if(!sws_isSupportedInput(in_pf)) {
- Warning("swscale does not support the input format: %c%c%c%c",(in_pf)&0xff,((in_pf)&0xff),((in_pf>>16)&0xff),((in_pf>>24)&0xff));
- }
- if(!sws_isSupportedOutput(out_pf)) {
- Warning("swscale does not support the output format: %c%c%c%c",(out_pf)&0xff,((out_pf>>8)&0xff),((out_pf>>16)&0xff),((out_pf>>24)&0xff));
- }
+ /* Warn if the input or output pixelformat is not supported */
+ if(!sws_isSupportedInput(in_pf)) {
+ Warning("swscale does not support the input format: %c%c%c%c",(in_pf)&0xff,((in_pf)&0xff),((in_pf>>16)&0xff),((in_pf>>24)&0xff));
+ }
+ if(!sws_isSupportedOutput(out_pf)) {
+ Warning("swscale does not support the output format: %c%c%c%c",(out_pf)&0xff,((out_pf>>8)&0xff),((out_pf>>16)&0xff),((out_pf>>24)&0xff));
+ }
#endif
- /* Check the buffer sizes */
+ /* Check the buffer sizes */
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
size_t insize = av_image_get_buffer_size(in_pf, width, height,1);
#else
size_t insize = avpicture_get_size(in_pf, width, height);
#endif
- if(insize != in_buffer_size) {
- Error("The input buffer size does not match the expected size for the input format. Required: %d Available: %d", insize, in_buffer_size);
- return -4;
- }
+ if(insize != in_buffer_size) {
+ Error("The input buffer size does not match the expected size for the input format. Required: %d Available: %d", insize, in_buffer_size);
+ return -4;
+ }
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
size_t outsize = av_image_get_buffer_size(out_pf, width, height,1);
#else
size_t outsize = avpicture_get_size(out_pf, width, height);
#endif
- if(outsize < out_buffer_size) {
- Error("The output buffer is undersized for the output format. Required: %d Available: %d", outsize, out_buffer_size);
- return -5;
- }
+ if(outsize < out_buffer_size) {
+ Error("The output buffer is undersized for the output format. Required: %d Available: %d", outsize, out_buffer_size);
+ return -5;
+ }
- /* Get the context */
- swscale_ctx = sws_getCachedContext( swscale_ctx, width, height, in_pf, width, height, out_pf, SWS_FAST_BILINEAR, NULL, NULL, NULL );
- if(swscale_ctx == NULL) {
- Error("Failed getting swscale context");
- return -6;
- }
+ /* Get the context */
+ swscale_ctx = sws_getCachedContext( swscale_ctx, width, height, in_pf, width, height, out_pf, SWS_FAST_BILINEAR, NULL, NULL, NULL );
+ if(swscale_ctx == NULL) {
+ Error("Failed getting swscale context");
+ return -6;
+ }
- /* Fill in the buffers */
+ /* Fill in the buffers */
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
if(av_image_fill_arrays(input_avframe->data, input_avframe->linesize,
(uint8_t*)in_buffer, in_pf, width, height, 1) <= 0)
{
#else
- if(avpicture_fill( (AVPicture*)input_avframe, (uint8_t*)in_buffer,
- in_pf, width, height ) <= 0)
- {
+ if(avpicture_fill( (AVPicture*)input_avframe, (uint8_t*)in_buffer,
+ in_pf, width, height ) <= 0)
+ {
#endif
- Error("Failed filling input frame with input buffer");
- return -7;
- }
+ Error("Failed filling input frame with input buffer");
+ return -7;
+ }
- if(!avpicture_fill( (AVPicture*)output_avframe, out_buffer, out_pf, width, height ) ) {
- Error("Failed filling output frame with output buffer");
- return -8;
+ if(!avpicture_fill( (AVPicture*)output_avframe, out_buffer, out_pf, width, height ) ) {
+ Error("Failed filling output frame with output buffer");
+ return -8;
+ }
+
+ /* Do the conversion */
+ if(!sws_scale(swscale_ctx, input_avframe->data, input_avframe->linesize, 0, height, output_avframe->data, output_avframe->linesize ) ) {
+ Error("swscale conversion failed");
+ return -10;
+ }
+
+ return 0;
}
- /* Do the conversion */
- if(!sws_scale(swscale_ctx, input_avframe->data, input_avframe->linesize, 0, height, output_avframe->data, output_avframe->linesize ) ) {
- Error("swscale conversion failed");
- return -10;
+ int SWScale::Convert(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
+ if(img->Width() != width) {
+ Error("Source image width differs. Source: %d Output: %d",img->Width(), width);
+ return -12;
+ }
+
+ if(img->Height() != height) {
+ Error("Source image height differs. Source: %d Output: %d",img->Height(), height);
+ return -13;
+ }
+
+ return Convert(img->Buffer(),img->Size(),out_buffer,out_buffer_size,in_pf,out_pf,width,height);
}
- return 0;
-}
+ int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size) {
-int SWScale::Convert(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
- if(img->Width() != width) {
- Error("Source image width differs. Source: %d Output: %d",img->Width(), width);
- return -12;
- }
+ if(!gotdefaults) {
+ Error("Defaults are not set");
+ return -24;
+ }
- if(img->Height() != height) {
- Error("Source image height differs. Source: %d Output: %d",img->Height(), height);
- return -13;
- }
+ return Convert(img,out_buffer,out_buffer_size,default_input_pf,default_output_pf,default_width,default_height);
+ }
- return Convert(img->Buffer(),img->Size(),out_buffer,out_buffer_size,in_pf,out_pf,width,height);
-}
+ int SWScale::ConvertDefaults(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size) {
-int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size) {
+ if(!gotdefaults) {
+ Error("Defaults are not set");
+ return -24;
+ }
- if(!gotdefaults) {
- Error("Defaults are not set");
- return -24;
- }
-
- return Convert(img,out_buffer,out_buffer_size,default_input_pf,default_output_pf,default_width,default_height);
-}
-
-int SWScale::ConvertDefaults(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size) {
-
- if(!gotdefaults) {
- Error("Defaults are not set");
- return -24;
- }
-
- return Convert(in_buffer,in_buffer_size,out_buffer,out_buffer_size,default_input_pf,default_output_pf,default_width,default_height);
-}
+ return Convert(in_buffer,in_buffer_size,out_buffer,out_buffer_size,default_input_pf,default_output_pf,default_width,default_height);
+ }
#endif // HAVE_LIBSWSCALE && HAVE_LIBAVUTIL
#endif // HAVE_LIBAVCODEC || HAVE_LIBAVUTIL || HAVE_LIBSWSCALE
#if HAVE_LIBAVUTIL
-int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb){
+ int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb){
int64_t a, b, this_thing;
av_assert0(in_ts != AV_NOPTS_VALUE);
@@ -268,90 +268,90 @@ int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int
if (*last == AV_NOPTS_VALUE || !duration || in_tb.num*(int64_t)out_tb.den <= out_tb.num*(int64_t)in_tb.den) {
simple_round:
- *last = av_rescale_q(in_ts, in_tb, fs_tb) + duration;
- return av_rescale_q(in_ts, in_tb, out_tb);
+ *last = av_rescale_q(in_ts, in_tb, fs_tb) + duration;
+ return av_rescale_q(in_ts, in_tb, out_tb);
}
a = av_rescale_q_rnd(2*in_ts-1, in_tb, fs_tb, AV_ROUND_DOWN) >>1;
b = (av_rescale_q_rnd(2*in_ts+1, in_tb, fs_tb, AV_ROUND_UP )+1)>>1;
if (*last < 2*a - b || *last > 2*b - a)
- goto simple_round;
+ goto simple_round;
- this_thing = av_clip64(*last, a, b);
+ this_thing = av_clip64(*last, a, b);
*last = this_thing + duration;
return av_rescale_q(this_thing, fs_tb, out_tb);
-}
+ }
#endif
-int hacked_up_context2_for_older_ffmpeg(AVFormatContext **avctx, AVOutputFormat *oformat, const char *format, const char *filename) {
+ int hacked_up_context2_for_older_ffmpeg(AVFormatContext **avctx, AVOutputFormat *oformat, const char *format, const char *filename) {
AVFormatContext *s = avformat_alloc_context();
int ret = 0;
*avctx = NULL;
if (!s) {
- av_log(s, AV_LOG_ERROR, "Out of memory\n");
- ret = AVERROR(ENOMEM);
- return ret;
+ av_log(s, AV_LOG_ERROR, "Out of memory\n");
+ ret = AVERROR(ENOMEM);
+ return ret;
}
if (!oformat) {
- if (format) {
- oformat = av_guess_format(format, NULL, NULL);
- if (!oformat) {
- av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
- ret = AVERROR(EINVAL);
- }
- } else {
- oformat = av_guess_format(NULL, filename, NULL);
- if (!oformat) {
- ret = AVERROR(EINVAL);
- av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n", filename);
- }
+ if (format) {
+ oformat = av_guess_format(format, NULL, NULL);
+ if (!oformat) {
+ av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
+ ret = AVERROR(EINVAL);
}
+ } else {
+ oformat = av_guess_format(NULL, filename, NULL);
+ if (!oformat) {
+ ret = AVERROR(EINVAL);
+ av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n", filename);
+ }
+ }
}
if (ret) {
- avformat_free_context(s);
- return ret;
+ avformat_free_context(s);
+ return ret;
} else {
- s->oformat = oformat;
- if (s->oformat->priv_data_size > 0) {
- s->priv_data = av_mallocz(s->oformat->priv_data_size);
- if (s->priv_data) {
- if (s->oformat->priv_class) {
- *(const AVClass**)s->priv_data= s->oformat->priv_class;
- av_opt_set_defaults(s->priv_data);
- }
- } else {
- av_log(s, AV_LOG_ERROR, "Out of memory\n");
- ret = AVERROR(ENOMEM);
- return ret;
- }
- s->priv_data = NULL;
- }
+ s->oformat = oformat;
+ if (s->oformat->priv_data_size > 0) {
+ s->priv_data = av_mallocz(s->oformat->priv_data_size);
+ if (s->priv_data) {
+ if (s->oformat->priv_class) {
+ *(const AVClass**)s->priv_data= s->oformat->priv_class;
+ av_opt_set_defaults(s->priv_data);
+ }
+ } else {
+ av_log(s, AV_LOG_ERROR, "Out of memory\n");
+ ret = AVERROR(ENOMEM);
+ return ret;
+ }
+ s->priv_data = NULL;
+ }
- if (filename) strncpy(s->filename, filename, sizeof(s->filename));
- *avctx = s;
- return 0;
- }
-}
+ if (filename) strncpy(s->filename, filename, sizeof(s->filename));
+ *avctx = s;
+ return 0;
+ }
+ }
-static void zm_log_fps(double d, const char *postfix)
-{
+ static void zm_log_fps(double d, const char *postfix)
+ {
uint64_t v = lrintf(d * 100);
if (!v) {
- Debug(3, "%1.4f %s", d, postfix);
+ Debug(3, "%1.4f %s", d, postfix);
} else if (v % 100) {
- Debug(3, "%3.2f %s", d, postfix);
- } else if (v % (100 * 1000)) {
- Debug(3, "%1.0f %s", d, postfix);
+ Debug(3, "%3.2f %s", d, postfix);
+ } else if (v % (100 * 1000)) {
+ Debug(3, "%1.0f %s", d, postfix);
} else
- Debug(3, "%1.0fk %s", d / 1000, postfix);
-}
+ Debug(3, "%1.0fk %s", d / 1000, postfix);
+ }
-/* "user interface" functions */
-void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) {
+ /* "user interface" functions */
+ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) {
char buf[256];
int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
AVStream *st = ic->streams[i];
@@ -363,67 +363,67 @@ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
/* the pid is an important information, so we display it */
/* XXX: add a generic system */
if (flags & AVFMT_SHOW_IDS)
- Debug(3, "[0x%x]", st->id);
+ Debug(3, "[0x%x]", st->id);
if (lang)
- Debug(3, "(%s)", lang->value);
+ Debug(3, "(%s)", lang->value);
av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames,
- st->time_base.num, st->time_base.den);
+ st->time_base.num, st->time_base.den);
Debug(3, ": %s", buf);
if (st->sample_aspect_ratio.num && // default
av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
- AVRational display_aspect_ratio;
- av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
- st->codec->width * (int64_t)st->sample_aspect_ratio.num,
- st->codec->height * (int64_t)st->sample_aspect_ratio.den,
- 1024 * 1024);
- Debug(3, ", SAR %d:%d DAR %d:%d",
- st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
- display_aspect_ratio.num, display_aspect_ratio.den);
+ AVRational display_aspect_ratio;
+ av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
+ st->codec->width * (int64_t)st->sample_aspect_ratio.num,
+ st->codec->height * (int64_t)st->sample_aspect_ratio.den,
+ 1024 * 1024);
+ Debug(3, ", SAR %d:%d DAR %d:%d",
+ st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
+ display_aspect_ratio.num, display_aspect_ratio.den);
}
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
- int fps = st->avg_frame_rate.den && st->avg_frame_rate.num;
- int tbr = st->r_frame_rate.den && st->r_frame_rate.num;
- int tbn = st->time_base.den && st->time_base.num;
- int tbc = st->codec->time_base.den && st->codec->time_base.num;
+ int fps = st->avg_frame_rate.den && st->avg_frame_rate.num;
+ int tbr = st->r_frame_rate.den && st->r_frame_rate.num;
+ int tbn = st->time_base.den && st->time_base.num;
+ int tbc = st->codec->time_base.den && st->codec->time_base.num;
- if (fps || tbr || tbn || tbc)
- Debug(3, "\n" );
+ if (fps || tbr || tbn || tbc)
+ Debug(3, "\n" );
- if (fps)
- zm_log_fps(av_q2d(st->avg_frame_rate), tbr || tbn || tbc ? "fps, " : "fps");
- if (tbr)
- zm_log_fps(av_q2d(st->r_frame_rate), tbn || tbc ? "tbr, " : "tbr");
- if (tbn)
- zm_log_fps(1 / av_q2d(st->time_base), tbc ? "tbn, " : "tbn");
- if (tbc)
- zm_log_fps(1 / av_q2d(st->codec->time_base), "tbc");
- }
-
- if (st->disposition & AV_DISPOSITION_DEFAULT)
- Debug(3, " (default)");
- if (st->disposition & AV_DISPOSITION_DUB)
- Debug(3, " (dub)");
- if (st->disposition & AV_DISPOSITION_ORIGINAL)
- Debug(3, " (original)");
- if (st->disposition & AV_DISPOSITION_COMMENT)
- Debug(3, " (comment)");
- if (st->disposition & AV_DISPOSITION_LYRICS)
- Debug(3, " (lyrics)");
- if (st->disposition & AV_DISPOSITION_KARAOKE)
- Debug(3, " (karaoke)");
- if (st->disposition & AV_DISPOSITION_FORCED)
- Debug(3, " (forced)");
- if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
- Debug(3, " (hearing impaired)");
- if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
- Debug(3, " (visual impaired)");
- if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
- Debug(3, " (clean effects)");
- Debug(3, "\n");
-
- //dump_metadata(NULL, st->metadata, " ");
-
- //dump_sidedata(NULL, st, " ");
- }
+ if (fps)
+ zm_log_fps(av_q2d(st->avg_frame_rate), tbr || tbn || tbc ? "fps, " : "fps");
+ if (tbr)
+ zm_log_fps(av_q2d(st->r_frame_rate), tbn || tbc ? "tbr, " : "tbr");
+ if (tbn)
+ zm_log_fps(1 / av_q2d(st->time_base), tbc ? "tbn, " : "tbn");
+ if (tbc)
+ zm_log_fps(1 / av_q2d(st->codec->time_base), "tbc");
+ }
+
+ if (st->disposition & AV_DISPOSITION_DEFAULT)
+ Debug(3, " (default)");
+ if (st->disposition & AV_DISPOSITION_DUB)
+ Debug(3, " (dub)");
+ if (st->disposition & AV_DISPOSITION_ORIGINAL)
+ Debug(3, " (original)");
+ if (st->disposition & AV_DISPOSITION_COMMENT)
+ Debug(3, " (comment)");
+ if (st->disposition & AV_DISPOSITION_LYRICS)
+ Debug(3, " (lyrics)");
+ if (st->disposition & AV_DISPOSITION_KARAOKE)
+ Debug(3, " (karaoke)");
+ if (st->disposition & AV_DISPOSITION_FORCED)
+ Debug(3, " (forced)");
+ if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
+ Debug(3, " (hearing impaired)");
+ if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
+ Debug(3, " (visual impaired)");
+ if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
+ Debug(3, " (clean effects)");
+ Debug(3, "\n");
+
+ //dump_metadata(NULL, st->metadata, " ");
+
+ //dump_sidedata(NULL, st, " ");
+ }
diff --git a/src/zm_ffmpeg_camera.cpp b/src/zm_ffmpeg_camera.cpp
index d2c25de38..f6553296d 100644
--- a/src/zm_ffmpeg_camera.cpp
+++ b/src/zm_ffmpeg_camera.cpp
@@ -31,76 +31,76 @@ extern "C"{
#endif
#ifdef SOLARIS
-#include // for ESRCH
+#include // for ESRCH
#include
#include
#endif
FfmpegCamera::FfmpegCamera( int p_id, const std::string &p_path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
- Camera( p_id, FFMPEG_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
- mPath( p_path ),
- mMethod( p_method ),
- mOptions( p_options )
+ Camera( p_id, FFMPEG_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
+ mPath( p_path ),
+ mMethod( p_method ),
+ mOptions( p_options )
{
- if ( capture )
- {
- Initialise();
- }
-
- mFormatContext = NULL;
- mVideoStreamId = -1;
- mAudioStreamId = -1;
- mCodecContext = NULL;
- mCodec = NULL;
- mRawFrame = NULL;
- mFrame = NULL;
- frameCount = 0;
- startTime=0;
- mIsOpening = false;
- mCanCapture = false;
- mOpenStart = 0;
- mReopenThread = 0;
- wasRecording = false;
- videoStore = NULL;
-
+ if ( capture )
+ {
+ Initialise();
+ }
+
+ mFormatContext = NULL;
+ mVideoStreamId = -1;
+ mAudioStreamId = -1;
+ mCodecContext = NULL;
+ mCodec = NULL;
+ mRawFrame = NULL;
+ mFrame = NULL;
+ frameCount = 0;
+ startTime=0;
+ mIsOpening = false;
+ mCanCapture = false;
+ mOpenStart = 0;
+ mReopenThread = 0;
+ wasRecording = false;
+ videoStore = NULL;
+
#if HAVE_LIBSWSCALE
- mConvertContext = NULL;
+ mConvertContext = NULL;
#endif
- /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
- if(colours == ZM_COLOUR_RGB32) {
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- imagePixFormat = AV_PIX_FMT_RGBA;
- } else if(colours == ZM_COLOUR_RGB24) {
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- imagePixFormat = AV_PIX_FMT_RGB24;
- } else if(colours == ZM_COLOUR_GRAY8) {
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- imagePixFormat = AV_PIX_FMT_GRAY8;
- } else {
- Panic("Unexpected colours: %d",colours);
- }
-
+ /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
+ if(colours == ZM_COLOUR_RGB32) {
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ imagePixFormat = AV_PIX_FMT_RGBA;
+ } else if(colours == ZM_COLOUR_RGB24) {
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ imagePixFormat = AV_PIX_FMT_RGB24;
+ } else if(colours == ZM_COLOUR_GRAY8) {
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ imagePixFormat = AV_PIX_FMT_GRAY8;
+ } else {
+ Panic("Unexpected colours: %d",colours);
+ }
+
}
FfmpegCamera::~FfmpegCamera()
{
- CloseFfmpeg();
+ CloseFfmpeg();
- if ( capture )
- {
- Terminate();
- }
+ if ( capture )
+ {
+ Terminate();
+ }
}
void FfmpegCamera::Initialise()
{
- if ( logDebugging() )
- av_log_set_level( AV_LOG_DEBUG );
- else
- av_log_set_level( AV_LOG_QUIET );
+ if ( logDebugging() )
+ av_log_set_level( AV_LOG_DEBUG );
+ else
+ av_log_set_level( AV_LOG_QUIET );
- av_register_all();
- avformat_network_init();
+ av_register_all();
+ avformat_network_init();
}
void FfmpegCamera::Terminate()
@@ -109,295 +109,295 @@ void FfmpegCamera::Terminate()
int FfmpegCamera::PrimeCapture()
{
- mVideoStreamId = -1;
- mAudioStreamId = -1;
- Info( "Priming capture from %s", mPath.c_str() );
+ mVideoStreamId = -1;
+ mAudioStreamId = -1;
+ Info( "Priming capture from %s", mPath.c_str() );
- if (OpenFfmpeg() != 0){
- ReopenFfmpeg();
- }
- return 0;
+ if (OpenFfmpeg() != 0){
+ ReopenFfmpeg();
+ }
+ return 0;
}
int FfmpegCamera::PreCapture()
{
- // Nothing to do here
- return( 0 );
+ // Nothing to do here
+ return( 0 );
}
int FfmpegCamera::Capture( Image &image )
{
- if (!mCanCapture){
- return -1;
- }
-
- // If the reopen thread has a value, but mCanCapture != 0, then we have just reopened the connection to the ffmpeg device, and we can clean up the thread.
- if (mReopenThread != 0) {
- void *retval = 0;
- int ret;
-
- ret = pthread_join(mReopenThread, &retval);
- if (ret != 0){
- Error("Could not join reopen thread.");
- }
-
- Info( "Successfully reopened stream." );
- mReopenThread = 0;
+ if (!mCanCapture){
+ return -1;
+ }
+
+ // If the reopen thread has a value, but mCanCapture != 0, then we have just reopened the connection to the ffmpeg device, and we can clean up the thread.
+ if (mReopenThread != 0) {
+ void *retval = 0;
+ int ret;
+
+ ret = pthread_join(mReopenThread, &retval);
+ if (ret != 0){
+ Error("Could not join reopen thread.");
}
- AVPacket packet;
- uint8_t* directbuffer;
-
- /* Request a writeable buffer of the target image */
- directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
- if(directbuffer == NULL) {
- Error("Failed requesting writeable buffer for the captured image.");
- return (-1);
- }
-
- int frameComplete = false;
- while ( !frameComplete )
+ Info( "Successfully reopened stream." );
+ mReopenThread = 0;
+ }
+
+ AVPacket packet;
+ uint8_t* directbuffer;
+
+ /* Request a writeable buffer of the target image */
+ directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
+ if(directbuffer == NULL) {
+ Error("Failed requesting writeable buffer for the captured image.");
+ return (-1);
+ }
+
+ int frameComplete = false;
+ while ( !frameComplete )
+ {
+ int avResult = av_read_frame( mFormatContext, &packet );
+ if ( avResult < 0 )
{
- int avResult = av_read_frame( mFormatContext, &packet );
- if ( avResult < 0 )
- {
- char errbuf[AV_ERROR_MAX_STRING_SIZE];
- av_strerror(avResult, errbuf, AV_ERROR_MAX_STRING_SIZE);
- if (
- // Check if EOF.
- (avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
- // Check for Connection failure.
- (avResult == -110)
- )
- {
- Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf );
- ReopenFfmpeg();
- }
+ char errbuf[AV_ERROR_MAX_STRING_SIZE];
+ av_strerror(avResult, errbuf, AV_ERROR_MAX_STRING_SIZE);
+ if (
+ // Check if EOF.
+ (avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
+ // Check for Connection failure.
+ (avResult == -110)
+ )
+ {
+ Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf );
+ ReopenFfmpeg();
+ }
- Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, avResult, errbuf );
- return( -1 );
- }
- Debug( 5, "Got packet from stream %d", packet.stream_index );
- // What about audio stream? Maybe someday we could do sound detection...
- if ( packet.stream_index == mVideoStreamId )
- {
+ Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, avResult, errbuf );
+ return( -1 );
+ }
+ Debug( 5, "Got packet from stream %d", packet.stream_index );
+ // What about audio stream? Maybe someday we could do sound detection...
+ if ( packet.stream_index == mVideoStreamId )
+ {
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
- if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 )
+ if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 )
#else
- if ( avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size ) < 0 )
+ if ( avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size ) < 0 )
#endif
- Fatal( "Unable to decode frame at frame %d", frameCount );
+ Fatal( "Unable to decode frame at frame %d", frameCount );
- Debug( 4, "Decoded video packet at frame %d", frameCount );
+ Debug( 4, "Decoded video packet at frame %d", frameCount );
- if ( frameComplete ) {
- Debug( 4, "Got frame %d", frameCount );
+ if ( frameComplete ) {
+ Debug( 4, "Got frame %d", frameCount );
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
- av_image_fill_arrays(mFrame->data, mFrame->linesize,
- directbuffer, imagePixFormat, width, height, 1);
+ av_image_fill_arrays(mFrame->data, mFrame->linesize,
+ directbuffer, imagePixFormat, width, height, 1);
#else
- avpicture_fill( (AVPicture *)mFrame, directbuffer,
- imagePixFormat, width, height);
+ avpicture_fill( (AVPicture *)mFrame, directbuffer,
+ imagePixFormat, width, height);
#endif
-
-#if HAVE_LIBSWSCALE
- if(mConvertContext == NULL) {
- mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
- if(mConvertContext == NULL)
- Fatal( "Unable to create conversion context for %s", mPath.c_str() );
- }
-
- if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
- Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
+#if HAVE_LIBSWSCALE
+ if(mConvertContext == NULL) {
+ mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
+
+ if(mConvertContext == NULL)
+ Fatal( "Unable to create conversion context for %s", mPath.c_str() );
+ }
+
+ if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
+ Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
- Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
+ Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE
- frameCount++;
- } // end if frameComplete
- } else {
- Debug( 4, "Different stream_index %d", packet.stream_index );
- } // end if packet.stream_index == mVideoStreamId
+ frameCount++;
+ } // end if frameComplete
+ } else {
+ Debug( 4, "Different stream_index %d", packet.stream_index );
+ } // end if packet.stream_index == mVideoStreamId
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
- av_packet_unref( &packet);
+ av_packet_unref( &packet);
#else
- av_free_packet( &packet );
+ av_free_packet( &packet );
#endif
- } // end while ! frameComplete
- return (0);
+ } // end while ! frameComplete
+ return (0);
} // FfmpegCamera::Capture
int FfmpegCamera::PostCapture()
{
- // Nothing to do here
- return( 0 );
+ // Nothing to do here
+ return( 0 );
}
int FfmpegCamera::OpenFfmpeg() {
- Debug ( 2, "OpenFfmpeg called." );
+ Debug ( 2, "OpenFfmpeg called." );
- mOpenStart = time(NULL);
- mIsOpening = true;
+ mOpenStart = time(NULL);
+ mIsOpening = true;
- // Open the input, not necessarily a file
+ // Open the input, not necessarily a file
#if !LIBAVFORMAT_VERSION_CHECK(53, 2, 0, 4, 0)
- Debug ( 1, "Calling av_open_input_file" );
- if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
+ Debug ( 1, "Calling av_open_input_file" );
+ if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
// Handle options
AVDictionary *opts = 0;
- StringVector opVect = split(Options(), ",");
-
- // Set transport method as specified by method field, rtpUni is default
- if ( Method() == "rtpMulti" )
- opVect.push_back("rtsp_transport=udp_multicast");
- else if ( Method() == "rtpRtsp" )
- opVect.push_back("rtsp_transport=tcp");
- else if ( Method() == "rtpRtspHttp" )
- opVect.push_back("rtsp_transport=http");
-
- Debug(2, "Number of Options: %d",opVect.size());
- for (size_t i=0; i 1) {
+ parts[0] = trimSpaces(parts[0]);
+ parts[1] = trimSpaces(parts[1]);
+ if ( av_dict_set(&opts, parts[0].c_str(), parts[1].c_str(), 0) == 0 ) {
+ Debug(2, "set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str());
+ }
+ else
+ {
+ Warning( "Error trying to set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str() );
+ }
+
+ }
+ else
{
- StringVector parts = split(opVect[i],"=");
- if (parts.size() > 1) {
- parts[0] = trimSpaces(parts[0]);
- parts[1] = trimSpaces(parts[1]);
- if ( av_dict_set(&opts, parts[0].c_str(), parts[1].c_str(), 0) == 0 ) {
- Debug(2, "set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str());
- }
- else
- {
- Warning( "Error trying to set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str() );
- }
-
- }
- else
- {
- Warning( "Unable to parse ffmpeg option %d '%s', expecting key=value", i, opVect[i].c_str() );
- }
- }
- Debug ( 1, "Calling avformat_open_input" );
+ Warning( "Unable to parse ffmpeg option %d '%s', expecting key=value", i, opVect[i].c_str() );
+ }
+ }
+ Debug ( 1, "Calling avformat_open_input" );
- mFormatContext = avformat_alloc_context( );
- mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
- mFormatContext->interrupt_callback.opaque = this;
+ mFormatContext = avformat_alloc_context( );
+ mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
+ mFormatContext->interrupt_callback.opaque = this;
- if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) !=0 )
+ if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) !=0 )
#endif
- {
- mIsOpening = false;
- Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
- return -1;
- }
-
- AVDictionaryEntry *e;
- if ((e = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX)) != NULL) {
- Warning( "Option %s not recognized by ffmpeg", e->key);
- }
-
+ {
mIsOpening = false;
- Debug ( 1, "Opened input" );
+ Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
+ return -1;
+ }
- Info( "Stream open %s", mPath.c_str() );
- startTime=av_gettime();//FIXME here or after find_Stream_info
-
- //FIXME can speed up initial analysis but need sensible parameters...
- //mFormatContext->probesize = 32;
- //mFormatContext->max_analyze_duration = 32;
- // Locate stream info from avformat_open_input
+ AVDictionaryEntry *e;
+ if ((e = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX)) != NULL) {
+ Warning( "Option %s not recognized by ffmpeg", e->key);
+ }
+
+ mIsOpening = false;
+ Debug ( 1, "Opened input" );
+
+ Info( "Stream open %s", mPath.c_str() );
+ startTime=av_gettime();//FIXME here or after find_Stream_info
+
+ //FIXME can speed up initial analysis but need sensible parameters...
+ //mFormatContext->probesize = 32;
+ //mFormatContext->max_analyze_duration = 32;
+ // Locate stream info from avformat_open_input
#if !LIBAVFORMAT_VERSION_CHECK(53, 6, 0, 6, 0)
- Debug ( 1, "Calling av_find_stream_info" );
- if ( av_find_stream_info( mFormatContext ) < 0 )
+ Debug ( 1, "Calling av_find_stream_info" );
+ if ( av_find_stream_info( mFormatContext ) < 0 )
#else
Debug ( 1, "Calling avformat_find_stream_info" );
- if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
+ if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
- Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
+ Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
- Debug ( 1, "Got stream info" );
+ Debug ( 1, "Got stream info" );
- // Find first video stream present
- // The one we want Might not be the first
- mVideoStreamId = -1;
- mAudioStreamId = -1;
- for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
- {
+ // Find first video stream present
+ // The one we want Might not be the first
+ mVideoStreamId = -1;
+ mAudioStreamId = -1;
+ for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
+ {
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
- if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
+ if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
- if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
+ if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
- {
- if ( mVideoStreamId == -1 ) {
- mVideoStreamId = i;
- // if we break, then we won't find the audio stream
- continue;
- } else {
- Debug(2, "Have another video stream." );
- }
- }
-#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
- if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
-#else
- if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO )
-#endif
- {
- if ( mAudioStreamId == -1 ) {
- mAudioStreamId = i;
- } else {
- Debug(2, "Have another audio stream." );
- }
+ {
+ if ( mVideoStreamId == -1 ) {
+ mVideoStreamId = i;
+ // if we break, then we won't find the audio stream
+ continue;
+ } else {
+ Debug(2, "Have another video stream." );
}
- }
- if ( mVideoStreamId == -1 )
- Fatal( "Unable to locate video stream in %s", mPath.c_str() );
- if ( mAudioStreamId == -1 )
- Debug( 2, "Unable to locate audio stream in %s", mPath.c_str() );
+ }
+#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
+ if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
+#else
+ if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO )
+#endif
+ {
+ if ( mAudioStreamId == -1 ) {
+ mAudioStreamId = i;
+ } else {
+ Debug(2, "Have another audio stream." );
+ }
+ }
+ }
+ if ( mVideoStreamId == -1 )
+ Fatal( "Unable to locate video stream in %s", mPath.c_str() );
+ if ( mAudioStreamId == -1 )
+ Debug( 2, "Unable to locate audio stream in %s", mPath.c_str() );
- Debug ( 3, "Found video stream at index %d", mVideoStreamId );
- Debug ( 3, "Found audio stream at index %d", mAudioStreamId );
+ Debug ( 3, "Found video stream at index %d", mVideoStreamId );
+ Debug ( 3, "Found audio stream at index %d", mAudioStreamId );
- mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
+ mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
- // Try and get the codec from the codec context
- if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
- Fatal( "Can't find codec for video stream from %s", mPath.c_str() );
+ // Try and get the codec from the codec context
+ if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
+ Fatal( "Can't find codec for video stream from %s", mPath.c_str() );
- Debug ( 1, "Found decoder" );
- zm_dump_stream_format( mFormatContext, mVideoStreamId, 0, 0 );
+ Debug ( 1, "Found decoder" );
+ zm_dump_stream_format( mFormatContext, mVideoStreamId, 0, 0 );
- // Open the codec
+ // Open the codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
- Debug ( 1, "Calling avcodec_open" );
- if ( avcodec_open( mCodecContext, mCodec ) < 0 )
+ Debug ( 1, "Calling avcodec_open" );
+ if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
Debug ( 1, "Calling avcodec_open2" );
- if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
+ if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
- Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );
+ Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );
- Debug ( 1, "Opened codec" );
+ Debug ( 1, "Opened codec" );
- // Allocate space for the native video frame
+ // Allocate space for the native video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- mRawFrame = av_frame_alloc();
+ mRawFrame = av_frame_alloc();
#else
- mRawFrame = avcodec_alloc_frame();
+ mRawFrame = avcodec_alloc_frame();
#endif
- // Allocate space for the converted video frame
+ // Allocate space for the converted video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- mFrame = av_frame_alloc();
+ mFrame = av_frame_alloc();
#else
- mFrame = avcodec_alloc_frame();
+ mFrame = avcodec_alloc_frame();
#endif
- if(mRawFrame == NULL || mFrame == NULL)
- Fatal( "Unable to allocate frame for %s", mPath.c_str() );
+ if(mRawFrame == NULL || mFrame == NULL)
+ Fatal( "Unable to allocate frame for %s", mPath.c_str() );
Debug ( 1, "Allocated frames" );
@@ -412,284 +412,284 @@ int FfmpegCamera::OpenFfmpeg() {
}
Debug ( 1, "Validated imagesize" );
-
+
#if HAVE_LIBSWSCALE
- Debug ( 1, "Calling sws_isSupportedInput" );
- if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
- Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
- }
-
- if(!sws_isSupportedOutput(imagePixFormat)) {
- Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
- }
-
+ Debug ( 1, "Calling sws_isSupportedInput" );
+ if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
+ Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
+ }
+
+ if(!sws_isSupportedOutput(imagePixFormat)) {
+ Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
+ }
+
#else // HAVE_LIBSWSCALE
- Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
+ Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE
- mCanCapture = true;
+ mCanCapture = true;
- return 0;
+ return 0;
}
int FfmpegCamera::ReopenFfmpeg() {
- Debug(2, "ReopenFfmpeg called.");
+ Debug(2, "ReopenFfmpeg called.");
- mCanCapture = false;
- if (pthread_create( &mReopenThread, NULL, ReopenFfmpegThreadCallback, (void*) this) != 0){
- // Log a fatal error and exit the process.
- Fatal( "ReopenFfmpeg failed to create worker thread." );
- }
+ mCanCapture = false;
+ if (pthread_create( &mReopenThread, NULL, ReopenFfmpegThreadCallback, (void*) this) != 0){
+ // Log a fatal error and exit the process.
+ Fatal( "ReopenFfmpeg failed to create worker thread." );
+ }
- return 0;
+ return 0;
}
int FfmpegCamera::CloseFfmpeg(){
- Debug(2, "CloseFfmpeg called.");
+ Debug(2, "CloseFfmpeg called.");
- mCanCapture = false;
+ mCanCapture = false;
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- av_frame_free( &mFrame );
- av_frame_free( &mRawFrame );
+ av_frame_free( &mFrame );
+ av_frame_free( &mRawFrame );
#else
- av_freep( &mFrame );
- av_freep( &mRawFrame );
+ av_freep( &mFrame );
+ av_freep( &mRawFrame );
#endif
-
+
#if HAVE_LIBSWSCALE
- if ( mConvertContext )
- {
- sws_freeContext( mConvertContext );
- mConvertContext = NULL;
- }
+ if ( mConvertContext )
+ {
+ sws_freeContext( mConvertContext );
+ mConvertContext = NULL;
+ }
#endif
- if ( mCodecContext )
- {
- avcodec_close( mCodecContext );
- mCodecContext = NULL; // Freed by av_close_input_file
- }
- if ( mFormatContext )
- {
+ if ( mCodecContext )
+ {
+ avcodec_close( mCodecContext );
+ mCodecContext = NULL; // Freed by av_close_input_file
+ }
+ if ( mFormatContext )
+ {
#if !LIBAVFORMAT_VERSION_CHECK(53, 17, 0, 25, 0)
- av_close_input_file( mFormatContext );
+ av_close_input_file( mFormatContext );
#else
- avformat_close_input( &mFormatContext );
+ avformat_close_input( &mFormatContext );
#endif
- mFormatContext = NULL;
- }
+ mFormatContext = NULL;
+ }
- return 0;
+ return 0;
}
int FfmpegCamera::FfmpegInterruptCallback(void *ctx)
{
- FfmpegCamera* camera = reinterpret_cast(ctx);
- if (camera->mIsOpening){
- int now = time(NULL);
- if ((now - camera->mOpenStart) > config.ffmpeg_open_timeout) {
- Error ( "Open video took more than %d seconds.", config.ffmpeg_open_timeout );
- return 1;
- }
+ FfmpegCamera* camera = reinterpret_cast(ctx);
+ if (camera->mIsOpening){
+ int now = time(NULL);
+ if ((now - camera->mOpenStart) > config.ffmpeg_open_timeout) {
+ Error ( "Open video took more than %d seconds.", config.ffmpeg_open_timeout );
+ return 1;
}
+ }
- return 0;
+ return 0;
}
void *FfmpegCamera::ReopenFfmpegThreadCallback(void *ctx){
- if (ctx == NULL) return NULL;
+ if (ctx == NULL) return NULL;
- FfmpegCamera* camera = reinterpret_cast(ctx);
+ FfmpegCamera* camera = reinterpret_cast(ctx);
- while (1){
- // Close current stream.
- camera->CloseFfmpeg();
+ while (1){
+ // Close current stream.
+ camera->CloseFfmpeg();
- // Sleep if necessary to not reconnect too fast.
- int wait = config.ffmpeg_open_timeout - (time(NULL) - camera->mOpenStart);
- wait = wait < 0 ? 0 : wait;
- if (wait > 0){
- Debug( 1, "Sleeping %d seconds before reopening stream.", wait );
- sleep(wait);
- }
-
- if (camera->OpenFfmpeg() == 0){
- return NULL;
- }
+ // Sleep if necessary to not reconnect too fast.
+ int wait = config.ffmpeg_open_timeout - (time(NULL) - camera->mOpenStart);
+ wait = wait < 0 ? 0 : wait;
+ if (wait > 0){
+ Debug( 1, "Sleeping %d seconds before reopening stream.", wait );
+ sleep(wait);
}
+
+ if (camera->OpenFfmpeg() == 0){
+ return NULL;
+ }
+ }
}
//Function to handle capture and store
int FfmpegCamera::CaptureAndRecord( Image &image, bool recording, char* event_file )
{
- if (!mCanCapture){
- return -1;
- }
-
- // If the reopen thread has a value, but mCanCapture != 0, then we have just reopened the connection to the ffmpeg device, and we can clean up the thread.
- if (mReopenThread != 0) {
- void *retval = 0;
- int ret;
-
- ret = pthread_join(mReopenThread, &retval);
- if (ret != 0){
- Error("Could not join reopen thread.");
- }
-
- Info( "Successfully reopened stream." );
- mReopenThread = 0;
+ if (!mCanCapture){
+ return -1;
+ }
+
+ // If the reopen thread has a value, but mCanCapture != 0, then we have just reopened the connection to the ffmpeg device, and we can clean up the thread.
+ if (mReopenThread != 0) {
+ void *retval = 0;
+ int ret;
+
+ ret = pthread_join(mReopenThread, &retval);
+ if (ret != 0){
+ Error("Could not join reopen thread.");
}
- AVPacket packet;
- uint8_t* directbuffer;
-
- /* Request a writeable buffer of the target image */
- directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
- if( directbuffer == NULL ) {
- Error("Failed requesting writeable buffer for the captured image.");
- return (-1);
- }
-
- if ( mCodecContext->codec_id != AV_CODEC_ID_H264 ) {
- Error( "Input stream is not h264. The stored event file may not be viewable in browser." );
+ Info( "Successfully reopened stream." );
+ mReopenThread = 0;
+ }
+
+ AVPacket packet;
+ uint8_t* directbuffer;
+
+ /* Request a writeable buffer of the target image */
+ directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
+ if( directbuffer == NULL ) {
+ Error("Failed requesting writeable buffer for the captured image.");
+ return (-1);
+ }
+
+ if ( mCodecContext->codec_id != AV_CODEC_ID_H264 ) {
+ Error( "Input stream is not h264. The stored event file may not be viewable in browser." );
+ }
+
+ int frameComplete = false;
+ while ( !frameComplete ) {
+ int avResult = av_read_frame( mFormatContext, &packet );
+ if ( avResult < 0 ) {
+ char errbuf[AV_ERROR_MAX_STRING_SIZE];
+ av_strerror(avResult, errbuf, AV_ERROR_MAX_STRING_SIZE);
+ if (
+ // Check if EOF.
+ (avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
+ // Check for Connection failure.
+ (avResult == -110)
+ ) {
+ Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf);
+ ReopenFfmpeg();
+ }
+
+ Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, avResult, errbuf );
+ return( -1 );
}
-
- int frameComplete = false;
- while ( !frameComplete ) {
- int avResult = av_read_frame( mFormatContext, &packet );
- if ( avResult < 0 ) {
- char errbuf[AV_ERROR_MAX_STRING_SIZE];
- av_strerror(avResult, errbuf, AV_ERROR_MAX_STRING_SIZE);
- if (
- // Check if EOF.
- (avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
- // Check for Connection failure.
- (avResult == -110)
- ) {
- Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf);
- ReopenFfmpeg();
- }
-
- Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, avResult, errbuf );
- return( -1 );
- }
- Debug( 5, "Got packet from stream %d", packet.stream_index );
- if ( packet.stream_index == mVideoStreamId ) {
+ Debug( 5, "Got packet from stream %d", packet.stream_index );
+ if ( packet.stream_index == mVideoStreamId ) {
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
- if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 )
+ if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 )
#else
- if ( avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size ) < 0 )
+ if ( avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size ) < 0 )
#endif
- Fatal( "Unable to decode frame at frame %d", frameCount );
+ Fatal( "Unable to decode frame at frame %d", frameCount );
- Debug( 4, "Decoded video packet at frame %d", frameCount );
+ Debug( 4, "Decoded video packet at frame %d", frameCount );
- if ( frameComplete ) {
- Debug( 4, "Got frame %d", frameCount );
-
- avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);
+ if ( frameComplete ) {
+ Debug( 4, "Got frame %d", frameCount );
- //Keep the last keyframe so we can establish immediate video
- if(packet.flags & AV_PKT_FLAG_KEY) {
- //Debug(4, "Have keyframe");
- //av_copy_packet(&lastKeyframePkt, &packet);
- //TODO I think we need to store the key frame location for seeking as part of the event
- }
-
- //Video recording
- if ( recording && !wasRecording ) {
- //Instantiate the video storage module
- Debug(3, "recording and ! wasRecording %s", event_file);
+ avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);
- videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() );
- wasRecording = true;
- strcpy(oldDirectory, event_file);
+ //Keep the last keyframe so we can establish immediate video
+ if(packet.flags & AV_PKT_FLAG_KEY) {
+ //Debug(4, "Have keyframe");
+ //av_copy_packet(&lastKeyframePkt, &packet);
+ //TODO I think we need to store the key frame location for seeking as part of the event
+ }
-
- // Need to write out all the frames from the last keyframe?
-
- } else if ( ( ! recording ) && wasRecording && videoStore ) {
- Info("Deleting videoStore instance");
- delete videoStore;
- videoStore = NULL;
- }
-
- // The directory we are recording to is no longer tied to the current event.
- // Need to re-init the videostore with the correct directory and start recording again
- // for efficiency's sake, we should test for keyframe before we test for directory change...
- if ( recording && wasRecording && (packet.flags & AV_PKT_FLAG_KEY) && (strcmp(oldDirectory, event_file) != 0 ) ) {
- // don't open new videostore until we're on a key frame..would this require an offset adjustment for the event as a result?...
- // if we store our key frame location with the event will that be enough?
- Info("Re-starting video storage module");
- if(videoStore){
- delete videoStore;
- videoStore = NULL;
- }
+ //Video recording
+ if ( recording && !wasRecording ) {
+ //Instantiate the video storage module
+ Debug(3, "recording and ! wasRecording %s", event_file);
+
+ videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() );
+ wasRecording = true;
+ strcpy(oldDirectory, event_file);
+
+
+ // Need to write out all the frames from the last keyframe?
+
+ } else if ( ( ! recording ) && wasRecording && videoStore ) {
+ Info("Deleting videoStore instance");
+ delete videoStore;
+ videoStore = NULL;
+ }
+
+ // The directory we are recording to is no longer tied to the current event.
+ // Need to re-init the videostore with the correct directory and start recording again
+ // for efficiency's sake, we should test for keyframe before we test for directory change...
+ if ( recording && wasRecording && (packet.flags & AV_PKT_FLAG_KEY) && (strcmp(oldDirectory, event_file) != 0 ) ) {
+ // don't open new videostore until we're on a key frame..would this require an offset adjustment for the event as a result?...
+ // if we store our key frame location with the event will that be enough?
+ Info("Re-starting video storage module");
+ if(videoStore){
+ delete videoStore;
+ videoStore = NULL;
+ }
+
+ videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation());
+ strcpy(oldDirectory, event_file);
+ }
+
+ if ( videoStore && recording ) {
+ //Write the packet to our video store
+ int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]);//, &lastKeyframePkt);
+ if ( ret < 0 ) { //Less than zero and we skipped a frame
+ av_free_packet( &packet );
+ return 0;
+ }
+ }
- videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation());
- strcpy(oldDirectory, event_file);
- }
-
- if ( videoStore && recording ) {
- //Write the packet to our video store
- int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]);//, &lastKeyframePkt);
- if ( ret < 0 ) { //Less than zero and we skipped a frame
- av_free_packet( &packet );
- return 0;
- }
- }
-
#if HAVE_LIBSWSCALE
- if ( mConvertContext == NULL ) {
- mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
- if ( mConvertContext == NULL )
- Fatal( "Unable to create conversion context for %s", mPath.c_str() );
- }
-
- if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
- Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
+ if ( mConvertContext == NULL ) {
+ mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
+ if ( mConvertContext == NULL )
+ Fatal( "Unable to create conversion context for %s", mPath.c_str() );
+ }
+
+ if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
+ Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
- Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
+ Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE
- frameCount++;
- } else {
- Debug( 3, "Not framecomplete after av_read_frame" );
- } // end if frameComplete
- } else if ( packet.stream_index == mAudioStreamId ) { //FIXME best way to copy all other streams
- Debug( 4, "Audio stream index %d", packet.stream_index );
- if ( frameComplete ) {
- Debug( 3, "Got audio frame with framecomplete %d", frameCount );
- //} else {
- //Debug( 3, "Got audio frame %d without frameComplete", frameCount );
- }
- if ( videoStore && recording ) {
- if ( record_audio ) {
- Debug(3, "Recording audio packet streamindex(%d) packetstreamindex(%d)", mAudioStreamId, packet.stream_index );
- //Write the packet to our video store
- //FIXME no relevance of last key frame
- int ret = videoStore->writeAudioFramePacket( &packet, mFormatContext->streams[packet.stream_index] );
- if ( ret < 0 ) {//Less than zero and we skipped a frame
- av_free_packet( &packet );
- return 0;
- }
- } else {
- Debug(4, "Not recording audio packet" );
- }
- }
- } else {
-#if LIBAVUTIL_VERSION_CHECK(54, 23, 0, 23, 0)
- Debug( 3, "Some other stream index %d, %s", packet.stream_index, av_get_media_type_string( mFormatContext->streams[packet.stream_index]->codec->codec_type) );
-#else
- Debug( 3, "Some other stream index %d", packet.stream_index );
-#endif
+ frameCount++;
+ } else {
+ Debug( 3, "Not framecomplete after av_read_frame" );
+ } // end if frameComplete
+ } else if ( packet.stream_index == mAudioStreamId ) { //FIXME best way to copy all other streams
+ Debug( 4, "Audio stream index %d", packet.stream_index );
+ if ( frameComplete ) {
+ Debug( 3, "Got audio frame with framecomplete %d", frameCount );
+ //} else {
+ //Debug( 3, "Got audio frame %d without frameComplete", frameCount );
+ }
+ if ( videoStore && recording ) {
+ if ( record_audio ) {
+ Debug(3, "Recording audio packet streamindex(%d) packetstreamindex(%d)", mAudioStreamId, packet.stream_index );
+ //Write the packet to our video store
+ //FIXME no relevance of last key frame
+ int ret = videoStore->writeAudioFramePacket( &packet, mFormatContext->streams[packet.stream_index] );
+ if ( ret < 0 ) {//Less than zero and we skipped a frame
+ av_free_packet( &packet );
+ return 0;
}
- av_free_packet( &packet );
- } // end while ! frameComplete
- return (frameCount);
+ } else {
+ Debug(4, "Not recording audio packet" );
+ }
+ }
+ } else {
+#if LIBAVUTIL_VERSION_CHECK(54, 23, 0, 23, 0)
+ Debug( 3, "Some other stream index %d, %s", packet.stream_index, av_get_media_type_string( mFormatContext->streams[packet.stream_index]->codec->codec_type) );
+#else
+ Debug( 3, "Some other stream index %d", packet.stream_index );
+#endif
+ }
+ av_free_packet( &packet );
+ } // end while ! frameComplete
+ return (frameCount);
}
#endif // HAVE_LIBAVFORMAT
diff --git a/src/zm_ffmpeg_camera.h b/src/zm_ffmpeg_camera.h
index 314ce6fa1..b203a4d12 100644
--- a/src/zm_ffmpeg_camera.h
+++ b/src/zm_ffmpeg_camera.h
@@ -33,10 +33,10 @@
//
class FfmpegCamera : public Camera
{
-protected:
+ protected:
std::string mPath;
- std::string mMethod;
- std::string mOptions;
+ std::string mMethod;
+ std::string mOptions;
int frameCount;
@@ -60,7 +60,7 @@ protected:
int mOpenStart;
pthread_t mReopenThread;
#endif // HAVE_LIBAVFORMAT
-
+
bool wasRecording;
VideoStore *videoStore;
char oldDirectory[4096];
@@ -69,27 +69,27 @@ protected:
AVPacket lastKeyframePkt;
#if HAVE_LIBSWSCALE
- struct SwsContext *mConvertContext;
+ struct SwsContext *mConvertContext;
#endif
int64_t startTime;
-
-public:
- FfmpegCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
- ~FfmpegCamera();
+
+ public:
+ FfmpegCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
+ ~FfmpegCamera();
const std::string &Path() const { return( mPath ); }
const std::string &Options() const { return( mOptions ); }
const std::string &Method() const { return( mMethod ); }
- void Initialise();
- void Terminate();
+ void Initialise();
+ void Terminate();
- int PrimeCapture();
- int PreCapture();
- int Capture( Image &image );
- int CaptureAndRecord( Image &image, bool recording, char* event_directory );
- int PostCapture();
+ int PrimeCapture();
+ int PreCapture();
+ int Capture( Image &image );
+ int CaptureAndRecord( Image &image, bool recording, char* event_directory );
+ int PostCapture();
};
#endif // ZM_FFMPEG_CAMERA_H
diff --git a/src/zm_file_camera.cpp b/src/zm_file_camera.cpp
index f9ec73b26..55323f036 100644
--- a/src/zm_file_camera.cpp
+++ b/src/zm_file_camera.cpp
@@ -36,28 +36,28 @@
FileCamera::FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) : Camera( p_id, FILE_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio )
{
- strncpy( path, p_path, sizeof(path) );
- if ( capture )
- {
- Initialise();
- }
+ strncpy( path, p_path, sizeof(path) );
+ if ( capture )
+ {
+ Initialise();
+ }
}
FileCamera::~FileCamera()
{
- if ( capture )
- {
- Terminate();
- }
+ if ( capture )
+ {
+ Terminate();
+ }
}
void FileCamera::Initialise()
{
- if ( !path[0] )
- {
- Error( "No path specified for file image" );
- exit( -1 );
- }
+ if ( !path[0] )
+ {
+ Error( "No path specified for file image" );
+ exit( -1 );
+ }
}
void FileCamera::Terminate()
@@ -66,23 +66,23 @@ void FileCamera::Terminate()
int FileCamera::PreCapture()
{
- struct stat statbuf;
- if ( stat( path, &statbuf ) < 0 )
- {
- Error( "Can't stat %s: %s", path, strerror(errno) );
- return( -1 );
- }
+ struct stat statbuf;
+ if ( stat( path, &statbuf ) < 0 )
+ {
+ Error( "Can't stat %s: %s", path, strerror(errno) );
+ return( -1 );
+ }
- while ( (time( 0 ) - statbuf.st_mtime) < 1 )
- {
- usleep( 100000 );
- }
- return( 0 );
+ while ( (time( 0 ) - statbuf.st_mtime) < 1 )
+ {
+ usleep( 100000 );
+ }
+ return( 0 );
}
int FileCamera::Capture( Image &image )
{
- return( image.ReadJpeg( path, colours, subpixelorder )?0:-1 );
+ return( image.ReadJpeg( path, colours, subpixelorder )?0:-1 );
}
int FileCamera::PostCapture()
diff --git a/src/zm_file_camera.h b/src/zm_file_camera.h
index a2ed23077..a9c354844 100644
--- a/src/zm_file_camera.h
+++ b/src/zm_file_camera.h
@@ -33,20 +33,20 @@
class FileCamera : public Camera
{
protected:
- char path[PATH_MAX];
+ char path[PATH_MAX];
public:
- FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
- ~FileCamera();
+ FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
+ ~FileCamera();
- const char *Path() const { return( path ); }
+ const char *Path() const { return( path ); }
- void Initialise();
- void Terminate();
- int PreCapture();
- int Capture( Image &image );
- int PostCapture();
- int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
+ void Initialise();
+ void Terminate();
+ int PreCapture();
+ int Capture( Image &image );
+ int PostCapture();
+ int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
};
#endif // ZM_FILE_CAMERA_H
diff --git a/src/zm_libvlc_camera.cpp b/src/zm_libvlc_camera.cpp
index affd91011..9439efb83 100644
--- a/src/zm_libvlc_camera.cpp
+++ b/src/zm_libvlc_camera.cpp
@@ -25,102 +25,102 @@
// Do all the buffer checking work here to avoid unnecessary locking
void* LibvlcLockBuffer(void* opaque, void** planes)
{
- LibvlcPrivateData* data = (LibvlcPrivateData*)opaque;
- data->mutex.lock();
-
- uint8_t* buffer = data->buffer;
- data->buffer = data->prevBuffer;
- data->prevBuffer = buffer;
-
- *planes = data->buffer;
- return NULL;
+ LibvlcPrivateData* data = (LibvlcPrivateData*)opaque;
+ data->mutex.lock();
+
+ uint8_t* buffer = data->buffer;
+ data->buffer = data->prevBuffer;
+ data->prevBuffer = buffer;
+
+ *planes = data->buffer;
+ return NULL;
}
void LibvlcUnlockBuffer(void* opaque, void* picture, void *const *planes)
{
- LibvlcPrivateData* data = (LibvlcPrivateData*)opaque;
-
- bool newFrame = false;
- for(uint32_t i = 0; i < data->bufferSize; i++)
+ LibvlcPrivateData* data = (LibvlcPrivateData*)opaque;
+
+ bool newFrame = false;
+ for(uint32_t i = 0; i < data->bufferSize; i++)
+ {
+ if(data->buffer[i] != data->prevBuffer[i])
{
- if(data->buffer[i] != data->prevBuffer[i])
- {
- newFrame = true;
- break;
- }
- }
- data->mutex.unlock();
-
- time_t now;
- time(&now);
- // Return frames slightly faster than 1fps (if time() supports greater than one second resolution)
- if(newFrame || difftime(now, data->prevTime) >= 0.8)
- {
- data->prevTime = now;
- data->newImage.updateValueSignal(true);
+ newFrame = true;
+ break;
}
+ }
+ data->mutex.unlock();
+
+ time_t now;
+ time(&now);
+ // Return frames slightly faster than 1fps (if time() supports greater than one second resolution)
+ if(newFrame || difftime(now, data->prevTime) >= 0.8)
+ {
+ data->prevTime = now;
+ data->newImage.updateValueSignal(true);
+ }
}
LibvlcCamera::LibvlcCamera( int p_id, const std::string &p_path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
- Camera( p_id, LIBVLC_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
- mPath( p_path ),
- mMethod( p_method ),
- mOptions( p_options )
-{
- mLibvlcInstance = NULL;
- mLibvlcMedia = NULL;
- mLibvlcMediaPlayer = NULL;
- mLibvlcData.buffer = NULL;
- mLibvlcData.prevBuffer = NULL;
+ Camera( p_id, LIBVLC_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
+ mPath( p_path ),
+ mMethod( p_method ),
+ mOptions( p_options )
+{
+ mLibvlcInstance = NULL;
+ mLibvlcMedia = NULL;
+ mLibvlcMediaPlayer = NULL;
+ mLibvlcData.buffer = NULL;
+ mLibvlcData.prevBuffer = NULL;
- /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
- if(colours == ZM_COLOUR_RGB32) {
- subpixelorder = ZM_SUBPIX_ORDER_BGRA;
- mTargetChroma = "RV32";
- mBpp = 4;
- } else if(colours == ZM_COLOUR_RGB24) {
- subpixelorder = ZM_SUBPIX_ORDER_BGR;
- mTargetChroma = "RV24";
- mBpp = 3;
- } else if(colours == ZM_COLOUR_GRAY8) {
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- mTargetChroma = "GREY";
- mBpp = 1;
- } else {
- Panic("Unexpected colours: %d",colours);
- }
-
- if ( capture )
- {
- Initialise();
- }
+ /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
+ if(colours == ZM_COLOUR_RGB32) {
+ subpixelorder = ZM_SUBPIX_ORDER_BGRA;
+ mTargetChroma = "RV32";
+ mBpp = 4;
+ } else if(colours == ZM_COLOUR_RGB24) {
+ subpixelorder = ZM_SUBPIX_ORDER_BGR;
+ mTargetChroma = "RV24";
+ mBpp = 3;
+ } else if(colours == ZM_COLOUR_GRAY8) {
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ mTargetChroma = "GREY";
+ mBpp = 1;
+ } else {
+ Panic("Unexpected colours: %d",colours);
+ }
+
+ if ( capture )
+ {
+ Initialise();
+ }
}
LibvlcCamera::~LibvlcCamera()
{
- if ( capture )
- {
- Terminate();
- }
- if(mLibvlcMediaPlayer != NULL)
- {
- libvlc_media_player_release(mLibvlcMediaPlayer);
- mLibvlcMediaPlayer = NULL;
- }
- if(mLibvlcMedia != NULL)
- {
- libvlc_media_release(mLibvlcMedia);
- mLibvlcMedia = NULL;
- }
- if(mLibvlcInstance != NULL)
- {
- libvlc_release(mLibvlcInstance);
- mLibvlcInstance = NULL;
- }
- if (mOptArgV != NULL)
- {
- delete[] mOptArgV;
- }
+ if ( capture )
+ {
+ Terminate();
+ }
+ if(mLibvlcMediaPlayer != NULL)
+ {
+ libvlc_media_player_release(mLibvlcMediaPlayer);
+ mLibvlcMediaPlayer = NULL;
+ }
+ if(mLibvlcMedia != NULL)
+ {
+ libvlc_media_release(mLibvlcMedia);
+ mLibvlcMedia = NULL;
+ }
+ if(mLibvlcInstance != NULL)
+ {
+ libvlc_release(mLibvlcInstance);
+ mLibvlcInstance = NULL;
+ }
+ if (mOptArgV != NULL)
+ {
+ delete[] mOptArgV;
+ }
}
void LibvlcCamera::Initialise()
@@ -129,105 +129,105 @@ void LibvlcCamera::Initialise()
void LibvlcCamera::Terminate()
{
- libvlc_media_player_stop(mLibvlcMediaPlayer);
- if(mLibvlcData.buffer != NULL)
- {
- zm_freealigned(mLibvlcData.buffer);
- }
- if(mLibvlcData.prevBuffer != NULL)
- {
- zm_freealigned(mLibvlcData.prevBuffer);
- }
+ libvlc_media_player_stop(mLibvlcMediaPlayer);
+ if(mLibvlcData.buffer != NULL)
+ {
+ zm_freealigned(mLibvlcData.buffer);
+ }
+ if(mLibvlcData.prevBuffer != NULL)
+ {
+ zm_freealigned(mLibvlcData.prevBuffer);
+ }
}
int LibvlcCamera::PrimeCapture()
{
- Info("Priming capture from %s", mPath.c_str());
-
- StringVector opVect = split(Options(), ",");
-
- // Set transport method as specified by method field, rtpUni is default
- if ( Method() == "rtpMulti" )
- opVect.push_back("--rtsp-mcast");
- else if ( Method() == "rtpRtsp" )
- opVect.push_back("--rtsp-tcp");
- else if ( Method() == "rtpRtspHttp" )
- opVect.push_back("--rtsp-http");
+ Info("Priming capture from %s", mPath.c_str());
- if (opVect.size() > 0)
- {
- mOptArgV = new char*[opVect.size()];
- Debug(2, "Number of Options: %d",opVect.size());
- for (size_t i=0; i< opVect.size(); i++) {
- opVect[i] = trimSpaces(opVect[i]);
- mOptArgV[i] = (char *)opVect[i].c_str();
- Debug(2, "set option %d to '%s'", i, opVect[i].c_str());
- }
+ StringVector opVect = split(Options(), ",");
+
+ // Set transport method as specified by method field, rtpUni is default
+ if ( Method() == "rtpMulti" )
+ opVect.push_back("--rtsp-mcast");
+ else if ( Method() == "rtpRtsp" )
+ opVect.push_back("--rtsp-tcp");
+ else if ( Method() == "rtpRtspHttp" )
+ opVect.push_back("--rtsp-http");
+
+ if (opVect.size() > 0)
+ {
+ mOptArgV = new char*[opVect.size()];
+ Debug(2, "Number of Options: %d",opVect.size());
+ for (size_t i=0; i< opVect.size(); i++) {
+ opVect[i] = trimSpaces(opVect[i]);
+ mOptArgV[i] = (char *)opVect[i].c_str();
+ Debug(2, "set option %d to '%s'", i, opVect[i].c_str());
}
+ }
- mLibvlcInstance = libvlc_new (opVect.size(), (const char* const*)mOptArgV);
- if(mLibvlcInstance == NULL)
- Fatal("Unable to create libvlc instance due to: %s", libvlc_errmsg());
-
- mLibvlcMedia = libvlc_media_new_location(mLibvlcInstance, mPath.c_str());
- if(mLibvlcMedia == NULL)
- Fatal("Unable to open input %s due to: %s", mPath.c_str(), libvlc_errmsg());
-
- mLibvlcMediaPlayer = libvlc_media_player_new_from_media(mLibvlcMedia);
- if(mLibvlcMediaPlayer == NULL)
- Fatal("Unable to create player for %s due to: %s", mPath.c_str(), libvlc_errmsg());
+ mLibvlcInstance = libvlc_new (opVect.size(), (const char* const*)mOptArgV);
+ if(mLibvlcInstance == NULL)
+ Fatal("Unable to create libvlc instance due to: %s", libvlc_errmsg());
- libvlc_video_set_format(mLibvlcMediaPlayer, mTargetChroma.c_str(), width, height, width * mBpp);
- libvlc_video_set_callbacks(mLibvlcMediaPlayer, &LibvlcLockBuffer, &LibvlcUnlockBuffer, NULL, &mLibvlcData);
+ mLibvlcMedia = libvlc_media_new_location(mLibvlcInstance, mPath.c_str());
+ if(mLibvlcMedia == NULL)
+ Fatal("Unable to open input %s due to: %s", mPath.c_str(), libvlc_errmsg());
- mLibvlcData.bufferSize = width * height * mBpp;
- // Libvlc wants 32 byte alignment for images (should in theory do this for all image lines)
- mLibvlcData.buffer = (uint8_t*)zm_mallocaligned(32, mLibvlcData.bufferSize);
- mLibvlcData.prevBuffer = (uint8_t*)zm_mallocaligned(32, mLibvlcData.bufferSize);
-
- mLibvlcData.newImage.setValueImmediate(false);
+ mLibvlcMediaPlayer = libvlc_media_player_new_from_media(mLibvlcMedia);
+ if(mLibvlcMediaPlayer == NULL)
+ Fatal("Unable to create player for %s due to: %s", mPath.c_str(), libvlc_errmsg());
- libvlc_media_player_play(mLibvlcMediaPlayer);
-
- return(0);
+ libvlc_video_set_format(mLibvlcMediaPlayer, mTargetChroma.c_str(), width, height, width * mBpp);
+ libvlc_video_set_callbacks(mLibvlcMediaPlayer, &LibvlcLockBuffer, &LibvlcUnlockBuffer, NULL, &mLibvlcData);
+
+ mLibvlcData.bufferSize = width * height * mBpp;
+ // Libvlc wants 32 byte alignment for images (should in theory do this for all image lines)
+ mLibvlcData.buffer = (uint8_t*)zm_mallocaligned(32, mLibvlcData.bufferSize);
+ mLibvlcData.prevBuffer = (uint8_t*)zm_mallocaligned(32, mLibvlcData.bufferSize);
+
+ mLibvlcData.newImage.setValueImmediate(false);
+
+ libvlc_media_player_play(mLibvlcMediaPlayer);
+
+ return(0);
}
int LibvlcCamera::PreCapture()
{
- return(0);
+ return(0);
}
// Should not return -1 as cancels capture. Always wait for image if available.
int LibvlcCamera::Capture( Image &image )
{
- while(!mLibvlcData.newImage.getValueImmediate())
- mLibvlcData.newImage.getUpdatedValue(1);
+ while(!mLibvlcData.newImage.getValueImmediate())
+ mLibvlcData.newImage.getUpdatedValue(1);
- mLibvlcData.mutex.lock();
- image.Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp);
- mLibvlcData.newImage.setValueImmediate(false);
- mLibvlcData.mutex.unlock();
-
- return (0);
+ mLibvlcData.mutex.lock();
+ image.Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp);
+ mLibvlcData.newImage.setValueImmediate(false);
+ mLibvlcData.mutex.unlock();
+
+ return (0);
}
// Should not return -1 as cancels capture. Always wait for image if available.
int LibvlcCamera::CaptureAndRecord( Image &image, bool recording, char* event_directory )
{
- while(!mLibvlcData.newImage.getValueImmediate())
- mLibvlcData.newImage.getUpdatedValue(1);
+ while(!mLibvlcData.newImage.getValueImmediate())
+ mLibvlcData.newImage.getUpdatedValue(1);
- mLibvlcData.mutex.lock();
- image.Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp);
- mLibvlcData.newImage.setValueImmediate(false);
- mLibvlcData.mutex.unlock();
+ mLibvlcData.mutex.lock();
+ image.Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp);
+ mLibvlcData.newImage.setValueImmediate(false);
+ mLibvlcData.mutex.unlock();
- return (0);
+ return (0);
}
int LibvlcCamera::PostCapture()
{
- return(0);
+ return(0);
}
#endif // HAVE_LIBVLC
diff --git a/src/zm_libvlc_camera.h b/src/zm_libvlc_camera.h
index 8c466b5ae..7c52a30cd 100644
--- a/src/zm_libvlc_camera.h
+++ b/src/zm_libvlc_camera.h
@@ -33,45 +33,45 @@
// Used by libvlc callbacks
struct LibvlcPrivateData
{
- uint8_t* buffer;
- uint8_t* prevBuffer;
- time_t prevTime;
- uint32_t bufferSize;
- Mutex mutex;
- ThreadData newImage;
+ uint8_t* buffer;
+ uint8_t* prevBuffer;
+ time_t prevTime;
+ uint32_t bufferSize;
+ Mutex mutex;
+ ThreadData newImage;
};
class LibvlcCamera : public Camera
{
protected:
- std::string mPath;
- std::string mMethod;
- std::string mOptions;
- char **mOptArgV;
- LibvlcPrivateData mLibvlcData;
- std::string mTargetChroma;
- uint8_t mBpp;
+ std::string mPath;
+ std::string mMethod;
+ std::string mOptions;
+ char **mOptArgV;
+ LibvlcPrivateData mLibvlcData;
+ std::string mTargetChroma;
+ uint8_t mBpp;
- libvlc_instance_t *mLibvlcInstance;
- libvlc_media_t *mLibvlcMedia;
- libvlc_media_player_t *mLibvlcMediaPlayer;
+ libvlc_instance_t *mLibvlcInstance;
+ libvlc_media_t *mLibvlcMedia;
+ libvlc_media_player_t *mLibvlcMediaPlayer;
public:
- LibvlcCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
- ~LibvlcCamera();
+ LibvlcCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
+ ~LibvlcCamera();
- const std::string &Path() const { return( mPath ); }
- const std::string &Options() const { return( mOptions ); }
- const std::string &Method() const { return( mMethod ); }
+ const std::string &Path() const { return( mPath ); }
+ const std::string &Options() const { return( mOptions ); }
+ const std::string &Method() const { return( mMethod ); }
- void Initialise();
- void Terminate();
+ void Initialise();
+ void Terminate();
- int PrimeCapture();
- int PreCapture();
- int Capture( Image &image );
- int CaptureAndRecord( Image &image, bool recording, char* event_directory );
- int PostCapture();
+ int PrimeCapture();
+ int PreCapture();
+ int Capture( Image &image );
+ int CaptureAndRecord( Image &image, bool recording, char* event_directory );
+ int PostCapture();
};
#endif // HAVE_LIBVLC
diff --git a/src/zm_local_camera.cpp b/src/zm_local_camera.cpp
index d197a605c..c955c52de 100644
--- a/src/zm_local_camera.cpp
+++ b/src/zm_local_camera.cpp
@@ -43,216 +43,216 @@ static unsigned int BigEndian;
static int vidioctl( int fd, int request, void *arg )
{
- int result = -1;
- do
- {
- result = ioctl( fd, request, arg );
- } while ( result == -1 && errno == EINTR );
- return( result );
+ int result = -1;
+ do
+ {
+ result = ioctl( fd, request, arg );
+ } while ( result == -1 && errno == EINTR );
+ return( result );
}
#if HAVE_LIBSWSCALE
static _AVPIXELFORMAT getFfPixFormatFromV4lPalette( int v4l_version, int palette )
{
- _AVPIXELFORMAT pixFormat = AV_PIX_FMT_NONE;
+ _AVPIXELFORMAT pixFormat = AV_PIX_FMT_NONE;
#if ZM_HAS_V4L2
- if ( v4l_version == 2 )
+ if ( v4l_version == 2 )
+ {
+ switch( palette )
{
- switch( palette )
- {
#if defined(V4L2_PIX_FMT_RGB444) && defined(AV_PIX_FMT_RGB444)
- case V4L2_PIX_FMT_RGB444 :
- pixFormat = AV_PIX_FMT_RGB444;
- break;
+ case V4L2_PIX_FMT_RGB444 :
+ pixFormat = AV_PIX_FMT_RGB444;
+ break;
#endif // V4L2_PIX_FMT_RGB444
- case V4L2_PIX_FMT_RGB555 :
- pixFormat = AV_PIX_FMT_RGB555;
- break;
- case V4L2_PIX_FMT_RGB565 :
- pixFormat = AV_PIX_FMT_RGB565;
- break;
- case V4L2_PIX_FMT_BGR24 :
- pixFormat = AV_PIX_FMT_BGR24;
- break;
- case V4L2_PIX_FMT_RGB24 :
- pixFormat = AV_PIX_FMT_RGB24;
- break;
- case V4L2_PIX_FMT_BGR32 :
- pixFormat = AV_PIX_FMT_BGRA;
- break;
- case V4L2_PIX_FMT_RGB32 :
- pixFormat = AV_PIX_FMT_ARGB;
- break;
- case V4L2_PIX_FMT_GREY :
- pixFormat = AV_PIX_FMT_GRAY8;
- break;
- case V4L2_PIX_FMT_YUYV :
- pixFormat = AV_PIX_FMT_YUYV422;
- break;
- case V4L2_PIX_FMT_YUV422P :
- pixFormat = AV_PIX_FMT_YUV422P;
- break;
- case V4L2_PIX_FMT_YUV411P :
- pixFormat = AV_PIX_FMT_YUV411P;
- break;
+ case V4L2_PIX_FMT_RGB555 :
+ pixFormat = AV_PIX_FMT_RGB555;
+ break;
+ case V4L2_PIX_FMT_RGB565 :
+ pixFormat = AV_PIX_FMT_RGB565;
+ break;
+ case V4L2_PIX_FMT_BGR24 :
+ pixFormat = AV_PIX_FMT_BGR24;
+ break;
+ case V4L2_PIX_FMT_RGB24 :
+ pixFormat = AV_PIX_FMT_RGB24;
+ break;
+ case V4L2_PIX_FMT_BGR32 :
+ pixFormat = AV_PIX_FMT_BGRA;
+ break;
+ case V4L2_PIX_FMT_RGB32 :
+ pixFormat = AV_PIX_FMT_ARGB;
+ break;
+ case V4L2_PIX_FMT_GREY :
+ pixFormat = AV_PIX_FMT_GRAY8;
+ break;
+ case V4L2_PIX_FMT_YUYV :
+ pixFormat = AV_PIX_FMT_YUYV422;
+ break;
+ case V4L2_PIX_FMT_YUV422P :
+ pixFormat = AV_PIX_FMT_YUV422P;
+ break;
+ case V4L2_PIX_FMT_YUV411P :
+ pixFormat = AV_PIX_FMT_YUV411P;
+ break;
#ifdef V4L2_PIX_FMT_YUV444
- case V4L2_PIX_FMT_YUV444 :
- pixFormat = AV_PIX_FMT_YUV444P;
- break;
+ case V4L2_PIX_FMT_YUV444 :
+ pixFormat = AV_PIX_FMT_YUV444P;
+ break;
#endif // V4L2_PIX_FMT_YUV444
- case V4L2_PIX_FMT_YUV410 :
- pixFormat = AV_PIX_FMT_YUV410P;
- break;
- case V4L2_PIX_FMT_YUV420 :
- pixFormat = AV_PIX_FMT_YUV420P;
- break;
- case V4L2_PIX_FMT_JPEG :
- case V4L2_PIX_FMT_MJPEG :
- pixFormat = AV_PIX_FMT_YUVJ444P;
- break;
- case V4L2_PIX_FMT_UYVY :
- pixFormat = AV_PIX_FMT_UYVY422;
- break;
- // These don't seem to have ffmpeg equivalents
- // See if you can match any of the ones in the default clause below!?
- case V4L2_PIX_FMT_RGB332 :
- case V4L2_PIX_FMT_RGB555X :
- case V4L2_PIX_FMT_RGB565X :
- //case V4L2_PIX_FMT_Y16 :
- //case V4L2_PIX_FMT_PAL8 :
- case V4L2_PIX_FMT_YVU410 :
- case V4L2_PIX_FMT_YVU420 :
- case V4L2_PIX_FMT_Y41P :
- //case V4L2_PIX_FMT_YUV555 :
- //case V4L2_PIX_FMT_YUV565 :
- //case V4L2_PIX_FMT_YUV32 :
- case V4L2_PIX_FMT_NV12 :
- case V4L2_PIX_FMT_NV21 :
- case V4L2_PIX_FMT_YYUV :
- case V4L2_PIX_FMT_HI240 :
- case V4L2_PIX_FMT_HM12 :
- //case V4L2_PIX_FMT_SBGGR8 :
- //case V4L2_PIX_FMT_SGBRG8 :
- //case V4L2_PIX_FMT_SBGGR16 :
- case V4L2_PIX_FMT_DV :
- case V4L2_PIX_FMT_MPEG :
- case V4L2_PIX_FMT_WNVA :
- case V4L2_PIX_FMT_SN9C10X :
- case V4L2_PIX_FMT_PWC1 :
- case V4L2_PIX_FMT_PWC2 :
- case V4L2_PIX_FMT_ET61X251 :
- //case V4L2_PIX_FMT_SPCA501 :
- //case V4L2_PIX_FMT_SPCA505 :
- //case V4L2_PIX_FMT_SPCA508 :
- //case V4L2_PIX_FMT_SPCA561 :
- //case V4L2_PIX_FMT_PAC207 :
- //case V4L2_PIX_FMT_PJPG :
- //case V4L2_PIX_FMT_YVYU :
- default :
- {
- Fatal( "Can't find swscale format for palette %d", palette );
- break;
- // These are all spare and may match some of the above
- pixFormat = AV_PIX_FMT_YUVJ420P;
- pixFormat = AV_PIX_FMT_YUVJ422P;
- pixFormat = AV_PIX_FMT_UYVY422;
- pixFormat = AV_PIX_FMT_UYYVYY411;
- pixFormat = AV_PIX_FMT_BGR565;
- pixFormat = AV_PIX_FMT_BGR555;
- pixFormat = AV_PIX_FMT_BGR8;
- pixFormat = AV_PIX_FMT_BGR4;
- pixFormat = AV_PIX_FMT_BGR4_BYTE;
- pixFormat = AV_PIX_FMT_RGB8;
- pixFormat = AV_PIX_FMT_RGB4;
- pixFormat = AV_PIX_FMT_RGB4_BYTE;
- pixFormat = AV_PIX_FMT_NV12;
- pixFormat = AV_PIX_FMT_NV21;
- pixFormat = AV_PIX_FMT_RGB32_1;
- pixFormat = AV_PIX_FMT_BGR32_1;
- pixFormat = AV_PIX_FMT_GRAY16BE;
- pixFormat = AV_PIX_FMT_GRAY16LE;
- pixFormat = AV_PIX_FMT_YUV440P;
- pixFormat = AV_PIX_FMT_YUVJ440P;
- pixFormat = AV_PIX_FMT_YUVA420P;
- //pixFormat = AV_PIX_FMT_VDPAU_H264;
- //pixFormat = AV_PIX_FMT_VDPAU_MPEG1;
- //pixFormat = AV_PIX_FMT_VDPAU_MPEG2;
- }
+ case V4L2_PIX_FMT_YUV410 :
+ pixFormat = AV_PIX_FMT_YUV410P;
+ break;
+ case V4L2_PIX_FMT_YUV420 :
+ pixFormat = AV_PIX_FMT_YUV420P;
+ break;
+ case V4L2_PIX_FMT_JPEG :
+ case V4L2_PIX_FMT_MJPEG :
+ pixFormat = AV_PIX_FMT_YUVJ444P;
+ break;
+ case V4L2_PIX_FMT_UYVY :
+ pixFormat = AV_PIX_FMT_UYVY422;
+ break;
+ // These don't seem to have ffmpeg equivalents
+ // See if you can match any of the ones in the default clause below!?
+ case V4L2_PIX_FMT_RGB332 :
+ case V4L2_PIX_FMT_RGB555X :
+ case V4L2_PIX_FMT_RGB565X :
+ //case V4L2_PIX_FMT_Y16 :
+ //case V4L2_PIX_FMT_PAL8 :
+ case V4L2_PIX_FMT_YVU410 :
+ case V4L2_PIX_FMT_YVU420 :
+ case V4L2_PIX_FMT_Y41P :
+ //case V4L2_PIX_FMT_YUV555 :
+ //case V4L2_PIX_FMT_YUV565 :
+ //case V4L2_PIX_FMT_YUV32 :
+ case V4L2_PIX_FMT_NV12 :
+ case V4L2_PIX_FMT_NV21 :
+ case V4L2_PIX_FMT_YYUV :
+ case V4L2_PIX_FMT_HI240 :
+ case V4L2_PIX_FMT_HM12 :
+ //case V4L2_PIX_FMT_SBGGR8 :
+ //case V4L2_PIX_FMT_SGBRG8 :
+ //case V4L2_PIX_FMT_SBGGR16 :
+ case V4L2_PIX_FMT_DV :
+ case V4L2_PIX_FMT_MPEG :
+ case V4L2_PIX_FMT_WNVA :
+ case V4L2_PIX_FMT_SN9C10X :
+ case V4L2_PIX_FMT_PWC1 :
+ case V4L2_PIX_FMT_PWC2 :
+ case V4L2_PIX_FMT_ET61X251 :
+ //case V4L2_PIX_FMT_SPCA501 :
+ //case V4L2_PIX_FMT_SPCA505 :
+ //case V4L2_PIX_FMT_SPCA508 :
+ //case V4L2_PIX_FMT_SPCA561 :
+ //case V4L2_PIX_FMT_PAC207 :
+ //case V4L2_PIX_FMT_PJPG :
+ //case V4L2_PIX_FMT_YVYU :
+ default :
+ {
+ Fatal( "Can't find swscale format for palette %d", palette );
+ break;
+ // These are all spare and may match some of the above
+ pixFormat = AV_PIX_FMT_YUVJ420P;
+ pixFormat = AV_PIX_FMT_YUVJ422P;
+ pixFormat = AV_PIX_FMT_UYVY422;
+ pixFormat = AV_PIX_FMT_UYYVYY411;
+ pixFormat = AV_PIX_FMT_BGR565;
+ pixFormat = AV_PIX_FMT_BGR555;
+ pixFormat = AV_PIX_FMT_BGR8;
+ pixFormat = AV_PIX_FMT_BGR4;
+ pixFormat = AV_PIX_FMT_BGR4_BYTE;
+ pixFormat = AV_PIX_FMT_RGB8;
+ pixFormat = AV_PIX_FMT_RGB4;
+ pixFormat = AV_PIX_FMT_RGB4_BYTE;
+ pixFormat = AV_PIX_FMT_NV12;
+ pixFormat = AV_PIX_FMT_NV21;
+ pixFormat = AV_PIX_FMT_RGB32_1;
+ pixFormat = AV_PIX_FMT_BGR32_1;
+ pixFormat = AV_PIX_FMT_GRAY16BE;
+ pixFormat = AV_PIX_FMT_GRAY16LE;
+ pixFormat = AV_PIX_FMT_YUV440P;
+ pixFormat = AV_PIX_FMT_YUVJ440P;
+ pixFormat = AV_PIX_FMT_YUVA420P;
+ //pixFormat = AV_PIX_FMT_VDPAU_H264;
+ //pixFormat = AV_PIX_FMT_VDPAU_MPEG1;
+ //pixFormat = AV_PIX_FMT_VDPAU_MPEG2;
}
}
+ }
#endif // ZM_HAS_V4L2
#if ZM_HAS_V4L1
- if ( v4l_version == 1 )
+ if ( v4l_version == 1 )
+ {
+ switch( palette )
{
- switch( palette )
+ case VIDEO_PALETTE_RGB32 :
+ if(BigEndian)
+ pixFormat = AV_PIX_FMT_ARGB;
+ else
+ pixFormat = AV_PIX_FMT_BGRA;
+ break;
+ case VIDEO_PALETTE_RGB24 :
+ if(BigEndian)
+ pixFormat = AV_PIX_FMT_RGB24;
+ else
+ pixFormat = AV_PIX_FMT_BGR24;
+ break;
+ case VIDEO_PALETTE_GREY :
+ pixFormat = AV_PIX_FMT_GRAY8;
+ break;
+ case VIDEO_PALETTE_RGB555 :
+ pixFormat = AV_PIX_FMT_RGB555;
+ break;
+ case VIDEO_PALETTE_RGB565 :
+ pixFormat = AV_PIX_FMT_RGB565;
+ break;
+ case VIDEO_PALETTE_YUYV :
+ case VIDEO_PALETTE_YUV422 :
+ pixFormat = AV_PIX_FMT_YUYV422;
+ break;
+ case VIDEO_PALETTE_YUV422P :
+ pixFormat = AV_PIX_FMT_YUV422P;
+ break;
+ case VIDEO_PALETTE_YUV420P :
+ pixFormat = AV_PIX_FMT_YUV420P;
+ break;
+ default :
{
- case VIDEO_PALETTE_RGB32 :
- if(BigEndian)
- pixFormat = AV_PIX_FMT_ARGB;
- else
- pixFormat = AV_PIX_FMT_BGRA;
- break;
- case VIDEO_PALETTE_RGB24 :
- if(BigEndian)
- pixFormat = AV_PIX_FMT_RGB24;
- else
- pixFormat = AV_PIX_FMT_BGR24;
- break;
- case VIDEO_PALETTE_GREY :
- pixFormat = AV_PIX_FMT_GRAY8;
- break;
- case VIDEO_PALETTE_RGB555 :
- pixFormat = AV_PIX_FMT_RGB555;
- break;
- case VIDEO_PALETTE_RGB565 :
- pixFormat = AV_PIX_FMT_RGB565;
- break;
- case VIDEO_PALETTE_YUYV :
- case VIDEO_PALETTE_YUV422 :
- pixFormat = AV_PIX_FMT_YUYV422;
- break;
- case VIDEO_PALETTE_YUV422P :
- pixFormat = AV_PIX_FMT_YUV422P;
- break;
- case VIDEO_PALETTE_YUV420P :
- pixFormat = AV_PIX_FMT_YUV420P;
- break;
- default :
- {
- Fatal( "Can't find swscale format for palette %d", palette );
- break;
- // These are all spare and may match some of the above
- pixFormat = AV_PIX_FMT_YUVJ420P;
- pixFormat = AV_PIX_FMT_YUVJ422P;
- pixFormat = AV_PIX_FMT_YUVJ444P;
- pixFormat = AV_PIX_FMT_UYVY422;
- pixFormat = AV_PIX_FMT_UYYVYY411;
- pixFormat = AV_PIX_FMT_BGR565;
- pixFormat = AV_PIX_FMT_BGR555;
- pixFormat = AV_PIX_FMT_BGR8;
- pixFormat = AV_PIX_FMT_BGR4;
- pixFormat = AV_PIX_FMT_BGR4_BYTE;
- pixFormat = AV_PIX_FMT_RGB8;
- pixFormat = AV_PIX_FMT_RGB4;
- pixFormat = AV_PIX_FMT_RGB4_BYTE;
- pixFormat = AV_PIX_FMT_NV12;
- pixFormat = AV_PIX_FMT_NV21;
- pixFormat = AV_PIX_FMT_RGB32_1;
- pixFormat = AV_PIX_FMT_BGR32_1;
- pixFormat = AV_PIX_FMT_GRAY16BE;
- pixFormat = AV_PIX_FMT_GRAY16LE;
- pixFormat = AV_PIX_FMT_YUV440P;
- pixFormat = AV_PIX_FMT_YUVJ440P;
- pixFormat = AV_PIX_FMT_YUVA420P;
- //pixFormat = AV_PIX_FMT_VDPAU_H264;
- //pixFormat = AV_PIX_FMT_VDPAU_MPEG1;
- //pixFormat = AV_PIX_FMT_VDPAU_MPEG2;
- }
+ Fatal( "Can't find swscale format for palette %d", palette );
+ break;
+ // These are all spare and may match some of the above
+ pixFormat = AV_PIX_FMT_YUVJ420P;
+ pixFormat = AV_PIX_FMT_YUVJ422P;
+ pixFormat = AV_PIX_FMT_YUVJ444P;
+ pixFormat = AV_PIX_FMT_UYVY422;
+ pixFormat = AV_PIX_FMT_UYYVYY411;
+ pixFormat = AV_PIX_FMT_BGR565;
+ pixFormat = AV_PIX_FMT_BGR555;
+ pixFormat = AV_PIX_FMT_BGR8;
+ pixFormat = AV_PIX_FMT_BGR4;
+ pixFormat = AV_PIX_FMT_BGR4_BYTE;
+ pixFormat = AV_PIX_FMT_RGB8;
+ pixFormat = AV_PIX_FMT_RGB4;
+ pixFormat = AV_PIX_FMT_RGB4_BYTE;
+ pixFormat = AV_PIX_FMT_NV12;
+ pixFormat = AV_PIX_FMT_NV21;
+ pixFormat = AV_PIX_FMT_RGB32_1;
+ pixFormat = AV_PIX_FMT_BGR32_1;
+ pixFormat = AV_PIX_FMT_GRAY16BE;
+ pixFormat = AV_PIX_FMT_GRAY16LE;
+ pixFormat = AV_PIX_FMT_YUV440P;
+ pixFormat = AV_PIX_FMT_YUVJ440P;
+ pixFormat = AV_PIX_FMT_YUVA420P;
+ //pixFormat = AV_PIX_FMT_VDPAU_H264;
+ //pixFormat = AV_PIX_FMT_VDPAU_MPEG1;
+ //pixFormat = AV_PIX_FMT_VDPAU_MPEG2;
}
}
+ }
#endif // ZM_HAS_V4L1
- return( pixFormat );
+ return( pixFormat );
}
#endif // HAVE_LIBSWSCALE
@@ -287,359 +287,359 @@ AVFrame **LocalCamera::capturePictures = 0;
LocalCamera *LocalCamera::last_camera = NULL;
LocalCamera::LocalCamera(
- int p_id,
- const std::string &p_device,
- int p_channel,
- int p_standard,
- bool p_v4l_multi_buffer,
- unsigned int p_v4l_captures_per_frame,
- const std::string &p_method,
- int p_width,
- int p_height,
- int p_colours,
- int p_palette,
- int p_brightness,
- int p_contrast,
- int p_hue,
- int p_colour,
- bool p_capture,
- bool p_record_audio,
- unsigned int p_extras) :
- Camera( p_id, LOCAL_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
- device( p_device ),
- channel( p_channel ),
- standard( p_standard ),
- palette( p_palette ),
- channel_index( 0 ),
- extras ( p_extras )
+ int p_id,
+ const std::string &p_device,
+ int p_channel,
+ int p_standard,
+ bool p_v4l_multi_buffer,
+ unsigned int p_v4l_captures_per_frame,
+ const std::string &p_method,
+ int p_width,
+ int p_height,
+ int p_colours,
+ int p_palette,
+ int p_brightness,
+ int p_contrast,
+ int p_hue,
+ int p_colour,
+ bool p_capture,
+ bool p_record_audio,
+ unsigned int p_extras) :
+ Camera( p_id, LOCAL_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
+ device( p_device ),
+ channel( p_channel ),
+ standard( p_standard ),
+ palette( p_palette ),
+ channel_index( 0 ),
+ extras ( p_extras )
{
- // If we are the first, or only, input on this device then
- // do the initial opening etc
- device_prime = (camera_count++ == 0);
- v4l_version = (p_method=="v4l2"?2:1);
- v4l_multi_buffer = p_v4l_multi_buffer;
- v4l_captures_per_frame = p_v4l_captures_per_frame;
-
- if ( capture )
+ // If we are the first, or only, input on this device then
+ // do the initial opening etc
+ device_prime = (camera_count++ == 0);
+ v4l_version = (p_method=="v4l2"?2:1);
+ v4l_multi_buffer = p_v4l_multi_buffer;
+ v4l_captures_per_frame = p_v4l_captures_per_frame;
+
+ if ( capture )
+ {
+ if ( device_prime )
{
- if ( device_prime )
- {
- Debug( 2, "V4L support enabled, using V4L%d api", v4l_version );
- }
-
- if ( !last_camera || channel != last_camera->channel )
- {
- // We are the first, or only, input that uses this channel
- channel_prime = true;
- channel_index = channel_count++;
- channels[channel_index] = channel;
- standards[channel_index] = standard;
- }
- else
- {
- // We are the second, or subsequent, input using this channel
- channel_prime = false;
- }
-
+ Debug( 2, "V4L support enabled, using V4L%d api", v4l_version );
}
-
- /* The V4L1 API doesn't care about endianness, we need to check the endianness of the machine */
- uint32_t checkval = 0xAABBCCDD;
- if(*(unsigned char*)&checkval == 0xDD) {
- BigEndian = 0;
- Debug(2,"little-endian processor detected");
- } else if(*(unsigned char*)&checkval == 0xAA) {
- BigEndian = 1;
- Debug(2,"Big-endian processor detected");
- } else {
- Error("Unable to detect the processor's endianness. Assuming little-endian.");
- BigEndian = 0;
- }
-
-#if ZM_HAS_V4L2
- if( v4l_version == 2 && palette == 0 ) {
- /* Use automatic format selection */
- Debug(2,"Using automatic format selection");
- palette = AutoSelectFormat(colours);
- if(palette == 0) {
- Error("Automatic format selection failed. Falling back to YUYV");
- palette = V4L2_PIX_FMT_YUYV;
- } else {
- if(capture) {
- Info("Selected capture palette: %s (%c%c%c%c)", palette_desc, palette&0xff, (palette>>8)&0xff, (palette>>16)&0xff, (palette>>24)&0xff);
- }
- }
- }
-#endif
-
- if( capture ) {
- if ( last_camera ) {
- if ( (p_method == "v4l2" && v4l_version != 2) || (p_method == "v4l1" && v4l_version != 1) )
- Fatal( "Different Video For Linux version used for monitors sharing same device" );
-
- if ( standard != last_camera->standard )
- Warning( "Different video standards defined for monitors sharing same device, results may be unpredictable or completely wrong" );
-
- if ( palette != last_camera->palette )
- Warning( "Different video palettes defined for monitors sharing same device, results may be unpredictable or completely wrong" );
-
- if ( width != last_camera->width || height != last_camera->height )
- Warning( "Different capture sizes defined for monitors sharing same device, results may be unpredictable or completely wrong" );
- }
-
-#if HAVE_LIBSWSCALE
- /* Get ffmpeg pixel format based on capture palette and endianness */
- capturePixFormat = getFfPixFormatFromV4lPalette( v4l_version, palette );
- imagePixFormat = AV_PIX_FMT_NONE;
-#endif // HAVE_LIBSWSCALE
- }
- /* V4L2 format matching */
+ if ( !last_camera || channel != last_camera->channel )
+ {
+ // We are the first, or only, input that uses this channel
+ channel_prime = true;
+ channel_index = channel_count++;
+ channels[channel_index] = channel;
+ standards[channel_index] = standard;
+ }
+ else
+ {
+ // We are the second, or subsequent, input using this channel
+ channel_prime = false;
+ }
+
+ }
+
+ /* The V4L1 API doesn't care about endianness, we need to check the endianness of the machine */
+ uint32_t checkval = 0xAABBCCDD;
+ if(*(unsigned char*)&checkval == 0xDD) {
+ BigEndian = 0;
+ Debug(2,"little-endian processor detected");
+ } else if(*(unsigned char*)&checkval == 0xAA) {
+ BigEndian = 1;
+ Debug(2,"Big-endian processor detected");
+ } else {
+ Error("Unable to detect the processor's endianness. Assuming little-endian.");
+ BigEndian = 0;
+ }
+
#if ZM_HAS_V4L2
- if ( v4l_version == 2 ) {
- /* Try to find a match for the selected palette and target colourspace */
-
- /* RGB32 palette and 32bit target colourspace */
- if(palette == V4L2_PIX_FMT_RGB32 && colours == ZM_COLOUR_RGB32) {
- conversion_type = 0;
- subpixelorder = ZM_SUBPIX_ORDER_ARGB;
-
- /* BGR32 palette and 32bit target colourspace */
- } else if(palette == V4L2_PIX_FMT_BGR32 && colours == ZM_COLOUR_RGB32) {
- conversion_type = 0;
- subpixelorder = ZM_SUBPIX_ORDER_BGRA;
-
- /* RGB24 palette and 24bit target colourspace */
- } else if(palette == V4L2_PIX_FMT_RGB24 && colours == ZM_COLOUR_RGB24) {
- conversion_type = 0;
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
-
- /* BGR24 palette and 24bit target colourspace */
- } else if(palette == V4L2_PIX_FMT_BGR24 && colours == ZM_COLOUR_RGB24) {
- conversion_type = 0;
- subpixelorder = ZM_SUBPIX_ORDER_BGR;
-
- /* Grayscale palette and grayscale target colourspace */
- } else if(palette == V4L2_PIX_FMT_GREY && colours == ZM_COLOUR_GRAY8) {
- conversion_type = 0;
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- /* Unable to find a solution for the selected palette and target colourspace. Conversion required. Notify the user of performance penalty */
- } else {
- if( capture )
+ if( v4l_version == 2 && palette == 0 ) {
+ /* Use automatic format selection */
+ Debug(2,"Using automatic format selection");
+ palette = AutoSelectFormat(colours);
+ if(palette == 0) {
+ Error("Automatic format selection failed. Falling back to YUYV");
+ palette = V4L2_PIX_FMT_YUYV;
+ } else {
+ if(capture) {
+ Info("Selected capture palette: %s (%c%c%c%c)", palette_desc, palette&0xff, (palette>>8)&0xff, (palette>>16)&0xff, (palette>>24)&0xff);
+ }
+ }
+ }
+#endif
+
+ if( capture ) {
+ if ( last_camera ) {
+ if ( (p_method == "v4l2" && v4l_version != 2) || (p_method == "v4l1" && v4l_version != 1) )
+ Fatal( "Different Video For Linux version used for monitors sharing same device" );
+
+ if ( standard != last_camera->standard )
+ Warning( "Different video standards defined for monitors sharing same device, results may be unpredictable or completely wrong" );
+
+ if ( palette != last_camera->palette )
+ Warning( "Different video palettes defined for monitors sharing same device, results may be unpredictable or completely wrong" );
+
+ if ( width != last_camera->width || height != last_camera->height )
+ Warning( "Different capture sizes defined for monitors sharing same device, results may be unpredictable or completely wrong" );
+ }
+
#if HAVE_LIBSWSCALE
- Info("No direct match for the selected palette (%c%c%c%c) and target colorspace (%d). Format conversion is required, performance penalty expected", (capturePixFormat)&0xff,((capturePixFormat>>8)&0xff),((capturePixFormat>>16)&0xff),((capturePixFormat>>24)&0xff), colours );
+ /* Get ffmpeg pixel format based on capture palette and endianness */
+ capturePixFormat = getFfPixFormatFromV4lPalette( v4l_version, palette );
+ imagePixFormat = AV_PIX_FMT_NONE;
+#endif // HAVE_LIBSWSCALE
+ }
+
+ /* V4L2 format matching */
+#if ZM_HAS_V4L2
+ if ( v4l_version == 2 ) {
+ /* Try to find a match for the selected palette and target colourspace */
+
+ /* RGB32 palette and 32bit target colourspace */
+ if(palette == V4L2_PIX_FMT_RGB32 && colours == ZM_COLOUR_RGB32) {
+ conversion_type = 0;
+ subpixelorder = ZM_SUBPIX_ORDER_ARGB;
+
+ /* BGR32 palette and 32bit target colourspace */
+ } else if(palette == V4L2_PIX_FMT_BGR32 && colours == ZM_COLOUR_RGB32) {
+ conversion_type = 0;
+ subpixelorder = ZM_SUBPIX_ORDER_BGRA;
+
+ /* RGB24 palette and 24bit target colourspace */
+ } else if(palette == V4L2_PIX_FMT_RGB24 && colours == ZM_COLOUR_RGB24) {
+ conversion_type = 0;
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+
+ /* BGR24 palette and 24bit target colourspace */
+ } else if(palette == V4L2_PIX_FMT_BGR24 && colours == ZM_COLOUR_RGB24) {
+ conversion_type = 0;
+ subpixelorder = ZM_SUBPIX_ORDER_BGR;
+
+ /* Grayscale palette and grayscale target colourspace */
+ } else if(palette == V4L2_PIX_FMT_GREY && colours == ZM_COLOUR_GRAY8) {
+ conversion_type = 0;
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ /* Unable to find a solution for the selected palette and target colourspace. Conversion required. Notify the user of performance penalty */
+ } else {
+ if( capture )
+#if HAVE_LIBSWSCALE
+ Info("No direct match for the selected palette (%c%c%c%c) and target colorspace (%d). Format conversion is required, performance penalty expected", (capturePixFormat)&0xff,((capturePixFormat>>8)&0xff),((capturePixFormat>>16)&0xff),((capturePixFormat>>24)&0xff), colours );
#else
- Info("No direct match for the selected palette and target colorspace. Format conversion is required, performance penalty expected");
+ Info("No direct match for the selected palette and target colorspace. Format conversion is required, performance penalty expected");
#endif
#if HAVE_LIBSWSCALE
- /* Try using swscale for the conversion */
- conversion_type = 1;
- Debug(2,"Using swscale for image conversion");
- if(colours == ZM_COLOUR_RGB32) {
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- imagePixFormat = AV_PIX_FMT_RGBA;
- } else if(colours == ZM_COLOUR_RGB24) {
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- imagePixFormat = AV_PIX_FMT_RGB24;
- } else if(colours == ZM_COLOUR_GRAY8) {
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- imagePixFormat = AV_PIX_FMT_GRAY8;
- } else {
- Panic("Unexpected colours: %d",colours);
- }
- if( capture ) {
+ /* Try using swscale for the conversion */
+ conversion_type = 1;
+ Debug(2,"Using swscale for image conversion");
+ if(colours == ZM_COLOUR_RGB32) {
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ imagePixFormat = AV_PIX_FMT_RGBA;
+ } else if(colours == ZM_COLOUR_RGB24) {
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ imagePixFormat = AV_PIX_FMT_RGB24;
+ } else if(colours == ZM_COLOUR_GRAY8) {
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ imagePixFormat = AV_PIX_FMT_GRAY8;
+ } else {
+ Panic("Unexpected colours: %d",colours);
+ }
+ if( capture ) {
#if LIBSWSCALE_VERSION_CHECK(0, 8, 0, 8, 0)
- if(!sws_isSupportedInput(capturePixFormat)) {
- Error("swscale does not support the used capture format: %c%c%c%c",(capturePixFormat)&0xff,((capturePixFormat>>8)&0xff),((capturePixFormat>>16)&0xff),((capturePixFormat>>24)&0xff));
- conversion_type = 2; /* Try ZM format conversions */
- }
- if(!sws_isSupportedOutput(imagePixFormat)) {
- Error("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
- conversion_type = 2; /* Try ZM format conversions */
- }
+ if(!sws_isSupportedInput(capturePixFormat)) {
+ Error("swscale does not support the used capture format: %c%c%c%c",(capturePixFormat)&0xff,((capturePixFormat>>8)&0xff),((capturePixFormat>>16)&0xff),((capturePixFormat>>24)&0xff));
+ conversion_type = 2; /* Try ZM format conversions */
+ }
+ if(!sws_isSupportedOutput(imagePixFormat)) {
+ Error("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
+ conversion_type = 2; /* Try ZM format conversions */
+ }
#endif
- }
+ }
#else
- /* Don't have swscale, see what we can do */
- conversion_type = 2;
+ /* Don't have swscale, see what we can do */
+ conversion_type = 2;
#endif
- /* Our YUYV->Grayscale conversion is a lot faster than swscale's */
- if(colours == ZM_COLOUR_GRAY8 && palette == V4L2_PIX_FMT_YUYV) {
- conversion_type = 2;
- }
-
- /* JPEG */
- if(palette == V4L2_PIX_FMT_JPEG || palette == V4L2_PIX_FMT_MJPEG) {
- Debug(2,"Using JPEG image decoding");
- conversion_type = 3;
- }
-
- if(conversion_type == 2) {
- Debug(2,"Using ZM for image conversion");
- if(palette == V4L2_PIX_FMT_RGB32 && colours == ZM_COLOUR_GRAY8) {
- conversion_fptr = &std_convert_argb_gray8;
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- } else if(palette == V4L2_PIX_FMT_BGR32 && colours == ZM_COLOUR_GRAY8) {
- conversion_fptr = &std_convert_bgra_gray8;
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- } else if(palette == V4L2_PIX_FMT_YUYV && colours == ZM_COLOUR_GRAY8) {
- /* Fast YUYV->Grayscale conversion by extracting the Y channel */
- if(config.cpu_extensions && sseversion >= 35) {
- conversion_fptr = &ssse3_convert_yuyv_gray8;
- Debug(2,"Using SSSE3 YUYV->grayscale fast conversion");
- } else {
- conversion_fptr = &std_convert_yuyv_gray8;
- Debug(2,"Using standard YUYV->grayscale fast conversion");
- }
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- } else if(palette == V4L2_PIX_FMT_YUYV && colours == ZM_COLOUR_RGB24) {
- conversion_fptr = &zm_convert_yuyv_rgb;
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- } else if(palette == V4L2_PIX_FMT_YUYV && colours == ZM_COLOUR_RGB32) {
- conversion_fptr = &zm_convert_yuyv_rgba;
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- } else if(palette == V4L2_PIX_FMT_RGB555 && colours == ZM_COLOUR_RGB24) {
- conversion_fptr = &zm_convert_rgb555_rgb;
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- } else if(palette == V4L2_PIX_FMT_RGB555 && colours == ZM_COLOUR_RGB32) {
- conversion_fptr = &zm_convert_rgb555_rgba;
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- } else if(palette == V4L2_PIX_FMT_RGB565 && colours == ZM_COLOUR_RGB24) {
- conversion_fptr = &zm_convert_rgb565_rgb;
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- } else if(palette == V4L2_PIX_FMT_RGB565 && colours == ZM_COLOUR_RGB32) {
- conversion_fptr = &zm_convert_rgb565_rgba;
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- } else {
- Fatal("Unable to find a suitable format conversion for the selected palette and target colorspace.");
- }
- }
- }
- }
+ /* Our YUYV->Grayscale conversion is a lot faster than swscale's */
+ if(colours == ZM_COLOUR_GRAY8 && palette == V4L2_PIX_FMT_YUYV) {
+ conversion_type = 2;
+ }
+
+ /* JPEG */
+ if(palette == V4L2_PIX_FMT_JPEG || palette == V4L2_PIX_FMT_MJPEG) {
+ Debug(2,"Using JPEG image decoding");
+ conversion_type = 3;
+ }
+
+ if(conversion_type == 2) {
+ Debug(2,"Using ZM for image conversion");
+ if(palette == V4L2_PIX_FMT_RGB32 && colours == ZM_COLOUR_GRAY8) {
+ conversion_fptr = &std_convert_argb_gray8;
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ } else if(palette == V4L2_PIX_FMT_BGR32 && colours == ZM_COLOUR_GRAY8) {
+ conversion_fptr = &std_convert_bgra_gray8;
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ } else if(palette == V4L2_PIX_FMT_YUYV && colours == ZM_COLOUR_GRAY8) {
+ /* Fast YUYV->Grayscale conversion by extracting the Y channel */
+ if(config.cpu_extensions && sseversion >= 35) {
+ conversion_fptr = &ssse3_convert_yuyv_gray8;
+ Debug(2,"Using SSSE3 YUYV->grayscale fast conversion");
+ } else {
+ conversion_fptr = &std_convert_yuyv_gray8;
+ Debug(2,"Using standard YUYV->grayscale fast conversion");
+ }
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ } else if(palette == V4L2_PIX_FMT_YUYV && colours == ZM_COLOUR_RGB24) {
+ conversion_fptr = &zm_convert_yuyv_rgb;
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ } else if(palette == V4L2_PIX_FMT_YUYV && colours == ZM_COLOUR_RGB32) {
+ conversion_fptr = &zm_convert_yuyv_rgba;
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ } else if(palette == V4L2_PIX_FMT_RGB555 && colours == ZM_COLOUR_RGB24) {
+ conversion_fptr = &zm_convert_rgb555_rgb;
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ } else if(palette == V4L2_PIX_FMT_RGB555 && colours == ZM_COLOUR_RGB32) {
+ conversion_fptr = &zm_convert_rgb555_rgba;
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ } else if(palette == V4L2_PIX_FMT_RGB565 && colours == ZM_COLOUR_RGB24) {
+ conversion_fptr = &zm_convert_rgb565_rgb;
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ } else if(palette == V4L2_PIX_FMT_RGB565 && colours == ZM_COLOUR_RGB32) {
+ conversion_fptr = &zm_convert_rgb565_rgba;
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ } else {
+ Fatal("Unable to find a suitable format conversion for the selected palette and target colorspace.");
+ }
+ }
+ }
+ }
#endif // ZM_HAS_V4L2
- /* V4L1 format matching */
+ /* V4L1 format matching */
#if ZM_HAS_V4L1
- if ( v4l_version == 1) {
- /* Try to find a match for the selected palette and target colourspace */
-
- /* RGB32 palette and 32bit target colourspace */
- if(palette == VIDEO_PALETTE_RGB32 && colours == ZM_COLOUR_RGB32) {
- conversion_type = 0;
- if(BigEndian) {
- subpixelorder = ZM_SUBPIX_ORDER_ARGB;
- } else {
- subpixelorder = ZM_SUBPIX_ORDER_BGRA;
- }
-
- /* RGB24 palette and 24bit target colourspace */
- } else if(palette == VIDEO_PALETTE_RGB24 && colours == ZM_COLOUR_RGB24) {
- conversion_type = 0;
- if(BigEndian) {
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- } else {
- subpixelorder = ZM_SUBPIX_ORDER_BGR;
- }
-
- /* Grayscale palette and grayscale target colourspace */
- } else if(palette == VIDEO_PALETTE_GREY && colours == ZM_COLOUR_GRAY8) {
- conversion_type = 0;
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- /* Unable to find a solution for the selected palette and target colourspace. Conversion required. Notify the user of performance penalty */
- } else {
- if( capture )
- Info("No direct match for the selected palette and target colorspace. Format conversion is required, performance penalty expected");
+ if ( v4l_version == 1) {
+ /* Try to find a match for the selected palette and target colourspace */
+
+ /* RGB32 palette and 32bit target colourspace */
+ if(palette == VIDEO_PALETTE_RGB32 && colours == ZM_COLOUR_RGB32) {
+ conversion_type = 0;
+ if(BigEndian) {
+ subpixelorder = ZM_SUBPIX_ORDER_ARGB;
+ } else {
+ subpixelorder = ZM_SUBPIX_ORDER_BGRA;
+ }
+
+ /* RGB24 palette and 24bit target colourspace */
+ } else if(palette == VIDEO_PALETTE_RGB24 && colours == ZM_COLOUR_RGB24) {
+ conversion_type = 0;
+ if(BigEndian) {
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ } else {
+ subpixelorder = ZM_SUBPIX_ORDER_BGR;
+ }
+
+ /* Grayscale palette and grayscale target colourspace */
+ } else if(palette == VIDEO_PALETTE_GREY && colours == ZM_COLOUR_GRAY8) {
+ conversion_type = 0;
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ /* Unable to find a solution for the selected palette and target colourspace. Conversion required. Notify the user of performance penalty */
+ } else {
+ if( capture )
+ Info("No direct match for the selected palette and target colorspace. Format conversion is required, performance penalty expected");
#if HAVE_LIBSWSCALE
- /* Try using swscale for the conversion */
- conversion_type = 1;
- Debug(2,"Using swscale for image conversion");
- if(colours == ZM_COLOUR_RGB32) {
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- imagePixFormat = AV_PIX_FMT_RGBA;
- } else if(colours == ZM_COLOUR_RGB24) {
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- imagePixFormat = AV_PIX_FMT_RGB24;
- } else if(colours == ZM_COLOUR_GRAY8) {
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- imagePixFormat = AV_PIX_FMT_GRAY8;
- } else {
- Panic("Unexpected colours: %d",colours);
- }
- if( capture ) {
- if(!sws_isSupportedInput(capturePixFormat)) {
- Error("swscale does not support the used capture format");
- conversion_type = 2; /* Try ZM format conversions */
- }
- if(!sws_isSupportedOutput(imagePixFormat)) {
- Error("swscale does not support the target format");
- conversion_type = 2; /* Try ZM format conversions */
- }
- }
+ /* Try using swscale for the conversion */
+ conversion_type = 1;
+ Debug(2,"Using swscale for image conversion");
+ if(colours == ZM_COLOUR_RGB32) {
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ imagePixFormat = AV_PIX_FMT_RGBA;
+ } else if(colours == ZM_COLOUR_RGB24) {
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ imagePixFormat = AV_PIX_FMT_RGB24;
+ } else if(colours == ZM_COLOUR_GRAY8) {
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ imagePixFormat = AV_PIX_FMT_GRAY8;
+ } else {
+ Panic("Unexpected colours: %d",colours);
+ }
+ if( capture ) {
+ if(!sws_isSupportedInput(capturePixFormat)) {
+ Error("swscale does not support the used capture format");
+ conversion_type = 2; /* Try ZM format conversions */
+ }
+ if(!sws_isSupportedOutput(imagePixFormat)) {
+ Error("swscale does not support the target format");
+ conversion_type = 2; /* Try ZM format conversions */
+ }
+ }
#else
- /* Don't have swscale, see what we can do */
- conversion_type = 2;
+ /* Don't have swscale, see what we can do */
+ conversion_type = 2;
#endif
- /* Our YUYV->Grayscale conversion is a lot faster than swscale's */
- if(colours == ZM_COLOUR_GRAY8 && (palette == VIDEO_PALETTE_YUYV || palette == VIDEO_PALETTE_YUV422)) {
- conversion_type = 2;
- }
-
- if(conversion_type == 2) {
- Debug(2,"Using ZM for image conversion");
- if(palette == VIDEO_PALETTE_RGB32 && colours == ZM_COLOUR_GRAY8) {
- if(BigEndian) {
- conversion_fptr = &std_convert_argb_gray8;
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- } else {
- conversion_fptr = &std_convert_bgra_gray8;
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- }
- } else if((palette == VIDEO_PALETTE_YUYV || palette == VIDEO_PALETTE_YUV422) && colours == ZM_COLOUR_GRAY8) {
- /* Fast YUYV->Grayscale conversion by extracting the Y channel */
- if(config.cpu_extensions && sseversion >= 35) {
- conversion_fptr = &ssse3_convert_yuyv_gray8;
- Debug(2,"Using SSSE3 YUYV->grayscale fast conversion");
- } else {
- conversion_fptr = &std_convert_yuyv_gray8;
- Debug(2,"Using standard YUYV->grayscale fast conversion");
- }
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- } else if((palette == VIDEO_PALETTE_YUYV || palette == VIDEO_PALETTE_YUV422) && colours == ZM_COLOUR_RGB24) {
- conversion_fptr = &zm_convert_yuyv_rgb;
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- } else if((palette == VIDEO_PALETTE_YUYV || palette == VIDEO_PALETTE_YUV422) && colours == ZM_COLOUR_RGB32) {
- conversion_fptr = &zm_convert_yuyv_rgba;
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- } else if(palette == VIDEO_PALETTE_RGB555 && colours == ZM_COLOUR_RGB24) {
- conversion_fptr = &zm_convert_rgb555_rgb;
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- } else if(palette == VIDEO_PALETTE_RGB555 && colours == ZM_COLOUR_RGB32) {
- conversion_fptr = &zm_convert_rgb555_rgba;
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- } else if(palette == VIDEO_PALETTE_RGB565 && colours == ZM_COLOUR_RGB24) {
- conversion_fptr = &zm_convert_rgb565_rgb;
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- } else if(palette == VIDEO_PALETTE_RGB565 && colours == ZM_COLOUR_RGB32) {
- conversion_fptr = &zm_convert_rgb565_rgba;
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- } else {
- Fatal("Unable to find a suitable format conversion for the selected palette and target colorspace.");
- }
- }
- }
- }
+ /* Our YUYV->Grayscale conversion is a lot faster than swscale's */
+ if(colours == ZM_COLOUR_GRAY8 && (palette == VIDEO_PALETTE_YUYV || palette == VIDEO_PALETTE_YUV422)) {
+ conversion_type = 2;
+ }
+
+ if(conversion_type == 2) {
+ Debug(2,"Using ZM for image conversion");
+ if(palette == VIDEO_PALETTE_RGB32 && colours == ZM_COLOUR_GRAY8) {
+ if(BigEndian) {
+ conversion_fptr = &std_convert_argb_gray8;
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ } else {
+ conversion_fptr = &std_convert_bgra_gray8;
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ }
+ } else if((palette == VIDEO_PALETTE_YUYV || palette == VIDEO_PALETTE_YUV422) && colours == ZM_COLOUR_GRAY8) {
+ /* Fast YUYV->Grayscale conversion by extracting the Y channel */
+ if(config.cpu_extensions && sseversion >= 35) {
+ conversion_fptr = &ssse3_convert_yuyv_gray8;
+ Debug(2,"Using SSSE3 YUYV->grayscale fast conversion");
+ } else {
+ conversion_fptr = &std_convert_yuyv_gray8;
+ Debug(2,"Using standard YUYV->grayscale fast conversion");
+ }
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ } else if((palette == VIDEO_PALETTE_YUYV || palette == VIDEO_PALETTE_YUV422) && colours == ZM_COLOUR_RGB24) {
+ conversion_fptr = &zm_convert_yuyv_rgb;
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ } else if((palette == VIDEO_PALETTE_YUYV || palette == VIDEO_PALETTE_YUV422) && colours == ZM_COLOUR_RGB32) {
+ conversion_fptr = &zm_convert_yuyv_rgba;
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ } else if(palette == VIDEO_PALETTE_RGB555 && colours == ZM_COLOUR_RGB24) {
+ conversion_fptr = &zm_convert_rgb555_rgb;
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ } else if(palette == VIDEO_PALETTE_RGB555 && colours == ZM_COLOUR_RGB32) {
+ conversion_fptr = &zm_convert_rgb555_rgba;
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ } else if(palette == VIDEO_PALETTE_RGB565 && colours == ZM_COLOUR_RGB24) {
+ conversion_fptr = &zm_convert_rgb565_rgb;
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ } else if(palette == VIDEO_PALETTE_RGB565 && colours == ZM_COLOUR_RGB32) {
+ conversion_fptr = &zm_convert_rgb565_rgba;
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ } else {
+ Fatal("Unable to find a suitable format conversion for the selected palette and target colorspace.");
+ }
+ }
+ }
+ }
#endif // ZM_HAS_V4L1
- last_camera = this;
- Debug(3,"Selected subpixelorder: %d",subpixelorder);
+ last_camera = this;
+ Debug(3,"Selected subpixelorder: %d",subpixelorder);
#if HAVE_LIBSWSCALE
- /* Initialize swscale stuff */
- if(capture && conversion_type == 1) {
+ /* Initialize swscale stuff */
+ if(capture && conversion_type == 1) {
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- tmpPicture = av_frame_alloc();
+ tmpPicture = av_frame_alloc();
#else
- tmpPicture = avcodec_alloc_frame();
+ tmpPicture = avcodec_alloc_frame();
#endif
if ( !tmpPicture )
Fatal( "Could not allocate temporary picture" );
@@ -652,34 +652,34 @@ LocalCamera::LocalCamera(
if( (unsigned int)pSize != imagesize) {
Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
}
-
+
imgConversionContext = sws_getContext(width, height, capturePixFormat, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
-
+
if ( !imgConversionContext ) {
Fatal( "Unable to initialise image scaling context" );
}
-
+
}
#endif
}
LocalCamera::~LocalCamera()
{
- if ( device_prime && capture )
- Terminate();
-
+ if ( device_prime && capture )
+ Terminate();
+
#if HAVE_LIBSWSCALE
- /* Clean up swscale stuff */
- if(capture && conversion_type == 1) {
- sws_freeContext(imgConversionContext);
- imgConversionContext = NULL;
-
+ /* Clean up swscale stuff */
+ if(capture && conversion_type == 1) {
+ sws_freeContext(imgConversionContext);
+ imgConversionContext = NULL;
+
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- av_frame_free( &tmpPicture );
+ av_frame_free( &tmpPicture );
#else
- av_freep( &tmpPicture );
+ av_freep( &tmpPicture );
#endif
- }
+ }
#endif
}
@@ -879,16 +879,16 @@ void LocalCamera::Initialise()
#else
capturePictures[i] = avcodec_alloc_frame();
#endif
- if ( !capturePictures[i] )
- Fatal( "Could not allocate picture" );
+ if ( !capturePictures[i] )
+ Fatal( "Could not allocate picture" );
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
- av_image_fill_arrays(capturePictures[i]->data,
- capturePictures[i]->linesize,
- (uint8_t*)v4l2_data.buffers[i].start,capturePixFormat,
- v4l2_data.fmt.fmt.pix.width,
- v4l2_data.fmt.fmt.pix.height, 1);
+ av_image_fill_arrays(capturePictures[i]->data,
+ capturePictures[i]->linesize,
+ (uint8_t*)v4l2_data.buffers[i].start,capturePixFormat,
+ v4l2_data.fmt.fmt.pix.width,
+ v4l2_data.fmt.fmt.pix.height, 1);
#else
- avpicture_fill( (AVPicture *)capturePictures[i],
+ avpicture_fill( (AVPicture *)capturePictures[i],
(uint8_t*)v4l2_data.buffers[i].start, capturePixFormat,
v4l2_data.fmt.fmt.pix.width,
v4l2_data.fmt.fmt.pix.height );
diff --git a/src/zm_local_camera.h b/src/zm_local_camera.h
index db9227cb4..d41f8f44d 100644
--- a/src/zm_local_camera.h
+++ b/src/zm_local_camera.h
@@ -75,90 +75,90 @@ protected:
#endif // ZM_HAS_V4L1
protected:
- std::string device;
- int channel;
- int standard;
- int palette;
- bool device_prime;
- bool channel_prime;
- int channel_index;
- unsigned int extras;
-
- unsigned int conversion_type; /* 0 = no conversion needed, 1 = use libswscale, 2 = zm internal conversion, 3 = jpeg decoding */
- convert_fptr_t conversion_fptr; /* Pointer to conversion function used */
-
- uint32_t AutoSelectFormat(int p_colours);
+ std::string device;
+ int channel;
+ int standard;
+ int palette;
+ bool device_prime;
+ bool channel_prime;
+ int channel_index;
+ unsigned int extras;
+
+ unsigned int conversion_type; /* 0 = no conversion needed, 1 = use libswscale, 2 = zm internal conversion, 3 = jpeg decoding */
+ convert_fptr_t conversion_fptr; /* Pointer to conversion function used */
+
+ uint32_t AutoSelectFormat(int p_colours);
- static int camera_count;
- static int channel_count;
- static int channels[VIDEO_MAX_FRAME];
- static int standards[VIDEO_MAX_FRAME];
- static int vid_fd;
- static int v4l_version;
- bool v4l_multi_buffer;
- unsigned int v4l_captures_per_frame;
+ static int camera_count;
+ static int channel_count;
+ static int channels[VIDEO_MAX_FRAME];
+ static int standards[VIDEO_MAX_FRAME];
+ static int vid_fd;
+ static int v4l_version;
+ bool v4l_multi_buffer;
+ unsigned int v4l_captures_per_frame;
#if ZM_HAS_V4L2
- static V4L2Data v4l2_data;
+ static V4L2Data v4l2_data;
#endif // ZM_HAS_V4L2
#if ZM_HAS_V4L1
- static V4L1Data v4l1_data;
+ static V4L1Data v4l1_data;
#endif // ZM_HAS_V4L1
#if HAVE_LIBSWSCALE
- static AVFrame **capturePictures;
- _AVPIXELFORMAT imagePixFormat;
- _AVPIXELFORMAT capturePixFormat;
- struct SwsContext *imgConversionContext;
- AVFrame *tmpPicture;
+ static AVFrame **capturePictures;
+ _AVPIXELFORMAT imagePixFormat;
+ _AVPIXELFORMAT capturePixFormat;
+ struct SwsContext *imgConversionContext;
+ AVFrame *tmpPicture;
#endif // HAVE_LIBSWSCALE
- static LocalCamera *last_camera;
+ static LocalCamera *last_camera;
public:
- LocalCamera(
- int p_id,
- const std::string &device,
- int p_channel,
- int p_format,
- bool v4lmultibuffer,
- unsigned int v4lcapturesperframe,
- const std::string &p_method,
- int p_width,
- int p_height,
- int p_colours,
- int p_palette,
- int p_brightness,
- int p_contrast,
- int p_hue,
- int p_colour,
- bool p_capture,
- bool p_record_audio,
- unsigned int p_extras = 0);
- ~LocalCamera();
+ LocalCamera(
+ int p_id,
+ const std::string &device,
+ int p_channel,
+ int p_format,
+ bool v4lmultibuffer,
+ unsigned int v4lcapturesperframe,
+ const std::string &p_method,
+ int p_width,
+ int p_height,
+ int p_colours,
+ int p_palette,
+ int p_brightness,
+ int p_contrast,
+ int p_hue,
+ int p_colour,
+ bool p_capture,
+ bool p_record_audio,
+ unsigned int p_extras = 0);
+ ~LocalCamera();
- void Initialise();
- void Terminate();
+ void Initialise();
+ void Terminate();
- const std::string &Device() const { return( device ); }
+ const std::string &Device() const { return( device ); }
- int Channel() const { return( channel ); }
- int Standard() const { return( standard ); }
- int Palette() const { return( palette ); }
- int Extras() const { return( extras ); }
+ int Channel() const { return( channel ); }
+ int Standard() const { return( standard ); }
+ int Palette() const { return( palette ); }
+ int Extras() const { return( extras ); }
- int Brightness( int p_brightness=-1 );
- int Hue( int p_hue=-1 );
- int Colour( int p_colour=-1 );
- int Contrast( int p_contrast=-1 );
+ int Brightness( int p_brightness=-1 );
+ int Hue( int p_hue=-1 );
+ int Colour( int p_colour=-1 );
+ int Contrast( int p_contrast=-1 );
- int PrimeCapture();
- int PreCapture();
- int Capture( Image &image );
- int PostCapture();
- int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
+ int PrimeCapture();
+ int PreCapture();
+ int Capture( Image &image );
+ int PostCapture();
+ int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
- static bool GetCurrentSettings( const char *device, char *output, int version, bool verbose );
+ static bool GetCurrentSettings( const char *device, char *output, int version, bool verbose );
};
#endif // ZM_HAS_V4L
diff --git a/src/zm_monitor.cpp b/src/zm_monitor.cpp
index 70a6a4b24..1d23ac336 100644
--- a/src/zm_monitor.cpp
+++ b/src/zm_monitor.cpp
@@ -504,7 +504,7 @@ Monitor::Monitor(
}
bool Monitor::connect() {
- Debug(3, "Connecting to monitor. Purpose is %d", purpose );
+ Debug(3, "Connecting to monitor. Purpose is %d", purpose );
#if ZM_MEM_MAPPED
snprintf( mem_file, sizeof(mem_file), "%s/zm.mmap.%d", config.path_map, id );
map_fd = open( mem_file, O_RDWR|O_CREAT, (mode_t)0600 );
@@ -564,7 +564,7 @@ bool Monitor::connect() {
Debug(3,"Aligning shared memory images to the next 16 byte boundary");
shared_images = (uint8_t*)((unsigned long)shared_images + (16 - ((unsigned long)shared_images % 16)));
}
- Debug(3, "Allocating %d image buffers", image_buffer_count );
+ Debug(3, "Allocating %d image buffers", image_buffer_count );
image_buffer = new Snapshot[image_buffer_count];
for ( int i = 0; i < image_buffer_count; i++ ) {
image_buffer[i].timestamp = &(shared_timestamps[i]);
@@ -1338,7 +1338,7 @@ bool Monitor::Analyse()
if ( shared_data->action )
{
- // Can there be more than 1 bit set in the action? Shouldn't these be elseifs?
+ // Can there be more than 1 bit set in the action? Shouldn't these be elseifs?
if ( shared_data->action & RELOAD )
{
Info( "Received reload indication at count %d", image_count );
@@ -1352,7 +1352,7 @@ bool Monitor::Analyse()
Info( "Received suspend indication at count %d", image_count );
shared_data->active = false;
//closeEvent();
- } else {
+ } else {
Info( "Received suspend indication at count %d, but wasn't active", image_count );
}
if ( config.max_suspend_time )
@@ -1401,7 +1401,7 @@ bool Monitor::Analyse()
{
bool signal = shared_data->signal;
bool signal_change = (signal != last_signal);
- Debug(3, "Motion detection is enabled signal(%d) signal_change(%d)", signal, signal_change);
+ Debug(3, "Motion detection is enabled signal(%d) signal_change(%d)", signal, signal_change);
//Set video recording flag for event start constructor and easy reference in code
// TODO: Use enum instead of the # 2. Makes for easier reading
@@ -1467,9 +1467,9 @@ bool Monitor::Analyse()
// Get new score.
motion_score = DetectMotion( *snap_image, zoneSet );
- Debug( 3, "After motion detection, last_motion_score(%d), new motion score(%d)", last_motion_score, motion_score );
- // Why are we updating the last_motion_score too?
- last_motion_score = motion_score;
+ Debug( 3, "After motion detection, last_motion_score(%d), new motion score(%d)", last_motion_score, motion_score );
+ // Why are we updating the last_motion_score too?
+ last_motion_score = motion_score;
}
//int motion_score = DetectBlack( *snap_image, zoneSet );
if ( motion_score )
@@ -1531,27 +1531,27 @@ bool Monitor::Analyse()
//TODO: We shouldn't have to do this every time. Not sure why it clears itself if this isn't here??
snprintf(video_store_data->event_file, sizeof(video_store_data->event_file), "%s", event->getEventFile());
- if ( section_length ) {
- int section_mod = timestamp->tv_sec%section_length;
- Debug( 3, "Section length (%d) Last Section Mod(%d), new section mod(%d)", section_length, last_section_mod, section_mod );
- if ( section_mod < last_section_mod ) {
- //if ( state == IDLE || state == TAPE || event_close_mode == CLOSE_TIME ) {
- //if ( state == TAPE ) {
- //shared_data->state = state = IDLE;
- //Info( "%s: %03d - Closing event %d, section end", name, image_count, event->Id() )
- //} else {
- Info( "%s: %03d - Closing event %d, section end forced ", name, image_count, event->Id() );
- //}
- closeEvent();
- last_section_mod = 0;
- //} else {
- //Debug( 2, "Time to close event, but state (%d) is not IDLE or TAPE and event_close_mode is not CLOSE_TIME (%d)", state, event_close_mode );
- //}
- } else {
- last_section_mod = section_mod;
- }
- }
- } // end if section_length
+ if ( section_length ) {
+ int section_mod = timestamp->tv_sec%section_length;
+ Debug( 3, "Section length (%d) Last Section Mod(%d), new section mod(%d)", section_length, last_section_mod, section_mod );
+ if ( section_mod < last_section_mod ) {
+ //if ( state == IDLE || state == TAPE || event_close_mode == CLOSE_TIME ) {
+ //if ( state == TAPE ) {
+ //shared_data->state = state = IDLE;
+ //Info( "%s: %03d - Closing event %d, section end", name, image_count, event->Id() )
+ //} else {
+ Info( "%s: %03d - Closing event %d, section end forced ", name, image_count, event->Id() );
+ //}
+ closeEvent();
+ last_section_mod = 0;
+ //} else {
+ //Debug( 2, "Time to close event, but state (%d) is not IDLE or TAPE and event_close_mode is not CLOSE_TIME (%d)", state, event_close_mode );
+ //}
+ } else {
+ last_section_mod = section_mod;
+ }
+ }
+ } // end if section_length
if ( !event )
{
@@ -4412,14 +4412,14 @@ void Monitor::SingleImageZip( int scale)
fwrite( img_buffer, img_buffer_size, 1, stdout );
}
unsigned int Monitor::Colours() const { return( camera->Colours() ); }
- unsigned int Monitor::SubpixelOrder() const { return( camera->SubpixelOrder() ); }
- int Monitor::PrimeCapture() {
- return( camera->PrimeCapture() );
- }
- int Monitor::PreCapture() {
- return( camera->PreCapture() );
- }
- int Monitor::PostCapture() {
- return( camera->PostCapture() );
- }
+ unsigned int Monitor::SubpixelOrder() const { return( camera->SubpixelOrder() ); }
+ int Monitor::PrimeCapture() {
+ return( camera->PrimeCapture() );
+ }
+ int Monitor::PreCapture() {
+ return( camera->PreCapture() );
+ }
+ int Monitor::PostCapture() {
+ return( camera->PostCapture() );
+ }
Monitor::Orientation Monitor::getOrientation()const { return orientation; }
diff --git a/src/zm_monitor.h b/src/zm_monitor.h
index a9c62c0e2..3f2fbad85 100644
--- a/src/zm_monitor.h
+++ b/src/zm_monitor.h
@@ -120,16 +120,16 @@ class Monitor
/*
** This keeps 32bit time_t and 64bit time_t identical and compatible as long as time is before 2038.
** Shared memory layout should be identical for both 32bit and 64bit and is multiples of 16.
- */
+ */
union { /* +64 */
time_t last_write_time;
uint64_t extrapad1;
};
- union { /* +72 */
+ union { /* +72 */
time_t last_read_time;
uint64_t extrapad2;
};
- uint8_t control_state[256]; /* +80 */
+ uint8_t control_state[256]; /* +80 */
} SharedData;
@@ -150,8 +150,8 @@ class Monitor
/* sizeof(Snapshot) expected to be 16 bytes on 32bit and 32 bytes on 64bit */
struct Snapshot
{
- struct timeval *timestamp;
- Image *image;
+ struct timeval *timestamp;
+ Image *image;
void* padding;
};
@@ -170,27 +170,27 @@ class Monitor
class MonitorLink {
protected:
- unsigned int id;
- char name[64];
+ unsigned int id;
+ char name[64];
- bool connected;
- time_t last_connect_time;
+ bool connected;
+ time_t last_connect_time;
#if ZM_MEM_MAPPED
- int map_fd;
- char mem_file[PATH_MAX];
+ int map_fd;
+ char mem_file[PATH_MAX];
#else // ZM_MEM_MAPPED
- int shm_id;
+ int shm_id;
#endif // ZM_MEM_MAPPED
- off_t mem_size;
- unsigned char *mem_ptr;
+ off_t mem_size;
+ unsigned char *mem_ptr;
- volatile SharedData *shared_data;
- volatile TriggerData *trigger_data;
+ volatile SharedData *shared_data;
+ volatile TriggerData *trigger_data;
volatile VideoStoreData *video_store_data;
- int last_state;
- int last_event;
+ int last_state;
+ int last_event;
public:
@@ -221,263 +221,263 @@ class Monitor
protected:
// These are read from the DB and thereafter remain unchanged
- unsigned int id;
- char name[64];
- unsigned int server_id; // Id of the Server object
- unsigned int storage_id; // Id of the Storage Object, which currently will just provide a path, but in future may do more.
- Function function; // What the monitor is doing
- bool enabled; // Whether the monitor is enabled or asleep
- unsigned int width; // Normally the same as the camera, but not if partly rotated
- unsigned int height; // Normally the same as the camera, but not if partly rotated
- bool v4l_multi_buffer;
- unsigned int v4l_captures_per_frame;
- Orientation orientation; // Whether the image has to be rotated at all
- unsigned int deinterlacing;
+ unsigned int id;
+ char name[64];
+ unsigned int server_id; // Id of the Server object
+ unsigned int storage_id; // Id of the Storage Object, which currently will just provide a path, but in future may do more.
+ Function function; // What the monitor is doing
+ bool enabled; // Whether the monitor is enabled or asleep
+ unsigned int width; // Normally the same as the camera, but not if partly rotated
+ unsigned int height; // Normally the same as the camera, but not if partly rotated
+ bool v4l_multi_buffer;
+ unsigned int v4l_captures_per_frame;
+ Orientation orientation; // Whether the image has to be rotated at all
+ unsigned int deinterlacing;
int savejpegspref;
int videowriterpref;
std::string encoderparams;
std::vector encoderparamsvec;
- bool record_audio; // Whether to store the audio that we receive
+ bool record_audio; // Whether to store the audio that we receive
- int brightness; // The statically saved brightness of the camera
- int contrast; // The statically saved contrast of the camera
- int hue; // The statically saved hue of the camera
- int colour; // The statically saved colour of the camera
- char event_prefix[64]; // The prefix applied to event names as they are created
- char label_format[64]; // The format of the timestamp on the images
- Coord label_coord; // The coordinates of the timestamp on the images
- int label_size; // Size of the timestamp on the images
- int image_buffer_count; // Size of circular image buffer, at least twice the size of the pre_event_count
- int pre_event_buffer_count; // Size of dedicated circular pre event buffer used when analysis is not performed at capturing framerate,
+ int brightness; // The statically saved brightness of the camera
+ int contrast; // The statically saved contrast of the camera
+ int hue; // The statically saved hue of the camera
+ int colour; // The statically saved colour of the camera
+ char event_prefix[64]; // The prefix applied to event names as they are created
+ char label_format[64]; // The format of the timestamp on the images
+ Coord label_coord; // The coordinates of the timestamp on the images
+ int label_size; // Size of the timestamp on the images
+ int image_buffer_count; // Size of circular image buffer, at least twice the size of the pre_event_count
+ int pre_event_buffer_count; // Size of dedicated circular pre event buffer used when analysis is not performed at capturing framerate,
// value is pre_event_count + alarm_frame_count - 1
- int warmup_count; // How many images to process before looking for events
- int pre_event_count; // How many images to hold and prepend to an alarm event
- int post_event_count; // How many unalarmed images must occur before the alarm state is reset
- int stream_replay_buffer; // How many frames to store to support DVR functions, IGNORED from this object, passed directly into zms now
- int section_length; // How long events should last in continuous modes
- bool adaptive_skip; // Whether to use the newer adaptive algorithm for this monitor
- int frame_skip; // How many frames to skip in continuous modes
- int motion_frame_skip; // How many frames to skip in motion detection
- double analysis_fps; // Target framerate for video analysis
- unsigned int analysis_update_delay; // How long we wait before updating analysis parameters
- int capture_delay; // How long we wait between capture frames
- int alarm_capture_delay; // How long we wait between capture frames when in alarm state
- int alarm_frame_count; // How many alarm frames are required before an event is triggered
- int fps_report_interval; // How many images should be captured/processed between reporting the current FPS
- int ref_blend_perc; // Percentage of new image going into reference image.
- int alarm_ref_blend_perc; // Percentage of new image going into reference image during alarm.
- bool track_motion; // Whether this monitor tries to track detected motion
- Rgb signal_check_colour; // The colour that the camera will emit when no video signal detected
- bool embed_exif; // Whether to embed Exif data into each image frame or not
+ int warmup_count; // How many images to process before looking for events
+ int pre_event_count; // How many images to hold and prepend to an alarm event
+ int post_event_count; // How many unalarmed images must occur before the alarm state is reset
+ int stream_replay_buffer; // How many frames to store to support DVR functions, IGNORED from this object, passed directly into zms now
+ int section_length; // How long events should last in continuous modes
+ bool adaptive_skip; // Whether to use the newer adaptive algorithm for this monitor
+ int frame_skip; // How many frames to skip in continuous modes
+ int motion_frame_skip; // How many frames to skip in motion detection
+ double analysis_fps; // Target framerate for video analysis
+ unsigned int analysis_update_delay; // How long we wait before updating analysis parameters
+ int capture_delay; // How long we wait between capture frames
+ int alarm_capture_delay; // How long we wait between capture frames when in alarm state
+ int alarm_frame_count; // How many alarm frames are required before an event is triggered
+ int fps_report_interval; // How many images should be captured/processed between reporting the current FPS
+ int ref_blend_perc; // Percentage of new image going into reference image.
+ int alarm_ref_blend_perc; // Percentage of new image going into reference image during alarm.
+ bool track_motion; // Whether this monitor tries to track detected motion
+ Rgb signal_check_colour; // The colour that the camera will emit when no video signal detected
+ bool embed_exif; // Whether to embed Exif data into each image frame or not
- double fps;
- Image delta_image;
- Image ref_image;
- Image alarm_image; // Used in creating analysis images, will be initialized in Analysis
- Image write_image; // Used when creating snapshot images
+ double fps;
+ Image delta_image;
+ Image ref_image;
+ Image alarm_image; // Used in creating analysis images, will be initialized in Analysis
+ Image write_image; // Used when creating snapshot images
- Purpose purpose; // What this monitor has been created to do
- int event_count;
- int image_count;
- int ready_count;
- int first_alarm_count;
- int last_alarm_count;
- int buffer_count;
- int prealarm_count;
- State state;
- time_t start_time;
- time_t last_fps_time;
- time_t auto_resume_time;
- unsigned int last_motion_score;
+ Purpose purpose; // What this monitor has been created to do
+ int event_count;
+ int image_count;
+ int ready_count;
+ int first_alarm_count;
+ int last_alarm_count;
+ int buffer_count;
+ int prealarm_count;
+ State state;
+ time_t start_time;
+ time_t last_fps_time;
+ time_t auto_resume_time;
+ unsigned int last_motion_score;
EventCloseMode event_close_mode;
#if ZM_MEM_MAPPED
- int map_fd;
- char mem_file[PATH_MAX];
+ int map_fd;
+ char mem_file[PATH_MAX];
#else // ZM_MEM_MAPPED
- int shm_id;
+ int shm_id;
#endif // ZM_MEM_MAPPED
- off_t mem_size;
- unsigned char *mem_ptr;
- Storage *storage;
+ off_t mem_size;
+ unsigned char *mem_ptr;
+ Storage *storage;
- SharedData *shared_data;
- TriggerData *trigger_data;
- VideoStoreData *video_store_data;
+ SharedData *shared_data;
+ TriggerData *trigger_data;
+ VideoStoreData *video_store_data;
- Snapshot *image_buffer;
- Snapshot next_buffer; /* Used by four field deinterlacing */
- Snapshot *pre_event_buffer;
+ Snapshot *image_buffer;
+ Snapshot next_buffer; /* Used by four field deinterlacing */
+ Snapshot *pre_event_buffer;
- Camera *camera;
+ Camera *camera;
- Event *event;
+ Event *event;
- int n_zones;
- Zone **zones;
+ int n_zones;
+ Zone **zones;
- struct timeval **timestamps;
- Image **images;
+ struct timeval **timestamps;
+ Image **images;
- const unsigned char *privacy_bitmask;
+ const unsigned char *privacy_bitmask;
- int n_linked_monitors;
- MonitorLink **linked_monitors;
+ int n_linked_monitors;
+ MonitorLink **linked_monitors;
public:
Monitor( int p_id );
// OurCheckAlarms seems to be unused. Check it on zm_monitor.cpp for more info.
//bool OurCheckAlarms( Zone *zone, const Image *pImage );
- Monitor(
- int p_id,
- const char *p_name,
- unsigned int p_server_id,
- unsigned int p_storage_id,
- int p_function,
- bool p_enabled,
- const char *p_linked_monitors,
- Camera *p_camera,
- int p_orientation,
- unsigned int p_deinterlacing,
- int p_savejpegs,
- int p_videowriter,
- std::string p_encoderparams,
- bool p_record_audio,
- const char *p_event_prefix,
- const char *p_label_format,
- const Coord &p_label_coord,
- int label_size,
- int p_image_buffer_count,
- int p_warmup_count,
- int p_pre_event_count,
- int p_post_event_count,
- int p_stream_replay_buffer,
- int p_alarm_frame_count,
- int p_section_length,
- int p_frame_skip,
- int p_motion_frame_skip,
- double p_analysis_fps,
- unsigned int p_analysis_update_delay,
- int p_capture_delay,
- int p_alarm_capture_delay,
- int p_fps_report_interval,
- int p_ref_blend_perc,
- int p_alarm_ref_blend_perc,
- bool p_track_motion,
- Rgb p_signal_check_colour,
- bool p_embed_exif,
- Purpose p_purpose,
- int p_n_zones=0,
- Zone *p_zones[]=0
- );
- ~Monitor();
+ Monitor(
+ int p_id,
+ const char *p_name,
+ unsigned int p_server_id,
+ unsigned int p_storage_id,
+ int p_function,
+ bool p_enabled,
+ const char *p_linked_monitors,
+ Camera *p_camera,
+ int p_orientation,
+ unsigned int p_deinterlacing,
+ int p_savejpegs,
+ int p_videowriter,
+ std::string p_encoderparams,
+ bool p_record_audio,
+ const char *p_event_prefix,
+ const char *p_label_format,
+ const Coord &p_label_coord,
+ int label_size,
+ int p_image_buffer_count,
+ int p_warmup_count,
+ int p_pre_event_count,
+ int p_post_event_count,
+ int p_stream_replay_buffer,
+ int p_alarm_frame_count,
+ int p_section_length,
+ int p_frame_skip,
+ int p_motion_frame_skip,
+ double p_analysis_fps,
+ unsigned int p_analysis_update_delay,
+ int p_capture_delay,
+ int p_alarm_capture_delay,
+ int p_fps_report_interval,
+ int p_ref_blend_perc,
+ int p_alarm_ref_blend_perc,
+ bool p_track_motion,
+ Rgb p_signal_check_colour,
+ bool p_embed_exif,
+ Purpose p_purpose,
+ int p_n_zones=0,
+ Zone *p_zones[]=0
+ );
+ ~Monitor();
- void AddZones( int p_n_zones, Zone *p_zones[] );
- void AddPrivacyBitmask( Zone *p_zones[] );
+ void AddZones( int p_n_zones, Zone *p_zones[] );
+ void AddPrivacyBitmask( Zone *p_zones[] );
- bool connect();
- inline int ShmValid() const {
- return( shared_data->valid );
- }
+ bool connect();
+ inline int ShmValid() const {
+ return( shared_data->valid );
+ }
- inline int Id() const {
- return( id );
- }
- inline const char *Name() const {
- return( name );
- }
- inline Storage *getStorage() {
- if ( ! storage ) {
- storage = new Storage( storage_id );
- }
- return( storage );
- }
- inline Function GetFunction() const {
- return( function );
- }
- inline bool Enabled() {
- if ( function <= MONITOR )
- return( false );
- return( enabled );
- }
- inline const char *EventPrefix() const {
- return( event_prefix );
- }
- inline bool Ready() {
- if ( function <= MONITOR )
- return( false );
- return( image_count > ready_count );
- }
- inline bool Active() {
- if ( function <= MONITOR )
- return( false );
- return( enabled && shared_data->active );
- }
- inline bool Exif() {
- return( embed_exif );
- }
+ inline int Id() const {
+ return( id );
+ }
+ inline const char *Name() const {
+ return( name );
+ }
+ inline Storage *getStorage() {
+ if ( ! storage ) {
+ storage = new Storage( storage_id );
+ }
+ return( storage );
+ }
+ inline Function GetFunction() const {
+ return( function );
+ }
+ inline bool Enabled() {
+ if ( function <= MONITOR )
+ return( false );
+ return( enabled );
+ }
+ inline const char *EventPrefix() const {
+ return( event_prefix );
+ }
+ inline bool Ready() {
+ if ( function <= MONITOR )
+ return( false );
+ return( image_count > ready_count );
+ }
+ inline bool Active() {
+ if ( function <= MONITOR )
+ return( false );
+ return( enabled && shared_data->active );
+ }
+ inline bool Exif() {
+ return( embed_exif );
+ }
Orientation getOrientation()const;
- unsigned int Width() const { return width; }
- unsigned int Height() const { return height; }
- unsigned int Colours() const;
- unsigned int SubpixelOrder() const;
+ unsigned int Width() const { return width; }
+ unsigned int Height() const { return height; }
+ unsigned int Colours() const;
+ unsigned int SubpixelOrder() const;
- int GetOptSaveJPEGs() const { return( savejpegspref ); }
- int GetOptVideoWriter() const { return( videowriterpref ); }
- const std::vector* GetOptEncoderParams() const { return( &encoderparamsvec ); }
+ int GetOptSaveJPEGs() const { return( savejpegspref ); }
+ int GetOptVideoWriter() const { return( videowriterpref ); }
+ const std::vector* GetOptEncoderParams() const { return( &encoderparamsvec ); }
- State GetState() const;
- int GetImage( int index=-1, int scale=100 );
- struct timeval GetTimestamp( int index=-1 ) const;
- void UpdateAdaptiveSkip();
- useconds_t GetAnalysisRate();
- unsigned int GetAnalysisUpdateDelay() const { return( analysis_update_delay ); }
- int GetCaptureDelay() const { return( capture_delay ); }
- int GetAlarmCaptureDelay() const { return( alarm_capture_delay ); }
- unsigned int GetLastReadIndex() const;
- unsigned int GetLastWriteIndex() const;
- unsigned int GetLastEvent() const;
- double GetFPS() const;
- void ForceAlarmOn( int force_score, const char *force_case, const char *force_text="" );
- void ForceAlarmOff();
- void CancelForced();
- TriggerState GetTriggerState() const { return( (TriggerState)(trigger_data?trigger_data->trigger_state:TRIGGER_CANCEL )); }
+ State GetState() const;
+ int GetImage( int index=-1, int scale=100 );
+ struct timeval GetTimestamp( int index=-1 ) const;
+ void UpdateAdaptiveSkip();
+ useconds_t GetAnalysisRate();
+ unsigned int GetAnalysisUpdateDelay() const { return( analysis_update_delay ); }
+ int GetCaptureDelay() const { return( capture_delay ); }
+ int GetAlarmCaptureDelay() const { return( alarm_capture_delay ); }
+ unsigned int GetLastReadIndex() const;
+ unsigned int GetLastWriteIndex() const;
+ unsigned int GetLastEvent() const;
+ double GetFPS() const;
+ void ForceAlarmOn( int force_score, const char *force_case, const char *force_text="" );
+ void ForceAlarmOff();
+ void CancelForced();
+ TriggerState GetTriggerState() const { return( (TriggerState)(trigger_data?trigger_data->trigger_state:TRIGGER_CANCEL )); }
- void actionReload();
- void actionEnable();
- void actionDisable();
- void actionSuspend();
- void actionResume();
+ void actionReload();
+ void actionEnable();
+ void actionDisable();
+ void actionSuspend();
+ void actionResume();
- int actionBrightness( int p_brightness=-1 );
- int actionHue( int p_hue=-1 );
- int actionColour( int p_colour=-1 );
- int actionContrast( int p_contrast=-1 );
+ int actionBrightness( int p_brightness=-1 );
+ int actionHue( int p_hue=-1 );
+ int actionColour( int p_colour=-1 );
+ int actionContrast( int p_contrast=-1 );
- int PrimeCapture();
- int PreCapture();
- int Capture();
- int PostCapture();
+ int PrimeCapture();
+ int PreCapture();
+ int Capture();
+ int PostCapture();
- unsigned int DetectMotion( const Image &comp_image, Event::StringSet &zoneSet );
+ unsigned int DetectMotion( const Image &comp_image, Event::StringSet &zoneSet );
// DetectBlack seems to be unused. Check it on zm_monitor.cpp for more info.
//unsigned int DetectBlack( const Image &comp_image, Event::StringSet &zoneSet );
- bool CheckSignal( const Image *image );
- bool Analyse();
- void DumpImage( Image *dump_image ) const;
- void TimestampImage( Image *ts_image, const struct timeval *ts_time ) const;
- bool closeEvent();
+ bool CheckSignal( const Image *image );
+ bool Analyse();
+ void DumpImage( Image *dump_image ) const;
+ void TimestampImage( Image *ts_image, const struct timeval *ts_time ) const;
+ bool closeEvent();
- void Reload();
- void ReloadZones();
- void ReloadLinkedMonitors( const char * );
+ void Reload();
+ void ReloadZones();
+ void ReloadLinkedMonitors( const char * );
- bool DumpSettings( char *output, bool verbose );
- void DumpZoneImage( const char *zone_string=0 );
+ bool DumpSettings( char *output, bool verbose );
+ void DumpZoneImage( const char *zone_string=0 );
#if ZM_HAS_V4L
static int LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose purpose );
@@ -505,9 +505,9 @@ public:
class MonitorStream : public StreamBase {
protected:
typedef struct SwapImage {
- bool valid;
+ bool valid;
struct timeval timestamp;
- char file_name[PATH_MAX];
+ char file_name[PATH_MAX];
} SwapImage;
private:
diff --git a/src/zm_remote_camera.h b/src/zm_remote_camera.h
index 4caae411e..6273774d3 100644
--- a/src/zm_remote_camera.h
+++ b/src/zm_remote_camera.h
@@ -36,60 +36,60 @@
class RemoteCamera : public Camera
{
protected:
- std::string protocol;
- std::string host;
- std::string port;
- std::string path;
- std::string auth;
- std::string username;
- std::string password;
- std::string auth64;
+ std::string protocol;
+ std::string host;
+ std::string port;
+ std::string path;
+ std::string auth;
+ std::string username;
+ std::string password;
+ std::string auth64;
- // Reworked authentication system
- // First try without authentication, even if we have a username and password
- // on receiving a 401 response, select authentication method (basic or digest)
- // fill required fields and set needAuth
- // subsequent requests can set the required authentication header.
- bool mNeedAuth;
- zm::Authenticator* mAuthenticator;
+ // Reworked authentication system
+ // First try without authentication, even if we have a username and password
+ // on receiving a 401 response, select authentication method (basic or digest)
+ // fill required fields and set needAuth
+ // subsequent requests can set the required authentication header.
+ bool mNeedAuth;
+ zm::Authenticator* mAuthenticator;
protected:
- struct addrinfo *hp;
+ struct addrinfo *hp;
public:
- RemoteCamera(
- int p_id,
- const std::string &p_proto,
- const std::string &p_host,
- const std::string &p_port,
- const std::string &p_path,
- int p_width,
- int p_height,
- int p_colours,
- int p_brightness,
- int p_contrast,
- int p_hue,
- int p_colour,
- bool p_capture,
- bool p_record_audio
- );
- virtual ~RemoteCamera();
+ RemoteCamera(
+ int p_id,
+ const std::string &p_proto,
+ const std::string &p_host,
+ const std::string &p_port,
+ const std::string &p_path,
+ int p_width,
+ int p_height,
+ int p_colours,
+ int p_brightness,
+ int p_contrast,
+ int p_hue,
+ int p_colour,
+ bool p_capture,
+ bool p_record_audio
+ );
+ virtual ~RemoteCamera();
- const std::string &Protocol() const { return( protocol ); }
- const std::string &Host() const { return( host ); }
- const std::string &Port() const { return( port ); }
- const std::string &Path() const { return( path ); }
- const std::string &Auth() const { return( auth ); }
- const std::string &Username() const { return( username ); }
- const std::string &Password() const { return( password ); }
+ const std::string &Protocol() const { return( protocol ); }
+ const std::string &Host() const { return( host ); }
+ const std::string &Port() const { return( port ); }
+ const std::string &Path() const { return( path ); }
+ const std::string &Auth() const { return( auth ); }
+ const std::string &Username() const { return( username ); }
+ const std::string &Password() const { return( password ); }
- virtual void Initialise();
- virtual void Terminate() = 0;
- virtual int Connect() = 0;
- virtual int Disconnect() = 0;
- virtual int PreCapture() = 0;
- virtual int Capture( Image &image ) = 0;
- virtual int PostCapture() = 0;
- virtual int CaptureAndRecord( Image &image, bool recording, char* event_directory )=0;
+ virtual void Initialise();
+ virtual void Terminate() = 0;
+ virtual int Connect() = 0;
+ virtual int Disconnect() = 0;
+ virtual int PreCapture() = 0;
+ virtual int Capture( Image &image ) = 0;
+ virtual int PostCapture() = 0;
+ virtual int CaptureAndRecord( Image &image, bool recording, char* event_directory )=0;
};
#endif // ZM_REMOTE_CAMERA_H
diff --git a/src/zm_remote_camera_http.cpp b/src/zm_remote_camera_http.cpp
index 5babc0b0e..3a6b5a57d 100644
--- a/src/zm_remote_camera_http.cpp
+++ b/src/zm_remote_camera_http.cpp
@@ -32,125 +32,125 @@
#endif
RemoteCameraHttp::RemoteCameraHttp( int p_id, const std::string &p_method, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
- RemoteCamera( p_id, "http", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio )
+ RemoteCamera( p_id, "http", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio )
{
- sd = -1;
+ sd = -1;
- timeout.tv_sec = 0;
- timeout.tv_usec = 0;
+ timeout.tv_sec = 0;
+ timeout.tv_usec = 0;
- if ( p_method == "simple" )
- method = SIMPLE;
- else if ( p_method == "regexp" )
- method = REGEXP;
- else
- Fatal( "Unrecognised method '%s' when creating HTTP camera %d", p_method.c_str(), id );
- if ( capture )
- {
- Initialise();
- }
+ if ( p_method == "simple" )
+ method = SIMPLE;
+ else if ( p_method == "regexp" )
+ method = REGEXP;
+ else
+ Fatal( "Unrecognised method '%s' when creating HTTP camera %d", p_method.c_str(), id );
+ if ( capture )
+ {
+ Initialise();
+ }
}
RemoteCameraHttp::~RemoteCameraHttp()
{
- if ( capture )
- {
- Terminate();
- }
+ if ( capture )
+ {
+ Terminate();
+ }
}
void RemoteCameraHttp::Initialise()
{
- RemoteCamera::Initialise();
+ RemoteCamera::Initialise();
- if ( request.empty() )
- {
- request = stringtf( "GET %s HTTP/%s\r\n", path.c_str(), config.http_version );
- request += stringtf( "User-Agent: %s/%s\r\n", config.http_ua, ZM_VERSION );
- request += stringtf( "Host: %s\r\n", host.c_str());
- if ( strcmp( config.http_version, "1.0" ) == 0 )
- request += stringtf( "Connection: Keep-Alive\r\n" );
- if ( !auth.empty() )
- request += stringtf( "Authorization: Basic %s\r\n", auth64.c_str() );
- request += "\r\n";
- Debug( 2, "Request: %s", request.c_str() );
- }
+ if ( request.empty() )
+ {
+ request = stringtf( "GET %s HTTP/%s\r\n", path.c_str(), config.http_version );
+ request += stringtf( "User-Agent: %s/%s\r\n", config.http_ua, ZM_VERSION );
+ request += stringtf( "Host: %s\r\n", host.c_str());
+ if ( strcmp( config.http_version, "1.0" ) == 0 )
+ request += stringtf( "Connection: Keep-Alive\r\n" );
+ if ( !auth.empty() )
+ request += stringtf( "Authorization: Basic %s\r\n", auth64.c_str() );
+ request += "\r\n";
+ Debug( 2, "Request: %s", request.c_str() );
+ }
- if ( !timeout.tv_sec )
- {
- timeout.tv_sec = config.http_timeout/1000;
- timeout.tv_usec = (config.http_timeout%1000)*1000;
- }
+ if ( !timeout.tv_sec )
+ {
+ timeout.tv_sec = config.http_timeout/1000;
+ timeout.tv_usec = (config.http_timeout%1000)*1000;
+ }
- int max_size = width*height*colours;
+ int max_size = width*height*colours;
- buffer.size( max_size );
+ buffer.size( max_size );
- mode = SINGLE_IMAGE;
- format = UNDEF;
- state = HEADER;
+ mode = SINGLE_IMAGE;
+ format = UNDEF;
+ state = HEADER;
}
int RemoteCameraHttp::Connect()
{
- struct addrinfo *p;
+ struct addrinfo *p;
- for(p = hp; p != NULL; p = p->ai_next)
+ for(p = hp; p != NULL; p = p->ai_next)
+ {
+ sd = socket( p->ai_family, p->ai_socktype, p->ai_protocol );
+ if ( sd < 0 )
{
- sd = socket( p->ai_family, p->ai_socktype, p->ai_protocol );
- if ( sd < 0 )
- {
- Warning("Can't create socket: %s", strerror(errno) );
- continue;
- }
-
- if ( connect( sd, p->ai_addr, p->ai_addrlen ) < 0 )
- {
- close(sd);
- sd = -1;
- char buf[sizeof(struct in6_addr)];
- struct sockaddr_in *addr;
- addr = (struct sockaddr_in *)p->ai_addr;
- inet_ntop( AF_INET, &(addr->sin_addr), buf, INET6_ADDRSTRLEN );
-
- Warning("Can't connect to remote camera mid: %d at %s: %s", id, buf, strerror(errno) );
- continue;
- }
-
- /* If we got here, we must have connected successfully */
- break;
+ Warning("Can't create socket: %s", strerror(errno) );
+ continue;
}
- if(p == NULL) {
- Error("Unable to connect to the remote camera, aborting");
- return( -1 );
+ if ( connect( sd, p->ai_addr, p->ai_addrlen ) < 0 )
+ {
+ close(sd);
+ sd = -1;
+ char buf[sizeof(struct in6_addr)];
+ struct sockaddr_in *addr;
+ addr = (struct sockaddr_in *)p->ai_addr;
+ inet_ntop( AF_INET, &(addr->sin_addr), buf, INET6_ADDRSTRLEN );
+
+ Warning("Can't connect to remote camera mid: %d at %s: %s", id, buf, strerror(errno) );
+ continue;
}
- Debug( 3, "Connected to host, socket = %d", sd );
- return( sd );
+ /* If we got here, we must have connected successfully */
+ break;
+ }
+
+ if(p == NULL) {
+ Error("Unable to connect to the remote camera, aborting");
+ return( -1 );
+ }
+
+ Debug( 3, "Connected to host, socket = %d", sd );
+ return( sd );
}
int RemoteCameraHttp::Disconnect()
{
- close( sd );
- sd = -1;
- Debug( 3, "Disconnected from host" );
- return( 0 );
+ close( sd );
+ sd = -1;
+ Debug( 3, "Disconnected from host" );
+ return( 0 );
}
int RemoteCameraHttp::SendRequest()
{
- Debug( 2, "Sending request: %s", request.c_str() );
- if ( write( sd, request.data(), request.length() ) < 0 )
- {
- Error( "Can't write: %s", strerror(errno) );
- Disconnect();
- return( -1 );
- }
- format = UNDEF;
- state = HEADER;
- Debug( 3, "Request sent" );
- return( 0 );
+ Debug( 2, "Sending request: %s", request.c_str() );
+ if ( write( sd, request.data(), request.length() ) < 0 )
+ {
+ Error( "Can't write: %s", strerror(errno) );
+ Disconnect();
+ return( -1 );
+ }
+ format = UNDEF;
+ state = HEADER;
+ Debug( 3, "Request sent" );
+ return( 0 );
}
/* Return codes are as follows:
@@ -161,1037 +161,1037 @@ int RemoteCameraHttp::SendRequest()
int RemoteCameraHttp::ReadData( Buffer &buffer, int bytes_expected )
{
- fd_set rfds;
- FD_ZERO(&rfds);
- FD_SET(sd, &rfds);
+ fd_set rfds;
+ FD_ZERO(&rfds);
+ FD_SET(sd, &rfds);
- struct timeval temp_timeout = timeout;
+ struct timeval temp_timeout = timeout;
- int n_found = select( sd+1, &rfds, NULL, NULL, &temp_timeout );
- if( n_found == 0 )
+ int n_found = select( sd+1, &rfds, NULL, NULL, &temp_timeout );
+ if( n_found == 0 )
+ {
+ Debug( 4, "Select timed out timeout was %d secs %d usecs", temp_timeout.tv_sec, temp_timeout.tv_usec );
+ // Why are we disconnecting? It's just a timeout, meaning that data wasn't available.
+ //Disconnect();
+ return( 0 );
+ }
+ else if ( n_found < 0)
+ {
+ Error( "Select error: %s", strerror(errno) );
+ return( -1 );
+ }
+
+ int total_bytes_to_read = 0;
+
+ if ( bytes_expected )
+ {
+ total_bytes_to_read = bytes_expected;
+ }
+ else
+ {
+ if ( ioctl( sd, FIONREAD, &total_bytes_to_read ) < 0 )
{
- Debug( 4, "Select timed out timeout was %d secs %d usecs", temp_timeout.tv_sec, temp_timeout.tv_usec );
- // Why are we disconnecting? It's just a timeout, meaning that data wasn't available.
- //Disconnect();
- return( 0 );
- }
- else if ( n_found < 0)
- {
- Error( "Select error: %s", strerror(errno) );
- return( -1 );
+ Error( "Can't ioctl(): %s", strerror(errno) );
+ return( -1 );
}
- int total_bytes_to_read = 0;
-
- if ( bytes_expected )
+ if ( total_bytes_to_read == 0 )
{
- total_bytes_to_read = bytes_expected;
+ // If socket is closed locally, then select will fail, but if it is closed remotely
+ // then we have an exception on our socket.. but no data.
+ Debug( 3, "Socket closed remotely" );
+ //Disconnect(); // Disconnect is done outside of ReadData now.
+ return( -1 );
}
- else
- {
- if ( ioctl( sd, FIONREAD, &total_bytes_to_read ) < 0 )
- {
- Error( "Can't ioctl(): %s", strerror(errno) );
- return( -1 );
- }
- if ( total_bytes_to_read == 0 )
- {
- // If socket is closed locally, then select will fail, but if it is closed remotely
- // then we have an exception on our socket.. but no data.
- Debug( 3, "Socket closed remotely" );
- //Disconnect(); // Disconnect is done outside of ReadData now.
- return( -1 );
- }
-
- // There can be lots of bytes available. I've seen 4MB or more. This will vastly inflate our buffer size unnecessarily.
- if ( total_bytes_to_read > ZM_NETWORK_BUFSIZ ) {
- total_bytes_to_read = ZM_NETWORK_BUFSIZ;
- Debug(3, "Just getting 32K" );
- } else {
- Debug(3, "Just getting %d", total_bytes_to_read );
- }
- }
- Debug( 3, "Expecting %d bytes", total_bytes_to_read );
-
- int total_bytes_read = 0;
- do
- {
- int bytes_read = buffer.read_into( sd, total_bytes_to_read );
- if ( bytes_read < 0)
- {
- Error( "Read error: %s", strerror(errno) );
- return( -1 );
- }
- else if ( bytes_read == 0)
- {
- Debug( 2, "Socket closed" );
- //Disconnect(); // Disconnect is done outside of ReadData now.
- return( -1 );
- }
- else if ( bytes_read < total_bytes_to_read )
- {
- Error( "Incomplete read, expected %d, got %d", total_bytes_to_read, bytes_read );
- return( -1 );
- }
- Debug( 3, "Read %d bytes", bytes_read );
- total_bytes_read += bytes_read;
- total_bytes_to_read -= bytes_read;
+ // There can be lots of bytes available. I've seen 4MB or more. This will vastly inflate our buffer size unnecessarily.
+ if ( total_bytes_to_read > ZM_NETWORK_BUFSIZ ) {
+ total_bytes_to_read = ZM_NETWORK_BUFSIZ;
+ Debug(3, "Just getting 32K" );
+ } else {
+ Debug(3, "Just getting %d", total_bytes_to_read );
}
- while ( total_bytes_to_read );
+ }
+ Debug( 3, "Expecting %d bytes", total_bytes_to_read );
- Debug( 4, buffer );
+ int total_bytes_read = 0;
+ do
+ {
+ int bytes_read = buffer.read_into( sd, total_bytes_to_read );
+ if ( bytes_read < 0)
+ {
+ Error( "Read error: %s", strerror(errno) );
+ return( -1 );
+ }
+ else if ( bytes_read == 0)
+ {
+ Debug( 2, "Socket closed" );
+ //Disconnect(); // Disconnect is done outside of ReadData now.
+ return( -1 );
+ }
+ else if ( bytes_read < total_bytes_to_read )
+ {
+ Error( "Incomplete read, expected %d, got %d", total_bytes_to_read, bytes_read );
+ return( -1 );
+ }
+ Debug( 3, "Read %d bytes", bytes_read );
+ total_bytes_read += bytes_read;
+ total_bytes_to_read -= bytes_read;
+ }
+ while ( total_bytes_to_read );
- return( total_bytes_read );
+ Debug( 4, buffer );
+
+ return( total_bytes_read );
}
int RemoteCameraHttp::GetResponse()
{
- int buffer_len;
+ int buffer_len;
#if HAVE_LIBPCRE
+ if ( method == REGEXP )
+ {
+ const char *header = 0;
+ int header_len = 0;
+ const char *http_version = 0;
+ int status_code = 0;
+ const char *status_mesg = 0;
+ const char *connection_type = "";
+ int content_length = 0;
+ const char *content_type = "";
+ const char *content_boundary = "";
+ const char *subheader = 0;
+ int subheader_len = 0;
+ //int subcontent_length = 0;
+ //const char *subcontent_type = "";
+
+ while ( true )
+ {
+ switch( state )
+ {
+ case HEADER :
+ {
+ static RegExpr *header_expr = 0;
+ static RegExpr *status_expr = 0;
+ static RegExpr *connection_expr = 0;
+ static RegExpr *content_length_expr = 0;
+ static RegExpr *content_type_expr = 0;
+
+ while ( ! ( buffer_len = ReadData( buffer ) ) ) {
+ }
+ if ( buffer_len < 0 ) {
+ Error( "Unable to read header data" );
+ return( -1 );
+ }
+ if ( !header_expr )
+ header_expr = new RegExpr( "^(.+?\r?\n\r?\n)", PCRE_DOTALL );
+ if ( header_expr->Match( (char*)buffer, buffer.size() ) == 2 )
+ {
+ header = header_expr->MatchString( 1 );
+ header_len = header_expr->MatchLength( 1 );
+ Debug( 4, "Captured header (%d bytes):\n'%s'", header_len, header );
+
+ if ( !status_expr )
+ status_expr = new RegExpr( "^HTTP/(1\\.[01]) +([0-9]+) +(.+?)\r?\n", PCRE_CASELESS );
+ if ( status_expr->Match( header, header_len ) < 4 )
+ {
+ Error( "Unable to extract HTTP status from header" );
+ return( -1 );
+ }
+ http_version = status_expr->MatchString( 1 );
+ status_code = atoi( status_expr->MatchString( 2 ) );
+ status_mesg = status_expr->MatchString( 3 );
+
+ if ( status_code == 401 ) {
+ if ( mNeedAuth ) {
+ Error( "Failed authentication: " );
+ return( -1 );
+ }
+ mNeedAuth = true;
+ std::string Header = header;
+
+ mAuthenticator->checkAuthResponse(Header);
+ if ( mAuthenticator->auth_method() == zm::AUTH_DIGEST ) {
+ Debug( 2, "Need Digest Authentication" );
+ request = stringtf( "GET %s HTTP/%s\r\n", path.c_str(), config.http_version );
+ request += stringtf( "User-Agent: %s/%s\r\n", config.http_ua, ZM_VERSION );
+ request += stringtf( "Host: %s\r\n", host.c_str());
+ if ( strcmp( config.http_version, "1.0" ) == 0 )
+ request += stringtf( "Connection: Keep-Alive\r\n" );
+ request += mAuthenticator->getAuthHeader( "GET", path.c_str() );
+ request += "\r\n";
+
+ Debug( 2, "New request header: %s", request.c_str() );
+ return( 0 );
+ }
+
+ } else if ( status_code < 200 || status_code > 299 ) {
+ Error( "Invalid response status %d: %s\n%s", status_code, status_mesg, (char *)buffer );
+ return( -1 );
+ }
+ Debug( 3, "Got status '%d' (%s), http version %s", status_code, status_mesg, http_version );
+
+ if ( !connection_expr )
+ connection_expr = new RegExpr( "Connection: ?(.+?)\r?\n", PCRE_CASELESS );
+ if ( connection_expr->Match( header, header_len ) == 2 )
+ {
+ connection_type = connection_expr->MatchString( 1 );
+ Debug( 3, "Got connection '%s'", connection_type );
+ }
+
+ if ( !content_length_expr )
+ content_length_expr = new RegExpr( "Content-length: ?([0-9]+)\r?\n", PCRE_CASELESS );
+ if ( content_length_expr->Match( header, header_len ) == 2 )
+ {
+ content_length = atoi( content_length_expr->MatchString( 1 ) );
+ Debug( 3, "Got content length '%d'", content_length );
+ }
+
+ if ( !content_type_expr )
+ content_type_expr = new RegExpr( "Content-type: ?(.+?)(?:; ?boundary=(.+?))?\r?\n", PCRE_CASELESS );
+ if ( content_type_expr->Match( header, header_len ) >= 2 )
+ {
+ content_type = content_type_expr->MatchString( 1 );
+ Debug( 3, "Got content type '%s'\n", content_type );
+ if ( content_type_expr->MatchCount() > 2 )
+ {
+ content_boundary = content_type_expr->MatchString( 2 );
+ Debug( 3, "Got content boundary '%s'", content_boundary );
+ }
+ }
+
+ if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
+ {
+ // Single image
+ mode = SINGLE_IMAGE;
+ format = JPEG;
+ state = CONTENT;
+ }
+ else if ( !strcasecmp( content_type, "image/x-rgb" ) )
+ {
+ // Single image
+ mode = SINGLE_IMAGE;
+ format = X_RGB;
+ state = CONTENT;
+ }
+ else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
+ {
+ // Single image
+ mode = SINGLE_IMAGE;
+ format = X_RGBZ;
+ state = CONTENT;
+ }
+ else if ( !strcasecmp( content_type, "multipart/x-mixed-replace" ) )
+ {
+ // Image stream, so start processing
+ if ( !content_boundary[0] )
+ {
+ Error( "No content boundary found in header '%s'", header );
+ return( -1 );
+ }
+ mode = MULTI_IMAGE;
+ state = SUBHEADER;
+ }
+ //else if ( !strcasecmp( content_type, "video/mpeg" ) || !strcasecmp( content_type, "video/mpg" ) )
+ //{
+ //// MPEG stream, coming soon!
+ //}
+ else
+ {
+ Error( "Unrecognised content type '%s'", content_type );
+ return( -1 );
+ }
+ buffer.consume( header_len );
+ }
+ else
+ {
+ Debug( 3, "Unable to extract header from stream, retrying" );
+ //return( -1 );
+ }
+ break;
+ }
+ case SUBHEADER :
+ {
+ static RegExpr *subheader_expr = 0;
+ static RegExpr *subcontent_length_expr = 0;
+ static RegExpr *subcontent_type_expr = 0;
+
+ if ( !subheader_expr )
+ {
+ char subheader_pattern[256] = "";
+ snprintf( subheader_pattern, sizeof(subheader_pattern), "^((?:\r?\n){0,2}?(?:--)?%s\r?\n.+?\r?\n\r?\n)", content_boundary );
+ subheader_expr = new RegExpr( subheader_pattern, PCRE_DOTALL );
+ }
+ if ( subheader_expr->Match( (char *)buffer, (int)buffer ) == 2 )
+ {
+ subheader = subheader_expr->MatchString( 1 );
+ subheader_len = subheader_expr->MatchLength( 1 );
+ Debug( 4, "Captured subheader (%d bytes):'%s'", subheader_len, subheader );
+
+ if ( !subcontent_length_expr )
+ subcontent_length_expr = new RegExpr( "Content-length: ?([0-9]+)\r?\n", PCRE_CASELESS );
+ if ( subcontent_length_expr->Match( subheader, subheader_len ) == 2 )
+ {
+ content_length = atoi( subcontent_length_expr->MatchString( 1 ) );
+ Debug( 3, "Got subcontent length '%d'", content_length );
+ }
+
+ if ( !subcontent_type_expr )
+ subcontent_type_expr = new RegExpr( "Content-type: ?(.+?)\r?\n", PCRE_CASELESS );
+ if ( subcontent_type_expr->Match( subheader, subheader_len ) == 2 )
+ {
+ content_type = subcontent_type_expr->MatchString( 1 );
+ Debug( 3, "Got subcontent type '%s'", content_type );
+ }
+
+ buffer.consume( subheader_len );
+ state = CONTENT;
+ }
+ else
+ {
+ Debug( 3, "Unable to extract subheader from stream, retrying" );
+ while ( ! ( buffer_len = ReadData( buffer ) ) ) {
+ }
+ if ( buffer_len < 0 ) {
+ Error( "Unable to extract subheader data" );
+ return( -1 );
+ }
+ }
+ break;
+ }
+ case CONTENT :
+ {
+
+ // if content_type is something like image/jpeg;size=, this will strip the ;size=
+ char * semicolon = strchr( (char *)content_type, ';' );
+ if ( semicolon ) {
+ *semicolon = '\0';
+ }
+
+ if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
+ {
+ format = JPEG;
+ }
+ else if ( !strcasecmp( content_type, "image/x-rgb" ) )
+ {
+ format = X_RGB;
+ }
+ else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
+ {
+ format = X_RGBZ;
+ }
+ else
+ {
+ Error( "Found unsupported content type '%s'", content_type );
+ return( -1 );
+ }
+
+ if ( content_length )
+ {
+ while ( (long)buffer.size() < content_length )
+ {
+ Debug(3, "Need more data buffer %d < content length %d", buffer.size(), content_length );
+ if ( ReadData( buffer ) < 0 ) {
+ Error( "Unable to read content" );
+ return( -1 );
+ }
+ }
+ Debug( 3, "Got end of image by length, content-length = %d", content_length );
+ }
+ else
+ {
+ while ( !content_length )
+ {
+ while ( ! ( buffer_len = ReadData( buffer ) ) ) {
+ }
+ if ( buffer_len < 0 ) {
+ Error( "Unable to read content" );
+ return( -1 );
+ }
+ static RegExpr *content_expr = 0;
+ if ( mode == MULTI_IMAGE )
+ {
+ if ( !content_expr )
+ {
+ char content_pattern[256] = "";
+ snprintf( content_pattern, sizeof(content_pattern), "^(.+?)(?:\r?\n)*(?:--)?%s\r?\n", content_boundary );
+ content_expr = new RegExpr( content_pattern, PCRE_DOTALL );
+ }
+ if ( content_expr->Match( buffer, buffer.size() ) == 2 )
+ {
+ content_length = content_expr->MatchLength( 1 );
+ Debug( 3, "Got end of image by pattern, content-length = %d", content_length );
+ }
+ }
+ }
+ }
+ if ( mode == SINGLE_IMAGE )
+ {
+ state = HEADER;
+ Disconnect();
+ }
+ else
+ {
+ state = SUBHEADER;
+ }
+ Debug( 3, "Returning %d (%d) bytes of captured content", content_length, buffer.size() );
+ return( content_length );
+ }
+ case HEADERCONT :
+ case SUBHEADERCONT :
+ {
+ // Ignore
+ break;
+ }
+ }
+ }
+ }
+ else
+#endif // HAVE_LIBPCRE
+ {
if ( method == REGEXP )
{
- const char *header = 0;
- int header_len = 0;
- const char *http_version = 0;
- int status_code = 0;
- const char *status_mesg = 0;
- const char *connection_type = "";
- int content_length = 0;
- const char *content_type = "";
- const char *content_boundary = "";
- const char *subheader = 0;
- int subheader_len = 0;
- //int subcontent_length = 0;
- //const char *subcontent_type = "";
-
- while ( true )
- {
- switch( state )
- {
- case HEADER :
- {
- static RegExpr *header_expr = 0;
- static RegExpr *status_expr = 0;
- static RegExpr *connection_expr = 0;
- static RegExpr *content_length_expr = 0;
- static RegExpr *content_type_expr = 0;
-
- while ( ! ( buffer_len = ReadData( buffer ) ) ) {
- }
- if ( buffer_len < 0 ) {
- Error( "Unable to read header data" );
- return( -1 );
- }
- if ( !header_expr )
- header_expr = new RegExpr( "^(.+?\r?\n\r?\n)", PCRE_DOTALL );
- if ( header_expr->Match( (char*)buffer, buffer.size() ) == 2 )
- {
- header = header_expr->MatchString( 1 );
- header_len = header_expr->MatchLength( 1 );
- Debug( 4, "Captured header (%d bytes):\n'%s'", header_len, header );
-
- if ( !status_expr )
- status_expr = new RegExpr( "^HTTP/(1\\.[01]) +([0-9]+) +(.+?)\r?\n", PCRE_CASELESS );
- if ( status_expr->Match( header, header_len ) < 4 )
- {
- Error( "Unable to extract HTTP status from header" );
- return( -1 );
- }
- http_version = status_expr->MatchString( 1 );
- status_code = atoi( status_expr->MatchString( 2 ) );
- status_mesg = status_expr->MatchString( 3 );
-
- if ( status_code == 401 ) {
- if ( mNeedAuth ) {
- Error( "Failed authentication: " );
- return( -1 );
- }
- mNeedAuth = true;
- std::string Header = header;
-
- mAuthenticator->checkAuthResponse(Header);
- if ( mAuthenticator->auth_method() == zm::AUTH_DIGEST ) {
- Debug( 2, "Need Digest Authentication" );
- request = stringtf( "GET %s HTTP/%s\r\n", path.c_str(), config.http_version );
- request += stringtf( "User-Agent: %s/%s\r\n", config.http_ua, ZM_VERSION );
- request += stringtf( "Host: %s\r\n", host.c_str());
- if ( strcmp( config.http_version, "1.0" ) == 0 )
- request += stringtf( "Connection: Keep-Alive\r\n" );
- request += mAuthenticator->getAuthHeader( "GET", path.c_str() );
- request += "\r\n";
-
- Debug( 2, "New request header: %s", request.c_str() );
- return( 0 );
- }
-
- } else if ( status_code < 200 || status_code > 299 ) {
- Error( "Invalid response status %d: %s\n%s", status_code, status_mesg, (char *)buffer );
- return( -1 );
- }
- Debug( 3, "Got status '%d' (%s), http version %s", status_code, status_mesg, http_version );
-
- if ( !connection_expr )
- connection_expr = new RegExpr( "Connection: ?(.+?)\r?\n", PCRE_CASELESS );
- if ( connection_expr->Match( header, header_len ) == 2 )
- {
- connection_type = connection_expr->MatchString( 1 );
- Debug( 3, "Got connection '%s'", connection_type );
- }
-
- if ( !content_length_expr )
- content_length_expr = new RegExpr( "Content-length: ?([0-9]+)\r?\n", PCRE_CASELESS );
- if ( content_length_expr->Match( header, header_len ) == 2 )
- {
- content_length = atoi( content_length_expr->MatchString( 1 ) );
- Debug( 3, "Got content length '%d'", content_length );
- }
-
- if ( !content_type_expr )
- content_type_expr = new RegExpr( "Content-type: ?(.+?)(?:; ?boundary=(.+?))?\r?\n", PCRE_CASELESS );
- if ( content_type_expr->Match( header, header_len ) >= 2 )
- {
- content_type = content_type_expr->MatchString( 1 );
- Debug( 3, "Got content type '%s'\n", content_type );
- if ( content_type_expr->MatchCount() > 2 )
- {
- content_boundary = content_type_expr->MatchString( 2 );
- Debug( 3, "Got content boundary '%s'", content_boundary );
- }
- }
-
- if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
- {
- // Single image
- mode = SINGLE_IMAGE;
- format = JPEG;
- state = CONTENT;
- }
- else if ( !strcasecmp( content_type, "image/x-rgb" ) )
- {
- // Single image
- mode = SINGLE_IMAGE;
- format = X_RGB;
- state = CONTENT;
- }
- else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
- {
- // Single image
- mode = SINGLE_IMAGE;
- format = X_RGBZ;
- state = CONTENT;
- }
- else if ( !strcasecmp( content_type, "multipart/x-mixed-replace" ) )
- {
- // Image stream, so start processing
- if ( !content_boundary[0] )
- {
- Error( "No content boundary found in header '%s'", header );
- return( -1 );
- }
- mode = MULTI_IMAGE;
- state = SUBHEADER;
- }
- //else if ( !strcasecmp( content_type, "video/mpeg" ) || !strcasecmp( content_type, "video/mpg" ) )
- //{
- //// MPEG stream, coming soon!
- //}
- else
- {
- Error( "Unrecognised content type '%s'", content_type );
- return( -1 );
- }
- buffer.consume( header_len );
- }
- else
- {
- Debug( 3, "Unable to extract header from stream, retrying" );
- //return( -1 );
- }
- break;
- }
- case SUBHEADER :
- {
- static RegExpr *subheader_expr = 0;
- static RegExpr *subcontent_length_expr = 0;
- static RegExpr *subcontent_type_expr = 0;
-
- if ( !subheader_expr )
- {
- char subheader_pattern[256] = "";
- snprintf( subheader_pattern, sizeof(subheader_pattern), "^((?:\r?\n){0,2}?(?:--)?%s\r?\n.+?\r?\n\r?\n)", content_boundary );
- subheader_expr = new RegExpr( subheader_pattern, PCRE_DOTALL );
- }
- if ( subheader_expr->Match( (char *)buffer, (int)buffer ) == 2 )
- {
- subheader = subheader_expr->MatchString( 1 );
- subheader_len = subheader_expr->MatchLength( 1 );
- Debug( 4, "Captured subheader (%d bytes):'%s'", subheader_len, subheader );
-
- if ( !subcontent_length_expr )
- subcontent_length_expr = new RegExpr( "Content-length: ?([0-9]+)\r?\n", PCRE_CASELESS );
- if ( subcontent_length_expr->Match( subheader, subheader_len ) == 2 )
- {
- content_length = atoi( subcontent_length_expr->MatchString( 1 ) );
- Debug( 3, "Got subcontent length '%d'", content_length );
- }
-
- if ( !subcontent_type_expr )
- subcontent_type_expr = new RegExpr( "Content-type: ?(.+?)\r?\n", PCRE_CASELESS );
- if ( subcontent_type_expr->Match( subheader, subheader_len ) == 2 )
- {
- content_type = subcontent_type_expr->MatchString( 1 );
- Debug( 3, "Got subcontent type '%s'", content_type );
- }
-
- buffer.consume( subheader_len );
- state = CONTENT;
- }
- else
- {
- Debug( 3, "Unable to extract subheader from stream, retrying" );
- while ( ! ( buffer_len = ReadData( buffer ) ) ) {
- }
- if ( buffer_len < 0 ) {
- Error( "Unable to extract subheader data" );
- return( -1 );
- }
- }
- break;
- }
- case CONTENT :
- {
-
- // if content_type is something like image/jpeg;size=, this will strip the ;size=
- char * semicolon = strchr( (char *)content_type, ';' );
- if ( semicolon ) {
- *semicolon = '\0';
- }
-
- if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
- {
- format = JPEG;
- }
- else if ( !strcasecmp( content_type, "image/x-rgb" ) )
- {
- format = X_RGB;
- }
- else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
- {
- format = X_RGBZ;
- }
- else
- {
- Error( "Found unsupported content type '%s'", content_type );
- return( -1 );
- }
-
- if ( content_length )
- {
- while ( (long)buffer.size() < content_length )
- {
-Debug(3, "Need more data buffer %d < content length %d", buffer.size(), content_length );
- if ( ReadData( buffer ) < 0 ) {
- Error( "Unable to read content" );
- return( -1 );
- }
- }
- Debug( 3, "Got end of image by length, content-length = %d", content_length );
- }
- else
- {
- while ( !content_length )
- {
- while ( ! ( buffer_len = ReadData( buffer ) ) ) {
- }
- if ( buffer_len < 0 ) {
- Error( "Unable to read content" );
- return( -1 );
- }
- static RegExpr *content_expr = 0;
- if ( mode == MULTI_IMAGE )
- {
- if ( !content_expr )
- {
- char content_pattern[256] = "";
- snprintf( content_pattern, sizeof(content_pattern), "^(.+?)(?:\r?\n)*(?:--)?%s\r?\n", content_boundary );
- content_expr = new RegExpr( content_pattern, PCRE_DOTALL );
- }
- if ( content_expr->Match( buffer, buffer.size() ) == 2 )
- {
- content_length = content_expr->MatchLength( 1 );
- Debug( 3, "Got end of image by pattern, content-length = %d", content_length );
- }
- }
- }
- }
- if ( mode == SINGLE_IMAGE )
- {
- state = HEADER;
- Disconnect();
- }
- else
- {
- state = SUBHEADER;
- }
- Debug( 3, "Returning %d (%d) bytes of captured content", content_length, buffer.size() );
- return( content_length );
- }
- case HEADERCONT :
- case SUBHEADERCONT :
- {
- // Ignore
- break;
- }
- }
- }
+ Warning( "Unable to use netcam regexps as not compiled with libpcre" );
}
- else
-#endif // HAVE_LIBPCRE
+ static const char *http_match = "HTTP/";
+ static const char *connection_match = "Connection:";
+ static const char *content_length_match = "Content-length:";
+ static const char *content_type_match = "Content-type:";
+ static const char *boundary_match = "boundary=";
+ static const char *authenticate_match = "WWW-Authenticate:";
+ static int http_match_len = 0;
+ static int connection_match_len = 0;
+ static int content_length_match_len = 0;
+ static int content_type_match_len = 0;
+ static int boundary_match_len = 0;
+ static int authenticate_match_len = 0;
+
+ if ( !http_match_len )
+ http_match_len = strlen( http_match );
+ if ( !connection_match_len )
+ connection_match_len = strlen( connection_match );
+ if ( !content_length_match_len )
+ content_length_match_len = strlen( content_length_match );
+ if ( !content_type_match_len )
+ content_type_match_len = strlen( content_type_match );
+ if ( !boundary_match_len )
+ boundary_match_len = strlen( boundary_match );
+ if ( !authenticate_match_len )
+ authenticate_match_len = strlen( authenticate_match );
+
+ static int n_headers;
+ //static char *headers[32];
+
+ static int n_subheaders;
+ //static char *subheaders[32];
+
+ static char *http_header;
+ static char *connection_header;
+ static char *content_length_header;
+ static char *content_type_header;
+ static char *boundary_header;
+ static char *authenticate_header;
+ static char subcontent_length_header[32];
+ static char subcontent_type_header[64];
+
+ static char http_version[16];
+ static char status_code[16];
+ static char status_mesg[256];
+ static char connection_type[32];
+ static int content_length;
+ static char content_type[32];
+ static char content_boundary[64];
+ static int content_boundary_len;
+
+ while ( true )
{
- if ( method == REGEXP )
- {
- Warning( "Unable to use netcam regexps as not compiled with libpcre" );
- }
- static const char *http_match = "HTTP/";
- static const char *connection_match = "Connection:";
- static const char *content_length_match = "Content-length:";
- static const char *content_type_match = "Content-type:";
- static const char *boundary_match = "boundary=";
- static const char *authenticate_match = "WWW-Authenticate:";
- static int http_match_len = 0;
- static int connection_match_len = 0;
- static int content_length_match_len = 0;
- static int content_type_match_len = 0;
- static int boundary_match_len = 0;
- static int authenticate_match_len = 0;
+ switch( state )
+ {
+ case HEADER :
+ {
+ n_headers = 0;
+ http_header = 0;
+ connection_header = 0;
+ content_length_header = 0;
+ content_type_header = 0;
+ authenticate_header = 0;
- if ( !http_match_len )
- http_match_len = strlen( http_match );
- if ( !connection_match_len )
- connection_match_len = strlen( connection_match );
- if ( !content_length_match_len )
- content_length_match_len = strlen( content_length_match );
- if ( !content_type_match_len )
- content_type_match_len = strlen( content_type_match );
- if ( !boundary_match_len )
- boundary_match_len = strlen( boundary_match );
- if ( !authenticate_match_len )
- authenticate_match_len = strlen( authenticate_match );
-
- static int n_headers;
- //static char *headers[32];
-
- static int n_subheaders;
- //static char *subheaders[32];
-
- static char *http_header;
- static char *connection_header;
- static char *content_length_header;
- static char *content_type_header;
- static char *boundary_header;
- static char *authenticate_header;
- static char subcontent_length_header[32];
- static char subcontent_type_header[64];
-
- static char http_version[16];
- static char status_code[16];
- static char status_mesg[256];
- static char connection_type[32];
- static int content_length;
- static char content_type[32];
- static char content_boundary[64];
- static int content_boundary_len;
-
- while ( true )
- {
- switch( state )
- {
- case HEADER :
- {
- n_headers = 0;
- http_header = 0;
- connection_header = 0;
- content_length_header = 0;
- content_type_header = 0;
- authenticate_header = 0;
-
- http_version[0] = '\0';
- status_code [0]= '\0';
- status_mesg [0]= '\0';
- connection_type [0]= '\0';
- content_length = 0;
- content_type[0] = '\0';
- content_boundary[0] = '\0';
- content_boundary_len = 0;
- }
- case HEADERCONT :
- {
- while ( ! ( buffer_len = ReadData( buffer ) ) ) {
- }
- if ( buffer_len < 0 ) {
- Error( "Unable to read header" );
- return( -1 );
- }
-
- char *crlf = 0;
- char *header_ptr = (char *)buffer;
- int header_len = buffer.size();
- bool all_headers = false;
-
- while( true )
- {
- int crlf_len = memspn( header_ptr, "\r\n", header_len );
- if ( n_headers )
- {
- if ( (crlf_len == 2 && !strncmp( header_ptr, "\n\n", crlf_len )) || (crlf_len == 4 && !strncmp( header_ptr, "\r\n\r\n", crlf_len )) )
- {
- *header_ptr = '\0';
- header_ptr += crlf_len;
- header_len -= buffer.consume( header_ptr-(char *)buffer );
- all_headers = true;
- break;
- }
- }
- if ( crlf_len )
- {
- if ( header_len == crlf_len )
- {
- break;
- }
- else
- {
- *header_ptr = '\0';
- header_ptr += crlf_len;
- header_len -= buffer.consume( header_ptr-(char *)buffer );
- }
- }
-
- Debug( 6, "%s", header_ptr );
- if ( (crlf = mempbrk( header_ptr, "\r\n", header_len )) )
- {
- //headers[n_headers++] = header_ptr;
- n_headers++;
-
- if ( !http_header && (strncasecmp( header_ptr, http_match, http_match_len ) == 0) )
- {
- http_header = header_ptr+http_match_len;
- Debug( 6, "Got http header '%s'", header_ptr );
- }
- else if ( !connection_header && (strncasecmp( header_ptr, connection_match, connection_match_len) == 0) )
- {
- connection_header = header_ptr+connection_match_len;
- Debug( 6, "Got connection header '%s'", header_ptr );
- }
- else if ( !content_length_header && (strncasecmp( header_ptr, content_length_match, content_length_match_len) == 0) )
- {
- content_length_header = header_ptr+content_length_match_len;
- Debug( 6, "Got content length header '%s'", header_ptr );
- }
-
- else if ( !authenticate_header && (strncasecmp( header_ptr, authenticate_match, authenticate_match_len) == 0) )
- {
- authenticate_header = header_ptr;
- Debug( 6, "Got authenticate header '%s'", header_ptr );
- }
- else if ( !content_type_header && (strncasecmp( header_ptr, content_type_match, content_type_match_len) == 0) )
- {
- content_type_header = header_ptr+content_type_match_len;
- Debug( 6, "Got content type header '%s'", header_ptr );
- }
- else
- {
- Debug( 6, "Got ignored header '%s'", header_ptr );
- }
- header_ptr = crlf;
- header_len -= buffer.consume( header_ptr-(char *)buffer );
- }
- else
- {
- // No end of line found
- break;
- }
- }
-
- if ( all_headers )
- {
- char *start_ptr, *end_ptr;
-
- if ( !http_header )
- {
- Error( "Unable to extract HTTP status from header" );
- return( -1 );
- }
-
- start_ptr = http_header;
- end_ptr = start_ptr+strspn( start_ptr, "10." );
-
- memset( http_version, 0, sizeof(http_version) );
- strncpy( http_version, start_ptr, end_ptr-start_ptr );
-
- start_ptr = end_ptr;
- start_ptr += strspn( start_ptr, " " );
- end_ptr = start_ptr+strspn( start_ptr, "0123456789" );
-
- memset( status_code, 0, sizeof(status_code) );
- strncpy( status_code, start_ptr, end_ptr-start_ptr );
- int status = atoi( status_code );
-
- start_ptr = end_ptr;
- start_ptr += strspn( start_ptr, " " );
- strcpy( status_mesg, start_ptr );
-
- if ( status == 401 ) {
- if ( mNeedAuth ) {
- Error( "Failed authentication: " );
- return( -1 );
- }
- if ( ! authenticate_header ) {
- Error( "Failed authentication, but don't have an authentication header: " );
- return( -1 );
- }
- mNeedAuth = true;
- std::string Header = authenticate_header;
- Debug(2, "Checking for digest auth in %s", authenticate_header );
-
- mAuthenticator->checkAuthResponse(Header);
- if ( mAuthenticator->auth_method() == zm::AUTH_DIGEST ) {
- Debug( 2, "Need Digest Authentication" );
- request = stringtf( "GET %s HTTP/%s\r\n", path.c_str(), config.http_version );
- request += stringtf( "User-Agent: %s/%s\r\n", config.http_ua, ZM_VERSION );
- request += stringtf( "Host: %s\r\n", host.c_str());
- if ( strcmp( config.http_version, "1.0" ) == 0 )
- request += stringtf( "Connection: Keep-Alive\r\n" );
- request += mAuthenticator->getAuthHeader( "GET", path.c_str() );
- request += "\r\n";
-
- Debug( 2, "New request header: %s", request.c_str() );
- return( 0 );
- } else {
- Debug( 2, "Need some other kind of Authentication" );
- }
- } else if ( status < 200 || status > 299 )
- {
- Error( "Invalid response status %s: %s", status_code, status_mesg );
- return( -1 );
- }
- Debug( 3, "Got status '%d' (%s), http version %s", status, status_mesg, http_version );
-
- if ( connection_header )
- {
- memset( connection_type, 0, sizeof(connection_type) );
- start_ptr = connection_header + strspn( connection_header, " " );
- strcpy( connection_type, start_ptr );
- Debug( 3, "Got connection '%s'", connection_type );
- }
- if ( content_length_header )
- {
- start_ptr = content_length_header + strspn( content_length_header, " " );
- content_length = atoi( start_ptr );
- Debug( 3, "Got content length '%d'", content_length );
- }
- if ( content_type_header )
- {
- memset( content_type, 0, sizeof(content_type) );
- start_ptr = content_type_header + strspn( content_type_header, " " );
- if ( (end_ptr = strchr( start_ptr, ';' )) )
- {
- strncpy( content_type, start_ptr, end_ptr-start_ptr );
- Debug( 3, "Got content type '%s'", content_type );
-
- start_ptr = end_ptr + strspn( end_ptr, "; " );
-
- if ( strncasecmp( start_ptr, boundary_match, boundary_match_len ) == 0 )
- {
- start_ptr += boundary_match_len;
- start_ptr += strspn( start_ptr, "-" );
- content_boundary_len = sprintf( content_boundary, "--%s", start_ptr );
- Debug( 3, "Got content boundary '%s'", content_boundary );
- }
- else
- {
- Error( "No content boundary found in header '%s'", content_type_header );
- }
- }
- else
- {
- strcpy( content_type, start_ptr );
- Debug( 3, "Got content type '%s'", content_type );
- }
- }
-
- if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
- {
- // Single image
- mode = SINGLE_IMAGE;
- format = JPEG;
- state = CONTENT;
- }
- else if ( !strcasecmp( content_type, "image/x-rgb" ) )
- {
- // Single image
- mode = SINGLE_IMAGE;
- format = X_RGB;
- state = CONTENT;
- }
- else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
- {
- // Single image
- mode = SINGLE_IMAGE;
- format = X_RGBZ;
- state = CONTENT;
- }
- else if ( !strcasecmp( content_type, "multipart/x-mixed-replace" ) )
- {
- // Image stream, so start processing
- if ( !content_boundary[0] )
- {
- Error( "No content boundary found in header '%s'", content_type_header );
- return( -1 );
- }
- mode = MULTI_IMAGE;
- state = SUBHEADER;
- }
- //else if ( !strcasecmp( content_type, "video/mpeg" ) || !strcasecmp( content_type, "video/mpg" ) )
- //{
- //// MPEG stream, coming soon!
- //}
- else
- {
- Error( "Unrecognised content type '%s'", content_type );
- return( -1 );
- }
- }
- else
- {
- Debug( 3, "Unable to extract entire header from stream, continuing" );
- state = HEADERCONT;
- //return( -1 );
- }
- break;
- }
- case SUBHEADER :
- {
- n_subheaders = 0;
- boundary_header = 0;
- subcontent_length_header[0] = '\0';
- subcontent_type_header[0] = '\0';
- content_length = 0;
- content_type[0] = '\0';
- }
- case SUBHEADERCONT :
- {
- char *crlf = 0;
- char *subheader_ptr = (char *)buffer;
- int subheader_len = buffer.size();
- bool all_headers = false;
-
- while( true )
- {
- int crlf_len = memspn( subheader_ptr, "\r\n", subheader_len );
- if ( n_subheaders )
- {
- if ( (crlf_len == 2 && !strncmp( subheader_ptr, "\n\n", crlf_len )) || (crlf_len == 4 && !strncmp( subheader_ptr, "\r\n\r\n", crlf_len )) )
- {
- *subheader_ptr = '\0';
- subheader_ptr += crlf_len;
- subheader_len -= buffer.consume( subheader_ptr-(char *)buffer );
- all_headers = true;
- break;
- }
- }
- if ( crlf_len )
- {
- if ( subheader_len == crlf_len )
- {
- break;
- }
- else
- {
- *subheader_ptr = '\0';
- subheader_ptr += crlf_len;
- subheader_len -= buffer.consume( subheader_ptr-(char *)buffer );
- }
- }
-
- Debug( 6, "%d: %s", subheader_len, subheader_ptr );
-
- if ( (crlf = mempbrk( subheader_ptr, "\r\n", subheader_len )) )
- {
- //subheaders[n_subheaders++] = subheader_ptr;
- n_subheaders++;
-
- if ( !boundary_header && (strncasecmp( subheader_ptr, content_boundary, content_boundary_len ) == 0) )
- {
- boundary_header = subheader_ptr;
- Debug( 4, "Got boundary subheader '%s'", subheader_ptr );
- }
- else if ( !subcontent_length_header[0] && (strncasecmp( subheader_ptr, content_length_match, content_length_match_len) == 0) )
- {
- strncpy( subcontent_length_header, subheader_ptr+content_length_match_len, sizeof(subcontent_length_header) );
- *(subcontent_length_header+strcspn( subcontent_length_header, "\r\n" )) = '\0';
- Debug( 4, "Got content length subheader '%s'", subcontent_length_header );
- }
- else if ( !subcontent_type_header[0] && (strncasecmp( subheader_ptr, content_type_match, content_type_match_len) == 0) )
- {
- strncpy( subcontent_type_header, subheader_ptr+content_type_match_len, sizeof(subcontent_type_header) );
- *(subcontent_type_header+strcspn( subcontent_type_header, "\r\n" )) = '\0';
- Debug( 4, "Got content type subheader '%s'", subcontent_type_header );
- }
- else
- {
- Debug( 6, "Got ignored subheader '%s' found", subheader_ptr );
- }
- subheader_ptr = crlf;
- subheader_len -= buffer.consume( subheader_ptr-(char *)buffer );
- }
- else
- {
- // No line end found
- break;
- }
- }
-
- if ( all_headers && boundary_header )
- {
- char *start_ptr/*, *end_ptr*/;
-
- Debug( 3, "Got boundary '%s'", boundary_header );
-
- if ( subcontent_length_header[0] )
- {
- start_ptr = subcontent_length_header + strspn( subcontent_length_header, " " );
- content_length = atoi( start_ptr );
- Debug( 3, "Got subcontent length '%d'", content_length );
- }
- if ( subcontent_type_header[0] )
- {
- memset( content_type, 0, sizeof(content_type) );
- start_ptr = subcontent_type_header + strspn( subcontent_type_header, " " );
- strcpy( content_type, start_ptr );
- Debug( 3, "Got subcontent type '%s'", content_type );
- }
- state = CONTENT;
- }
- else
- {
- Debug( 3, "Unable to extract subheader from stream, retrying" );
- while ( ! ( buffer_len = ReadData( buffer ) ) ) {
- }
- if ( buffer_len < 0 ) {
- Error( "Unable to read subheader" );
- return( -1 );
- }
- state = SUBHEADERCONT;
- }
- break;
- }
- case CONTENT :
- {
-
- // if content_type is something like image/jpeg;size=, this will strip the ;size=
- char * semicolon = strchr( content_type, ';' );
- if ( semicolon ) {
- *semicolon = '\0';
- }
-
- if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
- {
- format = JPEG;
- }
- else if ( !strcasecmp( content_type, "image/x-rgb" ) )
- {
- format = X_RGB;
- }
- else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
- {
- format = X_RGBZ;
- }
- else
- {
- Error( "Found unsupported content type '%s'", content_type );
- return( -1 );
- }
-
- if ( format == JPEG && buffer.size() >= 2 )
- {
- if ( buffer[0] != 0xff || buffer[1] != 0xd8 )
- {
- Error( "Found bogus jpeg header '%02x%02x'", buffer[0], buffer[1] );
- return( -1 );
- }
- }
-
- if ( content_length )
- {
- while ( (long)buffer.size() < content_length )
- {
- //int buffer_len = ReadData( buffer, content_length-buffer.size() );
- if ( ReadData( buffer ) < 0 ) {
- Error( "Unable to read content" );
- return( -1 );
- }
- }
- Debug( 3, "Got end of image by length, content-length = %d", content_length );
- }
- else
- {
- int content_pos = 0;
- while ( !content_length )
- {
- buffer_len = ReadData( buffer );
- if ( buffer_len < 0 )
- {
- Error( "Unable to read content" );
- return( -1 );
- }
- int buffer_size = buffer.size();
- if ( buffer_len )
- {
- if ( mode == MULTI_IMAGE )
- {
- while ( char *start_ptr = (char *)memstr( (char *)buffer+content_pos, "\r\n--", buffer_size-content_pos ) )
- {
- content_length = start_ptr - (char *)buffer;
- Debug( 3, "Got end of image by pattern (crlf--), content-length = %d", content_length );
- break;
- }
- }
- }
- else
- {
- content_length = buffer_size;
- Debug( 3, "Got end of image by closure, content-length = %d", content_length );
- if ( mode == SINGLE_IMAGE )
- {
- char *end_ptr = (char *)buffer+buffer_size;
-
- while( *end_ptr == '\r' || *end_ptr == '\n' )
- {
- content_length--;
- end_ptr--;
- }
-
- if ( end_ptr != ((char *)buffer+buffer_size) )
- {
- Debug( 3, "Trimmed end of image, new content-length = %d", content_length );
- }
- }
- }
- }
- }
- if ( mode == SINGLE_IMAGE )
- {
- state = HEADER;
- Disconnect();
- }
- else
- {
- state = SUBHEADER;
- }
-
- if ( format == JPEG && buffer.size() >= 2 )
- {
- if ( buffer[0] != 0xff || buffer[1] != 0xd8 )
- {
- Error( "Found bogus jpeg header '%02x%02x'", buffer[0], buffer[1] );
- return( -1 );
- }
- }
-
- Debug( 3, "Returning %d bytes, buffer size: (%d) bytes of captured content", content_length, buffer.size() );
- return( content_length );
- }
+ http_version[0] = '\0';
+ status_code [0]= '\0';
+ status_mesg [0]= '\0';
+ connection_type [0]= '\0';
+ content_length = 0;
+ content_type[0] = '\0';
+ content_boundary[0] = '\0';
+ content_boundary_len = 0;
+ }
+ case HEADERCONT :
+ {
+ while ( ! ( buffer_len = ReadData( buffer ) ) ) {
}
- }
+ if ( buffer_len < 0 ) {
+ Error( "Unable to read header" );
+ return( -1 );
+ }
+
+ char *crlf = 0;
+ char *header_ptr = (char *)buffer;
+ int header_len = buffer.size();
+ bool all_headers = false;
+
+ while( true )
+ {
+ int crlf_len = memspn( header_ptr, "\r\n", header_len );
+ if ( n_headers )
+ {
+ if ( (crlf_len == 2 && !strncmp( header_ptr, "\n\n", crlf_len )) || (crlf_len == 4 && !strncmp( header_ptr, "\r\n\r\n", crlf_len )) )
+ {
+ *header_ptr = '\0';
+ header_ptr += crlf_len;
+ header_len -= buffer.consume( header_ptr-(char *)buffer );
+ all_headers = true;
+ break;
+ }
+ }
+ if ( crlf_len )
+ {
+ if ( header_len == crlf_len )
+ {
+ break;
+ }
+ else
+ {
+ *header_ptr = '\0';
+ header_ptr += crlf_len;
+ header_len -= buffer.consume( header_ptr-(char *)buffer );
+ }
+ }
+
+ Debug( 6, "%s", header_ptr );
+ if ( (crlf = mempbrk( header_ptr, "\r\n", header_len )) )
+ {
+ //headers[n_headers++] = header_ptr;
+ n_headers++;
+
+ if ( !http_header && (strncasecmp( header_ptr, http_match, http_match_len ) == 0) )
+ {
+ http_header = header_ptr+http_match_len;
+ Debug( 6, "Got http header '%s'", header_ptr );
+ }
+ else if ( !connection_header && (strncasecmp( header_ptr, connection_match, connection_match_len) == 0) )
+ {
+ connection_header = header_ptr+connection_match_len;
+ Debug( 6, "Got connection header '%s'", header_ptr );
+ }
+ else if ( !content_length_header && (strncasecmp( header_ptr, content_length_match, content_length_match_len) == 0) )
+ {
+ content_length_header = header_ptr+content_length_match_len;
+ Debug( 6, "Got content length header '%s'", header_ptr );
+ }
+
+ else if ( !authenticate_header && (strncasecmp( header_ptr, authenticate_match, authenticate_match_len) == 0) )
+ {
+ authenticate_header = header_ptr;
+ Debug( 6, "Got authenticate header '%s'", header_ptr );
+ }
+ else if ( !content_type_header && (strncasecmp( header_ptr, content_type_match, content_type_match_len) == 0) )
+ {
+ content_type_header = header_ptr+content_type_match_len;
+ Debug( 6, "Got content type header '%s'", header_ptr );
+ }
+ else
+ {
+ Debug( 6, "Got ignored header '%s'", header_ptr );
+ }
+ header_ptr = crlf;
+ header_len -= buffer.consume( header_ptr-(char *)buffer );
+ }
+ else
+ {
+ // No end of line found
+ break;
+ }
+ }
+
+ if ( all_headers )
+ {
+ char *start_ptr, *end_ptr;
+
+ if ( !http_header )
+ {
+ Error( "Unable to extract HTTP status from header" );
+ return( -1 );
+ }
+
+ start_ptr = http_header;
+ end_ptr = start_ptr+strspn( start_ptr, "10." );
+
+ memset( http_version, 0, sizeof(http_version) );
+ strncpy( http_version, start_ptr, end_ptr-start_ptr );
+
+ start_ptr = end_ptr;
+ start_ptr += strspn( start_ptr, " " );
+ end_ptr = start_ptr+strspn( start_ptr, "0123456789" );
+
+ memset( status_code, 0, sizeof(status_code) );
+ strncpy( status_code, start_ptr, end_ptr-start_ptr );
+ int status = atoi( status_code );
+
+ start_ptr = end_ptr;
+ start_ptr += strspn( start_ptr, " " );
+ strcpy( status_mesg, start_ptr );
+
+ if ( status == 401 ) {
+ if ( mNeedAuth ) {
+ Error( "Failed authentication: " );
+ return( -1 );
+ }
+ if ( ! authenticate_header ) {
+ Error( "Failed authentication, but don't have an authentication header: " );
+ return( -1 );
+ }
+ mNeedAuth = true;
+ std::string Header = authenticate_header;
+ Debug(2, "Checking for digest auth in %s", authenticate_header );
+
+ mAuthenticator->checkAuthResponse(Header);
+ if ( mAuthenticator->auth_method() == zm::AUTH_DIGEST ) {
+ Debug( 2, "Need Digest Authentication" );
+ request = stringtf( "GET %s HTTP/%s\r\n", path.c_str(), config.http_version );
+ request += stringtf( "User-Agent: %s/%s\r\n", config.http_ua, ZM_VERSION );
+ request += stringtf( "Host: %s\r\n", host.c_str());
+ if ( strcmp( config.http_version, "1.0" ) == 0 )
+ request += stringtf( "Connection: Keep-Alive\r\n" );
+ request += mAuthenticator->getAuthHeader( "GET", path.c_str() );
+ request += "\r\n";
+
+ Debug( 2, "New request header: %s", request.c_str() );
+ return( 0 );
+ } else {
+ Debug( 2, "Need some other kind of Authentication" );
+ }
+ } else if ( status < 200 || status > 299 )
+ {
+ Error( "Invalid response status %s: %s", status_code, status_mesg );
+ return( -1 );
+ }
+ Debug( 3, "Got status '%d' (%s), http version %s", status, status_mesg, http_version );
+
+ if ( connection_header )
+ {
+ memset( connection_type, 0, sizeof(connection_type) );
+ start_ptr = connection_header + strspn( connection_header, " " );
+ strcpy( connection_type, start_ptr );
+ Debug( 3, "Got connection '%s'", connection_type );
+ }
+ if ( content_length_header )
+ {
+ start_ptr = content_length_header + strspn( content_length_header, " " );
+ content_length = atoi( start_ptr );
+ Debug( 3, "Got content length '%d'", content_length );
+ }
+ if ( content_type_header )
+ {
+ memset( content_type, 0, sizeof(content_type) );
+ start_ptr = content_type_header + strspn( content_type_header, " " );
+ if ( (end_ptr = strchr( start_ptr, ';' )) )
+ {
+ strncpy( content_type, start_ptr, end_ptr-start_ptr );
+ Debug( 3, "Got content type '%s'", content_type );
+
+ start_ptr = end_ptr + strspn( end_ptr, "; " );
+
+ if ( strncasecmp( start_ptr, boundary_match, boundary_match_len ) == 0 )
+ {
+ start_ptr += boundary_match_len;
+ start_ptr += strspn( start_ptr, "-" );
+ content_boundary_len = sprintf( content_boundary, "--%s", start_ptr );
+ Debug( 3, "Got content boundary '%s'", content_boundary );
+ }
+ else
+ {
+ Error( "No content boundary found in header '%s'", content_type_header );
+ }
+ }
+ else
+ {
+ strcpy( content_type, start_ptr );
+ Debug( 3, "Got content type '%s'", content_type );
+ }
+ }
+
+ if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
+ {
+ // Single image
+ mode = SINGLE_IMAGE;
+ format = JPEG;
+ state = CONTENT;
+ }
+ else if ( !strcasecmp( content_type, "image/x-rgb" ) )
+ {
+ // Single image
+ mode = SINGLE_IMAGE;
+ format = X_RGB;
+ state = CONTENT;
+ }
+ else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
+ {
+ // Single image
+ mode = SINGLE_IMAGE;
+ format = X_RGBZ;
+ state = CONTENT;
+ }
+ else if ( !strcasecmp( content_type, "multipart/x-mixed-replace" ) )
+ {
+ // Image stream, so start processing
+ if ( !content_boundary[0] )
+ {
+ Error( "No content boundary found in header '%s'", content_type_header );
+ return( -1 );
+ }
+ mode = MULTI_IMAGE;
+ state = SUBHEADER;
+ }
+ //else if ( !strcasecmp( content_type, "video/mpeg" ) || !strcasecmp( content_type, "video/mpg" ) )
+ //{
+ //// MPEG stream, coming soon!
+ //}
+ else
+ {
+ Error( "Unrecognised content type '%s'", content_type );
+ return( -1 );
+ }
+ }
+ else
+ {
+ Debug( 3, "Unable to extract entire header from stream, continuing" );
+ state = HEADERCONT;
+ //return( -1 );
+ }
+ break;
+ }
+ case SUBHEADER :
+ {
+ n_subheaders = 0;
+ boundary_header = 0;
+ subcontent_length_header[0] = '\0';
+ subcontent_type_header[0] = '\0';
+ content_length = 0;
+ content_type[0] = '\0';
+ }
+ case SUBHEADERCONT :
+ {
+ char *crlf = 0;
+ char *subheader_ptr = (char *)buffer;
+ int subheader_len = buffer.size();
+ bool all_headers = false;
+
+ while( true )
+ {
+ int crlf_len = memspn( subheader_ptr, "\r\n", subheader_len );
+ if ( n_subheaders )
+ {
+ if ( (crlf_len == 2 && !strncmp( subheader_ptr, "\n\n", crlf_len )) || (crlf_len == 4 && !strncmp( subheader_ptr, "\r\n\r\n", crlf_len )) )
+ {
+ *subheader_ptr = '\0';
+ subheader_ptr += crlf_len;
+ subheader_len -= buffer.consume( subheader_ptr-(char *)buffer );
+ all_headers = true;
+ break;
+ }
+ }
+ if ( crlf_len )
+ {
+ if ( subheader_len == crlf_len )
+ {
+ break;
+ }
+ else
+ {
+ *subheader_ptr = '\0';
+ subheader_ptr += crlf_len;
+ subheader_len -= buffer.consume( subheader_ptr-(char *)buffer );
+ }
+ }
+
+ Debug( 6, "%d: %s", subheader_len, subheader_ptr );
+
+ if ( (crlf = mempbrk( subheader_ptr, "\r\n", subheader_len )) )
+ {
+ //subheaders[n_subheaders++] = subheader_ptr;
+ n_subheaders++;
+
+ if ( !boundary_header && (strncasecmp( subheader_ptr, content_boundary, content_boundary_len ) == 0) )
+ {
+ boundary_header = subheader_ptr;
+ Debug( 4, "Got boundary subheader '%s'", subheader_ptr );
+ }
+ else if ( !subcontent_length_header[0] && (strncasecmp( subheader_ptr, content_length_match, content_length_match_len) == 0) )
+ {
+ strncpy( subcontent_length_header, subheader_ptr+content_length_match_len, sizeof(subcontent_length_header) );
+ *(subcontent_length_header+strcspn( subcontent_length_header, "\r\n" )) = '\0';
+ Debug( 4, "Got content length subheader '%s'", subcontent_length_header );
+ }
+ else if ( !subcontent_type_header[0] && (strncasecmp( subheader_ptr, content_type_match, content_type_match_len) == 0) )
+ {
+ strncpy( subcontent_type_header, subheader_ptr+content_type_match_len, sizeof(subcontent_type_header) );
+ *(subcontent_type_header+strcspn( subcontent_type_header, "\r\n" )) = '\0';
+ Debug( 4, "Got content type subheader '%s'", subcontent_type_header );
+ }
+ else
+ {
+ Debug( 6, "Got ignored subheader '%s' found", subheader_ptr );
+ }
+ subheader_ptr = crlf;
+ subheader_len -= buffer.consume( subheader_ptr-(char *)buffer );
+ }
+ else
+ {
+ // No line end found
+ break;
+ }
+ }
+
+ if ( all_headers && boundary_header )
+ {
+ char *start_ptr/*, *end_ptr*/;
+
+ Debug( 3, "Got boundary '%s'", boundary_header );
+
+ if ( subcontent_length_header[0] )
+ {
+ start_ptr = subcontent_length_header + strspn( subcontent_length_header, " " );
+ content_length = atoi( start_ptr );
+ Debug( 3, "Got subcontent length '%d'", content_length );
+ }
+ if ( subcontent_type_header[0] )
+ {
+ memset( content_type, 0, sizeof(content_type) );
+ start_ptr = subcontent_type_header + strspn( subcontent_type_header, " " );
+ strcpy( content_type, start_ptr );
+ Debug( 3, "Got subcontent type '%s'", content_type );
+ }
+ state = CONTENT;
+ }
+ else
+ {
+ Debug( 3, "Unable to extract subheader from stream, retrying" );
+ while ( ! ( buffer_len = ReadData( buffer ) ) ) {
+ }
+ if ( buffer_len < 0 ) {
+ Error( "Unable to read subheader" );
+ return( -1 );
+ }
+ state = SUBHEADERCONT;
+ }
+ break;
+ }
+ case CONTENT :
+ {
+
+ // if content_type is something like image/jpeg;size=, this will strip the ;size=
+ char * semicolon = strchr( content_type, ';' );
+ if ( semicolon ) {
+ *semicolon = '\0';
+ }
+
+ if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
+ {
+ format = JPEG;
+ }
+ else if ( !strcasecmp( content_type, "image/x-rgb" ) )
+ {
+ format = X_RGB;
+ }
+ else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
+ {
+ format = X_RGBZ;
+ }
+ else
+ {
+ Error( "Found unsupported content type '%s'", content_type );
+ return( -1 );
+ }
+
+ if ( format == JPEG && buffer.size() >= 2 )
+ {
+ if ( buffer[0] != 0xff || buffer[1] != 0xd8 )
+ {
+ Error( "Found bogus jpeg header '%02x%02x'", buffer[0], buffer[1] );
+ return( -1 );
+ }
+ }
+
+ if ( content_length )
+ {
+ while ( (long)buffer.size() < content_length )
+ {
+ //int buffer_len = ReadData( buffer, content_length-buffer.size() );
+ if ( ReadData( buffer ) < 0 ) {
+ Error( "Unable to read content" );
+ return( -1 );
+ }
+ }
+ Debug( 3, "Got end of image by length, content-length = %d", content_length );
+ }
+ else
+ {
+ int content_pos = 0;
+ while ( !content_length )
+ {
+ buffer_len = ReadData( buffer );
+ if ( buffer_len < 0 )
+ {
+ Error( "Unable to read content" );
+ return( -1 );
+ }
+ int buffer_size = buffer.size();
+ if ( buffer_len )
+ {
+ if ( mode == MULTI_IMAGE )
+ {
+ while ( char *start_ptr = (char *)memstr( (char *)buffer+content_pos, "\r\n--", buffer_size-content_pos ) )
+ {
+ content_length = start_ptr - (char *)buffer;
+ Debug( 3, "Got end of image by pattern (crlf--), content-length = %d", content_length );
+ break;
+ }
+ }
+ }
+ else
+ {
+ content_length = buffer_size;
+ Debug( 3, "Got end of image by closure, content-length = %d", content_length );
+ if ( mode == SINGLE_IMAGE )
+ {
+ char *end_ptr = (char *)buffer+buffer_size;
+
+ while( *end_ptr == '\r' || *end_ptr == '\n' )
+ {
+ content_length--;
+ end_ptr--;
+ }
+
+ if ( end_ptr != ((char *)buffer+buffer_size) )
+ {
+ Debug( 3, "Trimmed end of image, new content-length = %d", content_length );
+ }
+ }
+ }
+ }
+ }
+ if ( mode == SINGLE_IMAGE )
+ {
+ state = HEADER;
+ Disconnect();
+ }
+ else
+ {
+ state = SUBHEADER;
+ }
+
+ if ( format == JPEG && buffer.size() >= 2 )
+ {
+ if ( buffer[0] != 0xff || buffer[1] != 0xd8 )
+ {
+ Error( "Found bogus jpeg header '%02x%02x'", buffer[0], buffer[1] );
+ return( -1 );
+ }
+ }
+
+ Debug( 3, "Returning %d bytes, buffer size: (%d) bytes of captured content", content_length, buffer.size() );
+ return( content_length );
+ }
+ }
}
- return( 0 );
+ }
+ return( 0 );
}
int RemoteCameraHttp::PreCapture()
{
+ if ( sd < 0 )
+ {
+ Connect();
if ( sd < 0 )
{
- Connect();
- if ( sd < 0 )
- {
- Error( "Unable to connect to camera" );
- return( -1 );
- }
- mode = SINGLE_IMAGE;
- buffer.clear();
+ Error( "Unable to connect to camera" );
+ return( -1 );
}
- if ( mode == SINGLE_IMAGE )
+ mode = SINGLE_IMAGE;
+ buffer.clear();
+ }
+ if ( mode == SINGLE_IMAGE )
+ {
+ if ( SendRequest() < 0 )
{
- if ( SendRequest() < 0 )
- {
- Error( "Unable to send request" );
- Disconnect();
- return( -1 );
- }
+ Error( "Unable to send request" );
+ Disconnect();
+ return( -1 );
}
- return( 0 );
+ }
+ return( 0 );
}
int RemoteCameraHttp::Capture( Image &image )
{
- int content_length = GetResponse();
- if ( content_length == 0 )
- {
- Warning( "Unable to capture image, retrying" );
- return( 1 );
- }
- if ( content_length < 0 )
- {
- Error( "Unable to get response, disconnecting" );
+ int content_length = GetResponse();
+ if ( content_length == 0 )
+ {
+ Warning( "Unable to capture image, retrying" );
+ return( 1 );
+ }
+ if ( content_length < 0 )
+ {
+ Error( "Unable to get response, disconnecting" );
+ Disconnect();
+ return( -1 );
+ }
+ switch( format )
+ {
+ case JPEG :
+ {
+ if ( !image.DecodeJpeg( buffer.extract( content_length ), content_length, colours, subpixelorder ) )
+ {
+ Error( "Unable to decode jpeg" );
+ Disconnect();
+ return( -1 );
+ }
+ break;
+ }
+ case X_RGB :
+ {
+ if ( content_length != (long)image.Size() )
+ {
+ Error( "Image length mismatch, expected %d bytes, content length was %d", image.Size(), content_length );
+ Disconnect();
+ return( -1 );
+ }
+ image.Assign( width, height, colours, subpixelorder, buffer, imagesize );
+ break;
+ }
+ case X_RGBZ :
+ {
+ if ( !image.Unzip( buffer.extract( content_length ), content_length ) )
+ {
+ Error( "Unable to unzip RGB image" );
+ Disconnect();
+ return( -1 );
+ }
+ image.Assign( width, height, colours, subpixelorder, buffer, imagesize );
+ break;
+ }
+ default :
+ {
+ Error( "Unexpected image format encountered" );
Disconnect();
return( -1 );
- }
- switch( format )
- {
- case JPEG :
- {
- if ( !image.DecodeJpeg( buffer.extract( content_length ), content_length, colours, subpixelorder ) )
- {
- Error( "Unable to decode jpeg" );
- Disconnect();
- return( -1 );
- }
- break;
- }
- case X_RGB :
- {
- if ( content_length != (long)image.Size() )
- {
- Error( "Image length mismatch, expected %d bytes, content length was %d", image.Size(), content_length );
- Disconnect();
- return( -1 );
- }
- image.Assign( width, height, colours, subpixelorder, buffer, imagesize );
- break;
- }
- case X_RGBZ :
- {
- if ( !image.Unzip( buffer.extract( content_length ), content_length ) )
- {
- Error( "Unable to unzip RGB image" );
- Disconnect();
- return( -1 );
- }
- image.Assign( width, height, colours, subpixelorder, buffer, imagesize );
- break;
- }
- default :
- {
- Error( "Unexpected image format encountered" );
- Disconnect();
- return( -1 );
- }
- }
- return( 0 );
+ }
+ }
+ return( 0 );
}
int RemoteCameraHttp::PostCapture()
{
- return( 0 );
+ return( 0 );
}
diff --git a/src/zm_remote_camera_http.h b/src/zm_remote_camera_http.h
index a5994b9b3..bd5712459 100644
--- a/src/zm_remote_camera_http.h
+++ b/src/zm_remote_camera_http.h
@@ -33,32 +33,32 @@
class RemoteCameraHttp : public RemoteCamera
{
protected:
- std::string request;
- struct timeval timeout;
- //struct hostent *hp;
- //struct sockaddr_in sa;
- int sd;
- Buffer buffer;
- enum { SINGLE_IMAGE, MULTI_IMAGE } mode;
- enum { UNDEF, JPEG, X_RGB, X_RGBZ } format;
- enum { HEADER, HEADERCONT, SUBHEADER, SUBHEADERCONT, CONTENT } state;
- enum { SIMPLE, REGEXP } method;
+ std::string request;
+ struct timeval timeout;
+ //struct hostent *hp;
+ //struct sockaddr_in sa;
+ int sd;
+ Buffer buffer;
+ enum { SINGLE_IMAGE, MULTI_IMAGE } mode;
+ enum { UNDEF, JPEG, X_RGB, X_RGBZ } format;
+ enum { HEADER, HEADERCONT, SUBHEADER, SUBHEADERCONT, CONTENT } state;
+ enum { SIMPLE, REGEXP } method;
public:
- RemoteCameraHttp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
- ~RemoteCameraHttp();
+ RemoteCameraHttp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
+ ~RemoteCameraHttp();
- void Initialise();
- void Terminate() { Disconnect(); }
- int Connect();
- int Disconnect();
- int SendRequest();
- int ReadData( Buffer &buffer, int bytes_expected=0 );
- int GetResponse();
- int PreCapture();
- int Capture( Image &image );
- int PostCapture();
- int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
+ void Initialise();
+ void Terminate() { Disconnect(); }
+ int Connect();
+ int Disconnect();
+ int SendRequest();
+ int ReadData( Buffer &buffer, int bytes_expected=0 );
+ int GetResponse();
+ int PreCapture();
+ int Capture( Image &image );
+ int PostCapture();
+ int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
};
#endif // ZM_REMOTE_CAMERA_HTTP_H
diff --git a/src/zm_remote_camera_rtsp.cpp b/src/zm_remote_camera_rtsp.cpp
index 78efbbaf9..0f79c459b 100644
--- a/src/zm_remote_camera_rtsp.cpp
+++ b/src/zm_remote_camera_rtsp.cpp
@@ -29,166 +29,166 @@
#include
RemoteCameraRtsp::RemoteCameraRtsp( int p_id, const std::string &p_method, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
- RemoteCamera( p_id, "rtsp", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
- rtsp_describe( p_rtsp_describe ),
- rtspThread( 0 )
+ RemoteCamera( p_id, "rtsp", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
+ rtsp_describe( p_rtsp_describe ),
+ rtspThread( 0 )
{
- if ( p_method == "rtpUni" )
- method = RtspThread::RTP_UNICAST;
- else if ( p_method == "rtpMulti" )
- method = RtspThread::RTP_MULTICAST;
- else if ( p_method == "rtpRtsp" )
- method = RtspThread::RTP_RTSP;
- else if ( p_method == "rtpRtspHttp" )
- method = RtspThread::RTP_RTSP_HTTP;
- else
- Fatal( "Unrecognised method '%s' when creating RTSP camera %d", p_method.c_str(), id );
+ if ( p_method == "rtpUni" )
+ method = RtspThread::RTP_UNICAST;
+ else if ( p_method == "rtpMulti" )
+ method = RtspThread::RTP_MULTICAST;
+ else if ( p_method == "rtpRtsp" )
+ method = RtspThread::RTP_RTSP;
+ else if ( p_method == "rtpRtspHttp" )
+ method = RtspThread::RTP_RTSP_HTTP;
+ else
+ Fatal( "Unrecognised method '%s' when creating RTSP camera %d", p_method.c_str(), id );
- if ( capture )
- {
- Initialise();
- }
-
- mFormatContext = NULL;
- mVideoStreamId = -1;
- mAudioStreamId = -1;
- mCodecContext = NULL;
- mCodec = NULL;
- mRawFrame = NULL;
- mFrame = NULL;
- frameCount = 0;
- wasRecording = false;
- startTime=0;
-
+ if ( capture )
+ {
+ Initialise();
+ }
+
+ mFormatContext = NULL;
+ mVideoStreamId = -1;
+ mAudioStreamId = -1;
+ mCodecContext = NULL;
+ mCodec = NULL;
+ mRawFrame = NULL;
+ mFrame = NULL;
+ frameCount = 0;
+ wasRecording = false;
+ startTime=0;
+
#if HAVE_LIBSWSCALE
- mConvertContext = NULL;
+ mConvertContext = NULL;
#endif
- /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
- if(colours == ZM_COLOUR_RGB32) {
- subpixelorder = ZM_SUBPIX_ORDER_RGBA;
- imagePixFormat = AV_PIX_FMT_RGBA;
- } else if(colours == ZM_COLOUR_RGB24) {
- subpixelorder = ZM_SUBPIX_ORDER_RGB;
- imagePixFormat = AV_PIX_FMT_RGB24;
- } else if(colours == ZM_COLOUR_GRAY8) {
- subpixelorder = ZM_SUBPIX_ORDER_NONE;
- imagePixFormat = AV_PIX_FMT_GRAY8;
- } else {
- Panic("Unexpected colours: %d",colours);
- }
-
+ /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
+ if(colours == ZM_COLOUR_RGB32) {
+ subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+ imagePixFormat = AV_PIX_FMT_RGBA;
+ } else if(colours == ZM_COLOUR_RGB24) {
+ subpixelorder = ZM_SUBPIX_ORDER_RGB;
+ imagePixFormat = AV_PIX_FMT_RGB24;
+ } else if(colours == ZM_COLOUR_GRAY8) {
+ subpixelorder = ZM_SUBPIX_ORDER_NONE;
+ imagePixFormat = AV_PIX_FMT_GRAY8;
+ } else {
+ Panic("Unexpected colours: %d",colours);
+ }
+
}
RemoteCameraRtsp::~RemoteCameraRtsp()
{
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- av_frame_free( &mFrame );
- av_frame_free( &mRawFrame );
+ av_frame_free( &mFrame );
+ av_frame_free( &mRawFrame );
#else
- av_freep( &mFrame );
- av_freep( &mRawFrame );
+ av_freep( &mFrame );
+ av_freep( &mRawFrame );
#endif
-
+
#if HAVE_LIBSWSCALE
- if ( mConvertContext )
- {
- sws_freeContext( mConvertContext );
- mConvertContext = NULL;
- }
+ if ( mConvertContext )
+ {
+ sws_freeContext( mConvertContext );
+ mConvertContext = NULL;
+ }
#endif
- if ( mCodecContext )
- {
- avcodec_close( mCodecContext );
- mCodecContext = NULL; // Freed by avformat_free_context in the destructor of RtspThread class
- }
+ if ( mCodecContext )
+ {
+ avcodec_close( mCodecContext );
+ mCodecContext = NULL; // Freed by avformat_free_context in the destructor of RtspThread class
+ }
- if ( capture )
- {
- Terminate();
- }
+ if ( capture )
+ {
+ Terminate();
+ }
}
void RemoteCameraRtsp::Initialise()
{
- RemoteCamera::Initialise();
+ RemoteCamera::Initialise();
- int max_size = width*height*colours;
+ int max_size = width*height*colours;
- // This allocates a buffer able to hold a raw fframe, which is a little artbitrary. Might be nice to get some
+ // This allocates a buffer able to hold a raw fframe, which is a little artbitrary. Might be nice to get some
// decent data on how large a buffer is really needed. I think in ffmpeg there are now some functions to do that.
- buffer.size( max_size );
+ buffer.size( max_size );
- if ( logDebugging() )
- av_log_set_level( AV_LOG_DEBUG );
- else
- av_log_set_level( AV_LOG_QUIET );
+ if ( logDebugging() )
+ av_log_set_level( AV_LOG_DEBUG );
+ else
+ av_log_set_level( AV_LOG_QUIET );
- av_register_all();
+ av_register_all();
- Connect();
+ Connect();
}
void RemoteCameraRtsp::Terminate()
{
- Disconnect();
+ Disconnect();
}
int RemoteCameraRtsp::Connect()
{
- rtspThread = new RtspThread( id, method, protocol, host, port, path, auth, rtsp_describe );
+ rtspThread = new RtspThread( id, method, protocol, host, port, path, auth, rtsp_describe );
- rtspThread->start();
+ rtspThread->start();
- return( 0 );
+ return( 0 );
}
int RemoteCameraRtsp::Disconnect()
{
- if ( rtspThread )
- {
- rtspThread->stop();
- rtspThread->join();
- delete rtspThread;
- rtspThread = 0;
- }
- return( 0 );
+ if ( rtspThread )
+ {
+ rtspThread->stop();
+ rtspThread->join();
+ delete rtspThread;
+ rtspThread = 0;
+ }
+ return( 0 );
}
int RemoteCameraRtsp::PrimeCapture()
{
- Debug( 2, "Waiting for sources" );
- for ( int i = 0; i < 100 && !rtspThread->hasSources(); i++ )
- {
- usleep( 100000 );
- }
- if ( !rtspThread->hasSources() )
- Fatal( "No RTSP sources" );
+ Debug( 2, "Waiting for sources" );
+ for ( int i = 0; i < 100 && !rtspThread->hasSources(); i++ )
+ {
+ usleep( 100000 );
+ }
+ if ( !rtspThread->hasSources() )
+ Fatal( "No RTSP sources" );
- Debug( 2, "Got sources" );
+ Debug( 2, "Got sources" );
- mFormatContext = rtspThread->getFormatContext();
+ mFormatContext = rtspThread->getFormatContext();
- // Find first video stream present
- mVideoStreamId = -1;
+ // Find first video stream present
+ mVideoStreamId = -1;
mAudioStreamId = -1;
-
- // Find the first video stream.
- for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ ) {
+
+ // Find the first video stream.
+ for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ ) {
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
- {
+ {
if ( mVideoStreamId == -1 ) {
mVideoStreamId = i;
continue;
} else {
Debug(2, "Have another video stream." );
}
- }
+ }
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
#else
@@ -203,39 +203,39 @@ int RemoteCameraRtsp::PrimeCapture()
}
}
- if ( mVideoStreamId == -1 )
- Fatal( "Unable to locate video stream" );
+ if ( mVideoStreamId == -1 )
+ Fatal( "Unable to locate video stream" );
if ( mAudioStreamId == -1 )
Debug( 3, "Unable to locate audio stream" );
- // Get a pointer to the codec context for the video stream
- mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
+ // Get a pointer to the codec context for the video stream
+ mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
- // Find the decoder for the video stream
- mCodec = avcodec_find_decoder( mCodecContext->codec_id );
- if ( mCodec == NULL )
- Panic( "Unable to locate codec %d decoder", mCodecContext->codec_id );
+ // Find the decoder for the video stream
+ mCodec = avcodec_find_decoder( mCodecContext->codec_id );
+ if ( mCodec == NULL )
+ Panic( "Unable to locate codec %d decoder", mCodecContext->codec_id );
- // Open codec
+ // Open codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
- if ( avcodec_open( mCodecContext, mCodec ) < 0 )
+ if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
- if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
+ if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
- Panic( "Can't open codec" );
+ Panic( "Can't open codec" );
- // Allocate space for the native video frame
+ // Allocate space for the native video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- mRawFrame = av_frame_alloc();
+ mRawFrame = av_frame_alloc();
#else
- mRawFrame = avcodec_alloc_frame();
+ mRawFrame = avcodec_alloc_frame();
#endif
- // Allocate space for the converted video frame
+ // Allocate space for the converted video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
- mFrame = av_frame_alloc();
+ mFrame = av_frame_alloc();
#else
- mFrame = avcodec_alloc_frame();
+ mFrame = avcodec_alloc_frame();
#endif
if(mRawFrame == NULL || mFrame == NULL)
@@ -247,152 +247,152 @@ int RemoteCameraRtsp::PrimeCapture()
int pSize = avpicture_get_size( imagePixFormat, width, height );
#endif
- if( (unsigned int)pSize != imagesize) {
- Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
- }
+ if( (unsigned int)pSize != imagesize) {
+ Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
+ }
/*
#if HAVE_LIBSWSCALE
- if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
- Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
- }
+ if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
+ Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
+ }
- if(!sws_isSupportedOutput(imagePixFormat)) {
- Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
- }
-
+ if(!sws_isSupportedOutput(imagePixFormat)) {
+ Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
+ }
+
#else // HAVE_LIBSWSCALE
- Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
+ Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
*/
- return( 0 );
+ return( 0 );
}
int RemoteCameraRtsp::PreCapture() {
- if ( !rtspThread->isRunning() )
- return( -1 );
- if ( !rtspThread->hasSources() )
- {
- Error( "Cannot precapture, no RTP sources" );
- return( -1 );
- }
- return( 0 );
+ if ( !rtspThread->isRunning() )
+ return( -1 );
+ if ( !rtspThread->hasSources() )
+ {
+ Error( "Cannot precapture, no RTP sources" );
+ return( -1 );
+ }
+ return( 0 );
}
int RemoteCameraRtsp::Capture( Image &image ) {
- AVPacket packet;
- uint8_t* directbuffer;
- int frameComplete = false;
-
- /* Request a writeable buffer of the target image */
- directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
- if(directbuffer == NULL) {
- Error("Failed requesting writeable buffer for the captured image.");
- return (-1);
- }
-
- while ( true ) {
- buffer.clear();
- if ( !rtspThread->isRunning() )
- return (-1);
+ AVPacket packet;
+ uint8_t* directbuffer;
+ int frameComplete = false;
+
+ /* Request a writeable buffer of the target image */
+ directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
+ if(directbuffer == NULL) {
+ Error("Failed requesting writeable buffer for the captured image.");
+ return (-1);
+ }
+
+ while ( true ) {
+ buffer.clear();
+ if ( !rtspThread->isRunning() )
+ return (-1);
- if ( rtspThread->getFrame( buffer ) ) {
- Debug( 3, "Read frame %d bytes", buffer.size() );
- Debug( 4, "Address %p", buffer.head() );
- Hexdump( 4, buffer.head(), 16 );
+ if ( rtspThread->getFrame( buffer ) ) {
+ Debug( 3, "Read frame %d bytes", buffer.size() );
+ Debug( 4, "Address %p", buffer.head() );
+ Hexdump( 4, buffer.head(), 16 );
- if ( !buffer.size() )
- return( -1 );
+ if ( !buffer.size() )
+ return( -1 );
- if(mCodecContext->codec_id == AV_CODEC_ID_H264) {
- // SPS and PPS frames should be saved and appended to IDR frames
- int nalType = (buffer.head()[3] & 0x1f);
-
- // SPS The SPS NAL unit contains parameters that apply to a series of consecutive coded video pictures
- if(nalType == 7)
- {
- lastSps = buffer;
- continue;
- }
- // PPS The PPS NAL unit contains parameters that apply to the decoding of one or more individual pictures inside a coded video sequence
- else if(nalType == 8)
- {
- lastPps = buffer;
- continue;
- }
- // IDR
- else if(nalType == 5)
- {
- buffer += lastSps;
- buffer += lastPps;
- }
+ if(mCodecContext->codec_id == AV_CODEC_ID_H264) {
+ // SPS and PPS frames should be saved and appended to IDR frames
+ int nalType = (buffer.head()[3] & 0x1f);
+
+ // SPS The SPS NAL unit contains parameters that apply to a series of consecutive coded video pictures
+ if(nalType == 7)
+ {
+ lastSps = buffer;
+ continue;
+ }
+ // PPS The PPS NAL unit contains parameters that apply to the decoding of one or more individual pictures inside a coded video sequence
+ else if(nalType == 8)
+ {
+ lastPps = buffer;
+ continue;
+ }
+ // IDR
+ else if(nalType == 5)
+ {
+ buffer += lastSps;
+ buffer += lastPps;
+ }
} else {
Debug(3, "Not an h264 packet");
- }
+ }
- av_init_packet( &packet );
-
- while ( !frameComplete && buffer.size() > 0 ) {
- packet.data = buffer.head();
- packet.size = buffer.size();
+ av_init_packet( &packet );
+
+ while ( !frameComplete && buffer.size() > 0 ) {
+ packet.data = buffer.head();
+ packet.size = buffer.size();
- // So I think this is the magic decode step. Result is a raw image?
- #if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
- int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
- #else
- int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
- #endif
- if ( len < 0 ) {
- Error( "Error while decoding frame %d", frameCount );
- Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
- buffer.clear();
- continue;
- }
- Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
- //if ( buffer.size() < 400 )
- //Hexdump( 0, buffer.head(), buffer.size() );
-
- buffer -= len;
- }
- // At this point, we either have a frame or ran out of buffer. What happens if we run out of buffer?
- if ( frameComplete ) {
-
- Debug( 3, "Got frame %d", frameCount );
-
- avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height );
-
- #if HAVE_LIBSWSCALE
- if(mConvertContext == NULL) {
- mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
+ // So I think this is the magic decode step. Result is a raw image?
+ #if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
+ int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
+ #else
+ int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
+ #endif
+ if ( len < 0 ) {
+ Error( "Error while decoding frame %d", frameCount );
+ Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
+ buffer.clear();
+ continue;
+ }
+ Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
+ //if ( buffer.size() < 400 )
+ //Hexdump( 0, buffer.head(), buffer.size() );
+
+ buffer -= len;
+ }
+ // At this point, we either have a frame or ran out of buffer. What happens if we run out of buffer?
+ if ( frameComplete ) {
+
+ Debug( 3, "Got frame %d", frameCount );
+
+ avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height );
+
+ #if HAVE_LIBSWSCALE
+ if(mConvertContext == NULL) {
+ mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
- if(mConvertContext == NULL)
- Fatal( "Unable to create conversion context");
- }
-
- if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
- Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
- #else // HAVE_LIBSWSCALE
- Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
- #endif // HAVE_LIBSWSCALE
-
- frameCount++;
+ if(mConvertContext == NULL)
+ Fatal( "Unable to create conversion context");
+ }
+
+ if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
+ Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
+ #else // HAVE_LIBSWSCALE
+ Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
+ #endif // HAVE_LIBSWSCALE
+
+ frameCount++;
- } /* frame complete */
-
- #if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
- av_packet_unref( &packet );
- #else
- av_free_packet( &packet );
- #endif
- } /* getFrame() */
-
- if(frameComplete)
- return (0);
-
- } // end while true
+ } /* frame complete */
+
+ #if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
+ av_packet_unref( &packet );
+ #else
+ av_free_packet( &packet );
+ #endif
+ } /* getFrame() */
+
+ if(frameComplete)
+ return (0);
+
+ } // end while true
- // can never get here.
- return (0) ;
+ // can never get here.
+ return (0) ;
}
//int RemoteCameraRtsp::ReadData(void *opaque, uint8_t *buf, int bufSize) {
@@ -408,84 +408,84 @@ int RemoteCameraRtsp::Capture( Image &image ) {
//Function to handle capture and store
int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* event_file ) {
- AVPacket packet;
- uint8_t* directbuffer;
- int frameComplete = false;
-
- /* Request a writeable buffer of the target image */
- directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
- if(directbuffer == NULL) {
- Error("Failed requesting writeable buffer for the captured image.");
- return (-1);
- }
-
- while ( true ) {
+ AVPacket packet;
+ uint8_t* directbuffer;
+ int frameComplete = false;
+
+ /* Request a writeable buffer of the target image */
+ directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
+ if(directbuffer == NULL) {
+ Error("Failed requesting writeable buffer for the captured image.");
+ return (-1);
+ }
+
+ while ( true ) {
- buffer.clear();
- if ( !rtspThread->isRunning() )
- return (-1);
+ buffer.clear();
+ if ( !rtspThread->isRunning() )
+ return (-1);
- if ( rtspThread->getFrame( buffer ) ) {
- Debug( 3, "Read frame %d bytes", buffer.size() );
- Debug( 4, "Address %p", buffer.head() );
- Hexdump( 4, buffer.head(), 16 );
+ if ( rtspThread->getFrame( buffer ) ) {
+ Debug( 3, "Read frame %d bytes", buffer.size() );
+ Debug( 4, "Address %p", buffer.head() );
+ Hexdump( 4, buffer.head(), 16 );
- if ( !buffer.size() )
- return( -1 );
+ if ( !buffer.size() )
+ return( -1 );
- if(mCodecContext->codec_id == AV_CODEC_ID_H264) {
- // SPS and PPS frames should be saved and appended to IDR frames
- int nalType = (buffer.head()[3] & 0x1f);
-
- // SPS
- if(nalType == 7) {
- lastSps = buffer;
- continue;
- }
- // PPS
- else if(nalType == 8) {
- lastPps = buffer;
- continue;
- }
- // IDR
- else if(nalType == 5) {
- buffer += lastSps;
- buffer += lastPps;
- }
- } // end if H264, what about other codecs?
+ if(mCodecContext->codec_id == AV_CODEC_ID_H264) {
+ // SPS and PPS frames should be saved and appended to IDR frames
+ int nalType = (buffer.head()[3] & 0x1f);
+
+ // SPS
+ if(nalType == 7) {
+ lastSps = buffer;
+ continue;
+ }
+ // PPS
+ else if(nalType == 8) {
+ lastPps = buffer;
+ continue;
+ }
+ // IDR
+ else if(nalType == 5) {
+ buffer += lastSps;
+ buffer += lastPps;
+ }
+ } // end if H264, what about other codecs?
- av_init_packet( &packet );
-
- // Why are we checking for it being the video stream? Because it might be audio or something else.
+ av_init_packet( &packet );
+
+ // Why are we checking for it being the video stream? Because it might be audio or something else.
// Um... we just initialized packet... we can't be testing for what it is yet....
- if ( packet.stream_index == mVideoStreamId ) {
-
- while ( !frameComplete && buffer.size() > 0 ) {
- packet.data = buffer.head();
- packet.size = buffer.size();
+ if ( packet.stream_index == mVideoStreamId ) {
+
+ while ( !frameComplete && buffer.size() > 0 ) {
+ packet.data = buffer.head();
+ packet.size = buffer.size();
- // So this does the decode
+ // So this does the decode
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
- int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
+ int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
#else
- int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
+ int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
#endif
- if ( len < 0 ) {
- Error( "Error while decoding frame %d", frameCount );
- Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
- buffer.clear();
- continue;
- }
- Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
- //if ( buffer.size() < 400 )
- //Hexdump( 0, buffer.head(), buffer.size() );
+ if ( len < 0 ) {
+ Error( "Error while decoding frame %d", frameCount );
+ Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
+ buffer.clear();
+ continue;
+ }
+ Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
+ //if ( buffer.size() < 400 )
+ //Hexdump( 0, buffer.head(), buffer.size() );
- buffer -= len;
- } // end while get & decode a frame
+ buffer -= len;
+ } // end while get & decode a frame
- if ( frameComplete ) {
+ if ( frameComplete ) {
- Debug( 3, "Got frame %d", frameCount );
+ Debug( 3, "Got frame %d", frameCount );
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(mFrame->data, mFrame->linesize,
@@ -496,98 +496,98 @@ int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* even
#endif
//Video recording
- if ( recording && !wasRecording ) {
- //Instantiate the video storage module
+ if ( recording && !wasRecording ) {
+ //Instantiate the video storage module
- videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() );
- wasRecording = true;
- strcpy(oldDirectory, event_file);
+ videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() );
+ wasRecording = true;
+ strcpy(oldDirectory, event_file);
- } else if ( !recording && wasRecording && videoStore ) {
- // Why are we deleting the videostore? Becase for soem reason we are no longer recording? How does that happen?
- Info("Deleting videoStore instance");
- delete videoStore;
- videoStore = NULL;
- }
+ } else if ( !recording && wasRecording && videoStore ) {
+ // Why are we deleting the videostore? Because for some reason we are no longer recording? How does that happen?
+ Info("Deleting videoStore instance");
+ delete videoStore;
+ videoStore = NULL;
+ }
- //The directory we are recording to is no longer tied to the current event. Need to re-init the videostore with the correct directory and start recording again
- if ( recording && wasRecording && (strcmp(oldDirectory, event_file)!=0) && (packet.flags & AV_PKT_FLAG_KEY) ) {
- //don't open new videostore until we're on a key frame..would this require an offset adjustment for the event as a result?...if we store our key frame location with the event will that be enough?
- Info("Re-starting video storage module");
- if ( videoStore ) {
- delete videoStore;
- videoStore = NULL;
- }
+ //The directory we are recording to is no longer tied to the current event. Need to re-init the videostore with the correct directory and start recording again
+ if ( recording && wasRecording && (strcmp(oldDirectory, event_file)!=0) && (packet.flags & AV_PKT_FLAG_KEY) ) {
+ //don't open new videostore until we're on a key frame..would this require an offset adjustment for the event as a result?...if we store our key frame location with the event will that be enough?
+ Info("Re-starting video storage module");
+ if ( videoStore ) {
+ delete videoStore;
+ videoStore = NULL;
+ }
- videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() );
- strcpy( oldDirectory, event_file );
- }
+ videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() );
+ strcpy( oldDirectory, event_file );
+ }
- if ( videoStore && recording ) {
- //Write the packet to our video store
- int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]);//, &lastKeyframePkt);
- if ( ret < 0 ) {//Less than zero and we skipped a frame
- av_free_packet( &packet );
- return 0;
- }
- }
+ if ( videoStore && recording ) {
+ //Write the packet to our video store
+ int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]);//, &lastKeyframePkt);
+ if ( ret < 0 ) {//Less than zero and we skipped a frame
+ av_free_packet( &packet );
+ return 0;
+ }
+ }
#if HAVE_LIBSWSCALE
// Why are we re-scaling after writing out the packet?
- if(mConvertContext == NULL) {
- mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
+ if(mConvertContext == NULL) {
+ mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
- if(mConvertContext == NULL)
- Fatal( "Unable to create conversion context");
- }
+ if(mConvertContext == NULL)
+ Fatal( "Unable to create conversion context");
+ }
- if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
- Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
+ if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
+ Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
- Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
+ Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
- frameCount++;
+ frameCount++;
- } /* frame complete */
- } else if ( packet.stream_index == mAudioStreamId ) {
- Debug( 4, "Got audio packet" );
- if ( videoStore && recording ) {
- if ( record_audio ) {
- Debug( 4, "Storing Audio packet" );
- //Write the packet to our video store
- int ret = videoStore->writeAudioFramePacket(&packet, mFormatContext->streams[packet.stream_index]); //FIXME no relevance of last key frame
- if ( ret < 0 ) { //Less than zero and we skipped a frame
+ } /* frame complete */
+ } else if ( packet.stream_index == mAudioStreamId ) {
+ Debug( 4, "Got audio packet" );
+ if ( videoStore && recording ) {
+ if ( record_audio ) {
+ Debug( 4, "Storing Audio packet" );
+ //Write the packet to our video store
+ int ret = videoStore->writeAudioFramePacket(&packet, mFormatContext->streams[packet.stream_index]); //FIXME no relevance of last key frame
+ if ( ret < 0 ) { //Less than zero and we skipped a frame
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
- av_packet_unref( &packet );
+ av_packet_unref( &packet );
#else
- av_free_packet( &packet );
+ av_free_packet( &packet );
#endif
- return 0;
- }
- } else {
- Debug( 4, "Not storing audio" );
- }
- }
- } // end if video or audio packet
-
+ return 0;
+ }
+ } else {
+ Debug( 4, "Not storing audio" );
+ }
+ }
+ } // end if video or audio packet
+
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
- av_packet_unref( &packet );
+ av_packet_unref( &packet );
#else
- av_free_packet( &packet );
+ av_free_packet( &packet );
#endif
- } /* getFrame() */
-
- if(frameComplete)
- return (0);
- } // end while true
+ } /* getFrame() */
+
+ if(frameComplete)
+ return (0);
+ } // end while true
// can never get here.
- return (0) ;
+ return (0) ;
} // int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* event_file )
int RemoteCameraRtsp::PostCapture()
{
- return( 0 );
+ return( 0 );
}
#endif // HAVE_LIBAVFORMAT
diff --git a/src/zm_remote_camera_rtsp.h b/src/zm_remote_camera_rtsp.h
index 6de7fddc1..579452ca5 100644
--- a/src/zm_remote_camera_rtsp.h
+++ b/src/zm_remote_camera_rtsp.h
@@ -36,56 +36,56 @@
class RemoteCameraRtsp : public RemoteCamera
{
protected:
- struct sockaddr_in rtsp_sa;
- struct sockaddr_in rtcp_sa;
- int rtsp_sd;
- int rtp_sd;
- int rtcp_sd;
- bool rtsp_describe;
+ struct sockaddr_in rtsp_sa;
+ struct sockaddr_in rtcp_sa;
+ int rtsp_sd;
+ int rtp_sd;
+ int rtcp_sd;
+ bool rtsp_describe;
- Buffer buffer;
- Buffer lastSps;
- Buffer lastPps;
+ Buffer buffer;
+ Buffer lastSps;
+ Buffer lastPps;
- RtspThread::RtspMethod method;
+ RtspThread::RtspMethod method;
- RtspThread *rtspThread;
+ RtspThread *rtspThread;
+
+ int frameCount;
- int frameCount;
-
#if HAVE_LIBAVFORMAT
- AVFormatContext *mFormatContext;
- int mVideoStreamId;
- int mAudioStreamId;
- AVCodecContext *mCodecContext;
- AVCodec *mCodec;
- AVFrame *mRawFrame;
- AVFrame *mFrame;
- _AVPIXELFORMAT imagePixFormat;
+ AVFormatContext *mFormatContext;
+ int mVideoStreamId;
+ int mAudioStreamId;
+ AVCodecContext *mCodecContext;
+ AVCodec *mCodec;
+ AVFrame *mRawFrame;
+ AVFrame *mFrame;
+ _AVPIXELFORMAT imagePixFormat;
#endif // HAVE_LIBAVFORMAT
- bool wasRecording;
- VideoStore *videoStore;
- char oldDirectory[4096];
- int64_t startTime;
+ bool wasRecording;
+ VideoStore *videoStore;
+ char oldDirectory[4096];
+ int64_t startTime;
#if HAVE_LIBSWSCALE
- struct SwsContext *mConvertContext;
+ struct SwsContext *mConvertContext;
#endif
public:
- RemoteCameraRtsp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
- ~RemoteCameraRtsp();
+ RemoteCameraRtsp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
+ ~RemoteCameraRtsp();
- void Initialise();
- void Terminate();
- int Connect();
- int Disconnect();
+ void Initialise();
+ void Terminate();
+ int Connect();
+ int Disconnect();
- int PrimeCapture();
- int PreCapture();
- int Capture( Image &image );
- int PostCapture();
- int CaptureAndRecord( Image &image, bool recording, char* event_directory );
+ int PrimeCapture();
+ int PreCapture();
+ int Capture( Image &image );
+ int PostCapture();
+ int CaptureAndRecord( Image &image, bool recording, char* event_directory );
};
#endif // ZM_REMOTE_CAMERA_RTSP_H
diff --git a/web/skins/classic/views/frame.php b/web/skins/classic/views/frame.php
index eb676c584..cf7e925ca 100644
--- a/web/skins/classic/views/frame.php
+++ b/web/skins/classic/views/frame.php
@@ -82,8 +82,11 @@ xhtmlHeaders(__FILE__, translate('Frame')." - ".$event['Id']." - ".$frame->Frame
FrameId() ?>">
">
-
FrameId() ?>" class=""/>
+if ( $imageData['hasAnalImage'] ) { ?>
+">
+
+
FrameId() ?>" class=""/>
+