From 1df10553bb6221be627233f43fb10f5415da9954 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Fri, 25 Jan 2013 23:45:41 +0400 Subject: [PATCH 01/25] fixed bugs #1373, #2629, #2719 --- .../core/include/opencv2/core/operations.hpp | 2 +- modules/core/include/opencv2/core/types_c.h | 22 ++++--------------- modules/core/src/matrix.cpp | 3 ++- 3 files changed, 7 insertions(+), 20 deletions(-) diff --git a/modules/core/include/opencv2/core/operations.hpp b/modules/core/include/opencv2/core/operations.hpp index fc40f5724b..63fc23c3b5 100644 --- a/modules/core/include/opencv2/core/operations.hpp +++ b/modules/core/include/opencv2/core/operations.hpp @@ -716,7 +716,7 @@ template struct CV_EXPORTS Matx_DetOp double operator ()(const Matx<_Tp, m, m>& a) const { Matx<_Tp, m, m> temp = a; - double p = LU(temp.val, m, m, 0, 0, 0); + double p = LU(temp.val, m*sizeof(_Tp), m, 0, 0, 0); if( p == 0 ) return p; for( int i = 0; i < m; i++ ) diff --git a/modules/core/include/opencv2/core/types_c.h b/modules/core/include/opencv2/core/types_c.h index cbc7872e61..33e7fe9934 100644 --- a/modules/core/include/opencv2/core/types_c.h +++ b/modules/core/include/opencv2/core/types_c.h @@ -342,9 +342,8 @@ CV_INLINE int cvFloor( double value ) return i - (i > value); #else int i = cvRound(value); - Cv32suf diff; - diff.f = (float)(value - i); - return i - (diff.i < 0); + float diff = (float)(value - i); + return i - (diff < 0); #endif } @@ -360,9 +359,8 @@ CV_INLINE int cvCeil( double value ) return i + (i < value); #else int i = cvRound(value); - Cv32suf diff; - diff.f = (float)(i - value); - return i + (diff.i < 0); + float diff = (float)(i - value); + return i + (diff < 0); #endif } @@ -371,31 +369,19 @@ CV_INLINE int cvCeil( double value ) CV_INLINE int cvIsNaN( double value ) { -#if 1/*defined _MSC_VER || defined __BORLANDC__ - return _isnan(value); -#elif defined __GNUC__ - return isnan(value); -#else*/ Cv64suf ieee754; ieee754.f = value; return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) + ((unsigned)ieee754.u != 0) > 0x7ff00000; -#endif } CV_INLINE int cvIsInf( double value ) { -#if 1/*defined _MSC_VER || defined __BORLANDC__ - return !_finite(value); -#elif defined __GNUC__ - return isinf(value); -#else*/ Cv64suf ieee754; ieee754.f = value; return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 && (unsigned)ieee754.u == 0; -#endif } diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index 7b58debfc7..21dfd6141d 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -830,7 +830,8 @@ int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) con { return (depth() == _depth || _depth <= 0) && (isContinuous() || !_requireContinuous) && - ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) || (cols == _elemChannels))) || + ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) || + (cols == _elemChannels && channels() == 1))) || (dims == 3 && channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) && (isContinuous() || step.p[1] == step.p[2]*size.p[2]))) ? (int)(total()*channels()/_elemChannels) : -1; From d90b8d615cd3e8fa881954e8831b998b8d663bd6 Mon Sep 17 00:00:00 2001 From: Patrick Welche Date: Mon, 17 Sep 2012 11:08:37 +0200 Subject: [PATCH 02/25] NetBSD video(4) support, patch 1 of 3 The video(4) driver provides a Video4Linux2 compatible API for various video peripherals. 
This patch propagates HAVE_VIDEOIO if the sys/videoio.h include file is found, which is the signature of video(4). --- CMakeLists.txt | 2 +- cmake/OpenCVFindLibsVideo.cmake | 1 + cmake/templates/cvconfig.h.cmake | 3 +++ modules/highgui/CMakeLists.txt | 2 +- modules/highgui/src/cap.cpp | 3 ++- modules/highgui/src/cap_v4l.cpp | 7 ++++++- 6 files changed, 14 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0e5cd9ec03..c977f97662 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -722,7 +722,7 @@ if(DEFINED WITH_V4L) else() set(HAVE_CAMV4L_STR "NO") endif() - if(HAVE_CAMV4L2) + if(HAVE_CAMV4L2 OR HAVE_VIDEOIO) set(HAVE_CAMV4L2_STR "YES") else() set(HAVE_CAMV4L2_STR "NO") diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index fdc9ea27f7..cfa4dcab97 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -90,6 +90,7 @@ if(WITH_V4L) CHECK_MODULE(libv4l1 HAVE_LIBV4L) CHECK_INCLUDE_FILE(linux/videodev.h HAVE_CAMV4L) CHECK_INCLUDE_FILE(linux/videodev2.h HAVE_CAMV4L2) + CHECK_INCLUDE_FILE(sys/videoio.h HAVE_VIDEOIO) endif(WITH_V4L) # --- OpenNI --- diff --git a/cmake/templates/cvconfig.h.cmake b/cmake/templates/cvconfig.h.cmake index 368905fd9c..e44e1359a2 100644 --- a/cmake/templates/cvconfig.h.cmake +++ b/cmake/templates/cvconfig.h.cmake @@ -19,6 +19,9 @@ /* V4L2 capturing support */ #cmakedefine HAVE_CAMV4L2 +/* V4L2 capturing support in videoio.h */ +#cmakedefine HAVE_VIDEOIO + /* V4L/V4L2 capturing support via libv4l */ #cmakedefine HAVE_LIBV4L diff --git a/modules/highgui/CMakeLists.txt b/modules/highgui/CMakeLists.txt index b4b2896d7a..3c8f9ba9ab 100644 --- a/modules/highgui/CMakeLists.txt +++ b/modules/highgui/CMakeLists.txt @@ -131,7 +131,7 @@ endif(HAVE_UNICAP) if(HAVE_LIBV4L) list(APPEND highgui_srcs src/cap_libv4l.cpp) -elseif(HAVE_CAMV4L OR HAVE_CAMV4L2) +elseif(HAVE_CAMV4L OR HAVE_CAMV4L2 OR HAVE_VIDEOIO) list(APPEND highgui_srcs src/cap_v4l.cpp) endif() diff --git a/modules/highgui/src/cap.cpp b/modules/highgui/src/cap.cpp index f8d32e7eee..821478f1b3 100644 --- a/modules/highgui/src/cap.cpp +++ b/modules/highgui/src/cap.cpp @@ -173,6 +173,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) defined(HAVE_VFW) || \ defined(HAVE_LIBV4L) || \ (defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \ + defined(HAVE_VIDEOIO) || \ defined(HAVE_GSTREAMER) || \ defined(HAVE_DC1394_2) || \ defined(HAVE_DC1394) || \ @@ -216,7 +217,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) return capture; #endif -#if defined HAVE_LIBV4L || (defined (HAVE_CAMV4L) && defined (HAVE_CAMV4L2)) +#if defined HAVE_LIBV4L || (defined (HAVE_CAMV4L) && defined (HAVE_CAMV4L2)) || defined HAVE_VIDEOIO capture = cvCreateCameraCapture_V4L (index); if (capture) return capture; diff --git a/modules/highgui/src/cap_v4l.cpp b/modules/highgui/src/cap_v4l.cpp index 2ab4567d15..33e0f386aa 100644 --- a/modules/highgui/src/cap_v4l.cpp +++ b/modules/highgui/src/cap_v4l.cpp @@ -202,7 +202,7 @@ make & enjoy! #include "precomp.hpp" -#if !defined WIN32 && defined HAVE_CAMV4L && defined HAVE_CAMV4L2 +#if !defined WIN32 && ((defined HAVE_CAMV4L && defined HAVE_CAMV4L2) || defined HAVE_VIDEOIO) #define CLEAR(x) memset (&(x), 0, sizeof (x)) @@ -227,6 +227,11 @@ make & enjoy! #include #endif +#ifdef HAVE_VIDEOIO +#include +#define HAVE_CAMV4L2 +#endif + /* Defaults - If your board can do better, set it here. Set for the most common type inputs. 
*/ #define DEFAULT_V4L_WIDTH 640 #define DEFAULT_V4L_HEIGHT 480 From 681ffd9a215c5be006999a57b890ee446b4bd801 Mon Sep 17 00:00:00 2001 From: Patrick Welche Date: Mon, 17 Sep 2012 12:03:35 +0200 Subject: [PATCH 03/25] NetBSD video(4) support, patch 2 of 3 * Decouple Video4Linux2 support from Video4Linux as existence of v4l2 on a system does not imply support for v4l. * Don't use V4L's struct video_window in V4L2 code. * Removed __USE_GNU as comment says: /* support for MJPEG is only available with libjpeg and gcc, because it's use libjepg and fmemopen() so replace with test for fmemopen() if found necessary. --- modules/highgui/src/cap.cpp | 5 +- modules/highgui/src/cap_v4l.cpp | 181 +++++++++++++++++++++----------- 2 files changed, 123 insertions(+), 63 deletions(-) diff --git a/modules/highgui/src/cap.cpp b/modules/highgui/src/cap.cpp index 821478f1b3..13475f2633 100644 --- a/modules/highgui/src/cap.cpp +++ b/modules/highgui/src/cap.cpp @@ -172,7 +172,8 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) defined(HAVE_TYZX) || \ defined(HAVE_VFW) || \ defined(HAVE_LIBV4L) || \ - (defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \ + defined(HAVE_CAMV4L) || \ + defined(HAVE_CAMV4L2) || \ defined(HAVE_VIDEOIO) || \ defined(HAVE_GSTREAMER) || \ defined(HAVE_DC1394_2) || \ @@ -217,7 +218,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) return capture; #endif -#if defined HAVE_LIBV4L || (defined (HAVE_CAMV4L) && defined (HAVE_CAMV4L2)) || defined HAVE_VIDEOIO +#if defined HAVE_LIBV4L || defined HAVE_CAMV4L || defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO capture = cvCreateCameraCapture_V4L (index); if (capture) return capture; diff --git a/modules/highgui/src/cap_v4l.cpp b/modules/highgui/src/cap_v4l.cpp index 33e0f386aa..a0d51e8d89 100644 --- a/modules/highgui/src/cap_v4l.cpp +++ b/modules/highgui/src/cap_v4l.cpp @@ -202,7 +202,7 @@ make & enjoy! #include "precomp.hpp" -#if !defined WIN32 && ((defined HAVE_CAMV4L && defined HAVE_CAMV4L2) || defined HAVE_VIDEOIO) +#if !defined WIN32 && (defined HAVE_CAMV4L || defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO) #define CLEAR(x) memset (&(x), 0, sizeof (x)) @@ -214,16 +214,18 @@ make & enjoy! #include #include +#ifdef HAVE_CAMVAL #include +#endif #include #include -#include /* for videodev2.h */ #include #include #include #ifdef HAVE_CAMV4L2 +#include /* for videodev2.h */ #include #endif @@ -293,11 +295,13 @@ typedef struct CvCaptureCAM_V4L int deviceHandle; int bufferIndex; int FirstCapture; +#ifdef HAVE_CAMV4L struct video_capability capability; struct video_window captureWindow; struct video_picture imageProperties; struct video_mbuf memoryBuffer; struct video_mmap *mmaps; +#endif /* HAVE_CAMV4L */ char *memoryMap; IplImage frame; @@ -350,9 +354,6 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h); static int numCameras = 0; static int indexList = 0; -#ifdef HAVE_CAMV4L2 - -// IOCTL handling for V4L2 static int xioctl( int fd, int request, void *arg) { @@ -366,8 +367,6 @@ static int xioctl( int fd, int request, void *arg) } -#endif /* HAVE_CAMV4L2 */ - /* Simple test program: Find number of Video Sources available. Start from 0 and go to MAX_CAMERAS while checking for the device with that name. If it fails on the first attempt of /dev/video0, then check if /dev/video is valid. 
@@ -398,6 +397,8 @@ static void icvInitCapture_V4L() { }; /* End icvInitCapture_V4L */ +#ifdef HAVE_CAMV4L + static int try_palette(int fd, struct video_picture *cam_pic, @@ -415,6 +416,8 @@ try_palette(int fd, return 0; } +#endif /* HAVE_CAMV4L */ + #ifdef HAVE_CAMV4L2 static int try_palette_v4l2(CvCaptureCAM_V4L* capture, unsigned long colorspace) @@ -439,6 +442,8 @@ static int try_palette_v4l2(CvCaptureCAM_V4L* capture, unsigned long colorspace) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L + static int try_init_v4l(CvCaptureCAM_V4L* capture, char *deviceName) { @@ -454,7 +459,6 @@ static int try_init_v4l(CvCaptureCAM_V4L* capture, char *deviceName) /* No matter what the name - it still must be opened! */ capture->deviceHandle = open(deviceName, O_RDWR); - if (capture->deviceHandle == 0) { detect = -1; @@ -468,7 +472,6 @@ static int try_init_v4l(CvCaptureCAM_V4L* capture, char *deviceName) if (ioctl(capture->deviceHandle, VIDIOCGCAP, &capture->capability) < 0) { detect = 0; - icvCloseCAM_V4L(capture); } else @@ -481,54 +484,64 @@ static int try_init_v4l(CvCaptureCAM_V4L* capture, char *deviceName) } +#endif /* HAVE_CAMV4L */ + #ifdef HAVE_CAMV4L2 static int try_init_v4l2(CvCaptureCAM_V4L* capture, char *deviceName) { - - // if detect = -1 then unable to open device - // if detect = 0 then detected nothing - // if detect = 1 then V4L2 device - int detect = 0; - - // Test device for V4L2 compability + // Return value: + // -1 then unable to open device + // 0 then detected nothing + // 1 then V4L2 device + + int deviceIndex; /* Open and test V4L2 device */ capture->deviceHandle = open (deviceName, O_RDWR /* required */ | O_NONBLOCK, 0); - - - - if (capture->deviceHandle == 0) + if (-1 == capture->deviceHandle) { - detect = -1; - +#ifndef NDEBUG + fprintf(stderr, "(DEBUG) try_init_v4l2 open \"%s\": %s\n", deviceName, strerror(errno)); +#endif icvCloseCAM_V4L(capture); + return -1; } - if (detect == 0) + CLEAR (capture->cap); + if (-1 == xioctl (capture->deviceHandle, VIDIOC_QUERYCAP, &capture->cap)) { - CLEAR (capture->cap); - if (-1 == xioctl (capture->deviceHandle, VIDIOC_QUERYCAP, &capture->cap)) - { - detect = 0; +#ifndef NDEBUG + fprintf(stderr, "(DEBUG) try_init_v4l2 VIDIOC_QUERYCAP \"%s\": %s\n", deviceName, strerror(errno)); +#endif + icvCloseCAM_V4L(capture); + return 0; + } - icvCloseCAM_V4L(capture); - } - else - { - CLEAR (capture->capability); - capture->capability.type = capture->cap.capabilities; + /* Query channels number */ + if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_INPUT, &deviceIndex)) + { +#ifndef NDEBUG + fprintf(stderr, "(DEBUG) try_init_v4l2 VIDIOC_G_INPUT \"%s\": %s\n", deviceName, strerror(errno)); +#endif + icvCloseCAM_V4L(capture); + return 0; + } - /* Query channels number */ - if (-1 != xioctl (capture->deviceHandle, VIDIOC_G_INPUT, &capture->capability.channels)) - { - detect = 1; - } - } + /* Query information about current input */ + CLEAR (capture->inp); + capture->inp.index = deviceIndex; + if (-1 == xioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) + { +#ifndef NDEBUG + fprintf(stderr, "(DEBUG) try_init_v4l2 VIDIOC_ENUMINPUT \"%s\": %s\n", deviceName, strerror(errno)); +#endif + icvCloseCAM_V4L(capture); + return 0; } - return detect; + return 1; } @@ -551,17 +564,12 @@ static int autosetup_capture_mode_v4l2(CvCaptureCAM_V4L* capture) else #ifdef HAVE_JPEG -#ifdef __USE_GNU - /* support for MJPEG is only available with libjpeg and gcc, - because it's use libjepg and fmemopen() - */ if (try_palette_v4l2(capture, V4L2_PIX_FMT_MJPEG) 
== 0 || try_palette_v4l2(capture, V4L2_PIX_FMT_JPEG) == 0) { capture->palette = PALETTE_MJPEG; } else -#endif #endif if (try_palette_v4l2(capture, V4L2_PIX_FMT_YUYV) == 0) @@ -598,6 +606,8 @@ static int autosetup_capture_mode_v4l2(CvCaptureCAM_V4L* capture) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L + static int autosetup_capture_mode_v4l(CvCaptureCAM_V4L* capture) { @@ -631,6 +641,8 @@ static int autosetup_capture_mode_v4l(CvCaptureCAM_V4L* capture) } +#endif /* HAVE_CAMV4L */ + #ifdef HAVE_CAMV4L2 static void v4l2_scan_controls_enumerate_menu(CvCaptureCAM_V4L* capture) @@ -981,8 +993,8 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) /* Set up Image data */ cvInitImageHeader( &capture->frame, - cvSize( capture->captureWindow.width, - capture->captureWindow.height ), + cvSize( capture->form.fmt.pix.width, + capture->form.fmt.pix.height ), IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, 4 ); /* Allocate space for RGBA data */ capture->frame.imageData = (char *)cvAlloc(capture->frame.imageSize); @@ -992,6 +1004,8 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L + static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) { int detect_v4l = 0; @@ -1110,6 +1124,8 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) return 1; }; /* End _capture_V4L */ +#endif /* HAVE_CAMV4L */ + static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index) { static int autoindex; @@ -1159,10 +1175,12 @@ static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index) icvCloseCAM_V4L(capture); V4L2_SUPPORT = 0; #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L if (_capture_V4L (capture, deviceName) == -1) { icvCloseCAM_V4L(capture); return NULL; } +#endif /* HAVE_CAMV4L */ #ifdef HAVE_CAMV4L2 } else { V4L2_SUPPORT = 1; @@ -1271,7 +1289,9 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { #ifdef HAVE_CAMV4L2 +#ifdef HAVE_CAMV4L if (V4L2_SUPPORT == 1) +#endif { for (capture->bufferIndex = 0; @@ -1301,8 +1321,12 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { perror ("VIDIOC_STREAMON"); return 0; } - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { for (capture->bufferIndex = 0; @@ -1321,6 +1345,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { } } +#endif /* HAVE_CAMV4L */ #if defined(V4L_ABORT_BADJPEG) && defined(HAVE_CAMV4L2) if (V4L2_SUPPORT == 1) @@ -1342,8 +1367,12 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { mainloop_v4l2(capture); - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { capture->mmaps[capture->bufferIndex].frame = capture->bufferIndex; @@ -1363,6 +1392,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { } } +#endif /* HAVE_CAMV4L */ return(1); } @@ -2080,6 +2110,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { #ifdef HAVE_CAMV4L2 if (V4L2_SUPPORT == 0) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { /* [FD] this really belongs here */ @@ -2088,6 +2119,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { } } +#endif /* HAVE_CAMV4L */ /* Now get what has already been captured as a IplImage return */ @@ -2108,8 +2140,12 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { capture->frame.imageData = (char 
*)cvAlloc(capture->frame.imageSize); } - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { if((capture->frame.width != capture->mmaps[capture->bufferIndex].width) @@ -2123,6 +2159,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { } } +#endif /* HAVE_CAMV4L */ #ifdef HAVE_CAMV4L2 @@ -2150,10 +2187,6 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { (unsigned char*)capture->frame.imageData); break; #ifdef HAVE_JPEG -#ifdef __USE_GNU - /* support for MJPEG is only available with libjpeg and gcc, - because it's use libjepg and fmemopen() - */ case PALETTE_MJPEG: if (!mjpeg_to_rgb24(capture->form.fmt.pix.width, capture->form.fmt.pix.height, @@ -2163,7 +2196,6 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { (unsigned char*)capture->frame.imageData)) return 0; break; -#endif #endif case PALETTE_YUYV: @@ -2206,8 +2238,12 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { (unsigned char*)capture->frame.imageData); break; } - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { switch(capture->imageProperties.palette) { @@ -2243,6 +2279,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { } } +#endif /* HAVE_CAMV4L */ return(&capture->frame); } @@ -2252,7 +2289,9 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, #ifdef HAVE_CAMV4L2 +#ifdef HAVE_CAMV4L if (V4L2_SUPPORT == 1) +#endif { /* default value for min and max */ @@ -2363,8 +2402,12 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, /* all was OK, so convert to 0.0 - 1.0 range, and return the value */ return ((float)capture->control.value - v4l2_min + 1) / (v4l2_max - v4l2_min); - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { int retval = -1; @@ -2422,6 +2465,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, return float (retval) / 0xFFFF; } +#endif /* HAVE_CAMV4L */ }; @@ -2494,8 +2538,12 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { return 0; - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { if (capture==0) return 0; @@ -2522,6 +2570,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { capture->FirstCapture = 1; } +#endif /* HAVE_CAMV4L */ return 0; @@ -2653,8 +2702,12 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, perror ("VIDIOC_S_CTRL"); return -1; } - } else + } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { int v4l_value; @@ -2699,6 +2752,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, return -1; } } +#endif /* HAVE_CAMV4L */ /* all was OK */ return 0; @@ -2759,6 +2813,7 @@ static void icvCloseCAM_V4L( CvCaptureCAM_V4L* capture ){ #ifdef HAVE_CAMV4L2 if (V4L2_SUPPORT == 0) #endif /* HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L { if (capture->mmaps) @@ -2767,10 +2822,14 @@ static void icvCloseCAM_V4L( CvCaptureCAM_V4L* capture ){ munmap(capture->memoryMap, capture->memoryBuffer.size); } +#endif /* HAVE_CAMV4L */ +#if defined(HAVE_CAMV4L) && 
defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ #ifdef HAVE_CAMV4L2 - else { + { capture->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (ioctl(capture->deviceHandle, VIDIOC_STREAMOFF, &capture->type) < 0) { + if (-1 == ioctl(capture->deviceHandle, VIDIOC_STREAMOFF, &capture->type)) { perror ("Unable to stop the stream."); } From 1a84bcc56545d15b88bc6a159906f37879e778e9 Mon Sep 17 00:00:00 2001 From: Patrick Welche Date: Mon, 17 Sep 2012 14:46:54 +0200 Subject: [PATCH 04/25] NetBSD video(4) support, patch 3 of 3 xioctl() assumes that ioctl takes int request. Cope with int ioctl(int d, unsigned long request, ...) to avoid "invalid argument". --- cmake/OpenCVFindLibsVideo.cmake | 8 +++++++- cmake/templates/cvconfig.h.cmake | 3 +++ modules/highgui/src/cap_libv4l.cpp | 4 ++++ modules/highgui/src/cap_v4l.cpp | 7 ++++--- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index cfa4dcab97..323b1cbe96 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -85,12 +85,18 @@ if(WITH_XINE) endif(WITH_XINE) # --- V4L --- -ocv_clear_vars(HAVE_LIBV4L HAVE_CAMV4L HAVE_CAMV4L2) +ocv_clear_vars(HAVE_LIBV4L HAVE_CAMV4L HAVE_CAMV4L2 HAVE_IOCTL_ULONG) if(WITH_V4L) CHECK_MODULE(libv4l1 HAVE_LIBV4L) CHECK_INCLUDE_FILE(linux/videodev.h HAVE_CAMV4L) CHECK_INCLUDE_FILE(linux/videodev2.h HAVE_CAMV4L2) CHECK_INCLUDE_FILE(sys/videoio.h HAVE_VIDEOIO) + INCLUDE(CheckPrototypeDefinition) + CHECK_PROTOTYPE_DEFINITION(ioctl + "int ioctl(int d, unsigned long request, ...)" + "-1" + "sys/ioctl.h" + HAVE_IOCTL_ULONG) endif(WITH_V4L) # --- OpenNI --- diff --git a/cmake/templates/cvconfig.h.cmake b/cmake/templates/cvconfig.h.cmake index e44e1359a2..dab3ec8d51 100644 --- a/cmake/templates/cvconfig.h.cmake +++ b/cmake/templates/cvconfig.h.cmake @@ -25,6 +25,9 @@ /* V4L/V4L2 capturing support via libv4l */ #cmakedefine HAVE_LIBV4L +/* ioctl takes unsigned long request rather than int */ +#cmakedefine HAVE_IOCTL_ULONG + /* Carbon windowing environment */ #cmakedefine HAVE_CARBON diff --git a/modules/highgui/src/cap_libv4l.cpp b/modules/highgui/src/cap_libv4l.cpp index 01b611c66c..63a2ff96b0 100644 --- a/modules/highgui/src/cap_libv4l.cpp +++ b/modules/highgui/src/cap_libv4l.cpp @@ -346,7 +346,11 @@ static int numCameras = 0; static int indexList = 0; // IOCTL handling for V4L2 +#ifdef HAVE_IOCTL_ULONG +static int xioctl( int fd, unsigned long request, void *arg) +#else static int xioctl( int fd, int request, void *arg) +#endif { int r; diff --git a/modules/highgui/src/cap_v4l.cpp b/modules/highgui/src/cap_v4l.cpp index a0d51e8d89..3510ca3c4d 100644 --- a/modules/highgui/src/cap_v4l.cpp +++ b/modules/highgui/src/cap_v4l.cpp @@ -354,17 +354,18 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h); static int numCameras = 0; static int indexList = 0; +#ifdef HAVE_IOCTL_ULONG +static int xioctl( int fd, unsigned long request, void *arg) +#else static int xioctl( int fd, int request, void *arg) +#endif { - int r; - do r = ioctl (fd, request, arg); while (-1 == r && EINTR == errno); return r; - } /* Simple test program: Find number of Video Sources available. 
From 146ca61a2780afae82e053aa2126d705ddaea383 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Mon, 28 Jan 2013 17:27:08 +0400 Subject: [PATCH 05/25] added tests for #1373, #2629, #2719; fixed another bug in determinant(Matx) --- .../core/include/opencv2/core/operations.hpp | 2 +- modules/core/test/test_operations.cpp | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/modules/core/include/opencv2/core/operations.hpp b/modules/core/include/opencv2/core/operations.hpp index 63fc23c3b5..1ee96bb09f 100644 --- a/modules/core/include/opencv2/core/operations.hpp +++ b/modules/core/include/opencv2/core/operations.hpp @@ -721,7 +721,7 @@ template struct CV_EXPORTS Matx_DetOp return p; for( int i = 0; i < m; i++ ) p *= temp(i, i); - return p; + return 1./p; } }; diff --git a/modules/core/test/test_operations.cpp b/modules/core/test/test_operations.cpp index ad201ea0ca..6b36883cfe 100644 --- a/modules/core/test/test_operations.cpp +++ b/modules/core/test/test_operations.cpp @@ -998,6 +998,23 @@ bool CV_OperationsTest::operations1() add(Mat::zeros(6, 1, CV_64F), 1, c, noArray(), c.type()); CV_Assert( norm(Matx61f(1.f, 1.f, 1.f, 1.f, 1.f, 1.f), c, CV_C) == 0 ); + + vector pt2d(3); + vector pt3d(2); + + CV_Assert( Mat(pt2d).checkVector(2) == 3 && Mat(pt2d).checkVector(3) < 0 && + Mat(pt3d).checkVector(2) < 0 && Mat(pt3d).checkVector(3) == 2 ); + + Matx44f m44(0.8147f, 0.6324f, 0.9575f, 0.9572f, + 0.9058f, 0.0975f, 0.9649f, 0.4854f, + 0.1270f, 0.2785f, 0.1576f, 0.8003f, + 0.9134f, 0.5469f, 0.9706f, 0.1419f); + double d = determinant(m44); + CV_Assert( fabs(d - (-0.0262)) <= 0.001 ); + + Cv32suf z; + z.i = 0x80000000; + CV_Assert( cvFloor(z.f) == 0 && cvCeil(z.f) == 0 && cvRound(z.f) == 0 ); } catch(const test_excep&) { From cf407c2ec0354a45e49c4880412474ec2ef52aa6 Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Mon, 28 Jan 2013 17:58:57 +0400 Subject: [PATCH 06/25] Don't check for EINTR and replace xioctl with ioctl This should be safe todo unless we are writing a signal handler. 
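For reference, a minimal sketch (illustrative only, not part of this change; the name xioctl_sketch is made up here) of the retry-on-EINTR idiom that xioctl() wrapped around ioctl(). When no signal handlers are installed, a blocking ioctl() is never interrupted, errno never becomes EINTR, and the loop never retries, so calling ioctl() directly behaves the same:

    #include <sys/ioctl.h>
    #include <errno.h>

    /* Sketch of the removed wrapper: retry the ioctl only while a
     * delivered signal interrupted the call (errno == EINTR). */
    static int xioctl_sketch(int fd, int request, void *arg)
    {
        int r;
        do
            r = ioctl(fd, request, arg);
        while (r == -1 && errno == EINTR);
        return r;
    }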
--- CMakeLists.txt | 8 ++-- cmake/OpenCVFindLibsVideo.cmake | 8 +--- cmake/templates/cvconfig.h.cmake | 3 -- modules/highgui/src/cap_v4l.cpp | 66 +++++++++++++------------------- 4 files changed, 32 insertions(+), 53 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c977f97662..f2b088a6e7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -136,7 +136,7 @@ OCV_OPTION(WITH_TBB "Include Intel TBB support" OFF OCV_OPTION(WITH_CSTRIPES "Include C= support" OFF IF WIN32 ) OCV_OPTION(WITH_TIFF "Include TIFF support" ON IF (NOT IOS) ) OCV_OPTION(WITH_UNICAP "Include Unicap support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) -OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON IF (UNIX AND NOT APPLE AND NOT ANDROID) ) +OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON IF (UNIX AND NOT ANDROID) ) OCV_OPTION(WITH_VIDEOINPUT "Build HighGUI with DirectShow support" ON IF WIN32 ) OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF IF (NOT ANDROID AND NOT APPLE) ) OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) @@ -722,13 +722,15 @@ if(DEFINED WITH_V4L) else() set(HAVE_CAMV4L_STR "NO") endif() - if(HAVE_CAMV4L2 OR HAVE_VIDEOIO) + if(HAVE_CAMV4L2) set(HAVE_CAMV4L2_STR "YES") + elseif(HAVE_VIDEOIO) + set(HAVE_CAMV4L2_STR "YES(videoio)") else() set(HAVE_CAMV4L2_STR "NO") endif() status(" V4L/V4L2:" HAVE_LIBV4L THEN "Using libv4l (ver ${ALIASOF_libv4l1_VERSION})" - ELSE "${HAVE_CAMV4L_STR}/${HAVE_CAMV4L2_STR}") + ELSE "${HAVE_CAMV4L_STR}/${HAVE_CAMV4L2_STR}") endif(DEFINED WITH_V4L) if(DEFINED WITH_VIDEOINPUT) diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index 323b1cbe96..414918527b 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -85,18 +85,12 @@ if(WITH_XINE) endif(WITH_XINE) # --- V4L --- -ocv_clear_vars(HAVE_LIBV4L HAVE_CAMV4L HAVE_CAMV4L2 HAVE_IOCTL_ULONG) +ocv_clear_vars(HAVE_LIBV4L HAVE_CAMV4L HAVE_CAMV4L2 HAVE_VIDEOIO) if(WITH_V4L) CHECK_MODULE(libv4l1 HAVE_LIBV4L) CHECK_INCLUDE_FILE(linux/videodev.h HAVE_CAMV4L) CHECK_INCLUDE_FILE(linux/videodev2.h HAVE_CAMV4L2) CHECK_INCLUDE_FILE(sys/videoio.h HAVE_VIDEOIO) - INCLUDE(CheckPrototypeDefinition) - CHECK_PROTOTYPE_DEFINITION(ioctl - "int ioctl(int d, unsigned long request, ...)" - "-1" - "sys/ioctl.h" - HAVE_IOCTL_ULONG) endif(WITH_V4L) # --- OpenNI --- diff --git a/cmake/templates/cvconfig.h.cmake b/cmake/templates/cvconfig.h.cmake index dab3ec8d51..e44e1359a2 100644 --- a/cmake/templates/cvconfig.h.cmake +++ b/cmake/templates/cvconfig.h.cmake @@ -25,9 +25,6 @@ /* V4L/V4L2 capturing support via libv4l */ #cmakedefine HAVE_LIBV4L -/* ioctl takes unsigned long request rather than int */ -#cmakedefine HAVE_IOCTL_ULONG - /* Carbon windowing environment */ #cmakedefine HAVE_CARBON diff --git a/modules/highgui/src/cap_v4l.cpp b/modules/highgui/src/cap_v4l.cpp index 3510ca3c4d..eacd744b12 100644 --- a/modules/highgui/src/cap_v4l.cpp +++ b/modules/highgui/src/cap_v4l.cpp @@ -354,20 +354,6 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h); static int numCameras = 0; static int indexList = 0; -#ifdef HAVE_IOCTL_ULONG -static int xioctl( int fd, unsigned long request, void *arg) -#else -static int xioctl( int fd, int request, void *arg) -#endif -{ - int r; - - do r = ioctl (fd, request, arg); - while (-1 == r && EINTR == errno); - - return r; -} - /* Simple test program: Find number of Video Sources available. 
Start from 0 and go to MAX_CAMERAS while checking for the device with that name. If it fails on the first attempt of /dev/video0, then check if /dev/video is valid. @@ -431,7 +417,7 @@ static int try_palette_v4l2(CvCaptureCAM_V4L* capture, unsigned long colorspace) capture->form.fmt.pix.width = DEFAULT_V4L_WIDTH; capture->form.fmt.pix.height = DEFAULT_V4L_HEIGHT; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form)) return -1; @@ -511,7 +497,7 @@ static int try_init_v4l2(CvCaptureCAM_V4L* capture, char *deviceName) } CLEAR (capture->cap); - if (-1 == xioctl (capture->deviceHandle, VIDIOC_QUERYCAP, &capture->cap)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_QUERYCAP, &capture->cap)) { #ifndef NDEBUG fprintf(stderr, "(DEBUG) try_init_v4l2 VIDIOC_QUERYCAP \"%s\": %s\n", deviceName, strerror(errno)); @@ -521,7 +507,7 @@ static int try_init_v4l2(CvCaptureCAM_V4L* capture, char *deviceName) } /* Query channels number */ - if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_INPUT, &deviceIndex)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_INPUT, &deviceIndex)) { #ifndef NDEBUG fprintf(stderr, "(DEBUG) try_init_v4l2 VIDIOC_G_INPUT \"%s\": %s\n", deviceName, strerror(errno)); @@ -533,7 +519,7 @@ static int try_init_v4l2(CvCaptureCAM_V4L* capture, char *deviceName) /* Query information about current input */ CLEAR (capture->inp); capture->inp.index = deviceIndex; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) { #ifndef NDEBUG fprintf(stderr, "(DEBUG) try_init_v4l2 VIDIOC_ENUMINPUT \"%s\": %s\n", deviceName, strerror(errno)); @@ -655,7 +641,7 @@ static void v4l2_scan_controls_enumerate_menu(CvCaptureCAM_V4L* capture) (int)capture->querymenu.index <= capture->queryctrl.maximum; capture->querymenu.index++) { - if (0 == xioctl (capture->deviceHandle, VIDIOC_QUERYMENU, + if (0 == ioctl (capture->deviceHandle, VIDIOC_QUERYMENU, &capture->querymenu)) { // printf (" %s\n", capture->querymenu.name); @@ -679,7 +665,7 @@ static void v4l2_scan_controls(CvCaptureCAM_V4L* capture) CLEAR (capture->queryctrl); capture->queryctrl.id = ctrl_id; - if (0 == xioctl (capture->deviceHandle, VIDIOC_QUERYCTRL, + if (0 == ioctl (capture->deviceHandle, VIDIOC_QUERYCTRL, &capture->queryctrl)) { @@ -749,7 +735,7 @@ static void v4l2_scan_controls(CvCaptureCAM_V4L* capture) CLEAR (capture->queryctrl); capture->queryctrl.id = ctrl_id; - if (0 == xioctl (capture->deviceHandle, VIDIOC_QUERYCTRL, + if (0 == ioctl (capture->deviceHandle, VIDIOC_QUERYCTRL, &capture->queryctrl)) { @@ -872,7 +858,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) capture->inp.index = CHANNEL_NUMBER; /* Set only channel number to CHANNEL_NUMBER */ /* V4L2 have a status field from selected video mode */ - if (-1 == xioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) { fprintf (stderr, "HIGHGUI ERROR: V4L2: Aren't able to set channel number\n"); icvCloseCAM_V4L (capture); @@ -884,7 +870,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) CLEAR (capture->form); capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { fprintf( stderr, "HIGHGUI ERROR: V4L2: Could not obtain 
specifics of capture window.\n\n"); icvCloseCAM_V4L(capture); return -1; @@ -922,7 +908,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) capture->req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; capture->req.memory = V4L2_MEMORY_MMAP; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_REQBUFS, &capture->req)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_REQBUFS, &capture->req)) { if (EINVAL == errno) { @@ -962,7 +948,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) buf.memory = V4L2_MEMORY_MMAP; buf.index = n_buffers; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_QUERYBUF, &buf)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_QUERYBUF, &buf)) { perror ("VIDIOC_QUERYBUF"); /* free capture, and returns an error code */ @@ -1201,7 +1187,7 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) { buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_DQBUF, &buf)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_DQBUF, &buf)) { switch (errno) { case EAGAIN: return 0; @@ -1209,7 +1195,7 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) { case EIO: if (!(buf.flags & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))) { - if (xioctl(capture->deviceHandle, VIDIOC_QBUF, &buf) == -1) + if (ioctl(capture->deviceHandle, VIDIOC_QBUF, &buf) == -1) { return 0; } @@ -1232,7 +1218,7 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) { //printf("got data in buff %d, len=%d, flags=0x%X, seq=%d, used=%d)\n", // buf.index, buf.length, buf.flags, buf.sequence, buf.bytesused); - if (-1 == xioctl (capture->deviceHandle, VIDIOC_QBUF, &buf)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_QBUF, &buf)) perror ("VIDIOC_QBUF"); return 1; @@ -1308,7 +1294,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { buf.memory = V4L2_MEMORY_MMAP; buf.index = (unsigned long)capture->bufferIndex; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_QBUF, &buf)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_QBUF, &buf)) { perror ("VIDIOC_QBUF"); return 0; } @@ -1316,7 +1302,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { /* enable the streaming */ capture->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_STREAMON, + if (-1 == ioctl (capture->deviceHandle, VIDIOC_STREAMON, &capture->type)) { /* error enabling the stream */ perror ("VIDIOC_STREAMON"); @@ -2301,7 +2287,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, CLEAR (capture->form); capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { /* display an error message, and return an error code */ perror ("VIDIOC_G_FMT"); return -1; @@ -2342,7 +2328,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, return -1; } - if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_CTRL, + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_CTRL, &capture->control)) { fprintf( stderr, "HIGHGUI ERROR: V4L2: "); @@ -2480,7 +2466,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { CLEAR (capture->cropcap); capture->cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (xioctl (capture->deviceHandle, VIDIOC_CROPCAP, &capture->cropcap) < 0) { + if (ioctl (capture->deviceHandle, VIDIOC_CROPCAP, &capture->cropcap) < 0) { fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: VIDIOC_CROPCAP\n"); } else { @@ -2489,7 +2475,7 @@ 
static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { capture->crop.c= capture->cropcap.defrect; /* set the crop area, but don't exit if the device don't support croping */ - if (xioctl (capture->deviceHandle, VIDIOC_S_CROP, &capture->crop) < 0) { + if (ioctl (capture->deviceHandle, VIDIOC_S_CROP, &capture->crop) < 0) { fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: VIDIOC_S_CROP\n"); } } @@ -2498,7 +2484,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* read the current setting, mainly to retreive the pixelformat information */ - xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form); + ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form); /* set the values we want to change */ capture->form.fmt.pix.width = w; @@ -2513,7 +2499,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { * don't test if the set of the size is ok, because some device * don't allow changing the size, and we will get the real size * later */ - xioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form); + ioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form); /* try to set framerate to 30 fps */ struct v4l2_streamparm setfps; @@ -2521,14 +2507,14 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { setfps.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; setfps.parm.capture.timeperframe.numerator = 1; setfps.parm.capture.timeperframe.denominator = 30; - xioctl (capture->deviceHandle, VIDIOC_S_PARM, &setfps); + ioctl (capture->deviceHandle, VIDIOC_S_PARM, &setfps); /* we need to re-initialize some things, like buffers, because the size has * changed */ capture->FirstCapture = 1; /* Get window info again, to get the real value */ - if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n"); @@ -2628,7 +2614,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, } /* get the min and max values */ - if (-1 == xioctl (capture->deviceHandle, + if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_CTRL, &capture->control)) { // perror ("VIDIOC_G_CTRL for getting min/max values"); return -1; @@ -2698,7 +2684,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, capture->control.value = (int)(value * (v4l2_max - v4l2_min) + v4l2_min); /* The driver may clamp the value or return ERANGE, ignored here */ - if (-1 == xioctl (capture->deviceHandle, + if (-1 == ioctl (capture->deviceHandle, VIDIOC_S_CTRL, &capture->control) && errno != ERANGE) { perror ("VIDIOC_S_CTRL"); return -1; From 287fb2c611e302659296be921bb22a2d20a747b1 Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Tue, 29 Jan 2013 14:46:13 +0400 Subject: [PATCH 07/25] Fix build warning --- modules/highgui/src/cap_v4l.cpp | 159 ++++++++++++++++---------------- 1 file changed, 82 insertions(+), 77 deletions(-) diff --git a/modules/highgui/src/cap_v4l.cpp b/modules/highgui/src/cap_v4l.cpp index eacd744b12..fdbd120faa 100644 --- a/modules/highgui/src/cap_v4l.cpp +++ b/modules/highgui/src/cap_v4l.cpp @@ -1415,7 +1415,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { static inline void move_420_block(int yTL, int yTR, int yBL, int yBR, int u, int v, - int rowPixels, unsigned char * rgb) + int rowPixels, unsigned char * rgb) { const int rvScale = 91881; const int guScale = -22553; @@ -1454,7 +1454,7 @@ move_420_block(int yTL, int yTR, int 
yBL, int yBR, int u, int v, static inline void move_411_block(int yTL, int yTR, int yBL, int yBR, int u, int v, - int /*rowPixels*/, unsigned char * rgb) + int /*rowPixels*/, unsigned char * rgb) { const int rvScale = 91881; const int guScale = -22553; @@ -1546,6 +1546,7 @@ yuv420p_to_rgb24(int width, int height, // /* Converts from interlaced YUV420 to RGB24. */ /* [FD] untested... */ +#ifdef HAVE_CAMV4L static void yuv420_to_rgb24(int width, int height, unsigned char *pIn0, unsigned char *pOut0) @@ -1590,6 +1591,7 @@ yuv420_to_rgb24(int width, int height, pOut += width * bytes; } } +#endif //HAVE_CAMV4L // Consider a YUV411P image of 8x2 pixels. // @@ -1641,6 +1643,8 @@ yuv411p_to_rgb24(int width, int height, /* based on ccvt_yuyv_bgr32() from camstream */ #define SAT(c) \ if (c & (~255)) { if (c < 0) c = 0; else c = 255; } + +#ifdef HAVE_CAMV4L2 static void yuyv_to_rgb24 (int width, int height, unsigned char *src, unsigned char *dst) { @@ -1732,6 +1736,7 @@ uyvy_to_rgb24 (int width, int height, unsigned char *src, unsigned char *dst) } } } +#endif //HAVE_CAMV4L2 #ifdef HAVE_JPEG @@ -1758,6 +1763,7 @@ mjpeg_to_rgb24 (int width, int height, * */ +#ifdef HAVE_CAMV4L2 static void bayer2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst) { long int i; @@ -1919,7 +1925,6 @@ static void sgbrg2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, uns } } - #define CLAMP(x) ((x)<0?0:((x)>255)?255:(x)) typedef struct { @@ -2090,7 +2095,7 @@ static int sonix_decompress(int width, int height, unsigned char *inp, unsigned return 0; } - +#endif //HAVE_CAMV4L2 static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { @@ -2153,78 +2158,77 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { if (V4L2_SUPPORT == 1) { switch (capture->palette) - { - case PALETTE_BGR24: - memcpy((char *)capture->frame.imageData, - (char *)capture->buffers[capture->bufferIndex].start, - capture->frame.imageSize); - break; + { + case PALETTE_BGR24: + memcpy((char *)capture->frame.imageData, + (char *)capture->buffers[capture->bufferIndex].start, + capture->frame.imageSize); + break; - case PALETTE_YVU420: - yuv420p_to_rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex].start), - (unsigned char*)capture->frame.imageData); - break; + case PALETTE_YVU420: + yuv420p_to_rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex].start), + (unsigned char*)capture->frame.imageData); + break; - case PALETTE_YUV411P: - yuv411p_to_rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex].start), - (unsigned char*)capture->frame.imageData); - break; + case PALETTE_YUV411P: + yuv411p_to_rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex].start), + (unsigned char*)capture->frame.imageData); + break; #ifdef HAVE_JPEG - case PALETTE_MJPEG: - if (!mjpeg_to_rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex] - .start), - capture->buffers[capture->bufferIndex].length, - (unsigned char*)capture->frame.imageData)) - return 0; - break; + case PALETTE_MJPEG: + if (!mjpeg_to_rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex] + .start), + 
capture->buffers[capture->bufferIndex].length, + (unsigned char*)capture->frame.imageData)) + return 0; + break; #endif - case PALETTE_YUYV: - yuyv_to_rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex].start), - (unsigned char*)capture->frame.imageData); - break; - - case PALETTE_UYVY: - uyvy_to_rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)(capture->buffers[capture->bufferIndex].start), - (unsigned char*)capture->frame.imageData); - break; - case PALETTE_SBGGR8: - bayer2rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)capture->buffers[capture->bufferIndex].start, - (unsigned char*)capture->frame.imageData); - break; - - case PALETTE_SN9C10X: - sonix_decompress_init(); - sonix_decompress(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)capture->buffers[capture->bufferIndex].start, - (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start); - - bayer2rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start, - (unsigned char*)capture->frame.imageData); - break; - - case PALETTE_SGBRG: - sgbrg2rgb24(capture->form.fmt.pix.width, - capture->form.fmt.pix.height, - (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start, - (unsigned char*)capture->frame.imageData); - break; - } + case PALETTE_YUYV: + yuyv_to_rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex].start), + (unsigned char*)capture->frame.imageData); + break; + case PALETTE_UYVY: + uyvy_to_rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)(capture->buffers[capture->bufferIndex].start), + (unsigned char*)capture->frame.imageData); + break; + case PALETTE_SBGGR8: + bayer2rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)capture->buffers[capture->bufferIndex].start, + (unsigned char*)capture->frame.imageData); + break; + + case PALETTE_SN9C10X: + sonix_decompress_init(); + sonix_decompress(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)capture->buffers[capture->bufferIndex].start, + (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start); + + bayer2rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start, + (unsigned char*)capture->frame.imageData); + break; + + case PALETTE_SGBRG: + sgbrg2rgb24(capture->form.fmt.pix.width, + capture->form.fmt.pix.height, + (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start, + (unsigned char*)capture->frame.imageData); + break; + } } #endif /* HAVE_CAMV4L2 */ #if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) @@ -2233,31 +2237,32 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { #ifdef HAVE_CAMV4L { - switch(capture->imageProperties.palette) { - case VIDEO_PALETTE_RGB24: + switch(capture->imageProperties.palette) + { + case VIDEO_PALETTE_RGB24: memcpy((char *)capture->frame.imageData, (char *)(capture->memoryMap + capture->memoryBuffer.offsets[capture->bufferIndex]), capture->frame.imageSize); break; - case VIDEO_PALETTE_YUV420P: + case VIDEO_PALETTE_YUV420P: yuv420p_to_rgb24(capture->captureWindow.width, 
capture->captureWindow.height, (unsigned char*)(capture->memoryMap + capture->memoryBuffer.offsets[capture->bufferIndex]), (unsigned char*)capture->frame.imageData); break; - case VIDEO_PALETTE_YUV420: + case VIDEO_PALETTE_YUV420: yuv420_to_rgb24(capture->captureWindow.width, capture->captureWindow.height, (unsigned char*)(capture->memoryMap + capture->memoryBuffer.offsets[capture->bufferIndex]), (unsigned char*)capture->frame.imageData); break; - case VIDEO_PALETTE_YUV411P: + case VIDEO_PALETTE_YUV411P: yuv411p_to_rgb24(capture->captureWindow.width, capture->captureWindow.height, (unsigned char*)(capture->memoryMap + capture->memoryBuffer.offsets[capture->bufferIndex]), (unsigned char*)capture->frame.imageData); break; - default: + default: fprintf( stderr, "HIGHGUI ERROR: V4L: Cannot convert from palette %d to RGB\n", capture->imageProperties.palette); From fe86f31f44a4e1bdb6c8717783eb102ed9bee7fa Mon Sep 17 00:00:00 2001 From: Ilya Lysenkov Date: Mon, 28 Jan 2013 12:40:54 +0400 Subject: [PATCH 08/25] Added a test of CvModelEstimator2::checkSubset(...) --- modules/calib3d/src/_modelest.h | 2 +- modules/calib3d/test/test_modelest.cpp | 226 +++++++++++++++++++++++++ 2 files changed, 227 insertions(+), 1 deletion(-) create mode 100644 modules/calib3d/test/test_modelest.cpp diff --git a/modules/calib3d/src/_modelest.h b/modules/calib3d/src/_modelest.h index b86485e491..d30e4f4957 100644 --- a/modules/calib3d/src/_modelest.h +++ b/modules/calib3d/src/_modelest.h @@ -45,7 +45,7 @@ #include "precomp.hpp" -class CvModelEstimator2 +class CV_EXPORTS CvModelEstimator2 { public: CvModelEstimator2(int _modelPoints, CvSize _modelSize, int _maxBasicSolutions); diff --git a/modules/calib3d/test/test_modelest.cpp b/modules/calib3d/test/test_modelest.cpp new file mode 100644 index 0000000000..91bf4b092f --- /dev/null +++ b/modules/calib3d/test/test_modelest.cpp @@ -0,0 +1,226 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "test_precomp.hpp" +#include "_modelest.h" + +using namespace cv; + +class BareModelEstimator : public CvModelEstimator2 +{ +public: + BareModelEstimator(int modelPoints, CvSize modelSize, int maxBasicSolutions); + + virtual int runKernel( const CvMat*, const CvMat*, CvMat* ); + virtual void computeReprojError( const CvMat*, const CvMat*, + const CvMat*, CvMat* ); + + bool checkSubsetPublic( const CvMat* ms1, int count, bool checkPartialSubset ); +}; + +BareModelEstimator::BareModelEstimator(int _modelPoints, CvSize _modelSize, int _maxBasicSolutions) + :CvModelEstimator2(_modelPoints, _modelSize, _maxBasicSolutions) +{ +} + +int BareModelEstimator::runKernel( const CvMat*, const CvMat*, CvMat* ) +{ + return 0; +} + +void BareModelEstimator::computeReprojError( const CvMat*, const CvMat*, + const CvMat*, CvMat* ) +{ +} + +bool BareModelEstimator::checkSubsetPublic( const CvMat* ms1, int count, bool checkPartialSubset ) +{ + checkPartialSubsets = checkPartialSubset; + return checkSubset(ms1, count); +} + +class CV_ModelEstimator2_Test : public cvtest::ArrayTest +{ +public: + CV_ModelEstimator2_Test(); + +protected: + void get_test_array_types_and_sizes( int test_case_idx, vector >& sizes, vector >& types ); + void fill_array( int test_case_idx, int i, int j, Mat& arr ); + double get_success_error_level( int test_case_idx, int i, int j ); + void run_func(); + void prepare_to_validation( int test_case_idx ); + + bool checkPartialSubsets; + int usedPointsCount; + + bool checkSubsetResult; + int generalPositionsCount; + int maxPointsCount; +}; + +CV_ModelEstimator2_Test::CV_ModelEstimator2_Test() +{ + generalPositionsCount = get_test_case_count() / 2; + maxPointsCount = 100; + + test_array[INPUT].push_back(NULL); + test_array[OUTPUT].push_back(NULL); + test_array[REF_OUTPUT].push_back(NULL); +} + +void CV_ModelEstimator2_Test::get_test_array_types_and_sizes( int /*test_case_idx*/, + vector > &sizes, vector > &types ) +{ + RNG &rng = ts->get_rng(); + checkPartialSubsets = (cvtest::randInt(rng) % 2 == 0); + + int pointsCount = cvtest::randInt(rng) % maxPointsCount; + usedPointsCount = pointsCount == 0 ? 0 : cvtest::randInt(rng) % pointsCount; + + sizes[INPUT][0] = cvSize(1, pointsCount); + types[INPUT][0] = CV_64FC2; + + sizes[OUTPUT][0] = sizes[REF_OUTPUT][0] = cvSize(1, 1); + types[OUTPUT][0] = types[REF_OUTPUT][0] = CV_8UC1; +} + +void CV_ModelEstimator2_Test::fill_array( int test_case_idx, int i, int j, Mat& arr ) +{ + if( i != INPUT ) + { + cvtest::ArrayTest::fill_array( test_case_idx, i, j, arr ); + return; + } + + if (test_case_idx < generalPositionsCount) + { + //generate points in a general position (i.e. no three points can lie on the same line.) 
+ + bool isGeneralPosition; + do + { + ArrayTest::fill_array(test_case_idx, i, j, arr); + + //a simple check that the position is general: + // for each line check that all other points don't belong to it + isGeneralPosition = true; + for (int startPointIndex = 0; startPointIndex < usedPointsCount && isGeneralPosition; startPointIndex++) + { + for (int endPointIndex = startPointIndex + 1; endPointIndex < usedPointsCount && isGeneralPosition; endPointIndex++) + { + + for (int testPointIndex = 0; testPointIndex < usedPointsCount && isGeneralPosition; testPointIndex++) + { + if (testPointIndex == startPointIndex || testPointIndex == endPointIndex) + { + continue; + } + + CV_Assert(arr.type() == CV_64FC2); + Point2d tangentVector_1 = arr.at(endPointIndex) - arr.at(startPointIndex); + Point2d tangentVector_2 = arr.at(testPointIndex) - arr.at(startPointIndex); + + const float eps = 1e-4; + //TODO: perhaps it is better to normalize the cross product by norms of the tangent vectors + if (fabs(tangentVector_1.cross(tangentVector_2)) < eps) + { + isGeneralPosition = false; + } + } + } + } + } + while(!isGeneralPosition); + } + else + { + //create points in a degenerate position (there are at least 3 points belonging to the same line) + + ArrayTest::fill_array(test_case_idx, i, j, arr); + if (usedPointsCount <= 2) + { + return; + } + + RNG &rng = ts->get_rng(); + int startPointIndex, endPointIndex, modifiedPointIndex; + do + { + startPointIndex = cvtest::randInt(rng) % usedPointsCount; + endPointIndex = cvtest::randInt(rng) % usedPointsCount; + modifiedPointIndex = checkPartialSubsets ? usedPointsCount - 1 : cvtest::randInt(rng) % usedPointsCount; + } + while (startPointIndex == endPointIndex || startPointIndex == modifiedPointIndex || endPointIndex == modifiedPointIndex); + + double startWeight = cvtest::randReal(rng); + CV_Assert(arr.type() == CV_64FC2); + arr.at(modifiedPointIndex) = startWeight * arr.at(startPointIndex) + (1.0 - startWeight) * arr.at(endPointIndex); + } +} + + +double CV_ModelEstimator2_Test::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ ) +{ + return 0; +} + +void CV_ModelEstimator2_Test::prepare_to_validation( int test_case_idx ) +{ + test_mat[OUTPUT][0].at(0) = checkSubsetResult; + test_mat[REF_OUTPUT][0].at(0) = test_case_idx < generalPositionsCount || usedPointsCount <= 2; +} + +void CV_ModelEstimator2_Test::run_func() +{ + //make the input continuous + Mat input = test_mat[INPUT][0].clone(); + CvMat _input = input; + + RNG &rng = ts->get_rng(); + int modelPoints = cvtest::randInt(rng); + CvSize modelSize = cvSize(2, modelPoints); + int maxBasicSolutions = cvtest::randInt(rng); + BareModelEstimator modelEstimator(modelPoints, modelSize, maxBasicSolutions); + checkSubsetResult = modelEstimator.checkSubsetPublic(&_input, usedPointsCount, checkPartialSubsets); +} + +TEST(Calib3d_ModelEstimator2, accuracy) { CV_ModelEstimator2_Test test; test.safe_run(); } From 5021a792b1b0e91fdb9092407f2aaa707e9d4695 Mon Sep 17 00:00:00 2001 From: Ilya Lysenkov Date: Mon, 28 Jan 2013 12:41:09 +0400 Subject: [PATCH 09/25] Fixed #2470 --- modules/calib3d/src/modelest.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/calib3d/src/modelest.cpp b/modules/calib3d/src/modelest.cpp index b6441e22cb..5140afe99d 100644 --- a/modules/calib3d/src/modelest.cpp +++ b/modules/calib3d/src/modelest.cpp @@ -351,7 +351,7 @@ bool CvModelEstimator2::checkSubset( const CvMat* m, int count ) break; } - return i >= i1; + return i > i1; } From 
daead680cd06c2bc67f3a8cce1ebf015ff14812d Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Tue, 29 Jan 2013 16:38:59 +0400 Subject: [PATCH 10/25] Add option to control build of applications (feature #2568) --- CMakeLists.txt | 5 ++++- apps/haartraining/CMakeLists.txt | 4 ---- apps/traincascade/CMakeLists.txt | 4 ---- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6215028bf0..ac3b3fcd35 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -149,6 +149,7 @@ OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" OFF # OpenCV build components # =================================================== OCV_OPTION(BUILD_SHARED_LIBS "Build shared libraries (.dll/.so) instead of static ones (.lib/.a)" NOT (ANDROID OR IOS) ) +OCV_OPTION(BUILD_opencv_apps "Build utility applications (used for example to train classifiers)" (NOT ANDROID) IF (NOT IOS) ) OCV_OPTION(BUILD_ANDROID_EXAMPLES "Build examples for Android platform" ON IF ANDROID ) OCV_OPTION(BUILD_DOCS "Create build rules for OpenCV Documentation" ON ) OCV_OPTION(BUILD_EXAMPLES "Build all examples" OFF ) @@ -453,7 +454,9 @@ add_subdirectory(doc) add_subdirectory(data) # extra applications -add_subdirectory(apps) +if(BUILD_opencv_apps) + add_subdirectory(apps) +endif() # examples if(BUILD_EXAMPLES OR BUILD_ANDROID_EXAMPLES OR INSTALL_PYTHON_EXAMPLES) diff --git a/apps/haartraining/CMakeLists.txt b/apps/haartraining/CMakeLists.txt index 22349ed6b9..953be3b7e5 100644 --- a/apps/haartraining/CMakeLists.txt +++ b/apps/haartraining/CMakeLists.txt @@ -1,7 +1,3 @@ -if(IOS OR ANDROID) - return() -endif() - SET(OPENCV_HAARTRAINING_DEPS opencv_core opencv_imgproc opencv_highgui opencv_objdetect opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy) ocv_check_dependencies(${OPENCV_HAARTRAINING_DEPS}) diff --git a/apps/traincascade/CMakeLists.txt b/apps/traincascade/CMakeLists.txt index 350200fc49..be60137a9d 100644 --- a/apps/traincascade/CMakeLists.txt +++ b/apps/traincascade/CMakeLists.txt @@ -1,7 +1,3 @@ -if(IOS OR ANDROID) - return() -endif() - SET(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_ml opencv_imgproc opencv_objdetect opencv_highgui opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy) ocv_check_dependencies(${OPENCV_TRAINCASCADE_DEPS}) From 0b1fe53a467972d25bca84c729c7509a65d60de4 Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Tue, 29 Jan 2013 17:08:26 +0400 Subject: [PATCH 11/25] Add -fobjc-exceptions flag to ObjectiveC sources if supported (bug #2657) --- cmake/OpenCVUtils.cmake | 9 +++++++++ modules/highgui/CMakeLists.txt | 18 ++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake index 879d332532..f40cc6d19c 100644 --- a/cmake/OpenCVUtils.cmake +++ b/cmake/OpenCVUtils.cmake @@ -64,6 +64,13 @@ MACRO(ocv_check_compiler_flag LANG FLAG RESULT) else() FILE(WRITE "${_fname}" "#pragma\nint main(void) { return 0; }\n") endif() + elseif("_${LANG}_" MATCHES "_OBJCXX_") + set(_fname "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/src.mm") + if("${CMAKE_CXX_FLAGS} ${FLAG} " MATCHES "-Werror " OR "${CMAKE_CXX_FLAGS} ${FLAG} " MATCHES "-Werror=unknown-pragmas ") + FILE(WRITE "${_fname}" "int main() { return 0; }\n") + else() + FILE(WRITE "${_fname}" "#pragma\nint main() { return 0; }\n") + endif() else() unset(_fname) endif() @@ -100,6 +107,8 @@ macro(ocv_check_flag_support lang flag varname) set(_lang CXX) elseif("_${lang}_" MATCHES "_C_") set(_lang C) + elseif("_${lang}_" MATCHES 
"_OBJCXX_") + set(_lang OBJCXX) else() set(_lang ${lang}) endif() diff --git a/modules/highgui/CMakeLists.txt b/modules/highgui/CMakeLists.txt index b4b2896d7a..4ddd2179e3 100644 --- a/modules/highgui/CMakeLists.txt +++ b/modules/highgui/CMakeLists.txt @@ -230,6 +230,24 @@ ocv_module_include_directories() ocv_create_module(${GRFMT_LIBS} ${HIGHGUI_LIBRARIES}) +if(APPLE) + ocv_check_flag_support(OBJCXX "-fobjc-exceptions" HAVE_OBJC_EXCEPTIONS) + if(HAVE_OBJC_EXCEPTIONS) + foreach(source ${OPENCV_MODULE_${the_module}_SOURCES}) + if("${source}" MATCHES "\\.mm$") + get_source_file_property(flags "${source}" COMPILE_FLAGS) + if(flags) + set(flags "${_flags} -fobjc-exceptions") + else() + set(flags "-fobjc-exceptions") + endif() + + set_source_files_properties("${source}" PROPERTIES COMPILE_FLAGS "${flags}") + endif() + endforeach() + endif() +endif() + if(BUILD_SHARED_LIBS) add_definitions(-DHIGHGUI_EXPORTS) endif() From c69312ea0df5b55c24aeac27560679cb4b2f3419 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Tue, 29 Jan 2013 19:38:56 +0400 Subject: [PATCH 12/25] fixed #2580, #2210. some work on #2025. modified SIFT to 1) double image before finding keypoints, 2) use floating-point internally instead of 16-bit integers, 3) set the keypoint response to the abs(interpolated_DoG_value). step 1) increases the number of detected keypoints significantly and together with 2) and 3) it improves some detection benchmarks. On the other hand, the stability of the small keypoints is lower, so the rotation and scale invariance tests now struggle a bit. In 2.5 need to make this feature optional and add some more intelligence to the algorithm. added test that finds a planar object using SIFT. --- modules/nonfree/CMakeLists.txt | 2 +- modules/nonfree/src/sift.cpp | 183 ++++++++++++------ modules/nonfree/test/test_features2d.cpp | 58 +++++- .../test_rotation_and_scale_invariance.cpp | 27 ++- samples/cpp/descriptor_extractor_matcher.cpp | 2 + 5 files changed, 209 insertions(+), 63 deletions(-) diff --git a/modules/nonfree/CMakeLists.txt b/modules/nonfree/CMakeLists.txt index 8c7bd0efde..eeaf53a6f8 100644 --- a/modules/nonfree/CMakeLists.txt +++ b/modules/nonfree/CMakeLists.txt @@ -3,4 +3,4 @@ if(BUILD_ANDROID_PACKAGE) endif() set(the_description "Functionality with possible limitations on the use") -ocv_define_module(nonfree opencv_imgproc opencv_features2d) +ocv_define_module(nonfree opencv_imgproc opencv_features2d opencv_calib3d) diff --git a/modules/nonfree/src/sift.cpp b/modules/nonfree/src/sift.cpp index aca8020cc7..58ebd31016 100644 --- a/modules/nonfree/src/sift.cpp +++ b/modules/nonfree/src/sift.cpp @@ -162,8 +162,24 @@ static const float SIFT_DESCR_MAG_THR = 0.2f; // factor used to convert floating-point descriptor to unsigned char static const float SIFT_INT_DESCR_FCTR = 512.f; +#if 0 +// intermediate type used for DoG pyramids +typedef short sift_wt; static const int SIFT_FIXPT_SCALE = 48; - +#else +// intermediate type used for DoG pyramids +typedef float sift_wt; +static const int SIFT_FIXPT_SCALE = 1; +#endif + +static inline void +unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale) +{ + octave = kpt.octave & 255; + layer = (kpt.octave >> 8) & 255; + octave = octave < 128 ? octave : (-128 | octave); + scale = octave >= 0 ? 
1.f/(1 << octave) : (float)(1 << -octave); +} static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma ) { @@ -172,7 +188,7 @@ static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma cvtColor(img, gray, COLOR_BGR2GRAY); else img.copyTo(gray); - gray.convertTo(gray_fpt, CV_16S, SIFT_FIXPT_SCALE, 0); + gray.convertTo(gray_fpt, DataType::type, SIFT_FIXPT_SCALE, 0); float sig_diff; @@ -245,7 +261,7 @@ void SIFT::buildDoGPyramid( const vector& gpyr, vector& dogpyr ) const const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i]; const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1]; Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i]; - subtract(src2, src1, dst, noArray(), CV_16S); + subtract(src2, src1, dst, noArray(), DataType::type); } } } @@ -276,8 +292,8 @@ static float calcOrientationHist( const Mat& img, Point pt, int radius, if( x <= 0 || x >= img.cols - 1 ) continue; - float dx = (float)(img.at(y, x+1) - img.at(y, x-1)); - float dy = (float)(img.at(y-1, x) - img.at(y+1, x)); + float dx = (float)(img.at(y, x+1) - img.at(y, x-1)); + float dy = (float)(img.at(y-1, x) - img.at(y+1, x)); X[k] = dx; Y[k] = dy; W[k] = (i*i + j*j)*expf_scale; k++; @@ -323,7 +339,7 @@ static float calcOrientationHist( const Mat& img, Point pt, int radius, // // Interpolates a scale-space extremum's location and scale to subpixel -// accuracy to form an image feature. Rejects features with low contrast. +// accuracy to form an image feature. Rejects features with low contrast. // Based on Section 4 of Lowe's paper. static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int octv, int& layer, int& r, int& c, int nOctaveLayers, @@ -334,7 +350,7 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o const float second_deriv_scale = img_scale; const float cross_deriv_scale = img_scale*0.25f; - float xi=0, xr=0, xc=0, contr; + float xi=0, xr=0, xc=0, contr=0; int i = 0; for( ; i < SIFT_MAX_INTERP_STEPS; i++ ) @@ -344,20 +360,20 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o const Mat& prev = dog_pyr[idx-1]; const Mat& next = dog_pyr[idx+1]; - Vec3f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, - (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, - (next.at(r, c) - prev.at(r, c))*deriv_scale); - - float v2 = (float)img.at(r, c)*2; - float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; - float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; - float dss = (next.at(r, c) + prev.at(r, c) - v2)*second_deriv_scale; - float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - - img.at(r-1, c+1) + img.at(r-1, c-1))*cross_deriv_scale; - float dxs = (next.at(r, c+1) - next.at(r, c-1) - - prev.at(r, c+1) + prev.at(r, c-1))*cross_deriv_scale; - float dys = (next.at(r+1, c) - next.at(r-1, c) - - prev.at(r+1, c) + prev.at(r-1, c))*cross_deriv_scale; + Vec3f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, + (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, + (next.at(r, c) - prev.at(r, c))*deriv_scale); + + float v2 = (float)img.at(r, c)*2; + float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; + float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; + float dss = (next.at(r, c) + prev.at(r, c) - v2)*second_deriv_scale; + float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - + img.at(r-1, c+1) + img.at(r-1, c-1))*cross_deriv_scale; + float dxs = (next.at(r, c+1) - next.at(r, c-1) - + prev.at(r, c+1) + prev.at(r, c-1))*cross_deriv_scale; + float dys = (next.at(r+1, c) - 
next.at(r-1, c) - + prev.at(r+1, c) + prev.at(r-1, c))*cross_deriv_scale; Matx33f H(dxx, dxy, dxs, dxy, dyy, dys, @@ -369,20 +385,25 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o xr = -X[1]; xc = -X[0]; - if( std::abs( xi ) < 0.5f && std::abs( xr ) < 0.5f && std::abs( xc ) < 0.5f ) + if( std::abs(xi) < 0.5f && std::abs(xr) < 0.5f && std::abs(xc) < 0.5f ) break; - c += cvRound( xc ); - r += cvRound( xr ); - layer += cvRound( xi ); + if( std::abs(xi) > (float)(INT_MAX/3) || + std::abs(xr) > (float)(INT_MAX/3) || + std::abs(xc) > (float)(INT_MAX/3) ) + return false; + + c += cvRound(xc); + r += cvRound(xr); + layer += cvRound(xi); if( layer < 1 || layer > nOctaveLayers || - c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER || - r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER ) + c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER || + r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER ) return false; } - /* ensure convergence of interpolation */ + // ensure convergence of interpolation if( i >= SIFT_MAX_INTERP_STEPS ) return false; @@ -391,21 +412,21 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o const Mat& img = dog_pyr[idx]; const Mat& prev = dog_pyr[idx-1]; const Mat& next = dog_pyr[idx+1]; - Matx31f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, - (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, - (next.at(r, c) - prev.at(r, c))*deriv_scale); + Matx31f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, + (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, + (next.at(r, c) - prev.at(r, c))*deriv_scale); float t = dD.dot(Matx31f(xc, xr, xi)); - contr = img.at(r, c)*img_scale + t * 0.5f; + contr = img.at(r, c)*img_scale + t * 0.5f; if( std::abs( contr ) * nOctaveLayers < contrastThreshold ) return false; - /* principal curvatures are computed using the trace and det of Hessian */ - float v2 = img.at(r, c)*2.f; - float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; - float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; - float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - - img.at(r-1, c+1) + img.at(r-1, c-1)) * cross_deriv_scale; + // principal curvatures are computed using the trace and det of Hessian + float v2 = img.at(r, c)*2.f; + float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; + float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; + float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - + img.at(r-1, c+1) + img.at(r-1, c-1)) * cross_deriv_scale; float tr = dxx + dyy; float det = dxx * dyy - dxy * dxy; @@ -417,6 +438,7 @@ static bool adjustLocalExtrema( const vector& dog_pyr, KeyPoint& kpt, int o kpt.pt.y = (r + xr) * (1 << octv); kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16); kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2; + kpt.response = std::abs(contr); return true; } @@ -448,13 +470,13 @@ void SIFT::findScaleSpaceExtrema( const vector& gauss_pyr, const vector(r); - const short* prevptr = prev.ptr(r); - const short* nextptr = next.ptr(r); + const sift_wt* currptr = img.ptr(r); + const sift_wt* prevptr = prev.ptr(r); + const sift_wt* nextptr = next.ptr(r); for( int c = SIFT_IMG_BORDER; c < cols-SIFT_IMG_BORDER; c++) { - int val = currptr[c]; + sift_wt val = currptr[c]; // find local extrema with pixel accuracy if( std::abs(val) > threshold && @@ -541,11 +563,9 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc for( i = -radius, k = 0; i <= radius; i++ ) for( j 
= -radius; j <= radius; j++ ) { - /* - Calculate sample's histogram array coords rotated relative to ori. - Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e. - r_rot = 1.5) have full weight placed in row 1 after interpolation. - */ + // Calculate sample's histogram array coords rotated relative to ori. + // Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e. + // r_rot = 1.5) have full weight placed in row 1 after interpolation. float c_rot = j * cos_t - i * sin_t; float r_rot = j * sin_t + i * cos_t; float rbin = r_rot + d/2 - 0.5f; @@ -553,10 +573,10 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc int r = pt.y + i, c = pt.x + j; if( rbin > -1 && rbin < d && cbin > -1 && cbin < d && - r > 0 && r < rows - 1 && c > 0 && c < cols - 1 ) + r > 0 && r < rows - 1 && c > 0 && c < cols - 1 ) { - float dx = (float)(img.at(r, c+1) - img.at(r, c-1)); - float dy = (float)(img.at(r-1, c) - img.at(r+1, c)); + float dx = (float)(img.at(r, c+1) - img.at(r, c-1)); + float dy = (float)(img.at(r-1, c) - img.at(r+1, c)); X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin; W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale; k++; @@ -632,29 +652,46 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc nrm2 += val*val; } nrm2 = SIFT_INT_DESCR_FCTR/std::max(std::sqrt(nrm2), FLT_EPSILON); + +#if 1 for( k = 0; k < len; k++ ) { dst[k] = saturate_cast(dst[k]*nrm2); } +#else + float nrm1 = 0; + for( k = 0; k < len; k++ ) + { + dst[k] *= nrm2; + nrm1 += dst[k]; + } + nrm1 = 1.f/std::max(nrm1, FLT_EPSILON); + for( k = 0; k < len; k++ ) + { + dst[k] = std::sqrt(dst[k] * nrm1);//saturate_cast(std::sqrt(dst[k] * nrm1)*SIFT_INT_DESCR_FCTR); + } +#endif } static void calcDescriptors(const vector& gpyr, const vector& keypoints, - Mat& descriptors, int nOctaveLayers ) + Mat& descriptors, int nOctaveLayers, int firstOctave ) { int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS; for( size_t i = 0; i < keypoints.size(); i++ ) { KeyPoint kpt = keypoints[i]; - int octv=kpt.octave & 255, layer=(kpt.octave >> 8) & 255; - float scale = 1.f/(1 << octv); + int octave, layer; + float scale; + unpackOctave(kpt, octave, layer, scale); + CV_Assert(octave >= firstOctave && layer <= nOctaveLayers+2); float size=kpt.size*scale; Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale); - const Mat& img = gpyr[octv*(nOctaveLayers + 3) + layer]; + const Mat& img = gpyr[(octave - firstOctave)*(nOctaveLayers + 3) + layer]; float angle = 360.f - kpt.angle; if(std::abs(angle - 360.f) < FLT_EPSILON) - angle = 0.f; + angle = 0.f; calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr((int)i)); } } @@ -691,6 +728,7 @@ void SIFT::operator()(InputArray _image, InputArray _mask, OutputArray _descriptors, bool useProvidedKeypoints) const { + int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0; Mat image = _image.getMat(), mask = _mask.getMat(); if( image.empty() || image.depth() != CV_8U ) @@ -699,9 +737,28 @@ void SIFT::operator()(InputArray _image, InputArray _mask, if( !mask.empty() && mask.type() != CV_8UC1 ) CV_Error( CV_StsBadArg, "mask has incorrect type (!=CV_8UC1)" ); - Mat base = createInitialImage(image, false, (float)sigma); + if( useProvidedKeypoints ) + { + firstOctave = 0; + int maxOctave = INT_MIN; + for( size_t i = 0; i < keypoints.size(); i++ ) + { + int octave, layer; + float scale; + unpackOctave(keypoints[i], octave, layer, scale); + firstOctave = std::min(firstOctave, octave); + maxOctave = std::max(maxOctave, octave); + 
actualNLayers = std::max(actualNLayers, layer-2); + } + + firstOctave = std::min(firstOctave, 0); + CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers ); + actualNOctaves = maxOctave - firstOctave + 1; + } + + Mat base = createInitialImage(image, firstOctave < 0, (float)sigma); vector gpyr, dogpyr; - int nOctaves = cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2); + int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2) - firstOctave; //double t, tf = getTickFrequency(); //t = (double)getTickCount(); @@ -724,6 +781,16 @@ void SIFT::operator()(InputArray _image, InputArray _mask, KeyPointsFilter::retainBest(keypoints, nfeatures); //t = (double)getTickCount() - t; //printf("keypoint detection time: %g\n", t*1000./tf); + + if( firstOctave < 0 ) + for( size_t i = 0; i < keypoints.size(); i++ ) + { + KeyPoint& kpt = keypoints[i]; + float scale = 1.f/(float)(1 << -firstOctave); + kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255); + kpt.pt *= scale; + kpt.size *= scale; + } } else { @@ -738,7 +805,7 @@ void SIFT::operator()(InputArray _image, InputArray _mask, _descriptors.create((int)keypoints.size(), dsize, CV_32F); Mat descriptors = _descriptors.getMat(); - calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers); + calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave); //t = (double)getTickCount() - t; //printf("descriptor extraction time: %g\n", t*1000./tf); } diff --git a/modules/nonfree/test/test_features2d.cpp b/modules/nonfree/test/test_features2d.cpp index eb8f44b8d3..a4cc373b78 100644 --- a/modules/nonfree/test/test_features2d.cpp +++ b/modules/nonfree/test/test_features2d.cpp @@ -40,6 +40,7 @@ //M*/ #include "test_precomp.hpp" +#include "opencv2/calib3d/calib3d.hpp" using namespace std; using namespace cv; @@ -1085,4 +1086,59 @@ TEST(Features2d_BruteForceDescriptorMatcher_knnMatch, regression) Ptr s = DescriptorExtractor::create("SURF"); ASSERT_STREQ(s->paramHelp("extended").c_str(), ""); } -*/ \ No newline at end of file +*/ + +class CV_DetectPlanarTest : public cvtest::BaseTest +{ +public: + CV_DetectPlanarTest(const string& _fname, int _min_ninliers) : fname(_fname), min_ninliers(_min_ninliers) {} + +protected: + void run(int) + { + Ptr f = Algorithm::create("Feature2D." 
+ fname); + if(f.empty()) + return; + string path = string(ts->get_data_path()) + "detectors_descriptors_evaluation/planar/"; + string imgname1 = path + "box.png"; + string imgname2 = path + "box_in_scene.png"; + Mat img1 = imread(imgname1, 0); + Mat img2 = imread(imgname2, 0); + if( img1.empty() || img2.empty() ) + { + ts->printf( cvtest::TS::LOG, "missing %s and/or %s\n", imgname1.c_str(), imgname2.c_str()); + ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); + return; + } + vector kpt1, kpt2; + Mat d1, d2; + f->operator()(img1, Mat(), kpt1, d1); + f->operator()(img1, Mat(), kpt2, d2); + + vector matches; + BFMatcher(NORM_L2, true).match(d1, d2, matches); + + vector pt1, pt2; + for( size_t i = 0; i < matches.size(); i++ ) { + pt1.push_back(kpt1[matches[i].queryIdx].pt); + pt2.push_back(kpt2[matches[i].trainIdx].pt); + } + + Mat inliers, H = findHomography(pt1, pt2, RANSAC, 10, inliers); + int ninliers = countNonZero(inliers); + + if( ninliers < min_ninliers ) + { + ts->printf( cvtest::TS::LOG, "too little inliers (%d) vs expected %d\n", ninliers, min_ninliers); + ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); + return; + } + } + + string fname; + int min_ninliers; +}; + +TEST(Features2d_SIFTHomographyTest, regression) { CV_DetectPlanarTest test("SIFT", 80); test.safe_run(); } +//TEST(Features2d_SURFHomographyTest, regression) { CV_DetectPlanarTest test("SURF", 80); test.safe_run(); } + diff --git a/modules/nonfree/test/test_rotation_and_scale_invariance.cpp b/modules/nonfree/test/test_rotation_and_scale_invariance.cpp index 3479be72a7..6262456d9d 100644 --- a/modules/nonfree/test/test_rotation_and_scale_invariance.cpp +++ b/modules/nonfree/test/test_rotation_and_scale_invariance.cpp @@ -186,6 +186,20 @@ void matchKeyPoints(const vector& keypoints0, const Mat& H, } } +static void removeVerySmallKeypoints(vector& keypoints) +{ + size_t i, j = 0, n = keypoints.size(); + for( i = 0; i < n; i++ ) + { + if( (keypoints[i].octave & 128) != 0 ) + ; + else + keypoints[j++] = keypoints[i]; + } + keypoints.resize(j); +} + + class DetectorRotationInvarianceTest : public cvtest::BaseTest { public: @@ -216,6 +230,7 @@ protected: vector keypoints0; featureDetector->detect(image0, keypoints0); + removeVerySmallKeypoints(keypoints0); if(keypoints0.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n"); @@ -226,6 +241,7 @@ protected: vector keypoints1; featureDetector->detect(image1, keypoints1, mask1); + removeVerySmallKeypoints(keypoints1); vector matches; matchKeyPoints(keypoints0, H, keypoints1, matches); @@ -329,6 +345,7 @@ protected: vector keypoints0; Mat descriptors0; featureDetector->detect(image0, keypoints0); + removeVerySmallKeypoints(keypoints0); if(keypoints0.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n"); descriptorExtractor->compute(image0, keypoints0, descriptors0); @@ -382,6 +399,7 @@ protected: float minDescInliersRatio; }; + class DetectorScaleInvarianceTest : public cvtest::BaseTest { public: @@ -412,6 +430,7 @@ protected: vector keypoints0; featureDetector->detect(image0, keypoints0); + removeVerySmallKeypoints(keypoints0); if(keypoints0.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n"); @@ -423,6 +442,7 @@ protected: vector keypoints1, osiKeypoints1; // osi - original size image featureDetector->detect(image1, keypoints1); + removeVerySmallKeypoints(keypoints1); if(keypoints1.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a 
test image\n"); @@ -531,6 +551,7 @@ protected: vector keypoints0; featureDetector->detect(image0, keypoints0); + removeVerySmallKeypoints(keypoints0); if(keypoints0.size() < 15) CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n"); Mat descriptors0; @@ -603,8 +624,8 @@ TEST(Features2d_RotationInvariance_Detector_SURF, regression) TEST(Features2d_RotationInvariance_Detector_SIFT, regression) { DetectorRotationInvarianceTest test(Algorithm::create("Feature2D.SIFT"), - 0.75f, - 0.76f); + 0.45f, + 0.70f); test.safe_run(); } @@ -665,7 +686,7 @@ TEST(Features2d_ScaleInvariance_Descriptor_SIFT, regression) DescriptorScaleInvarianceTest test(Algorithm::create("Feature2D.SIFT"), Algorithm::create("Feature2D.SIFT"), NORM_L1, - 0.87f); + 0.78f); test.safe_run(); } diff --git a/samples/cpp/descriptor_extractor_matcher.cpp b/samples/cpp/descriptor_extractor_matcher.cpp index 9902e2f707..f09e9ea462 100644 --- a/samples/cpp/descriptor_extractor_matcher.cpp +++ b/samples/cpp/descriptor_extractor_matcher.cpp @@ -221,6 +221,8 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective, drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask, DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); #endif + + printf("Number of inliers: %d\n", countNonZero(matchesMask)); } else drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg ); From b4d0dff4c597a2c2b8949dd2281f4f022894b37a Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Tue, 29 Jan 2013 20:13:09 +0400 Subject: [PATCH 13/25] Added minimal support for tiff encoder parameters and test for issue #2161 --- modules/highgui/src/grfmt_tiff.cpp | 18 +++++++++++++++++- modules/highgui/test/test_grfmt.cpp | 19 +++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/modules/highgui/src/grfmt_tiff.cpp b/modules/highgui/src/grfmt_tiff.cpp index d2321ceb5e..07fe6445d2 100644 --- a/modules/highgui/src/grfmt_tiff.cpp +++ b/modules/highgui/src/grfmt_tiff.cpp @@ -402,7 +402,18 @@ void TiffEncoder::writeTag( WLByteStream& strm, TiffTag tag, } #ifdef HAVE_TIFF -bool TiffEncoder::writeLibTiff( const Mat& img, const vector& /*params*/) + +static void readParam(const vector& params, int key, int& value) +{ + for(size_t i = 0; i + 1 < params.size(); i += 2) + if(params[i] == key) + { + value = params[i+1]; + break; + } +} + +bool TiffEncoder::writeLibTiff( const Mat& img, const vector& params) { int channels = img.channels(); int width = img.cols, height = img.rows; @@ -429,7 +440,9 @@ bool TiffEncoder::writeLibTiff( const Mat& img, const vector& /*params*/) const int bitsPerByte = 8; size_t fileStep = (width * channels * bitsPerChannel) / bitsPerByte; + int rowsPerStrip = (int)((1 << 13)/fileStep); + readParam(params, TIFFTAG_ROWSPERSTRIP, rowsPerStrip); if( rowsPerStrip < 1 ) rowsPerStrip = 1; @@ -450,6 +463,9 @@ bool TiffEncoder::writeLibTiff( const Mat& img, const vector& /*params*/) int compression = COMPRESSION_LZW; int predictor = PREDICTOR_HORIZONTAL; + readParam(params, TIFFTAG_COMPRESSION, compression); + readParam(params, TIFFTAG_PREDICTOR, predictor); + int colorspace = channels > 1 ? 
PHOTOMETRIC_RGB : PHOTOMETRIC_MINISBLACK; if ( !TIFFSetField(pTiffHandle, TIFFTAG_IMAGEWIDTH, width) diff --git a/modules/highgui/test/test_grfmt.cpp b/modules/highgui/test/test_grfmt.cpp index 7226ebf7cb..28d30c9b07 100644 --- a/modules/highgui/test/test_grfmt.cpp +++ b/modules/highgui/test/test_grfmt.cpp @@ -291,3 +291,22 @@ TEST(Highgui_Jpeg, encode_empty) ASSERT_THROW(cv::imencode(".jpg", img, jpegImg), cv::Exception); } #endif + + +#ifdef HAVE_TIFF +#include "tiff.h" +TEST(Highgui_Tiff, decode_tile16384x16384) +{ + // see issue #2161 + cv::Mat big(16384, 16384, CV_8UC1, cv::Scalar::all(0)); + string file = cv::tempfile(".tiff"); + std::vector params; + params.push_back(TIFFTAG_ROWSPERSTRIP); + params.push_back(big.rows); + cv::imwrite(file, big, params); + big.release(); + + EXPECT_NO_THROW(cv::imread(file)); + remove(file.c_str()); +} +#endif From 62ce815197e7575c7018cf352c908cfb9798cfff Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Tue, 29 Jan 2013 20:29:31 +0400 Subject: [PATCH 14/25] Fix rollover when computing buffer size in tiff decoder (bug #2161) --- modules/highgui/src/grfmt_tiff.cpp | 31 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/modules/highgui/src/grfmt_tiff.cpp b/modules/highgui/src/grfmt_tiff.cpp index 07fe6445d2..5179531f50 100644 --- a/modules/highgui/src/grfmt_tiff.cpp +++ b/modules/highgui/src/grfmt_tiff.cpp @@ -168,7 +168,6 @@ bool TiffDecoder::readData( Mat& img ) bool result = false; bool color = img.channels() > 1; uchar* data = img.data; - int step = (int)img.step; if( img.depth() != CV_8U && img.depth() != CV_16U && img.depth() != CV_32F && img.depth() != CV_64F ) return false; @@ -211,14 +210,14 @@ bool TiffDecoder::readData( Mat& img ) if( tile_height0 <= 0 ) tile_height0 = m_height; - AutoBuffer _buffer(tile_height0*tile_width0*8); + AutoBuffer _buffer( size_t(8) * tile_height0*tile_width0); uchar* buffer = _buffer; ushort* buffer16 = (ushort*)buffer; float* buffer32 = (float*)buffer; double* buffer64 = (double*)buffer; int tileidx = 0; - for( y = 0; y < m_height; y += tile_height0, data += step*tile_height0 ) + for( y = 0; y < m_height; y += tile_height0, data += img.step*tile_height0 ) { int tile_height = tile_height0; @@ -250,11 +249,11 @@ bool TiffDecoder::readData( Mat& img ) for( i = 0; i < tile_height; i++ ) if( color ) icvCvt_BGRA2BGR_8u_C4C3R( buffer + i*tile_width*4, 0, - data + x*3 + step*(tile_height - i - 1), 0, + data + x*3 + img.step*(tile_height - i - 1), 0, cvSize(tile_width,1), 2 ); else icvCvt_BGRA2Gray_8u_C4C1R( buffer + i*tile_width*4, 0, - data + x + step*(tile_height - i - 1), 0, + data + x + img.step*(tile_height - i - 1), 0, cvSize(tile_width,1), 2 ); break; } @@ -279,19 +278,19 @@ bool TiffDecoder::readData( Mat& img ) if( ncn == 1 ) { icvCvt_Gray2BGR_16u_C1C3R(buffer16 + i*tile_width*ncn, 0, - (ushort*)(data + step*i) + x*3, 0, + (ushort*)(data + img.step*i) + x*3, 0, cvSize(tile_width,1) ); } else if( ncn == 3 ) { icvCvt_RGB2BGR_16u_C3R(buffer16 + i*tile_width*ncn, 0, - (ushort*)(data + step*i) + x*3, 0, + (ushort*)(data + img.step*i) + x*3, 0, cvSize(tile_width,1) ); } else { icvCvt_BGRA2BGR_16u_C4C3R(buffer16 + i*tile_width*ncn, 0, - (ushort*)(data + step*i) + x*3, 0, + (ushort*)(data + img.step*i) + x*3, 0, cvSize(tile_width,1), 2 ); } } @@ -299,14 +298,14 @@ bool TiffDecoder::readData( Mat& img ) { if( ncn == 1 ) { - memcpy((ushort*)(data + step*i)+x, + memcpy((ushort*)(data + img.step*i)+x, buffer16 + i*tile_width*ncn, tile_width*sizeof(buffer16[0])); } else { 
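                        // ncn > 1 in this branch: the decoded 16-bit tile row still carries
                        // several channels, so it is reduced to a single gray channel below
                        // rather than copied verbatim as in the ncn == 1 case above.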
icvCvt_BGRA2Gray_16u_CnC1R(buffer16 + i*tile_width*ncn, 0, - (ushort*)(data + step*i) + x, 0, + (ushort*)(data + img.step*i) + x, 0, cvSize(tile_width,1), ncn, 2 ); } } @@ -332,13 +331,13 @@ bool TiffDecoder::readData( Mat& img ) { if(dst_bpp == 32) { - memcpy((float*)(data + step*i)+x, + memcpy((float*)(data + img.step*i)+x, buffer32 + i*tile_width*ncn, tile_width*sizeof(buffer32[0])); } else { - memcpy((double*)(data + step*i)+x, + memcpy((double*)(data + img.step*i)+x, buffer64 + i*tile_width*ncn, tile_width*sizeof(buffer64[0])); } @@ -485,7 +484,7 @@ bool TiffEncoder::writeLibTiff( const Mat& img, const vector& params) // row buffer, because TIFFWriteScanline modifies the original data! size_t scanlineSize = TIFFScanlineSize(pTiffHandle); - AutoBuffer _buffer(scanlineSize+32); + AutoBuffer _buffer(scanlineSize+32); uchar* buffer = _buffer; if (!buffer) { @@ -593,9 +592,9 @@ bool TiffEncoder::write( const Mat& img, const vector& /*params*/) #endif*/ int directoryOffset = 0; - AutoBuffer stripOffsets(stripCount); - AutoBuffer stripCounts(stripCount); - AutoBuffer _buffer(fileStep+32); + AutoBuffer stripOffsets(stripCount); + AutoBuffer stripCounts(stripCount); + AutoBuffer _buffer(fileStep+32); uchar* buffer = _buffer; int stripOffsetsOffset = 0; int stripCountsOffset = 0; From 568591670cda39425b1abd30194e3da416c8d756 Mon Sep 17 00:00:00 2001 From: Daniil Osokin Date: Tue, 29 Jan 2013 16:27:03 +0400 Subject: [PATCH 15/25] Fixed color code in cvtColor in "Load, Modify, and Save an Image" tutorial (bug #2739) --- .../introduction/load_save_image/load_save_image.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/tutorials/introduction/load_save_image/load_save_image.rst b/doc/tutorials/introduction/load_save_image/load_save_image.rst index 6ea6499982..1a757cfabc 100644 --- a/doc/tutorials/introduction/load_save_image/load_save_image.rst +++ b/doc/tutorials/introduction/load_save_image/load_save_image.rst @@ -15,7 +15,7 @@ In this tutorial you will learn how to: .. container:: enumeratevisibleitemswithsquare * Load an image using :imread:`imread <>` - * Transform an image from RGB to Grayscale format by using :cvt_color:`cvtColor <>` + * Transform an image from BGR to Grayscale format by using :cvt_color:`cvtColor <>` * Save your transformed image in a file on disk (using :imwrite:`imwrite <>`) Code @@ -45,7 +45,7 @@ Here it is: } Mat gray_image; - cvtColor( image, gray_image, CV_RGB2GRAY ); + cvtColor( image, gray_image, CV_BGR2GRAY ); imwrite( "../../images/Gray_Image.jpg", gray_image ); @@ -68,11 +68,11 @@ Explanation * Creating a Mat object to store the image information * Load an image using :imread:`imread <>`, located in the path given by *imageName*. Fort this example, assume you are loading a RGB image. -#. Now we are going to convert our image from RGB to Grayscale format. OpenCV has a really nice function to do this kind of transformations: +#. Now we are going to convert our image from BGR to Grayscale format. OpenCV has a really nice function to do this kind of transformations: .. code-block:: cpp - cvtColor( image, gray_image, CV_RGB2GRAY ); + cvtColor( image, gray_image, CV_BGR2GRAY ); As you can see, :cvt_color:`cvtColor <>` takes as arguments: @@ -80,7 +80,7 @@ Explanation * a source image (*image*) * a destination image (*gray_image*), in which we will save the converted image. - * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_RGB2GRAY** (self-explanatory). 
+ * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :imread:`imread <>` has BGR default channel order in case of color images). #. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analagous to :imread:`imread <>`: :imwrite:`imwrite <>` From f9de98ec6473107e99c9fc8dcdfeb6bc2f49c18b Mon Sep 17 00:00:00 2001 From: Daniil Osokin Date: Wed, 30 Jan 2013 08:58:58 +0400 Subject: [PATCH 16/25] Fixed proposed values for speckleRange in StereoSGBM docs (bug #1937) --- .../calib3d/doc/camera_calibration_and_3d_reconstruction.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst index 887accd95c..b21d6f2bb7 100644 --- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst @@ -1201,7 +1201,7 @@ StereoSGBM::StereoSGBM :param speckleWindowSize: Maximum size of smooth disparity regions to consider their noise speckles and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range. - :param speckleRange: Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value, multiple of 16. Normally, 16 or 32 is good enough. + :param speckleRange: Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value, it will be implicitly multiplied by 16. Normally, 1 or 2 is good enough. :param fullDP: Set it to ``true`` to run the full-scale two-pass dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, which is large for 640x480 stereo and huge for HD-size pictures. By default, it is set to ``false`` . From 7305f955a5b4f4f2a6681ec518a79084fc362357 Mon Sep 17 00:00:00 2001 From: Daniil Osokin Date: Wed, 30 Jan 2013 11:06:48 +0400 Subject: [PATCH 17/25] Added nonfree header in "Feature description" tutorial code sample (bug #2527) --- .../features2d/feature_description/feature_description.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/tutorials/features2d/feature_description/feature_description.rst b/doc/tutorials/features2d/feature_description/feature_description.rst index 9ba777500a..fe9b7cdbff 100644 --- a/doc/tutorials/features2d/feature_description/feature_description.rst +++ b/doc/tutorials/features2d/feature_description/feature_description.rst @@ -32,6 +32,7 @@ This tutorial code's is shown lines below. You can also download it from `here < #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" + #include "opencv2/nonfree/features2d.hpp" using namespace cv; From 82e325cbfa8f539ea7a83b7c9d8157b033b34850 Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Wed, 30 Jan 2013 11:44:14 +0400 Subject: [PATCH 18/25] Patch #2721 More FourCC for gstreamer applied. 
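For reference, each entry added below maps a FourCC code to the GStreamer encoder element that the writer pipeline will instantiate. A minimal caller-side sketch follows; the file name, frame rate, frame size and the 'frame' matrix are illustrative only, and the chosen codec still requires the corresponding GStreamer plugin to be installed:

    // Hypothetical usage: this FourCC is looked up in the table below and routed
    // to the matching element (here 'H264' -> x264enc).
    cv::VideoWriter writer("out.avi", CV_FOURCC('H','2','6','4'), 25.0, cv::Size(640, 480));
    if (writer.isOpened())
        writer << frame;   // frame: a CV_8UC3 image with the same 640x480 size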
--- modules/highgui/src/cap_gstreamer.cpp | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/modules/highgui/src/cap_gstreamer.cpp b/modules/highgui/src/cap_gstreamer.cpp index 863ddad096..cafc803db4 100644 --- a/modules/highgui/src/cap_gstreamer.cpp +++ b/modules/highgui/src/cap_gstreamer.cpp @@ -461,12 +461,19 @@ protected: void CvVideoWriter_GStreamer::init() { - encs[CV_FOURCC('H','F','Y','U')]=(char*)"ffenc_huffyuv"; encs[CV_FOURCC('D','R','A','C')]=(char*)"diracenc"; - encs[CV_FOURCC('X','V','I','D')]=(char*)"xvidenc"; - encs[CV_FOURCC('X','2','6','4')]=(char*)"x264enc"; + encs[CV_FOURCC('H','F','Y','U')]=(char*)"ffenc_huffyuv"; + encs[CV_FOURCC('J','P','E','G')]=(char*)"jpegenc"; + encs[CV_FOURCC('M','J','P','G')]=(char*)"jpegenc"; encs[CV_FOURCC('M','P','1','V')]=(char*)"mpeg2enc"; - //encs[CV_FOURCC('M','P','2','V')]=(char*)"mpeg2enc"; + encs[CV_FOURCC('M','P','2','V')]=(char*)"mpeg2enc"; + encs[CV_FOURCC('T','H','E','O')]=(char*)"theoraenc"; + encs[CV_FOURCC('V','P','8','0')]=(char*)"vp8enc"; + encs[CV_FOURCC('H','2','6','4')]=(char*)"x264enc"; + encs[CV_FOURCC('X','2','6','4')]=(char*)"x264enc"; + encs[CV_FOURCC('X','V','I','D')]=(char*)"xvidenc"; + encs[CV_FOURCC('F','F','Y','U')]=(char*)"y4menc"; + //encs[CV_FOURCC('H','F','Y','U')]=(char*)"y4menc"; pipeline=0; buffer=0; } From 9690ed8232b04c3bcaac55b88bb8e05c1062a868 Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Wed, 30 Jan 2013 12:48:01 +0400 Subject: [PATCH 19/25] Fix build of OpenCV samples (Linux) --- samples/cpp/descriptor_extractor_matcher.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/cpp/descriptor_extractor_matcher.cpp b/samples/cpp/descriptor_extractor_matcher.cpp index f09e9ea462..7aa5299103 100644 --- a/samples/cpp/descriptor_extractor_matcher.cpp +++ b/samples/cpp/descriptor_extractor_matcher.cpp @@ -222,7 +222,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective, DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); #endif - printf("Number of inliers: %d\n", countNonZero(matchesMask)); + cout << "Number of inliers: " << countNonZero(matchesMask) << endl; } else drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg ); From 9ce2197e9d294492fd55100a592ae1f7202872d6 Mon Sep 17 00:00:00 2001 From: Ilya Lysenkov Date: Wed, 30 Jan 2013 13:03:03 +0400 Subject: [PATCH 20/25] Added processing of trivial subsets --- modules/calib3d/src/modelest.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/calib3d/src/modelest.cpp b/modules/calib3d/src/modelest.cpp index b6441e22cb..48d95c1aca 100644 --- a/modules/calib3d/src/modelest.cpp +++ b/modules/calib3d/src/modelest.cpp @@ -319,6 +319,9 @@ bool CvModelEstimator2::getSubset( const CvMat* m1, const CvMat* m2, bool CvModelEstimator2::checkSubset( const CvMat* m, int count ) { + if( count <= 2 ) + return true; + int j, k, i, i0, i1; CvPoint2D64f* ptr = (CvPoint2D64f*)m->data.ptr; From 627b44102281d01bd96b501e078d657f836c4ebc Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Wed, 30 Jan 2013 13:05:57 +0400 Subject: [PATCH 21/25] Bugfix #2532 patch 4 corner detection sample possible bug solved. Tutorial text was not consistent with tutorial source code in samples directory. Inline source code was replaced on "includeliteral" directive with link to cpp file. 
--- .../generic_corner_detector.rst | 123 +----------------- 1 file changed, 2 insertions(+), 121 deletions(-) diff --git a/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst b/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst index 5dabe60048..465ff216cb 100644 --- a/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst +++ b/doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst @@ -22,127 +22,8 @@ Code This tutorial code's is shown lines below. You can also download it from `here `_ -.. code-block:: cpp - - #include "opencv2/highgui/highgui.hpp" - #include "opencv2/imgproc/imgproc.hpp" - #include - #include - #include - - using namespace cv; - using namespace std; - - /// Global variables - Mat src, src_gray; - Mat myHarris_dst; Mat myHarris_copy; Mat Mc; - Mat myShiTomasi_dst; Mat myShiTomasi_copy; - - int myShiTomasi_qualityLevel = 50; - int myHarris_qualityLevel = 50; - int max_qualityLevel = 100; - - double myHarris_minVal; double myHarris_maxVal; - double myShiTomasi_minVal; double myShiTomasi_maxVal; - - RNG rng(12345); - - char* myHarris_window = "My Harris corner detector"; - char* myShiTomasi_window = "My Shi Tomasi corner detector"; - - /// Function headers - void myShiTomasi_function( int, void* ); - void myHarris_function( int, void* ); - - /** @function main */ - int main( int argc, char** argv ) - { - /// Load source image and convert it to gray - src = imread( argv[1], 1 ); - cvtColor( src, src_gray, CV_BGR2GRAY ); - - /// Set some parameters - int blockSize = 3; int apertureSize = 3; - - /// My Harris matrix -- Using cornerEigenValsAndVecs - myHarris_dst = Mat::zeros( src_gray.size(), CV_32FC(6) ); - Mc = Mat::zeros( src_gray.size(), CV_32FC1 ); - - cornerEigenValsAndVecs( src_gray, myHarris_dst, blockSize, apertureSize, BORDER_DEFAULT ); - - /* calculate Mc */ - for( int j = 0; j < src_gray.rows; j++ ) - { for( int i = 0; i < src_gray.cols; i++ ) - { - float lambda_1 = myHarris_dst.at( j, i, 0 ); - float lambda_2 = myHarris_dst.at( j, i, 1 ); - Mc.at(j,i) = lambda_1*lambda_2 - 0.04*pow( ( lambda_1 + lambda_2 ), 2 ); - } - } - - minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, Mat() ); - - /* Create Window and Trackbar */ - namedWindow( myHarris_window, CV_WINDOW_AUTOSIZE ); - createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, - myHarris_function ); - myHarris_function( 0, 0 ); - - /// My Shi-Tomasi -- Using cornerMinEigenVal - myShiTomasi_dst = Mat::zeros( src_gray.size(), CV_32FC1 ); - cornerMinEigenVal( src_gray, myShiTomasi_dst, blockSize, apertureSize, BORDER_DEFAULT ); - - minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, Mat() ); - - /* Create Window and Trackbar */ - namedWindow( myShiTomasi_window, CV_WINDOW_AUTOSIZE ); - createTrackbar( " Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, - myShiTomasi_function ); - myShiTomasi_function( 0, 0 ); - - waitKey(0); - return(0); - } - - /** @function myShiTomasi_function */ - void myShiTomasi_function( int, void* ) - { - myShiTomasi_copy = src.clone(); - - if( myShiTomasi_qualityLevel < 1 ) { myShiTomasi_qualityLevel = 1; } - - for( int j = 0; j < src_gray.rows; j++ ) - { for( int i = 0; i < src_gray.cols; i++ ) - { - if( myShiTomasi_dst.at(j,i) > myShiTomasi_minVal + ( myShiTomasi_maxVal - - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel 
) - { circle( myShiTomasi_copy, Point(i,j), 4, Scalar( rng.uniform(0,255), - rng.uniform(0,255), rng.uniform(0,255) ), -1, 8, 0 ); } - } - } - imshow( myShiTomasi_window, myShiTomasi_copy ); - } - - /** @function myHarris_function */ - void myHarris_function( int, void* ) - { - myHarris_copy = src.clone(); - - if( myHarris_qualityLevel < 1 ) { myHarris_qualityLevel = 1; } - - for( int j = 0; j < src_gray.rows; j++ ) - { for( int i = 0; i < src_gray.cols; i++ ) - { - if( Mc.at(j,i) > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal ) - *myHarris_qualityLevel/max_qualityLevel ) - { circle( myHarris_copy, Point(i,j), 4, Scalar( rng.uniform(0,255), rng.uniform(0,255), - rng.uniform(0,255) ), -1, 8, 0 ); } - } - } - imshow( myHarris_window, myHarris_copy ); - } - - +.. literalinclude:: ../../../../../samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp + :language: cpp Explanation ============ From eeb865ee8a18deabe20af0c2b23f6ec4b1084b7d Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Wed, 30 Jan 2013 13:11:33 +0400 Subject: [PATCH 22/25] Fix Android build warnings --- modules/calib3d/src/_modelest.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/calib3d/src/_modelest.h b/modules/calib3d/src/_modelest.h index d30e4f4957..2488a934b1 100644 --- a/modules/calib3d/src/_modelest.h +++ b/modules/calib3d/src/_modelest.h @@ -43,7 +43,7 @@ #ifndef _CV_MODEL_EST_H_ #define _CV_MODEL_EST_H_ -#include "precomp.hpp" +#include "opencv2/calib3d/calib3d.hpp" class CV_EXPORTS CvModelEstimator2 { From 6feade31104366936e610a616feeb9e65f66c084 Mon Sep 17 00:00:00 2001 From: Ilya Lysenkov Date: Wed, 30 Jan 2013 13:19:12 +0400 Subject: [PATCH 23/25] Added support of different resolution in rectify3Collinear --- modules/calib3d/src/calibration.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/calib3d/src/calibration.cpp b/modules/calib3d/src/calibration.cpp index 62a1c3c006..7a5c8d948c 100644 --- a/modules/calib3d/src/calibration.cpp +++ b/modules/calib3d/src/calibration.cpp @@ -3735,13 +3735,13 @@ float cv::rectify3Collinear( InputArray _cameraMatrix1, InputArray _distCoeffs1, OutputArray _Rmat1, OutputArray _Rmat2, OutputArray _Rmat3, OutputArray _Pmat1, OutputArray _Pmat2, OutputArray _Pmat3, OutputArray _Qmat, - double alpha, Size /*newImgSize*/, + double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags ) { // first, rectify the 1-2 stereo pair stereoRectify( _cameraMatrix1, _distCoeffs1, _cameraMatrix2, _distCoeffs2, imageSize, _Rmat12, _Tmat12, _Rmat1, _Rmat2, _Pmat1, _Pmat2, _Qmat, - flags, alpha, imageSize, roi1, roi2 ); + flags, alpha, newImgSize, roi1, roi2 ); Mat R12 = _Rmat12.getMat(), R13 = _Rmat13.getMat(), T12 = _Tmat12.getMat(), T13 = _Tmat13.getMat(); From e79e81c6cd77865266f710561af403462840b26a Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Wed, 30 Jan 2013 13:24:49 +0400 Subject: [PATCH 24/25] Fix Windows build warnings --- modules/calib3d/test/test_modelest.cpp | 2 +- modules/ts/src/ts.cpp | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/calib3d/test/test_modelest.cpp b/modules/calib3d/test/test_modelest.cpp index 91bf4b092f..e27c12d49c 100644 --- a/modules/calib3d/test/test_modelest.cpp +++ b/modules/calib3d/test/test_modelest.cpp @@ -159,7 +159,7 @@ void CV_ModelEstimator2_Test::fill_array( int test_case_idx, int i, int j, Mat& Point2d tangentVector_1 = arr.at(endPointIndex) - arr.at(startPointIndex); Point2d tangentVector_2 = arr.at(testPointIndex) - 
arr.at(startPointIndex); - const float eps = 1e-4; + const float eps = 1e-4f; //TODO: perhaps it is better to normalize the cross product by norms of the tangent vectors if (fabs(tangentVector_1.cross(tangentVector_2)) < eps) { diff --git a/modules/ts/src/ts.cpp b/modules/ts/src/ts.cpp index 4ae9e7ea4d..7ce37752db 100644 --- a/modules/ts/src/ts.cpp +++ b/modules/ts/src/ts.cpp @@ -47,8 +47,12 @@ #include #if defined WIN32 || defined _WIN32 || defined WIN64 || defined _WIN64 #include -#define NOMINMAX + #include +#undef small +#undef min +#undef max +#undef abs #ifdef _MSC_VER #include From 68be50bbabfb0bd624b67fd16fc773f1e7a8b41d Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Wed, 30 Jan 2013 13:31:00 +0400 Subject: [PATCH 25/25] Fix clang build warning --- samples/cpp/videostab.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/samples/cpp/videostab.cpp b/samples/cpp/videostab.cpp index bd4e8533f4..8b8504a1e6 100644 --- a/samples/cpp/videostab.cpp +++ b/samples/cpp/videostab.cpp @@ -1,3 +1,7 @@ +#if defined __clang__ +# pragma GCC diagnostic ignored "-Wdelete-non-virtual-dtor" +#endif + #include #include #include
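// A scoped variant of the same suppression idiom, shown for illustration only and
// not part of the patch: push/pop restores the warning outside the region that
// actually triggers it, instead of silencing it for the whole translation unit.
//
//   #if defined __clang__
//   #  pragma GCC diagnostic push
//   #  pragma GCC diagnostic ignored "-Wdelete-non-virtual-dtor"
//   #endif
//   /* code that deletes through a base pointer lacking a virtual destructor */
//   #if defined __clang__
//   #  pragma GCC diagnostic pop
//   #endif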