From 7fe1664bbf5195651a065081b97f765faab85877 Mon Sep 17 00:00:00 2001 From: MaximSmolskiy Date: Sun, 10 Mar 2024 17:49:27 +0300 Subject: [PATCH 01/21] Use initial quads corners in ChessBoardDetector::findQuadNeighbors --- modules/calib3d/src/calibinit.cpp | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/modules/calib3d/src/calibinit.cpp b/modules/calib3d/src/calibinit.cpp index 7e9b87ba5d..991f931097 100644 --- a/modules/calib3d/src/calibinit.cpp +++ b/modules/calib3d/src/calibinit.cpp @@ -1631,10 +1631,11 @@ void ChessBoardDetector::findQuadNeighbors() continue; float min_dist = FLT_MAX; + int closest_neighbor_idx = -1; int closest_corner_idx = -1; ChessBoardQuad *closest_quad = 0; - cv::Point2f pt = cur_quad.corners[i]->pt; + cv::Point2f pt = all_quads_pts[(idx << 2) + i]; // find the closest corner in all other quadrangles std::vector query = Mat(pt); @@ -1654,7 +1655,7 @@ void ChessBoardDetector::findQuadNeighbors() if (q_k.neighbors[j]) continue; - const float dist = normL2Sqr(pt - q_k.corners[j]->pt); + const float dist = normL2Sqr(pt - all_quads_pts[neighbor_idx]); if (dist < min_dist && dist <= cur_quad.edge_len * thresh_scale && dist <= q_k.edge_len * thresh_scale) @@ -1669,6 +1670,7 @@ void ChessBoardDetector::findQuadNeighbors() DPRINTF("Incompatible edge lengths"); continue; } + closest_neighbor_idx = neighbor_idx; closest_corner_idx = j; closest_quad = &q_k; min_dist = dist; @@ -1676,7 +1678,7 @@ void ChessBoardDetector::findQuadNeighbors() } // we found a matching corner point? - if (closest_corner_idx >= 0 && min_dist < FLT_MAX) + if (closest_neighbor_idx >= 0 && closest_corner_idx >= 0 && min_dist < FLT_MAX) { CV_Assert(closest_quad); @@ -1688,6 +1690,7 @@ void ChessBoardDetector::findQuadNeighbors() // This is necessary to support small squares where otherwise the wrong // corner will get matched to closest_quad; ChessBoardCorner& closest_corner = *closest_quad->corners[closest_corner_idx]; + cv::Point2f closest_corner_pt = all_quads_pts[closest_neighbor_idx]; int j = 0; for (; j < 4; j++) @@ -1695,7 +1698,7 @@ void ChessBoardDetector::findQuadNeighbors() if (cur_quad.neighbors[j] == closest_quad) break; - if (normL2Sqr(closest_corner.pt - cur_quad.corners[j]->pt) < min_dist) + if (normL2Sqr(closest_corner_pt - all_quads_pts[(idx << 2) + j]) < min_dist) break; } if (j < 4) @@ -1710,9 +1713,8 @@ void ChessBoardDetector::findQuadNeighbors() if (j < 4) continue; - // check whether the closest corner to closest_corner - // is different from cur_quad->corners[i]->pt - query = Mat(closest_corner.pt); + // check whether the closest corner to closest_corner is different from pt + query = Mat(closest_corner_pt); radius = min_dist + 1; neighbors_count = all_quads_pts_index.radiusSearch(query, neighbors_indices, neighbors_dists, radius, search_params); @@ -1730,14 +1732,14 @@ void ChessBoardDetector::findQuadNeighbors() CV_DbgAssert(q); if (!q->neighbors[k]) { - if (normL2Sqr(closest_corner.pt - q->corners[k]->pt) < min_dist) + if (normL2Sqr(closest_corner_pt - all_quads_pts[neighbor_idx]) < min_dist) break; } } if (neighbor_idx_idx < neighbors_count) continue; - closest_corner.pt = (pt + closest_corner.pt) * 0.5f; + closest_corner.pt = (pt + closest_corner_pt) * 0.5f; // We've found one more corner - remember it cur_quad.count++; From 13c95efa74690cace66316f04894ce63b9d11570 Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Wed, 3 Apr 2024 09:41:40 +0300 Subject: [PATCH 02/21] Merge pull request #25312 from dkurt:dnn_hotfix_tflite Ownership 
check in TFLite importer #25312 ### Pull Request Readiness Checklist resolves https://github.com/opencv/opencv/issues/25310 See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request - [x] I agree to contribute to the project under Apache 2 License. - [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV - [x] The PR is proposed to the proper branch - [x] There is a reference to the original bug report and related work - [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable Patch to opencv_extra has the same branch name. - [x] The feature is well documented and sample code can be built with the project CMake --- modules/dnn/src/tflite/tflite_importer.cpp | 7 ++++--- modules/dnn/test/test_tflite_importer.cpp | 2 -- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/modules/dnn/src/tflite/tflite_importer.cpp b/modules/dnn/src/tflite/tflite_importer.cpp index a23bff2545..8850cd9ad2 100644 --- a/modules/dnn/src/tflite/tflite_importer.cpp +++ b/modules/dnn/src/tflite/tflite_importer.cpp @@ -300,6 +300,10 @@ void TFLiteImporter::addLayer(LayerParams& layerParams, const Operator& op) { Mat blob = allTensors[idx]; layerParams.blobs.push_back(blob.u ? blob : blob.clone()); // some tensors are owned by OpenCV } + } else { + for (auto& blob : layerParams.blobs) { + CV_Assert(blob.u); + } } int dtype = CV_32F; @@ -830,9 +834,6 @@ void TFLiteImporter::parseFullyConnected(const Operator& op, const std::string& auto options = op.builtin_options_as_FullyConnectedOptions(); CV_Assert(options); - int idx = op.inputs()->Get(1); - Mat weights = allTensors[idx]; - layerParams.blobs.resize(1, weights); layerParams.set("transB", true); layerParams.set("constB", true); addLayer(layerParams, op); diff --git a/modules/dnn/test/test_tflite_importer.cpp b/modules/dnn/test/test_tflite_importer.cpp index 26f2c373b8..291d1f50d2 100644 --- a/modules/dnn/test/test_tflite_importer.cpp +++ b/modules/dnn/test/test_tflite_importer.cpp @@ -240,8 +240,6 @@ TEST_P(Test_TFLite, split) { } TEST_P(Test_TFLite, fully_connected) { - if (backend == DNN_BACKEND_CUDA) - applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA, CV_TEST_TAG_DNN_SKIP_CUDA_FP16); if (backend == DNN_BACKEND_VKCOM) applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); testLayer("fully_connected"); From 45587f23db3554fcc88867201df1bc3b1aa9b975 Mon Sep 17 00:00:00 2001 From: sepperliu <165877317+sepperliu@users.noreply.github.com> Date: Wed, 3 Apr 2024 14:54:06 +0800 Subject: [PATCH 03/21] Update highgui+HighGui.java toBufferedImage Modifying the original array copying method here can double the speed. 
--- modules/highgui/misc/java/src/java/highgui+HighGui.java | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/modules/highgui/misc/java/src/java/highgui+HighGui.java b/modules/highgui/misc/java/src/java/highgui+HighGui.java index 87a0ec127a..7a25095166 100644 --- a/modules/highgui/misc/java/src/java/highgui+HighGui.java +++ b/modules/highgui/misc/java/src/java/highgui+HighGui.java @@ -62,14 +62,9 @@ public final class HighGui { if (m.channels() > 1) { type = BufferedImage.TYPE_3BYTE_BGR; } - - int bufferSize = m.channels() * m.cols() * m.rows(); - byte[] b = new byte[bufferSize]; - m.get(0, 0, b); // get all the pixels BufferedImage image = new BufferedImage(m.cols(), m.rows(), type); - final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData(); - System.arraycopy(b, 0, targetPixels, 0, b.length); + m.get(0, 0, targetPixels); return image; } From 2d864c3af93b69ce87e64984b1fabb226f89eab2 Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Wed, 3 Apr 2024 14:30:11 +0300 Subject: [PATCH 04/21] Export TIFF compression options as API and git rid of tiff.h. --- .../imgcodecs/include/opencv2/imgcodecs.hpp | 46 ++++++++++++++++++- modules/imgcodecs/test/test_tiff.cpp | 18 +++----- 2 files changed, 51 insertions(+), 13 deletions(-) diff --git a/modules/imgcodecs/include/opencv2/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs.hpp index df02eaf021..0ca202722d 100644 --- a/modules/imgcodecs/include/opencv2/imgcodecs.hpp +++ b/modules/imgcodecs/include/opencv2/imgcodecs.hpp @@ -103,9 +103,9 @@ enum ImwriteFlags { IMWRITE_TIFF_RESUNIT = 256,//!< For TIFF, use to specify which DPI resolution unit to set; see libtiff documentation for valid values IMWRITE_TIFF_XDPI = 257,//!< For TIFF, use to specify the X direction DPI IMWRITE_TIFF_YDPI = 258,//!< For TIFF, use to specify the Y direction DPI - IMWRITE_TIFF_COMPRESSION = 259,//!< For TIFF, use to specify the image compression scheme. See libtiff for integer constants corresponding to compression formats. Note, for images whose depth is CV_32F, only libtiff's SGILOG compression scheme is used. For other supported depths, the compression scheme can be specified by this flag; LZW compression is the default. + IMWRITE_TIFF_COMPRESSION = 259,//!< For TIFF, use to specify the image compression scheme. See cv::ImwriteTiffCompressionFlags. Note, for images whose depth is CV_32F, only libtiff's SGILOG compression scheme is used. For other supported depths, the compression scheme can be specified by this flag; LZW compression is the default. IMWRITE_TIFF_ROWSPERSTRIP = 278,//!< For TIFF, use to specify the number of rows per strip. - IMWRITE_TIFF_PREDICTOR = 317,//!< For TIFF, use to specify predictor. + IMWRITE_TIFF_PREDICTOR = 317,//!< For TIFF, use to specify predictor. See cv::ImwriteTiffPredictorFlags. IMWRITE_JPEG2000_COMPRESSION_X1000 = 272,//!< For JPEG2000, use to specify the target compression rate (multiplied by 1000). The value can be from 0 to 1000. Default is 1000. IMWRITE_AVIF_QUALITY = 512,//!< For AVIF, it can be a quality between 0 and 100 (the higher the better). Default is 95. IMWRITE_AVIF_DEPTH = 513,//!< For AVIF, it can be 8, 10 or 12. If >8, it is stored/read as CV_32F. Default is 8. 
@@ -120,6 +120,48 @@ enum ImwriteJPEGSamplingFactorParams { IMWRITE_JPEG_SAMPLING_FACTOR_444 = 0x111111 //!< 1x1,1x1,1x1(No subsampling) }; +enum ImwriteTiffCompressionFlags { + IMWRITE_TIFF_COMPRESSION_NONE = 1, //!< dump mode + IMWRITE_TIFF_COMPRESSION_CCITTRLE = 2, //!< CCITT modified Huffman RLE + IMWRITE_TIFF_COMPRESSION_CCITTFAX3 = 3, //!< CCITT Group 3 fax encoding + IMWRITE_TIFF_COMPRESSION_CCITT_T4 = 3, //!< CCITT T.4 (TIFF 6 name) + IMWRITE_TIFF_COMPRESSION_CCITTFAX4 = 4, //!< CCITT Group 4 fax encoding + IMWRITE_TIFF_COMPRESSION_CCITT_T6 = 4, //!< CCITT T.6 (TIFF 6 name) + IMWRITE_TIFF_COMPRESSION_LZW = 5, //!< Lempel-Ziv & Welch + IMWRITE_TIFF_COMPRESSION_OJPEG = 6, //!< !6.0 JPEG + IMWRITE_TIFF_COMPRESSION_JPEG = 7, //!< %JPEG DCT compression + IMWRITE_TIFF_COMPRESSION_T85 = 9, //!< !TIFF/FX T.85 JBIG compression + IMWRITE_TIFF_COMPRESSION_T43 = 10, //!< !TIFF/FX T.43 colour by layered JBIG compression + IMWRITE_TIFF_COMPRESSION_NEXT = 32766, //!< NeXT 2-bit RLE + IMWRITE_TIFF_COMPRESSION_CCITTRLEW = 32771, //!< #1 w/ word alignment + IMWRITE_TIFF_COMPRESSION_PACKBITS = 32773, //!< Macintosh RLE + IMWRITE_TIFF_COMPRESSION_THUNDERSCAN = 32809, //!< ThunderScan RLE + IMWRITE_TIFF_COMPRESSION_IT8CTPAD = 32895, //!< IT8 CT w/padding + IMWRITE_TIFF_COMPRESSION_IT8LW = 32896, //!< IT8 Linework RLE + IMWRITE_TIFF_COMPRESSION_IT8MP = 32897, //!< IT8 Monochrome picture + IMWRITE_TIFF_COMPRESSION_IT8BL = 32898, //!< IT8 Binary line art + IMWRITE_TIFF_COMPRESSION_PIXARFILM = 32908, //!< Pixar companded 10bit LZW + IMWRITE_TIFF_COMPRESSION_PIXARLOG = 32909, //!< Pixar companded 11bit ZIP + IMWRITE_TIFF_COMPRESSION_DEFLATE = 32946, //!< Deflate compression, legacy tag + IMWRITE_TIFF_COMPRESSION_ADOBE_DEFLATE = 8, //!< Deflate compression, as recognized by Adobe + IMWRITE_TIFF_COMPRESSION_DCS = 32947, //!< Kodak DCS encoding + IMWRITE_TIFF_COMPRESSION_JBIG = 34661, //!< ISO JBIG + IMWRITE_TIFF_COMPRESSION_SGILOG = 34676, //!< SGI Log Luminance RLE + IMWRITE_TIFF_COMPRESSION_SGILOG24 = 34677, //!< SGI Log 24-bit packed + IMWRITE_TIFF_COMPRESSION_JP2000 = 34712, //!< Leadtools JPEG2000 + IMWRITE_TIFF_COMPRESSION_LERC = 34887, //!< ESRI Lerc codec: https://github.com/Esri/lerc + IMWRITE_TIFF_COMPRESSION_LZMA = 34925, //!< LZMA2 + IMWRITE_TIFF_COMPRESSION_ZSTD = 50000, //!< ZSTD: WARNING not registered in Adobe-maintained registry + IMWRITE_TIFF_COMPRESSION_WEBP = 50001, //!< WEBP: WARNING not registered in Adobe-maintained registry + IMWRITE_TIFF_COMPRESSION_JXL = 50002 //!< JPEGXL: WARNING not registered in Adobe-maintained registry +}; + +enum ImwriteTiffPredictorFlags { + IMWRITE_TIFF_PREDICTOR_NONE = 1, //!< no prediction scheme used + IMWRITE_TIFF_PREDICTOR_HORIZONTAL = 2, //!< horizontal differencing + IMWRITE_TIFF_PREDICTOR_FLOATINGPOINT = 3 //!< floating point predictor + +}; enum ImwriteEXRTypeFlags { /*IMWRITE_EXR_TYPE_UNIT = 0, //!< not supported */ diff --git a/modules/imgcodecs/test/test_tiff.cpp b/modules/imgcodecs/test/test_tiff.cpp index bd815c02a0..8ff7db52eb 100644 --- a/modules/imgcodecs/test/test_tiff.cpp +++ b/modules/imgcodecs/test/test_tiff.cpp @@ -9,11 +9,6 @@ namespace opencv_test { namespace { #ifdef HAVE_TIFF -// these defines are used to resolve conflict between tiff.h and opencv2/core/types_c.h -#define uint64 uint64_hack_ -#define int64 int64_hack_ -#include "tiff.h" - #ifdef __ANDROID__ // Test disabled as it uses a lot of memory. // It is killed with SIGKILL by out of memory killer. 
@@ -767,7 +762,7 @@ TEST(Imgcodecs_Tiff, readWrite_32FC3_RAW) std::vector params; params.push_back(IMWRITE_TIFF_COMPRESSION); - params.push_back(COMPRESSION_NONE); + params.push_back(IMWRITE_TIFF_COMPRESSION_NONE); ASSERT_TRUE(cv::imwrite(filenameOutput, img, params)); const Mat img2 = cv::imread(filenameOutput, IMREAD_UNCHANGED); @@ -816,8 +811,9 @@ TEST(Imgcodecs_Tiff, readWrite_predictor) cv::Mat mat(10, 16, CV_8UC1, (void*)sample_data); int methods[] = { - COMPRESSION_NONE, COMPRESSION_LZW, - COMPRESSION_PACKBITS, COMPRESSION_DEFLATE, COMPRESSION_ADOBE_DEFLATE + IMWRITE_TIFF_COMPRESSION_NONE, IMWRITE_TIFF_COMPRESSION_LZW, + IMWRITE_TIFF_COMPRESSION_PACKBITS, IMWRITE_TIFF_COMPRESSION_DEFLATE, + IMWRITE_TIFF_COMPRESSION_ADOBE_DEFLATE }; for (size_t i = 0; i < sizeof(methods) / sizeof(int); i++) { @@ -827,7 +823,7 @@ TEST(Imgcodecs_Tiff, readWrite_predictor) params.push_back(IMWRITE_TIFF_COMPRESSION); params.push_back(methods[i]); params.push_back(IMWRITE_TIFF_PREDICTOR); - params.push_back(PREDICTOR_HORIZONTAL); + params.push_back(IMWRITE_TIFF_PREDICTOR_HORIZONTAL); EXPECT_NO_THROW(cv::imwrite(out, mat, params)); @@ -863,7 +859,7 @@ TEST_P(Imgcodecs_Tiff_Types, readWrite_alltypes) { std::vector params; params.push_back(IMWRITE_TIFF_COMPRESSION); - params.push_back(COMPRESSION_LZW); + params.push_back(IMWRITE_TIFF_COMPRESSION_LZW); ASSERT_NO_THROW(cv::imencode(".tiff", src, bufLZW, params)); Mat dstLZW; @@ -878,7 +874,7 @@ TEST_P(Imgcodecs_Tiff_Types, readWrite_alltypes) { std::vector params; params.push_back(IMWRITE_TIFF_COMPRESSION); - params.push_back(COMPRESSION_NONE); + params.push_back(IMWRITE_TIFF_COMPRESSION_NONE); ASSERT_NO_THROW(cv::imencode(".tiff", src, bufRAW, params)); Mat dstRAW; From d81cd13bb37a3827998fb98359827718a8478b13 Mon Sep 17 00:00:00 2001 From: catree Date: Thu, 28 Mar 2024 16:26:43 +0100 Subject: [PATCH 05/21] Use cvtColor() for Bayer image color demosaicing and for V4L2_PIX_FMT_SRGGB8, V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SGBRG8, V4L2_PIX_FMT_SGRBG8 options. Update modules/videoio/test/test_v4l2.cpp test file. 
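For reference, the replacement delegates demosaicing to imgproc. Below is a minimal sketch (not part of this patch) of the equivalent call on an 8-bit single-channel Bayer frame; the exact COLOR_Bayer* code per V4L2 pixel format is the mapping chosen in the diff that follows (e.g. V4L2_PIX_FMT_SBGGR8 -> COLOR_BayerRG2BGR), and demosaicToBgr is only an illustrative helper name:

    // Illustrative only: demosaic a raw Bayer frame with cv::cvtColor,
    // the same way the updated convertToRgb() does instead of hand-written loops.
    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    cv::Mat demosaicToBgr(const cv::Mat& bayer)           // CV_8UC1 raw sensor data
    {
        CV_Assert(bayer.type() == CV_8UC1);
        cv::Mat bgr;
        cv::cvtColor(bayer, bgr, cv::COLOR_BayerRG2BGR);  // pattern code used for SBGGR8 in the diff
        return bgr;
    }
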
--- modules/videoio/src/cap_v4l.cpp | 308 +++-------------------------- modules/videoio/test/test_v4l2.cpp | 21 +- 2 files changed, 47 insertions(+), 282 deletions(-) diff --git a/modules/videoio/src/cap_v4l.cpp b/modules/videoio/src/cap_v4l.cpp index 48cc0062da..531af03d1a 100644 --- a/modules/videoio/src/cap_v4l.cpp +++ b/modules/videoio/src/cap_v4l.cpp @@ -1321,262 +1321,6 @@ yuv411p_to_rgb24(int width, int height, } } -/* - * BAYER2RGB24 ROUTINE TAKEN FROM: - * - * Sonix SN9C10x based webcam basic I/F routines - * Takafumi Mizuno - * - */ -static void bayer2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst) -{ - long int i; - unsigned char *rawpt, *scanpt; - long int size; - - rawpt = src; - scanpt = dst; - size = WIDTH*HEIGHT; - - for ( i = 0; i < size; i++ ) { - if ( (i/WIDTH) % 2 == 0 ) { - if ( (i % 2) == 0 ) { - /* B */ - if ( (i > WIDTH) && ((i % WIDTH) > 0) ) { - *scanpt++ = (*(rawpt-WIDTH-1)+*(rawpt-WIDTH+1)+ - *(rawpt+WIDTH-1)+*(rawpt+WIDTH+1))/4; /* R */ - *scanpt++ = (*(rawpt-1)+*(rawpt+1)+ - *(rawpt+WIDTH)+*(rawpt-WIDTH))/4; /* G */ - *scanpt++ = *rawpt; /* B */ - } else { - /* first line or left column */ - *scanpt++ = *(rawpt+WIDTH+1); /* R */ - *scanpt++ = (*(rawpt+1)+*(rawpt+WIDTH))/2; /* G */ - *scanpt++ = *rawpt; /* B */ - } - } else { - /* (B)G */ - if ( (i > WIDTH) && ((i % WIDTH) < (WIDTH-1)) ) { - *scanpt++ = (*(rawpt+WIDTH)+*(rawpt-WIDTH))/2; /* R */ - *scanpt++ = *rawpt; /* G */ - *scanpt++ = (*(rawpt-1)+*(rawpt+1))/2; /* B */ - } else { - /* first line or right column */ - *scanpt++ = *(rawpt+WIDTH); /* R */ - *scanpt++ = *rawpt; /* G */ - *scanpt++ = *(rawpt-1); /* B */ - } - } - } else { - if ( (i % 2) == 0 ) { - /* G(R) */ - if ( (i < (WIDTH*(HEIGHT-1))) && ((i % WIDTH) > 0) ) { - *scanpt++ = (*(rawpt-1)+*(rawpt+1))/2; /* R */ - *scanpt++ = *rawpt; /* G */ - *scanpt++ = (*(rawpt+WIDTH)+*(rawpt-WIDTH))/2; /* B */ - } else { - /* bottom line or left column */ - *scanpt++ = *(rawpt+1); /* R */ - *scanpt++ = *rawpt; /* G */ - *scanpt++ = *(rawpt-WIDTH); /* B */ - } - } else { - /* R */ - if ( i < (WIDTH*(HEIGHT-1)) && ((i % WIDTH) < (WIDTH-1)) ) { - *scanpt++ = *rawpt; /* R */ - *scanpt++ = (*(rawpt-1)+*(rawpt+1)+ - *(rawpt-WIDTH)+*(rawpt+WIDTH))/4; /* G */ - *scanpt++ = (*(rawpt-WIDTH-1)+*(rawpt-WIDTH+1)+ - *(rawpt+WIDTH-1)+*(rawpt+WIDTH+1))/4; /* B */ - } else { - /* bottom line or right column */ - *scanpt++ = *rawpt; /* R */ - *scanpt++ = (*(rawpt-1)+*(rawpt-WIDTH))/2; /* G */ - *scanpt++ = *(rawpt-WIDTH-1); /* B */ - } - } - } - rawpt++; - } - -} - -// SGBRG to RGB24 -// for some reason, red and blue needs to be swapped -// at least for 046d:092f Logitech, Inc. 
QuickCam Express Plus to work -//see: http://www.siliconimaging.com/RGB%20Bayer.htm -//and 4.6 at http://tldp.org/HOWTO/html_single/libdc1394-HOWTO/ -static void sgbrg2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst) -{ - long int i; - unsigned char *rawpt, *scanpt; - long int size; - - rawpt = src; - scanpt = dst; - size = WIDTH*HEIGHT; - - for ( i = 0; i < size; i++ ) - { - if ( (i/WIDTH) % 2 == 0 ) //even row - { - if ( (i % 2) == 0 ) //even pixel - { - if ( (i > WIDTH) && ((i % WIDTH) > 0) ) - { - *scanpt++ = (*(rawpt-1)+*(rawpt+1))/2; /* R */ - *scanpt++ = *(rawpt); /* G */ - *scanpt++ = (*(rawpt-WIDTH) + *(rawpt+WIDTH))/2; /* B */ - } else - { - /* first line or left column */ - - *scanpt++ = *(rawpt+1); /* R */ - *scanpt++ = *(rawpt); /* G */ - *scanpt++ = *(rawpt+WIDTH); /* B */ - } - } else //odd pixel - { - if ( (i > WIDTH) && ((i % WIDTH) < (WIDTH-1)) ) - { - *scanpt++ = *(rawpt); /* R */ - *scanpt++ = (*(rawpt-1)+*(rawpt+1)+*(rawpt-WIDTH)+*(rawpt+WIDTH))/4; /* G */ - *scanpt++ = (*(rawpt-WIDTH-1) + *(rawpt-WIDTH+1) + *(rawpt+WIDTH-1) + *(rawpt+WIDTH+1))/4; /* B */ - } else - { - /* first line or right column */ - - *scanpt++ = *(rawpt); /* R */ - *scanpt++ = (*(rawpt-1)+*(rawpt+WIDTH))/2; /* G */ - *scanpt++ = *(rawpt+WIDTH-1); /* B */ - } - } - } else - { //odd row - if ( (i % 2) == 0 ) //even pixel - { - if ( (i < (WIDTH*(HEIGHT-1))) && ((i % WIDTH) > 0) ) - { - *scanpt++ = (*(rawpt-WIDTH-1)+*(rawpt-WIDTH+1)+*(rawpt+WIDTH-1)+*(rawpt+WIDTH+1))/4; /* R */ - *scanpt++ = (*(rawpt-1)+*(rawpt+1)+*(rawpt-WIDTH)+*(rawpt+WIDTH))/4; /* G */ - *scanpt++ = *(rawpt); /* B */ - } else - { - /* bottom line or left column */ - - *scanpt++ = *(rawpt-WIDTH+1); /* R */ - *scanpt++ = (*(rawpt+1)+*(rawpt-WIDTH))/2; /* G */ - *scanpt++ = *(rawpt); /* B */ - } - } else - { //odd pixel - if ( i < (WIDTH*(HEIGHT-1)) && ((i % WIDTH) < (WIDTH-1)) ) - { - *scanpt++ = (*(rawpt-WIDTH)+*(rawpt+WIDTH))/2; /* R */ - *scanpt++ = *(rawpt); /* G */ - *scanpt++ = (*(rawpt-1)+*(rawpt+1))/2; /* B */ - } else - { - /* bottom line or right column */ - - *scanpt++ = (*(rawpt-WIDTH)); /* R */ - *scanpt++ = *(rawpt); /* G */ - *scanpt++ = (*(rawpt-1)); /* B */ - } - } - } - rawpt++; - } -} - -// SGRBG to RGB24 -static void sgrbg2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst) -{ - long int i; - unsigned char *rawpt, *scanpt; - long int size; - - rawpt = src; - scanpt = dst; - size = WIDTH*HEIGHT; - - for ( i = 0; i < size; i++ ) - { - if ( (i/WIDTH) % 2 == 0 ) //even row - { - if ( (i % 2) == 0 ) //even pixel - { - if ( (i > WIDTH) && ((i % WIDTH) > 0) ) - { - *scanpt++ = (*(rawpt-WIDTH) + *(rawpt+WIDTH))/2; /* R */ - *scanpt++ = *(rawpt); /* G */ - *scanpt++ = (*(rawpt-1)+*(rawpt+1))/2; /* B */ - } else - { - /* first line or left column */ - - *scanpt++ = *(rawpt+WIDTH); /* R */ - *scanpt++ = *(rawpt); /* G */ - *scanpt++ = *(rawpt+1); /* B */ - } - } else //odd pixel - { - if ( (i > WIDTH) && ((i % WIDTH) < (WIDTH-1)) ) - { - *scanpt++ = (*(rawpt-WIDTH-1) + *(rawpt-WIDTH+1) + - *(rawpt+WIDTH-1) + *(rawpt+WIDTH+1)) / 4; /* R */ - *scanpt++ = (*(rawpt-1) + *(rawpt+1) + - *(rawpt-WIDTH) + *(rawpt+WIDTH)) / 4; /* G */ - *scanpt++ = *(rawpt); /* B */ - } else - { - /* first line or right column */ - - *scanpt++ = *(rawpt+WIDTH-1); /* R */ - *scanpt++ = (*(rawpt-1)+*(rawpt+WIDTH))/2; /* G */ - *scanpt++ = *(rawpt); /* B */ - } - } - } else - { //odd row - if ( (i % 2) == 0 ) //even pixel - { - if ( (i < (WIDTH*(HEIGHT-1))) && ((i % WIDTH) > 0) ) - { - *scanpt++ 
= *(rawpt); /* R */ - *scanpt++ = (*(rawpt-1) + *(rawpt+1)+ - *(rawpt-WIDTH) + *(rawpt+WIDTH)) / 4; /* G */ - *scanpt++ = (*(rawpt-WIDTH-1) + *(rawpt-WIDTH+1) + - *(rawpt+WIDTH-1) + *(rawpt+WIDTH+1)) / 4; /* B */ - } else - { - /* bottom line or left column */ - - *scanpt++ = *(rawpt); /* R */ - *scanpt++ = (*(rawpt+1)+*(rawpt-WIDTH))/2; /* G */ - *scanpt++ = *(rawpt-WIDTH+1); /* B */ - } - } else - { //odd pixel - if ( i < (WIDTH*(HEIGHT-1)) && ((i % WIDTH) < (WIDTH-1)) ) - { - *scanpt++ = (*(rawpt-1)+*(rawpt+1))/2; /* R */ - *scanpt++ = *(rawpt); /* G */ - *scanpt++ = (*(rawpt-WIDTH)+*(rawpt+WIDTH))/2; /* B */ - } else - { - /* bottom line or right column */ - - *scanpt++ = (*(rawpt-1)); /* R */ - *scanpt++ = *(rawpt); /* G */ - *scanpt++ = (*(rawpt-WIDTH)); /* B */ - } - } - } - rawpt++; - } -} - #define CLAMP(x) ((x)<0?0:((x)>255)?255:(x)) typedef struct { @@ -1778,28 +1522,6 @@ void CvCaptureCAM_V4L::convertToRgb(const Buffer ¤tBuffer) yuv411p_to_rgb24(imageSize.width, imageSize.height, start, (unsigned char*)frame.imageData); return; - case V4L2_PIX_FMT_SBGGR8: - bayer2rgb24(imageSize.width, imageSize.height, - start, (unsigned char*)frame.imageData); - return; - - case V4L2_PIX_FMT_SN9C10X: - sonix_decompress_init(); - sonix_decompress(imageSize.width, imageSize.height, - start, (unsigned char*)buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start); - - bayer2rgb24(imageSize.width, imageSize.height, - (unsigned char*)buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start, - (unsigned char*)frame.imageData); - return; - case V4L2_PIX_FMT_SGBRG8: - sgbrg2rgb24(imageSize.width, imageSize.height, - start, (unsigned char*)frame.imageData); - return; - case V4L2_PIX_FMT_SGRBG8: - sgrbg2rgb24(imageSize.width, imageSize.height, - start, (unsigned char*)frame.imageData); - return; default: break; } @@ -1872,6 +1594,36 @@ void CvCaptureCAM_V4L::convertToRgb(const Buffer ¤tBuffer) cv::cvtColor(temp, destination, COLOR_GRAY2BGR); return; } + case V4L2_PIX_FMT_SN9C10X: + { + sonix_decompress_init(); + sonix_decompress(imageSize.width, imageSize.height, + start, (unsigned char*)buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start); + + cv::Mat cv_buf(imageSize, CV_8UC1, buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start); + cv::cvtColor(cv_buf, destination, COLOR_BayerRG2BGR); + return; + } + case V4L2_PIX_FMT_SRGGB8: + { + cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_BayerBG2BGR); + return; + } + case V4L2_PIX_FMT_SBGGR8: + { + cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_BayerRG2BGR); + return; + } + case V4L2_PIX_FMT_SGBRG8: + { + cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_BayerGR2BGR); + return; + } + case V4L2_PIX_FMT_SGRBG8: + { + cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_BayerGB2BGR); + return; + } case V4L2_PIX_FMT_GREY: cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_GRAY2BGR); break; diff --git a/modules/videoio/test/test_v4l2.cpp b/modules/videoio/test/test_v4l2.cpp index 1c4917bfca..5bb1a1f5a3 100644 --- a/modules/videoio/test/test_v4l2.cpp +++ b/modules/videoio/test/test_v4l2.cpp @@ -70,6 +70,7 @@ TEST_P(videoio_v4l2, formats) const string device = devs[0]; const Size sz(640, 480); const Format_Channels_Depth params = GetParam(); + const Size esz(sz.width * params.mul_width, sz.height * params.mul_height); { // Case with RAW output @@ -83,7 +84,17 @@ TEST_P(videoio_v4l2, formats) Mat img; EXPECT_TRUE(cap.grab()); EXPECT_TRUE(cap.retrieve(img)); - EXPECT_EQ(Size(sz.width * 
params.mul_width, sz.height * params.mul_height), img.size()); + if (params.pixel_format == V4L2_PIX_FMT_SRGGB8 || + params.pixel_format == V4L2_PIX_FMT_SBGGR8 || + params.pixel_format == V4L2_PIX_FMT_SGBRG8 || + params.pixel_format == V4L2_PIX_FMT_SGRBG8) + { + EXPECT_EQ((size_t)esz.area(), img.total()); + } + else + { + EXPECT_EQ(esz, img.size()); + } EXPECT_EQ(params.channels, img.channels()); EXPECT_EQ(params.depth, img.depth()); } @@ -116,9 +127,11 @@ vector all_params = { // { V4L2_PIX_FMT_JPEG, 1, CV_8U, 1.f, 1.f }, { V4L2_PIX_FMT_YUYV, 2, CV_8U, 1.f, 1.f }, { V4L2_PIX_FMT_UYVY, 2, CV_8U, 1.f, 1.f }, -// { V4L2_PIX_FMT_SBGGR8, 1, CV_8U, 1.f, 1.f }, -// { V4L2_PIX_FMT_SN9C10X, 3, CV_8U, 1.f, 1.f }, -// { V4L2_PIX_FMT_SGBRG8, 1, CV_8U, 1.f, 1.f }, + { V4L2_PIX_FMT_SN9C10X, 3, CV_8U, 1.f, 1.f }, + { V4L2_PIX_FMT_SRGGB8, 1, CV_8U, 1.f, 1.f }, + { V4L2_PIX_FMT_SBGGR8, 1, CV_8U, 1.f, 1.f }, + { V4L2_PIX_FMT_SGBRG8, 1, CV_8U, 1.f, 1.f }, + { V4L2_PIX_FMT_SGRBG8, 1, CV_8U, 1.f, 1.f }, { V4L2_PIX_FMT_RGB24, 3, CV_8U, 1.f, 1.f }, { V4L2_PIX_FMT_Y16, 1, CV_16U, 1.f, 1.f }, { V4L2_PIX_FMT_Y16_BE, 1, CV_16U, 1.f, 1.f }, From 55d7e3f8cc9a87666b4597b32363aeeb9a0ed00b Mon Sep 17 00:00:00 2001 From: Yuantao Feng Date: Wed, 3 Apr 2024 20:56:59 +0800 Subject: [PATCH 06/21] Merge pull request #1165 from fengyuentau:gold_yolo [BugFix] dnn (ONNX): Foce dropping constant inputs in parseClip if they are shared #25319 Resolves https://github.com/opencv/opencv/issues/25278 Merge with https://github.com/opencv/opencv_extra/pull/1165 In Gold-YOLO ,`Div` has a constant input `B=6` which is then parsed into a `Const` layer in the ONNX importer, but `Clip` also has the shared constant input `max=6` which is already a `Const` layer and then connected to `Elementwise` layer. This should not happen because in the `forward()` of `Elementwise` layer, the legacy code goes through and apply activation to each input. More details on https://github.com/opencv/opencv/issues/25278#issuecomment-2032199630. ### Pull Request Readiness Checklist See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request - [x] I agree to contribute to the project under Apache 2 License. - [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV - [x] The PR is proposed to the proper branch - [x] There is a reference to the original bug report and related work - [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable Patch to opencv_extra has the same branch name. 
- [x] The feature is well documented and sample code can be built with the project CMake --- modules/dnn/src/onnx/onnx_importer.cpp | 11 +++++++---- modules/dnn/test/test_onnx_importer.cpp | 4 ++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp index c8fb026d8d..f0b2febb88 100644 --- a/modules/dnn/src/onnx/onnx_importer.cpp +++ b/modules/dnn/src/onnx/onnx_importer.cpp @@ -91,7 +91,8 @@ class ONNXImporter void addConstant(const std::string& name, const Mat& blob); void addLayer(LayerParams& layerParams, - const opencv_onnx::NodeProto& node_proto); + const opencv_onnx::NodeProto& node_proto, + int num_inputs = std::numeric_limits::max()); void setParamsDtype(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto); void lstm_extractConsts(LayerParams& layerParams, const opencv_onnx::NodeProto& lstm_proto, size_t idx, int* blobShape_, int size); @@ -617,7 +618,8 @@ ONNXImporter::TensorInfo ONNXImporter::getBlobExtraInfo(const std::string& input } void ONNXImporter::addLayer(LayerParams& layerParams, - const opencv_onnx::NodeProto& node_proto) + const opencv_onnx::NodeProto& node_proto, + int num_inputs) { int depth = layerParams.get("depth", CV_32F); int id = dstNet.addLayer(layerParams.name, layerParams.type, depth, layerParams); @@ -632,7 +634,8 @@ void ONNXImporter::addLayer(LayerParams& layerParams, std::vector layerInpShapes, layerOutShapes, layerInternalShapes; int inpNum = 0; - for (int j = 0; j < node_proto.input_size(); j++) + num_inputs = std::min(node_proto.input_size(), num_inputs); + for (int j = 0; j < num_inputs; j++) { const std::string& input_name = node_proto.input(j); IterLayerId_t layerId = layer_id.find(input_name); @@ -1799,7 +1802,7 @@ void ONNXImporter::parseClip(LayerParams& layerParams, const opencv_onnx::NodePr layerParams.set("min_value", layerParams.get("min", min_value)); layerParams.set("max_value", layerParams.get("max", max_value)); - addLayer(layerParams, node_proto); + addLayer(layerParams, node_proto, 1); } void ONNXImporter::parseLeakyRelu(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto) diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 4b9229a11e..669d363afa 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -3096,6 +3096,10 @@ TEST_P(Test_ONNX_layers, MatMulAddFusion) { testONNXModels("biased_matmul", npy, l1, lInf); } +TEST_P(Test_ONNX_layers, ClipDivSharedConstant) { + testONNXModels("clip_div_shared_constant"); +} + INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_nets, dnnBackendsAndTargets()); }} // namespace From df627e1281ba499f3b9b26b3c429195644d36145 Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Wed, 3 Apr 2024 16:58:46 +0300 Subject: [PATCH 07/21] Resolve valgrind issue at QRCode detector --- modules/objdetect/src/qrcode.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp index 8497733ac3..b6350b83e6 100644 --- a/modules/objdetect/src/qrcode.cpp +++ b/modules/objdetect/src/qrcode.cpp @@ -2936,7 +2936,8 @@ QRDecode::QRDecode(bool _useAlignmentMarkers): useAlignmentMarkers(_useAlignmentMarkers), version(0), version_size(0), - test_perspective_size(0.f) + test_perspective_size(0.f), + mode(QRCodeEncoder::EncodeMode::MODE_AUTO) {} std::string ImplContour::decode(InputArray in, InputArray points, OutputArray straight_qrcode) const { From 
89b91fcb50220d89268340e5ef733e19f3587d2a Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Thu, 4 Apr 2024 11:59:31 +0300 Subject: [PATCH 08/21] Added option to dump v4l2 test frame from virtual camera. --- modules/videoio/test/test_v4l2.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules/videoio/test/test_v4l2.cpp b/modules/videoio/test/test_v4l2.cpp index 5bb1a1f5a3..b336a6fd8a 100644 --- a/modules/videoio/test/test_v4l2.cpp +++ b/modules/videoio/test/test_v4l2.cpp @@ -17,6 +17,8 @@ #ifdef HAVE_CAMV4L2 +// #define DUMP_CAMERA_FRAME + #include "test_precomp.hpp" #include #include @@ -113,6 +115,13 @@ TEST_P(videoio_v4l2, formats) EXPECT_EQ(sz, img.size()); EXPECT_EQ(3, img.channels()); EXPECT_EQ(CV_8U, img.depth()); +#ifdef DUMP_CAMERA_FRAME + std::string img_name = "frame_" + fourccToString(params.pixel_format); + // V4L2 flag for big-endian formats + if(params.pixel_format & (1 << 31)) + img_name += "-BE"; + cv::imwrite(img_name + ".png", img); +#endif } } } From e1ed422bdb60441af23f576016bdfbb9a9c3d7ee Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Fri, 5 Apr 2024 11:56:07 +0300 Subject: [PATCH 09/21] HALL interface for transpose2d. --- modules/core/src/hal_replacement.hpp | 15 ++++++++++++++- modules/core/src/matrix_transform.cpp | 2 ++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/modules/core/src/hal_replacement.hpp b/modules/core/src/hal_replacement.hpp index f0fd12ca35..046b0678a4 100644 --- a/modules/core/src/hal_replacement.hpp +++ b/modules/core/src/hal_replacement.hpp @@ -831,7 +831,6 @@ inline int hal_ni_flip(int src_type, const uchar* src_data, size_t src_step, int #define cv_hal_flip hal_ni_flip //! @endcond - /** @brief rotate90 @param src_type source and destination image type @@ -854,6 +853,20 @@ inline int hal_ni_rotate90(int src_type, const uchar* src_data, size_t src_step, #define cv_hal_rotate90 hal_ni_rotate90 //! @endcond +/** + @brief Transpose2d + @param src_data,src_step Source image + @param dst_data,dst_step Destination image + @param src_width,src_height Source image dimensions + @param element_size Size of an element in bytes +*/ +inline int hal_ni_transpose2d(const uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step, int src_width, + int src_height, int element_size) { return CV_HAL_ERROR_NOT_IMPLEMENTED; } + +//! @cond IGNORED +#define cv_hal_transpose2d hal_ni_transpose2d +//! @endcond + //! 
@} diff --git a/modules/core/src/matrix_transform.cpp b/modules/core/src/matrix_transform.cpp index 5a80ac8ca7..bad17e7b6b 100644 --- a/modules/core/src/matrix_transform.cpp +++ b/modules/core/src/matrix_transform.cpp @@ -269,6 +269,8 @@ void transpose( InputArray _src, OutputArray _dst ) return; } + CALL_HAL(transpose2d, cv_hal_transpose2d, src.data, src.step, dst.data, dst.step, src.cols, src.rows, esz); + CV_IPP_RUN_FAST(ipp_transpose(src, dst)) if( dst.data == src.data ) From e17b8ae8a14fbacbac6ae3e3b942eaee464c986b Mon Sep 17 00:00:00 2001 From: unknown <3591626+LaurentBerger@users.noreply.github.com> Date: Sat, 6 Apr 2024 20:07:10 +0200 Subject: [PATCH 10/21] typo in env_reference.markdown --- doc/tutorials/introduction/env_reference/env_reference.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tutorials/introduction/env_reference/env_reference.markdown b/doc/tutorials/introduction/env_reference/env_reference.markdown index 91b8e64841..0e4f7fe0be 100644 --- a/doc/tutorials/introduction/env_reference/env_reference.markdown +++ b/doc/tutorials/introduction/env_reference/env_reference.markdown @@ -123,7 +123,7 @@ Some modules have multiple available backends, following variables allow choosin | OPENCV_PARALLEL_PRIORITY_LIST | string, `,`-separated | | list of backends in priority order | | OPENCV_UI_BACKEND | string | | choose highgui backend for window rendering (one of `GTK`, `GTK3`, `GTK2`, `QT`, `WIN32`) | | OPENCV_UI_PRIORITY_${NAME} | num | | set highgui backend priority, default is 1000 | -| OPENCV_UI_PRIORITY_LIST | string, `,`-separated | | list of hioghgui backends in priority order | +| OPENCV_UI_PRIORITY_LIST | string, `,`-separated | | list of highgui backends in priority order | | OPENCV_VIDEOIO_PRIORITY_${NAME} | num | | set videoio backend priority, default is 1000 | | OPENCV_VIDEOIO_PRIORITY_LIST | string, `,`-separated | | list of videoio backends in priority order | From 5be158a2b6ed0f4f4def851d3a57c3e3b5865ad5 Mon Sep 17 00:00:00 2001 From: Liutong HAN Date: Sun, 7 Apr 2024 11:34:41 +0800 Subject: [PATCH 11/21] Further optimize fastDepthwiseConv for RVV. --- .../cpu_kernels/conv_depthwise.simd.hpp | 100 +++++------------- 1 file changed, 24 insertions(+), 76 deletions(-) diff --git a/modules/dnn/src/layers/cpu_kernels/conv_depthwise.simd.hpp b/modules/dnn/src/layers/cpu_kernels/conv_depthwise.simd.hpp index 1d561e9864..6d4b211b8c 100644 --- a/modules/dnn/src/layers/cpu_kernels/conv_depthwise.simd.hpp +++ b/modules/dnn/src/layers/cpu_kernels/conv_depthwise.simd.hpp @@ -209,34 +209,6 @@ void fastDepthwiseConv( const float* wptr, #if !defined(CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY) && CV_RVV -/* -Example for load_deinterleave: - input: ptr[16] = {1,2,3, ... ,14,15,16} - output: a = {1, 3, 5, 7, 9, 11, 13, 15} - output: b = {2, 4, 6, 8,10, 12, 14, 16} -*/ -static inline void vfloat32m2_load_deinterleave(const float* ptr, vfloat32m2_t& a, vfloat32m2_t& b, int vl) -{ - vuint64m4_t mask = vmv_v_x_u64m4(1,vl*2); - vuint32m4_t mask_re = vreinterpret_v_u64m4_u32m4(mask); - vbool8_t mask0 = vmseq_vx_u32m4_b8 (mask_re, 1, vl*2); - vbool8_t mask1 = vmseq_vx_u32m4_b8 (mask_re, 0, vl*2); - vfloat32m4_t tempa = vundefined_f32m4(), tempb = vundefined_f32m4(); - vfloat32m4_t vw = vle32_v_f32m4(ptr, vl*2); - tempa = vcompress_vm_f32m4(mask0, tempa, vw, vl*2); - tempb = vcompress_vm_f32m4(mask1, tempb, vw, vl*2); - /* The following instructions have not to be supported by the GNU toolchain. - So we temporarily use store and load instead. 
- // a = vlmul_trunc_v_f32m4_f32m2(tempa); - // b = vlmul_trunc_v_f32m4_f32m2(tempb); - */ - cv::AutoBuffer cvBuffer(sizeof(float)*vl*2); - float* buffer = (float*)cvBuffer.data(); - vse32_v_f32m4(buffer, tempa, vl); - a = vle32_v_f32m2(buffer, vl); - vse32_v_f32m4(buffer, tempb, vl); - b = vle32_v_f32m2(buffer, vl); -} void fastDepthwiseConv( const float* wptr, int kernel_h, int kernel_w, @@ -292,64 +264,40 @@ void fastDepthwiseConv( const float* wptr, if( stride_w == 1 ) for( ; out_j < outW1; out_j += vl, avl -= vl) { - vl = vsetvl_e32m2(avl); + vl = vsetvl_e32m8(avl); int in_j = out_j * stride_w - pad_l; - vfloat32m2_t v00 = vle32_v_f32m2(imgptr0 + in_j, vl), - v01 = vle32_v_f32m2(imgptr0 + in_j + dilation_w, vl), - v02 = vle32_v_f32m2(imgptr0 + in_j + dilation_w*2, vl), - v10 = vle32_v_f32m2(imgptr1 + in_j, vl), - v11 = vle32_v_f32m2(imgptr1 + in_j + dilation_w, vl), - v12 = vle32_v_f32m2(imgptr1 + in_j + dilation_w*2, vl), - v20 = vle32_v_f32m2(imgptr2 + in_j, vl), - v21 = vle32_v_f32m2(imgptr2 + in_j + dilation_w, vl), - v22 = vle32_v_f32m2(imgptr2 + in_j + dilation_w*2, vl); - - vfloat32m2_t vout0 = vfmul_vf_f32m2(v00, w00, vl); - vfloat32m2_t vout1 = vfmul_vf_f32m2(v01, w01, vl); - vfloat32m2_t vout2 = vfmul_vf_f32m2(v02, w02, vl); - vout0 = vfadd_vf_f32m2(vout0, bias, vl); - - vout0 = vfmacc_vf_f32m2(vout0, w10, v10, vl); - vout1 = vfmacc_vf_f32m2(vout1, w11, v11, vl); - vout2 = vfmacc_vf_f32m2(vout2, w12, v12, vl); - - vout0 = vfmacc_vf_f32m2(vout0, w20, v20, vl); - vout1 = vfmacc_vf_f32m2(vout1, w21, v21, vl); - vout2 = vfmacc_vf_f32m2(vout2, w22, v22, vl); - - vout0 = vfadd_vv_f32m2(vfadd_vv_f32m2(vout0, vout1, vl), vout2, vl); + vfloat32m8_t vout0 = vfmacc_vf_f32m8(vfmv_v_f_f32m8(bias, vl), w00, vle32_v_f32m8(imgptr0 + in_j, vl), vl); + vout0 = vfmacc_vf_f32m8(vout0, w01, vle32_v_f32m8(imgptr0 + in_j + dilation_w, vl), vl); + vout0 = vfmacc_vf_f32m8(vout0, w02, vle32_v_f32m8(imgptr0 + in_j + dilation_w*2, vl), vl); + vout0 = vfmacc_vf_f32m8(vout0, w10, vle32_v_f32m8(imgptr1 + in_j, vl),vl); + vout0 = vfmacc_vf_f32m8(vout0, w11, vle32_v_f32m8(imgptr1 + in_j + dilation_w, vl),vl); + vout0 = vfmacc_vf_f32m8(vout0, w12, vle32_v_f32m8(imgptr1 + in_j + dilation_w*2, vl),vl); + vout0 = vfmacc_vf_f32m8(vout0, w20, vle32_v_f32m8(imgptr2 + in_j, vl), vl); + vout0 = vfmacc_vf_f32m8(vout0, w21, vle32_v_f32m8(imgptr2 + in_j + dilation_w, vl), vl); + vout0 = vfmacc_vf_f32m8(vout0, w22, vle32_v_f32m8(imgptr2 + in_j + dilation_w*2, vl), vl); if (relu) { - vbool16_t m = vmfgt_vf_f32m2_b16(vout0, 0, vl); - vout0 = vmerge_vvm_f32m2(m, vfmul_vf_f32m2(vout0, relu_coeff, vl), vout0, vl); + vbool4_t m = vmfgt_vf_f32m8_b4(vout0, 0, vl); + vout0 = vmerge_vvm_f32m8(m, vfmul_vf_f32m8(vout0, relu_coeff, vl), vout0, vl); } - vse32_v_f32m2(outptr + out_j, vout0, vl); + vse32_v_f32m8(outptr + out_j, vout0, vl); } else //stride_w == 2 && dilation_w == 1 for( ; out_j < outW1; out_j += vl, avl -= vl) { vl = vsetvl_e32m2(avl); int in_j = out_j * stride_w - pad_l; - vfloat32m2_t v00, v01, v02, v10, v11, v12, v20, v21, v22, unused; - vfloat32m2_load_deinterleave(imgptr0 + in_j, v00, v01, vl); - vfloat32m2_load_deinterleave(imgptr0 + in_j + 2, v02, unused, vl); - vfloat32m2_load_deinterleave(imgptr1 + in_j, v10, v11, vl); - vfloat32m2_load_deinterleave(imgptr1 + in_j + 2, v12, unused, vl); - vfloat32m2_load_deinterleave(imgptr2 + in_j, v20, v21, vl); - vfloat32m2_load_deinterleave(imgptr2 + in_j + 2, v22, unused, vl); - - vfloat32m2_t vout0 = vfmul_vf_f32m2(v00, w00, vl); - vfloat32m2_t vout1 = vfmul_vf_f32m2(v01, 
w01, vl); - vfloat32m2_t vout2 = vfmul_vf_f32m2(v02, w02, vl); - vout0 = vfadd_vf_f32m2(vout0, bias, vl); - - vout0 = vfmacc_vf_f32m2(vout0, w10, v10, vl); - vout1 = vfmacc_vf_f32m2(vout1, w11, v11, vl); - vout2 = vfmacc_vf_f32m2(vout2, w12, v12, vl); - - vout0 = vfmacc_vf_f32m2(vout0, w20, v20, vl); - vout1 = vfmacc_vf_f32m2(vout1, w21, v21, vl); - vout2 = vfmacc_vf_f32m2(vout2, w22, v22, vl); + vfloat32m2_t vout0 = vfmacc_vf_f32m2(vfmv_v_f_f32m2(bias, vl), w00, vlse32_v_f32m2(imgptr0+in_j , 8, vl), vl); + vfloat32m2_t vout1 = vfmul_vf_f32m2(vlse32_v_f32m2(imgptr0+in_j+1, 8, vl), w01, vl); + vfloat32m2_t vout2 = vfmul_vf_f32m2(vlse32_v_f32m2(imgptr0+in_j+2, 8, vl), w02, vl); + + vout0 = vfmacc_vf_f32m2(vout0, w10, vlse32_v_f32m2(imgptr1+in_j , 8, vl), vl); + vout1 = vfmacc_vf_f32m2(vout1, w11, vlse32_v_f32m2(imgptr1+in_j+1, 8, vl), vl); + vout2 = vfmacc_vf_f32m2(vout2, w12, vlse32_v_f32m2(imgptr1+in_j+2, 8, vl), vl); + + vout0 = vfmacc_vf_f32m2(vout0, w20, vlse32_v_f32m2(imgptr2+in_j , 8, vl), vl); + vout1 = vfmacc_vf_f32m2(vout1, w21, vlse32_v_f32m2(imgptr2+in_j+1, 8, vl), vl); + vout2 = vfmacc_vf_f32m2(vout2, w22, vlse32_v_f32m2(imgptr2+in_j+2, 8, vl), vl); vout0 = vfadd_vv_f32m2(vfadd_vv_f32m2(vout0, vout1, vl), vout2, vl); if (relu) From 5528e70f3c8830adb7d08981f9b03a717bfa202e Mon Sep 17 00:00:00 2001 From: Tsukasa Sugiura Date: Mon, 8 Apr 2024 14:52:53 +0900 Subject: [PATCH 12/21] remove floating point literal --- modules/imgproc/src/phasecorr.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/imgproc/src/phasecorr.cpp b/modules/imgproc/src/phasecorr.cpp index 9db436673c..dadc3a3da7 100644 --- a/modules/imgproc/src/phasecorr.cpp +++ b/modules/imgproc/src/phasecorr.cpp @@ -613,7 +613,7 @@ void cv::createHanningWindow(OutputArray _dst, cv::Size winSize, int type) AutoBuffer _wc(cols); double* const wc = _wc.data(); - double coeff0 = 2.0 * CV_PI / (double)(cols - 1), coeff1 = 2.0f * CV_PI / (double)(rows - 1); + double coeff0 = 2.0 * CV_PI / (double)(cols - 1), coeff1 = 2.0 * CV_PI / (double)(rows - 1); for(int j = 0; j < cols; j++) wc[j] = 0.5 * (1.0 - cos(coeff0 * j)); From 30889f4a90a548d8b3bde99b22ae68211190524d Mon Sep 17 00:00:00 2001 From: Alexander Lyulkov Date: Mon, 8 Apr 2024 13:04:50 +0300 Subject: [PATCH 13/21] Added tests for adaptiveThreshold and sepFilter2D --- modules/imgproc/test/test_filter.cpp | 172 +++++++++++++++++++++++++++ modules/imgproc/test/test_thresh.cpp | 54 +++++++++ 2 files changed, 226 insertions(+) diff --git a/modules/imgproc/test/test_filter.cpp b/modules/imgproc/test/test_filter.cpp index 011cbaab0b..a0ccf4372f 100644 --- a/modules/imgproc/test/test_filter.cpp +++ b/modules/imgproc/test/test_filter.cpp @@ -2380,4 +2380,176 @@ TEST(Imgproc, morphologyEx_small_input_22893) ASSERT_EQ(0, cvtest::norm(result, gold, NORM_INF)); } +TEST(Imgproc_sepFilter2D, identity) +{ + std::vector kernelX{0, 0, 0, 1, 0, 0, 0}; + std::vector kernelY{0, 0, 1, 0, 0}; + + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::sepFilter2D(input, result, input.depth(), kernelX, kernelY); + + EXPECT_EQ(0, cv::norm(result, input, NORM_INF)); +} + +TEST(Imgproc_sepFilter2D, shift) +{ + std::vector kernelX{1, 0, 0}; + std::vector kernelY{0, 0, 1}; + + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::sepFilter2D(input, result, input.depth(), kernelX, kernelY); + + 
int W = input.cols; + int H = input.rows; + Mat inputCrop = input(Range(1, H), Range(0, W - 1)); + Mat resultCrop = result(Range(0, H - 1), Range(1, W)); + EXPECT_EQ(0, cv::norm(resultCrop, inputCrop, NORM_INF)); + + // Checking borders. Should be BORDER_REFLECT_101 + + inputCrop = input(Range(H - 2, H - 1), Range(0, W - 1)); + resultCrop = result(Range(H - 1, H), Range(1, W)); + EXPECT_EQ(0, cv::norm(resultCrop, inputCrop, NORM_INF)); + + inputCrop = input(Range(1, H), Range(1, 2)); + resultCrop = result(Range(0, H - 1), Range(0, 1)); + EXPECT_EQ(0, cv::norm(resultCrop, inputCrop, NORM_INF)); + + inputCrop = input(Range(H - 2, H - 1), Range(1, 2)); + resultCrop = result(Range(H - 1, H), Range(0, 1)); + EXPECT_EQ(0, cv::norm(resultCrop, inputCrop, NORM_INF)); +} + +TEST(Imgproc_sepFilter2D, zeroPadding) +{ + std::vector kernelX{1, 0, 0}; + std::vector kernelY{0, 0, 1}; + Point anchor(-1, -1); + double delta = 0; + + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::sepFilter2D(input, result, input.depth(), kernelX, kernelY, anchor, delta, BORDER_CONSTANT); + + int W = input.cols; + int H = input.rows; + Mat inputCrop = input(Range(1, H), Range(0, W - 1)); + Mat resultCrop = result(Range(0, H - 1), Range(1, W)); + EXPECT_EQ(0, cv::norm(resultCrop, inputCrop, NORM_INF)); + + // Checking borders + + resultCrop = result(Range(H - 1, H), Range(0, W)); + EXPECT_EQ(0, cv::norm(resultCrop, NORM_INF)); + + resultCrop = result(Range(0, H), Range(0, 1)); + EXPECT_EQ(0, cv::norm(resultCrop, NORM_INF)); +} + +TEST(Imgproc_sepFilter2D, anchor) +{ + std::vector kernelX{0, 1, 0}; + std::vector kernelY{0, 1, 0}; + Point anchor(2, 0); + + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::sepFilter2D(input, result, input.depth(), kernelX, kernelY, anchor); + + int W = input.cols; + int H = input.rows; + Mat inputCrop = input(Range(1, H), Range(0, W - 1)); + Mat resultCrop = result(Range(0, H - 1), Range(1, W)); + EXPECT_EQ(0, cv::norm(resultCrop, inputCrop, NORM_INF)); + + // Checking borders. 
Should be BORDER_REFLECT_101 + + inputCrop = input(Range(H - 2, H - 1), Range(0, W - 1)); + resultCrop = result(Range(H - 1, H), Range(1, W)); + EXPECT_EQ(0, cv::norm(resultCrop, inputCrop, NORM_INF)); + + inputCrop = input(Range(1, H), Range(1, 2)); + resultCrop = result(Range(0, H - 1), Range(0, 1)); + EXPECT_EQ(0, cv::norm(resultCrop, inputCrop, NORM_INF)); + + inputCrop = input(Range(H - 2, H - 1), Range(1, 2)); + resultCrop = result(Range(H - 1, H), Range(0, 1)); + EXPECT_EQ(0, cv::norm(resultCrop, inputCrop, NORM_INF)); +} + +TEST(Imgproc_sepFilter2D, delta) +{ + std::vector kernelX{0, 0.5, 0}; + std::vector kernelY{0, 1, 0}; + Point anchor(1, 1); + double delta = 5; + + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::sepFilter2D(input, result, input.depth(), kernelX, kernelY, anchor, delta); + + Mat gt = input / 2 + delta; + EXPECT_EQ(0, cv::norm(result, gt, NORM_INF)); +} + +typedef testing::TestWithParam Imgproc_sepFilter2D_outTypes; +TEST_P(Imgproc_sepFilter2D_outTypes, simple) +{ + int outputType = GetParam(); + std::vector kernelX{0, 0.5, 0}; + std::vector kernelY{0, 0.5, 0}; + Point anchor(1, 1); + double delta = 5; + + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::sepFilter2D(input, result, outputType, kernelX, kernelY, anchor, delta); + + input.convertTo(input, outputType); + Mat gt = input / 4 + delta; + EXPECT_EQ(0, cv::norm(result, gt, NORM_INF)); +} + +INSTANTIATE_TEST_CASE_P(/**/, Imgproc_sepFilter2D_outTypes, + testing::Values(CV_16S, CV_32F, CV_64F), +); + +typedef testing::TestWithParam Imgproc_sepFilter2D_types; +TEST_P(Imgproc_sepFilter2D_types, simple) +{ + int outputType = GetParam(); + std::vector kernelX{0, 0.5, 0}; + std::vector kernelY{0, 0.5, 0}; + Point anchor(1, 1); + double delta = 5; + + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + input.convertTo(input, outputType); + Mat result; + + cv::sepFilter2D(input, result, outputType, kernelX, kernelY, anchor, delta); + + Mat gt = input / 4 + delta; + EXPECT_EQ(0, cv::norm(result, gt, NORM_INF)); +} + +INSTANTIATE_TEST_CASE_P(/**/, Imgproc_sepFilter2D_types, + testing::Values(CV_16S, CV_32F, CV_64F), +); + }} // namespace diff --git a/modules/imgproc/test/test_thresh.cpp b/modules/imgproc/test/test_thresh.cpp index 3eb096a655..f14f2e5716 100644 --- a/modules/imgproc/test/test_thresh.cpp +++ b/modules/imgproc/test/test_thresh.cpp @@ -541,4 +541,58 @@ TEST(Imgproc_Threshold, regression_THRESH_TOZERO_IPP_21258_Max) EXPECT_EQ(0, cv::norm(result, NORM_INF)); } +TEST(Imgproc_AdaptiveThreshold, mean) +{ + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::adaptiveThreshold(input, result, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 15, 8); + + const string gt_path = cvtest::findDataFile("../cv/imgproc/adaptive_threshold1.png"); + Mat gt = imread(gt_path, IMREAD_GRAYSCALE); + EXPECT_EQ(0, cv::norm(result, gt, NORM_INF)); +} + +TEST(Imgproc_AdaptiveThreshold, mean_inv) +{ + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::adaptiveThreshold(input, result, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 15, 8); + + const string gt_path = 
cvtest::findDataFile("../cv/imgproc/adaptive_threshold1.png"); + Mat gt = imread(gt_path, IMREAD_GRAYSCALE); + gt = Mat(gt.rows, gt.cols, CV_8UC1, cv::Scalar(255)) - gt; + EXPECT_EQ(0, cv::norm(result, gt, NORM_INF)); +} + +TEST(Imgproc_AdaptiveThreshold, gauss) +{ + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::adaptiveThreshold(input, result, 200, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 21, -5); + + const string gt_path = cvtest::findDataFile("../cv/imgproc/adaptive_threshold2.png"); + Mat gt = imread(gt_path, IMREAD_GRAYSCALE); + EXPECT_EQ(0, cv::norm(result, gt, NORM_INF)); +} + +TEST(Imgproc_AdaptiveThreshold, gauss_inv) +{ + const string input_path = cvtest::findDataFile("../cv/shared/baboon.png"); + Mat input = imread(input_path, IMREAD_GRAYSCALE); + Mat result; + + cv::adaptiveThreshold(input, result, 200, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY_INV, 21, -5); + + const string gt_path = cvtest::findDataFile("../cv/imgproc/adaptive_threshold2.png"); + Mat gt = imread(gt_path, IMREAD_GRAYSCALE); + gt = Mat(gt.rows, gt.cols, CV_8UC1, cv::Scalar(200)) - gt; + EXPECT_EQ(0, cv::norm(result, gt, NORM_INF)); +} + }} // namespace From e63690a2d9b0fb50c687b239f2d699436b442447 Mon Sep 17 00:00:00 2001 From: ecchen Date: Sat, 6 Apr 2024 13:55:17 +0000 Subject: [PATCH 14/21] Add a shape checker for tflite models --- modules/dnn/test/test_tflite_importer.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/modules/dnn/test/test_tflite_importer.cpp b/modules/dnn/test/test_tflite_importer.cpp index 291d1f50d2..7ad62bf308 100644 --- a/modules/dnn/test/test_tflite_importer.cpp +++ b/modules/dnn/test/test_tflite_importer.cpp @@ -58,7 +58,13 @@ void Test_TFLite::testModel(Net& net, const std::string& modelName, const Mat& i ASSERT_EQ(outs.size(), outNames.size()); for (int i = 0; i < outNames.size(); ++i) { Mat ref = blobFromNPY(findDataFile(format("dnn/tflite/%s_out_%s.npy", modelName.c_str(), outNames[i].c_str()))); - normAssert(ref.reshape(1, 1), outs[i].reshape(1, 1), outNames[i].c_str(), l1, lInf); + // A workaround solution for the following cases due to inconsistent shape definitions. + // The details please see: https://github.com/opencv/opencv/pull/25297#issuecomment-2039081369 + if (modelName == "face_landmark" || modelName == "selfie_segmentation") { + ref = ref.reshape(1, 1); + outs[i] = outs[i].reshape(1, 1); + } + normAssert(ref, outs[i], outNames[i].c_str(), l1, lInf); } } From 8ed52cb5647c73c767843a975a47867c7d34b1fe Mon Sep 17 00:00:00 2001 From: Kumataro Date: Tue, 9 Apr 2024 00:47:58 +0900 Subject: [PATCH 15/21] Merge pull request #25356 from Kumataro:fix25345 core: doc: add note for countNonZero, hasNonZero and findNonZero #25356 Close #25345 ### Pull Request Readiness Checklist See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request - [x] I agree to contribute to the project under Apache 2 License. - [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV - [x] The PR is proposed to the proper branch - [x] There is a reference to the original bug report and related work - [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable Patch to opencv_extra has the same branch name. 
- [ ] The feature is well documented and sample code can be built with the project CMake --- modules/core/include/opencv2/core.hpp | 80 ++++++++++++++++++--------- 1 file changed, 55 insertions(+), 25 deletions(-) diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp index 0958fe019d..be800333f8 100644 --- a/modules/core/include/opencv2/core.hpp +++ b/modules/core/include/opencv2/core.hpp @@ -248,7 +248,7 @@ CV_EXPORTS void swap( UMat& a, UMat& b ); The function computes and returns the coordinate of a donor pixel corresponding to the specified extrapolated pixel when using the specified extrapolation border mode. For example, if you use cv::BORDER_WRAP mode in the horizontal direction, cv::BORDER_REFLECT_101 in the vertical direction and -want to compute value of the "virtual" pixel Point(-5, 100) in a floating-point image img , it +want to compute value of the "virtual" pixel Point(-5, 100) in a floating-point image img, it looks like: @code{.cpp} float val = img.at(borderInterpolate(100, img.rows, cv::BORDER_REFLECT_101), @@ -259,7 +259,7 @@ copyMakeBorder. @param p 0-based coordinate of the extrapolated pixel along one of the axes, likely \<0 or \>= len @param len Length of the array along the corresponding axis. @param borderType Border type, one of the #BorderTypes, except for #BORDER_TRANSPARENT and -#BORDER_ISOLATED . When borderType==#BORDER_CONSTANT , the function always returns -1, regardless +#BORDER_ISOLATED. When borderType==#BORDER_CONSTANT, the function always returns -1, regardless of p and len. @sa copyMakeBorder @@ -585,8 +585,18 @@ CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src); /** @brief Checks for the presence of at least one non-zero array element. The function returns whether there are non-zero elements in src + +The function do not work with multi-channel arrays. If you need to check non-zero array +elements across all the channels, use Mat::reshape first to reinterpret the array as +single-channel. Or you may extract the particular channel using either extractImageCOI, or +mixChannels, or split. + +@note +- If the location of non-zero array elements is important, @ref findNonZero is helpful. +- If the count of non-zero array elements is important, @ref countNonZero is helpful. @param src single-channel array. @sa mean, meanStdDev, norm, minMaxLoc, calcCovarMatrix +@sa findNonZero, countNonZero */ CV_EXPORTS_W bool hasNonZero( InputArray src ); @@ -594,8 +604,18 @@ CV_EXPORTS_W bool hasNonZero( InputArray src ); The function returns the number of non-zero elements in src : \f[\sum _{I: \; \texttt{src} (I) \ne0 } 1\f] + +The function do not work with multi-channel arrays. If you need to count non-zero array +elements across all the channels, use Mat::reshape first to reinterpret the array as +single-channel. Or you may extract the particular channel using either extractImageCOI, or +mixChannels, or split. + +@note +- If only whether there are non-zero elements is important, @ref hasNonZero is helpful. +- If the location of non-zero array elements is important, @ref findNonZero is helpful. @param src single-channel array. @sa mean, meanStdDev, norm, minMaxLoc, calcCovarMatrix +@sa findNonZero, hasNonZero */ CV_EXPORTS_W int countNonZero( InputArray src ); @@ -622,8 +642,18 @@ or // access pixel coordinates Point pnt = locations[i]; @endcode + +The function do not work with multi-channel arrays. If you need to find non-zero +elements across all the channels, use Mat::reshape first to reinterpret the array as +single-channel. 
Or you may extract the particular channel using either extractImageCOI, or +mixChannels, or split. + +@note +- If only count of non-zero array elements is important, @ref countNonZero is helpful. +- If only whether there are non-zero elements is important, @ref hasNonZero is helpful. @param src single-channel array @param idx the output array, type of cv::Mat or std::vector, corresponding to non-zero indices in the input +@sa countNonZero, hasNonZero */ CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx ); @@ -830,8 +860,8 @@ array region. The function do not work with multi-channel arrays. If you need to find minimum or maximum elements across all the channels, use Mat::reshape first to reinterpret the array as -single-channel. Or you may extract the particular channel using either extractImageCOI , or -mixChannels , or split . +single-channel. Or you may extract the particular channel using either extractImageCOI, or +mixChannels, or split. @param src input single-channel array. @param minVal pointer to the returned minimum value; NULL is used if not required. @param maxVal pointer to the returned maximum value; NULL is used if not required. @@ -884,8 +914,8 @@ The function cv::minMaxIdx finds the minimum and maximum element values and thei extremums are searched across the whole array or, if mask is not an empty array, in the specified array region. The function does not work with multi-channel arrays. If you need to find minimum or maximum elements across all the channels, use Mat::reshape first to reinterpret the array as -single-channel. Or you may extract the particular channel using either extractImageCOI , or -mixChannels , or split . In case of a sparse matrix, the minimum is found among non-zero elements +single-channel. Or you may extract the particular channel using either extractImageCOI, or +mixChannels, or split. In case of a sparse matrix, the minimum is found among non-zero elements only. @note When minIdx is not NULL, it must have at least 2 elements (as well as maxIdx), even if src is a single-row or single-column matrix. In OpenCV (following MATLAB) each array has at least 2 @@ -921,8 +951,8 @@ CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal, The function #reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of 1D vectors and performing the specified operation on the vectors until a single row/column is obtained. For example, the function can be used to compute horizontal and vertical projections of a -raster image. In case of #REDUCE_MAX and #REDUCE_MIN , the output image should have the same type as the source one. -In case of #REDUCE_SUM, #REDUCE_SUM2 and #REDUCE_AVG , the output may have a larger element bit-depth to preserve accuracy. +raster image. In case of #REDUCE_MAX and #REDUCE_MIN, the output image should have the same type as the source one. +In case of #REDUCE_SUM, #REDUCE_SUM2 and #REDUCE_AVG, the output may have a larger element bit-depth to preserve accuracy. And multi-channel arrays are also supported in these two reduction modes. The following code demonstrates its usage for a single channel matrix. @@ -976,7 +1006,7 @@ CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst); The function cv::split splits a multi-channel array into separate single-channel arrays: \f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f] If you need to extract a single channel or do some other sophisticated channel permutation, use -mixChannels . +mixChannels. 
The following example demonstrates how to split a 3-channel matrix into 3 single channel matrices. @snippet snippets/core_split.cpp example @@ -1117,7 +1147,7 @@ The example scenarios of using the function are the following: flipping around the x-axis and positive value (for example, 1) means flipping around y-axis. Negative value (for example, -1) means flipping around both axes. -@sa transpose , repeat , completeSymm +@sa transpose, repeat, completeSymm */ CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode); @@ -1149,7 +1179,7 @@ The function cv::rotate rotates the array in one of three different ways: @param dst output array of the same type as src. The size is the same with ROTATE_180, and the rows and cols are switched for ROTATE_90_CLOCKWISE and ROTATE_90_COUNTERCLOCKWISE. @param rotateCode an enum to specify how to rotate the array; see the enum #RotateFlags -@sa transpose , repeat , completeSymm, flip, RotateFlags +@sa transpose, repeat, completeSymm, flip, RotateFlags */ CV_EXPORTS_W void rotate(InputArray src, OutputArray dst, int rotateCode); @@ -1583,7 +1613,7 @@ converts denormalized values to zeros on output. Special values (NaN, Inf) are not handled. @param src input array. @param dst output array of the same size and type as src. -@sa log , cartToPolar , polarToCart , phase , pow , sqrt , magnitude +@sa log, cartToPolar, polarToCart, phase, pow, sqrt, magnitude */ CV_EXPORTS_W void exp(InputArray src, OutputArray dst); @@ -1727,7 +1757,7 @@ should have the same type as src1 and src2. @param dst output matrix; it has the proper size and the same type as input matrices. @param flags operation flags (cv::GemmFlags) -@sa mulTransposed , transform +@sa mulTransposed, transform */ CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, InputArray src3, double beta, OutputArray dst, int flags = 0); @@ -1737,7 +1767,7 @@ CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, The function cv::mulTransposed calculates the product of src and its transposition: \f[\texttt{dst} = \texttt{scale} ( \texttt{src} - \texttt{delta} )^T ( \texttt{src} - \texttt{delta} )\f] -if aTa=true , and +if aTa=true, and \f[\texttt{dst} = \texttt{scale} ( \texttt{src} - \texttt{delta} ) ( \texttt{src} - \texttt{delta} )^T\f] otherwise. The function is used to calculate the covariance matrix. With zero delta, it can be used as a faster substitute for general matrix @@ -1750,7 +1780,7 @@ description below. @param delta Optional delta matrix subtracted from src before the multiplication. When the matrix is empty ( delta=noArray() ), it is assumed to be zero, that is, nothing is subtracted. If it has the same -size as src , it is simply subtracted. Otherwise, it is "repeated" (see +size as src, it is simply subtracted. Otherwise, it is "repeated" (see repeat ) to cover the full src and then subtracted. Type of the delta matrix, when it is not empty, must be the same as the type of created output matrix. See the dtype parameter description below. @@ -2024,7 +2054,7 @@ in the descending order. @param eigenvectors output matrix of eigenvectors; it has the same size and type as src; the eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding eigenvalues. 
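A minimal usage sketch (assuming a symmetric single-precision matrix A) might look like:
@code{.cpp}
    cv::Mat eigenvalues, eigenvectors;
    cv::eigen(A, eigenvalues, eigenvectors); // eigenvalues in descending order, eigenvectors as rows
@endcode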
-@sa eigenNonSymmetric, completeSymm , PCA +@sa eigenNonSymmetric, completeSymm, PCA */ CV_EXPORTS_W bool eigen(InputArray src, OutputArray eigenvalues, OutputArray eigenvectors = noArray()); @@ -2164,7 +2194,7 @@ So, the function chooses an operation mode depending on the flags and size of th If #DFT_SCALE is set, the scaling is done after the transformation. -Unlike dct , the function supports arrays of arbitrary size. But only those arrays are processed +Unlike dct, the function supports arrays of arbitrary size. But only those arrays are processed efficiently, whose sizes can be factorized in a product of small prime numbers (2, 3, and 5 in the current implementation). Such an efficient DFT size can be calculated using the getOptimalDFTSize method. @@ -2247,8 +2277,8 @@ nonzeroRows rows of the input array (#DFT_INVERSE is not set) or only the first output array (#DFT_INVERSE is set) contain non-zeros, thus, the function can handle the rest of the rows more efficiently and save some time; this technique is very useful for calculating array cross-correlation or convolution using DFT. -@sa dct , getOptimalDFTSize , mulSpectrums, filter2D , matchTemplate , flip , cartToPolar , -magnitude , phase +@sa dct, getOptimalDFTSize, mulSpectrums, filter2D, matchTemplate, flip, cartToPolar, +magnitude, phase */ CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0); @@ -2285,9 +2315,9 @@ floating-point array: \f[X = \left (C^{(N)} \right )^T \cdot X \cdot C^{(N)}\f] The function chooses the mode of operation by looking at the flags and size of the input array: -- If (flags & #DCT_INVERSE) == 0 , the function does a forward 1D or 2D transform. Otherwise, it +- If (flags & #DCT_INVERSE) == 0, the function does a forward 1D or 2D transform. Otherwise, it is an inverse 1D or 2D transform. -- If (flags & #DCT_ROWS) != 0 , the function performs a 1D transform of each row. +- If (flags & #DCT_ROWS) != 0, the function performs a 1D transform of each row. - If the array is a single column or a single row, the function performs a 1D transform. - If none of the above is true, the function performs a 2D transform. @@ -2303,7 +2333,7 @@ of a vector of size N/2 . Thus, the optimal DCT size N1 \>= N can be calculated @param src input floating-point array. @param dst output array of the same size and type as src . @param flags transformation flags as a combination of cv::DftFlags (DCT_*) -@sa dft , getOptimalDFTSize , idct +@sa dft, getOptimalDFTSize, idct */ CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags = 0); @@ -2322,7 +2352,7 @@ CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags = 0); The function cv::mulSpectrums performs the per-element multiplication of the two CCS-packed or complex matrices that are results of a real or complex Fourier transform. -The function, together with dft and idft , may be used to calculate convolution (pass conjB=false ) +The function, together with dft and idft, may be used to calculate convolution (pass conjB=false ) or correlation (pass conjB=true ) of two arrays rapidly. When the arrays are complex, they are simply multiplied (per element) with an optional conjugation of the second-array elements. When the arrays are real, they are assumed to be CCS-packed (see dft for details). 
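For example, correlation of two real arrays in the frequency domain might be computed as
follows (a minimal sketch assuming two same-size CV_32F arrays A and B; padding to an
optimal DFT size is omitted):
@code{.cpp}
    cv::Mat FA, FB, spec, corr;
    cv::dft(A, FA, 0, A.rows);
    cv::dft(B, FB, 0, B.rows);
    cv::mulSpectrums(FA, FB, spec, 0, true);               // conjB=true -> correlation
    cv::dft(spec, corr, cv::DFT_INVERSE + cv::DFT_SCALE, A.rows);
@endcode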
@@ -2356,7 +2386,7 @@ While the function cannot be used directly to estimate the optimal vector size f (since the current DCT implementation supports only even-size vectors), it can be easily processed as getOptimalDFTSize((vecsize+1)/2)\*2. @param vecsize vector size. -@sa dft , dct , idft , idct , mulSpectrums +@sa dft, dct, idft, idct, mulSpectrums */ CV_EXPORTS_W int getOptimalDFTSize(int vecsize); @@ -2908,7 +2938,7 @@ public: The methods transform the state using the MWC algorithm and return the next random number. The first form is equivalent to RNG::next . The - second form returns the random number modulo N , which means that the + second form returns the random number modulo N, which means that the result is in the range [0, N) . */ unsigned operator ()(); From a25132986a32f9f99c49758bbb17be838728007c Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Tue, 9 Apr 2024 09:37:49 +0300 Subject: [PATCH 16/21] Merge pull request #25146 from mshabunin:cpp-contours Reworked findContours to reduce C-API usage #25146 What is done: * rewritten `findContours` and `icvApproximateChainTC89` using C++ data structures * extracted LINK_RUNS mode to separate new public functions - `findContoursLinkRuns` (it uses completely different algorithm) * ~added new public `cv::approximateChainTC89`~ - **:x: decided to hide it** * enabled chain code output (method = 0, no public enum value for this in C++ yet) * kept old function as `findContours_old` (exported, but not exposed to user) * added more tests for findContours (`test_contours_new.cpp`), some tests compare results of old function with new one. Following tests have been added: * contours of random rectangle * contours of many small (1-2px) blobs * contours of random noise * backport of old accuracy test * separate test for LINK RUNS variant What is left to be done (can be done now or later): * improve tests: * some tests have limited verification (e.g. only verify contour sizes) * perhaps reference data can be collected and stored * maybe more test variants can be added (?) * add enum value for chain code output and a method of returning starting points (e.g. first 8 elements of returned `vector` can represent 2 int point coordinates) * add documentation for new functions - **:heavy_check_mark: DONE** * check and improve performance (my experiment showed 0.7x-1.1x some time ago) * remove old functions completely (?) * change contour return order (BFS) or allow to select it (?) * return result tree as-is (?) 
(new data structures should be exposed, bindings should adapt) --- modules/imgproc/include/opencv2/imgproc.hpp | 13 + .../include/opencv2/imgproc/detail/legacy.hpp | 30 + modules/imgproc/src/contours.cpp | 5 +- modules/imgproc/src/contours_approx.cpp | 354 +++++++++ modules/imgproc/src/contours_common.cpp | 75 ++ modules/imgproc/src/contours_common.hpp | 219 ++++++ modules/imgproc/src/contours_link.cpp | 417 +++++++++++ modules/imgproc/src/contours_new.cpp | 697 ++++++++++++++++++ modules/imgproc/test/test_contours.cpp | 4 +- modules/imgproc/test/test_contours_new.cpp | 606 +++++++++++++++ 10 files changed, 2417 insertions(+), 3 deletions(-) create mode 100644 modules/imgproc/include/opencv2/imgproc/detail/legacy.hpp create mode 100644 modules/imgproc/src/contours_approx.cpp create mode 100644 modules/imgproc/src/contours_common.cpp create mode 100644 modules/imgproc/src/contours_common.hpp create mode 100644 modules/imgproc/src/contours_link.cpp create mode 100644 modules/imgproc/src/contours_new.cpp create mode 100644 modules/imgproc/test/test_contours_new.cpp diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 31847694fd..c686e37bd6 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -4040,6 +4040,19 @@ A program using pyramid scaling, Canny, contours and contour simplification to f squares in the input image. */ +//! @brief Find contours using link runs algorithm +//! +//! This function implements an algorithm different from cv::findContours: +//! - doesn't allocate temporary image internally, thus it has reduced memory consumption +//! - supports CV_8UC1 images only +//! - outputs 2-level hierarhy only (RETR_CCOMP mode) +//! - doesn't support approximation change other than CHAIN_APPROX_SIMPLE +//! In all other aspects this function is compatible with cv::findContours. +CV_EXPORTS_W void findContoursLinkRuns(InputArray image, OutputArrayOfArrays contours, OutputArray hierarchy); + +//! @overload +CV_EXPORTS_W void findContoursLinkRuns(InputArray image, OutputArrayOfArrays contours); + /** @brief Approximates a polygonal curve(s) with the specified precision. The function cv::approxPolyDP approximates a curve or a polygon with another curve/polygon with less diff --git a/modules/imgproc/include/opencv2/imgproc/detail/legacy.hpp b/modules/imgproc/include/opencv2/imgproc/detail/legacy.hpp new file mode 100644 index 0000000000..f9abd8d23e --- /dev/null +++ b/modules/imgproc/include/opencv2/imgproc/detail/legacy.hpp @@ -0,0 +1,30 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#ifndef OPENCV_IMGPROC_DETAIL_LEGACY_HPP +#define OPENCV_IMGPROC_DETAIL_LEGACY_HPP + +#include "opencv2/imgproc.hpp" + +namespace cv { + +#ifdef __OPENCV_BUILD + +CV_EXPORTS void findContours_legacy(InputArray _image, + OutputArrayOfArrays _contours, + OutputArray _hierarchy, + int mode, + int method, + Point offset = Point()); +CV_EXPORTS void findContours_legacy(InputArray image, + OutputArrayOfArrays contours, + int mode, + int method, + Point offset = Point()); + +#endif + +} // namespace cv + +#endif // OPENCV_IMGPROC_DETAIL_LEGACY_HPP diff --git a/modules/imgproc/src/contours.cpp b/modules/imgproc/src/contours.cpp index 2e3121418a..d88c6cbede 100644 --- a/modules/imgproc/src/contours.cpp +++ b/modules/imgproc/src/contours.cpp @@ -40,6 +40,7 @@ //M*/ #include "precomp.hpp" #include "opencv2/core/hal/intrin.hpp" +#include "opencv2/imgproc/detail/legacy.hpp" using namespace cv; @@ -1813,7 +1814,7 @@ cvFindContours( void* img, CvMemStorage* storage, return cvFindContours_Impl(img, storage, firstContour, cntHeaderSize, mode, method, offset, 1); } -void cv::findContours( InputArray _image, OutputArrayOfArrays _contours, +void cv::findContours_legacy( InputArray _image, OutputArrayOfArrays _contours, OutputArray _hierarchy, int mode, int method, Point offset ) { CV_INSTRUMENT_REGION(); @@ -1878,7 +1879,7 @@ void cv::findContours( InputArray _image, OutputArrayOfArrays _contours, } } -void cv::findContours( InputArray _image, OutputArrayOfArrays _contours, +void cv::findContours_legacy( InputArray _image, OutputArrayOfArrays _contours, int mode, int method, Point offset) { CV_INSTRUMENT_REGION(); diff --git a/modules/imgproc/src/contours_approx.cpp b/modules/imgproc/src/contours_approx.cpp new file mode 100644 index 0000000000..bf194933d6 --- /dev/null +++ b/modules/imgproc/src/contours_approx.cpp @@ -0,0 +1,354 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include "opencv2/core/base.hpp" +#include "opencv2/core/types.hpp" +#include "opencv2/imgproc.hpp" +#include "contours_common.hpp" +#include + +using namespace std; +using namespace cv; + +namespace { + +struct ApproxItem +{ + Point pt; + size_t k; // support region + int s; // 1-curvature + bool removed; + ApproxItem() : k(0), s(0), removed(false) {} + ApproxItem(const Point& pt_, int s_) : pt(pt_), k(0), s(s_), removed(false) {} +}; + +static const schar abs_diff[16] = {1, 2, 3, 4, 3, 2, 1, 0, 1, 2, 3, 4, 3, 2, 1}; +static const Point chainCodeDeltas[8] = + {{1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1}}; + +// Pass 0. +// Restores all the digital curve points from the chain code. +// Removes the points (from the resultant polygon) +// that have zero 1-curvature +static vector pass_0(const vector& chain, Point pt, bool isApprox, bool isFull) +{ + vector res; + const size_t len = chain.size(); + res.reserve(len / 2); + for (size_t i = 0; i < len; ++i) + { + const schar prev = (i == 0) ? 
chain[len - 1] : chain[i - 1]; + const schar cur = chain[i]; + const schar s = abs_diff[cur - prev + 7]; + if ((!isApprox && (isFull || s != 0)) || isApprox) + { + res.push_back(ApproxItem(pt, s)); + if (s == 0) + (res.end() - 1)->removed = true; + } + pt += chainCodeDeltas[cur]; + } + return res; +} + +static vector gatherPoints(const vector& ares) +{ + vector res; + res.reserve(ares.size() / 2); + for (const ApproxItem& item : ares) + { + if (item.removed) + continue; + res.push_back(item.pt); + } + return res; +} + +static size_t calc_support(const vector& ares, size_t i) +{ + const size_t len = ares.size(); + /* determine support region */ + int d_num = 0; + int l = 0; + size_t k = 1; + for (;; k++) + { + CV_Assert(k <= len); + /* calc indices */ + const size_t i1 = (i >= k) ? (i - k) : (len - k + i); + const size_t i2 = (i + k < len) ? (i + k) : (i + k - len); + + const int dx = ares[i2].pt.x - ares[i1].pt.x; + const int dy = ares[i2].pt.y - ares[i1].pt.y; + + /* distance between p_(i - k) and p_(i + k) */ + const int lk = dx * dx + dy * dy; + + /* distance between p_i and the line (p_(i-k), p_(i+k)) */ + const int dk_num = + (ares[i].pt.x - ares[i1].pt.x) * dy - (ares[i].pt.y - ares[i1].pt.y) * dx; + + union + { + int i; + float f; + } d; + d.f = (float)(((double)d_num) * lk - ((double)dk_num) * l); + + if (k > 1 && (l >= lk || ((d_num > 0 && d.i <= 0) || (d_num < 0 && d.i >= 0)))) + break; + + d_num = dk_num; + l = lk; + } + return k - 1; +} + +static int calc_cosine(const vector& ares, size_t i) +{ + const size_t k = ares[i].k; + size_t j; + int s; + const size_t len = ares.size(); + /* calc k-cosine curvature */ + for (j = k, s = 0; j > 0; j--) + { + const size_t i1 = (i >= j) ? (i - j) : (len - j + i); + const size_t i2 = (i + j < len) ? (i + j) : (i + j - len); + + const int dx1 = ares[i1].pt.x - ares[i].pt.x; + const int dy1 = ares[i1].pt.y - ares[i].pt.y; + const int dx2 = ares[i2].pt.x - ares[i].pt.x; + const int dy2 = ares[i2].pt.y - ares[i].pt.y; + + if ((dx1 | dy1) == 0 || (dx2 | dy2) == 0) + break; + + double temp_num = dx1 * dx2 + dy1 * dy2; + temp_num = (float)(temp_num / sqrt(((double)dx1 * dx1 + (double)dy1 * dy1) * + ((double)dx2 * dx2 + (double)dy2 * dy2))); + Cv32suf sk; + sk.f = (float)(temp_num + 1.1); + + CV_Assert(0 <= sk.f && sk.f <= 2.2); + if (j < k && sk.i <= s) + break; + + s = sk.i; + } + return s; +} + +static bool calc_nms_cleanup(const vector& ares, size_t i) +{ + const size_t k2 = ares[i].k >> 1; + const int s = ares[i].s; + const size_t len = ares.size(); + size_t j; + for (j = 1; j <= k2; j++) + { + const size_t i1 = (i >= j) ? (i - j) : (len - j + i); + const size_t i2 = (i + j < len) ? (i + j) : (i + j - len); + if (ares[i1].s > s || ares[i2].s > s) + break; + } + return j <= k2; +} + +static bool calc_dominance(const vector& ares, size_t i) +{ + const size_t len = ares.size(); + CV_Assert(len > 0); + const size_t i1 = (i >= 1) ? (i - 1) : (len - 1 + i); + const size_t i2 = (i + 1 < len) ? 
(i + 1) : (i + 1 - len); + return ares[i].s <= ares[i1].s || ares[i].s <= ares[i2].s; +} + +inline size_t get_next_idx(const vector& ares, const size_t start) +{ + const size_t len = ares.size(); + size_t res = start + 1; + for (; res < len; ++res) + { + if (!ares[res].removed) + break; + } + return res; +} + +inline void clear_until(vector& ares, const size_t start, const size_t finish) +{ + const size_t len = ares.size(); + for (size_t i = start + 1; i < finish && i < len; ++i) + { + ares[i].removed = true; + } +} + +static bool calc_new_start(vector& ares, size_t& res) +{ + const size_t len = ares.size(); + CV_Assert(len > 0); + size_t i1; + // remove all previous items from the beginning + for (i1 = 1; i1 < len && ares[i1].s != 0; i1++) + { + ares[i1 - 1].s = 0; + } + if (i1 == len) + { + // all points survived - skip to the end + return false; + } + i1--; + + size_t i2; + // remove all following items from the end + for (i2 = len - 2; i2 > 0 && ares[i2].s != 0; i2--) + { + clear_until(ares, i2, len); + ares[i2 + 1].s = 0; + } + i2++; + + // only two points left + if (i1 == 0 && i2 == len - 1) + { + // find first non-removed element from the start + i1 = get_next_idx(ares, 0); + // append first item to the end + ares.push_back(ares[0]); + (ares.end() - 1)->removed = false; + } + res = i1; + return true; +} + +static void pass_cleanup(vector& ares, size_t start_idx) +{ + int count = 1; + + const size_t len = ares.size(); + size_t first = start_idx; + for (size_t i = start_idx, prev = i; i < len; ++i) + { + ApproxItem& item = ares[i]; + if (item.removed) + continue; + size_t next_idx = get_next_idx(ares, i); + if (next_idx == len || next_idx - i != 1) + { + if (count >= 2) + { + if (count == 2) + { + const int s1 = ares[prev].s; + const int s2 = ares[i].s; + + if (s1 > s2 || (s1 == s2 && ares[prev].k <= ares[i].k)) + /* remove second */ + clear_until(ares, get_next_idx(ares, prev), get_next_idx(ares, i)); + else + /* remove first */ + clear_until(ares, first, i); + } + else + { + first = get_next_idx(ares, first); + clear_until(ares, first, i); + } + } + clear_until(ares, first, i); + first = i; + count = 1; + } + else + { + ++count; + } + prev = i; + } +} + +} // namespace + + +vector cv::approximateChainTC89(vector chain, const Point& origin, const int method) +{ + if (chain.size() == 0) + { + return vector({origin}); + } + + const bool isApprox = method == CHAIN_APPROX_TC89_L1 || method == CHAIN_APPROX_TC89_KCOS; + + ApproxItem root; + vector ares = pass_0(chain, origin, isApprox, method == CHAIN_APPROX_NONE); + + if (isApprox) + { + CV_DbgAssert(ares.size() < (size_t)numeric_limits::max()); + + // Pass 1. + // Determines support region for all the remained points */ + for (size_t i = 0; i < ares.size(); ++i) + { + ApproxItem& item = ares[i]; + if (item.removed) + continue; + item.k = calc_support(ares, i); + + if (method == CHAIN_APPROX_TC89_KCOS) + item.s = calc_cosine(ares, i); + } + + // Pass 2. + // Performs non-maxima suppression + for (size_t i = 0; i < ares.size(); ++i) + { + ApproxItem& item = ares[i]; + if (calc_nms_cleanup(ares, i)) + { + item.s = 0; // "clear" + item.removed = true; + } + } + + // Pass 3. + // Removes non-dominant points with 1-length support region */ + for (size_t i = 0; i < ares.size(); ++i) + { + ApproxItem& item = ares[i]; + if (item.removed) + continue; + if (item.k == 1 && calc_dominance(ares, i)) + { + item.s = 0; + item.removed = true; + } + } + + if (method == cv::CHAIN_APPROX_TC89_L1) + { + // Pass 4. 
+ // Cleans remained couples of points + bool skip = false; + size_t new_start_idx = 0; + const size_t len = ares.size(); + if (ares[0].s != 0 && ares[len - 1].s != 0) + { + if (!calc_new_start(ares, new_start_idx)) + { + skip = true; + } + } + if (!skip) + { + pass_cleanup(ares, new_start_idx); + } + } + } + + return gatherPoints(ares); +} diff --git a/modules/imgproc/src/contours_common.cpp b/modules/imgproc/src/contours_common.cpp new file mode 100644 index 0000000000..a8cb12c1a2 --- /dev/null +++ b/modules/imgproc/src/contours_common.cpp @@ -0,0 +1,75 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include "precomp.hpp" +#include "contours_common.hpp" +#include +#include + +using namespace std; +using namespace cv; + +void cv::contourTreeToResults(CTree& tree, + int res_type, + OutputArrayOfArrays& _contours, + OutputArray& _hierarchy) +{ + // check if there are no results + if (tree.isEmpty() || (tree.elem(0).body.isEmpty() && (tree.elem(0).first_child == -1))) + { + _contours.clear(); + return; + } + + // mapping for indexes (original -> resulting) + map index_mapping; + index_mapping[-1] = -1; + index_mapping[0] = -1; + + CV_Assert(tree.size() < (size_t)numeric_limits::max()); + const int total = (int)tree.size() - 1; + _contours.create(total, 1, 0, -1, true); + { + int i = 0; + CIterator it(tree); + while (!it.isDone()) + { + const CNode& elem = it.getNext_s(); + CV_Assert(elem.self() != -1); + if (elem.self() == 0) + continue; + index_mapping[elem.self()] = i; + CV_Assert(elem.body.size() < (size_t)numeric_limits::max()); + const int sz = (int)elem.body.size(); + _contours.create(sz, 1, res_type, i, true); + if (sz > 0) + { + Mat cmat = _contours.getMat(i); + CV_Assert(cmat.isContinuous()); + elem.body.copyTo(cmat.data); + } + ++i; + } + } + + if (_hierarchy.needed()) + { + _hierarchy.create(1, total, CV_32SC4, -1, true); + Mat h_mat = _hierarchy.getMat(); + int i = 0; + CIterator it(tree); + while (!it.isDone()) + { + const CNode& elem = it.getNext_s(); + if (elem.self() == 0) + continue; + Vec4i& h_vec = h_mat.at(i); + h_vec = Vec4i(index_mapping.at(elem.next), + index_mapping.at(elem.prev), + index_mapping.at(elem.first_child), + index_mapping.at(elem.parent)); + ++i; + } + } +} diff --git a/modules/imgproc/src/contours_common.hpp b/modules/imgproc/src/contours_common.hpp new file mode 100644 index 0000000000..b22c5cfd0b --- /dev/null +++ b/modules/imgproc/src/contours_common.hpp @@ -0,0 +1,219 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#ifndef OPENCV_CONTOURS_COMMON_HPP +#define OPENCV_CONTOURS_COMMON_HPP + +#include "precomp.hpp" +#include + +namespace cv { + +static const schar MAX_SIZE = 16; + +static const cv::Point chainCodeDeltas[8] = + {{1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1}}; + +static inline int getDelta(schar s, size_t step) +{ + CV_DbgAssert(s >= 0 && s < 16); + const cv::Point res = chainCodeDeltas[s % 8]; + return res.x + res.y * (int)step; +} + +inline schar clamp_direction(schar dir) +{ + return std::min(dir, (schar)15); +} + +template +class TreeNode +{ +private: + int self_; + +public: + // tree hierarchy (parent - children) + int parent; + int first_child; + // 1st linked list - bidirectional - sibling children + int prev; + int next; + // 2nd linked list - unidirectional - not related to 1st list + int ctable_next; + T body; + +public: + TreeNode(int self) : + self_(self), parent(-1), first_child(-1), prev(-1), next(-1), ctable_next(-1) + { + CV_Assert(self >= 0); + } + int self() const + { + return self_; + } +}; + +template +class Tree +{ +private: + std::vector> nodes; + +public: + TreeNode& newElem() + { + const size_t idx = nodes.size(); + CV_DbgAssert(idx < (size_t)std::numeric_limits::max()); + nodes.push_back(TreeNode((int)idx)); + return nodes[idx]; + } + TreeNode& elem(int idx) + { + CV_DbgAssert(idx >= 0 && (size_t)idx < nodes.size()); + return nodes[(size_t)idx]; + } + const TreeNode& elem(int idx) const + { + CV_DbgAssert(idx >= 0 && (size_t)idx < nodes.size()); + return nodes[(size_t)idx]; + } + int lastSibling(int e) const + { + if (e != -1) + { + while (true) + { + const TreeNode& cur_elem = elem(e); + if (cur_elem.next == -1) + break; + e = cur_elem.next; + } + } + return e; + } + void addSiblingAfter(int prev, int idx) + { + TreeNode& prev_item = nodes[prev]; + TreeNode& child = nodes[idx]; + child.parent = prev_item.parent; + if (prev_item.next != -1) + { + nodes[prev_item.next].prev = idx; + child.next = prev_item.next; + } + child.prev = prev; + prev_item.next = idx; + } + void addChild(int parent_idx, int child_idx) + { + TreeNode& parent = nodes[parent_idx]; + TreeNode& child = nodes[child_idx]; + if (parent.first_child != -1) + { + TreeNode& fchild_ = nodes[parent.first_child]; + fchild_.prev = child_idx; + child.next = parent.first_child; + } + parent.first_child = child_idx; + child.parent = parent_idx; + child.prev = -1; + } + bool isEmpty() const + { + return nodes.size() == 0; + } + size_t size() const + { + return nodes.size(); + } +}; + +template +class TreeIterator +{ +public: + TreeIterator(Tree& tree_) : tree(tree_) + { + CV_Assert(!tree.isEmpty()); + levels.push(0); + } + bool isDone() const + { + return levels.empty(); + } + const TreeNode& getNext_s() + { + int idx = levels.top(); + levels.pop(); + const TreeNode& res = tree.elem(idx); + int cur = tree.lastSibling(res.first_child); + while (cur != -1) + { + levels.push(cur); + cur = tree.elem(cur).prev; + } + return res; + } + +private: + std::stack levels; + Tree& tree; +}; + +//============================================================================== + +class Contour +{ +public: + cv::Rect brect; + cv::Point origin; + std::vector pts; + std::vector codes; + bool isHole; + bool isChain; + + Contour() : isHole(false), isChain(false) {} + void updateBoundingRect() {} + bool isEmpty() const + { + return pts.size() == 0 && 
codes.size() == 0; + } + size_t size() const + { + return isChain ? codes.size() : pts.size(); + } + void copyTo(void* data) const + { + // NOTE: Mat::copyTo doesn't work because it creates new Mat object + // instead of reusing existing vector data + if (isChain) + { + memcpy(data, &codes[0], codes.size() * sizeof(codes[0])); + } + else + { + memcpy(data, &pts[0], pts.size() * sizeof(pts[0])); + } + } +}; + +typedef TreeNode CNode; +typedef Tree CTree; +typedef TreeIterator CIterator; + + +void contourTreeToResults(CTree& tree, + int res_type, + cv::OutputArrayOfArrays& _contours, + cv::OutputArray& _hierarchy); + + +std::vector + approximateChainTC89(std::vector chain, const Point& origin, const int method); + +} // namespace cv + +#endif // OPENCV_CONTOURS_COMMON_HPP diff --git a/modules/imgproc/src/contours_link.cpp b/modules/imgproc/src/contours_link.cpp new file mode 100644 index 0000000000..532519bc97 --- /dev/null +++ b/modules/imgproc/src/contours_link.cpp @@ -0,0 +1,417 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include "precomp.hpp" +#include "contours_common.hpp" +#include "opencv2/core/hal/intrin.hpp" + +using namespace cv; +using namespace std; + +//============================================================================== + +namespace { + +inline static int findStartContourPoint(uchar* src_data, Size img_size, int j) +{ +#if (CV_SIMD || CV_SIMD_SCALABLE) + v_uint8 v_zero = vx_setzero_u8(); + for (; j <= img_size.width - VTraits::vlanes(); j += VTraits::vlanes()) + { + v_uint8 vmask = (v_ne(vx_load((uchar*)(src_data + j)), v_zero)); + if (v_check_any(vmask)) + { + j += v_scan_forward(vmask); + return j; + } + } +#endif + for (; j < img_size.width && !src_data[j]; ++j) + ; + return j; +} + +inline static int findEndContourPoint(uchar* src_data, Size img_size, int j) +{ +#if (CV_SIMD || CV_SIMD_SCALABLE) + if (j < img_size.width && !src_data[j]) + { + return j; + } + else + { + v_uint8 v_zero = vx_setzero_u8(); + for (; j <= img_size.width - VTraits::vlanes(); j += VTraits::vlanes()) + { + v_uint8 vmask = (v_eq(vx_load((uchar*)(src_data + j)), v_zero)); + if (v_check_any(vmask)) + { + j += v_scan_forward(vmask); + return j; + } + } + } +#endif + for (; j < img_size.width && src_data[j]; ++j) + ; + + return j; +} + +//============================================================================== + +struct LinkRunPoint +{ + int link; + int next; + Point pt; + LinkRunPoint() : link(-1), next(-1) {} + LinkRunPoint(const Point& pt_) : link(-1), next(-1), pt(pt_) {} +}; + +typedef LinkRunPoint LRP; + +//============================================================================== + +class LinkRunner +{ +public: + enum LinkConnectionDirection + { + ICV_SINGLE = 0, + ICV_CONNECTING_ABOVE = 1, + ICV_CONNECTING_BELOW = -1, + }; + + CTree tree; + + vector rns; + vector ext_rns; + vector int_rns; + +public: + LinkRunner() + { + tree.newElem(); + rns.reserve(100); + } + void process(Mat& image); + void convertLinks(int& first, int& prev, bool isHole); + void establishLinks(int& prev_point, + int upper_run, + int lower_run, + const int upper_total, + const int lower_total); +}; + +void LinkRunner::convertLinks(int& first, int& prev, bool isHole) +{ + const vector& contours = isHole ? 
int_rns : ext_rns; + int count = 0; + for (int j = 0; j < (int)contours.size(); j++, count++) + { + int start = contours[j]; + int cur = start; + + if (rns[cur].link == -1) + continue; + + CNode& node = tree.newElem(); + node.body.isHole = isHole; + + do + { + node.body.pts.push_back(rns[cur].pt); + int p_temp = cur; + cur = rns[cur].link; + rns[p_temp].link = -1; + } + while (cur != start); + + if (first == 0) + { + tree.addChild(0, node.self()); + prev = first = node.self(); + } + else + { + tree.addSiblingAfter(prev, node.self()); + prev = node.self(); + } + } +} +void LinkRunner::establishLinks(int& prev_point, + int upper_run, + int lower_run, + const int upper_total, + const int lower_total) +{ + int k, n; + int connect_flag = ICV_SINGLE; + for (k = 0, n = 0; k < upper_total / 2 && n < lower_total / 2;) + { + switch (connect_flag) + { + case ICV_SINGLE: + if (rns[rns[upper_run].next].pt.x < rns[rns[lower_run].next].pt.x) + { + if (rns[rns[upper_run].next].pt.x >= rns[lower_run].pt.x - 1) + { + rns[lower_run].link = upper_run; + connect_flag = ICV_CONNECTING_ABOVE; + prev_point = rns[upper_run].next; + } + else + rns[rns[upper_run].next].link = upper_run; + k++; + upper_run = rns[rns[upper_run].next].next; + } + else + { + if (rns[upper_run].pt.x <= rns[rns[lower_run].next].pt.x + 1) + { + rns[lower_run].link = upper_run; + connect_flag = ICV_CONNECTING_BELOW; + prev_point = rns[lower_run].next; + } + else + { + rns[lower_run].link = rns[lower_run].next; + // First point of contour + ext_rns.push_back(lower_run); + } + n++; + lower_run = rns[rns[lower_run].next].next; + } + break; + case ICV_CONNECTING_ABOVE: + if (rns[upper_run].pt.x > rns[rns[lower_run].next].pt.x + 1) + { + rns[prev_point].link = rns[lower_run].next; + connect_flag = ICV_SINGLE; + n++; + lower_run = rns[rns[lower_run].next].next; + } + else + { + rns[prev_point].link = upper_run; + if (rns[rns[upper_run].next].pt.x < rns[rns[lower_run].next].pt.x) + { + k++; + prev_point = rns[upper_run].next; + upper_run = rns[rns[upper_run].next].next; + } + else + { + connect_flag = ICV_CONNECTING_BELOW; + prev_point = rns[lower_run].next; + n++; + lower_run = rns[rns[lower_run].next].next; + } + } + break; + case ICV_CONNECTING_BELOW: + if (rns[lower_run].pt.x > rns[rns[upper_run].next].pt.x + 1) + { + rns[rns[upper_run].next].link = prev_point; + connect_flag = ICV_SINGLE; + k++; + upper_run = rns[rns[upper_run].next].next; + } + else + { + // First point of contour + int_rns.push_back(lower_run); + + rns[lower_run].link = prev_point; + if (rns[rns[lower_run].next].pt.x < rns[rns[upper_run].next].pt.x) + { + n++; + prev_point = rns[lower_run].next; + lower_run = rns[rns[lower_run].next].next; + } + else + { + connect_flag = ICV_CONNECTING_ABOVE; + k++; + prev_point = rns[upper_run].next; + upper_run = rns[rns[upper_run].next].next; + } + } + break; + } + } // k, n + + for (; n < lower_total / 2; n++) + { + if (connect_flag != ICV_SINGLE) + { + rns[prev_point].link = rns[lower_run].next; + connect_flag = ICV_SINGLE; + lower_run = rns[rns[lower_run].next].next; + continue; + } + rns[rns[lower_run].next] = rns[rns[lower_run].next]; + rns[lower_run].link = rns[lower_run].next; + + // First point of contour + ext_rns.push_back(lower_run); + lower_run = rns[rns[lower_run].next].next; + } + + for (; k < upper_total / 2; k++) + { + if (connect_flag != ICV_SINGLE) + { + rns[rns[upper_run].next].link = prev_point; + connect_flag = ICV_SINGLE; + upper_run = rns[rns[upper_run].next].next; + continue; + } + rns[rns[upper_run].next] = 
rns[rns[upper_run].next]; + rns[rns[upper_run].next].link = upper_run; + upper_run = rns[rns[upper_run].next].next; + } +} + + +void LinkRunner::process(Mat& image) +{ + const Size sz = image.size(); + int j; + int lower_total; + int upper_total; + int all_total; + + Point cur_point; + + rns.reserve(sz.height); // optimization, assuming some contours exist + + // First line. None of runs is binded + rns.push_back(LRP()); + int upper_line = (int)rns.size() - 1; + int cur = upper_line; + for (j = 0; j < sz.width;) + { + j = findStartContourPoint(image.ptr(), sz, j); + + if (j == sz.width) + break; + + cur_point.x = j; + + rns.push_back(LRP(cur_point)); + rns[cur].next = (int)rns.size() - 1; + cur = rns[cur].next; + + j = findEndContourPoint(image.ptr(), sz, j + 1); + + cur_point.x = j - 1; + + rns.push_back(LRP(cur_point)); + rns[cur].next = (int)rns.size() - 1; + rns[cur].link = rns[cur].next; + + // First point of contour + ext_rns.push_back(cur); + cur = rns[cur].next; + } + upper_line = rns[upper_line].next; + upper_total = (int)rns.size() - 1; // runs->total - 1; + + int last_elem = cur; + rns[cur].next = -1; + int prev_point = -1; + int lower_line = -1; + for (int i = 1; i < sz.height; i++) + { + // Find runs in next line + cur_point.y = i; + all_total = (int)rns.size(); // runs->total; + for (j = 0; j < sz.width;) + { + j = findStartContourPoint(image.ptr(i), sz, j); + + if (j == sz.width) + break; + + cur_point.x = j; + + rns.push_back(LRP(cur_point)); + rns[cur].next = (int)rns.size() - 1; + cur = rns[cur].next; + + j = findEndContourPoint(image.ptr(i), sz, j + 1); + + cur_point.x = j - 1; + rns.push_back(LRP(cur_point)); + cur = rns[cur].next = (int)rns.size() - 1; + } // j + lower_line = rns[last_elem].next; + lower_total = (int)rns.size() - all_total; // runs->total - all_total; + last_elem = cur; + rns[cur].next = -1; + + CV_DbgAssert(rns.size() < (size_t)numeric_limits::max()); + + // Find links between runs of lower_line and upper_line + establishLinks(prev_point, upper_line, lower_line, upper_total, lower_total); + + upper_line = lower_line; + upper_total = lower_total; + } // i + + // the last line of image + int upper_run = upper_line; + for (int k = 0; k < upper_total / 2; k++) + { + rns[rns[upper_run].next].link = upper_run; + upper_run = rns[rns[upper_run].next].next; + } + + int first = 0; + int prev = 0; + convertLinks(first, prev, false); + convertLinks(first, prev, true); +} + +} // namespace + +//============================================================================== + +void cv::findContoursLinkRuns(InputArray _image, + OutputArrayOfArrays _contours, + OutputArray _hierarchy) +{ + CV_INSTRUMENT_REGION(); + + CV_CheckType(_image.type(), + _image.type() == CV_8UC1 || _image.type() == CV_8SC1, + "Bad input image type, must be CV_8UC1 or CV_8SC1"); + + // Sanity check: output must be of type vector> + CV_Assert(_contours.kind() == _InputArray::STD_VECTOR_VECTOR || + _contours.kind() == _InputArray::STD_VECTOR_MAT || + _contours.kind() == _InputArray::STD_VECTOR_UMAT); + + if (!_contours.empty()) + CV_CheckTypeEQ(_contours.type(), CV_32SC2, "Contours must have type CV_32SC2"); + + if (_hierarchy.needed()) + _hierarchy.clear(); + + Mat image = _image.getMat(); + + LinkRunner runner; + runner.process(image); + + contourTreeToResults(runner.tree, CV_32SC2, _contours, _hierarchy); +} + + +void cv::findContoursLinkRuns(InputArray _image, OutputArrayOfArrays _contours) +{ + CV_INSTRUMENT_REGION(); + findContoursLinkRuns(_image, _contours, noArray()); +} diff --git 
a/modules/imgproc/src/contours_new.cpp b/modules/imgproc/src/contours_new.cpp new file mode 100644 index 0000000000..1481ad9021 --- /dev/null +++ b/modules/imgproc/src/contours_new.cpp @@ -0,0 +1,697 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include "opencv2/imgproc.hpp" +#include "precomp.hpp" +#include "opencv2/core/hal/intrin.hpp" +#include "opencv2/core/check.hpp" +#include "opencv2/core/utils/logger.hpp" +#include +#include +#include +#include + +#include "contours_common.hpp" + +using namespace std; +using namespace cv; + +//============================================================================== + +namespace { + +template +struct Trait +{ +}; + +static const schar MASK8_RIGHT = '\x80'; // 1000 0000 +static const schar MASK8_NEW = '\x02'; // 0000 0010 (+2) +static const schar MASK8_FLAGS = '\xFE'; // 1111 1110 (-2) +static const schar MASK8_BLACK = '\x01'; // 0000 0001 - black pixel + +static const schar MASK8_LVAL = '\x7F'; // 0111 1111 (for table) + +template <> +struct Trait +{ + static inline bool checkValue(const schar* elem, const schar*) + { + return *elem != 0; + } + static inline bool isVal(const schar* elem, const schar*) + { + return *elem == MASK8_BLACK; + } + static inline bool isRight(const schar* elem, const schar*) + { + return (*elem & MASK8_RIGHT) != 0; + } + static inline void setRightFlag(schar* elem, const schar*, schar nbd) + { + *elem = nbd | MASK8_RIGHT; + } + static inline void setNewFlag(schar* elem, const schar*, schar nbd) + { + *elem = nbd; + } +}; + +static const int MASK_RIGHT = 0x80000000; // 100..000 +static const int MASK_NEW = 0x40000000; // 010..000 +static const int MASK_FLAGS = 0xC0000000; // right + new +static const int MASK_VAL = 0x3FFFFFFF; // ~flags - pixel label + +template <> +struct Trait +{ + static inline bool checkValue(const int* elem, const int* elem0) + { + return (*elem & MASK_VAL) == (*elem0 & MASK_VAL); + } + static inline bool isVal(const int* elem, const int* elem0) + { + return *elem == (*elem0 & MASK_VAL); + } + static inline bool isRight(const int* elem, const int* elem0) + { + return (*elem & MASK_RIGHT) == (*elem0 & MASK8_RIGHT); + } + static inline void setRightFlag(int* elem, const int* elem0, int) + { + *elem = (*elem0 & MASK_VAL) | MASK_NEW | MASK_RIGHT; + } + static inline void setNewFlag(int* elem, const int* elem0, int) + { + *elem = (*elem0 & MASK_VAL) | MASK_NEW; + } +}; + +} // namespace + + +//============================================================================== + + +namespace { + +template +static bool icvTraceContour(Mat& image, const Point& start, const Point& end, bool isHole) +{ + const T* stop_ptr = image.ptr(end.y, end.x); + const size_t step = image.step1(); + const T *i0 = image.ptr(start.y, start.x), *i1, *i3, *i4 = NULL; + const schar s_end = isHole ? 
0 : 4; + + schar s = s_end; + do + { + s = (s - 1) & 7; + i1 = i0 + getDelta(s, step); + } + while (!Trait::checkValue(i1, i0) && s != s_end); + + i3 = i0; + + // check single pixel domain + if (s != s_end) + { + // follow border + for (;;) + { + CV_Assert(i3 != NULL); + s = clamp_direction(s); + while (s < MAX_SIZE - 1) + { + ++s; + i4 = i3 + getDelta(s, step); + CV_Assert(i4 != NULL); + if (Trait::checkValue(i4, i0)) + break; + } + + if (i3 == stop_ptr) + { + if (!Trait::isRight(i3, i0)) + { + // it's the only contour + return true; + } + + // check if this is the last contour + // encountered during a raster scan + const T* i5; + schar t = s; + while (true) + { + t = (t - 1) & 7; + i5 = i3 + getDelta(t, step); + if (*i5 != 0) + break; + if (t == 0) + return true; + } + } + + if ((i4 == i0 && i3 == i1)) + break; + + i3 = i4; + s = (s + 4) & 7; + } // end of border following loop + } + else + { + return i3 == stop_ptr; + } + + return false; +} + +template +static void icvFetchContourEx(Mat& image, + const Point& start, + T nbd, + Contour& res_contour, + const bool isDirect) +{ + const size_t step = image.step1(); + T *i0 = image.ptr(start.y, start.x), *i1, *i3, *i4 = NULL; + + Point pt = res_contour.origin; + + cv::Rect rect(pt.x, pt.y, pt.x, pt.y); + + schar s_end = res_contour.isHole ? 0 : 4; + schar s = s_end; + do + { + s = (s - 1) & 7; + i1 = i0 + getDelta(s, step); + } + while (!Trait::checkValue(i1, i0) && s != s_end); + + if (s == s_end) + { + Trait::setRightFlag(i0, i0, nbd); + if (!res_contour.isChain) + { + res_contour.pts.push_back(pt); + } + } + else + { + i3 = i0; + schar prev_s = s ^ 4; + + // follow border + for (;;) + { + s_end = s; + s = clamp_direction(s); + while (s < MAX_SIZE - 1) + { + ++s; + i4 = i3 + getDelta(s, step); + CV_Assert(i4 != NULL); + if (Trait::checkValue(i4, i0)) + break; + } + s &= 7; + + // check "right" bound + if ((unsigned)(s - 1) < (unsigned)s_end) + { + Trait::setRightFlag(i3, i0, nbd); + } + else if (Trait::isVal(i3, i0)) + { + Trait::setNewFlag(i3, i0, nbd); + } + + if (res_contour.isChain) + { + res_contour.codes.push_back(s); + } + else if (s != prev_s || isDirect) + { + res_contour.pts.push_back(pt); + } + + if (s != prev_s) + { + // update bounds + if (pt.x < rect.x) + rect.x = pt.x; + else if (pt.x > rect.width) + rect.width = pt.x; + + if (pt.y < rect.y) + rect.y = pt.y; + else if (pt.y > rect.height) + rect.height = pt.y; + } + + prev_s = s; + pt += chainCodeDeltas[s]; + + if (i4 == i0 && i3 == i1) + break; + + i3 = i4; + s = (s + 4) & 7; + } + } + rect.width -= rect.x - 1; + rect.height -= rect.y - 1; + res_contour.brect = rect; +} + +} // namespace + + +//============================================================================== + +// +// Raster->Chain Tree (Suzuki algorithms) +// + +// Structure that is used for sequential retrieving contours from the image. +// It supports both hierarchical and plane variants of Suzuki algorithm. 
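+// A rough outline of how it is driven (see cv::findContours below): create() builds a
+// scanner for the border-padded image, findNext() is called in a loop until it returns
+// false, and the accumulated 'tree' is converted to the output arrays with
+// contourTreeToResults().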
+struct ContourScanner_ +{ + Mat image; + Point offset; // ROI offset: coordinates, added to each contour point + Point pt; // current scanner position + Point lnbd; // position of the last met contour + schar nbd; // current mark val + int approx_method1; // approx method when tracing + int approx_method2; // final approx method + int mode; + CTree tree; + array ctable; + +public: + ContourScanner_() {} + ~ContourScanner_() {} + inline bool isInt() const + { + return (this->mode == CV_RETR_FLOODFILL); + } + inline bool isSimple() const + { + return (this->mode == RETR_EXTERNAL || this->mode == RETR_LIST); + } + + CNode& makeContour(schar& nbd_, const bool is_hole, const int x, const int y); + bool contourScan(const int prev, int& p, Point& last_pos, const int x, const int y); + int findFirstBoundingContour(const Point& last_pos, const int y, const int lval, int par); + int findNextX(int x, int y, int& prev, int& p); + bool findNext(); + + static shared_ptr create(Mat img, int mode, int method, Point offset); +}; // class ContourScanner_ + +typedef shared_ptr ContourScanner; + + +shared_ptr ContourScanner_::create(Mat img, int mode, int method, Point offset) +{ + if (mode == RETR_CCOMP && img.type() == CV_32SC1) + mode = RETR_FLOODFILL; + + if (mode == RETR_FLOODFILL) + CV_CheckTypeEQ(img.type(), CV_32SC1, "RETR_FLOODFILL mode supports only CV_32SC1 images"); + else + CV_CheckTypeEQ(img.type(), + CV_8UC1, + "Modes other than RETR_FLOODFILL and RETR_CCOMP support only CV_8UC1 " + "images"); + + CV_Check(mode, + mode == RETR_EXTERNAL || mode == RETR_LIST || mode == RETR_CCOMP || + mode == RETR_TREE || mode == RETR_FLOODFILL, + "Wrong extraction mode"); + + CV_Check(method, + method == 0 || method == CHAIN_APPROX_NONE || method == CHAIN_APPROX_SIMPLE || + method == CHAIN_APPROX_TC89_L1 || method == CHAIN_APPROX_TC89_KCOS, + "Wrong approximation method"); + + Size size = img.size(); + CV_Assert(size.height >= 1); + + shared_ptr scanner = make_shared(); + scanner->image = img; + scanner->mode = mode; + scanner->offset = offset; + scanner->pt = Point(1, 1); + scanner->lnbd = Point(0, 1); + scanner->nbd = 2; + CNode& root = scanner->tree.newElem(); + CV_Assert(root.self() == 0); + root.body.isHole = true; + root.body.brect = Rect(Point(0, 0), size); + scanner->ctable.fill(-1); + scanner->approx_method2 = scanner->approx_method1 = method; + if (method == CV_CHAIN_APPROX_TC89_L1 || method == CV_CHAIN_APPROX_TC89_KCOS) + scanner->approx_method1 = CV_CHAIN_CODE; + return scanner; +} + +CNode& ContourScanner_::makeContour(schar& nbd_, const bool is_hole, const int x, const int y) +{ + const bool isChain = (this->approx_method1 == CV_CHAIN_CODE); // TODO: get rid of old constant + const bool isDirect = (this->approx_method1 == CHAIN_APPROX_NONE); + + const Point start_pt(x - (is_hole ? 
1 : 0), y); + + CNode& res = tree.newElem(); + if (isChain) + res.body.codes.reserve(200); + else + res.body.pts.reserve(200); + res.body.isHole = is_hole; + res.body.isChain = isChain; + res.body.origin = start_pt + offset; + if (isSimple()) + { + icvFetchContourEx(this->image, start_pt, MASK8_NEW, res.body, isDirect); + } + else + { + schar lval; + if (isInt()) + { + const int start_val = this->image.at(start_pt); + lval = start_val & MASK8_LVAL; + icvFetchContourEx(this->image, start_pt, 0, res.body, isDirect); + } + else + { + lval = nbd_; + // change nbd + nbd_ = (nbd_ + 1) & MASK8_LVAL; + if (nbd_ == 0) + nbd_ = MASK8_BLACK | MASK8_NEW; + icvFetchContourEx(this->image, start_pt, lval, res.body, isDirect); + } + res.body.brect.x -= this->offset.x; + res.body.brect.y -= this->offset.y; + res.ctable_next = this->ctable[lval]; + this->ctable[lval] = res.self(); + } + const Point prev_origin = res.body.origin; + res.body.origin = start_pt; + if (this->approx_method1 != this->approx_method2) + { + CV_Assert(res.body.isChain); + res.body.pts = approximateChainTC89(res.body.codes, prev_origin, this->approx_method2); + res.body.isChain = false; + } + return res; +} + +bool ContourScanner_::contourScan(const int prev, int& p, Point& last_pos, const int x, const int y) +{ + bool is_hole = false; + + /* if not external contour */ + if (isInt()) + { + if (!(((prev & MASK_FLAGS) != 0 || prev == 0) && (p & MASK_FLAGS) == 0)) + { + if ((prev & MASK_FLAGS) != 0 || ((p & MASK_FLAGS) != 0)) + return false; + + if (prev & MASK_FLAGS) + { + last_pos.x = x - 1; + } + is_hole = true; + } + } + else + { + if (!(prev == 0 && p == 1)) + { + if (p != 0 || prev < 1) + return false; + + if (prev & MASK8_FLAGS) + { + last_pos.x = x - 1; + } + is_hole = true; + } + } + + if (mode == RETR_EXTERNAL && (is_hole || this->image.at(last_pos) > 0)) + { + return false; + } + + /* find contour parent */ + int main_parent = -1; + if (isSimple() || (!is_hole && (mode == CV_RETR_CCOMP || mode == CV_RETR_FLOODFILL)) || + last_pos.x <= 0) + { + main_parent = 0; + } + else + { + int lval; + if (isInt()) + lval = this->image.at(last_pos.y, last_pos.x) & MASK8_LVAL; + else + lval = this->image.at(last_pos.y, last_pos.x) & MASK8_LVAL; + + main_parent = findFirstBoundingContour(last_pos, y, lval, main_parent); + + // if current contour is a hole and previous contour is a hole or + // current contour is external and previous contour is external then + // the parent of the contour is the parent of the previous contour else + // the parent is the previous contour itself. + { + CNode& main_parent_elem = tree.elem(main_parent); + if (main_parent_elem.body.isHole == is_hole) + { + if (main_parent_elem.parent != -1) + { + main_parent = main_parent_elem.parent; + } + else + { + main_parent = 0; + } + } + } + + // hole flag of the parent must differ from the flag of the contour + { + CNode& main_parent_elem = tree.elem(main_parent); + CV_Assert(main_parent_elem.body.isHole != is_hole); + } + } + + last_pos.x = x - (is_hole ? 1 : 0); + + schar nbd_ = this->nbd; + CNode& new_contour = makeContour(nbd_, is_hole, x, y); + if (new_contour.parent == -1) + { + tree.addChild(main_parent, new_contour.self()); + } + this->pt.x = !isInt() ? (x + 1) : (x + 1 - (is_hole ? 
1 : 0)); + this->pt.y = y; + this->nbd = nbd_; + return true; +} + +int ContourScanner_::findFirstBoundingContour(const Point& last_pos, + const int y, + const int lval, + int par) +{ + const Point end_point(last_pos.x, y); + int res = par; + int cur = ctable[lval]; + while (cur != -1) + { + CNode& cur_elem = tree.elem(cur); + if (((last_pos.x - cur_elem.body.brect.x) < cur_elem.body.brect.width) && + ((last_pos.y - cur_elem.body.brect.y) < cur_elem.body.brect.height)) + { + if (res != -1) + { + CNode& res_elem = tree.elem(res); + const Point origin = res_elem.body.origin; + const bool isHole = res_elem.body.isHole; + if (isInt()) + { + if (icvTraceContour(this->image, origin, end_point, isHole)) + break; + } + else + { + if (icvTraceContour(this->image, origin, end_point, isHole)) + break; + } + } + res = cur; + } + cur = cur_elem.ctable_next; + } + return res; +} + +int ContourScanner_::findNextX(int x, int y, int& prev, int& p) +{ + const int width = this->image.size().width - 1; + if (isInt()) + { + for (; x < width && + ((p = this->image.at(y, x)) == prev || (p & MASK_VAL) == (prev & MASK_VAL)); + x++) + prev = p; + } + else + { +#if (CV_SIMD || CV_SIMD_SCALABLE) + if ((p = this->image.at(y, x)) != prev) + { + return x; + } + else + { + v_uint8 v_prev = vx_setall_u8((uchar)prev); + for (; x <= width - VTraits::vlanes(); x += VTraits::vlanes()) + { + v_uint8 vmask = (v_ne(vx_load(this->image.ptr(y, x)), v_prev)); + if (v_check_any(vmask)) + { + x += v_scan_forward(vmask); + p = this->image.at(y, x); + return x; + } + } + } +#endif + for (; x < width && (p = this->image.at(y, x)) == prev; x++) + ; + } + return x; +} + +bool ContourScanner_::findNext() +{ + int x = this->pt.x; + int y = this->pt.y; + int width = this->image.size().width - 1; + int height = this->image.size().height - 1; + Point last_pos = this->lnbd; + int prev = isInt() ? this->image.at(y, x - 1) : this->image.at(y, x - 1); + + for (; y < height; y++) + { + int p = 0; + for (; x < width; x++) + { + x = findNextX(x, y, prev, p); + if (x >= width) + break; + if (contourScan(prev, p, last_pos, x, y)) + { + this->lnbd = last_pos; + return true; + } + else + { + prev = p; + if ((isInt() && (prev & MASK_FLAGS)) || (!isInt() && (prev & MASK8_FLAGS))) + { + last_pos.x = x; + } + } + } + last_pos = Point(0, y + 1); + x = 1; + prev = 0; + } + + return false; +} + +//============================================================================== + +void cv::findContours(InputArray _image, + OutputArrayOfArrays _contours, + OutputArray _hierarchy, + int mode, + int method, + Point offset) +{ + CV_INSTRUMENT_REGION(); + + // TODO: remove this block in future + if (method == 5 /*CV_LINK_RUNS*/) + { + CV_LOG_ONCE_WARNING(NULL, + "LINK_RUNS mode has been extracted to separate function: " + "cv::findContoursLinkRuns. 
" + "Calling through cv::findContours will be removed in future."); + CV_CheckTrue(!_hierarchy.needed() || mode == RETR_CCOMP, + "LINK_RUNS mode supports only simplified hierarchy output (mode=RETR_CCOMP)"); + findContoursLinkRuns(_image, _contours, _hierarchy); + return; + } + + // TODO: need enum value, need way to return contour starting points with chain codes + if (method == 0 /*CV_CHAIN_CODE*/) + { + CV_LOG_ONCE_WARNING(NULL, + "Chain code output is an experimental feature and might change in " + "future!"); + } + + // Sanity check: output must be of type vector> + CV_Assert((_contours.kind() == _InputArray::STD_VECTOR_VECTOR) || + (_contours.kind() == _InputArray::STD_VECTOR_MAT) || + (_contours.kind() == _InputArray::STD_VECTOR_UMAT)); + + const int res_type = (method == 0 /*CV_CHAIN_CODE*/) ? CV_8SC1 : CV_32SC2; + if (!_contours.empty()) + { + CV_CheckTypeEQ(_contours.type(), + res_type, + "Contours must have type CV_8SC1 (chain code) or CV_32SC2 (other methods)"); + } + + if (_hierarchy.needed()) + _hierarchy.clear(); + + // preprocess + Mat image; + copyMakeBorder(_image, image, 1, 1, 1, 1, BORDER_CONSTANT | BORDER_ISOLATED, Scalar(0)); + if (image.type() != CV_32SC1) + threshold(image, image, 0, 1, THRESH_BINARY); + + // find contours + ContourScanner scanner = ContourScanner_::create(image, mode, method, offset + Point(-1, -1)); + while (scanner->findNext()) + { + } + + contourTreeToResults(scanner->tree, res_type, _contours, _hierarchy); +} + +void cv::findContours(InputArray _image, + OutputArrayOfArrays _contours, + int mode, + int method, + Point offset) +{ + CV_INSTRUMENT_REGION(); + findContours(_image, _contours, noArray(), mode, method, offset); +} diff --git a/modules/imgproc/test/test_contours.cpp b/modules/imgproc/test/test_contours.cpp index 6f4315225e..4a5c61b0e8 100644 --- a/modules/imgproc/test/test_contours.cpp +++ b/modules/imgproc/test/test_contours.cpp @@ -39,6 +39,7 @@ // //M*/ +#include "opencv2/imgproc/types_c.h" #include "test_precomp.hpp" #include @@ -459,7 +460,6 @@ TEST(Imgproc_FindContours, hilbert) dilate(img, img, Mat()); vector > contours; findContours(img, contours, noArray(), RETR_LIST, CHAIN_APPROX_SIMPLE); - printf("ncontours = %d, contour[0].npoints=%d\n", (int)contours.size(), (int)contours[0].size()); img.setTo(Scalar::all(0)); drawContours(img, contours, 0, Scalar::all(255), 1); @@ -530,10 +530,12 @@ TEST(Imgproc_FindContours, regression_4363_shared_nbd) if (found) { + ASSERT_EQ(contours.size(), hierarchy.size()); EXPECT_LT(hierarchy[index][3], 0) << "Desired result: (7,9) has no parent - Actual result: parent of (7,9) is another contour. index = " << index; } } + TEST(Imgproc_PointPolygonTest, regression_10222) { vector contour; diff --git a/modules/imgproc/test/test_contours_new.cpp b/modules/imgproc/test/test_contours_new.cpp new file mode 100644 index 0000000000..3381d31e64 --- /dev/null +++ b/modules/imgproc/test/test_contours_new.cpp @@ -0,0 +1,606 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html
+
+#include "opencv2/imgproc/types_c.h"
+#include "test_precomp.hpp"
+#include "opencv2/ts/ocl_test.hpp"
+#include "opencv2/imgproc/detail/legacy.hpp"
+
+#define CHECK_OLD 1
+
+namespace opencv_test { namespace {
+
+// debug function
+template <typename T>
+inline static void print_pts(const T& c)
+{
+    for (const auto& one_pt : c)
+    {
+        cout << one_pt << " ";
+    }
+    cout << endl;
+}
+
+// debug function
+template <typename T>
+inline static void print_pts_2(vector<vector<T>>& cs)
+{
+    int cnt = 0;
+    cout << "Contours:" << endl;
+    for (const auto& one_c : cs)
+    {
+        cout << cnt++ << " : ";
+        print_pts(one_c);
+    }
+};
+
+// draw 1-2 px blob with orientation defined by 'kind'
+template <typename T>
+inline static void drawSmallContour(Mat& img, Point pt, int kind, int color_)
+{
+    const T color = static_cast<T>(color_);
+    img.at<T>(pt) = color;
+    switch (kind)
+    {
+        case 1: img.at<T>(pt + Point(1, 0)) = color; break;
+        case 2: img.at<T>(pt + Point(1, -1)) = color; break;
+        case 3: img.at<T>(pt + Point(0, -1)) = color; break;
+        case 4: img.at<T>(pt + Point(-1, -1)) = color; break;
+        case 5: img.at<T>(pt + Point(-1, 0)) = color; break;
+        case 6: img.at<T>(pt + Point(-1, 1)) = color; break;
+        case 7: img.at<T>(pt + Point(0, 1)) = color; break;
+        case 8: img.at<T>(pt + Point(1, 1)) = color; break;
+        default: break;
+    }
+}
+
+inline static void drawContours(Mat& img,
+                                const vector<vector<Point>>& contours,
+                                const Scalar& color = Scalar::all(255))
+{
+    for (const auto& contour : contours)
+    {
+        for (size_t n = 0, end = contour.size(); n < end; ++n)
+        {
+            size_t m = n + 1;
+            if (n == end - 1)
+                m = 0;
+            line(img, contour[m], contour[n], color, 1, LINE_8);
+        }
+    }
+}
+
+//==================================================================================================
+
+// Test parameters - mode + method
+typedef testing::TestWithParam<tuple<int, int>> Imgproc_FindContours_Modes1;
+
+
+// Draw random rectangle and find contours
+//
+TEST_P(Imgproc_FindContours_Modes1, rectangle)
+{
+    const int mode = get<0>(GetParam());
+    const int method = get<1>(GetParam());
+
+    const size_t ITER = 100;
+    RNG rng = TS::ptr()->get_rng();
+
+    for (size_t i = 0; i < ITER; ++i)
+    {
+        SCOPED_TRACE(cv::format("i=%zu", i));
+        const Size sz(rng.uniform(640, 1920), rng.uniform(480, 1080));
+        Mat img(sz, CV_8UC1, Scalar::all(0));
+        Mat img32s(sz, CV_32SC1, Scalar::all(0));
+        const Rect r(Point(rng.uniform(1, sz.width / 2 - 1), rng.uniform(1, sz.height / 2)),
+                     Point(rng.uniform(sz.width / 2 - 1, sz.width - 1),
+                           rng.uniform(sz.height / 2 - 1, sz.height - 1)));
+        rectangle(img, r, Scalar::all(255));
+        rectangle(img32s, r, Scalar::all(255), FILLED);
+
+        const vector<Point> ext_ref {r.tl(),
+                                     r.tl() + Point(0, r.height - 1),
+                                     r.br() + Point(-1, -1),
+                                     r.tl() + Point(r.width - 1, 0)};
+        const vector<Point> int_ref {ext_ref[0] + Point(0, 1),
+                                     ext_ref[0] + Point(1, 0),
+                                     ext_ref[3] + Point(-1, 0),
+                                     ext_ref[3] + Point(0, 1),
+                                     ext_ref[2] + Point(0, -1),
+                                     ext_ref[2] + Point(-1, 0),
+                                     ext_ref[1] + Point(1, 0),
+                                     ext_ref[1] + Point(0, -1)};
+        const size_t ext_perimeter = r.width * 2 + r.height * 2;
+        const size_t int_perimeter = ext_perimeter - 4;
+
+        vector<vector<Point>> contours;
+        vector<vector<schar>> chains;
+        vector<Vec4i> hierarchy;
+
+        // run function
+        if (mode == RETR_FLOODFILL)
+            if (method == 0)
+                findContours(img32s, chains, hierarchy, mode, method);
+            else
+                findContours(img32s, contours, hierarchy, mode, method);
+        else if (method == 0)
+            findContours(img, chains, hierarchy, mode, method);
+        else
+            findContours(img, contours, hierarchy, mode, method);
+
+        // verify results
+        if (mode == RETR_EXTERNAL)
+        {
+            if (method == 0)
+            {
+                ASSERT_EQ(1U, chains.size());
+            }
+            else
+            {
+                ASSERT_EQ(1U, contours.size());
+                if (method == CHAIN_APPROX_NONE)
+                {
+                    EXPECT_EQ(int_perimeter, contours[0].size());
+                }
+                else if (method == CHAIN_APPROX_SIMPLE)
+                {
+                    EXPECT_MAT_NEAR(Mat(ext_ref), Mat(contours[0]), 0);
+                }
+            }
+        }
+        else
+        {
+            if (method == 0)
+            {
+                ASSERT_EQ(2U, chains.size());
+            }
+            else
+            {
+                ASSERT_EQ(2U, contours.size());
+                if (mode == RETR_LIST)
+                {
+                    if (method == CHAIN_APPROX_NONE)
+                    {
+                        EXPECT_EQ(int_perimeter - 4, contours[0].size());
+                        EXPECT_EQ(int_perimeter, contours[1].size());
+                    }
+                    else if (method == CHAIN_APPROX_SIMPLE)
+                    {
+                        EXPECT_MAT_NEAR(Mat(int_ref), Mat(contours[0]), 0);
+                        EXPECT_MAT_NEAR(Mat(ext_ref), Mat(contours[1]), 0);
+                    }
+                }
+                else if (mode == RETR_CCOMP || mode == RETR_TREE)
+                {
+                    if (method == CHAIN_APPROX_NONE)
+                    {
+                        EXPECT_EQ(int_perimeter, contours[0].size());
+                        EXPECT_EQ(int_perimeter - 4, contours[1].size());
+                    }
+                    else if (method == CHAIN_APPROX_SIMPLE)
+                    {
+                        EXPECT_MAT_NEAR(Mat(ext_ref), Mat(contours[0]), 0);
+                        EXPECT_MAT_NEAR(Mat(int_ref), Mat(contours[1]), 0);
+                    }
+                }
+                else if (mode == RETR_FLOODFILL)
+                {
+                    if (method == CHAIN_APPROX_NONE)
+                    {
+                        EXPECT_EQ(int_perimeter + 4, contours[0].size());
+                    }
+                    else if (method == CHAIN_APPROX_SIMPLE)
+                    {
+                        EXPECT_EQ(int_ref.size(), contours[0].size());
+                        EXPECT_MAT_NEAR(Mat(ext_ref), Mat(contours[1]), 0);
+                    }
+                }
+            }
+        }
+
+#if CHECK_OLD
+        if (method != 0)  // old doesn't support chain codes
+        {
+            if (mode != RETR_FLOODFILL)
+            {
+                vector<vector<Point>> contours_o;
+                vector<Vec4i> hierarchy_o;
+                findContours_legacy(img, contours_o, hierarchy_o, mode, method);
+                ASSERT_EQ(contours.size(), contours_o.size());
+                for (size_t j = 0; j < contours.size(); ++j)
+                {
+                    SCOPED_TRACE(format("contour %zu", j));
+                    EXPECT_MAT_NEAR(Mat(contours[j]), Mat(contours_o[j]), 0);
+                }
+                EXPECT_MAT_NEAR(Mat(hierarchy), Mat(hierarchy_o), 0);
+            }
+            else
+            {
+                vector<vector<Point>> contours_o;
+                vector<Vec4i> hierarchy_o;
+                findContours_legacy(img32s, contours_o, hierarchy_o, mode, method);
+                ASSERT_EQ(contours.size(), contours_o.size());
+                for (size_t j = 0; j < contours.size(); ++j)
+                {
+                    SCOPED_TRACE(format("contour %zu", j));
+                    EXPECT_MAT_NEAR(Mat(contours[j]), Mat(contours_o[j]), 0);
+                }
+                EXPECT_MAT_NEAR(Mat(hierarchy), Mat(hierarchy_o), 0);
+            }
+        }
+#endif
+    }
+}
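+
+// For reference only: a sketch of the hierarchy layout the rectangle case is
+// expected to produce with RETR_CCOMP or RETR_TREE, where the two borders form
+// a parent/child pair and each entry holds [next, previous, first_child, parent].
+// The literal values below are an assumption based on the documented
+// findContours hierarchy convention, not output captured from the test above.
+inline static vector<Vec4i> example_rectangle_hierarchy()
+{
+    return {Vec4i(-1, -1, 1, -1),   // contour 0: outer border, first child is contour 1
+            Vec4i(-1, -1, -1, 0)};  // contour 1: hole border, parent is contour 0
+}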
+
+
+// Draw many small 1-2px blobs and find contours
+//
+TEST_P(Imgproc_FindContours_Modes1, small)
+{
+    const int mode = get<0>(GetParam());
+    const int method = get<1>(GetParam());
+
+    const size_t DIM = 1000;
+    const Size sz(DIM, DIM);
+    const int num = (DIM / 10) * (DIM / 10);  // number of 10x10 squares
+
+    Mat img(sz, CV_8UC1, Scalar::all(0));
+    Mat img32s(sz, CV_32SC1, Scalar::all(0));
+    vector<Point> pts;
+    int extra_contours_32s = 0;
+    for (int j = 0; j < num; ++j)
+    {
+        const int kind = j % 9;
+        Point pt {(j % 100) * 10 + 4, (j / 100) * 10 + 4};
+        drawSmallContour<uchar>(img, pt, kind, 255);
+        drawSmallContour<int>(img32s, pt, kind, j + 1);
+        pts.push_back(pt);
+        // NOTE: for some reason these small diagonal contours (NW, SE)
+        // result in 2 external contours for FLOODFILL mode
+        if (kind == 8 || kind == 4)
+            ++extra_contours_32s;
+    }
+    {
+        vector<vector<Point>> contours;
+        vector<vector<schar>> chains;
+        vector<Vec4i> hierarchy;
+
+        if (mode == RETR_FLOODFILL)
+        {
+            if (method == 0)
+            {
+                findContours(img32s, chains, hierarchy, mode, method);
+                ASSERT_EQ(pts.size() * 2 + extra_contours_32s, chains.size());
+            }
+            else
+            {
+                findContours(img32s, contours, hierarchy, mode, method);
+                ASSERT_EQ(pts.size() * 2 + extra_contours_32s, contours.size());
+#if CHECK_OLD
+                vector<vector<Point>> contours_o;
+                vector<Vec4i> hierarchy_o;
+                findContours_legacy(img32s, contours_o, hierarchy_o, mode, method);
+                ASSERT_EQ(contours.size(), contours_o.size());
+                for (size_t i = 0; i < contours.size(); ++i)
+                {
+                    SCOPED_TRACE(format("contour %zu", i));
+                    EXPECT_MAT_NEAR(Mat(contours[i]), Mat(contours_o[i]), 0);
+                }
+                EXPECT_MAT_NEAR(Mat(hierarchy), Mat(hierarchy_o), 0);
+#endif
+            }
+        }
+        else
+        {
+            if (method == 0)
+            {
+                findContours(img, chains, hierarchy, mode, method);
+                ASSERT_EQ(pts.size(), chains.size());
+            }
+            else
+            {
+                findContours(img, contours, hierarchy, mode, method);
+                ASSERT_EQ(pts.size(), contours.size());
+#if CHECK_OLD
+                vector<vector<Point>> contours_o;
+                vector<Vec4i> hierarchy_o;
+                findContours_legacy(img, contours_o, hierarchy_o, mode, method);
+                ASSERT_EQ(contours.size(), contours_o.size());
+                for (size_t i = 0; i < contours.size(); ++i)
+                {
+                    SCOPED_TRACE(format("contour %zu", i));
+                    EXPECT_MAT_NEAR(Mat(contours[i]), Mat(contours_o[i]), 0);
+                }
+                EXPECT_MAT_NEAR(Mat(hierarchy), Mat(hierarchy_o), 0);
+#endif
+            }
+        }
+    }
+}
+
+
+// Draw many nested rectangles and find contours
+//
+TEST_P(Imgproc_FindContours_Modes1, deep)
+{
+    const int mode = get<0>(GetParam());
+    const int method = get<1>(GetParam());
+
+    const size_t DIM = 1000;
+    const Size sz(DIM, DIM);
+    const size_t NUM = 249U;
+    Mat img(sz, CV_8UC1, Scalar::all(0));
+    Mat img32s(sz, CV_32SC1, Scalar::all(0));
+    Rect rect(1, 1, 998, 998);
+    for (size_t i = 0; i < NUM; ++i)
+    {
+        rectangle(img, rect, Scalar::all(255));
+        rectangle(img32s, rect, Scalar::all((double)i + 1), FILLED);
+        rect.x += 2;
+        rect.y += 2;
+        rect.width -= 4;
+        rect.height -= 4;
+    }
+    {
+        vector<vector<Point>> contours {{{0, 0}, {1, 1}}};
+        vector<vector<schar>> chains {{1, 2, 3}};
+        vector<Vec4i> hierarchy;
+
+        if (mode == RETR_FLOODFILL)
+        {
+            if (method == 0)
+            {
+                findContours(img32s, chains, hierarchy, mode, method);
+                ASSERT_EQ(2 * NUM, chains.size());
+            }
+            else
+            {
+                findContours(img32s, contours, hierarchy, mode, method);
+                ASSERT_EQ(2 * NUM, contours.size());
+#if CHECK_OLD
+                vector<vector<Point>> contours_o;
+                vector<Vec4i> hierarchy_o;
+                findContours_legacy(img32s, contours_o, hierarchy_o, mode, method);
+                ASSERT_EQ(contours.size(), contours_o.size());
+                for (size_t i = 0; i < contours.size(); ++i)
+                {
+                    SCOPED_TRACE(format("contour %zu", i));
+                    EXPECT_MAT_NEAR(Mat(contours[i]), Mat(contours_o[i]), 0);
+                }
+                EXPECT_MAT_NEAR(Mat(hierarchy), Mat(hierarchy_o), 0);
+#endif
+            }
+        }
+        else
+        {
+            const size_t expected_count = (mode == RETR_EXTERNAL) ? 1U : 2 * NUM;
+            if (method == 0)
+            {
+                findContours(img, chains, hierarchy, mode, method);
+                ASSERT_EQ(expected_count, chains.size());
+            }
+            else
+            {
+                findContours(img, contours, hierarchy, mode, method);
+                ASSERT_EQ(expected_count, contours.size());
+#if CHECK_OLD
+                vector<vector<Point>> contours_o;
+                vector<Vec4i> hierarchy_o;
+                findContours_legacy(img, contours_o, hierarchy_o, mode, method);
+                ASSERT_EQ(contours.size(), contours_o.size());
+                for (size_t i = 0; i < contours.size(); ++i)
+                {
+                    SCOPED_TRACE(format("contour %zu", i));
+                    EXPECT_MAT_NEAR(Mat(contours[i]), Mat(contours_o[i]), 0);
+                }
+                EXPECT_MAT_NEAR(Mat(hierarchy), Mat(hierarchy_o), 0);
+#endif
+            }
+        }
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(
+    ,
+    Imgproc_FindContours_Modes1,
+    testing::Combine(
+        testing::Values(RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE, RETR_FLOODFILL),
+        testing::Values(0,
+                        CHAIN_APPROX_NONE,
+                        CHAIN_APPROX_SIMPLE,
+                        CHAIN_APPROX_TC89_L1,
+                        CHAIN_APPROX_TC89_KCOS)));
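+
+// Illustrative sketch only (not used by the tests above). It assumes the
+// overloads exercised in this file: per-point output via vector<vector<Point>>
+// for the CHAIN_APPROX_* methods, chain-code output via vector<vector<schar>>
+// when method == 0, and a vector<Vec4i> hierarchy in both cases.
+inline static void example_run_both_output_forms(const Mat& bin, int mode)
+{
+    vector<vector<Point>> contours;  // per-point contours
+    vector<vector<schar>> chains;    // chain codes
+    vector<Vec4i> hierarchy;
+    findContours(bin, contours, hierarchy, mode, CHAIN_APPROX_SIMPLE);
+    findContours(bin, chains, hierarchy, mode, 0);
+}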
+
+//==================================================================================================
+
+typedef testing::TestWithParam<tuple<int, int>> Imgproc_FindContours_Modes2;
+
+// Very approximate backport of an old accuracy test
+//
+TEST_P(Imgproc_FindContours_Modes2, new_accuracy)
+{
+    const int mode = get<0>(GetParam());
+    const int method = get<1>(GetParam());
+
+    RNG& rng = TS::ptr()->get_rng();
+    const int blob_count = rng.uniform(1, 10);
+    const Size sz(rng.uniform(640, 1920), rng.uniform(480, 1080));
+    const int blob_sz = 50;
+
+    // prepare image
+    Mat img(sz, CV_8UC1, Scalar::all(0));
+    vector<RotatedRect> rects;
+    for (int i = 0; i < blob_count; ++i)
+    {
+        const Point2f center((float)rng.uniform(blob_sz, sz.width - blob_sz),
+                             (float)rng.uniform(blob_sz, sz.height - blob_sz));
+        const Size2f rsize((float)rng.uniform(1, blob_sz), (float)rng.uniform(1, blob_sz));
+        RotatedRect rect(center, rsize, rng.uniform(0.f, 180.f));
+        rects.push_back(rect);
+        ellipse(img, rect, Scalar::all(100), FILLED);
+    }
+
+    // draw contours manually
+    Mat cont_img(sz, CV_8UC1, Scalar::all(0));
+    for (int y = 1; y < sz.height - 1; ++y)
+    {
+        for (int x = 1; x < sz.width - 1; ++x)
+        {
+            if (img.at<uchar>(y, x) != 0 &&
+                ((img.at<uchar>(y - 1, x) == 0) || (img.at<uchar>(y + 1, x) == 0) ||
+                 (img.at<uchar>(y, x + 1) == 0) || (img.at<uchar>(y, x - 1) == 0)))
+            {
+                cont_img.at<uchar>(y, x) = 255;
+            }
+        }
+    }
+
+    // find contours
+    vector<vector<Point>> contours;
+    vector<Vec4i> hierarchy;
+    findContours(img, contours, hierarchy, mode, method);
+
+    // 0 < contours <= rects
+    EXPECT_GT(contours.size(), 0U);
+    EXPECT_GE(rects.size(), contours.size());
+
+    // draw contours
+    Mat res_img(sz, CV_8UC1, Scalar::all(0));
+    drawContours(res_img, contours);
+
+    // compare resulting drawn contours with manually drawn contours
+    const double diff1 = cvtest::norm(cont_img, res_img, NORM_L1) / 255;
+
+    if (method == CHAIN_APPROX_NONE || method == CHAIN_APPROX_SIMPLE)
+    {
+        EXPECT_EQ(0., diff1);
+    }
+#if CHECK_OLD
+    vector<vector<Point>> contours_o;
+    vector<Vec4i> hierarchy_o;
+    findContours(img, contours_o, hierarchy_o, mode, method);
+    ASSERT_EQ(contours_o.size(), contours.size());
+    for (size_t i = 0; i < contours_o.size(); ++i)
+    {
+        SCOPED_TRACE(format("contour = %zu", i));
+        EXPECT_MAT_NEAR(Mat(contours_o[i]), Mat(contours[i]), 0);
+    }
+    EXPECT_MAT_NEAR(Mat(hierarchy_o), Mat(hierarchy), 0);
+#endif
+}
+
+TEST_P(Imgproc_FindContours_Modes2, approx)
+{
+    const int mode = get<0>(GetParam());
+    const int method = get<1>(GetParam());
+
+    const Size sz {500, 500};
+    Mat img = Mat::zeros(sz, CV_8UC1);
+
+    for (int c = 0; c < 4; ++c)
+    {
+        if (c != 0)
+        {
+            // noise + filter + threshold
+            RNG& rng = TS::ptr()->get_rng();
+            cvtest::randUni(rng, img, 0, 255);
+
+            Mat fimg;
+            boxFilter(img, fimg, CV_8U, Size(5, 5));
+
+            Mat timg;
+            const int level = 44 + c * 42;
+            // 'level' goes through:
+            // 86 - some black speckles on white
+            // 128 - 50/50 black/white
+            // 170 - some white speckles on black
+            cv::threshold(fimg, timg, level, 255, THRESH_BINARY);
+        }
+        else
+        {
+            // circle with cut
+            const Point center {250, 250};
+            const int r {20};
+            const Point cut {r, r};
+            circle(img, center, r, Scalar(255), FILLED);
+            rectangle(img, center, center + cut, Scalar(0), FILLED);
+        }
+
+        vector<vector<Point>> contours;
+        vector<Vec4i> hierarchy;
+        findContours(img, contours, hierarchy, mode, method);
+
+#if CHECK_OLD
+        vector<vector<Point>> contours_o;
+        vector<Vec4i> hierarchy_o;
+        findContours_legacy(img, contours_o, hierarchy_o, mode, method);
+        ASSERT_EQ(contours_o.size(), contours.size());
+        for (size_t i = 0; i < contours_o.size(); ++i)
+        {
+            SCOPED_TRACE(format("c = %d, contour = %zu", c, i));
+            EXPECT_MAT_NEAR(Mat(contours_o[i]), Mat(contours[i]), 0);
+        }
+        EXPECT_MAT_NEAR(Mat(hierarchy_o), Mat(hierarchy), 0);
+#endif
+        // TODO: check something
+    }
+}
+
+// TODO: offset test
+
+// no RETR_FLOODFILL - no CV_32S input images
+INSTANTIATE_TEST_CASE_P(
+    ,
+    Imgproc_FindContours_Modes2,
+    testing::Combine(testing::Values(RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE),
+                     testing::Values(CHAIN_APPROX_NONE,
+                                     CHAIN_APPROX_SIMPLE,
+                                     CHAIN_APPROX_TC89_L1,
+                                     CHAIN_APPROX_TC89_KCOS)));
+
+TEST(Imgproc_FindContours, link_runs)
+{
+    const Size sz {500, 500};
+    Mat img = Mat::zeros(sz, CV_8UC1);
+
+    // noise + filter + threshold
+    RNG& rng = TS::ptr()->get_rng();
+    cvtest::randUni(rng, img, 0, 255);
+
+    Mat fimg;
+    boxFilter(img, fimg, CV_8U, Size(5, 5));
+
+    const int level = 135;
+    cv::threshold(fimg, img, level, 255, THRESH_BINARY);
+
+    vector<vector<Point>> contours;
+    vector<Vec4i> hierarchy;
+    findContoursLinkRuns(img, contours, hierarchy);
+
+    if (cvtest::debugLevel >= 10)
+    {
+        print_pts_2(contours);
+
+        Mat res = Mat::zeros(sz, CV_8UC1);
+        drawContours(res, contours);
+        imshow("res", res);
+        imshow("img", img);
+        waitKey(0);
+    }
+
+#if CHECK_OLD
+    vector<vector<Point>> contours_o;
+    vector<Vec4i> hierarchy_o;
+    findContours_legacy(img, contours_o, hierarchy_o, 0, 5);  // CV_LINK_RUNS method
+    ASSERT_EQ(contours_o.size(), contours.size());
+    for (size_t i = 0; i < contours_o.size(); ++i)
+    {
+        SCOPED_TRACE(format("contour = %zu", i));
+        EXPECT_MAT_NEAR(Mat(contours_o[i]), Mat(contours[i]), 0);
+    }
+    EXPECT_MAT_NEAR(Mat(hierarchy_o), Mat(hierarchy), 0);
+#endif
+}
+
+}} // namespace opencv_test

From 4221ae1d643a86e3d0574c9e70867d3b209f49ed Mon Sep 17 00:00:00 2001
From: Alexander Smorkalov
Date: Tue, 9 Apr 2024 12:57:23 +0300
Subject: [PATCH 17/21] Suppress build warnings on Win32 for ARM.

--- modules/gapi/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt index cd64a9ee28..f18290ca7d 100644 --- a/modules/gapi/CMakeLists.txt +++ b/modules/gapi/CMakeLists.txt @@ -41,7 +41,7 @@ if(MSVC) # and IE deprecated code warning C4996 ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4503 /wd4996) endif() - if(MSVC_VERSION LESS 1920) # MSVS 2015/2017 + if((MSVC_VERSION LESS 1920) OR ARM OR AARCH64) # MSVS 2015/2017 on x86 and ARM ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4702) # 'unreachable code' endif() endif() From 953581a92a0b168769298d0002ed46b108b6056a Mon Sep 17 00:00:00 2001 From: "Alessandro de Oliveira Faria (A.K.A.CABELO)" Date: Tue, 9 Apr 2024 10:56:07 -0300 Subject: [PATCH 18/21] Merge pull request #25357 from cabelo:yolov8m Added and tested yolov8m model. #25357 ### Pull Request Readiness Checklist See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request - [X] I agree to contribute to the project under Apache 2 License. - [X] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV - [X] The PR is proposed to the proper branch - [X] There is a reference to the original bug report and related work - [X] There is accuracy test, performance test and test data in opencv_extra repository, if applicable Patch to opencv_extra has the same branch name. - [X] The feature is well documented and sample code can be built with the project CMake Below is evidence of the test: ![yolov8m](https://github.com/opencv/opencv/assets/675645/f9bfe2c6-fe4a-42fc-93a6-17e4da5c9bb5) --- samples/dnn/models.yml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/samples/dnn/models.yml b/samples/dnn/models.yml index d2d4fa661c..b14dae327c 100644 --- a/samples/dnn/models.yml +++ b/samples/dnn/models.yml @@ -62,7 +62,19 @@ yolov8n: background_label_id: 0 sample: "yolo_detector" - +yolov8m: + load_info: + url: "https://github.com/CVHub520/X-AnyLabeling/releases/download/v0.1.0/yolov8m.onnx" + sha1: "656ffeb4f3b067bc30df956728b5f9c61a4cb090" + model: "yolov8m.onnx" + mean: 0.0 + scale: 0.00392 + width: 640 + height: 640 + rgb: true + classes: "object_detection_classes_yolo.txt" + background_label_id: 0 + sample: "yolo_detector" # YOLO4 object detection family from Darknet (https://github.com/AlexeyAB/darknet) # YOLO object detection family from Darknet (https://pjreddie.com/darknet/yolo/) From f37924796f5e07d14b9011400573a48343a22d26 Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Tue, 9 Apr 2024 17:44:36 +0300 Subject: [PATCH 19/21] Merge pull request #25364 from mshabunin:fix-unaligned-filter imgproc: fix unaligned memory access in filters and Gaussian blur #25364 * filter/SIMD: removed parts which casted 8u pointers to int causing unaligned memory access on RISC-V platform. * GaussianBlur/fixed_point: replaced casts from s16 to u32 with union operations Performance comparison: - [x] check performance on x86_64 - (4 threads, `-DCPU_BASELINE=AVX2`, GCC 11.4, Ubuntu 22) - [report_imgproc_x86_64.ods](https://github.com/opencv/opencv/files/14904702/report_x86_64.ods) - [x] check performance on AArch64 - (4 cores of RK3588, GCC 11.4 aarch64, Raspbian) - [report_imgproc_aarch64.ods](https://github.com/opencv/opencv/files/14908437/report_aarch64.ods) Note: for some reason my performance results are quite unstable, unaffected functions show speedups and slowdowns in many cases. 
Filter2D and GaussianBlur seem to be OK. Slightly related PR: https://github.com/opencv/ci-gha-workflow/pull/165 --- modules/imgproc/src/filter.simd.hpp | 44 -------------------------- modules/imgproc/src/fixedpoint.inl.hpp | 2 +- modules/imgproc/src/smooth.simd.hpp | 13 ++++++-- 3 files changed, 12 insertions(+), 47 deletions(-) diff --git a/modules/imgproc/src/filter.simd.hpp b/modules/imgproc/src/filter.simd.hpp index 21dab519af..c25345f407 100644 --- a/modules/imgproc/src/filter.simd.hpp +++ b/modules/imgproc/src/filter.simd.hpp @@ -86,7 +86,6 @@ Ptr getLinearFilter( #ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY -typedef int CV_DECL_ALIGNED(1) unaligned_int; #define VEC_ALIGN CV_MALLOC_ALIGN int FilterEngine__start(FilterEngine& this_, const Size &_wholeSize, const Size &sz, const Point &ofs) @@ -1083,21 +1082,6 @@ struct SymmColumnVec_32s8u v_pack_u_store(dst + i, v_pack(v_round(s0), v_round(s1))); i += VTraits::vlanes(); } -#if CV_SIMD_WIDTH > 16 - while( i <= width - 4 /*VTraits::vlanes()*/ ) -#else - if( i <= width - VTraits::vlanes() ) -#endif - { - v_float32 s0 = v_muladd(v_cvt_f32(vx_load(src[0] + i)), vx_setall_f32(ky[0]), vx_setall_f32(delta)); - s0 = v_muladd(v_cvt_f32(v_add(vx_load(src[1] + i), vx_load(src[-1] + i))), vx_setall_f32(ky[1]), s0); - for( k = 2; k <= ksize2; k++ ) - s0 = v_muladd(v_cvt_f32(v_add(vx_load(src[k] + i), vx_load(src[-k] + i))), vx_setall_f32(ky[k]), s0); - v_int32 s32 = v_round(s0); - v_int16 s16 = v_pack(s32, s32); - *(unaligned_int*)(dst + i) = v_get0(v_reinterpret_as_s32(v_pack_u(s16, s16))); - i += 4 /*v_int32x4::nlanes*/ ; - } } else { @@ -1139,20 +1123,6 @@ struct SymmColumnVec_32s8u v_pack_u_store(dst + i, v_pack(v_round(s0), v_round(s1))); i += VTraits::vlanes(); } -#if CV_SIMD_WIDTH > 16 - while( i <= width - 4 /*VTraits::vlanes()*/ ) -#else - if( i <= width - VTraits::vlanes() ) -#endif - { - v_float32 s0 = v_muladd(v_cvt_f32(v_sub(vx_load(src[1] + i), vx_load(src[-1] + i))), vx_setall_f32(ky[1]), vx_setall_f32(delta)); - for (k = 2; k <= ksize2; k++) - s0 = v_muladd(v_cvt_f32(v_sub(vx_load(src[k] + i), vx_load(src[-k] + i))), vx_setall_f32(ky[k]), s0); - v_int32 s32 = v_round(s0); - v_int16 s16 = v_pack(s32, s32); - *(unaligned_int*)(dst + i) = v_get0(v_reinterpret_as_s32(v_pack_u(s16, s16))); - i += 4 /*v_int32x4::nlanes*/ ; - } } return i; } @@ -2236,20 +2206,6 @@ struct FilterVec_8u v_pack_u_store(dst + i, v_pack(v_round(s0), v_round(s1))); i += VTraits::vlanes(); } -#if CV_SIMD_WIDTH > 16 - while( i <= width - 4 /*VTraits::vlanes()*/ ) -#else - if( i <= width - VTraits::vlanes() ) -#endif - { - v_float32 s0 = v_muladd(v_cvt_f32(v_reinterpret_as_s32(vx_load_expand_q(src[0] + i))), vx_setall_f32(kf[0]), vx_setall_f32(delta)); - for( k = 1; k < nz; k++ ) - s0 = v_muladd(v_cvt_f32(v_reinterpret_as_s32(vx_load_expand_q(src[k] + i))), vx_setall_f32(kf[k]), s0); - v_int32 s32 = v_round(s0); - v_int16 s16 = v_pack(s32, s32); - *(unaligned_int*)(dst + i) = v_get0(v_reinterpret_as_s32(v_pack_u(s16, s16))); - i += 4 /*VTraits::vlanes()*/ ; - } return i; } diff --git a/modules/imgproc/src/fixedpoint.inl.hpp b/modules/imgproc/src/fixedpoint.inl.hpp index f5f433fec6..7303e06ad3 100644 --- a/modules/imgproc/src/fixedpoint.inl.hpp +++ b/modules/imgproc/src/fixedpoint.inl.hpp @@ -370,7 +370,7 @@ public: static CV_ALWAYS_INLINE ufixedpoint16 one() { return ufixedpoint16((uint16_t)(1 << fixedShift)); } static CV_ALWAYS_INLINE ufixedpoint16 fromRaw(uint16_t v) { return ufixedpoint16(v); } - CV_ALWAYS_INLINE uint16_t raw() { return val; } + 
CV_ALWAYS_INLINE uint16_t raw() const { return val; } }; } diff --git a/modules/imgproc/src/smooth.simd.hpp b/modules/imgproc/src/smooth.simd.hpp index 33e58d4e80..7389cdbce9 100644 --- a/modules/imgproc/src/smooth.simd.hpp +++ b/modules/imgproc/src/smooth.simd.hpp @@ -1634,6 +1634,15 @@ void vlineSmooth(const FT* const * src, const FT* m, int n, ET* dst, int len) dst[i] = val; } } + +inline uint32_t read_pair_as_u32(const ufixedpoint16 * mem) +{ + union Cv32sufX2 { uint32_t v32; int16_t v16[2]; } res; + res.v16[0] = mem->raw(); + res.v16[1] = (mem + 1)->raw(); + return res.v32; +} + template <> void vlineSmooth(const ufixedpoint16* const * src, const ufixedpoint16* m, int n, uint8_t* dst, int len) { @@ -1655,7 +1664,7 @@ void vlineSmooth(const ufixedpoint16* const * src, const v_int16 v_src00, v_src10, v_src01, v_src11, v_src02, v_src12, v_src03, v_src13; v_int16 v_tmp0, v_tmp1; - v_int16 v_mul = v_reinterpret_as_s16(vx_setall_u32(*((uint32_t*)m))); + v_int16 v_mul = v_reinterpret_as_s16(vx_setall_u32(read_pair_as_u32(m))); const int16_t* src0 = (const int16_t*)src[0] + i; const int16_t* src1 = (const int16_t*)src[1] + i; @@ -1683,7 +1692,7 @@ void vlineSmooth(const ufixedpoint16* const * src, const int j = 2; for (; j < n - 1; j+=2) { - v_mul = v_reinterpret_as_s16(vx_setall_u32(*((uint32_t*)(m+j)))); + v_mul = v_reinterpret_as_s16(vx_setall_u32(read_pair_as_u32(m + j))); const int16_t* srcj0 = (const int16_t*)src[j] + i; const int16_t* srcj1 = (const int16_t*)src[j + 1] + i; From cb339ac6d1d90d96920a61608e9d1d34bb388ef6 Mon Sep 17 00:00:00 2001 From: Gonzalo Matheu Date: Tue, 9 Apr 2024 16:06:08 -0300 Subject: [PATCH 20/21] Fixing code example on js_image_display markdown --- .../js_gui/js_image_display/js_image_display.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/js_tutorials/js_gui/js_image_display/js_image_display.markdown b/doc/js_tutorials/js_gui/js_image_display/js_image_display.markdown index 9ad4ce2e53..fb0ae42eb3 100644 --- a/doc/js_tutorials/js_gui/js_image_display/js_image_display.markdown +++ b/doc/js_tutorials/js_gui/js_image_display/js_image_display.markdown @@ -45,7 +45,7 @@ cv.cvtColor(dst, dst, cv.COLOR_***2RGBA); Then, new an ImageData obj from dst: @code{.js} -let imgData = new ImageData(new Uint8ClampedArray(dst.data, dst.cols, dst.rows); +let imgData = new ImageData(new Uint8ClampedArray(dst.data), dst.cols, dst.rows); @endcode Finally, display it: From 148b2ec3e000bc1cc943cc69c4ea0ee287b8cb7a Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Tue, 9 Apr 2024 23:47:30 +0300 Subject: [PATCH 21/21] calib3d: increased AP3P test threshold for RISC-V platform --- modules/calib3d/test/test_solvepnp_ransac.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/calib3d/test/test_solvepnp_ransac.cpp b/modules/calib3d/test/test_solvepnp_ransac.cpp index a9ed88f0f5..a16928c738 100644 --- a/modules/calib3d/test/test_solvepnp_ransac.cpp +++ b/modules/calib3d/test/test_solvepnp_ransac.cpp @@ -2316,7 +2316,7 @@ TEST(AP3P, ctheta1p_nan_23607) res.row(j) += t[i].reshape(1, 1); res.row(j) /= res.row(j).at(2); } - EXPECT_LE(cvtest::norm(res.colRange(0, 2), expected, NORM_INF), 3e-16); + EXPECT_LE(cvtest::norm(res.colRange(0, 2), expected, NORM_INF), 3.34e-16); } }