From 98d84364404231c687db283f14441b1dd2c15e2a Mon Sep 17 00:00:00 2001
From: cftang
Date: Tue, 26 Feb 2019 01:20:54 +0800
Subject: [PATCH 01/19] fix tf_text_graph_common tensor_content type bug

---
 samples/dnn/tf_text_graph_common.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/samples/dnn/tf_text_graph_common.py b/samples/dnn/tf_text_graph_common.py
index 5a8e62495d..b46b1d492c 100644
--- a/samples/dnn/tf_text_graph_common.py
+++ b/samples/dnn/tf_text_graph_common.py
@@ -324,6 +324,6 @@ def writeTextGraph(modelPath, outputPath, outNodes):
     for node in graph_def.node:
         if node.op == 'Const':
             if 'value' in node.attr and node.attr['value'].tensor.tensor_content:
-                node.attr['value'].tensor.tensor_content = ''
+                node.attr['value'].tensor.tensor_content = b''

 tf.train.write_graph(graph_def, "", outputPath, as_text=True)

From f1f0f630c727588fecc0cf2833e76f39c7c9cdc6 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Wed, 27 Feb 2019 18:04:00 +0300
Subject: [PATCH 02/19] core: disable I/O perf test

- can be enabled separately if needed
- not stable (due to storage I/O processing)
---
 modules/core/perf/perf_io_base64.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/core/perf/perf_io_base64.cpp b/modules/core/perf/perf_io_base64.cpp
index ebc9ee1463..e6f7a6807f 100644
--- a/modules/core/perf/perf_io_base64.cpp
+++ b/modules/core/perf/perf_io_base64.cpp
@@ -12,7 +12,7 @@ typedef TestBaseWithParam Size_Mat_StrType;

 #define FILE_EXTENSION String(".xml"), String(".yml"), String(".json")

-PERF_TEST_P(Size_Mat_StrType, fs_text,
+PERF_TEST_P(Size_Mat_StrType, DISABLED_fs_text,
             testing::Combine(testing::Values(MAT_SIZES),
                              testing::Values(MAT_TYPES),
                              testing::Values(FILE_EXTENSION))
@@ -48,7 +48,7 @@ PERF_TEST_P(Size_Mat_StrType, fs_text,
     SANITY_CHECK_NOTHING();
 }

-PERF_TEST_P(Size_Mat_StrType, fs_base64,
+PERF_TEST_P(Size_Mat_StrType, DISABLED_fs_base64,
             testing::Combine(testing::Values(MAT_SIZES),
                              testing::Values(MAT_TYPES),
                              testing::Values(FILE_EXTENSION))

From 9548093b466b9f515b00f3719f87631e7af39c22 Mon Sep 17 00:00:00 2001
From: Vitaly Tuzov
Date: Thu, 11 Oct 2018 20:00:12 +0300
Subject: [PATCH 03/19] Horizontal line processing for pyrDown() reworked using wide universal intrinsics.
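
The horizontal pass applies the 5-tap binomial kernel 1-4-6-4-1 to every
second input pixel of each channel; the 1/256 normalization happens later,
in the vertical pass. As a reference for the vectorized PyrDownVecH
specializations below, here is a minimal scalar sketch of that pass
(assuming, as in the comments of the vector code, that src already points
2*cn elements to the left of the first kernel center; the helper name and
signature are illustrative only, not part of the patch):

    // Scalar reference for one row of the pyrDown horizontal filter.
    // src points at the leftmost tap, i.e. 2*cn elements before the kernel
    // center of the first output; row receives width0 accumulated values.
    template <typename T, typename WT>
    static void pyrDownRowRef(const T* src, WT* row, int width0, int cn)
    {
        for (int x = 0; x < width0; x += cn)    // one output pixel (cn channels) per step
            for (int c = 0; c < cn; c++)
            {
                const T* s = src + x*2 + c;     // input advances by 2 pixels per output pixel
                row[x + c] = s[2*cn]*6 + (s[cn] + s[3*cn])*4 + s[0] + s[4*cn];
            }
    }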
--- .../include/opencv2/core/hal/intrin_avx.hpp | 10 + .../include/opencv2/core/hal/intrin_cpp.hpp | 1 - .../include/opencv2/core/hal/intrin_neon.hpp | 89 +- .../include/opencv2/core/hal/intrin_sse.hpp | 12 +- .../include/opencv2/core/hal/intrin_vsx.hpp | 4 + modules/imgproc/src/pyramids.cpp | 894 ++++++++++++------ 6 files changed, 694 insertions(+), 316 deletions(-) diff --git a/modules/core/include/opencv2/core/hal/intrin_avx.hpp b/modules/core/include/opencv2/core/hal/intrin_avx.hpp index 913c915270..a6725c82de 100644 --- a/modules/core/include/opencv2/core/hal/intrin_avx.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_avx.hpp @@ -1610,6 +1610,16 @@ inline v_int16x16 v_pack_triplets(const v_int16x16& vec) } inline v_uint16x16 v_pack_triplets(const v_uint16x16& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } +inline v_int32x8 v_pack_triplets(const v_int32x8& vec) +{ + return v_int32x8(_mm256_permutevar8x32_epi32(vec.val, _mm256_set_epi64x(0x0000000700000007, 0x0000000600000005, 0x0000000400000002, 0x0000000100000000))); +} +inline v_uint32x8 v_pack_triplets(const v_uint32x8& vec) { return v_reinterpret_as_u32(v_pack_triplets(v_reinterpret_as_s32(vec))); } +inline v_float32x8 v_pack_triplets(const v_float32x8& vec) +{ + return v_float32x8(_mm256_permutevar8x32_ps(vec.val, _mm256_set_epi64x(0x0000000700000007, 0x0000000600000005, 0x0000000400000002, 0x0000000100000000))); +} + ////////// Matrix operations ///////// inline v_int32x8 v_dotprod(const v_int16x16& a, const v_int16x16& b) diff --git a/modules/core/include/opencv2/core/hal/intrin_cpp.hpp b/modules/core/include/opencv2/core/hal/intrin_cpp.hpp index 5cfaea7220..757c67b314 100644 --- a/modules/core/include/opencv2/core/hal/intrin_cpp.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_cpp.hpp @@ -1908,7 +1908,6 @@ template inline v_reg<_Tp, n> v_interleave_quads(const v_re template inline v_reg<_Tp, n> v_pack_triplets(const v_reg<_Tp, n>& vec) { v_reg c; - int j = 0; for (int i = 0; i < n/4; i++) { c.s[3*i ] = vec.s[4*i ]; diff --git a/modules/core/include/opencv2/core/hal/intrin_neon.hpp b/modules/core/include/opencv2/core/hal/intrin_neon.hpp index f67479171d..e131909845 100644 --- a/modules/core/include/opencv2/core/hal/intrin_neon.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_neon.hpp @@ -1597,29 +1597,49 @@ inline v_int8x16 v_lut(const schar* tab, const int* idx) } inline v_int8x16 v_lut_pairs(const schar* tab, const int* idx) { - short CV_DECL_ALIGNED(32) elems[8] = + schar CV_DECL_ALIGNED(32) elems[16] = { - *(short*)(tab+idx[0]), - *(short*)(tab+idx[1]), - *(short*)(tab+idx[2]), - *(short*)(tab+idx[3]), - *(short*)(tab+idx[4]), - *(short*)(tab+idx[5]), - *(short*)(tab+idx[6]), - *(short*)(tab+idx[7]) + tab[idx[0]], + tab[idx[0] + 1], + tab[idx[1]], + tab[idx[1] + 1], + tab[idx[2]], + tab[idx[2] + 1], + tab[idx[3]], + tab[idx[3] + 1], + tab[idx[4]], + tab[idx[4] + 1], + tab[idx[5]], + tab[idx[5] + 1], + tab[idx[6]], + tab[idx[6] + 1], + tab[idx[7]], + tab[idx[7] + 1] }; - return v_int8x16(vreinterpretq_s8_s16(vld1q_s16(elems))); + return v_int8x16(vld1q_s8(elems)); } inline v_int8x16 v_lut_quads(const schar* tab, const int* idx) { - int CV_DECL_ALIGNED(32) elems[4] = + schar CV_DECL_ALIGNED(32) elems[16] = { - *(int*)(tab + idx[0]), - *(int*)(tab + idx[1]), - *(int*)(tab + idx[2]), - *(int*)(tab + idx[3]) + tab[idx[0]], + tab[idx[0] + 1], + tab[idx[0] + 2], + tab[idx[0] + 3], + tab[idx[1]], + tab[idx[1] + 1], + tab[idx[1] + 2], + tab[idx[1] + 3], + tab[idx[2]], + tab[idx[2] 
+ 1], + tab[idx[2] + 2], + tab[idx[2] + 3], + tab[idx[3]], + tab[idx[3] + 1], + tab[idx[3] + 2], + tab[idx[3] + 3] }; - return v_int8x16(vreinterpretq_s8_s32(vld1q_s32(elems))); + return v_int8x16(vld1q_s8(elems)); } inline v_uint8x16 v_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut((schar*)tab, idx)); } inline v_uint8x16 v_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_pairs((schar*)tab, idx)); } @@ -1642,23 +1662,22 @@ inline v_int16x8 v_lut(const short* tab, const int* idx) } inline v_int16x8 v_lut_pairs(const short* tab, const int* idx) { - int CV_DECL_ALIGNED(32) elems[4] = + short CV_DECL_ALIGNED(32) elems[8] = { - *(int*)(tab + idx[0]), - *(int*)(tab + idx[1]), - *(int*)(tab + idx[2]), - *(int*)(tab + idx[3]) + tab[idx[0]], + tab[idx[0] + 1], + tab[idx[1]], + tab[idx[1] + 1], + tab[idx[2]], + tab[idx[2] + 1], + tab[idx[3]], + tab[idx[3] + 1] }; - return v_int16x8(vreinterpretq_s16_s32(vld1q_s32(elems))); + return v_int16x8(vld1q_s16(elems)); } inline v_int16x8 v_lut_quads(const short* tab, const int* idx) { - int64 CV_DECL_ALIGNED(32) elems[2] = - { - *(int64*)(tab + idx[0]), - *(int64*)(tab + idx[1]) - }; - return v_int16x8(vreinterpretq_s16_s64(vld1q_s64(elems))); + return v_int16x8(vcombine_s16(vld1_s16(tab + idx[0]), vld1_s16(tab + idx[1]))); } inline v_uint16x8 v_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut((short*)tab, idx)); } inline v_uint16x8 v_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_pairs((short*)tab, idx)); } @@ -1677,12 +1696,7 @@ inline v_int32x4 v_lut(const int* tab, const int* idx) } inline v_int32x4 v_lut_pairs(const int* tab, const int* idx) { - int64 CV_DECL_ALIGNED(32) elems[2] = - { - *(int64*)(tab + idx[0]), - *(int64*)(tab + idx[1]) - }; - return v_int32x4(vreinterpretq_s32_s64(vld1q_s64(elems))); + return v_int32x4(vcombine_s32(vld1_s32(tab + idx[0]), vld1_s32(tab + idx[1]))); } inline v_int32x4 v_lut_quads(const int* tab, const int* idx) { @@ -1800,7 +1814,8 @@ inline v_int16x8 v_interleave_pairs(const v_int16x8& vec) inline v_uint16x8 v_interleave_pairs(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_pairs(v_reinterpret_as_s16(vec))); } inline v_int16x8 v_interleave_quads(const v_int16x8& vec) { - return v_int16x8(vreinterpretq_s16_s8(vcombine_s8(vtbl1_s8(vget_low_s8(vreinterpretq_s8_s16(vec.val)), vcreate_s8(0x0b0a030209080100)), vtbl1_s8(vget_high_s8(vreinterpretq_s8_s16(vec.val)), vcreate_s8(0x0b0a030209080100))))); + int16x4x2_t res = vzip_s16(vget_low_s16(vec.val), vget_high_s16(vec.val)); + return v_int16x8(vcombine_s16(res.val[0], res.val[1])); } inline v_uint16x8 v_interleave_quads(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_quads(v_reinterpret_as_s16(vec))); } @@ -1824,6 +1839,10 @@ inline v_int16x8 v_pack_triplets(const v_int16x8& vec) } inline v_uint16x8 v_pack_triplets(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } +inline v_int32x4 v_pack_triplets(const v_int32x4& vec) { return vec; } +inline v_uint32x4 v_pack_triplets(const v_uint32x4& vec) { return vec; } +inline v_float32x4 v_pack_triplets(const v_float32x4& vec) { return vec; } + #if CV_SIMD128_64F inline v_float64x2 v_lut(const double* tab, const int* idx) { diff --git a/modules/core/include/opencv2/core/hal/intrin_sse.hpp b/modules/core/include/opencv2/core/hal/intrin_sse.hpp index dcfae9a3a8..a5adad04c5 100644 --- a/modules/core/include/opencv2/core/hal/intrin_sse.hpp 
+++ b/modules/core/include/opencv2/core/hal/intrin_sse.hpp @@ -2789,7 +2789,7 @@ inline v_int32x4 v_lut_pairs(const int* tab, const int* idx) } inline v_int32x4 v_lut_quads(const int* tab, const int* idx) { - return v_int32x4(_mm_load_si128((const __m128i*)(tab + idx[0]))); + return v_int32x4(_mm_loadu_si128((const __m128i*)(tab + idx[0]))); } inline v_uint32x4 v_lut(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut((const int *)tab, idx)); } inline v_uint32x4 v_lut_pairs(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_pairs((const int *)tab, idx)); } @@ -2801,7 +2801,7 @@ inline v_int64x2 v_lut(const int64_t* tab, const int* idx) } inline v_int64x2 v_lut_pairs(const int64_t* tab, const int* idx) { - return v_int64x2(_mm_load_si128((const __m128i*)(tab + idx[0]))); + return v_int64x2(_mm_loadu_si128((const __m128i*)(tab + idx[0]))); } inline v_uint64x2 v_lut(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut((const int64_t *)tab, idx)); } inline v_uint64x2 v_lut_pairs(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut_pairs((const int64_t *)tab, idx)); } @@ -2817,7 +2817,7 @@ inline v_float64x2 v_lut(const double* tab, const int* idx) { return v_float64x2(_mm_setr_pd(tab[idx[0]], tab[idx[1]])); } -inline v_float64x2 v_lut_pairs(const double* tab, const int* idx) { return v_float64x2(_mm_castsi128_pd(_mm_load_si128((const __m128i*)(tab + idx[0])))); } +inline v_float64x2 v_lut_pairs(const double* tab, const int* idx) { return v_float64x2(_mm_castsi128_pd(_mm_loadu_si128((const __m128i*)(tab + idx[0])))); } inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec) { @@ -2932,7 +2932,7 @@ inline v_int8x16 v_pack_triplets(const v_int8x16& vec) return v_int8x16(_mm_shuffle_epi8(vec.val, _mm_set_epi64x(0xffffff0f0e0d0c0a, 0x0908060504020100))); #else __m128i mask = _mm_set1_epi64x(0x00000000FFFFFFFF); - __m128i a = _mm_or_si128(_mm_andnot_si128(mask, vec.val), _mm_and_si128(mask, _mm_sll_epi32(vec.val, _mm_set_epi64x(0, 8)))); + __m128i a = _mm_srli_si128(_mm_or_si128(_mm_andnot_si128(mask, vec.val), _mm_and_si128(mask, _mm_sll_epi32(vec.val, _mm_set_epi64x(0, 8)))), 1); return v_int8x16(_mm_srli_si128(_mm_shufflelo_epi16(a, _MM_SHUFFLE(2, 1, 0, 3)), 2)); #endif } @@ -2948,6 +2948,10 @@ inline v_int16x8 v_pack_triplets(const v_int16x8& vec) } inline v_uint16x8 v_pack_triplets(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } +inline v_int32x4 v_pack_triplets(const v_int32x4& vec) { return vec; } +inline v_uint32x4 v_pack_triplets(const v_uint32x4& vec) { return vec; } +inline v_float32x4 v_pack_triplets(const v_float32x4& vec) { return vec; } + ////////////// FP16 support /////////////////////////// inline v_float32x4 v_load_expand(const float16_t* ptr) diff --git a/modules/core/include/opencv2/core/hal/intrin_vsx.hpp b/modules/core/include/opencv2/core/hal/intrin_vsx.hpp index ddda1d10d0..4e0c75ff93 100644 --- a/modules/core/include/opencv2/core/hal/intrin_vsx.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_vsx.hpp @@ -1160,6 +1160,10 @@ inline v_int16x8 v_pack_triplets(const v_int16x8& vec) } inline v_uint16x8 v_pack_triplets(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } +inline v_int32x4 v_pack_triplets(const v_int32x4& vec) { return vec; } +inline v_uint32x4 v_pack_triplets(const v_uint32x4& vec) { return vec; } +inline v_float32x4 v_pack_triplets(const v_float32x4& vec) { return vec; } 
+ /////// FP16 support //////// // [TODO] implement these 2 using VSX or universal intrinsics (copy from intrin_sse.cpp and adopt) diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp index d4efeec029..5a229b61f0 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -64,333 +64,662 @@ template struct FltCast rtype operator ()(type1 arg) const { return arg*(T)(1./(1 << shift)); } }; -template struct PyrDownNoVec +template int PyrDownVecH(const T1*, T2*, int) { - int operator()(T1**, T2*, int, int) const { return 0; } -}; + // row[x ] = src[x * 2 + 2*cn ] * 6 + (src[x * 2 + cn ] + src[x * 2 + 3*cn ]) * 4 + src[x * 2 ] + src[x * 2 + 4*cn ]; + // row[x + 1] = src[x * 2 + 2*cn+1] * 6 + (src[x * 2 + cn+1] + src[x * 2 + 3*cn+1]) * 4 + src[x * 2 + 1] + src[x * 2 + 4*cn+1]; + // .... + // row[x + cn-1] = src[x * 2 + 3*cn-1] * 6 + (src[x * 2 + 2*cn-1] + src[x * 2 + 4*cn-1]) * 4 + src[x * 2 + cn-1] + src[x * 2 + 5*cn-1]; + return 0; +} -template struct PyrUpNoVec +template int PyrUpVecH(const T1*, T2*, int) { - int operator()(T1**, T2**, int, int) const { return 0; } -}; + return 0; +} + +template int PyrDownVecV(T1**, T2*, int) { return 0; } + +template int PyrUpVecV(T1**, T2**, int) { return 0; } #if CV_SIMD -struct PyrDownVec_32s8u +template<> int PyrDownVecH(const uchar* src, int* row, int width) { - int operator()(int** src, uchar* dst, int, int width) const + int x = 0; + const uchar *src0 = src, *src2 = src + 2, *src4 = src + 3; + + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes) + v_store(row, v_dotprod(v_reinterpret_as_s16(vx_load_expand(src0)), v_1_4) + + v_dotprod(v_reinterpret_as_s16(vx_load_expand(src2)), v_6_4) + + (v_reinterpret_as_s32(vx_load_expand(src4)) >> 16)); + vx_cleanup(); + + return x; +} +template<> int PyrDownVecH(const uchar* src, int* row, int width) +{ + int x = 0; + const uchar *src0 = src, *src2 = src + 4, *src4 = src + 6; + + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes) + v_store(row, v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(vx_load_expand(src0))), v_1_4) + + v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(vx_load_expand(src2))), v_6_4) + + (v_reinterpret_as_s32(v_interleave_pairs(vx_load_expand(src4))) >> 16)); + vx_cleanup(); + + return x; +} +template<> int PyrDownVecH(const uchar* src, int* row, int width) +{ + int idx[v_int8::nlanes/2 + 4]; + for (int i = 0; i < v_int8::nlanes/4 + 2; i++) { - int x = 0; - const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; - - for( ; x <= width - v_uint8::nlanes; x += v_uint8::nlanes ) - { - v_uint16 r0, r1, r2, r3, r4, t0, t1; - r0 = v_reinterpret_as_u16(v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes))); - r1 = v_reinterpret_as_u16(v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes))); - r2 = v_reinterpret_as_u16(v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes))); - r3 = v_reinterpret_as_u16(v_pack(vx_load(row3 + x), vx_load(row3 + x + v_int32::nlanes))); - r4 = v_reinterpret_as_u16(v_pack(vx_load(row4 
+ x), vx_load(row4 + x + v_int32::nlanes))); - t0 = r0 + r4 + (r2 + r2) + ((r1 + r3 + r2) << 2); - r0 = v_reinterpret_as_u16(v_pack(vx_load(row0 + x + 2*v_int32::nlanes), vx_load(row0 + x + 3*v_int32::nlanes))); - r1 = v_reinterpret_as_u16(v_pack(vx_load(row1 + x + 2*v_int32::nlanes), vx_load(row1 + x + 3*v_int32::nlanes))); - r2 = v_reinterpret_as_u16(v_pack(vx_load(row2 + x + 2*v_int32::nlanes), vx_load(row2 + x + 3*v_int32::nlanes))); - r3 = v_reinterpret_as_u16(v_pack(vx_load(row3 + x + 2*v_int32::nlanes), vx_load(row3 + x + 3*v_int32::nlanes))); - r4 = v_reinterpret_as_u16(v_pack(vx_load(row4 + x + 2*v_int32::nlanes), vx_load(row4 + x + 3*v_int32::nlanes))); - t1 = r0 + r4 + (r2 + r2) + ((r1 + r3 + r2) << 2); - v_store(dst + x, v_rshr_pack<8>(t0, t1)); - } - if (x <= width - v_int16::nlanes) - { - v_uint16 r0, r1, r2, r3, r4, t0; - r0 = v_reinterpret_as_u16(v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes))); - r1 = v_reinterpret_as_u16(v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes))); - r2 = v_reinterpret_as_u16(v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes))); - r3 = v_reinterpret_as_u16(v_pack(vx_load(row3 + x), vx_load(row3 + x + v_int32::nlanes))); - r4 = v_reinterpret_as_u16(v_pack(vx_load(row4 + x), vx_load(row4 + x + v_int32::nlanes))); - t0 = r0 + r4 + (r2 + r2) + ((r1 + r3 + r2) << 2); - v_rshr_pack_store<8>(dst + x, t0); - x += v_uint16::nlanes; - } - typedef int CV_DECL_ALIGNED(1) unaligned_int; - for ( ; x <= width - v_int32x4::nlanes; x += v_int32x4::nlanes) - { - v_int32x4 r0, r1, r2, r3, r4, t0; - r0 = v_load(row0 + x); - r1 = v_load(row1 + x); - r2 = v_load(row2 + x); - r3 = v_load(row3 + x); - r4 = v_load(row4 + x); - t0 = r0 + r4 + (r2 + r2) + ((r1 + r3 + r2) << 2); - - *((unaligned_int*) (dst + x)) = v_reinterpret_as_s32(v_rshr_pack<8>(v_pack_u(t0, t0), v_setzero_u16())).get0(); - } + idx[i] = 6*i; + idx[i + v_int8::nlanes/4 + 2] = 6*i + 3; + } - return x; + int x = 0; + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + for (; x <= width - v_int8::nlanes; x += 3*v_int8::nlanes/4, src += 6*v_int8::nlanes/4, row += 3*v_int8::nlanes/4) + { + v_uint16 r0l, r0h, r1l, r1h, r2l, r2h, r3l, r3h, r4l, r4h; + v_expand(vx_lut_quads(src, idx ), r0l, r0h); + v_expand(vx_lut_quads(src, idx + v_int8::nlanes/4 + 2), r1l, r1h); + v_expand(vx_lut_quads(src, idx + 1 ), r2l, r2h); + v_expand(vx_lut_quads(src, idx + v_int8::nlanes/4 + 3), r3l, r3h); + v_expand(vx_lut_quads(src, idx + 2 ), r4l, r4h); + + v_zip(r2l, r1l + r3l, r1l, r3l); + v_zip(r2h, r1h + r3h, r1h, r3h); + r0l += r4l; r0h += r4h; + + v_store(row , v_pack_triplets(v_dotprod(v_reinterpret_as_s16(r1l), v_6_4) + v_reinterpret_as_s32(v_expand_low( r0l)))); + v_store(row + 3*v_int32::nlanes/4, v_pack_triplets(v_dotprod(v_reinterpret_as_s16(r3l), v_6_4) + v_reinterpret_as_s32(v_expand_high(r0l)))); + v_store(row + 6*v_int32::nlanes/4, v_pack_triplets(v_dotprod(v_reinterpret_as_s16(r1h), v_6_4) + v_reinterpret_as_s32(v_expand_low( r0h)))); + v_store(row + 9*v_int32::nlanes/4, v_pack_triplets(v_dotprod(v_reinterpret_as_s16(r3h), v_6_4) + v_reinterpret_as_s32(v_expand_high(r0h)))); } -}; + vx_cleanup(); -struct PyrDownVec_32f + return x; +} +template<> int PyrDownVecH(const uchar* src, int* row, int width) { - int operator()(float** src, float* dst, int, int width) const + int x = 0; + const uchar *src0 = src, *src2 = src + 8, *src4 = src + 12; + + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = 
v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes) + v_store(row, v_dotprod(v_interleave_quads(v_reinterpret_as_s16(vx_load_expand(src0))), v_1_4) + + v_dotprod(v_interleave_quads(v_reinterpret_as_s16(vx_load_expand(src2))), v_6_4) + + (v_reinterpret_as_s32(v_interleave_quads(vx_load_expand(src4))) >> 16)); + vx_cleanup(); + + return x; +} + +template<> int PyrDownVecH(const short* src, int* row, int width) +{ + int x = 0; + const short *src0 = src, *src2 = src + 2, *src4 = src + 3; + + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes) + v_store(row, v_dotprod(vx_load(src0), v_1_4) + + v_dotprod(vx_load(src2), v_6_4) + + (v_reinterpret_as_s32(vx_load(src4)) >> 16)); + vx_cleanup(); + + return x; +} +template<> int PyrDownVecH(const short* src, int* row, int width) +{ + int x = 0; + const short *src0 = src, *src2 = src + 4, *src4 = src + 6; + + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes) + v_store(row, v_dotprod(v_interleave_pairs(vx_load(src0)), v_1_4) + + v_dotprod(v_interleave_pairs(vx_load(src2)), v_6_4) + + (v_reinterpret_as_s32(v_interleave_pairs(vx_load(src4))) >> 16)); + vx_cleanup(); + + return x; +} +template<> int PyrDownVecH(const short* src, int* row, int width) +{ + int idx[v_int16::nlanes/2 + 4]; + for (int i = 0; i < v_int16::nlanes/4 + 2; i++) { - int x = 0; - const float *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + idx[i] = 6*i; + idx[i + v_int16::nlanes/4 + 2] = 6*i + 3; + } - v_float32 _4 = vx_setall_f32(4.f), _scale = vx_setall_f32(1.f/256); - for( ; x <= width - v_float32::nlanes; x += v_float32::nlanes) - { - v_float32 r0, r1, r2, r3, r4; - r0 = vx_load(row0 + x); - r1 = vx_load(row1 + x); - r2 = vx_load(row2 + x); - r3 = vx_load(row3 + x); - r4 = vx_load(row4 + x); - v_store(dst + x, v_muladd(r1 + r3 + r2, _4, r0 + r4 + (r2 + r2)) * _scale); - } + int x = 0; + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + for (; x <= width - v_int16::nlanes; x += 3*v_int16::nlanes/4, src += 6*v_int16::nlanes/4, row += 3*v_int16::nlanes/4) + { + v_int16 r0, r1, r2, r3, r4; + v_zip(vx_lut_quads(src, idx), vx_lut_quads(src, idx + v_int16::nlanes/4 + 2), r0, r1); + v_zip(vx_lut_quads(src, idx + 1), vx_lut_quads(src, idx + v_int16::nlanes/4 + 3), r2, r3); + r4 = vx_lut_quads(src, idx + 2); + v_store(row, v_pack_triplets(v_dotprod(r0, v_1_4) + v_dotprod(r2, v_6_4) + v_expand_low(r4))); + v_store(row + 3*v_int32::nlanes/4, v_pack_triplets(v_dotprod(r1, v_1_4) + v_dotprod(r3, v_6_4) + v_expand_high(r4))); + } + vx_cleanup(); + + return x; +} +template<> int PyrDownVecH(const short* src, int* row, int width) +{ + int idx[v_int16::nlanes/2 + 4]; + for (int i = 0; i < v_int16::nlanes/4 + 2; i++) + { + idx[i] = 8*i; + idx[i + v_int16::nlanes/4 + 2] = 8*i + 4; + } - return x; + int x = 0; + v_int16 v_1_4 = 
v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + for (; x <= width - v_int16::nlanes; x += v_int16::nlanes, src += 2*v_int16::nlanes, row += v_int16::nlanes) + { + v_int16 r0, r1, r2, r3, r4; + v_zip(vx_lut_quads(src, idx), vx_lut_quads(src, idx + v_int16::nlanes/4 + 2), r0, r1); + v_zip(vx_lut_quads(src, idx + 1), vx_lut_quads(src, idx + v_int16::nlanes/4 + 3), r2, r3); + r4 = vx_lut_quads(src, idx + 2); + v_store(row, v_dotprod(r0, v_1_4) + v_dotprod(r2, v_6_4) + v_expand_low(r4)); + v_store(row + v_int32::nlanes, v_dotprod(r1, v_1_4) + v_dotprod(r3, v_6_4) + v_expand_high(r4)); } -}; + vx_cleanup(); -#if CV_SSE4_1 || CV_NEON || CV_VSX + return x; +} -struct PyrDownVec_32s16u +template<> int PyrDownVecH(const ushort* src, int* row, int width) +{ + int x = 0; + const ushort *src0 = src, *src2 = src + 2, *src4 = src + 3; + + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + v_uint16 v_half = vx_setall_u16(0x8000); + v_int32 v_half15 = vx_setall_s32(0x00078000); + for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes) + v_store(row, v_dotprod(v_reinterpret_as_s16(v_sub_wrap(vx_load(src0), v_half)), v_1_4) + + v_dotprod(v_reinterpret_as_s16(v_sub_wrap(vx_load(src2), v_half)), v_6_4) + + v_reinterpret_as_s32(v_reinterpret_as_u32(vx_load(src4)) >> 16) + v_half15); + vx_cleanup(); + + return x; +} +template<> int PyrDownVecH(const ushort* src, int* row, int width) +{ + int x = 0; + const ushort *src0 = src, *src2 = src + 4, *src4 = src + 6; + + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + v_uint16 v_half = vx_setall_u16(0x8000); + v_int32 v_half15 = vx_setall_s32(0x00078000); + for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes) + v_store(row, v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(v_sub_wrap(vx_load(src0), v_half))), v_1_4) + + v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(v_sub_wrap(vx_load(src2), v_half))), v_6_4) + + v_reinterpret_as_s32(v_reinterpret_as_u32(v_interleave_pairs(vx_load(src4))) >> 16) + v_half15); + vx_cleanup(); + + return x; +} +template<> int PyrDownVecH(const ushort* src, int* row, int width) { - int operator()(int** src, ushort* dst, int, int width) const + int idx[v_int16::nlanes/2 + 4]; + for (int i = 0; i < v_int16::nlanes/4 + 2; i++) { - int x = 0; - const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + idx[i] = 6*i; + idx[i + v_int16::nlanes/4 + 2] = 6*i + 3; + } - for( ; x <= width - v_uint16::nlanes; x += v_uint16::nlanes) - { - v_int32 r00 = vx_load(row0 + x), - r01 = vx_load(row0 + x + v_int32::nlanes), - r10 = vx_load(row1 + x), - r11 = vx_load(row1 + x + v_int32::nlanes), - r20 = vx_load(row2 + x), - r21 = vx_load(row2 + x + v_int32::nlanes), - r30 = vx_load(row3 + x), - r31 = vx_load(row3 + x + v_int32::nlanes), - r40 = vx_load(row4 + x), - r41 = vx_load(row4 + x + v_int32::nlanes); - v_store(dst + x, v_rshr_pack_u<8>(r00 + r40 + (r20 + r20) + ((r10 + r20 + r30) << 2), - r01 + r41 + (r21 + r21) + ((r11 + r21 + r31) << 2))); - } - if (x <= width - v_int32::nlanes) - { - v_int32 r00 = vx_load(row0 + x), - r10 = vx_load(row1 + x), - r20 = vx_load(row2 + x), - r30 = 
vx_load(row3 + x), - r40 = vx_load(row4 + x); - v_rshr_pack_u_store<8>(dst + x, r00 + r40 + (r20 + r20) + ((r10 + r20 + r30) << 2)); - x += v_int32::nlanes; - } + int x = 0; + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + v_uint16 v_half = vx_setall_u16(0x8000); + v_int32 v_half15 = vx_setall_s32(0x00078000); + for (; x <= width - v_int16::nlanes; x += 3*v_int16::nlanes/4, src += 6*v_int16::nlanes/4, row += 3*v_int16::nlanes/4) + { + v_uint16 r0, r1, r2, r3, r4; + v_zip(vx_lut_quads(src, idx), vx_lut_quads(src, idx + v_int16::nlanes/4 + 2), r0, r1); + v_zip(vx_lut_quads(src, idx + 1), vx_lut_quads(src, idx + v_int16::nlanes/4 + 3), r2, r3); + r4 = vx_lut_quads(src, idx + 2); + v_store(row , v_pack_triplets(v_dotprod(v_reinterpret_as_s16(v_sub_wrap(r0, v_half)), v_1_4) + + v_dotprod(v_reinterpret_as_s16(v_sub_wrap(r2, v_half)), v_6_4) + + v_reinterpret_as_s32(v_expand_low(r4)) + v_half15)); + v_store(row + 3*v_int32::nlanes/4, v_pack_triplets(v_dotprod(v_reinterpret_as_s16(v_sub_wrap(r1, v_half)), v_1_4) + + v_dotprod(v_reinterpret_as_s16(v_sub_wrap(r3, v_half)), v_6_4) + + v_reinterpret_as_s32(v_expand_high(r4)) + v_half15)); + } + vx_cleanup(); - return x; + return x; +} +template<> int PyrDownVecH(const ushort* src, int* row, int width) +{ + int idx[v_int16::nlanes/2 + 4]; + for (int i = 0; i < v_int16::nlanes/4 + 2; i++) + { + idx[i] = 8*i; + idx[i + v_int16::nlanes/4 + 2] = 8*i + 4; } -}; -#else + int x = 0; + v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001)); + v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006)); + v_uint16 v_half = vx_setall_u16(0x8000); + v_int32 v_half15 = vx_setall_s32(0x00078000); + for (; x <= width - v_int16::nlanes; x += v_int16::nlanes, src += 2*v_int16::nlanes, row += v_int16::nlanes) + { + v_uint16 r0, r1, r2, r3, r4; + v_zip(vx_lut_quads(src, idx), vx_lut_quads(src, idx + v_int16::nlanes/4 + 2), r0, r1); + v_zip(vx_lut_quads(src, idx + 1), vx_lut_quads(src, idx + v_int16::nlanes/4 + 3), r2, r3); + r4 = vx_lut_quads(src, idx + 2); + v_store(row , v_dotprod(v_reinterpret_as_s16(v_sub_wrap(r0, v_half)), v_1_4) + + v_dotprod(v_reinterpret_as_s16(v_sub_wrap(r2, v_half)), v_6_4) + + v_reinterpret_as_s32(v_expand_low(r4)) + v_half15); + v_store(row + v_int32::nlanes, v_dotprod(v_reinterpret_as_s16(v_sub_wrap(r1, v_half)), v_1_4) + + v_dotprod(v_reinterpret_as_s16(v_sub_wrap(r3, v_half)), v_6_4) + + v_reinterpret_as_s32(v_expand_high(r4)) + v_half15); + } + vx_cleanup(); -typedef PyrDownNoVec PyrDownVec_32s16u; + return x; +} -#endif +template<> int PyrDownVecH(const float* src, float* row, int width) +{ + int x = 0; + const float *src0 = src, *src2 = src + 2, *src4 = src + 4; -struct PyrDownVec_32s16s + v_float32 _4 = vx_setall_f32(4.f), _6 = vx_setall_f32(6.f); + for (; x <= width - v_float32::nlanes; x += v_float32::nlanes, src0 += 2*v_float32::nlanes, src2 += 2*v_float32::nlanes, src4 += 2*v_float32::nlanes, row+=v_float32::nlanes) + { + v_float32 r0, r1, r2, r3, r4, rtmp; + v_load_deinterleave(src0, r0, r1); + v_load_deinterleave(src2, r2, r3); + v_load_deinterleave(src4, r4, rtmp); + v_store(row, v_muladd(r2, _6, v_muladd(r1 + r3, _4, r0 + r4))); + } + vx_cleanup(); + + return x; +} +template<> int PyrDownVecH(const float* src, float* row, int width) { - int operator()(int** src, short* dst, int, int width) const + int x = 0; + const float *src0 = src, *src2 = src + 4, *src4 = src + 6; + + v_float32 _4 = vx_setall_f32(4.f), _6 = 
vx_setall_f32(6.f); + for (; x <= width - 2*v_float32::nlanes; x += 2*v_float32::nlanes, src0 += 4*v_float32::nlanes, src2 += 4*v_float32::nlanes, src4 += 4*v_float32::nlanes, row += 2*v_float32::nlanes) { - int x = 0; - const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + v_float32 r0a, r0b, r1a, r1b, r2a, r2b, r3a, r3b, r4a, r4b, rtmpa, rtmpb; + v_load_deinterleave(src0, r0a, r0b, r1a, r1b); + v_load_deinterleave(src2, r2a, r2b, r3a, r3b); + v_load_deinterleave(src4, rtmpa, rtmpb, r4a, r4b); + v_store_interleave(row, v_muladd(r2a, _6, v_muladd(r1a + r3a, _4, r0a + r4a)), v_muladd(r2b, _6, v_muladd(r1b + r3b, _4, r0b + r4b))); + } + vx_cleanup(); - for( ; x <= width - v_int16::nlanes; x += v_int16::nlanes) - { - v_int32 r00 = vx_load(row0 + x), - r01 = vx_load(row0 + x + v_int32::nlanes), - r10 = vx_load(row1 + x), - r11 = vx_load(row1 + x + v_int32::nlanes), - r20 = vx_load(row2 + x), - r21 = vx_load(row2 + x + v_int32::nlanes), - r30 = vx_load(row3 + x), - r31 = vx_load(row3 + x + v_int32::nlanes), - r40 = vx_load(row4 + x), - r41 = vx_load(row4 + x + v_int32::nlanes); - v_store(dst + x, v_rshr_pack<8>(r00 + r40 + (r20 + r20) + ((r10 + r20 + r30) << 2), - r01 + r41 + (r21 + r21) + ((r11 + r21 + r31) << 2))); - } - if (x <= width - v_int32::nlanes) - { - v_int32 r00 = vx_load(row0 + x), - r10 = vx_load(row1 + x), - r20 = vx_load(row2 + x), - r30 = vx_load(row3 + x), - r40 = vx_load(row4 + x); - v_rshr_pack_store<8>(dst + x, r00 + r40 + (r20 + r20) + ((r10 + r20 + r30) << 2)); - x += v_int32::nlanes; - } + return x; +} +template<> int PyrDownVecH(const float* src, float* row, int width) +{ + int idx[v_float32::nlanes/2 + 4]; + for (int i = 0; i < v_float32::nlanes/4 + 2; i++) + { + idx[i] = 6*i; + idx[i + v_float32::nlanes/4 + 2] = 6*i + 3; + } - return x; + int x = 0; + v_float32 _4 = vx_setall_f32(4.f), _6 = vx_setall_f32(6.f); + for (; x <= width - v_float32::nlanes; x += 3*v_float32::nlanes/4, src += 6*v_float32::nlanes/4, row += 3*v_float32::nlanes/4) + { + v_float32 r0 = vx_lut_quads(src, idx); + v_float32 r1 = vx_lut_quads(src, idx + v_float32::nlanes/4 + 2); + v_float32 r2 = vx_lut_quads(src, idx + 1); + v_float32 r3 = vx_lut_quads(src, idx + v_float32::nlanes/4 + 3); + v_float32 r4 = vx_lut_quads(src, idx + 2); + v_store(row, v_pack_triplets(v_muladd(r2, _6, v_muladd(r1 + r3, _4, r0 + r4)))); } -}; + vx_cleanup(); -struct PyrUpVec_32s8u + return x; +} +template<> int PyrDownVecH(const float* src, float* row, int width) { - int operator()(int** src, uchar** dst, int, int width) const + int idx[v_float32::nlanes/2 + 4]; + for (int i = 0; i < v_float32::nlanes/4 + 2; i++) { - int x = 0; - uchar *dst0 = dst[0], *dst1 = dst[1]; - const int *row0 = src[0], *row1 = src[1], *row2 = src[2]; + idx[i] = 8*i; + idx[i + v_float32::nlanes/4 + 2] = 8*i + 4; + } - for( ; x <= width - v_uint8::nlanes; x += v_uint8::nlanes) - { - v_int16 v_r00 = v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes)), - v_r01 = v_pack(vx_load(row0 + x + 2 * v_int32::nlanes), vx_load(row0 + x + 3 * v_int32::nlanes)), - v_r10 = v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes)), - v_r11 = v_pack(vx_load(row1 + x + 2 * v_int32::nlanes), vx_load(row1 + x + 3 * v_int32::nlanes)), - v_r20 = v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes)), - v_r21 = v_pack(vx_load(row2 + x + 2 * v_int32::nlanes), vx_load(row2 + x + 3 * v_int32::nlanes)); - v_int16 v_2r10 = v_r10 + v_r10, v_2r11 = (v_r11 + v_r11); - v_store(dst0 + x, v_rshr_pack_u<6>(v_r00 + v_r20 + 
(v_2r10 + v_2r10 + v_2r10), v_r01 + v_r21 + (v_2r11 + v_2r11 + v_2r11))); - v_store(dst1 + x, v_rshr_pack_u<6>((v_r10 + v_r20) << 2, (v_r11 + v_r21) << 2)); - } - if(x <= width - v_uint16::nlanes) - { - v_int16 v_r00 = v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes)), - v_r10 = v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes)), - v_r20 = v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes)); - v_int16 v_2r10 = v_r10 + v_r10; - v_rshr_pack_u_store<6>(dst0 + x, v_r00 + v_r20 + (v_2r10 + v_2r10 + v_2r10)); - v_rshr_pack_u_store<6>(dst1 + x, (v_r10 + v_r20) << 2); - x += v_uint16::nlanes; - } - for (; x <= width - v_int32x4::nlanes; x += v_int32x4::nlanes) - { - v_int32 v_r00 = vx_load(row0 + x), - v_r10 = vx_load(row1 + x), - v_r20 = vx_load(row2 + x); - v_int32 v_2r10 = v_r10 + v_r10; - v_int16 d = v_pack(v_r00 + v_r20 + (v_2r10 + v_2r10 + v_2r10), (v_r10 + v_r20) << 2); - *(int*)(dst0 + x) = v_reinterpret_as_s32(v_rshr_pack_u<6>(d, vx_setzero_s16())).get0(); - *(int*)(dst1 + x) = v_reinterpret_as_s32(v_rshr_pack_u<6>(v_combine_high(d, d), vx_setzero_s16())).get0(); - } + int x = 0; + v_float32 _4 = vx_setall_f32(4.f), _6 = vx_setall_f32(6.f); + for (; x <= width - v_float32::nlanes; x += v_float32::nlanes, src += 2*v_float32::nlanes, row += v_float32::nlanes) + { + v_float32 r0 = vx_lut_quads(src, idx); + v_float32 r1 = vx_lut_quads(src, idx + v_float32::nlanes/4 + 2); + v_float32 r2 = vx_lut_quads(src, idx + 1); + v_float32 r3 = vx_lut_quads(src, idx + v_float32::nlanes/4 + 3); + v_float32 r4 = vx_lut_quads(src, idx + 2); + v_store(row, v_muladd(r2, _6, v_muladd(r1 + r3, _4, r0 + r4))); + } + vx_cleanup(); + + return x; +} + +#if CV_SIMD_64F +template<> int PyrDownVecH(const double* src, double* row, int width) +{ + int x = 0; + const double *src0 = src, *src2 = src + 2, *src4 = src + 4; - return x; + v_float64 _4 = vx_setall_f64(4.f), _6 = vx_setall_f64(6.f); + for (; x <= width - v_float64::nlanes; x += v_float64::nlanes, src0 += 2*v_float64::nlanes, src2 += 2*v_float64::nlanes, src4 += 2*v_float64::nlanes, row += v_float64::nlanes) + { + v_float64 r0, r1, r2, r3, r4, rtmp; + v_load_deinterleave(src0, r0, r1); + v_load_deinterleave(src2, r2, r3); + v_load_deinterleave(src4, r4, rtmp); + v_store(row, v_muladd(r2, _6, v_muladd(r1 + r3, _4, r0 + r4))); } -}; + vx_cleanup(); + + return x; +} +#endif -struct PyrUpVec_32s16s +template<> int PyrDownVecV(int** src, uchar* dst, int width) { - int operator()(int** src, short** dst, int, int width) const + int x = 0; + const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + + for( ; x <= width - v_uint8::nlanes; x += v_uint8::nlanes ) + { + v_uint16 r0, r1, r2, r3, r4, t0, t1; + r0 = v_reinterpret_as_u16(v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes))); + r1 = v_reinterpret_as_u16(v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes))); + r2 = v_reinterpret_as_u16(v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes))); + r3 = v_reinterpret_as_u16(v_pack(vx_load(row3 + x), vx_load(row3 + x + v_int32::nlanes))); + r4 = v_reinterpret_as_u16(v_pack(vx_load(row4 + x), vx_load(row4 + x + v_int32::nlanes))); + t0 = r0 + r4 + (r2 + r2) + ((r1 + r3 + r2) << 2); + r0 = v_reinterpret_as_u16(v_pack(vx_load(row0 + x + 2*v_int32::nlanes), vx_load(row0 + x + 3*v_int32::nlanes))); + r1 = v_reinterpret_as_u16(v_pack(vx_load(row1 + x + 2*v_int32::nlanes), vx_load(row1 + x + 3*v_int32::nlanes))); + r2 = v_reinterpret_as_u16(v_pack(vx_load(row2 + x + 
2*v_int32::nlanes), vx_load(row2 + x + 3*v_int32::nlanes))); + r3 = v_reinterpret_as_u16(v_pack(vx_load(row3 + x + 2*v_int32::nlanes), vx_load(row3 + x + 3*v_int32::nlanes))); + r4 = v_reinterpret_as_u16(v_pack(vx_load(row4 + x + 2*v_int32::nlanes), vx_load(row4 + x + 3*v_int32::nlanes))); + t1 = r0 + r4 + (r2 + r2) + ((r1 + r3 + r2) << 2); + v_store(dst + x, v_rshr_pack<8>(t0, t1)); + } + if (x <= width - v_int16::nlanes) { - int x = 0; - short *dst0 = dst[0], *dst1 = dst[1]; - const int *row0 = src[0], *row1 = src[1], *row2 = src[2]; + v_uint16 r0, r1, r2, r3, r4, t0; + r0 = v_reinterpret_as_u16(v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes))); + r1 = v_reinterpret_as_u16(v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes))); + r2 = v_reinterpret_as_u16(v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes))); + r3 = v_reinterpret_as_u16(v_pack(vx_load(row3 + x), vx_load(row3 + x + v_int32::nlanes))); + r4 = v_reinterpret_as_u16(v_pack(vx_load(row4 + x), vx_load(row4 + x + v_int32::nlanes))); + t0 = r0 + r4 + (r2 + r2) + ((r1 + r3 + r2) << 2); + v_rshr_pack_store<8>(dst + x, t0); + x += v_uint16::nlanes; + } + typedef int CV_DECL_ALIGNED(1) unaligned_int; + for ( ; x <= width - v_int32x4::nlanes; x += v_int32x4::nlanes) + { + v_int32x4 r0, r1, r2, r3, r4, t0; + r0 = v_load(row0 + x); + r1 = v_load(row1 + x); + r2 = v_load(row2 + x); + r3 = v_load(row3 + x); + r4 = v_load(row4 + x); + t0 = r0 + r4 + (r2 + r2) + ((r1 + r3 + r2) << 2); + + *((unaligned_int*) (dst + x)) = v_reinterpret_as_s32(v_rshr_pack<8>(v_pack_u(t0, t0), v_setzero_u16())).get0(); + } + vx_cleanup(); - for( ; x <= width - v_int16::nlanes; x += v_int16::nlanes) - { - v_int32 v_r00 = vx_load(row0 + x), - v_r01 = vx_load(row0 + x + v_int32::nlanes), - v_r10 = vx_load(row1 + x), - v_r11 = vx_load(row1 + x + v_int32::nlanes), - v_r20 = vx_load(row2 + x), - v_r21 = vx_load(row2 + x + v_int32::nlanes); - v_store(dst0 + x, v_rshr_pack<6>(v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2)), v_r01 + v_r21 + ((v_r11 << 1) + (v_r11 << 2)))); - v_store(dst1 + x, v_rshr_pack<6>((v_r10 + v_r20) << 2, (v_r11 + v_r21) << 2)); - } - if(x <= width - v_int32::nlanes) - { - v_int32 v_r00 = vx_load(row0 + x), - v_r10 = vx_load(row1 + x), - v_r20 = vx_load(row2 + x); - v_rshr_pack_store<6>(dst0 + x, v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2))); - v_rshr_pack_store<6>(dst1 + x, (v_r10 + v_r20) << 2); - x += v_int32::nlanes; - } + return x; +} - return x; +template <> +int PyrDownVecV(float** src, float* dst, int width) +{ + int x = 0; + const float *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + + v_float32 _4 = vx_setall_f32(4.f), _scale = vx_setall_f32(1.f/256); + for( ; x <= width - v_float32::nlanes; x += v_float32::nlanes) + { + v_float32 r0, r1, r2, r3, r4; + r0 = vx_load(row0 + x); + r1 = vx_load(row1 + x); + r2 = vx_load(row2 + x); + r3 = vx_load(row3 + x); + r4 = vx_load(row4 + x); + v_store(dst + x, v_muladd(r1 + r3 + r2, _4, r0 + r4 + (r2 + r2)) * _scale); } -}; + vx_cleanup(); -#if CV_SSE4_1 || CV_NEON || CV_VSX + return x; +} -struct PyrUpVec_32s16u +template <> int PyrDownVecV(int** src, ushort* dst, int width) { - int operator()(int** src, ushort** dst, int, int width) const + int x = 0; + const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + + for( ; x <= width - v_uint16::nlanes; x += v_uint16::nlanes) + { + v_int32 r00 = vx_load(row0 + x), + r01 = vx_load(row0 + x + v_int32::nlanes), + r10 = vx_load(row1 + x), + r11 = 
vx_load(row1 + x + v_int32::nlanes), + r20 = vx_load(row2 + x), + r21 = vx_load(row2 + x + v_int32::nlanes), + r30 = vx_load(row3 + x), + r31 = vx_load(row3 + x + v_int32::nlanes), + r40 = vx_load(row4 + x), + r41 = vx_load(row4 + x + v_int32::nlanes); + v_store(dst + x, v_rshr_pack_u<8>(r00 + r40 + (r20 + r20) + ((r10 + r20 + r30) << 2), + r01 + r41 + (r21 + r21) + ((r11 + r21 + r31) << 2))); + } + if (x <= width - v_int32::nlanes) { - int x = 0; - ushort *dst0 = dst[0], *dst1 = dst[1]; - const int *row0 = src[0], *row1 = src[1], *row2 = src[2]; + v_int32 r00 = vx_load(row0 + x), + r10 = vx_load(row1 + x), + r20 = vx_load(row2 + x), + r30 = vx_load(row3 + x), + r40 = vx_load(row4 + x); + v_rshr_pack_u_store<8>(dst + x, r00 + r40 + (r20 + r20) + ((r10 + r20 + r30) << 2)); + x += v_int32::nlanes; + } + vx_cleanup(); - for( ; x <= width - v_uint16::nlanes; x += v_uint16::nlanes) - { - v_int32 v_r00 = vx_load(row0 + x), - v_r01 = vx_load(row0 + x + v_int32::nlanes), - v_r10 = vx_load(row1 + x), - v_r11 = vx_load(row1 + x + v_int32::nlanes), - v_r20 = vx_load(row2 + x), - v_r21 = vx_load(row2 + x + v_int32::nlanes); - v_store(dst0 + x, v_rshr_pack_u<6>(v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2)), v_r01 + v_r21 + ((v_r11 << 1) + (v_r11 << 2)))); - v_store(dst1 + x, v_rshr_pack_u<6>((v_r10 + v_r20) << 2, (v_r11 + v_r21) << 2)); - } - if(x <= width - v_int32::nlanes) - { - v_int32 v_r00 = vx_load(row0 + x), - v_r10 = vx_load(row1 + x), - v_r20 = vx_load(row2 + x); - v_rshr_pack_u_store<6>(dst0 + x, v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2))); - v_rshr_pack_u_store<6>(dst1 + x, (v_r10 + v_r20) << 2); - x += v_int32::nlanes; - } + return x; +} - return x; +template <> int PyrDownVecV(int** src, short* dst, int width) +{ + int x = 0; + const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + + for( ; x <= width - v_int16::nlanes; x += v_int16::nlanes) + { + v_int32 r00 = vx_load(row0 + x), + r01 = vx_load(row0 + x + v_int32::nlanes), + r10 = vx_load(row1 + x), + r11 = vx_load(row1 + x + v_int32::nlanes), + r20 = vx_load(row2 + x), + r21 = vx_load(row2 + x + v_int32::nlanes), + r30 = vx_load(row3 + x), + r31 = vx_load(row3 + x + v_int32::nlanes), + r40 = vx_load(row4 + x), + r41 = vx_load(row4 + x + v_int32::nlanes); + v_store(dst + x, v_rshr_pack<8>(r00 + r40 + (r20 + r20) + ((r10 + r20 + r30) << 2), + r01 + r41 + (r21 + r21) + ((r11 + r21 + r31) << 2))); } -}; + if (x <= width - v_int32::nlanes) + { + v_int32 r00 = vx_load(row0 + x), + r10 = vx_load(row1 + x), + r20 = vx_load(row2 + x), + r30 = vx_load(row3 + x), + r40 = vx_load(row4 + x); + v_rshr_pack_store<8>(dst + x, r00 + r40 + (r20 + r20) + ((r10 + r20 + r30) << 2)); + x += v_int32::nlanes; + } + vx_cleanup(); -#else + return x; +} -typedef PyrUpNoVec PyrUpVec_32s16u; +template <> int PyrUpVecV(int** src, uchar** dst, int width) +{ + int x = 0; + uchar *dst0 = dst[0], *dst1 = dst[1]; + const int *row0 = src[0], *row1 = src[1], *row2 = src[2]; -#endif // CV_SSE4_1 + for( ; x <= width - v_uint8::nlanes; x += v_uint8::nlanes) + { + v_int16 v_r00 = v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes)), + v_r01 = v_pack(vx_load(row0 + x + 2 * v_int32::nlanes), vx_load(row0 + x + 3 * v_int32::nlanes)), + v_r10 = v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes)), + v_r11 = v_pack(vx_load(row1 + x + 2 * v_int32::nlanes), vx_load(row1 + x + 3 * v_int32::nlanes)), + v_r20 = v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes)), + v_r21 = v_pack(vx_load(row2 + x + 2 * 
v_int32::nlanes), vx_load(row2 + x + 3 * v_int32::nlanes)); + v_int16 v_2r10 = v_r10 + v_r10, v_2r11 = (v_r11 + v_r11); + v_store(dst0 + x, v_rshr_pack_u<6>(v_r00 + v_r20 + (v_2r10 + v_2r10 + v_2r10), v_r01 + v_r21 + (v_2r11 + v_2r11 + v_2r11))); + v_store(dst1 + x, v_rshr_pack_u<6>((v_r10 + v_r20) << 2, (v_r11 + v_r21) << 2)); + } + if(x <= width - v_uint16::nlanes) + { + v_int16 v_r00 = v_pack(vx_load(row0 + x), vx_load(row0 + x + v_int32::nlanes)), + v_r10 = v_pack(vx_load(row1 + x), vx_load(row1 + x + v_int32::nlanes)), + v_r20 = v_pack(vx_load(row2 + x), vx_load(row2 + x + v_int32::nlanes)); + v_int16 v_2r10 = v_r10 + v_r10; + v_rshr_pack_u_store<6>(dst0 + x, v_r00 + v_r20 + (v_2r10 + v_2r10 + v_2r10)); + v_rshr_pack_u_store<6>(dst1 + x, (v_r10 + v_r20) << 2); + x += v_uint16::nlanes; + } + typedef int CV_DECL_ALIGNED(1) unaligned_int; + for (; x <= width - v_int32x4::nlanes; x += v_int32x4::nlanes) + { + v_int32 v_r00 = vx_load(row0 + x), + v_r10 = vx_load(row1 + x), + v_r20 = vx_load(row2 + x); + v_int32 v_2r10 = v_r10 + v_r10; + v_int16 d = v_pack(v_r00 + v_r20 + (v_2r10 + v_2r10 + v_2r10), (v_r10 + v_r20) << 2); + *(unaligned_int*)(dst0 + x) = v_reinterpret_as_s32(v_rshr_pack_u<6>(d, vx_setzero_s16())).get0(); + *(unaligned_int*)(dst1 + x) = v_reinterpret_as_s32(v_rshr_pack_u<6>(v_combine_high(d, d), vx_setzero_s16())).get0(); + } + vx_cleanup(); -struct PyrUpVec_32f + return x; +} + +template <> int PyrUpVecV(int** src, short** dst, int width) { - int operator()(float** src, float** dst, int, int width) const + int x = 0; + short *dst0 = dst[0], *dst1 = dst[1]; + const int *row0 = src[0], *row1 = src[1], *row2 = src[2]; + + for( ; x <= width - v_int16::nlanes; x += v_int16::nlanes) + { + v_int32 v_r00 = vx_load(row0 + x), + v_r01 = vx_load(row0 + x + v_int32::nlanes), + v_r10 = vx_load(row1 + x), + v_r11 = vx_load(row1 + x + v_int32::nlanes), + v_r20 = vx_load(row2 + x), + v_r21 = vx_load(row2 + x + v_int32::nlanes); + v_store(dst0 + x, v_rshr_pack<6>(v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2)), v_r01 + v_r21 + ((v_r11 << 1) + (v_r11 << 2)))); + v_store(dst1 + x, v_rshr_pack<6>((v_r10 + v_r20) << 2, (v_r11 + v_r21) << 2)); + } + if(x <= width - v_int32::nlanes) { - int x = 0; - const float *row0 = src[0], *row1 = src[1], *row2 = src[2]; - float *dst0 = dst[0], *dst1 = dst[1]; + v_int32 v_r00 = vx_load(row0 + x), + v_r10 = vx_load(row1 + x), + v_r20 = vx_load(row2 + x); + v_rshr_pack_store<6>(dst0 + x, v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2))); + v_rshr_pack_store<6>(dst1 + x, (v_r10 + v_r20) << 2); + x += v_int32::nlanes; + } + vx_cleanup(); - v_float32 v_6 = vx_setall_f32(6.0f), v_scale = vx_setall_f32(1.f/64.f), v_scale4 = vx_setall_f32(1.f/16.f); - for( ; x <= width - v_float32::nlanes; x += v_float32::nlanes) - { - v_float32 v_r0 = vx_load(row0 + x), - v_r1 = vx_load(row1 + x), - v_r2 = vx_load(row2 + x); - v_store(dst1 + x, v_scale4 * (v_r1 + v_r2)); - v_store(dst0 + x, v_scale * (v_muladd(v_6, v_r1, v_r0) + v_r2)); - } + return x; +} + +template <> int PyrUpVecV(int** src, ushort** dst, int width) +{ + int x = 0; + ushort *dst0 = dst[0], *dst1 = dst[1]; + const int *row0 = src[0], *row1 = src[1], *row2 = src[2]; - return x; + for( ; x <= width - v_uint16::nlanes; x += v_uint16::nlanes) + { + v_int32 v_r00 = vx_load(row0 + x), + v_r01 = vx_load(row0 + x + v_int32::nlanes), + v_r10 = vx_load(row1 + x), + v_r11 = vx_load(row1 + x + v_int32::nlanes), + v_r20 = vx_load(row2 + x), + v_r21 = vx_load(row2 + x + v_int32::nlanes); + v_store(dst0 + x, v_rshr_pack_u<6>(v_r00 + 
v_r20 + ((v_r10 << 1) + (v_r10 << 2)), v_r01 + v_r21 + ((v_r11 << 1) + (v_r11 << 2)))); + v_store(dst1 + x, v_rshr_pack_u<6>((v_r10 + v_r20) << 2, (v_r11 + v_r21) << 2)); } -}; + if(x <= width - v_int32::nlanes) + { + v_int32 v_r00 = vx_load(row0 + x), + v_r10 = vx_load(row1 + x), + v_r20 = vx_load(row2 + x); + v_rshr_pack_u_store<6>(dst0 + x, v_r00 + v_r20 + ((v_r10 << 1) + (v_r10 << 2))); + v_rshr_pack_u_store<6>(dst1 + x, (v_r10 + v_r20) << 2); + x += v_int32::nlanes; + } + vx_cleanup(); -#else + return x; +} + +template <> int PyrUpVecV(float** src, float** dst, int width) +{ + int x = 0; + const float *row0 = src[0], *row1 = src[1], *row2 = src[2]; + float *dst0 = dst[0], *dst1 = dst[1]; -typedef PyrDownNoVec PyrDownVec_32s8u; -typedef PyrDownNoVec PyrDownVec_32s16u; -typedef PyrDownNoVec PyrDownVec_32s16s; -typedef PyrDownNoVec PyrDownVec_32f; + v_float32 v_6 = vx_setall_f32(6.0f), v_scale = vx_setall_f32(1.f/64.f), v_scale4 = vx_setall_f32(1.f/16.f); + for( ; x <= width - v_float32::nlanes; x += v_float32::nlanes) + { + v_float32 v_r0 = vx_load(row0 + x), + v_r1 = vx_load(row1 + x), + v_r2 = vx_load(row2 + x); + v_store(dst1 + x, v_scale4 * (v_r1 + v_r2)); + v_store(dst0 + x, v_scale * (v_muladd(v_6, v_r1, v_r0) + v_r2)); + } + vx_cleanup(); -typedef PyrUpNoVec PyrUpVec_32s8u; -typedef PyrUpNoVec PyrUpVec_32s16s; -typedef PyrUpNoVec PyrUpVec_32s16u; -typedef PyrUpNoVec PyrUpVec_32f; + return x; +} #endif -template void +template void pyrDown_( const Mat& _src, Mat& _dst, int borderType ) { const int PD_SZ = 5; @@ -408,7 +737,6 @@ pyrDown_( const Mat& _src, Mat& _dst, int borderType ) int* tabM = _tabM.data(); WT* rows[PD_SZ]; CastOp castOp; - VecOp vecOp; CV_Assert( ssize.width > 0 && ssize.height > 0 && std::abs(dsize.width*2 - ssize.width) <= 2 && @@ -460,12 +788,25 @@ pyrDown_( const Mat& _src, Mat& _dst, int borderType ) if( cn == 1 ) { + x += PyrDownVecH(src + x * 2 - 2, row + x, width0 - x); for( ; x < width0; x++ ) row[x] = src[x*2]*6 + (src[x*2 - 1] + src[x*2 + 1])*4 + src[x*2 - 2] + src[x*2 + 2]; } + else if( cn == 2 ) + { + x += PyrDownVecH(src + x * 2 - 4, row + x, width0 - x); + for( ; x < width0; x += 2 ) + { + const T* s = src + x*2; + WT t0 = s[0] * 6 + (s[-2] + s[2]) * 4 + s[-4] + s[4]; + WT t1 = s[1] * 6 + (s[-1] + s[3]) * 4 + s[-3] + s[5]; + row[x] = t0; row[x + 1] = t1; + } + } else if( cn == 3 ) { + x += PyrDownVecH(src + x * 2 - 6, row + x, width0 - x); for( ; x < width0; x += 3 ) { const T* s = src + x*2; @@ -477,6 +818,7 @@ pyrDown_( const Mat& _src, Mat& _dst, int borderType ) } else if( cn == 4 ) { + x += PyrDownVecH(src + x * 2 - 8, row + x, width0 - x); for( ; x < width0; x += 4 ) { const T* s = src + x*2; @@ -508,14 +850,14 @@ pyrDown_( const Mat& _src, Mat& _dst, int borderType ) rows[k] = buf + ((y*2 - PD_SZ/2 + k - sy0) % PD_SZ)*bufstep; row0 = rows[0]; row1 = rows[1]; row2 = rows[2]; row3 = rows[3]; row4 = rows[4]; - x = vecOp(rows, dst, (int)_dst.step, dsize.width); + x = PyrDownVecV(rows, dst, dsize.width); for( ; x < dsize.width; x++ ) dst[x] = castOp(row2[x]*6 + (row1[x] + row3[x])*4 + row0[x] + row4[x]); } } -template void +template void pyrUp_( const Mat& _src, Mat& _dst, int) { const int PU_SZ = 3; @@ -532,7 +874,7 @@ pyrUp_( const Mat& _src, Mat& _dst, int) WT* rows[PU_SZ]; T* dsts[2]; CastOp castOp; - VecOp vecOp; + //PyrUpVecH vecOpH; CV_Assert( std::abs(dsize.width - ssize.width*2) == dsize.width % 2 && std::abs(dsize.height - ssize.height*2) == dsize.height % 2); @@ -598,7 +940,7 @@ pyrUp_( const Mat& _src, Mat& _dst, int) row0 = rows[0]; 
row1 = rows[1]; row2 = rows[2]; dsts[0] = dst0; dsts[1] = dst1; - x = vecOp(rows, dsts, (int)_dst.step, dsize.width); + x = PyrUpVecV(rows, dsts, dsize.width); for( ; x < dsize.width; x++ ) { T t1 = castOp((row1[x] + row2[x])*4); @@ -912,15 +1254,15 @@ void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borde PyrFunc func = 0; if( depth == CV_8U ) - func = pyrDown_, PyrDownVec_32s8u>; + func = pyrDown_< FixPtCast >; else if( depth == CV_16S ) - func = pyrDown_, PyrDownVec_32s16s >; + func = pyrDown_< FixPtCast >; else if( depth == CV_16U ) - func = pyrDown_, PyrDownVec_32s16u >; + func = pyrDown_< FixPtCast >; else if( depth == CV_32F ) - func = pyrDown_, PyrDownVec_32f>; + func = pyrDown_< FltCast >; else if( depth == CV_64F ) - func = pyrDown_, PyrDownNoVec >; + func = pyrDown_< FltCast >; else CV_Error( CV_StsUnsupportedFormat, "" ); @@ -1020,15 +1362,15 @@ void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderT PyrFunc func = 0; if( depth == CV_8U ) - func = pyrUp_, PyrUpVec_32s8u >; + func = pyrUp_< FixPtCast >; else if( depth == CV_16S ) - func = pyrUp_, PyrUpVec_32s16s >; + func = pyrUp_< FixPtCast >; else if( depth == CV_16U ) - func = pyrUp_, PyrUpVec_32s16u >; + func = pyrUp_< FixPtCast >; else if( depth == CV_32F ) - func = pyrUp_, PyrUpVec_32f >; + func = pyrUp_< FltCast >; else if( depth == CV_64F ) - func = pyrUp_, PyrUpNoVec >; + func = pyrUp_< FltCast >; else CV_Error( CV_StsUnsupportedFormat, "" ); From 20afae5a144e4bdee9b341d7f8d3209e696e426d Mon Sep 17 00:00:00 2001 From: berak Date: Thu, 28 Feb 2019 11:28:38 +0100 Subject: [PATCH 04/19] core: fix mat matx multiplication --- modules/core/include/opencv2/core/mat.hpp | 4 ++-- modules/core/test/test_mat.cpp | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp index a5fef4415a..2c23a2abb4 100644 --- a/modules/core/include/opencv2/core/mat.hpp +++ b/modules/core/include/opencv2/core/mat.hpp @@ -3627,9 +3627,9 @@ CV_EXPORTS MatExpr operator * (const MatExpr& e, double s); CV_EXPORTS MatExpr operator * (double s, const MatExpr& e); CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2); template static inline -MatExpr operator * (const Mat& a, const Matx<_Tp, m, n>& b) { return a + Mat(b); } +MatExpr operator * (const Mat& a, const Matx<_Tp, m, n>& b) { return a * Mat(b); } template static inline -MatExpr operator * (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) + b; } +MatExpr operator * (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) * b; } CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b); CV_EXPORTS MatExpr operator / (const Mat& a, double s); diff --git a/modules/core/test/test_mat.cpp b/modules/core/test/test_mat.cpp index f69140bb8c..dc430d08a3 100644 --- a/modules/core/test/test_mat.cpp +++ b/modules/core/test/test_mat.cpp @@ -2001,6 +2001,22 @@ TEST(Core_Vectors, issue_13078_workaround) ASSERT_EQ(7, ints[3]); } +TEST(Core_MatExpr, issue_13926) +{ + Mat M1 = (Mat_(4,4,CV_64FC1) << 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16); + + Matx44d M2(1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 16); + + EXPECT_GE(1e-6, cvtest::norm(M1*M2, M1*M1, NORM_INF)) << Mat(M1*M2) << std::endl << Mat(M1*M1); + EXPECT_GE(1e-6, cvtest::norm(M2*M1, M2*M2, NORM_INF)) << Mat(M2*M1) << std::endl << Mat(M2*M2); +} + #ifdef HAVE_EIGEN TEST(Core_Eigen, eigen2cv_check_Mat_type) From 
5478165e16f2b6a2cfd245d03fde8181d3bc46d5 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 1 Mar 2019 00:48:38 +0000 Subject: [PATCH 05/19] core:vsx Fix narrowing warning on vector splats --- modules/core/include/opencv2/core/vsx_utils.hpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/modules/core/include/opencv2/core/vsx_utils.hpp b/modules/core/include/opencv2/core/vsx_utils.hpp index b4e3f30562..da5b25625c 100644 --- a/modules/core/include/opencv2/core/vsx_utils.hpp +++ b/modules/core/include/opencv2/core/vsx_utils.hpp @@ -22,37 +22,37 @@ typedef __vector unsigned char vec_uchar16; #define vec_uchar16_set(...) (vec_uchar16){__VA_ARGS__} -#define vec_uchar16_sp(c) (__VSX_S16__(vec_uchar16, c)) +#define vec_uchar16_sp(c) (__VSX_S16__(vec_uchar16, (unsigned char)c)) #define vec_uchar16_c(v) ((vec_uchar16)(v)) #define vec_uchar16_z vec_uchar16_sp(0) typedef __vector signed char vec_char16; #define vec_char16_set(...) (vec_char16){__VA_ARGS__} -#define vec_char16_sp(c) (__VSX_S16__(vec_char16, c)) +#define vec_char16_sp(c) (__VSX_S16__(vec_char16, (signed char)c)) #define vec_char16_c(v) ((vec_char16)(v)) #define vec_char16_z vec_char16_sp(0) typedef __vector unsigned short vec_ushort8; #define vec_ushort8_set(...) (vec_ushort8){__VA_ARGS__} -#define vec_ushort8_sp(c) (__VSX_S8__(vec_ushort8, c)) +#define vec_ushort8_sp(c) (__VSX_S8__(vec_ushort8, (unsigned short)c)) #define vec_ushort8_c(v) ((vec_ushort8)(v)) #define vec_ushort8_z vec_ushort8_sp(0) typedef __vector signed short vec_short8; #define vec_short8_set(...) (vec_short8){__VA_ARGS__} -#define vec_short8_sp(c) (__VSX_S8__(vec_short8, c)) +#define vec_short8_sp(c) (__VSX_S8__(vec_short8, (signed short)c)) #define vec_short8_c(v) ((vec_short8)(v)) #define vec_short8_z vec_short8_sp(0) typedef __vector unsigned int vec_uint4; #define vec_uint4_set(...) (vec_uint4){__VA_ARGS__} -#define vec_uint4_sp(c) (__VSX_S4__(vec_uint4, c)) +#define vec_uint4_sp(c) (__VSX_S4__(vec_uint4, (unsigned int)c)) #define vec_uint4_c(v) ((vec_uint4)(v)) #define vec_uint4_z vec_uint4_sp(0) typedef __vector signed int vec_int4; #define vec_int4_set(...) (vec_int4){__VA_ARGS__} -#define vec_int4_sp(c) (__VSX_S4__(vec_int4, c)) +#define vec_int4_sp(c) (__VSX_S4__(vec_int4, (signed int)c)) #define vec_int4_c(v) ((vec_int4)(v)) #define vec_int4_z vec_int4_sp(0) @@ -64,13 +64,13 @@ typedef __vector float vec_float4; typedef __vector unsigned long long vec_udword2; #define vec_udword2_set(...) (vec_udword2){__VA_ARGS__} -#define vec_udword2_sp(c) (__VSX_S2__(vec_udword2, c)) +#define vec_udword2_sp(c) (__VSX_S2__(vec_udword2, (unsigned long long)c)) #define vec_udword2_c(v) ((vec_udword2)(v)) #define vec_udword2_z vec_udword2_sp(0) typedef __vector signed long long vec_dword2; #define vec_dword2_set(...) 
(vec_dword2){__VA_ARGS__} -#define vec_dword2_sp(c) (__VSX_S2__(vec_dword2, c)) +#define vec_dword2_sp(c) (__VSX_S2__(vec_dword2, (signed long long)c)) #define vec_dword2_c(v) ((vec_dword2)(v)) #define vec_dword2_z vec_dword2_sp(0) From 7d2472718404bdf4e3ca79069363cb7e6813b6de Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 1 Mar 2019 13:47:45 +0300 Subject: [PATCH 06/19] ml: handle sigmoid NaN result (should be Inf) - added more debug checks --- modules/ml/src/svm.cpp | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/modules/ml/src/svm.cpp b/modules/ml/src/svm.cpp index 4d987eb678..4a1ffd0575 100644 --- a/modules/ml/src/svm.cpp +++ b/modules/ml/src/svm.cpp @@ -205,11 +205,14 @@ public: for( j = 0; j < vcount; j++ ) { Qfloat t = results[j]; - Qfloat e = std::exp(std::abs(t)); - if( t > 0 ) - results[j] = (Qfloat)((e - 1.)/(e + 1.)); - else - results[j] = (Qfloat)((1. - e)/(1. + e)); + Qfloat e = std::exp(std::abs(t)); // Inf value is possible here + Qfloat r = (Qfloat)((e - 1.) / (e + 1.)); // NaN value is possible here (Inf/Inf or similar) + if (cvIsNaN(r)) + r = std::numeric_limits::infinity(); + if (t < 0) + r = -r; + CV_DbgAssert(!cvIsNaN(r)); + results[j] = r; } } @@ -327,7 +330,7 @@ public: const Qfloat max_val = (Qfloat)(FLT_MAX*1e-3); for( int j = 0; j < vcount; j++ ) { - if( results[j] > max_val ) + if (!(results[j] <= max_val)) // handle NaNs too results[j] = max_val; } } @@ -1949,6 +1952,7 @@ public: const DecisionFunc& df = svm->decision_func[dfi]; sum = -df.rho; int sv_count = svm->getSVCount(dfi); + CV_DbgAssert(sv_count > 0); const double* alpha = &svm->df_alpha[df.ofs]; const int* sv_index = &svm->df_index[df.ofs]; for( k = 0; k < sv_count; k++ ) From 3ba49ccecc773592a3e8d68ad9f5b06196dae6b6 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 1 Mar 2019 16:18:49 +0300 Subject: [PATCH 07/19] imgproc: removed LSD code due original code license conflict --- modules/imgproc/include/opencv2/imgproc.hpp | 10 +- modules/imgproc/src/lsd.cpp | 983 +------------------- modules/imgproc/test/test_lsd.cpp | 4 + samples/cpp/lsd_lines.cpp | 73 -- 4 files changed, 15 insertions(+), 1055 deletions(-) delete mode 100644 samples/cpp/lsd_lines.cpp diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 6dc88d5bd5..ab7d0e6b84 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -1211,14 +1211,12 @@ protected: //! @addtogroup imgproc_feature //! @{ -/** @example samples/cpp/lsd_lines.cpp -An example using the LineSegmentDetector -\image html building_lsd.png "Sample output image" width=434 height=300 -*/ - /** @brief Line segment detector class following the algorithm described at @cite Rafael12 . + +@note Implementation has been removed due original code license conflict + */ class CV_EXPORTS_W LineSegmentDetector : public Algorithm { @@ -1282,6 +1280,8 @@ to edit those, as to tailor it for their own application. is chosen. @param _density_th Minimal density of aligned region points in the enclosing rectangle. @param _n_bins Number of bins in pseudo-ordering of gradient modulus. 
+ +@note Implementation has been removed due original code license conflict */ CV_EXPORTS_W Ptr createLineSegmentDetector( int _refine = LSD_REFINE_STD, double _scale = 0.8, diff --git a/modules/imgproc/src/lsd.cpp b/modules/imgproc/src/lsd.cpp index d06759c2bb..1ec984d290 100644 --- a/modules/imgproc/src/lsd.cpp +++ b/modules/imgproc/src/lsd.cpp @@ -42,123 +42,7 @@ #include "precomp.hpp" #include -///////////////////////////////////////////////////////////////////////////////////////// -// Default LSD parameters -// SIGMA_SCALE 0.6 - Sigma for Gaussian filter is computed as sigma = sigma_scale/scale. -// QUANT 2.0 - Bound to the quantization error on the gradient norm. -// ANG_TH 22.5 - Gradient angle tolerance in degrees. -// LOG_EPS 0.0 - Detection threshold: -log10(NFA) > log_eps -// DENSITY_TH 0.7 - Minimal density of region points in rectangle. -// N_BINS 1024 - Number of bins in pseudo-ordering of gradient modulus. - -#define M_3_2_PI (3 * CV_PI) / 2 // 3/2 pi -#define M_2__PI (2 * CV_PI) // 2 pi - -#ifndef M_LN10 -#define M_LN10 2.30258509299404568402 -#endif - -#define NOTDEF double(-1024.0) // Label for pixels with undefined gradient. - -#define NOTUSED 0 // Label for pixels not used in yet. -#define USED 1 // Label for pixels already used in detection. - -#define RELATIVE_ERROR_FACTOR 100.0 - -const double DEG_TO_RADS = CV_PI / 180; - -#define log_gamma(x) ((x)>15.0?log_gamma_windschitl(x):log_gamma_lanczos(x)) - -struct edge -{ - cv::Point p; - bool taken; -}; - -///////////////////////////////////////////////////////////////////////////////////////// - -inline double distSq(const double x1, const double y1, - const double x2, const double y2) -{ - return (x2 - x1)*(x2 - x1) + (y2 - y1)*(y2 - y1); -} - -inline double dist(const double x1, const double y1, - const double x2, const double y2) -{ - return sqrt(distSq(x1, y1, x2, y2)); -} - -// Signed angle difference -inline double angle_diff_signed(const double& a, const double& b) -{ - double diff = a - b; - while(diff <= -CV_PI) diff += M_2__PI; - while(diff > CV_PI) diff -= M_2__PI; - return diff; -} - -// Absolute value angle difference -inline double angle_diff(const double& a, const double& b) -{ - return std::fabs(angle_diff_signed(a, b)); -} - -// Compare doubles by relative error. -inline bool double_equal(const double& a, const double& b) -{ - // trivial case - if(a == b) return true; - - double abs_diff = fabs(a - b); - double aa = fabs(a); - double bb = fabs(b); - double abs_max = (aa > bb)? aa : bb; - - if(abs_max < DBL_MIN) abs_max = DBL_MIN; - - return (abs_diff / abs_max) <= (RELATIVE_ERROR_FACTOR * DBL_EPSILON); -} - -inline bool AsmallerB_XoverY(const edge& a, const edge& b) -{ - if (a.p.x == b.p.x) return a.p.y < b.p.y; - else return a.p.x < b.p.x; -} - -/** - * Computes the natural logarithm of the absolute value of - * the gamma function of x using Windschitl method. - * See http://www.rskey.org/gamma.htm - */ -inline double log_gamma_windschitl(const double& x) -{ - return 0.918938533204673 + (x-0.5)*log(x) - x - + 0.5*x*log(x*sinh(1/x) + 1/(810.0*pow(x, 6.0))); -} - -/** - * Computes the natural logarithm of the absolute value of - * the gamma function of x using the Lanczos approximation. 
- * See http://www.rskey.org/gamma.htm - */ -inline double log_gamma_lanczos(const double& x) -{ - static double q[7] = { 75122.6331530, 80916.6278952, 36308.2951477, - 8687.24529705, 1168.92649479, 83.8676043424, - 2.50662827511 }; - double a = (x + 0.5) * log(x + 5.5) - (x + 5.5); - double b = 0; - for(int n = 0; n < 7; ++n) - { - a -= log(x + double(n)); - b += q[n] * pow(x, double(n)); - } - return a + log(b); -} -/////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -namespace cv{ +namespace cv { class LineSegmentDetectorImpl CV_FINAL : public LineSegmentDetector { @@ -229,164 +113,7 @@ public: int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray()) CV_OVERRIDE; private: - Mat image; - Mat scaled_image; - Mat_ angles; // in rads - Mat_ modgrad; - Mat_ used; - - int img_width; - int img_height; - double LOG_NT; - - bool w_needed; - bool p_needed; - bool n_needed; - - const double SCALE; - const int doRefine; - const double SIGMA_SCALE; - const double QUANT; - const double ANG_TH; - const double LOG_EPS; - const double DENSITY_TH; - const int N_BINS; - - struct RegionPoint { - int x; - int y; - uchar* used; - double angle; - double modgrad; - }; - - struct normPoint - { - Point2i p; - int norm; - }; - - std::vector ordered_points; - - struct rect - { - double x1, y1, x2, y2; // first and second point of the line segment - double width; // rectangle width - double x, y; // center of the rectangle - double theta; // angle - double dx,dy; // (dx,dy) is vector oriented as the line segment - double prec; // tolerance angle - double p; // probability of a point with angle within 'prec' - }; - LineSegmentDetectorImpl& operator= (const LineSegmentDetectorImpl&); // to quiet MSVC - -/** - * Detect lines in the whole input image. - * - * @param lines Return: A vector of Vec4f elements specifying the beginning and ending point of a line. - * Where Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. - * Returned lines are strictly oriented depending on the gradient. - * @param widths Return: Vector of widths of the regions, where the lines are found. E.g. Width of line. - * @param precisions Return: Vector of precisions with which the lines are found. - * @param nfas Return: Vector containing number of false alarms in the line region, with precision of 10%. - * The bigger the value, logarithmically better the detection. - * * -1 corresponds to 10 mean false alarms - * * 0 corresponds to 1 mean false alarm - * * 1 corresponds to 0.1 mean false alarms - */ - void flsd(std::vector& lines, - std::vector& widths, std::vector& precisions, - std::vector& nfas); - -/** - * Finds the angles and the gradients of the image. Generates a list of pseudo ordered points. - * - * @param threshold The minimum value of the angle that is considered defined, otherwise NOTDEF - * @param n_bins The number of bins with which gradients are ordered by, using bucket sort. - * @param ordered_points Return: Vector of coordinate points that are pseudo ordered by magnitude. - * Pixels would be ordered by norm value, up to a precision given by max_grad/n_bins. - */ - void ll_angle(const double& threshold, const unsigned int& n_bins); - -/** - * Grow a region starting from point s with a defined precision, - * returning the containing points size and the angle of the gradients. - * - * @param s Starting point for the region. 
- * @param reg Return: Vector of points, that are part of the region - * @param reg_angle Return: The mean angle of the region. - * @param prec The precision by which each region angle should be aligned to the mean. - */ - void region_grow(const Point2i& s, std::vector& reg, - double& reg_angle, const double& prec); - -/** - * Finds the bounding rotated rectangle of a region. - * - * @param reg The region of points, from which the rectangle to be constructed from. - * @param reg_angle The mean angle of the region. - * @param prec The precision by which points were found. - * @param p Probability of a point with angle within 'prec'. - * @param rec Return: The generated rectangle. - */ - void region2rect(const std::vector& reg, const double reg_angle, - const double prec, const double p, rect& rec) const; - -/** - * Compute region's angle as the principal inertia axis of the region. - * @return Regions angle. - */ - double get_theta(const std::vector& reg, const double& x, - const double& y, const double& reg_angle, const double& prec) const; - -/** - * An estimation of the angle tolerance is performed by the standard deviation of the angle at points - * near the region's starting point. Then, a new region is grown starting from the same point, but using the - * estimated angle tolerance. If this fails to produce a rectangle with the right density of region points, - * 'reduce_region_radius' is called to try to satisfy this condition. - */ - bool refine(std::vector& reg, double reg_angle, - const double prec, double p, rect& rec, const double& density_th); - -/** - * Reduce the region size, by elimination the points far from the starting point, until that leads to - * rectangle with the right density of region points or to discard the region if too small. - */ - bool reduce_region_radius(std::vector& reg, double reg_angle, - const double prec, double p, rect& rec, double density, const double& density_th); - -/** - * Try some rectangles variations to improve NFA value. Only if the rectangle is not meaningful (i.e., log_nfa <= log_eps). - * @return The new NFA value. - */ - double rect_improve(rect& rec) const; - -/** - * Calculates the number of correctly aligned points within the rectangle. - * @return The new NFA value. - */ - double rect_nfa(const rect& rec) const; - -/** - * Computes the NFA values based on the total number of points, points that agree. - * n, k, p are the binomial parameters. - * @return The new NFA value. - */ - double nfa(const int& n, const int& k, const double& p) const; - -/** - * Is the point at place 'address' aligned to angle theta, up to precision 'prec'? - * @return Whether the point is aligned. 
- */ - bool isAligned(int x, int y, const double& theta, const double& prec) const; - -public: - // Compare norm - static inline bool compare_norm( const normPoint& n1, const normPoint& n2 ) - { - return (n1.norm > n2.norm); - } }; ///////////////////////////////////////////////////////////////////////////////////////// @@ -404,13 +131,12 @@ CV_EXPORTS Ptr createLineSegmentDetector( LineSegmentDetectorImpl::LineSegmentDetectorImpl(int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th, double _log_eps, double _density_th, int _n_bins) - : img_width(0), img_height(0), LOG_NT(0), w_needed(false), p_needed(false), n_needed(false), - SCALE(_scale), doRefine(_refine), SIGMA_SCALE(_sigma_scale), QUANT(_quant), - ANG_TH(_ang_th), LOG_EPS(_log_eps), DENSITY_TH(_density_th), N_BINS(_n_bins) { CV_Assert(_scale > 0 && _sigma_scale > 0 && _quant >= 0 && _ang_th > 0 && _ang_th < 180 && _density_th >= 0 && _density_th < 1 && _n_bins > 0); + CV_UNUSED(_refine); CV_UNUSED(_log_eps); + CV_Error(Error::StsNotImplemented, "Implementation has been removed due original code license issues"); } void LineSegmentDetectorImpl::detect(InputArray _image, OutputArray _lines, @@ -418,708 +144,11 @@ void LineSegmentDetectorImpl::detect(InputArray _image, OutputArray _lines, { CV_INSTRUMENT_REGION(); - image = _image.getMat(); - CV_Assert(!image.empty() && image.type() == CV_8UC1); - - std::vector lines; - std::vector w, p, n; - w_needed = _width.needed(); - p_needed = _prec.needed(); - if (doRefine < LSD_REFINE_ADV) - n_needed = false; - else - n_needed = _nfa.needed(); - - flsd(lines, w, p, n); - - Mat(lines).copyTo(_lines); - if(w_needed) Mat(w).copyTo(_width); - if(p_needed) Mat(p).copyTo(_prec); - if(n_needed) Mat(n).copyTo(_nfa); - - // Clear used structures - ordered_points.clear(); + CV_UNUSED(_image); CV_UNUSED(_lines); + CV_UNUSED(_width); CV_UNUSED(_prec); CV_UNUSED(_nfa); + CV_Error(Error::StsNotImplemented, "Implementation has been removed due original code license issues"); } -void LineSegmentDetectorImpl::flsd(std::vector& lines, - std::vector& widths, std::vector& precisions, - std::vector& nfas) -{ - // Angle tolerance - const double prec = CV_PI * ANG_TH / 180; - const double p = ANG_TH / 180; - const double rho = QUANT / sin(prec); // gradient magnitude threshold - - if(SCALE != 1) - { - Mat gaussian_img; - const double sigma = (SCALE < 1)?(SIGMA_SCALE / SCALE):(SIGMA_SCALE); - const double sprec = 3; - const unsigned int h = (unsigned int)(ceil(sigma * sqrt(2 * sprec * log(10.0)))); - Size ksize(1 + 2 * h, 1 + 2 * h); // kernel size - GaussianBlur(image, gaussian_img, ksize, sigma); - // Scale image to needed size - resize(gaussian_img, scaled_image, Size(), SCALE, SCALE, INTER_LINEAR_EXACT); - ll_angle(rho, N_BINS); - } - else - { - scaled_image = image; - ll_angle(rho, N_BINS); - } - - LOG_NT = 5 * (log10(double(img_width)) + log10(double(img_height))) / 2 + log10(11.0); - const size_t min_reg_size = size_t(-LOG_NT/log10(p)); // minimal number of points in region that can give a meaningful event - - // // Initialize region only when needed - // Mat region = Mat::zeros(scaled_image.size(), CV_8UC1); - used = Mat_::zeros(scaled_image.size()); // zeros = NOTUSED - std::vector reg; - - // Search for line segments - for(size_t i = 0, points_size = ordered_points.size(); i < points_size; ++i) - { - const Point2i& point = ordered_points[i].p; - if((used.at(point) == NOTUSED) && (angles.at(point) != NOTDEF)) - { - double reg_angle; - region_grow(ordered_points[i].p, reg, reg_angle, 
prec); - - // Ignore small regions - if(reg.size() < min_reg_size) { continue; } - - // Construct rectangular approximation for the region - rect rec; - region2rect(reg, reg_angle, prec, p, rec); - - double log_nfa = -1; - if(doRefine > LSD_REFINE_NONE) - { - // At least REFINE_STANDARD lvl. - if(!refine(reg, reg_angle, prec, p, rec, DENSITY_TH)) { continue; } - - if(doRefine >= LSD_REFINE_ADV) - { - // Compute NFA - log_nfa = rect_improve(rec); - if(log_nfa <= LOG_EPS) { continue; } - } - } - // Found new line - - // Add the offset - rec.x1 += 0.5; rec.y1 += 0.5; - rec.x2 += 0.5; rec.y2 += 0.5; - - // scale the result values if a sub-sampling was performed - if(SCALE != 1) - { - rec.x1 /= SCALE; rec.y1 /= SCALE; - rec.x2 /= SCALE; rec.y2 /= SCALE; - rec.width /= SCALE; - } - - //Store the relevant data - lines.push_back(Vec4f(float(rec.x1), float(rec.y1), float(rec.x2), float(rec.y2))); - if(w_needed) widths.push_back(rec.width); - if(p_needed) precisions.push_back(rec.p); - if(n_needed && doRefine >= LSD_REFINE_ADV) nfas.push_back(log_nfa); - } - } -} - -void LineSegmentDetectorImpl::ll_angle(const double& threshold, - const unsigned int& n_bins) -{ - //Initialize data - angles = Mat_(scaled_image.size()); - modgrad = Mat_(scaled_image.size()); - - img_width = scaled_image.cols; - img_height = scaled_image.rows; - - // Undefined the down and right boundaries - angles.row(img_height - 1).setTo(NOTDEF); - angles.col(img_width - 1).setTo(NOTDEF); - - // Computing gradient for remaining pixels - double max_grad = -1; - for(int y = 0; y < img_height - 1; ++y) - { - const uchar* scaled_image_row = scaled_image.ptr(y); - const uchar* next_scaled_image_row = scaled_image.ptr(y+1); - double* angles_row = angles.ptr(y); - double* modgrad_row = modgrad.ptr(y); - for(int x = 0; x < img_width-1; ++x) - { - int DA = next_scaled_image_row[x + 1] - scaled_image_row[x]; - int BC = scaled_image_row[x + 1] - next_scaled_image_row[x]; - int gx = DA + BC; // gradient x component - int gy = DA - BC; // gradient y component - double norm = std::sqrt((gx * gx + gy * gy) / 4.0); // gradient norm - - modgrad_row[x] = norm; // store gradient - - if (norm <= threshold) // norm too small, gradient no defined - { - angles_row[x] = NOTDEF; - } - else - { - angles_row[x] = fastAtan2(float(gx), float(-gy)) * DEG_TO_RADS; // gradient angle computation - if (norm > max_grad) { max_grad = norm; } - } - - } - } - - // Compute histogram of gradient values - double bin_coef = (max_grad > 0) ? 
double(n_bins - 1) / max_grad : 0; // If all image is smooth, max_grad <= 0 - for(int y = 0; y < img_height - 1; ++y) - { - const double* modgrad_row = modgrad.ptr(y); - for(int x = 0; x < img_width - 1; ++x) - { - normPoint _point; - int i = int(modgrad_row[x] * bin_coef); - _point.p = Point(x, y); - _point.norm = i; - ordered_points.push_back(_point); - } - } - - // Sort - std::sort(ordered_points.begin(), ordered_points.end(), compare_norm); -} - -void LineSegmentDetectorImpl::region_grow(const Point2i& s, std::vector& reg, - double& reg_angle, const double& prec) -{ - reg.clear(); - - // Point to this region - RegionPoint seed; - seed.x = s.x; - seed.y = s.y; - seed.used = &used.at(s); - reg_angle = angles.at(s); - seed.angle = reg_angle; - seed.modgrad = modgrad.at(s); - reg.push_back(seed); - - float sumdx = float(std::cos(reg_angle)); - float sumdy = float(std::sin(reg_angle)); - *seed.used = USED; - - //Try neighboring regions - for (size_t i = 0;i(yy); - const double* angles_row = angles.ptr(yy); - const double* modgrad_row = modgrad.ptr(yy); - for(int xx = xx_min; xx <= xx_max; ++xx) - { - uchar& is_used = used_row[xx]; - if(is_used != USED && - (isAligned(xx, yy, reg_angle, prec))) - { - const double& angle = angles_row[xx]; - // Add point - is_used = USED; - RegionPoint region_point; - region_point.x = xx; - region_point.y = yy; - region_point.used = &is_used; - region_point.modgrad = modgrad_row[xx]; - region_point.angle = angle; - reg.push_back(region_point); - - // Update region's angle - sumdx += cos(float(angle)); - sumdy += sin(float(angle)); - // reg_angle is used in the isAligned, so it needs to be updates? - reg_angle = fastAtan2(sumdy, sumdx) * DEG_TO_RADS; - } - } - } - } -} - -void LineSegmentDetectorImpl::region2rect(const std::vector& reg, - const double reg_angle, const double prec, const double p, rect& rec) const -{ - double x = 0, y = 0, sum = 0; - for(size_t i = 0; i < reg.size(); ++i) - { - const RegionPoint& pnt = reg[i]; - const double& weight = pnt.modgrad; - x += double(pnt.x) * weight; - y += double(pnt.y) * weight; - sum += weight; - } - - // Weighted sum must differ from 0 - CV_Assert(sum > 0); - - x /= sum; - y /= sum; - - double theta = get_theta(reg, x, y, reg_angle, prec); - - // Find length and width - double dx = cos(theta); - double dy = sin(theta); - double l_min = 0, l_max = 0, w_min = 0, w_max = 0; - - for(size_t i = 0; i < reg.size(); ++i) - { - double regdx = double(reg[i].x) - x; - double regdy = double(reg[i].y) - y; - - double l = regdx * dx + regdy * dy; - double w = -regdx * dy + regdy * dx; - - if(l > l_max) l_max = l; - else if(l < l_min) l_min = l; - if(w > w_max) w_max = w; - else if(w < w_min) w_min = w; - } - - // Store values - rec.x1 = x + l_min * dx; - rec.y1 = y + l_min * dy; - rec.x2 = x + l_max * dx; - rec.y2 = y + l_max * dy; - rec.width = w_max - w_min; - rec.x = x; - rec.y = y; - rec.theta = theta; - rec.dx = dx; - rec.dy = dy; - rec.prec = prec; - rec.p = p; - - // Min width of 1 pixel - if(rec.width < 1.0) rec.width = 1.0; -} - -double LineSegmentDetectorImpl::get_theta(const std::vector& reg, const double& x, - const double& y, const double& reg_angle, const double& prec) const -{ - double Ixx = 0.0; - double Iyy = 0.0; - double Ixy = 0.0; - - // Compute inertia matrix - for(size_t i = 0; i < reg.size(); ++i) - { - const double& regx = reg[i].x; - const double& regy = reg[i].y; - const double& weight = reg[i].modgrad; - double dx = regx - x; - double dy = regy - y; - Ixx += dy * dy * weight; - Iyy += dx * dx * 
weight; - Ixy -= dx * dy * weight; - } - - // Check if inertia matrix is null - CV_Assert(!(double_equal(Ixx, 0) && double_equal(Iyy, 0) && double_equal(Ixy, 0))); - - // Compute smallest eigenvalue - double lambda = 0.5 * (Ixx + Iyy - sqrt((Ixx - Iyy) * (Ixx - Iyy) + 4.0 * Ixy * Ixy)); - - // Compute angle - double theta = (fabs(Ixx)>fabs(Iyy))? - double(fastAtan2(float(lambda - Ixx), float(Ixy))): - double(fastAtan2(float(Ixy), float(lambda - Iyy))); // in degs - theta *= DEG_TO_RADS; - - // Correct angle by 180 deg if necessary - if(angle_diff(theta, reg_angle) > prec) { theta += CV_PI; } - - return theta; -} - -bool LineSegmentDetectorImpl::refine(std::vector& reg, double reg_angle, - const double prec, double p, rect& rec, const double& density_th) -{ - double density = double(reg.size()) / (dist(rec.x1, rec.y1, rec.x2, rec.y2) * rec.width); - - if (density >= density_th) { return true; } - - // Try to reduce angle tolerance - double xc = double(reg[0].x); - double yc = double(reg[0].y); - const double& ang_c = reg[0].angle; - double sum = 0, s_sum = 0; - int n = 0; - - for (size_t i = 0; i < reg.size(); ++i) - { - *(reg[i].used) = NOTUSED; - if (dist(xc, yc, reg[i].x, reg[i].y) < rec.width) - { - const double& angle = reg[i].angle; - double ang_d = angle_diff_signed(angle, ang_c); - sum += ang_d; - s_sum += ang_d * ang_d; - ++n; - } - } - CV_Assert(n > 0); - double mean_angle = sum / double(n); - // 2 * standard deviation - double tau = 2.0 * sqrt((s_sum - 2.0 * mean_angle * sum) / double(n) + mean_angle * mean_angle); - - // Try new region - region_grow(Point(reg[0].x, reg[0].y), reg, reg_angle, tau); - - if (reg.size() < 2) { return false; } - - region2rect(reg, reg_angle, prec, p, rec); - density = double(reg.size()) / (dist(rec.x1, rec.y1, rec.x2, rec.y2) * rec.width); - - if (density < density_th) - { - return reduce_region_radius(reg, reg_angle, prec, p, rec, density, density_th); - } - else - { - return true; - } -} - -bool LineSegmentDetectorImpl::reduce_region_radius(std::vector& reg, double reg_angle, - const double prec, double p, rect& rec, double density, const double& density_th) -{ - // Compute region's radius - double xc = double(reg[0].x); - double yc = double(reg[0].y); - double radSq1 = distSq(xc, yc, rec.x1, rec.y1); - double radSq2 = distSq(xc, yc, rec.x2, rec.y2); - double radSq = radSq1 > radSq2 ? 
radSq1 : radSq2; - - while(density < density_th) - { - radSq *= 0.75*0.75; // Reduce region's radius to 75% of its value - // Remove points from the region and update 'used' map - for (size_t i = 0; i < reg.size(); ++i) - { - if(distSq(xc, yc, double(reg[i].x), double(reg[i].y)) > radSq) - { - // Remove point from the region - *(reg[i].used) = NOTUSED; - std::swap(reg[i], reg[reg.size() - 1]); - reg.pop_back(); - --i; // To avoid skipping one point - } - } - - if(reg.size() < 2) { return false; } - - // Re-compute rectangle - region2rect(reg ,reg_angle, prec, p, rec); - - // Re-compute region points density - density = double(reg.size()) / - (dist(rec.x1, rec.y1, rec.x2, rec.y2) * rec.width); - } - - return true; -} - -double LineSegmentDetectorImpl::rect_improve(rect& rec) const -{ - double delta = 0.5; - double delta_2 = delta / 2.0; - - double log_nfa = rect_nfa(rec); - - if(log_nfa > LOG_EPS) return log_nfa; // Good rectangle - - // Try to improve - // Finer precision - rect r = rect(rec); // Copy - for(int n = 0; n < 5; ++n) - { - r.p /= 2; - r.prec = r.p * CV_PI; - double log_nfa_new = rect_nfa(r); - if(log_nfa_new > log_nfa) - { - log_nfa = log_nfa_new; - rec = rect(r); - } - } - if(log_nfa > LOG_EPS) return log_nfa; - - // Try to reduce width - r = rect(rec); - for(unsigned int n = 0; n < 5; ++n) - { - if((r.width - delta) >= 0.5) - { - r.width -= delta; - double log_nfa_new = rect_nfa(r); - if(log_nfa_new > log_nfa) - { - rec = rect(r); - log_nfa = log_nfa_new; - } - } - } - if(log_nfa > LOG_EPS) return log_nfa; - - // Try to reduce one side of rectangle - r = rect(rec); - for(unsigned int n = 0; n < 5; ++n) - { - if((r.width - delta) >= 0.5) - { - r.x1 += -r.dy * delta_2; - r.y1 += r.dx * delta_2; - r.x2 += -r.dy * delta_2; - r.y2 += r.dx * delta_2; - r.width -= delta; - double log_nfa_new = rect_nfa(r); - if(log_nfa_new > log_nfa) - { - rec = rect(r); - log_nfa = log_nfa_new; - } - } - } - if(log_nfa > LOG_EPS) return log_nfa; - - // Try to reduce other side of rectangle - r = rect(rec); - for(unsigned int n = 0; n < 5; ++n) - { - if((r.width - delta) >= 0.5) - { - r.x1 -= -r.dy * delta_2; - r.y1 -= r.dx * delta_2; - r.x2 -= -r.dy * delta_2; - r.y2 -= r.dx * delta_2; - r.width -= delta; - double log_nfa_new = rect_nfa(r); - if(log_nfa_new > log_nfa) - { - rec = rect(r); - log_nfa = log_nfa_new; - } - } - } - if(log_nfa > LOG_EPS) return log_nfa; - - // Try finer precision - r = rect(rec); - for(unsigned int n = 0; n < 5; ++n) - { - if((r.width - delta) >= 0.5) - { - r.p /= 2; - r.prec = r.p * CV_PI; - double log_nfa_new = rect_nfa(r); - if(log_nfa_new > log_nfa) - { - rec = rect(r); - log_nfa = log_nfa_new; - } - } - } - - return log_nfa; -} - -double LineSegmentDetectorImpl::rect_nfa(const rect& rec) const -{ - int total_pts = 0, alg_pts = 0; - double half_width = rec.width / 2.0; - double dyhw = rec.dy * half_width; - double dxhw = rec.dx * half_width; - - edge ordered_x[4]; - edge* min_y = &ordered_x[0]; - edge* max_y = &ordered_x[0]; // Will be used for loop range - - ordered_x[0].p.x = int(rec.x1 - dyhw); ordered_x[0].p.y = int(rec.y1 + dxhw); ordered_x[0].taken = false; - ordered_x[1].p.x = int(rec.x2 - dyhw); ordered_x[1].p.y = int(rec.y2 + dxhw); ordered_x[1].taken = false; - ordered_x[2].p.x = int(rec.x2 + dyhw); ordered_x[2].p.y = int(rec.y2 - dxhw); ordered_x[2].taken = false; - ordered_x[3].p.x = int(rec.x1 + dyhw); ordered_x[3].p.y = int(rec.y1 - dxhw); ordered_x[3].taken = false; - - std::sort(ordered_x, ordered_x + 4, AsmallerB_XoverY); - - // Find min y. 
And mark as taken. find max y. - for(unsigned int i = 1; i < 4; ++i) - { - if(min_y->p.y > ordered_x[i].p.y) {min_y = &ordered_x[i]; } - if(max_y->p.y < ordered_x[i].p.y) {max_y = &ordered_x[i]; } - } - min_y->taken = true; - - // Find leftmost untaken point; - edge* leftmost = 0; - for(unsigned int i = 0; i < 4; ++i) - { - if(!ordered_x[i].taken) - { - if(!leftmost) // if uninitialized - { - leftmost = &ordered_x[i]; - } - else if (leftmost->p.x > ordered_x[i].p.x) - { - leftmost = &ordered_x[i]; - } - } - } - CV_Assert(leftmost != NULL); - leftmost->taken = true; - - // Find rightmost untaken point; - edge* rightmost = 0; - for(unsigned int i = 0; i < 4; ++i) - { - if(!ordered_x[i].taken) - { - if(!rightmost) // if uninitialized - { - rightmost = &ordered_x[i]; - } - else if (rightmost->p.x < ordered_x[i].p.x) - { - rightmost = &ordered_x[i]; - } - } - } - CV_Assert(rightmost != NULL); - rightmost->taken = true; - - // Find last untaken point; - edge* tailp = 0; - for(unsigned int i = 0; i < 4; ++i) - { - if(!ordered_x[i].taken) - { - if(!tailp) // if uninitialized - { - tailp = &ordered_x[i]; - } - else if (tailp->p.x > ordered_x[i].p.x) - { - tailp = &ordered_x[i]; - } - } - } - CV_Assert(tailp != NULL); - tailp->taken = true; - - double flstep = (min_y->p.y != leftmost->p.y) ? - (min_y->p.x - leftmost->p.x) / (min_y->p.y - leftmost->p.y) : 0; //first left step - double slstep = (leftmost->p.y != tailp->p.x) ? - (leftmost->p.x - tailp->p.x) / (leftmost->p.y - tailp->p.x) : 0; //second left step - - double frstep = (min_y->p.y != rightmost->p.y) ? - (min_y->p.x - rightmost->p.x) / (min_y->p.y - rightmost->p.y) : 0; //first right step - double srstep = (rightmost->p.y != tailp->p.x) ? - (rightmost->p.x - tailp->p.x) / (rightmost->p.y - tailp->p.x) : 0; //second right step - - double lstep = flstep, rstep = frstep; - - double left_x = min_y->p.x, right_x = min_y->p.x; - - // Loop around all points in the region and count those that are aligned. 
- int min_iter = min_y->p.y; - int max_iter = max_y->p.y; - for(int y = min_iter; y <= max_iter; ++y) - { - if (y < 0 || y >= img_height) continue; - - for(int x = int(left_x); x <= int(right_x); ++x) - { - if (x < 0 || x >= img_width) continue; - - ++total_pts; - if(isAligned(x, y, rec.theta, rec.prec)) - { - ++alg_pts; - } - } - - if(y >= leftmost->p.y) { lstep = slstep; } - if(y >= rightmost->p.y) { rstep = srstep; } - - left_x += lstep; - right_x += rstep; - } - - return nfa(total_pts, alg_pts, rec.p); -} - -double LineSegmentDetectorImpl::nfa(const int& n, const int& k, const double& p) const -{ - // Trivial cases - if(n == 0 || k == 0) { return -LOG_NT; } - if(n == k) { return -LOG_NT - double(n) * log10(p); } - - double p_term = p / (1 - p); - - double log1term = (double(n) + 1) - log_gamma(double(k) + 1) - - log_gamma(double(n-k) + 1) - + double(k) * log(p) + double(n-k) * log(1.0 - p); - double term = exp(log1term); - - if(double_equal(term, 0)) - { - if(k > n * p) return -log1term / M_LN10 - LOG_NT; - else return -LOG_NT; - } - - // Compute more terms if needed - double bin_tail = term; - double tolerance = 0.1; // an error of 10% in the result is accepted - for(int i = k + 1; i <= n; ++i) - { - double bin_term = double(n - i + 1) / double(i); - double mult_term = bin_term * p_term; - term *= mult_term; - bin_tail += term; - if(bin_term < 1) - { - double err = term * ((1 - pow(mult_term, double(n-i+1))) / (1 - mult_term) - 1); - if(err < tolerance * fabs(-log10(bin_tail) - LOG_NT) * bin_tail) break; - } - - } - return -log10(bin_tail) - LOG_NT; -} - -inline bool LineSegmentDetectorImpl::isAligned(int x, int y, const double& theta, const double& prec) const -{ - if(x < 0 || y < 0 || x >= angles.cols || y >= angles.rows) { return false; } - const double& a = angles.at(y, x); - if(a == NOTDEF) { return false; } - - // It is assumed that 'theta' and 'a' are in the range [-pi,pi] - double n_theta = theta - a; - if(n_theta < 0) { n_theta = -n_theta; } - if(n_theta > M_3_2_PI) - { - n_theta -= M_2__PI; - if(n_theta < 0) n_theta = -n_theta; - } - - return n_theta <= prec; -} - - void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, InputArray lines) { CV_INSTRUMENT_REGION(); diff --git a/modules/imgproc/test/test_lsd.cpp b/modules/imgproc/test/test_lsd.cpp index 43d00b4928..e162a3c6f4 100644 --- a/modules/imgproc/test/test_lsd.cpp +++ b/modules/imgproc/test/test_lsd.cpp @@ -5,6 +5,8 @@ namespace opencv_test { namespace { +#if 0 // LSD implementation has been removed due original code license issues + const Size img_size(640, 480); const int LSD_TEST_SEED = 0x134679; const int EPOCHS = 20; @@ -402,4 +404,6 @@ TEST_F(Imgproc_LSD_Common, compareSegmentsVec4i) ASSERT_EQ(result2, 11); } +#endif + }} // namespace diff --git a/samples/cpp/lsd_lines.cpp b/samples/cpp/lsd_lines.cpp deleted file mode 100644 index 3feed9cbc2..0000000000 --- a/samples/cpp/lsd_lines.cpp +++ /dev/null @@ -1,73 +0,0 @@ -#include "opencv2/imgproc.hpp" -#include "opencv2/imgcodecs.hpp" -#include "opencv2/highgui.hpp" -#include - -using namespace std; -using namespace cv; - -int main(int argc, char** argv) -{ - cv::CommandLineParser parser(argc, argv, - "{input i|building.jpg|input image}" - "{refine r|false|if true use LSD_REFINE_STD method, if false use LSD_REFINE_NONE method}" - "{canny c|false|use Canny edge detector}" - "{overlay o|false|show result on input image}" - "{help h|false|show help message}"); - - if (parser.get("help")) - { - parser.printMessage(); - return 0; - } - - parser.printMessage(); 
- - String filename = samples::findFile(parser.get("input")); - bool useRefine = parser.get("refine"); - bool useCanny = parser.get("canny"); - bool overlay = parser.get("overlay"); - - Mat image = imread(filename, IMREAD_GRAYSCALE); - - if( image.empty() ) - { - cout << "Unable to load " << filename; - return 1; - } - - imshow("Source Image", image); - - if (useCanny) - { - Canny(image, image, 50, 200, 3); // Apply Canny edge detector - } - - // Create and LSD detector with standard or no refinement. - Ptr ls = useRefine ? createLineSegmentDetector(LSD_REFINE_STD) : createLineSegmentDetector(LSD_REFINE_NONE); - - double start = double(getTickCount()); - vector lines_std; - - // Detect the lines - ls->detect(image, lines_std); - - double duration_ms = (double(getTickCount()) - start) * 1000 / getTickFrequency(); - std::cout << "It took " << duration_ms << " ms." << std::endl; - - // Show found lines - if (!overlay || useCanny) - { - image = Scalar(0, 0, 0); - } - - ls->drawSegments(image, lines_std); - - String window_name = useRefine ? "Result - standard refinement" : "Result - no refinement"; - window_name += useCanny ? " - Canny edge detector used" : ""; - - imshow(window_name, image); - - waitKey(); - return 0; -} From f5b58e5fc99c63362d73df1835b5d265d76f2d9d Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 1 Mar 2019 18:45:33 +0300 Subject: [PATCH 08/19] bindings: backport generator from OpenCV 4.x - better handling of enum arguments - less merge conflicts --- modules/core/misc/python/pyopencv_umat.hpp | 36 ++ modules/core/misc/python/shadow_umat.hpp | 59 +++ modules/java/generator/gen_java.py | 104 ++++- modules/python/bindings/CMakeLists.txt | 11 +- modules/python/src2/cv2.cpp | 415 ++++++------------ modules/python/src2/gen2.py | 219 ++++++--- modules/python/src2/hdr_parser.py | 79 ++-- .../misc/python/pyopencv_stitching.hpp | 8 +- .../videoio/misc/python/pyopencv_videoio.hpp | 27 -- 9 files changed, 517 insertions(+), 441 deletions(-) create mode 100644 modules/core/misc/python/pyopencv_umat.hpp create mode 100644 modules/core/misc/python/shadow_umat.hpp diff --git a/modules/core/misc/python/pyopencv_umat.hpp b/modules/core/misc/python/pyopencv_umat.hpp new file mode 100644 index 0000000000..697adaf202 --- /dev/null +++ b/modules/core/misc/python/pyopencv_umat.hpp @@ -0,0 +1,36 @@ +#ifdef HAVE_OPENCV_CORE + +#include "opencv2/core/mat.hpp" + +typedef std::vector vector_Range; + +CV_PY_TO_CLASS(UMat); +CV_PY_FROM_CLASS(UMat); + +static bool cv_mappable_to(const Ptr& src, Ptr& dst) +{ + //dst.reset(new UMat(src->getUMat(ACCESS_RW))); + dst.reset(new UMat()); + src->copyTo(*dst); + return true; +} + +static void* cv_UMat_queue() +{ + return cv::ocl::Queue::getDefault().ptr(); +} + +static void* cv_UMat_context() +{ + return cv::ocl::Context::getDefault().ptr(); +} + +static Mat cv_UMat_get(const UMat* _self) +{ + Mat m; + m.allocator = &g_numpyAllocator; + _self->copyTo(m); + return m; +} + +#endif diff --git a/modules/core/misc/python/shadow_umat.hpp b/modules/core/misc/python/shadow_umat.hpp new file mode 100644 index 0000000000..dd83b9cc1d --- /dev/null +++ b/modules/core/misc/python/shadow_umat.hpp @@ -0,0 +1,59 @@ +#error This is a shadow header file, which is not intended for processing by any compiler. \ + Only bindings parser should handle this file. + +namespace cv +{ + +class CV_EXPORTS_W UMat +{ +public: + //! default constructor + CV_WRAP UMat(UMatUsageFlags usageFlags = USAGE_DEFAULT); + //! 
constructs 2D matrix of the specified size and type + // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + CV_WRAP UMat(int rows, int cols, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + CV_WRAP UMat(Size size, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + //! constucts 2D matrix and fills it with the specified value _s. + CV_WRAP UMat(int rows, int cols, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT); + CV_WRAP UMat(Size size, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT); + + //! Mat is mappable to UMat + CV_WRAP_MAPPABLE(Ptr); + + //! returns the OpenCL queue used by OpenCV UMat + CV_WRAP_PHANTOM(static void* queue()); + + //! returns the OpenCL context used by OpenCV UMat + CV_WRAP_PHANTOM(static void* context()); + + //! copy constructor + CV_WRAP UMat(const UMat& m); + + //! creates a matrix header for a part of the bigger matrix + CV_WRAP UMat(const UMat& m, const Range& rowRange, const Range& colRange = Range::all()); + CV_WRAP UMat(const UMat& m, const Rect& roi); + CV_WRAP UMat(const UMat& m, const std::vector& ranges); + + //CV_WRAP_AS(get) Mat getMat(int flags CV_WRAP_DEFAULT(ACCESS_RW)) const; + //! returns a numpy matrix + CV_WRAP_PHANTOM(Mat get() const); + + //! returns true iff the matrix data is continuous + // (i.e. when there are no gaps between successive rows). + // similar to CV_IS_MAT_CONT(cvmat->type) + CV_WRAP bool isContinuous() const; + + //! returns true if the matrix is a submatrix of another matrix + CV_WRAP bool isSubmatrix() const; + + /*! Returns the OpenCL buffer handle on which UMat operates on. + The UMat instance should be kept alive during the use of the handle to prevent the buffer to be + returned to the OpenCV buffer pool. + */ + CV_WRAP void* handle(int/*AccessFlag*/ accessFlags) const; + + // offset of the submatrix (or 0) + CV_PROP_RW size_t offset; +}; + +} // namespace cv diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index d3a4664d38..b1ea133edf 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -140,17 +140,18 @@ class GeneralInfo(): def fullName(self, isCPP=False): result = ".".join([self.fullClass(), self.name]) - return result if not isCPP else result.replace(".", "::") + return result if not isCPP else get_cname(result) def fullClass(self, isCPP=False): result = ".".join([f for f in [self.namespace] + self.classpath.split(".") if len(f)>0]) - return result if not isCPP else result.replace(".", "::") + return result if not isCPP else get_cname(result) class ConstInfo(GeneralInfo): - def __init__(self, decl, addedManually=False, namespaces=[]): + def __init__(self, decl, addedManually=False, namespaces=[], enumType=None): GeneralInfo.__init__(self, "const", decl, namespaces) - self.cname = self.name.replace(".", "::") + self.cname = get_cname(self.name) self.value = decl[1] + self.enumType = enumType self.addedManually = addedManually if self.namespace in namespaces_dict: self.name = '%s_%s' % (namespaces_dict[self.namespace], self.name) @@ -166,6 +167,25 @@ class ConstInfo(GeneralInfo): return True return False +def normalize_field_name(name): + return name.replace(".","_").replace("[","").replace("]","").replace("_getNativeObjAddr()","_nativeObj") + +def normalize_class_name(name): + return re.sub(r"^cv\.", "", name).replace(".", "_") + +def get_cname(name): + return name.replace(".", "::") + +def cast_from(t): + if t in type_dict and "cast_from" in type_dict[t]: + return 
type_dict[t]["cast_from"] + return t + +def cast_to(t): + if t in type_dict and "cast_to" in type_dict[t]: + return type_dict[t]["cast_to"] + return t + class ClassPropInfo(): def __init__(self, decl): # [f_ctype, f_name, '', '/RW'] self.ctype = decl[0] @@ -178,7 +198,7 @@ class ClassPropInfo(): class ClassInfo(GeneralInfo): def __init__(self, decl, namespaces=[]): # [ 'class/struct cname', ': base', [modlist] ] GeneralInfo.__init__(self, "class", decl, namespaces) - self.cname = self.name.replace(".", "::") + self.cname = get_cname(self.name) self.methods = [] self.methods_suffixes = {} self.consts = [] # using a list to save the occurrence order @@ -303,7 +323,7 @@ class ArgInfo(): class FuncInfo(GeneralInfo): def __init__(self, decl, namespaces=[]): # [ funcname, return_ctype, [modifiers], [args] ] GeneralInfo.__init__(self, "func", decl, namespaces) - self.cname = decl[0].replace(".", "::") + self.cname = get_cname(decl[0]) self.jname = self.name self.isconstructor = self.name == self.classname if "[" in self.name: @@ -403,13 +423,16 @@ class JavaWrapperGenerator(object): ) logging.info('ok: class %s, name: %s, base: %s', classinfo, name, classinfo.base) - def add_const(self, decl): # [ "const cname", val, [], [] ] - constinfo = ConstInfo(decl, namespaces=self.namespaces) + def add_const(self, decl, enumType=None): # [ "const cname", val, [], [] ] + constinfo = ConstInfo(decl, namespaces=self.namespaces, enumType=enumType) if constinfo.isIgnored(): logging.info('ignored: %s', constinfo) - elif not self.isWrapped(constinfo.classname): - logging.info('class not found: %s', constinfo) else: + if not self.isWrapped(constinfo.classname): + logging.info('class not found: %s', constinfo) + constinfo.name = constinfo.classname + '_' + constinfo.name + constinfo.classname = '' + ci = self.getClass(constinfo.classname) duplicate = ci.getConst(constinfo.name) if duplicate: @@ -421,6 +444,18 @@ class JavaWrapperGenerator(object): ci.addConst(constinfo) logging.info('ok: %s', constinfo) + def add_enum(self, decl): # [ "enum cname", "", [], [] ] + enumType = decl[0].rsplit(" ", 1)[1] + if enumType.endswith(""): + enumType = None + else: + ctype = normalize_class_name(enumType) + type_dict[ctype] = { "cast_from" : "int", "cast_to" : get_cname(enumType), "j_type" : "int", "jn_type" : "int", "jni_type" : "jint", "suffix" : "I" } + const_decls = decl[3] + + for decl in const_decls: + self.add_const(decl, enumType) + def add_func(self, decl): fi = FuncInfo(decl, namespaces=self.namespaces) classname = fi.classname or self.Module @@ -479,6 +514,9 @@ class JavaWrapperGenerator(object): self.add_class(decl) elif name.startswith("const"): self.add_const(decl) + elif name.startswith("enum"): + # enum + self.add_enum(decl) else: # function self.add_func(decl) @@ -518,7 +556,7 @@ class JavaWrapperGenerator(object): if self.isWrapped(t): return self.getClass(t).fullName(isCPP=True) else: - return t + return cast_from(t) def gen_func(self, ci, fi, prop_name=''): logging.info("%s", fi) @@ -551,7 +589,7 @@ class JavaWrapperGenerator(object): msg = "// Return type '%s' is not supported, skipping the function\n\n" % fi.ctype self.skipped_func_list.append(c_decl + "\n" + msg) j_code.write( " "*4 + msg ) - logging.warning("SKIP:" + c_decl.strip() + "\t due to RET type" + fi.ctype) + logging.warning("SKIP:" + c_decl.strip() + "\t due to RET type " + fi.ctype) return for a in fi.args: if a.ctype not in type_dict: @@ -563,7 +601,7 @@ class JavaWrapperGenerator(object): msg = "// Unknown type '%s' (%s), skipping the 
function\n\n" % (a.ctype, a.out or "I") self.skipped_func_list.append(c_decl + "\n" + msg) j_code.write( " "*4 + msg ) - logging.warning("SKIP:" + c_decl.strip() + "\t due to ARG type" + a.ctype + "/" + (a.out or "I")) + logging.warning("SKIP:" + c_decl.strip() + "\t due to ARG type " + a.ctype + "/" + (a.out or "I")) return self.ported_func_list.append(c_decl) @@ -642,7 +680,7 @@ class JavaWrapperGenerator(object): if "I" in a.out or not a.out or self.isWrapped(a.ctype): # input arg, pass by primitive fields for f in fields: jn_args.append ( ArgInfo([ f[0], a.name + f[1], "", [], "" ]) ) - jni_args.append( ArgInfo([ f[0], a.name + f[1].replace(".","_").replace("[","").replace("]","").replace("_getNativeObjAddr()","_nativeObj"), "", [], "" ]) ) + jni_args.append( ArgInfo([ f[0], a.name + normalize_field_name(f[1]), "", [], "" ]) ) if "O" in a.out and not self.isWrapped(a.ctype): # out arg, pass as double[] jn_args.append ( ArgInfo([ "double[]", "%s_out" % a.name, "", [], "" ]) ) jni_args.append ( ArgInfo([ "double[]", "%s_out" % a.name, "", [], "" ]) ) @@ -690,7 +728,7 @@ class JavaWrapperGenerator(object): " private static native $type $name($args);\n").substitute(\ type = type_dict[fi.ctype].get("jn_type", "double[]"), \ name = fi.jname + '_' + str(suffix_counter), \ - args = ", ".join(["%s %s" % (type_dict[a.ctype]["jn_type"], a.name.replace(".","_").replace("[","").replace("]","").replace("_getNativeObjAddr()","_nativeObj")) for a in jn_args]) + args = ", ".join(["%s %s" % (type_dict[a.ctype]["jn_type"], normalize_field_name(a.name)) for a in jn_args]) ) ); # java part: @@ -848,7 +886,7 @@ class JavaWrapperGenerator(object): if not a.out and not "jni_var" in type_dict[a.ctype]: # explicit cast to C type to avoid ambiguous call error on platforms (mingw) # where jni types are different from native types (e.g. 
jint is not the same as int) - jni_name = "(%s)%s" % (a.ctype, jni_name) + jni_name = "(%s)%s" % (cast_to(a.ctype), jni_name) if not a.ctype: # hidden jni_name = a.defval cvargs.append( type_dict[a.ctype].get("jni_name", jni_name) % {"n" : a.name}) @@ -922,11 +960,35 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname %s;\n\n""" % (",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in ci.private_consts]) ) if ci.consts: - logging.info("%s", ci.consts) - ci.j_code.write(""" + enumTypes = set(map(lambda c: c.enumType, ci.consts)) + grouped_consts = {enumType: [c for c in ci.consts if c.enumType == enumType] for enumType in enumTypes} + for typeName, consts in grouped_consts.items(): + logging.info("%s", consts) + if typeName: + typeName = typeName.rsplit(".", 1)[-1] +###################### Utilize Java enums ###################### +# ci.j_code.write(""" +# public enum {1} {{ +# {0}; +# +# private final int id; +# {1}(int id) {{ this.id = id; }} +# {1}({1} _this) {{ this.id = _this.id; }} +# public int getValue() {{ return id; }} +# }}\n\n""".format((",\n"+" "*8).join(["%s(%s)" % (c.name, c.value) for c in consts]), typeName) +# ) +################################################################ + ci.j_code.write(""" + // C++: enum {1} public static final int - %s;\n\n""" % (",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in ci.consts]) - ) + {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in consts]), typeName) + ) + else: + ci.j_code.write(""" + // C++: enum + public static final int + {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in consts])) + ) # methods for fi in ci.getAllMethods(): self.gen_func(ci, fi) @@ -1123,7 +1185,7 @@ if __name__ == "__main__": with open(srcfiles_fname) as f: srcfiles = [os.path.join(module_location, str(l).strip()) for l in f.readlines() if str(l).strip()] else: - re_bad = re.compile(r'(private|.inl.hpp$|_inl.hpp$|.details.hpp$|_winrt.hpp$|/cuda/)') + re_bad = re.compile(r'(private|.inl.hpp$|_inl.hpp$|.details.hpp$|_winrt.hpp$|/cuda/|/legacy/)') # .h files before .hpp h_files = [] hpp_files = [] diff --git a/modules/python/bindings/CMakeLists.txt b/modules/python/bindings/CMakeLists.txt index 7b2d5c5ed6..d96d7e88ff 100644 --- a/modules/python/bindings/CMakeLists.txt +++ b/modules/python/bindings/CMakeLists.txt @@ -26,15 +26,15 @@ foreach(m ${OPENCV_PYTHON_MODULES}) list(APPEND opencv_hdrs "${hdr}") endif() endforeach() + file(GLOB hdr ${OPENCV_MODULE_${m}_LOCATION}/misc/python/shadow*.hpp) + list(APPEND opencv_hdrs ${hdr}) file(GLOB userdef_hdrs ${OPENCV_MODULE_${m}_LOCATION}/misc/python/pyopencv*.hpp) list(APPEND opencv_userdef_hdrs ${userdef_hdrs}) endforeach(m) # header blacklist ocv_list_filterout(opencv_hdrs "modules/.*\\\\.h$") -ocv_list_filterout(opencv_hdrs "modules/core/.*/cuda") -ocv_list_filterout(opencv_hdrs "modules/cuda.*") -ocv_list_filterout(opencv_hdrs "modules/cudev") +ocv_list_filterout(opencv_hdrs "modules/core/.*/cuda/") ocv_list_filterout(opencv_hdrs "modules/core/.*/hal/") ocv_list_filterout(opencv_hdrs "modules/core/.*/opencl/") ocv_list_filterout(opencv_hdrs "modules/.+/utils/.*") @@ -43,7 +43,12 @@ ocv_list_filterout(opencv_hdrs "modules/.*_inl\\\\.h*") ocv_list_filterout(opencv_hdrs "modules/.*\\\\.details\\\\.h*") ocv_list_filterout(opencv_hdrs "modules/.*\\\\.private\\\\.h*") ocv_list_filterout(opencv_hdrs "modules/.*/private\\\\.h*") +ocv_list_filterout(opencv_hdrs "modules/.*/legacy/.*") ocv_list_filterout(opencv_hdrs 
"modules/.*/detection_based_tracker\\\\.hpp") # Conditional compilation +if(NOT HAVE_CUDA) + ocv_list_filterout(opencv_hdrs "modules/cuda.*") + ocv_list_filterout(opencv_hdrs "modules/cudev") +endif() set(cv2_generated_files "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_include.h" diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp index 17abbc5b18..c9adbbda23 100644 --- a/modules/python/src2/cv2.cpp +++ b/modules/python/src2/cv2.cpp @@ -11,6 +11,20 @@ #pragma warning(pop) #endif +template // TEnable is used for SFINAE checks +struct PyOpenCV_Converter +{ + //static inline bool to(PyObject* obj, T& p, const char* name); + //static inline PyObject* from(const T& src); +}; + +template static +bool pyopencv_to(PyObject* obj, T& p, const char* name = "") { return PyOpenCV_Converter::to(obj, p, name); } + +template static +PyObject* pyopencv_from(const T& src) { return PyOpenCV_Converter::from(src); } + + #define CV_PY_FN_WITH_KW_(fn, flags) (PyCFunction)(void*)(PyCFunctionWithKeywords)(fn), (flags) | METH_VARARGS | METH_KEYWORDS #define CV_PY_FN_NOARGS_(fn, flags) (PyCFunction)(fn), (flags) | METH_NOARGS @@ -28,6 +42,68 @@ # define CV_PYTHON_TYPE_HEAD_INIT() PyObject_HEAD_INIT(&PyType_Type) 0, #endif +#define CV_PY_TO_CLASS(TYPE) \ +template<> \ +bool pyopencv_to(PyObject* dst, TYPE& src, const char* name) \ +{ \ + if (!dst || dst == Py_None) \ + return true; \ + Ptr ptr; \ + \ + if (!pyopencv_to(dst, ptr, name)) return false; \ + src = *ptr; \ + return true; \ +} + +#define CV_PY_FROM_CLASS(TYPE) \ +template<> \ +PyObject* pyopencv_from(const TYPE& src) \ +{ \ + Ptr ptr(new TYPE()); \ + \ + *ptr = src; \ + return pyopencv_from(ptr); \ +} + +#define CV_PY_TO_CLASS_PTR(TYPE) \ +template<> \ +bool pyopencv_to(PyObject* dst, TYPE*& src, const char* name) \ +{ \ + if (!dst || dst == Py_None) \ + return true; \ + Ptr ptr; \ + \ + if (!pyopencv_to(dst, ptr, name)) return false; \ + src = ptr; \ + return true; \ +} + +#define CV_PY_FROM_CLASS_PTR(TYPE) \ +static PyObject* pyopencv_from(TYPE*& src) \ +{ \ + return pyopencv_from(Ptr(src)); \ +} + +#define CV_PY_TO_ENUM(TYPE) \ +template<> \ +bool pyopencv_to(PyObject* dst, TYPE& src, const char* name) \ +{ \ + if (!dst || dst == Py_None) \ + return true; \ + int underlying = 0; \ + \ + if (!pyopencv_to(dst, underlying, name)) return false; \ + src = static_cast(underlying); \ + return true; \ +} + +#define CV_PY_FROM_ENUM(TYPE) \ +template<> \ +PyObject* pyopencv_from(const TYPE& src) \ +{ \ + return pyopencv_from(static_cast(src)); \ +} + #include "pyopencv_generated_include.h" #include "opencv2/core/types_c.h" @@ -37,7 +113,7 @@ #include -static PyObject* opencv_error = 0; +static PyObject* opencv_error = NULL; static int failmsg(const char *fmt, ...) 
{ @@ -98,6 +174,12 @@ try \ } \ catch (const cv::Exception &e) \ { \ + PyObject_SetAttrString(opencv_error, "file", PyString_FromString(e.file.c_str())); \ + PyObject_SetAttrString(opencv_error, "func", PyString_FromString(e.func.c_str())); \ + PyObject_SetAttrString(opencv_error, "line", PyInt_FromLong(e.line)); \ + PyObject_SetAttrString(opencv_error, "code", PyInt_FromLong(e.code)); \ + PyObject_SetAttrString(opencv_error, "msg", PyString_FromString(e.msg.c_str())); \ + PyObject_SetAttrString(opencv_error, "err", PyString_FromString(e.err.c_str())); \ PyErr_SetString(opencv_error, e.what()); \ return 0; \ } @@ -113,6 +195,7 @@ typedef std::vector vector_size_t; typedef std::vector vector_Point; typedef std::vector vector_Point2f; typedef std::vector vector_Point3f; +typedef std::vector vector_Size; typedef std::vector vector_Vec2f; typedef std::vector vector_Vec3f; typedef std::vector vector_Vec4f; @@ -223,12 +306,6 @@ public: NumpyAllocator g_numpyAllocator; -template static -bool pyopencv_to(PyObject* obj, T& p, const char* name = ""); - -template static -PyObject* pyopencv_from(const T& src); - enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 }; // special case, when the converter needs full ArgInfo structure @@ -435,15 +512,6 @@ bool pyopencv_to(PyObject* o, Matx<_Tp, m, n>& mx, const char* name) return pyopencv_to(o, mx, ArgInfo(name, 0)); } -template -bool pyopencv_to(PyObject *o, Ptr& p, const char *name) -{ - if (!o || o == Py_None) - return true; - p = makePtr(); - return pyopencv_to(o, *p, name); -} - template<> PyObject* pyopencv_from(const Mat& m) { @@ -468,279 +536,39 @@ PyObject* pyopencv_from(const Matx<_Tp, m, n>& matx) } template -PyObject* pyopencv_from(const cv::Ptr& p) -{ - if (!p) - Py_RETURN_NONE; - return pyopencv_from(*p); -} - -typedef struct { - PyObject_HEAD - UMat* um; -} cv2_UMatWrapperObject; - -static bool PyObject_IsUMat(PyObject *o); - -// UMatWrapper init - try to map arguments from python to UMat constructors -static int UMatWrapper_init(PyObject* self_, PyObject *args, PyObject *kwds) +struct PyOpenCV_Converter< cv::Ptr > { - cv2_UMatWrapperObject* self = (cv2_UMatWrapperObject*)self_; - if (self == NULL) + static PyObject* from(const cv::Ptr& p) { - PyErr_SetString(PyExc_TypeError, "Internal error"); - return -1; + if (!p) + Py_RETURN_NONE; + return pyopencv_from(*p); } - self->um = NULL; + static bool to(PyObject *o, Ptr& p, const char *name) { - // constructor () - const char *kwlist[] = {NULL}; - if (PyArg_ParseTupleAndKeywords(args, kwds, "", (char**) kwlist)) { - self->um = new UMat(); - return 0; - } - PyErr_Clear(); - } - { - // constructor (rows, cols, type) - const char *kwlist[] = {"rows", "cols", "type", NULL}; - int rows, cols, type; - if (PyArg_ParseTupleAndKeywords(args, kwds, "iii", (char**) kwlist, &rows, &cols, &type)) { - self->um = new UMat(rows, cols, type); - return 0; - } - PyErr_Clear(); - } - { - // constructor (m, rowRange, colRange) - const char *kwlist[] = {"m", "rowRange", "colRange", NULL}; - PyObject *obj = NULL; - int y0 = -1, y1 = -1, x0 = -1, x1 = -1; - if (PyArg_ParseTupleAndKeywords(args, kwds, "O(ii)|(ii)", (char**) kwlist, &obj, &y0, &y1, &x0, &x1) && PyObject_IsUMat(obj)) { - UMat *um_other = ((cv2_UMatWrapperObject *) obj)->um; - Range rowRange(y0, y1); - Range colRange = (x0 >= 0 && x1 >= 0) ? 
Range(x0, x1) : Range::all(); - self->um = new UMat(*um_other, rowRange, colRange); - return 0; - } - PyErr_Clear(); - } - { - // constructor (m) - const char *kwlist[] = {"m", NULL}; - PyObject *obj = NULL; - if (PyArg_ParseTupleAndKeywords(args, kwds, "O", (char**) kwlist, &obj)) { - // constructor (UMat m) - if (PyObject_IsUMat(obj)) { - UMat *um_other = ((cv2_UMatWrapperObject *) obj)->um; - self->um = new UMat(*um_other); - return 0; - } - // python specific constructor from array like object - Mat m; - if (pyopencv_to(obj, m, ArgInfo("UMatWrapper.np_mat", 0))) { - self->um = new UMat(); - m.copyTo(*self->um); - return 0; - } - } - PyErr_Clear(); + if (!o || o == Py_None) + return true; + p = makePtr(); + return pyopencv_to(o, *p, name); } - PyErr_SetString(PyExc_TypeError, "no matching UMat constructor found/supported"); - return -1; -} - -static void UMatWrapper_dealloc(cv2_UMatWrapperObject* self) -{ - if (self->um) - delete self->um; -#if PY_MAJOR_VERSION >= 3 - Py_TYPE(self)->tp_free((PyObject*)self); -#else - self->ob_type->tp_free((PyObject*)self); -#endif -} - -// UMatWrapper.get() - returns numpy array by transferring UMat data to Mat and than wrapping it to numpy array -// (using numpy allocator - and so without unnecessary copy) -static PyObject * UMatWrapper_get(PyObject* self_, PyObject * /*args*/) -{ - cv2_UMatWrapperObject* self = (cv2_UMatWrapperObject*)self_; - if (self == NULL) - return failmsgp("Incorrect type of self (must be 'cv2_UMatWrapperObject')"); - Mat m; - m.allocator = &g_numpyAllocator; - self->um->copyTo(m); - - return pyopencv_from(m); -} - -// UMatWrapper.handle() - returns the OpenCL handle of the UMat object -static PyObject * UMatWrapper_handle(PyObject* self_, PyObject *args, PyObject *kwds) -{ - cv2_UMatWrapperObject* self = (cv2_UMatWrapperObject*)self_; - if (self == NULL) - return failmsgp("Incorrect type of self (must be 'cv2_UMatWrapperObject')"); - const char *kwlist[] = {"accessFlags", NULL}; - int accessFlags; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "i", (char**) kwlist, &accessFlags)) - return 0; - return PyLong_FromVoidPtr(self->um->handle(accessFlags)); -} - -// UMatWrapper.isContinuous() - returns true if the matrix data is continuous -static PyObject * UMatWrapper_isContinuous(PyObject* self_, PyObject * /*args*/) -{ - cv2_UMatWrapperObject* self = (cv2_UMatWrapperObject*)self_; - if (self == NULL) - return failmsgp("Incorrect type of self (must be 'cv2_UMatWrapperObject')"); - return PyBool_FromLong(self->um->isContinuous()); -} - -// UMatWrapper.isContinuous() - returns true if the matrix is a submatrix of another matrix -static PyObject * UMatWrapper_isSubmatrix(PyObject* self_, PyObject * /*args*/) -{ - cv2_UMatWrapperObject* self = (cv2_UMatWrapperObject*)self_; - if (self == NULL) - return failmsgp("Incorrect type of self (must be 'cv2_UMatWrapperObject')"); - return PyBool_FromLong(self->um->isSubmatrix()); -} - -// UMatWrapper.context() - returns the OpenCL context used by OpenCV UMat -static PyObject * UMatWrapper_context(PyObject* /*self_*/, PyObject * /*args*/) -{ - return PyLong_FromVoidPtr(cv::ocl::Context::getDefault().ptr()); -} - -// UMatWrapper.context() - returns the OpenCL queue used by OpenCV UMat -static PyObject * UMatWrapper_queue(PyObject* /*self_*/, PyObject * /*args*/) -{ - return PyLong_FromVoidPtr(cv::ocl::Queue::getDefault().ptr()); -} - -static PyObject * UMatWrapper_offset_getter(PyObject* self_, void*) -{ - cv2_UMatWrapperObject* self = (cv2_UMatWrapperObject*)self_; - if (self == NULL) - return 
failmsgp("Incorrect type of self (must be 'cv2_UMatWrapperObject')"); - return PyLong_FromSsize_t(self->um->offset); -} - -static PyMethodDef UMatWrapper_methods[] = { - {"get", CV_PY_FN_NOARGS(UMatWrapper_get), - "Returns numpy array" - }, - {"handle", CV_PY_FN_WITH_KW(UMatWrapper_handle), - "Returns UMat native handle" - }, - {"isContinuous", CV_PY_FN_NOARGS(UMatWrapper_isContinuous), - "Returns true if the matrix data is continuous" - }, - {"isSubmatrix", CV_PY_FN_NOARGS(UMatWrapper_isSubmatrix), - "Returns true if the matrix is a submatrix of another matrix" - }, - {"context", CV_PY_FN_NOARGS_(UMatWrapper_context, METH_STATIC), - "Returns OpenCL context handle" - }, - {"queue", CV_PY_FN_NOARGS_(UMatWrapper_queue, METH_STATIC), - "Returns OpenCL queue handle" - }, - {NULL, NULL, 0, NULL} /* Sentinel */ -}; - -static PyGetSetDef UMatWrapper_getset[] = { - {(char*) "offset", (getter) UMatWrapper_offset_getter, NULL, NULL, NULL}, - {NULL, NULL, NULL, NULL, NULL} /* Sentinel */ }; -static PyTypeObject cv2_UMatWrapperType = { -#if PY_MAJOR_VERSION >= 3 - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ -#endif - "cv2.UMat", /* tp_name */ - sizeof(cv2_UMatWrapperObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)UMatWrapper_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_reserved */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - "OpenCV 3 UMat wrapper. Used for T-API support.", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - UMatWrapper_methods, /* tp_methods */ - 0, /* tp_members */ - UMatWrapper_getset, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)UMatWrapper_init, /* tp_init */ - 0, /* tp_alloc */ - PyType_GenericNew, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - 0, /* tp_version_tag */ -#if PY_MAJOR_VERSION >= 3 - 0, /* tp_finalize */ -#endif -}; - -static bool PyObject_IsUMat(PyObject *o) { - return (o != NULL) && PyObject_TypeCheck(o, &cv2_UMatWrapperType); -} - -static bool pyopencv_to(PyObject* o, UMat& um, const ArgInfo info) { - if (PyObject_IsUMat(o)) { - um = *((cv2_UMatWrapperObject *) o)->um; +template<> +bool pyopencv_to(PyObject* obj, void*& ptr, const char* name) +{ + CV_UNUSED(name); + if (!obj || obj == Py_None) return true; - } - Mat m; - if (!pyopencv_to(o, m, info)) { + if (!PyLong_Check(obj)) return false; - } - - m.copyTo(um); - return true; + ptr = PyLong_AsVoidPtr(obj); + return ptr != NULL && !PyErr_Occurred(); } -template<> -bool pyopencv_to(PyObject* o, UMat& um, const char* name) +static PyObject* pyopencv_from(void*& ptr) { - return pyopencv_to(o, um, ArgInfo(name, 0)); -} - -template<> -PyObject* pyopencv_from(const UMat& m) { - PyObject *o = PyObject_CallObject((PyObject *) &cv2_UMatWrapperType, NULL); - *((cv2_UMatWrapperObject *) o)->um = m; - return o; + return PyLong_FromVoidPtr(ptr); } static bool pyopencv_to(PyObject *o, Scalar& s, const ArgInfo info) @@ -1483,6 +1311,19 @@ template<> struct pyopencvVecConverter } }; 
+template<> struct pyopencvVecConverter +{ + static bool to(PyObject* obj, std::vector& value, const ArgInfo info) + { + return pyopencv_to_generic_vec(obj, value, info); + } + + static PyObject* from(const std::vector& value) + { + return pyopencv_from_generic_vec(value); + } +}; + template<> struct pyopencvVecConverter { static bool to(PyObject* obj, std::vector& value, const ArgInfo info) @@ -1814,6 +1655,7 @@ static int convert_to_char(PyObject *o, char *dst, const char *name = "no_name") # pragma GCC diagnostic ignored "-Wmissing-field-initializers" #endif +#include "pyopencv_generated_enums.h" #include "pyopencv_custom_headers.h" #include "pyopencv_generated_types.h" #include "pyopencv_generated_funcs.h" @@ -1927,18 +1769,17 @@ void initcv2() PyDict_SetItemString(d, "__version__", PyString_FromString(CV_VERSION)); - opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, NULL); + PyObject *opencv_error_dict = PyDict_New(); + PyDict_SetItemString(opencv_error_dict, "file", Py_None); + PyDict_SetItemString(opencv_error_dict, "func", Py_None); + PyDict_SetItemString(opencv_error_dict, "line", Py_None); + PyDict_SetItemString(opencv_error_dict, "code", Py_None); + PyDict_SetItemString(opencv_error_dict, "msg", Py_None); + PyDict_SetItemString(opencv_error_dict, "err", Py_None); + opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, opencv_error_dict); + Py_DECREF(opencv_error_dict); PyDict_SetItemString(d, "error", opencv_error); -//Registering UMatWrapper python class in cv2 module: - if (PyType_Ready(&cv2_UMatWrapperType) < 0) -#if PY_MAJOR_VERSION >= 3 - return NULL; -#else - return; -#endif - - #if PY_MAJOR_VERSION >= 3 #define PUBLISH_OBJECT(name, type) Py_INCREF(&type);\ PyModule_AddObject(m, name, (PyObject *)&type); @@ -1949,8 +1790,6 @@ void initcv2() PyModule_AddObject(m, name, (PyObject *)&type); #endif - PUBLISH_OBJECT("UMat", cv2_UMatWrapperType); - #include "pyopencv_generated_type_publish.h" #define PUBLISH(I) PyDict_SetItemString(d, #I, PyInt_FromLong(I)) diff --git a/modules/python/src2/gen2.py b/modules/python/src2/gen2.py index d8bce41ff3..747f507934 100755 --- a/modules/python/src2/gen2.py +++ b/modules/python/src2/gen2.py @@ -10,19 +10,23 @@ if sys.version_info[0] >= 3: else: from cStringIO import StringIO +forbidden_arg_types = ["void*"] + ignored_arg_types = ["RNG*"] +pass_by_val_types = ["Point*", "Point2f*", "Rect*", "String*", "double*", "float*", "int*"] + gen_template_check_self = Template(""" $cname* _self_ = NULL; if(PyObject_TypeCheck(self, &pyopencv_${name}_Type)) _self_ = ${amp}((pyopencv_${name}_t*)self)->v${get}; - if (_self_ == NULL) + if (!_self_) return failmsgp("Incorrect type of self (must be '${name}' or its derivative)"); """) gen_template_check_self_algo = Template(""" $cname* _self_ = NULL; if(PyObject_TypeCheck(self, &pyopencv_${name}_Type)) _self_ = dynamic_cast<$cname*>(${amp}((pyopencv_${name}_t*)self)->v.get()); - if (_self_ == NULL) + if (!_self_) return failmsgp("Incorrect type of self (must be '${name}' or its derivative)"); """) @@ -68,27 +72,40 @@ static void pyopencv_${name}_dealloc(PyObject* self) PyObject_Del(self); } -template<> PyObject* pyopencv_from(const ${cname}& r) +template<> +struct PyOpenCV_Converter< ${cname} > { - pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type); - new (&m->v) ${cname}(r); //Copy constructor - return (PyObject*)m; -} + static PyObject* from(const ${cname}& r) + { + pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type); + 
new (&m->v) ${cname}(r); //Copy constructor + return (PyObject*)m; + } -template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name) -{ - if( src == NULL || src == Py_None ) - return true; - if(!PyObject_TypeCheck(src, &pyopencv_${name}_Type)) + static bool to(PyObject* src, ${cname}& dst, const char* name) { + if(!src || src == Py_None) + return true; + if(PyObject_TypeCheck(src, &pyopencv_${name}_Type)) + { + dst = ((pyopencv_${name}_t*)src)->v; + return true; + } failmsg("Expected ${cname} for argument '%%s'", name); return false; } - dst = ((pyopencv_${name}_t*)src)->v; - return true; -} +}; """ % head_init_str) +gen_template_mappable = Template(""" + { + ${mappable} _src; + if (pyopencv_to(src, _src, name)) + { + return cv_mappable_to(_src, dst); + } + } +""") gen_template_type_decl = Template(""" struct pyopencv_${name}_t @@ -110,26 +127,31 @@ static void pyopencv_${name}_dealloc(PyObject* self) PyObject_Del(self); } -template<> PyObject* pyopencv_from(const Ptr<${cname}>& r) +template<> +struct PyOpenCV_Converter< Ptr<${cname}> > { - pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type); - new (&(m->v)) Ptr<$cname1>(); // init Ptr with placement new - m->v = r; - return (PyObject*)m; -} + static PyObject* from(const Ptr<${cname}>& r) + { + pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type); + new (&(m->v)) Ptr<$cname1>(); // init Ptr with placement new + m->v = r; + return (PyObject*)m; + } -template<> bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name) -{ - if( src == NULL || src == Py_None ) - return true; - if(!PyObject_TypeCheck(src, &pyopencv_${name}_Type)) + static bool to(PyObject* src, Ptr<${cname}>& dst, const char* name) { + if(!src || src == Py_None) + return true; + if(PyObject_TypeCheck(src, &pyopencv_${name}_Type)) + { + dst = ((pyopencv_${name}_t*)src)->v.dynamicCast<${cname}>(); + return true; + } + ${mappable_code} failmsg("Expected ${cname} for argument '%%s'", name); return false; } - dst = ((pyopencv_${name}_t*)src)->v.dynamicCast<${cname}>(); - return true; -} +}; """ % head_init_str) @@ -192,7 +214,7 @@ gen_template_get_prop_algo = Template(""" static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure) { $cname* _self_ = dynamic_cast<$cname*>(p->v.get()); - if (_self_ == NULL) + if (!_self_) return failmsgp("Incorrect type of object (must be '${name}' or its derivative)"); return pyopencv_from(_self_${access}${member}); } @@ -201,7 +223,7 @@ static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *clo gen_template_set_prop = Template(""" static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure) { - if (value == NULL) + if (!value) { PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute"); return -1; @@ -213,13 +235,13 @@ static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value gen_template_set_prop_algo = Template(""" static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure) { - if (value == NULL) + if (!value) { PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute"); return -1; } $cname* _self_ = dynamic_cast<$cname*>(p->v.get()); - if (_self_ == NULL) + if (!_self_) { failmsgp("Incorrect type of object (must be '${name}' or its derivative)"); return -1; @@ -265,6 +287,7 @@ class ClassInfo(object): self.isalgorithm = False self.methods = {} self.props = [] + self.mappables = [] self.consts 
= {} self.base = None self.constructor = None @@ -402,7 +425,7 @@ class ArgInfo(object): self.py_outputarg = False def isbig(self): - return self.tp == "Mat" or self.tp == "vector_Mat"\ + return self.tp == "Mat" or self.tp == "vector_Mat" or self.tp == "cuda::GpuMat"\ or self.tp == "UMat" or self.tp == "vector_UMat" # or self.tp.startswith("vector") def crepr(self): @@ -410,10 +433,11 @@ class ArgInfo(object): class FuncVariant(object): - def __init__(self, classname, name, decl, isconstructor): + def __init__(self, classname, name, decl, isconstructor, isphantom=False): self.classname = classname self.name = self.wname = name self.isconstructor = isconstructor + self.isphantom = isphantom self.docstring = decl[5] @@ -461,6 +485,7 @@ class FuncVariant(object): argno += 1 if a.name in self.array_counters: continue + assert not a.tp in forbidden_arg_types, 'Forbidden type "{}" for argument "{}" in "{}" ("{}")'.format(a.tp, a.name, self.name, self.classname) if a.tp in ignored_arg_types: continue if a.returnarg: @@ -520,17 +545,17 @@ class FuncVariant(object): class FuncInfo(object): - def __init__(self, classname, name, cname, isconstructor, namespace, isclassmethod): + def __init__(self, classname, name, cname, isconstructor, namespace, is_static): self.classname = classname self.name = name self.cname = cname self.isconstructor = isconstructor self.namespace = namespace - self.isclassmethod = isclassmethod + self.is_static = is_static self.variants = [] - def add_variant(self, decl): - self.variants.append(FuncVariant(self.classname, self.name, decl, self.isconstructor)) + def add_variant(self, decl, isphantom=False): + self.variants.append(FuncVariant(self.classname, self.name, decl, self.isconstructor, isphantom)) def get_wrapper_name(self): name = self.name @@ -541,8 +566,8 @@ class FuncInfo(object): else: classname = "" - if self.isclassmethod: - name += "_cls" + if self.is_static: + name += "_static" return "pyopencv_" + self.namespace.replace('.','_') + '_' + classname + name @@ -601,7 +626,7 @@ class FuncInfo(object): return Template(' {"$py_funcname", CV_PY_FN_WITH_KW_($wrap_funcname, $flags), "$py_docstring"},\n' ).substitute(py_funcname = self.variants[0].wname, wrap_funcname=self.get_wrapper_name(), - flags = 'METH_CLASS' if self.isclassmethod else '0', py_docstring = full_docstring) + flags = 'METH_STATIC' if self.is_static else '0', py_docstring = full_docstring) def gen_code(self, codegen): all_classes = codegen.classes @@ -618,7 +643,7 @@ class FuncInfo(object): selfinfo = all_classes[self.classname] if not self.isconstructor: amp = "&" if selfinfo.issimple else "" - if self.isclassmethod: + if self.is_static: pass elif selfinfo.isalgorithm: code += gen_template_check_self_algo.substitute(name=selfinfo.name, cname=selfinfo.cname, amp=amp) @@ -638,6 +663,9 @@ class FuncInfo(object): all_cargs = [] parse_arglist = [] + if v.isphantom and ismethod and not self.is_static: + code_args += "_self_" + # declare all the C function arguments, # add necessary conversions from Python objects to code_cvt_list, # form the function/method call, @@ -646,7 +674,7 @@ class FuncInfo(object): if a.tp in ignored_arg_types: defval = a.defval if not defval and a.tp.endswith("*"): - defval = 0 + defval = "0" assert defval if not code_args.endswith("("): code_args += ", " @@ -656,15 +684,15 @@ class FuncInfo(object): tp1 = tp = a.tp amp = "" defval0 = "" - if tp.endswith("*"): + if tp in pass_by_val_types: tp = tp1 = tp[:-1] amp = "&" if tp.endswith("*"): defval0 = "0" tp1 = tp.replace("*", "_ptr") 
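The argument handling reworked above replaces the old rule of stripping any trailing '*': a void* argument now aborts generation outright, RNG* remains ignored, and only the whitelisted pass_by_val_types are unwrapped into value/output parameters. A condensed sketch of that classification; the list names mirror gen2.py, but this is a simplification rather than the generator's exact control flow:

forbidden_arg_types = ["void*"]
ignored_arg_types = ["RNG*"]
pass_by_val_types = ["Point*", "Point2f*", "Rect*", "String*", "double*", "float*", "int*"]

def classify_arg(tp):
    if tp in forbidden_arg_types:
        # the generator raises an assertion and stops for these types
        raise AssertionError('Forbidden type "%s"' % tp)
    if tp in ignored_arg_types:
        return "ignored"      # never exposed to Python
    if tp in pass_by_val_types:
        return tp[:-1]        # e.g. "double*" is wrapped as an output "double"
    return tp                 # everything else is converted as declared

print(classify_arg("double*"))   # -> "double"
print(classify_arg("RNG*"))      # -> "ignored"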
- if tp1.endswith("*"): - print("Error: type with star: a.tp=%s, tp=%s, tp1=%s" % (a.tp, tp, tp1)) - sys.exit(-1) + tp_candidates = [a.tp, normalize_class_name(self.namespace + "." + a.tp)] + if any(tp in codegen.enums.keys() for tp in tp_candidates): + defval0 = "static_cast<%s>(%d)" % (a.tp, 0) amapping = simple_argtype_mapping.get(tp, (tp, "O", defval0)) parse_name = a.name @@ -686,6 +714,9 @@ class FuncInfo(object): if "UMat" in tp: if "Mat" in defval and "UMat" not in defval: defval = defval.replace("Mat", "UMat") + if "cuda::GpuMat" in tp: + if "Mat" in defval and "GpuMat" not in defval: + defval = defval.replace("Mat", "cuda::GpuMat") # "tp arg = tp();" is equivalent to "tp arg;" in the case of complex types if defval == tp + "()" and amapping[1] == "O": defval = "" @@ -712,13 +743,15 @@ class FuncInfo(object): code_prelude = templ_prelude.substitute(name=selfinfo.name, cname=selfinfo.cname) code_fcall = templ.substitute(name=selfinfo.name, cname=selfinfo.cname, args=code_args) + if v.isphantom: + code_fcall = code_fcall.replace("new " + selfinfo.cname, self.cname.replace("::", "_")) else: code_prelude = "" code_fcall = "" if v.rettype: code_decl += " " + v.rettype + " retval;\n" code_fcall += "retval = " - if ismethod and not self.isclassmethod: + if not v.isphantom and ismethod and not self.is_static: code_fcall += "_self_->" + self.cname else: code_fcall += self.cname @@ -754,7 +787,7 @@ class FuncInfo(object): parse_arglist = ", ".join(["&" + all_cargs[argno][1] for aname, argno in v.py_arglist]), code_cvt = " &&\n ".join(code_cvt_list)) else: - code_parse = "if(PyObject_Size(args) == 0 && (kw == NULL || PyObject_Size(kw) == 0))" + code_parse = "if(PyObject_Size(args) == 0 && (!kw || PyObject_Size(kw) == 0))" if len(v.py_outlist) == 0: code_ret = "Py_RETURN_NONE" @@ -799,7 +832,7 @@ class FuncInfo(object): #if dump: pprint(vars(classinfo)) if self.isconstructor: py_name = 'cv.' 
+ classinfo.wname - elif self.isclassmethod: + elif self.is_static: py_name = '.'.join([self.namespace, classinfo.sname + '_' + self.variants[0].wname]) else: cname = classinfo.cname + '::' + cname @@ -833,7 +866,9 @@ class PythonWrapperGenerator(object): self.classes = {} self.namespaces = {} self.consts = {} + self.enums = {} self.code_include = StringIO() + self.code_enums = StringIO() self.code_types = StringIO() self.code_funcs = StringIO() self.code_type_reg = StringIO() @@ -890,6 +925,18 @@ class PythonWrapperGenerator(object): py_signatures.append(dict(name=py_name, value=value)) #print(cname + ' => ' + str(py_name) + ' (value=' + value + ')') + def add_enum(self, name, decl): + wname = normalize_class_name(name) + if wname.endswith(""): + wname = None + else: + self.enums[wname] = name + const_decls = decl[3] + + for decl in const_decls: + name = decl[0] + self.add_const(name.replace("const ", "").strip(), decl) + def add_func(self, decl): namespace, classes, barename = self.split_decl_name(decl[0]) cname = "::".join(namespace+classes+[barename]) @@ -902,35 +949,46 @@ class PythonWrapperGenerator(object): namespace = '.'.join(namespace) isconstructor = name == bareclassname - isclassmethod = False + is_static = False + isphantom = False + mappable = None for m in decl[2]: if m == "/S": - isclassmethod = True + is_static = True + elif m == "/phantom": + isphantom = True + cname = cname.replace("::", "_") elif m.startswith("="): name = m[1:] + elif m.startswith("/mappable="): + mappable = m[10:] + self.classes[classname].mappables.append(mappable) + return + if isconstructor: name = "_".join(classes[:-1]+[name]) - if isclassmethod: + if is_static: # Add it as a method to the class func_map = self.classes[classname].methods - func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, isclassmethod)) - func.add_variant(decl) + func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, is_static)) + func.add_variant(decl, isphantom) # Add it as global function g_name = "_".join(classes+[name]) func_map = self.namespaces.setdefault(namespace, Namespace()).funcs func = func_map.setdefault(g_name, FuncInfo("", g_name, cname, isconstructor, namespace, False)) - func.add_variant(decl) + func.add_variant(decl, isphantom) else: if classname and not isconstructor: - cname = barename + if not isphantom: + cname = barename func_map = self.classes[classname].methods else: func_map = self.namespaces.setdefault(namespace, Namespace()).funcs - func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, isclassmethod)) - func.add_variant(decl) + func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, is_static)) + func.add_variant(decl, isphantom) if classname and isconstructor: self.classes[classname].constructor = func @@ -949,10 +1007,10 @@ class PythonWrapperGenerator(object): self.code_ns_reg.write('static ConstDef consts_%s[] = {\n'%wname) for name, cname in sorted(ns.consts.items()): - self.code_ns_reg.write(' {"%s", %s},\n'%(name, cname)) + self.code_ns_reg.write(' {"%s", static_cast(%s)},\n'%(name, cname)) compat_name = re.sub(r"([a-z])([A-Z])", r"\1_\2", name).upper() if name != compat_name: - self.code_ns_reg.write(' {"%s", %s},\n'%(compat_name, cname)) + self.code_ns_reg.write(' {"%s", static_cast(%s)},\n'%(compat_name, cname)) self.code_ns_reg.write(' {NULL, 0}\n};\n\n') def gen_namespaces_reg(self): @@ -963,6 +1021,21 @@ class 
PythonWrapperGenerator(object): self.code_ns_reg.write(' init_submodule(root, MODULESTR"%s", methods_%s, consts_%s);\n' % (ns_name[2:], wname, wname)) self.code_ns_reg.write('};\n') + def gen_enum_reg(self, enum_name): + name_seg = enum_name.split(".") + is_enum_class = False + if len(name_seg) >= 2 and name_seg[-1] == name_seg[-2]: + enum_name = ".".join(name_seg[:-1]) + is_enum_class = True + + wname = normalize_class_name(enum_name) + cname = enum_name.replace(".", "::") + + code = "" + if re.sub(r"^cv\.", "", enum_name) != wname: + code += "typedef enum {0} {1};\n".format(cname, wname) + code += "CV_PY_FROM_ENUM({0});\nCV_PY_TO_ENUM({0});\n\n".format(wname) + self.code_enums.write(code) def save(self, path, name, buf): with open(path + "/" + name, "wt") as f: @@ -975,14 +1048,15 @@ class PythonWrapperGenerator(object): def gen(self, srcfiles, output_path): self.clear() - self.parser = hdr_parser.CppHeaderParser(generate_umat_decls=True) + self.parser = hdr_parser.CppHeaderParser(generate_umat_decls=True, generate_gpumat_decls=False) # step 1: scan the headers and build more descriptive maps of classes, consts, functions for hdr in srcfiles: decls = self.parser.parse(hdr) if len(decls) == 0: continue - self.code_include.write( '#include "{0}"\n'.format(hdr[hdr.rindex('opencv2/'):]) ) + if hdr.find('opencv2/') >= 0: #Avoid including the shadow files + self.code_include.write( '#include "{0}"\n'.format(hdr[hdr.rindex('opencv2/'):]) ) for decl in decls: name = decl[0] if name.startswith("struct") or name.startswith("class"): @@ -994,6 +1068,9 @@ class PythonWrapperGenerator(object): elif name.startswith("const"): # constant self.add_const(name.replace("const ", "").strip(), decl) + elif name.startswith("enum"): + # enum + self.add_enum(name.rsplit(" ", 1)[1], decl) else: # function self.add_func(decl) @@ -1043,8 +1120,11 @@ class PythonWrapperGenerator(object): templ = gen_template_simple_type_decl else: templ = gen_template_type_decl + mappable_code = "\n".join([ + gen_template_mappable.substitute(cname=classinfo.cname, mappable=mappable) + for mappable in classinfo.mappables]) self.code_types.write(templ.substitute(name=name, wname=classinfo.wname, cname=classinfo.cname, sname=classinfo.sname, - cname1=("cv::Algorithm" if classinfo.isalgorithm else classinfo.cname))) + cname1=("cv::Algorithm" if classinfo.isalgorithm else classinfo.cname), mappable_code=mappable_code)) # register classes in the same order as they have been declared. # this way, base classes will be registered in Python before their derivatives. @@ -1070,7 +1150,13 @@ class PythonWrapperGenerator(object): self.gen_namespace(ns_name) self.gen_namespaces_reg() - # step 4: generate the code for constants + # step 4: generate the code for enum types + enumlist = list(self.enums.values()) + enumlist.sort() + for name in enumlist: + self.gen_enum_reg(name) + + # step 5: generate the code for constants constlist = list(self.consts.items()) constlist.sort() for name, constinfo in constlist: @@ -1079,6 +1165,7 @@ class PythonWrapperGenerator(object): # That's it. 
Now save all the files self.save(output_path, "pyopencv_generated_include.h", self.code_include) self.save(output_path, "pyopencv_generated_funcs.h", self.code_funcs) + self.save(output_path, "pyopencv_generated_enums.h", self.code_enums) self.save(output_path, "pyopencv_generated_types.h", self.code_types) self.save(output_path, "pyopencv_generated_type_reg.h", self.code_type_reg) self.save(output_path, "pyopencv_generated_ns_reg.h", self.code_ns_reg) diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py index 21faf1bacd..73b5ef4ed6 100755 --- a/modules/python/src2/hdr_parser.py +++ b/modules/python/src2/hdr_parser.py @@ -6,6 +6,7 @@ import os, sys, re, string, io # the list only for debugging. The real list, used in the real OpenCV build, is specified in CMakeLists.txt opencv_hdr_list = [ "../../core/include/opencv2/core.hpp", +"../../core/include/opencv2/core/mat.hpp", "../../core/include/opencv2/core/ocl.hpp", "../../flann/include/opencv2/flann/miniflann.hpp", "../../ml/include/opencv2/ml.hpp", @@ -32,8 +33,9 @@ original_return_type is None if the original_return_type is the same as return_v class CppHeaderParser(object): - def __init__(self, generate_umat_decls=False): + def __init__(self, generate_umat_decls=False, generate_gpumat_decls=False): self._generate_umat_decls = generate_umat_decls + self._generate_gpumat_decls = generate_gpumat_decls self.BLOCK_TYPE = 0 self.BLOCK_NAME = 1 @@ -375,11 +377,9 @@ class CppHeaderParser(object): decl[2].append("/A") if bool(re.match(r".*\)\s*const(\s*=\s*0)?", decl_str)): decl[2].append("/C") - if "virtual" in decl_str: - print(decl_str) return decl - def parse_func_decl(self, decl_str, use_umat=False, docstring=""): + def parse_func_decl(self, decl_str, mat="Mat", docstring=""): """ Parses the function or method declaration in the form: [([CV_EXPORTS] ) | CVAPI(rettype)] @@ -392,8 +392,7 @@ class CppHeaderParser(object): """ if self.wrap_mode: - if not (("CV_EXPORTS_AS" in decl_str) or ("CV_EXPORTS_W" in decl_str) or \ - ("CV_WRAP" in decl_str) or ("CV_WRAP_AS" in decl_str)): + if not (("CV_EXPORTS_AS" in decl_str) or ("CV_EXPORTS_W" in decl_str) or ("CV_WRAP" in decl_str)): return [] # ignore old API in the documentation check (for now) @@ -413,6 +412,16 @@ class CppHeaderParser(object): arg, npos3 = self.get_macro_arg(decl_str, npos) func_modlist.append("="+arg) decl_str = decl_str[:npos] + decl_str[npos3+1:] + npos = decl_str.find("CV_WRAP_PHANTOM") + if npos >= 0: + decl_str, _ = self.get_macro_arg(decl_str, npos) + func_modlist.append("/phantom") + npos = decl_str.find("CV_WRAP_MAPPABLE") + if npos >= 0: + mappable, npos3 = self.get_macro_arg(decl_str, npos) + func_modlist.append("/mappable="+mappable) + classname = top[1] + return ['.'.join([classname, classname]), None, func_modlist, [], None, None] virtual_method = False pure_virtual_method = False @@ -526,8 +535,6 @@ class CppHeaderParser(object): t, npos = self.find_next_token(decl_str, ["(", ")", ",", "<", ">"], npos) if not t: print("Error: no closing ')' at %d" % (self.lineno,)) - print(decl_str) - print(decl_str[arg_start:]) sys.exit(-1) if t == "<": angle_balance += 1 @@ -563,8 +570,6 @@ class CppHeaderParser(object): a = a[:eqpos].strip() arg_type, arg_name, modlist, argno = self.parse_arg(a, argno) if self.wrap_mode: - mat = "UMat" if use_umat else "Mat" - # TODO: Vectors should contain UMat, but this is not very easy to support and not very needed vector_mat = "vector_{}".format("Mat") vector_mat_template = "vector<{}>".format("Mat") @@ -629,8 
+634,10 @@ class CppHeaderParser(object): block_type, block_name = b[self.BLOCK_TYPE], b[self.BLOCK_NAME] if block_type in ["file", "enum"]: continue - if block_type not in ["struct", "class", "namespace"]: - print("Error at %d: there are non-valid entries in the current block stack " % (self.lineno, self.block_stack)) + if block_type in ["enum struct", "enum class"] and block_name == name: + continue + if block_type not in ["struct", "class", "namespace", "enum struct", "enum class"]: + print("Error at %d: there are non-valid entries in the current block stack %s" % (self.lineno, self.block_stack)) sys.exit(-1) if block_name and (block_type == "namespace" or not qualified_name): n += block_name + "." @@ -639,7 +646,7 @@ class CppHeaderParser(object): n = "cv.Algorithm" return n - def parse_stmt(self, stmt, end_token, use_umat=False, docstring=""): + def parse_stmt(self, stmt, end_token, mat="Mat", docstring=""): """ parses the statement (ending with ';' or '}') or a block head (ending with '{') @@ -706,20 +713,19 @@ class CppHeaderParser(object): decl[1] = ": " + ", ".join([self.get_dotted_name(b).replace(".","::") for b in bases]) return stmt_type, classname, True, decl - if stmt.startswith("enum"): - return "enum", "", True, None - - if stmt.startswith("namespace"): - stmt_list = stmt.split() + if stmt.startswith("enum") or stmt.startswith("namespace"): + stmt_list = stmt.rsplit(" ", 1) if len(stmt_list) < 2: stmt_list.append("") return stmt_list[0], stmt_list[1], True, None + if stmt.startswith("extern") and "\"C\"" in stmt: return "namespace", "", True, None - if end_token == "}" and context == "enum": + if end_token == "}" and context.startswith("enum"): decl = self.parse_enum(stmt) - return "enum", "", False, decl + name = stack_top[self.BLOCK_NAME] + return context, name, False, decl if end_token == ";" and stmt.startswith("typedef"): # TODO: handle typedef's more intelligently @@ -731,7 +737,7 @@ class CppHeaderParser(object): # since we filtered off the other places where '(' can normally occur: # - code blocks # - function pointer typedef's - decl = self.parse_func_decl(stmt, use_umat=use_umat, docstring=docstring) + decl = self.parse_func_decl(stmt, mat=mat, docstring=docstring) # we return parse_flag == False to prevent the parser to look inside function/method bodies # (except for tracking the nested blocks) return stmt_type, "", False, decl @@ -827,7 +833,7 @@ class CppHeaderParser(object): l = l[pos+2:] state = SCAN - if l.startswith('CV__'): # just ignore this lines + if l.startswith('CV__') or l.startswith('__CV_'): # just ignore these lines #print('IGNORE: ' + l) state = SCAN continue @@ -841,11 +847,17 @@ class CppHeaderParser(object): if not token: block_head += " " + l - break + block_head = block_head.strip() + if len(block_head) > 0 and block_head[-1] == ')' and block_head.startswith('CV_ENUM_FLAGS('): + l = '' + token = ';' + else: + break if token == "//": block_head += " " + l[:pos] - break + l = '' + continue if token == "/*": block_head += " " + l[:pos] @@ -896,20 +908,29 @@ class CppHeaderParser(object): docstring = docstring.strip() stmt_type, name, parse_flag, decl = self.parse_stmt(stmt, token, docstring=docstring) if decl: - if stmt_type == "enum": - for d in decl: - decls.append(d) + if stmt_type.startswith("enum"): + decls.append([stmt_type + " " + self.get_dotted_name(name), "", [], decl, None, ""]) else: decls.append(decl) + if self._generate_gpumat_decls and "cv.cuda." 
in decl[0]: + # If function takes as one of arguments Mat or vector - we want to create the + # same declaration working with GpuMat (this is important for T-Api access) + args = decl[3] + has_mat = len(list(filter(lambda x: x[0] in {"Mat", "vector_Mat"}, args))) > 0 + if has_mat: + _, _, _, gpumat_decl = self.parse_stmt(stmt, token, mat="cuda::GpuMat", docstring=docstring) + decls.append(gpumat_decl) + if self._generate_umat_decls: # If function takes as one of arguments Mat or vector - we want to create the # same declaration working with UMat (this is important for T-Api access) args = decl[3] has_mat = len(list(filter(lambda x: x[0] in {"Mat", "vector_Mat"}, args))) > 0 if has_mat: - _, _, _, umat_decl = self.parse_stmt(stmt, token, use_umat=True, docstring=docstring) + _, _, _, umat_decl = self.parse_stmt(stmt, token, mat="UMat", docstring=docstring) decls.append(umat_decl) + docstring = "" if stmt_type == "namespace": chunks = [block[1] for block in self.block_stack if block[0] == 'namespace'] + [name] @@ -952,7 +973,7 @@ class CppHeaderParser(object): print() if __name__ == '__main__': - parser = CppHeaderParser(generate_umat_decls=True) + parser = CppHeaderParser(generate_umat_decls=True, generate_gpumat_decls=False) decls = [] for hname in opencv_hdr_list: decls += parser.parse(hname) diff --git a/modules/stitching/misc/python/pyopencv_stitching.hpp b/modules/stitching/misc/python/pyopencv_stitching.hpp index e5d0cd2481..8456ad7b91 100644 --- a/modules/stitching/misc/python/pyopencv_stitching.hpp +++ b/modules/stitching/misc/python/pyopencv_stitching.hpp @@ -1,9 +1,3 @@ #ifdef HAVE_OPENCV_STITCHING typedef Stitcher::Status Status; - -template<> -PyObject* pyopencv_from(const Status& value) -{ - return PyInt_FromLong(value); -} -#endif \ No newline at end of file +#endif diff --git a/modules/videoio/misc/python/pyopencv_videoio.hpp b/modules/videoio/misc/python/pyopencv_videoio.hpp index 453a57a126..8ea62ebd73 100644 --- a/modules/videoio/misc/python/pyopencv_videoio.hpp +++ b/modules/videoio/misc/python/pyopencv_videoio.hpp @@ -1,33 +1,6 @@ #ifdef HAVE_OPENCV_VIDEOIO typedef std::vector vector_VideoCaptureAPIs; -template<> -bool pyopencv_to(PyObject *o, cv::VideoCaptureAPIs &v, const char *name) -{ - (void)name; - v = CAP_ANY; - if (!o || o == Py_None) - return false; - else if (PyLong_Check(o)) - { - v = VideoCaptureAPIs((int64)PyLong_AsLongLong(o)); - return true; - } - else if (PyInt_Check(o)) - { - v = VideoCaptureAPIs((int64)PyInt_AS_LONG(o)); - return true; - } - else - return false; -} - -template<> -PyObject* pyopencv_from(const cv::VideoCaptureAPIs &v) -{ - return pyopencv_from((int)(v)); -} - template<> struct pyopencvVecConverter { static bool to(PyObject* obj, std::vector& value, const ArgInfo info) From 030e955db7fc89de691dd8bcfa82d0c4881f28ab Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 1 Mar 2019 20:52:35 +0000 Subject: [PATCH 09/19] python: support Python list for cv::Range --- modules/python/src2/cv2.cpp | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp index c9adbbda23..fbe2bbca51 100644 --- a/modules/python/src2/cv2.cpp +++ b/modules/python/src2/cv2.cpp @@ -819,6 +819,40 @@ bool pyopencv_to(PyObject* obj, Range& r, const char* name) CV_UNUSED(name); if(!obj || obj == Py_None) return true; + while (PySequence_Check(obj)) + { + PyObject *fi = PySequence_Fast(obj, name); + if (fi == NULL) + break; + if (2 != PySequence_Fast_GET_SIZE(fi)) + { + failmsg("Range 
value for argument '%s' is longer than 2", name); + Py_DECREF(fi); + return false; + } + { + PyObject *item = PySequence_Fast_GET_ITEM(fi, 0); + if (PyInt_Check(item)) { + r.start = (int)PyInt_AsLong(item); + } else { + failmsg("Range.start value for argument '%s' is not integer", name); + Py_DECREF(fi); + break; + } + } + { + PyObject *item = PySequence_Fast_GET_ITEM(fi, 1); + if (PyInt_Check(item)) { + r.end = (int)PyInt_AsLong(item); + } else { + failmsg("Range.end value for argument '%s' is not integer", name); + Py_DECREF(fi); + break; + } + } + Py_DECREF(fi); + return true; + } if(PyObject_Size(obj) == 0) { r = Range::all(); From 0a98bc0ee9921c6bfee71ebe206c7911fc7abefa Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 1 Mar 2019 21:32:03 +0000 Subject: [PATCH 10/19] java: avoid enum values with references on other enums declaration order is not fixed Error message: Core.java:97: error: illegal forward reference Hamming_normType = NORM_HAMMING, --- modules/java/generator/gen_java.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index b1ea133edf..3aa25c11c4 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -953,11 +953,19 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname def gen_class(self, ci): logging.info("%s", ci) # constants + consts_map = {c.name: c for c in ci.private_consts} + consts_map.update({c.name: c for c in ci.consts}) + def const_value(v): + if v in consts_map: + target = consts_map[v] + assert target.value != v + return const_value(target.value) + return v if ci.private_consts: logging.info("%s", ci.private_consts) ci.j_code.write(""" private static final int - %s;\n\n""" % (",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in ci.private_consts]) + %s;\n\n""" % (",\n"+" "*12).join(["%s = %s" % (c.name, const_value(c.value)) for c in ci.private_consts]) ) if ci.consts: enumTypes = set(map(lambda c: c.enumType, ci.consts)) @@ -975,19 +983,19 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname # {1}(int id) {{ this.id = id; }} # {1}({1} _this) {{ this.id = _this.id; }} # public int getValue() {{ return id; }} -# }}\n\n""".format((",\n"+" "*8).join(["%s(%s)" % (c.name, c.value) for c in consts]), typeName) +# }}\n\n""".format((",\n"+" "*8).join(["%s(%s)" % (c.name, const_value(c.value)) for c in consts]), typeName) # ) ################################################################ ci.j_code.write(""" // C++: enum {1} public static final int - {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in consts]), typeName) + {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, const_value(c.value)) for c in consts]), typeName) ) else: ci.j_code.write(""" // C++: enum public static final int - {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in consts])) + {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, const_value(c.value)) for c in consts])) ) # methods for fi in ci.getAllMethods(): From 80d37ba69830009e78624a7f29da10e929ffe669 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 2 Mar 2019 14:49:21 +0000 Subject: [PATCH 11/19] dnn: fix usage of CV_LOG_VERBOSE macro --- modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp b/modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp 
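The const_value() helper added to gen_java.py resolves constants that are declared in terms of other constants before the Java code is written out, so an initializer such as Hamming_normType = NORM_HAMMING is replaced by its literal value and can no longer produce an illegal forward reference. A standalone, condensed version of that lookup (the two-entry dictionary below is a made-up example; NORM_HAMMING is 6 in OpenCV):

consts = {
    "NORM_HAMMING": "6",                 # plain numeric constant
    "Hamming_normType": "NORM_HAMMING",  # defined in terms of another constant
}

def const_value(v, consts_map):
    if v in consts_map:
        nxt = consts_map[v]
        assert nxt != v                  # guard against a self-referencing constant
        return const_value(nxt, consts_map)
    return v

print(const_value("NORM_HAMMING", consts))      # -> "6"
print(const_value("Hamming_normType", consts))  # -> "6"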
index edac295fd5..a41bf92aea 100644 --- a/modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp +++ b/modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp @@ -1826,7 +1826,7 @@ void OCL4DNNConvSpatial::setupConvolution(const UMat &bottom, } else { - CV_LOG_VERBOSE(NULL, "Kernel " << config->kernelName << " pass verification"); + CV_LOG_VERBOSE(NULL, 0, "Kernel " << config->kernelName << " pass verification"); } } catch (...) From 5a2faab2e617dcf476f4955cf748e514242e2962 Mon Sep 17 00:00:00 2001 From: Namgoo Lee Date: Sun, 3 Mar 2019 16:40:43 +0000 Subject: [PATCH 12/19] CUDA 10.1 Build Issue Fix --- .../include/opencv2/cudev/warp/shuffle.hpp | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/modules/cudev/include/opencv2/cudev/warp/shuffle.hpp b/modules/cudev/include/opencv2/cudev/warp/shuffle.hpp index d317b2c449..fd22d2a317 100644 --- a/modules/cudev/include/opencv2/cudev/warp/shuffle.hpp +++ b/modules/cudev/include/opencv2/cudev/warp/shuffle.hpp @@ -157,43 +157,6 @@ CV_CUDEV_SHFL_VEC_INST(double) // shfl_up -template -__device__ __forceinline__ T compatible_shfl_up(T val, uint delta, int width = warpSize) -{ -#if __CUDACC_VER_MAJOR__ < 9 - - return shfl_up(val, delta, width); - -#else // __CUDACC_VER_MAJOR__ < 9 - -#if CV_CUDEV_ARCH >= 700 - return shfl_up_sync(0xFFFFFFFFU, val, delta, width); -#else - const int block_size = Block::blockSize(); - const int residual = block_size & (warpSize - 1); - - if (0 == residual) - return shfl_up_sync(0xFFFFFFFFU, val, delta, width); - else - { - const int n_warps = divUp(block_size, warpSize); - const int warp_id = Warp::warpId(); - - if (warp_id < n_warps - 1) - return shfl_up_sync(0xFFFFFFFFU, val, delta, width); - else - { - // We are at the last threads of a block whose number of threads - // is not a multiple of the warp size - uint mask = (1LU << residual) - 1; - return shfl_up_sync(mask, val, delta, width); - } - } -#endif - -#endif // __CUDACC_VER_MAJOR__ < 9 -} - #if __CUDACC_VER_MAJOR__ >= 9 template @@ -300,6 +263,43 @@ CV_CUDEV_SHFL_UP_VEC_INST(double) #endif +template +__device__ __forceinline__ T compatible_shfl_up(T val, uint delta, int width = warpSize) +{ +#if __CUDACC_VER_MAJOR__ < 9 + + return shfl_up(val, delta, width); + +#else // __CUDACC_VER_MAJOR__ < 9 + +#if CV_CUDEV_ARCH >= 700 + return shfl_up_sync(0xFFFFFFFFU, val, delta, width); +#else + const int block_size = Block::blockSize(); + const int residual = block_size & (warpSize - 1); + + if (0 == residual) + return shfl_up_sync(0xFFFFFFFFU, val, delta, width); + else + { + const int n_warps = divUp(block_size, warpSize); + const int warp_id = Warp::warpId(); + + if (warp_id < n_warps - 1) + return shfl_up_sync(0xFFFFFFFFU, val, delta, width); + else + { + // We are at the last threads of a block whose number of threads + // is not a multiple of the warp size + uint mask = (1LU << residual) - 1; + return shfl_up_sync(mask, val, delta, width); + } + } +#endif + +#endif // __CUDACC_VER_MAJOR__ < 9 +} + // shfl_down __device__ __forceinline__ uchar shfl_down(uchar val, uint delta, int width = warpSize) From 480c6dae6d4abef574d40e2a458312a732d3e60c Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 4 Mar 2019 15:23:14 +0300 Subject: [PATCH 13/19] android: backport build_sdk.py --- platforms/android/build_sdk.py | 77 +++++++++++++++++++++++++++++++--- 1 file changed, 71 insertions(+), 6 deletions(-) diff --git a/platforms/android/build_sdk.py b/platforms/android/build_sdk.py index ea75fb8100..29ca1926c7 100755 --- 
a/platforms/android/build_sdk.py +++ b/platforms/android/build_sdk.py @@ -48,6 +48,16 @@ def check_dir(d, create=False, clean=False): os.makedirs(d) return d +def check_executable(cmd): + try: + log.debug("Executing: %s" % cmd) + result = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + log.debug("Result: %s" % (result+'\n').split('\n')[0]) + return True + except Exception as e: + log.debug('Failed: %s' % e) + return False + def determine_engine_version(manifest_path): with open(manifest_path, "rt") as f: return re.search(r'android:versionName="(\d+\.\d+)"', f.read(), re.MULTILINE).group(1) @@ -135,6 +145,38 @@ class Builder: self.opencv_version = determine_opencv_version(os.path.join(self.opencvdir, "modules", "core", "include", "opencv2", "core", "version.hpp")) self.engine_version = determine_engine_version(os.path.join(self.opencvdir, "platforms", "android", "service", "engine", "AndroidManifest.xml")) self.use_ccache = False if config.no_ccache else True + self.cmake_path = self.get_cmake() + self.ninja_path = self.get_ninja() + + def get_cmake(self): + if not self.config.use_android_buildtools and check_executable(['cmake', '--version']): + log.info("Using cmake from PATH") + return 'cmake' + # look to see if Android SDK's cmake is installed + android_cmake = os.path.join(os.environ['ANDROID_SDK'], 'cmake') + if os.path.exists(android_cmake): + cmake_subdirs = [f for f in os.listdir(android_cmake) if check_executable([os.path.join(android_cmake, f, 'bin', 'cmake'), '--version'])] + if len(cmake_subdirs) > 0: + # there could be more than one - just take the first one + cmake_from_sdk = os.path.join(android_cmake, cmake_subdirs[0], 'bin', 'cmake') + log.info("Using cmake from Android SDK: %s", cmake_from_sdk) + return cmake_from_sdk + raise Fail("Can't find cmake") + + def get_ninja(self): + if not self.config.use_android_buildtools and check_executable(['ninja', '--version']): + log.info("Using ninja from PATH") + return 'ninja' + # Android SDK's cmake includes a copy of ninja - look to see if its there + android_cmake = os.path.join(os.environ['ANDROID_SDK'], 'cmake') + if os.path.exists(android_cmake): + cmake_subdirs = [f for f in os.listdir(android_cmake) if check_executable([os.path.join(android_cmake, f, 'bin', 'ninja'), '--version'])] + if len(cmake_subdirs) > 0: + # there could be more than one - just take the first one + ninja_from_sdk = os.path.join(android_cmake, cmake_subdirs[0], 'bin', 'ninja') + log.info("Using ninja from Android SDK: %s", ninja_from_sdk) + return ninja_from_sdk + raise Fail("Can't find ninja") def get_toolchain_file(self): if not self.config.force_opencv_toolchain: @@ -160,9 +202,10 @@ class Builder: rm_one(d) def build_library(self, abi, do_install): - cmd = ["cmake", "-GNinja"] + cmd = [self.cmake_path, "-GNinja"] cmake_vars = dict( CMAKE_TOOLCHAIN_FILE=self.get_toolchain_file(), + INSTALL_CREATE_DISTRIB="ON", WITH_OPENCL="OFF", WITH_IPP=("ON" if abi.haveIPP() else "OFF"), WITH_TBB="ON", @@ -173,6 +216,8 @@ class Builder: BUILD_ANDROID_EXAMPLES="ON", INSTALL_ANDROID_EXAMPLES="ON", ) + if self.ninja_path != 'ninja': + cmake_vars['CMAKE_MAKE_PROGRAM'] = self.ninja_path if self.config.extra_modules_path is not None: cmd.append("-DOPENCV_EXTRA_MODULES_PATH='%s'" % self.config.extra_modules_path) @@ -187,20 +232,22 @@ class Builder: cmd.append(self.opencvdir) execute(cmd) if do_install: - execute(["ninja"]) + execute([self.ninja_path]) for c in ["libs", "dev", "java", "samples"]: - execute(["cmake", "-DCOMPONENT=%s" % c, "-P", 
"cmake_install.cmake"]) + execute([self.cmake_path, "-DCOMPONENT=%s" % c, "-P", "cmake_install.cmake"]) else: - execute(["ninja", "install/strip"]) + execute([self.ninja_path, "install/strip"]) def build_engine(self, abi, engdest): - cmd = ["cmake", "-GNinja"] + cmd = [self.cmake_path, "-GNinja"] cmake_vars = dict( CMAKE_TOOLCHAIN_FILE=self.get_toolchain_file(), WITH_OPENCL="OFF", WITH_IPP="OFF", BUILD_ANDROID_SERVICE = 'ON' ) + if self.ninja_path != 'ninja': + cmake_vars['CMAKE_MAKE_PROGRAM'] = self.ninja_path cmake_vars.update(abi.cmake_vars) cmd += [ "-D%s='%s'" % (k, v) for (k, v) in cmake_vars.items() if v is not None] cmd.append(self.opencvdir) @@ -227,7 +274,7 @@ class Builder: log.info("Generating XML config: %s", xmlname) ET.ElementTree(r).write(xmlname, encoding="utf-8") - execute(["ninja", "opencv_engine"]) + execute([self.ninja_path, "opencv_engine"]) execute(["ant", "-f", os.path.join(apkdest, "build.xml"), "debug"], shell=(sys.platform == 'win32')) # TODO: Sign apk @@ -299,6 +346,7 @@ if __name__ == "__main__": parser.add_argument('--config', default='ndk-10.config.py', type=str, help="Package build configuration", ) parser.add_argument('--ndk_path', help="Path to Android NDK to use for build") parser.add_argument('--sdk_path', help="Path to Android SDK to use for build") + parser.add_argument('--use_android_buildtools', action="store_true", help='Use cmake/ninja build tools from Android SDK') parser.add_argument("--extra_modules_path", help="Path to extra modules to use for build") parser.add_argument('--sign_with', help="Certificate to sign the Manager apk") parser.add_argument('--build_doc', action="store_true", help="Build javadoc") @@ -316,6 +364,23 @@ if __name__ == "__main__": if args.sdk_path is not None: os.environ["ANDROID_SDK"] = args.sdk_path + if not 'ANDROID_HOME' in os.environ and 'ANDROID_SDK' in os.environ: + os.environ['ANDROID_HOME'] = os.environ["ANDROID_SDK"] + + if not 'ANDROID_SDK' in os.environ: + raise Fail("SDK location not set. Either pass --sdk_path or set ANDROID_SDK environment variable") + + # look for an NDK installed with the Android SDK + if not 'ANDROID_NDK' in os.environ and 'ANDROID_SDK' in os.environ and os.path.exists(os.path.join(os.environ["ANDROID_SDK"], 'ndk-bundle')): + os.environ['ANDROID_NDK'] = os.path.join(os.environ["ANDROID_SDK"], 'ndk-bundle') + + if not 'ANDROID_NDK' in os.environ: + raise Fail("NDK location not set. Either pass --ndk_path or set ANDROID_NDK environment variable") + + if not check_executable(['ccache', '--version']): + log.info("ccache not found - disabling ccache support") + args.no_ccache = True + if os.path.realpath(args.work_dir) == os.path.realpath(SCRIPT_DIR): raise Fail("Specify workdir (building from script directory is not supported)") if os.path.realpath(args.work_dir) == os.path.realpath(args.opencv_dir): From 93dab9e1e99724279922a3ad5c1c564751e071f6 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 4 Mar 2019 18:45:01 +0300 Subject: [PATCH 14/19] videoio(v4l): fix build due missing defines - V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT - V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH --- modules/videoio/src/cap_v4l.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/videoio/src/cap_v4l.cpp b/modules/videoio/src/cap_v4l.cpp index 7cf985adb5..822f37344f 100644 --- a/modules/videoio/src/cap_v4l.cpp +++ b/modules/videoio/src/cap_v4l.cpp @@ -243,6 +243,14 @@ make & enjoy! 
#define V4L2_CID_ISO_SENSITIVITY (V4L2_CID_CAMERA_CLASS_BASE+23) #endif +// https://github.com/opencv/opencv/issues/13929 +#ifndef V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT +#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_MPEG_BASE+364) +#endif +#ifndef V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH +#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_MPEG_BASE+365) +#endif + /* Defaults - If your board can do better, set it here. Set for the most common type inputs. */ #define DEFAULT_V4L_WIDTH 640 #define DEFAULT_V4L_HEIGHT 480 From 8ee1be7d0942bb621b1a98946e32daa6c6ecbb1c Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 5 Mar 2019 13:38:47 +0300 Subject: [PATCH 15/19] cmake: support MSVS 2019 --- cmake/OpenCVDetectCXXCompiler.cmake | 2 ++ cmake/templates/OpenCVConfig.root-WIN32.cmake.in | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/cmake/OpenCVDetectCXXCompiler.cmake b/cmake/OpenCVDetectCXXCompiler.cmake index 8c9ff0345a..d7fb4f9a80 100644 --- a/cmake/OpenCVDetectCXXCompiler.cmake +++ b/cmake/OpenCVDetectCXXCompiler.cmake @@ -143,6 +143,8 @@ elseif(MSVC) set(OpenCV_RUNTIME vc14) elseif(MSVC_VERSION MATCHES "^191[0-9]$") set(OpenCV_RUNTIME vc15) + elseif(MSVC_VERSION MATCHES "^192[0-9]$") + set(OpenCV_RUNTIME vc16) else() message(WARNING "OpenCV does not recognize MSVC_VERSION \"${MSVC_VERSION}\". Cannot set OpenCV_RUNTIME") endif() diff --git a/cmake/templates/OpenCVConfig.root-WIN32.cmake.in b/cmake/templates/OpenCVConfig.root-WIN32.cmake.in index 687298acbc..76027bf92e 100644 --- a/cmake/templates/OpenCVConfig.root-WIN32.cmake.in +++ b/cmake/templates/OpenCVConfig.root-WIN32.cmake.in @@ -113,6 +113,16 @@ elseif(MSVC) if(NOT has_VS2017) set(OpenCV_RUNTIME vc14) # selecting previous compatible runtime version endif() + elseif(MSVC_VERSION MATCHES "^192[0-9]$") + set(OpenCV_RUNTIME vc16) + check_one_config(has_VS2019) + if(NOT has_VS2019) + set(OpenCV_RUNTIME vc15) # selecting previous compatible runtime version + check_one_config(has_VS2017) + if(NOT has_VS2017) + set(OpenCV_RUNTIME vc14) # selecting previous compatible runtime version + endif() + endif() endif() elseif(MINGW) set(OpenCV_RUNTIME mingw) From 98f89f90794fdded08ece4cff60fee119f28d61f Mon Sep 17 00:00:00 2001 From: RAJKIRAN NATARAJAN Date: Sat, 2 Mar 2019 15:19:00 -0500 Subject: [PATCH 16/19] Need -DBUILD_DOCS CMake flag to build docs Must have that flag. Otherwise following "doxygen overview" tutorial won't work. 
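The change below only touches the tutorial text, so here is the sequence it documents, expressed as a small Python driver in the style of the build scripts elsewhere in this series. The cmake flags are taken from the patched tutorial; the 'doxygen' make target and the checkout paths are assumptions to adjust for your layout:

import subprocess

def build_docs(opencv_src, build_dir, contrib_modules=None):
    cmd = ['cmake', '-DBUILD_DOCS=ON']
    if contrib_modules:
        cmd.append('-DOPENCV_EXTRA_MODULES_PATH=%s' % contrib_modules)
    cmd.append(opencv_src)
    subprocess.check_call(cmd, cwd=build_dir)                   # configure with docs enabled
    subprocess.check_call(['make', 'doxygen'], cwd=build_dir)   # build the HTML documentation

# build_docs('../opencv', '.', contrib_modules='../opencv_contrib/modules')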
--- .../documenting_opencv/documentation_tutorial.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/tutorials/introduction/documenting_opencv/documentation_tutorial.markdown b/doc/tutorials/introduction/documenting_opencv/documentation_tutorial.markdown index 0f26dafaa0..0379a3e382 100644 --- a/doc/tutorials/introduction/documenting_opencv/documentation_tutorial.markdown +++ b/doc/tutorials/introduction/documenting_opencv/documentation_tutorial.markdown @@ -32,11 +32,11 @@ Generate documentation {#tutorial_documentation_generate} - Create build directory near the sources folder(s) and go into it - Run cmake (assuming you put sources to _opencv_ folder): @code{.sh} - cmake ../opencv + cmake -DBUILD_DOCS=ON ../opencv @endcode Or if you get contrib sources too: @code{.sh} - cmake -DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib/modules ../opencv + cmake -DBUILD_DOCS=ON -DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib/modules ../opencv @endcode - Run make: @code{.sh} From 35edad3e74464cc6b5873272637c8703cc8edc76 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 5 Mar 2019 14:47:04 +0300 Subject: [PATCH 17/19] build: fix warnings --- modules/core/src/cuda_host_mem.cpp | 2 +- modules/core/src/opengl.cpp | 4 ++-- modules/highgui/src/window_QT.cpp | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/core/src/cuda_host_mem.cpp b/modules/core/src/cuda_host_mem.cpp index 6a79320d7c..ab2c611845 100644 --- a/modules/core/src/cuda_host_mem.cpp +++ b/modules/core/src/cuda_host_mem.cpp @@ -206,7 +206,7 @@ void cv::cuda::HostMem::create(int rows_, int cols_, int type_) cols = cols_; step = elemSize() * cols; int sz[] = { rows, cols }; - size_t steps[] = { step, CV_ELEM_SIZE(type_) }; + size_t steps[] = { step, (size_t)CV_ELEM_SIZE(type_) }; flags = updateContinuityFlag(flags, 2, sz, steps); if (alloc_type == SHARED) diff --git a/modules/core/src/opengl.cpp b/modules/core/src/opengl.cpp index d98663b09b..14f5588680 100644 --- a/modules/core/src/opengl.cpp +++ b/modules/core/src/opengl.cpp @@ -1726,7 +1726,7 @@ void convertToGLTexture2D(InputArray src, Texture2D& texture) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed"); size_t offset = 0; // TODO size_t dst_origin[3] = {0, 0, 0}; - size_t region[3] = {u.cols, u.rows, 1}; + size_t region[3] = { (size_t)u.cols, (size_t)u.rows, 1}; status = clEnqueueCopyBufferToImage(q, clBuffer, clImage, offset, dst_origin, region, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed"); @@ -1786,7 +1786,7 @@ void convertFromGLTexture2D(const Texture2D& texture, OutputArray dst) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed"); size_t offset = 0; // TODO size_t src_origin[3] = {0, 0, 0}; - size_t region[3] = {u.cols, u.rows, 1}; + size_t region[3] = { (size_t)u.cols, (size_t)u.rows, 1}; status = clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed"); diff --git a/modules/highgui/src/window_QT.cpp b/modules/highgui/src/window_QT.cpp index 9425c7be14..1acd0d1672 100644 --- a/modules/highgui/src/window_QT.cpp +++ b/modules/highgui/src/window_QT.cpp @@ -103,7 +103,7 @@ CV_IMPL CvFont cvFontQt(const char* nameFont, int pointSize,CvScalar color,int w float dx;//spacing letter in Qt (0 default) in pixel int line_type;//<- pointSize in Qt 
*/ - CvFont f = {nameFont,color,style,NULL,NULL,NULL,0,0,0,weight,spacing,pointSize}; + CvFont f = {nameFont,color,style,NULL,NULL,NULL,0,0,0,weight, (float)spacing, pointSize}; return f; } From bbf39b0964e9246358877b0d72391e9c0b010f0d Mon Sep 17 00:00:00 2001 From: catree Date: Wed, 20 Feb 2019 18:08:43 +0100 Subject: [PATCH 18/19] Add Hand-Eye calibration methods (Tsai, Park, Horaud, Andreff, Daniilidis). --- doc/opencv.bib | 65 ++ modules/calib3d/doc/pics/hand-eye_figure.png | Bin 0 -> 21200 bytes modules/calib3d/include/opencv2/calib3d.hpp | 143 +++- modules/calib3d/src/calibration_handeye.cpp | 770 ++++++++++++++++++ .../test/test_calibration_hand_eye.cpp | 381 +++++++++ 5 files changed, 1358 insertions(+), 1 deletion(-) create mode 100644 modules/calib3d/doc/pics/hand-eye_figure.png create mode 100644 modules/calib3d/src/calibration_handeye.cpp create mode 100644 modules/calib3d/test/test_calibration_hand_eye.cpp diff --git a/doc/opencv.bib b/doc/opencv.bib index aa2bac5062..e2af456532 100644 --- a/doc/opencv.bib +++ b/doc/opencv.bib @@ -17,6 +17,21 @@ number = {7}, url = {http://www.bmva.org/bmvc/2013/Papers/paper0013/paper0013.pdf} } +@inproceedings{Andreff99, + author = {Andreff, Nicolas and Horaud, Radu and Espiau, Bernard}, + title = {On-line Hand-eye Calibration}, + booktitle = {Proceedings of the 2Nd International Conference on 3-D Digital Imaging and Modeling}, + series = {3DIM'99}, + year = {1999}, + isbn = {0-7695-0062-5}, + location = {Ottawa, Canada}, + pages = {430--436}, + numpages = {7}, + url = {http://dl.acm.org/citation.cfm?id=1889712.1889775}, + acmid = {1889775}, + publisher = {IEEE Computer Society}, + address = {Washington, DC, USA}, +} @inproceedings{Arandjelovic:2012:TTE:2354409.2355123, author = {Arandjelovic, Relja}, title = {Three Things Everyone Should Know to Improve Object Retrieval}, @@ -180,6 +195,14 @@ volume = {9}, publisher = {Walter de Gruyter} } +@article{Daniilidis98, + author = {Konstantinos Daniilidis}, + title = {Hand-Eye Calibration Using Dual Quaternions}, + journal = {International Journal of Robotics Research}, + year = {1998}, + volume = {18}, + pages = {286--298} +} @inproceedings{DM03, author = {Drago, Fr{\'e}d{\'e}ric and Myszkowski, Karol and Annen, Thomas and Chiba, Norishige}, title = {Adaptive logarithmic mapping for displaying high contrast scenes}, @@ -431,6 +454,24 @@ publisher = {Cambridge university press}, url = {http://cds.cern.ch/record/1598612/files/0521540518_TOC.pdf} } +@article{Horaud95, + author = {Horaud, Radu and Dornaika, Fadi}, + title = {Hand-eye Calibration}, + journal = {Int. J. Rob. Res.}, + issue_date = {June 1995}, + volume = {14}, + number = {3}, + month = jun, + year = {1995}, + issn = {0278-3649}, + pages = {195--210}, + numpages = {16}, + url = {http://dx.doi.org/10.1177/027836499501400301}, + doi = {10.1177/027836499501400301}, + acmid = {208622}, + publisher = {Sage Publications, Inc.}, + address = {Thousand Oaks, CA, USA} +} @article{Horn81, author = {Horn, Berthold KP and Schunck, Brian G}, title = {Determining Optical Flow}, @@ -667,6 +708,18 @@ number = {2}, publisher = {Elsevier} } +@article{Park94, + author = {F. C. Park and B. J. 
Martin}, + journal = {IEEE Transactions on Robotics and Automation}, + title = {Robot sensor calibration: solving AX=XB on the Euclidean group}, + year = {1994}, + volume = {10}, + number = {5}, + pages = {717-721}, + doi = {10.1109/70.326576}, + ISSN = {1042-296X}, + month = {Oct} +} @inproceedings{PM03, author = {P{\'e}rez, Patrick and Gangnet, Michel and Blake, Andrew}, title = {Poisson image editing}, @@ -841,6 +894,18 @@ number = {1}, publisher = {Taylor \& Francis} } +@article{Tsai89, + author = {R. Y. Tsai and R. K. Lenz}, + journal = {IEEE Transactions on Robotics and Automation}, + title = {A new technique for fully autonomous and efficient 3D robotics hand/eye calibration}, + year = {1989}, + volume = {5}, + number = {3}, + pages = {345-358}, + doi = {10.1109/70.34770}, + ISSN = {1042-296X}, + month = {June} +} @inproceedings{UES01, author = {Uyttendaele, Matthew and Eden, Ashley and Skeliski, R}, title = {Eliminating ghosting and exposure artifacts in image mosaics}, diff --git a/modules/calib3d/doc/pics/hand-eye_figure.png b/modules/calib3d/doc/pics/hand-eye_figure.png new file mode 100644 index 0000000000000000000000000000000000000000..ff41d33f8c6a49a3b32f5c6928958881a649136b GIT binary patch literal 21200 zcmX_o1z6MH7dPGAF+h-Jbd6R*8YG0#0|Z2Bba#WajF1o%QK``#j#LIncS%Wi*E@gz z_kEu|+n(pU?>XPkse8`7_uR$5)KMiSU?4z4LnBsKQ`SdA!yrLJL-)eNL0!?@sm4To z;n--ZDx*FAdwpy#%|KnjhpL%)q9~^Sz0no9Klq_8;(DoTtKhC-FcM3%nG9*Qp`krN zQ&)a&=r^~YZb6&d!QYqMo`!@dShqf0o+2Xzf1JYk>(&MN4dLZ52FV z07L)<&?(Y`6qd%9Ck#!$x0Zb``(Ak4a-(7YrDZ+O;MIu#2;Y+^GDSSr|No&U$}oY& zrs9QhgBcvQ^bGgXk@Rl6;hOSlHuwQI2tNoHseg*k8Wj~ez{wQG5tepPizY;tjiZI# z;aSmii%`qTez zI2LAT!Z-7|!u|aeduxGwhN=nuiVQ?aS5irZ0#f7duz3>NDntV}|&i);CU4n0@d|2Sitj$Yvl@az{yeNf{s9v zg?JTPV2L#)UlRDsZ;u=+ZLTP?)iFvxIuMH|8L-zyestwN&w|TEqA$K&vzpz2&2$bU z%aD7@2jjwkl7Lki1dQTvpOas?_>>qIGuvg=W$r!ZqYb9&yX{g?H|j$IN72kR;T*+w zd0j_@D+YOCD8coxr9xvi2Cs%gIHJ0pA>GnW{gq(mINzP4xitva3L=*rKRvH4P>apF^_s98)I_+kdjo;Oy z$%=z6a0)PQ438FjHREFFpDjzfEf2wqt!`}w>5Dyl&<;o)t@gtU!~G}A74gXPBH?ji z!>{(%U~Gt0F>zstN~eGahIVuv@0ROI7{$Dp4NaYGxVWZsAgR%NZIn6DNP3I8q>Y*j z)m6%g7blVYV0=GHLT{IfP3}aT6;((Q+q@V$EgGM#H6pb6k@gQe@I4qshJS&}j3I5< zTquy;EXhR^avUF!!t2M>NxO|rU2gjmi?ByDUGzvN zKjvGs@oyosyc#snHJ&$?siI+3JO#YLd@BaTN~}=>BX3p~DMTAQz(2XjhJ8D&eqG)s zPM&elwj8XCX&rRlGxeq8;?M545jBAHhL$QmrA1uGW~`hWh(^D3=|F~ACvbz zy_fxYe=B>$29LV+$JjJDj%+D<(r8OgYm8oI8(;sW*K6+u7lvTZ9R^JwMR;(&15t%u zuX_k?<<~#A%jnBZr+%$H4RY{eCt92)!KH~K%Hj>o=Wznyf%Oj7;mR8uyAn^?c)LG? 
zS^Qxob-(m85Uo%rrfOtzs( zQHp6Zv_v@_(PJY@%V(<)IB4X@k-tI0eqd(1s;>6!C$Q{E=5zBCUFZ04<4Cm`~ zBT&5ohSn9muSc;%VSy3|w_n0oKfjB7ccZ#Y;`Z$worY&<9~I3|x{2Nw?`L5Hz9vp8 z3j(*oI0>>tuqT!esnQmopm@J!;`is<@~-?<;r7pZ!9y=jr&XtvZFfj1iz`&c{Z_7A zkU^`v!q_&-GJi0hwbwSd+n-cMhb^lHBMI!alzq+Yk@j?Q7@u~43xT`7KFi?pZno21 zOFyGoc0`)F==AzQb4VfSS8;)*i$~IGi!{n~_!O;RuEx5#*GobtTqwI%-^FaznBjSz z%1ZW}b7ErQv4fY1l5giT*gs;3s&1ky3*xszb$YY$}ZqBAvi`_vVK0vj|jC5O-JB4T940YS%cYwXa& zU4)_)^QTo}=~xP!DPR83=#d6L#}jIM2WxO&pko0ccQbr){Ji=J`p&q0dQx(3f^*Qx z%t@X{I4&D#1nhn*XR+c4gVs$mqHYcDs>Ah8NL6`BC@$Nq{OXL>BOSkwY%DP}SnTXS zIv@O2uqOHG^v_HnyJ4BoYur_$#)Bb6+ywWZOX=phMYcqVS@{`gEh77Oq8AIB+r;Q^ za)U;2v8`!|0nS~>x`949SnR|ZcF`m$A^tE4!emCXlyY)CqJjJQb9f(`R9=U~sdxgp z{NBe>9KnOlyIxAtt^jiIc1Iz$Pw5L?QXe{G7;WG7zuh7qRagz%jh3IB(q6$eS zqRrU@qz9(YBo@V)zHk>E&d>C*-FCL4L?4Gpho(dXzh1r5i30u&g-aTj<(j@aJn{~` zqJz9Y=n9jADm|@|v}mEW-I-oS3|+)&yFkin385#A=_XfQmsFWJCK{2jdqLt=hhwcg z+}_ZWHS#U*?(jp3pN4yrpQ~Z)y%nzPE5x;zEv-FkW}4%Y$cM?>}un( zl4gF%fR}e1XN_pFoAQncT5VGyb9`ohN2fKw_+U?fRmzU}$%C@S5ib8NaES2JS>Io; z{{{y+PFvR)o7z7ifGO!Tk29U5W8hyVyB%8x#rcVv^gnIcLrWZCY)YD(E^Ninugq%*cg(#R}(1 zJd&YV+)6y~5pUo*E%^j8X^o=hY z1;PJXh5w>wWl@6so!rH!7Vj$uT&pg!d7^7n~!Ar{+Nf{BRWPy=DEJ z);s)5A&}9b(lS6~Uyf%%)@XUZ zVhO<%LURUvg01r&Sq$5C8f@l&8|@2}?{QVqs_30_&T?({KaKUn+~I@Eu+*}X6_r*l zmL8yYmGj>E{ILzuQD3bnukhp_ISo5LxueG>!Y@6xK9tLL6%6;{E?gS76Dman_YVd( z0ZO?CPj%JC!2_GXQLKGG@9PN3K-F)#Gz_;0f3bYU%mMoI{6isy8}XJ93$NbVthOfx z;xo&ls4sX{e}K!)dd|_ z=uQ=dZ1-Xuw9!7C`42PraeiW|UUnqPc462g}TD zfn2641;!*o>A$1=nb#53%KS@RS%t8KuJ8oH_r)W_f#J#TiYu1?ifPlmqxj;q%o2*9 z-wy2-72-b}FQ5P;Y}Mhu>i!|Sw{DZq^yRwDM1o*uosx@vofXUIolj7knp?f%@Fh{k znDE8_GH_ov&AQeNis2>($E#m#1=_V-3M|+w;|b}%sg@~;@r75!5D$#2^jDJHqnl!W zY08=B#k0~*hnbmDYzoq*KF+Wg&1!jB$=HG-fxK_eB~~0{;NFEceiU-y+blO7b_5!* zZibry0x{?CE;6uKnp>3=YSr{-Q{5=J237lFK+z~pu%Te4lO2&m-GK_Y70m^E^3=VQ zN`2{*EX<^y=KY+6+2J5K)kb_GHG#7fzI4KP};OdTBNY4$zK@67%0= z;iiaN^vDo!#|@i5L|0lIC8WtZ$mH$uV!3?xmM@45BK5(f@Ad}X^nEF(0f*fOP0UzD zE@zO!5cmU|(C5%uFyyV42|+j|z5`{s*g>15S)j-x=PjdL@iLV{{OG0nSuQx<)rgtH z5<4e@;H@icHRsC22fX2U&}!R&;DnJ}#XDT; z{*j3X;-tOJ)|dYRVZ#`qN%jiq*BSOU}E|~mY#f@|=*3kcY4D}saeJA&6a417|g)M+2 zY$BHf87PMI9)6IcMS1X3iydS>{AO7{Z5cH4jb(j#3v}3WmKH_kq81lhtuN$*4pD}* z7GJ$9qQD4m$(HZfY8Lqibg1`_71tHQsTL0oNB!(w40sRuZg@VW);Ie1Iyk}^livug z<}R!8`!^bI|0p-3*O)7lD<#fw<>#CE46l@KKk#H26g9QK^F;`mtQjDJYw`!^SQ)qq ztu;U(mw%2eD7hU5MCAZlNJ1ANJphOTO7&G(2UuV1SNP59497DcF>6a!bW<7s3oc42otqaXCYG^c!yP0_=bX_x#J`PI zTJPJeZ8APqb)j^wS;M?r3T$qwM~7T-{Me!mhibpI`-|Ee7cBBcRkDoRDP`Zb#fD)< zGgSC;E@=WUCc`&6h-E#b30TPu-g71S>NWzq|J~qp^>+_-C|!^*P96$$<`ZHaOLF1} zCCrfED=uxP37QrjW2n*E>*ENCGlk-3I`D}FQCR!h`) that contains the rotation matrices for all the transformations +from gripper frame to robot base frame. +@param[in] t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point +expressed in the gripper frame to the robot base frame (\f$_{}^{b}\textrm{T}_g\f$). +This is a vector (`vector`) that contains the translation vectors for all the transformations +from gripper frame to robot base frame. +@param[in] R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point +expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$). +This is a vector (`vector`) that contains the rotation matrices for all the transformations +from calibration target frame to camera frame. +@param[in] t_target2cam Rotation part extracted from the homogeneous matrix that transforms a point +expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$). 
+This is a vector (`vector`) that contains the translation vectors for all the transformations +from calibration target frame to camera frame. +@param[out] R_cam2gripper Estimated rotation part extracted from the homogeneous matrix that transforms a point +expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$). +@param[out] t_cam2gripper Estimated translation part extracted from the homogeneous matrix that transforms a point +expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$). +@param[in] method One of the implemented Hand-Eye calibration method, see cv::HandEyeCalibrationMethod + +The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the +rotation then the translation (separable solutions) and the following methods are implemented: + - R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/EyeCalibration \cite Tsai89 + - F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94 + - R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95 + +Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions), +with the following implemented method: + - N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99 + - K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98 + +The following picture describes the Hand-Eye calibration problem where the transformation between a camera ("eye") +mounted on a robot gripper ("hand") has to be estimated. + +![](pics/hand-eye_figure.png) + +The calibration procedure is the following: + - a static calibration pattern is used to estimate the transformation between the target frame + and the camera frame + - the robot gripper is moved in order to acquire several poses + - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for + instance the robot kinematics +\f[ + \begin{bmatrix} + X_b\\ + Y_b\\ + Z_b\\ + 1 + \end{bmatrix} + = + \begin{bmatrix} + _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\ + 0_{1 \times 3} & 1 + \end{bmatrix} + \begin{bmatrix} + X_g\\ + Y_g\\ + Z_g\\ + 1 + \end{bmatrix} +\f] + - for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using + for instance a pose estimation method (PnP) from 2D-3D point correspondences +\f[ + \begin{bmatrix} + X_c\\ + Y_c\\ + Z_c\\ + 1 + \end{bmatrix} + = + \begin{bmatrix} + _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\ + 0_{1 \times 3} & 1 + \end{bmatrix} + \begin{bmatrix} + X_t\\ + Y_t\\ + Z_t\\ + 1 + \end{bmatrix} +\f] + +The Hand-Eye calibration procedure returns the following homogeneous transformation +\f[ + \begin{bmatrix} + X_g\\ + Y_g\\ + Z_g\\ + 1 + \end{bmatrix} + = + \begin{bmatrix} + _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\ + 0_{1 \times 3} & 1 + \end{bmatrix} + \begin{bmatrix} + X_c\\ + Y_c\\ + Z_c\\ + 1 + \end{bmatrix} +\f] + +This problem is also known as solving the \f$\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\f$ equation: +\f[ + \begin{align*} + ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &= + \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\ + + (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &= + \hspace{0.1em} ^{g}\textrm{T}_c 
\hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\ + + \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\ + \end{align*} +\f] + +\note +Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration). +\note +A minimum of 2 motions with non parallel rotation axes are necessary to determine the hand-eye transformation. +So at least 3 different poses are required, but it is strongly recommended to use many more poses. + + */ +CV_EXPORTS_W void calibrateHandEye( InputArrayOfArrays R_gripper2base, InputArrayOfArrays t_gripper2base, + InputArrayOfArrays R_target2cam, InputArrayOfArrays t_target2cam, + OutputArray R_cam2gripper, OutputArray t_cam2gripper, + HandEyeCalibrationMethod method=CALIB_HAND_EYE_TSAI ); + /** @brief Converts points from Euclidean to homogeneous space. @param src Input vector of N-dimensional points. diff --git a/modules/calib3d/src/calibration_handeye.cpp b/modules/calib3d/src/calibration_handeye.cpp new file mode 100644 index 0000000000..18561c77fe --- /dev/null +++ b/modules/calib3d/src/calibration_handeye.cpp @@ -0,0 +1,770 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "precomp.hpp" +#include "opencv2/calib3d.hpp" + +namespace cv { + +static Mat homogeneousInverse(const Mat& T) +{ + CV_Assert(T.rows == 4 && T.cols == 4); + + Mat R = T(Rect(0, 0, 3, 3)); + Mat t = T(Rect(3, 0, 1, 3)); + Mat Rt = R.t(); + Mat tinv = -Rt * t; + Mat Tinv = Mat::eye(4, 4, T.type()); + Rt.copyTo(Tinv(Rect(0, 0, 3, 3))); + tinv.copyTo(Tinv(Rect(3, 0, 1, 3))); + + return Tinv; +} + +// q = rot2quatMinimal(R) +// +// R - 3x3 rotation matrix, or 4x4 homogeneous matrix +// q - 3x1 unit quaternion +// q = sin(theta/2) * v +// theta - rotation angle +// v - unit rotation axis, |v| = 1 +static Mat rot2quatMinimal(const Mat& R) +{ + CV_Assert(R.type() == CV_64FC1 && R.rows >= 3 && R.cols >= 3); + + double m00 = R.at(0,0), m01 = R.at(0,1), m02 = R.at(0,2); + double m10 = R.at(1,0), m11 = R.at(1,1), m12 = R.at(1,2); + double m20 = R.at(2,0), m21 = R.at(2,1), m22 = R.at(2,2); + double trace = m00 + m11 + m22; + + double qx, qy, qz; + if (trace > 0) { + double S = sqrt(trace + 1.0) * 2; // S=4*qw + qx = (m21 - m12) / S; + qy = (m02 - m20) / S; + qz = (m10 - m01) / S; + } else if ((m00 > m11)&(m00 > m22)) { + double S = sqrt(1.0 + m00 - m11 - m22) * 2; // S=4*qx + qx = 0.25 * S; + qy = (m01 + m10) / S; + qz = (m02 + m20) / S; + } else if (m11 > m22) { + double S = sqrt(1.0 + m11 - m00 - m22) * 2; // S=4*qy + qx = (m01 + m10) / S; + qy = 0.25 * S; + qz = (m12 + m21) / S; + } else { + double S = sqrt(1.0 + m22 - m00 - m11) * 2; // S=4*qz + qx = (m02 + m20) / S; + qy = (m12 + m21) / S; + qz = 0.25 * S; + } + + return (Mat_(3,1) << qx, qy, qz); +} + +static Mat skew(const Mat& v) +{ + CV_Assert(v.type() == CV_64FC1 && v.rows == 3 && v.cols == 1); + + double vx = v.at(0,0); + double vy = v.at(1,0); + double vz = v.at(2,0); + return (Mat_(3,3) << 0, -vz, vy, + vz, 0, -vx, + -vy, vx, 0); +} + +// R = quatMinimal2rot(q) +// +// q - 3x1 unit quaternion +// R - 3x3 rotation matrix +// q = sin(theta/2) * v +// theta - rotation angle +// v - unit rotation axis, |v| = 1 +static Mat quatMinimal2rot(const Mat& q) +{ + CV_Assert(q.type() == CV_64FC1 && q.rows == 3 && q.cols == 1); + + Mat p = q.t()*q; + double w = sqrt(1 - p.at(0,0)); + + Mat diag_p = Mat::eye(3,3,CV_64FC1)*p.at(0,0); + 
return 2*q*q.t() + 2*w*skew(q) + Mat::eye(3,3,CV_64FC1) - 2*diag_p; +} + +// q = rot2quat(R) +// +// q - 4x1 unit quaternion +// R - 3x3 rotation matrix +static Mat rot2quat(const Mat& R) +{ + CV_Assert(R.type() == CV_64FC1 && R.rows >= 3 && R.cols >= 3); + + double m00 = R.at(0,0), m01 = R.at(0,1), m02 = R.at(0,2); + double m10 = R.at(1,0), m11 = R.at(1,1), m12 = R.at(1,2); + double m20 = R.at(2,0), m21 = R.at(2,1), m22 = R.at(2,2); + double trace = m00 + m11 + m22; + + double qw, qx, qy, qz; + if (trace > 0) { + double S = sqrt(trace + 1.0) * 2; // S=4*qw + qw = 0.25 * S; + qx = (m21 - m12) / S; + qy = (m02 - m20) / S; + qz = (m10 - m01) / S; + } else if ((m00 > m11)&(m00 > m22)) { + double S = sqrt(1.0 + m00 - m11 - m22) * 2; // S=4*qx + qw = (m21 - m12) / S; + qx = 0.25 * S; + qy = (m01 + m10) / S; + qz = (m02 + m20) / S; + } else if (m11 > m22) { + double S = sqrt(1.0 + m11 - m00 - m22) * 2; // S=4*qy + qw = (m02 - m20) / S; + qx = (m01 + m10) / S; + qy = 0.25 * S; + qz = (m12 + m21) / S; + } else { + double S = sqrt(1.0 + m22 - m00 - m11) * 2; // S=4*qz + qw = (m10 - m01) / S; + qx = (m02 + m20) / S; + qy = (m12 + m21) / S; + qz = 0.25 * S; + } + + return (Mat_(4,1) << qw, qx, qy, qz); +} + +// R = quat2rot(q) +// +// q - 4x1 unit quaternion +// R - 3x3 rotation matrix +static Mat quat2rot(const Mat& q) +{ + CV_Assert(q.type() == CV_64FC1 && q.rows == 4 && q.cols == 1); + + double qw = q.at(0,0); + double qx = q.at(1,0); + double qy = q.at(2,0); + double qz = q.at(3,0); + + Mat R(3, 3, CV_64FC1); + R.at(0, 0) = 1 - 2*qy*qy - 2*qz*qz; + R.at(0, 1) = 2*qx*qy - 2*qz*qw; + R.at(0, 2) = 2*qx*qz + 2*qy*qw; + + R.at(1, 0) = 2*qx*qy + 2*qz*qw; + R.at(1, 1) = 1 - 2*qx*qx - 2*qz*qz; + R.at(1, 2) = 2*qy*qz - 2*qx*qw; + + R.at(2, 0) = 2*qx*qz - 2*qy*qw; + R.at(2, 1) = 2*qy*qz + 2*qx*qw; + R.at(2, 2) = 1 - 2*qx*qx - 2*qy*qy; + + return R; +} + +// Kronecker product or tensor product +// https://stackoverflow.com/a/36552682 +static Mat kron(const Mat& A, const Mat& B) +{ + CV_Assert(A.channels() == 1 && B.channels() == 1); + + Mat1d Ad, Bd; + A.convertTo(Ad, CV_64F); + B.convertTo(Bd, CV_64F); + + Mat1d Kd(Ad.rows * Bd.rows, Ad.cols * Bd.cols, 0.0); + for (int ra = 0; ra < Ad.rows; ra++) + { + for (int ca = 0; ca < Ad.cols; ca++) + { + Kd(Range(ra*Bd.rows, (ra + 1)*Bd.rows), Range(ca*Bd.cols, (ca + 1)*Bd.cols)) = Bd.mul(Ad(ra, ca)); + } + } + + Mat K; + Kd.convertTo(K, A.type()); + return K; +} + +// quaternion multiplication +static Mat qmult(const Mat& s, const Mat& t) +{ + CV_Assert(s.type() == CV_64FC1 && t.type() == CV_64FC1); + CV_Assert(s.rows == 4 && s.cols == 1); + CV_Assert(t.rows == 4 && t.cols == 1); + + double s0 = s.at(0,0); + double s1 = s.at(1,0); + double s2 = s.at(2,0); + double s3 = s.at(3,0); + + double t0 = t.at(0,0); + double t1 = t.at(1,0); + double t2 = t.at(2,0); + double t3 = t.at(3,0); + + Mat q(4, 1, CV_64FC1); + q.at(0,0) = s0*t0 - s1*t1 - s2*t2 - s3*t3; + q.at(1,0) = s0*t1 + s1*t0 + s2*t3 - s3*t2; + q.at(2,0) = s0*t2 - s1*t3 + s2*t0 + s3*t1; + q.at(3,0) = s0*t3 + s1*t2 - s2*t1 + s3*t0; + + return q; +} + +// dq = homogeneous2dualQuaternion(H) +// +// H - 4x4 homogeneous transformation: [R | t; 0 0 0 | 1] +// dq - 8x1 dual quaternion: +static Mat homogeneous2dualQuaternion(const Mat& H) +{ + CV_Assert(H.type() == CV_64FC1 && H.rows == 4 && H.cols == 4); + + Mat dualq(8, 1, CV_64FC1); + Mat R = H(Rect(0, 0, 3, 3)); + Mat t = H(Rect(3, 0, 1, 3)); + + Mat q = rot2quat(R); + Mat qt = Mat::zeros(4, 1, CV_64FC1); + t.copyTo(qt(Rect(0, 1, 1, 3))); + Mat qprime = 0.5 * 
qmult(qt, q); + + q.copyTo(dualq(Rect(0, 0, 1, 4))); + qprime.copyTo(dualq(Rect(0, 4, 1, 4))); + + return dualq; +} + +// H = dualQuaternion2homogeneous(dq) +// +// H - 4x4 homogeneous transformation: [R | t; 0 0 0 | 1] +// dq - 8x1 dual quaternion: +static Mat dualQuaternion2homogeneous(const Mat& dualq) +{ + CV_Assert(dualq.type() == CV_64FC1 && dualq.rows == 8 && dualq.cols == 1); + + Mat q = dualq(Rect(0, 0, 1, 4)); + Mat qprime = dualq(Rect(0, 4, 1, 4)); + + Mat R = quat2rot(q); + q.at(1,0) = -q.at(1,0); + q.at(2,0) = -q.at(2,0); + q.at(3,0) = -q.at(3,0); + + Mat qt = 2*qmult(qprime, q); + Mat t = qt(Rect(0, 1, 1, 3)); + + Mat H = Mat::eye(4, 4, CV_64FC1); + R.copyTo(H(Rect(0, 0, 3, 3))); + t.copyTo(H(Rect(3, 0, 1, 3))); + + return H; +} + +//Reference: +//R. Y. Tsai and R. K. Lenz, "A new technique for fully autonomous and efficient 3D robotics hand/eye calibration." +//In IEEE Transactions on Robotics and Automation, vol. 5, no. 3, pp. 345-358, June 1989. +//C++ code converted from Zoran Lazarevic's Matlab code: +//http://lazax.com/www.cs.columbia.edu/~laza/html/Stewart/matlab/handEye.m +static void calibrateHandEyeTsai(const std::vector& Hg, const std::vector& Hc, + Mat& R_cam2gripper, Mat& t_cam2gripper) +{ + //Number of unique camera position pairs + int K = static_cast((Hg.size()*Hg.size() - Hg.size()) / 2.0); + //Will store: skew(Pgij+Pcij) + Mat A(3*K, 3, CV_64FC1); + //Will store: Pcij - Pgij + Mat B(3*K, 1, CV_64FC1); + + std::vector vec_Hgij, vec_Hcij; + vec_Hgij.reserve(static_cast(K)); + vec_Hcij.reserve(static_cast(K)); + + int idx = 0; + for (size_t i = 0; i < Hg.size(); i++) + { + for (size_t j = i+1; j < Hg.size(); j++, idx++) + { + //Defines coordinate transformation from Gi to Gj + //Hgi is from Gi (gripper) to RW (robot base) + //Hgj is from Gj (gripper) to RW (robot base) + Mat Hgij = homogeneousInverse(Hg[j]) * Hg[i]; //eq 6 + vec_Hgij.push_back(Hgij); + //Rotation axis for Rgij which is the 3D rotation from gripper coordinate frame Gi to Gj + Mat Pgij = 2*rot2quatMinimal(Hgij); + + //Defines coordinate transformation from Ci to Cj + //Hci is from CW (calibration target) to Ci (camera) + //Hcj is from CW (calibration target) to Cj (camera) + Mat Hcij = Hc[j] * homogeneousInverse(Hc[i]); //eq 7 + vec_Hcij.push_back(Hcij); + //Rotation axis for Rcij + Mat Pcij = 2*rot2quatMinimal(Hcij); + + //Left-hand side: skew(Pgij+Pcij) + skew(Pgij+Pcij).copyTo(A(Rect(0, idx*3, 3, 3))); + //Right-hand side: Pcij - Pgij + Mat diff = Pcij - Pgij; + diff.copyTo(B(Rect(0, idx*3, 1, 3))); + } + } + + Mat Pcg_; + //Rotation from camera to gripper is obtained from the set of equations: + // skew(Pgij+Pcij) * Pcg_ = Pcij - Pgij (eq 12) + solve(A, B, Pcg_, DECOMP_SVD); + + Mat Pcg_norm = Pcg_.t() * Pcg_; + //Obtained non-unit quaternion is scaled back to unit value that + //designates camera-gripper rotation + Mat Pcg = 2 * Pcg_ / sqrt(1 + Pcg_norm.at(0,0)); //eq 14 + + Mat Rcg = quatMinimal2rot(Pcg/2.0); + + idx = 0; + for (size_t i = 0; i < Hg.size(); i++) + { + for (size_t j = i+1; j < Hg.size(); j++, idx++) + { + //Defines coordinate transformation from Gi to Gj + //Hgi is from Gi (gripper) to RW (robot base) + //Hgj is from Gj (gripper) to RW (robot base) + Mat Hgij = vec_Hgij[static_cast(idx)]; + //Defines coordinate transformation from Ci to Cj + //Hci is from CW (calibration target) to Ci (camera) + //Hcj is from CW (calibration target) to Cj (camera) + Mat Hcij = vec_Hcij[static_cast(idx)]; + + //Left-hand side: (Rgij - I) + Mat diff = Hgij(Rect(0,0,3,3)) - 
Mat::eye(3,3,CV_64FC1); + diff.copyTo(A(Rect(0, idx*3, 3, 3))); + + //Right-hand side: Rcg*Tcij - Tgij + diff = Rcg*Hcij(Rect(3, 0, 1, 3)) - Hgij(Rect(3, 0, 1, 3)); + diff.copyTo(B(Rect(0, idx*3, 1, 3))); + } + } + + Mat Tcg; + //Translation from camera to gripper is obtained from the set of equations: + // (Rgij - I) * Tcg = Rcg*Tcij - Tgij (eq 15) + solve(A, B, Tcg, DECOMP_SVD); + + R_cam2gripper = Rcg; + t_cam2gripper = Tcg; +} + +//Reference: +//F. Park, B. Martin, "Robot Sensor Calibration: Solving AX = XB on the Euclidean Group." +//In IEEE Transactions on Robotics and Automation, 10(5): 717-721, 1994. +//Matlab code: http://math.loyola.edu/~mili/Calibration/ +static void calibrateHandEyePark(const std::vector& Hg, const std::vector& Hc, + Mat& R_cam2gripper, Mat& t_cam2gripper) +{ + Mat M = Mat::zeros(3, 3, CV_64FC1); + + for (size_t i = 0; i < Hg.size(); i++) + { + for (size_t j = i+1; j < Hg.size(); j++) + { + Mat Hgij = homogeneousInverse(Hg[j]) * Hg[i]; + Mat Hcij = Hc[j] * homogeneousInverse(Hc[i]); + + Mat Rgij = Hgij(Rect(0, 0, 3, 3)); + Mat Rcij = Hcij(Rect(0, 0, 3, 3)); + + Mat a, b; + Rodrigues(Rgij, a); + Rodrigues(Rcij, b); + + M += b * a.t(); + } + } + + Mat eigenvalues, eigenvectors; + eigen(M.t()*M, eigenvalues, eigenvectors); + + Mat v = Mat::zeros(3, 3, CV_64FC1); + for (int i = 0; i < 3; i++) { + v.at(i,i) = 1.0 / sqrt(eigenvalues.at(i,0)); + } + + Mat R = eigenvectors.t() * v * eigenvectors * M.t(); + R_cam2gripper = R; + + int K = static_cast((Hg.size()*Hg.size() - Hg.size()) / 2.0); + Mat C(3*K, 3, CV_64FC1); + Mat d(3*K, 1, CV_64FC1); + Mat I3 = Mat::eye(3, 3, CV_64FC1); + + int idx = 0; + for (size_t i = 0; i < Hg.size(); i++) + { + for (size_t j = i+1; j < Hg.size(); j++, idx++) + { + Mat Hgij = homogeneousInverse(Hg[j]) * Hg[i]; + Mat Hcij = Hc[j] * homogeneousInverse(Hc[i]); + + Mat Rgij = Hgij(Rect(0, 0, 3, 3)); + + Mat tgij = Hgij(Rect(3, 0, 1, 3)); + Mat tcij = Hcij(Rect(3, 0, 1, 3)); + + Mat I_tgij = I3 - Rgij; + I_tgij.copyTo(C(Rect(0, 3*idx, 3, 3))); + + Mat A_RB = tgij - R*tcij; + A_RB.copyTo(d(Rect(0, 3*idx, 1, 3))); + } + } + + Mat t; + solve(C, d, t, DECOMP_SVD); + t_cam2gripper = t; +} + +//Reference: +//R. Horaud, F. Dornaika, "Hand-Eye Calibration" +//In International Journal of Robotics Research, 14(3): 195-210, 1995. 
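+//Outline of the implementation below: the rotation is estimated as the unit quaternion q minimizing
+//sum_i ||(Q(qg_i) - W(qc_i)) q||^2, i.e. the eigenvector of A = sum_i (Q - W)^T (Q - W) associated with
+//the smallest eigenvalue; the translation is then the least-squares solution of (I - Rg_ij) t = tg_ij - R tc_ij.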
+//Matlab code: http://math.loyola.edu/~mili/Calibration/ +static void calibrateHandEyeHoraud(const std::vector& Hg, const std::vector& Hc, + Mat& R_cam2gripper, Mat& t_cam2gripper) +{ + Mat A = Mat::zeros(4, 4, CV_64FC1); + + for (size_t i = 0; i < Hg.size(); i++) + { + for (size_t j = i+1; j < Hg.size(); j++) + { + Mat Hgij = homogeneousInverse(Hg[j]) * Hg[i]; + Mat Hcij = Hc[j] * homogeneousInverse(Hc[i]); + + Mat Rgij = Hgij(Rect(0, 0, 3, 3)); + Mat Rcij = Hcij(Rect(0, 0, 3, 3)); + + Mat qgij = rot2quat(Rgij); + double r0 = qgij.at(0,0); + double rx = qgij.at(1,0); + double ry = qgij.at(2,0); + double rz = qgij.at(3,0); + + // Q(r) Appendix A + Matx44d Qvi(r0, -rx, -ry, -rz, + rx, r0, -rz, ry, + ry, rz, r0, -rx, + rz, -ry, rx, r0); + + Mat qcij = rot2quat(Rcij); + r0 = qcij.at(0,0); + rx = qcij.at(1,0); + ry = qcij.at(2,0); + rz = qcij.at(3,0); + + // W(r) Appendix A + Matx44d Wvi(r0, -rx, -ry, -rz, + rx, r0, rz, -ry, + ry, -rz, r0, rx, + rz, ry, -rx, r0); + + // Ai = (Q(vi') - W(vi))^T (Q(vi') - W(vi)) + A += (Qvi - Wvi).t() * (Qvi - Wvi); + } + } + + Mat eigenvalues, eigenvectors; + eigen(A, eigenvalues, eigenvectors); + + Mat R = quat2rot(eigenvectors.row(3).t()); + R_cam2gripper = R; + + int K = static_cast((Hg.size()*Hg.size() - Hg.size()) / 2.0); + Mat C(3*K, 3, CV_64FC1); + Mat d(3*K, 1, CV_64FC1); + Mat I3 = Mat::eye(3, 3, CV_64FC1); + + int idx = 0; + for (size_t i = 0; i < Hg.size(); i++) + { + for (size_t j = i+1; j < Hg.size(); j++, idx++) + { + Mat Hgij = homogeneousInverse(Hg[j]) * Hg[i]; + Mat Hcij = Hc[j] * homogeneousInverse(Hc[i]); + + Mat Rgij = Hgij(Rect(0, 0, 3, 3)); + + Mat tgij = Hgij(Rect(3, 0, 1, 3)); + Mat tcij = Hcij(Rect(3, 0, 1, 3)); + + Mat I_tgij = I3 - Rgij; + I_tgij.copyTo(C(Rect(0, 3*idx, 3, 3))); + + Mat A_RB = tgij - R*tcij; + A_RB.copyTo(d(Rect(0, 3*idx, 1, 3))); + } + } + + Mat t; + solve(C, d, t, DECOMP_SVD); + t_cam2gripper = t; +} + +// sign function, return -1 if negative values, +1 otherwise +static int sign_double(double val) +{ + return (0 < val) - (val < 0); +} + +//Reference: +//N. Andreff, R. Horaud, B. Espiau, "On-line Hand-Eye Calibration." +//In Second International Conference on 3-D Digital Imaging and Modeling (3DIM'99), pages 430-436, 1999. 
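+//Outline of the implementation below: AX = XB is rewritten as a single linear system in vec(R) and t using
+//the Kronecker product (Eq 10) and solved in a least-squares sense with SVD; the estimated 3x3 block is then
+//rescaled to unit determinant (Eq 15) and projected back onto a rotation matrix with another SVD.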
+//Matlab code: http://math.loyola.edu/~mili/Calibration/ +static void calibrateHandEyeAndreff(const std::vector& Hg, const std::vector& Hc, + Mat& R_cam2gripper, Mat& t_cam2gripper) +{ + int K = static_cast((Hg.size()*Hg.size() - Hg.size()) / 2.0); + Mat A(12*K, 12, CV_64FC1); + Mat B(12*K, 1, CV_64FC1); + + Mat I9 = Mat::eye(9, 9, CV_64FC1); + Mat I3 = Mat::eye(3, 3, CV_64FC1); + Mat O9x3 = Mat::zeros(9, 3, CV_64FC1); + Mat O9x1 = Mat::zeros(9, 1, CV_64FC1); + + int idx = 0; + for (size_t i = 0; i < Hg.size(); i++) + { + for (size_t j = i+1; j < Hg.size(); j++, idx++) + { + Mat Hgij = homogeneousInverse(Hg[j]) * Hg[i]; + Mat Hcij = Hc[j] * homogeneousInverse(Hc[i]); + + Mat Rgij = Hgij(Rect(0, 0, 3, 3)); + Mat Rcij = Hcij(Rect(0, 0, 3, 3)); + + Mat tgij = Hgij(Rect(3, 0, 1, 3)); + Mat tcij = Hcij(Rect(3, 0, 1, 3)); + + //Eq 10 + Mat a00 = I9 - kron(Rgij, Rcij); + Mat a01 = O9x3; + Mat a10 = kron(I3, tcij.t()); + Mat a11 = I3 - Rgij; + + a00.copyTo(A(Rect(0, idx*12, 9, 9))); + a01.copyTo(A(Rect(9, idx*12, 3, 9))); + a10.copyTo(A(Rect(0, idx*12 + 9, 9, 3))); + a11.copyTo(A(Rect(9, idx*12 + 9, 3, 3))); + + O9x1.copyTo(B(Rect(0, idx*12, 1, 9))); + tgij.copyTo(B(Rect(0, idx*12 + 9, 1, 3))); + } + } + + Mat X; + solve(A, B, X, DECOMP_SVD); + + Mat R = X(Rect(0, 0, 1, 9)); + int newSize[] = {3, 3}; + R = R.reshape(1, 2, newSize); + //Eq 15 + double det = determinant(R); + R = pow(sign_double(det) / abs(det), 1.0/3.0) * R; + + Mat w, u, vt; + SVDecomp(R, w, u, vt); + R = u*vt; + + if (determinant(R) < 0) + { + Mat diag = (Mat_(3,3) << 1.0, 0.0, 0.0, + 0.0, 1.0, 0.0, + 0.0, 0.0, -1.0); + R = u*diag*vt; + } + + R_cam2gripper = R; + + Mat t = X(Rect(0, 9, 1, 3)); + t_cam2gripper = t; +} + +//Reference: +//K. Daniilidis, "Hand-Eye Calibration Using Dual Quaternions." +//In The International Journal of Robotics Research,18(3): 286-298, 1998. 
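+//Outline of the implementation below: each motion pair is encoded as dual quaternions and stacked into the
+//6Kx8 matrix T of Eq 31; the hand-eye dual quaternion lies in the 2D (near) null space spanned by the two
+//right singular vectors with the smallest singular values, and the mixing coefficients lambda1, lambda2 are
+//obtained from the unit dual quaternion constraints (Eq 34, Eq 35).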
+//Matlab code: http://math.loyola.edu/~mili/Calibration/ +static void calibrateHandEyeDaniilidis(const std::vector& Hg, const std::vector& Hc, + Mat& R_cam2gripper, Mat& t_cam2gripper) +{ + int K = static_cast((Hg.size()*Hg.size() - Hg.size()) / 2.0); + Mat T = Mat::zeros(6*K, 8, CV_64FC1); + + int idx = 0; + for (size_t i = 0; i < Hg.size(); i++) + { + for (size_t j = i+1; j < Hg.size(); j++, idx++) + { + Mat Hgij = homogeneousInverse(Hg[j]) * Hg[i]; + Mat Hcij = Hc[j] * homogeneousInverse(Hc[i]); + + Mat dualqa = homogeneous2dualQuaternion(Hgij); + Mat dualqb = homogeneous2dualQuaternion(Hcij); + + Mat a = dualqa(Rect(0, 1, 1, 3)); + Mat b = dualqb(Rect(0, 1, 1, 3)); + + Mat aprime = dualqa(Rect(0, 5, 1, 3)); + Mat bprime = dualqb(Rect(0, 5, 1, 3)); + + //Eq 31 + Mat s00 = a - b; + Mat s01 = skew(a + b); + Mat s10 = aprime - bprime; + Mat s11 = skew(aprime + bprime); + Mat s12 = a - b; + Mat s13 = skew(a + b); + + s00.copyTo(T(Rect(0, idx*6, 1, 3))); + s01.copyTo(T(Rect(1, idx*6, 3, 3))); + s10.copyTo(T(Rect(0, idx*6 + 3, 1, 3))); + s11.copyTo(T(Rect(1, idx*6 + 3, 3, 3))); + s12.copyTo(T(Rect(4, idx*6 + 3, 1, 3))); + s13.copyTo(T(Rect(5, idx*6 + 3, 3, 3))); + } + } + + Mat w, u, vt; + SVDecomp(T, w, u, vt); + Mat v = vt.t(); + + Mat u1 = v(Rect(6, 0, 1, 4)); + Mat v1 = v(Rect(6, 4, 1, 4)); + Mat u2 = v(Rect(7, 0, 1, 4)); + Mat v2 = v(Rect(7, 4, 1, 4)); + + //Solves Eq 34, Eq 35 + Mat ma = u1.t()*v1; + Mat mb = u1.t()*v2 + u2.t()*v1; + Mat mc = u2.t()*v2; + + double a = ma.at(0,0); + double b = mb.at(0,0); + double c = mc.at(0,0); + + double s1 = (-b + sqrt(b*b - 4*a*c)) / (2*a); + double s2 = (-b - sqrt(b*b - 4*a*c)) / (2*a); + + Mat sol1 = s1*s1*u1.t()*u1 + 2*s1*u1.t()*u2 + u2.t()*u2; + Mat sol2 = s2*s2*u1.t()*u1 + 2*s2*u1.t()*u2 + u2.t()*u2; + double s, val; + if (sol1.at(0,0) > sol2.at(0,0)) + { + s = s1; + val = sol1.at(0,0); + } + else + { + s = s2; + val = sol2.at(0,0); + } + + double lambda2 = sqrt(1.0 / val); + double lambda1 = s * lambda2; + + Mat dualq = lambda1 * v(Rect(6, 0, 1, 8)) + lambda2*v(Rect(7, 0, 1, 8)); + Mat X = dualQuaternion2homogeneous(dualq); + + Mat R = X(Rect(0, 0, 3, 3)); + Mat t = X(Rect(3, 0, 1, 3)); + R_cam2gripper = R; + t_cam2gripper = t; +} + +void calibrateHandEye(InputArrayOfArrays R_gripper2base, InputArrayOfArrays t_gripper2base, + InputArrayOfArrays R_target2cam, InputArrayOfArrays t_target2cam, + OutputArray R_cam2gripper, OutputArray t_cam2gripper, + HandEyeCalibrationMethod method) +{ + CV_Assert(R_gripper2base.isMatVector() && t_gripper2base.isMatVector() && + R_target2cam.isMatVector() && t_target2cam.isMatVector()); + + std::vector R_gripper2base_, t_gripper2base_; + R_gripper2base.getMatVector(R_gripper2base_); + t_gripper2base.getMatVector(t_gripper2base_); + + std::vector R_target2cam_, t_target2cam_; + R_target2cam.getMatVector(R_target2cam_); + t_target2cam.getMatVector(t_target2cam_); + + CV_Assert(R_gripper2base_.size() == t_gripper2base_.size() && + R_target2cam_.size() == t_target2cam_.size() && + R_gripper2base_.size() == R_target2cam_.size()); + CV_Assert(R_gripper2base_.size() >= 3); + + //Notation used in Tsai paper + //Defines coordinate transformation from G (gripper) to RW (robot base) + std::vector Hg; + Hg.reserve(R_gripper2base_.size()); + for (size_t i = 0; i < R_gripper2base_.size(); i++) + { + Mat m = Mat::eye(4, 4, CV_64FC1); + Mat R = m(Rect(0, 0, 3, 3)); + R_gripper2base_[i].convertTo(R, CV_64F); + + Mat t = m(Rect(3, 0, 1, 3)); + t_gripper2base_[i].convertTo(t, CV_64F); + + Hg.push_back(m); + } + + //Defines 
coordinate transformation from CW (calibration target) to C (camera) + std::vector Hc; + Hc.reserve(R_target2cam_.size()); + for (size_t i = 0; i < R_target2cam_.size(); i++) + { + Mat m = Mat::eye(4, 4, CV_64FC1); + Mat R = m(Rect(0, 0, 3, 3)); + R_target2cam_[i].convertTo(R, CV_64F); + + Mat t = m(Rect(3, 0, 1, 3)); + t_target2cam_[i].convertTo(t, CV_64F); + + Hc.push_back(m); + } + + Mat Rcg = Mat::eye(3, 3, CV_64FC1); + Mat Tcg = Mat::zeros(3, 1, CV_64FC1); + + switch (method) + { + case CALIB_HAND_EYE_TSAI: + calibrateHandEyeTsai(Hg, Hc, Rcg, Tcg); + break; + + case CALIB_HAND_EYE_PARK: + calibrateHandEyePark(Hg, Hc, Rcg, Tcg); + break; + + case CALIB_HAND_EYE_HORAUD: + calibrateHandEyeHoraud(Hg, Hc, Rcg, Tcg); + break; + + case CALIB_HAND_EYE_ANDREFF: + calibrateHandEyeAndreff(Hg, Hc, Rcg, Tcg); + break; + + case CALIB_HAND_EYE_DANIILIDIS: + calibrateHandEyeDaniilidis(Hg, Hc, Rcg, Tcg); + break; + + default: + break; + } + + Rcg.copyTo(R_cam2gripper); + Tcg.copyTo(t_cam2gripper); +} +} diff --git a/modules/calib3d/test/test_calibration_hand_eye.cpp b/modules/calib3d/test/test_calibration_hand_eye.cpp new file mode 100644 index 0000000000..d2cef969b3 --- /dev/null +++ b/modules/calib3d/test/test_calibration_hand_eye.cpp @@ -0,0 +1,381 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" +#include "opencv2/calib3d.hpp" + +namespace opencv_test { namespace { + +class CV_CalibrateHandEyeTest : public cvtest::BaseTest +{ +public: + CV_CalibrateHandEyeTest() { + eps_rvec[CALIB_HAND_EYE_TSAI] = 1.0e-8; + eps_rvec[CALIB_HAND_EYE_PARK] = 1.0e-8; + eps_rvec[CALIB_HAND_EYE_HORAUD] = 1.0e-8; + eps_rvec[CALIB_HAND_EYE_ANDREFF] = 1.0e-8; + eps_rvec[CALIB_HAND_EYE_DANIILIDIS] = 1.0e-8; + + eps_tvec[CALIB_HAND_EYE_TSAI] = 1.0e-8; + eps_tvec[CALIB_HAND_EYE_PARK] = 1.0e-8; + eps_tvec[CALIB_HAND_EYE_HORAUD] = 1.0e-8; + eps_tvec[CALIB_HAND_EYE_ANDREFF] = 1.0e-8; + eps_tvec[CALIB_HAND_EYE_DANIILIDIS] = 1.0e-8; + + eps_rvec_noise[CALIB_HAND_EYE_TSAI] = 2.0e-2; + eps_rvec_noise[CALIB_HAND_EYE_PARK] = 2.0e-2; + eps_rvec_noise[CALIB_HAND_EYE_HORAUD] = 2.0e-2; + eps_rvec_noise[CALIB_HAND_EYE_ANDREFF] = 1.0e-2; + eps_rvec_noise[CALIB_HAND_EYE_DANIILIDIS] = 1.0e-2; + + eps_tvec_noise[CALIB_HAND_EYE_TSAI] = 5.0e-2; + eps_tvec_noise[CALIB_HAND_EYE_PARK] = 5.0e-2; + eps_tvec_noise[CALIB_HAND_EYE_HORAUD] = 5.0e-2; + eps_tvec_noise[CALIB_HAND_EYE_ANDREFF] = 5.0e-2; + eps_tvec_noise[CALIB_HAND_EYE_DANIILIDIS] = 5.0e-2; + } +protected: + virtual void run(int); + void generatePose(RNG& rng, double min_theta, double max_theta, + double min_tx, double max_tx, + double min_ty, double max_ty, + double min_tz, double max_tz, + Mat& R, Mat& tvec, + bool randSign=false); + void simulateData(RNG& rng, int nPoses, + std::vector &R_gripper2base, std::vector &t_gripper2base, + std::vector &R_target2cam, std::vector &t_target2cam, + bool noise, Mat& R_cam2gripper, Mat& t_cam2gripper); + Mat homogeneousInverse(const Mat& T); + std::string getMethodName(HandEyeCalibrationMethod method); + double sign_double(double val); + + double eps_rvec[5]; + double eps_tvec[5]; + double eps_rvec_noise[5]; + double eps_tvec_noise[5]; +}; + +void CV_CalibrateHandEyeTest::run(int) +{ + ts->set_failed_test_info(cvtest::TS::OK); + + RNG& rng = ts->get_rng(); + + std::vector > vec_rvec_diff(5); + std::vector > vec_tvec_diff(5); + std::vector > vec_rvec_diff_noise(5); + 
std::vector > vec_tvec_diff_noise(5); + + std::vector methods; + methods.push_back(CALIB_HAND_EYE_TSAI); + methods.push_back(CALIB_HAND_EYE_PARK); + methods.push_back(CALIB_HAND_EYE_HORAUD); + methods.push_back(CALIB_HAND_EYE_ANDREFF); + methods.push_back(CALIB_HAND_EYE_DANIILIDIS); + + const int nTests = 100; + for (int i = 0; i < nTests; i++) + { + const int nPoses = 10; + { + //No noise + std::vector R_gripper2base, t_gripper2base; + std::vector R_target2cam, t_target2cam; + Mat R_cam2gripper_true, t_cam2gripper_true; + + const bool noise = false; + simulateData(rng, nPoses, R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, noise, R_cam2gripper_true, t_cam2gripper_true); + + for (size_t idx = 0; idx < methods.size(); idx++) + { + Mat rvec_cam2gripper_true; + cv::Rodrigues(R_cam2gripper_true, rvec_cam2gripper_true); + + Mat R_cam2gripper_est, t_cam2gripper_est; + calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, R_cam2gripper_est, t_cam2gripper_est, methods[idx]); + + Mat rvec_cam2gripper_est; + cv::Rodrigues(R_cam2gripper_est, rvec_cam2gripper_est); + + double rvecDiff = cvtest::norm(rvec_cam2gripper_true, rvec_cam2gripper_est, NORM_L2); + double tvecDiff = cvtest::norm(t_cam2gripper_true, t_cam2gripper_est, NORM_L2); + + vec_rvec_diff[idx].push_back(rvecDiff); + vec_tvec_diff[idx].push_back(tvecDiff); + + const double epsilon_rvec = eps_rvec[idx]; + const double epsilon_tvec = eps_tvec[idx]; + + //Maybe a better accuracy test would be to compare the mean and std errors with some thresholds? + if (rvecDiff > epsilon_rvec || tvecDiff > epsilon_tvec) + { + ts->printf(cvtest::TS::LOG, "Invalid accuracy (no noise) for method: %s, rvecDiff: %f, epsilon_rvec: %f, tvecDiff: %f, epsilon_tvec: %f\n", + getMethodName(methods[idx]).c_str(), rvecDiff, epsilon_rvec, tvecDiff, epsilon_tvec); + ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); + } + } + } + + { + //Gaussian noise on transformations between calibration target frame and camera frame and between gripper and robot base frames + std::vector R_gripper2base, t_gripper2base; + std::vector R_target2cam, t_target2cam; + Mat R_cam2gripper_true, t_cam2gripper_true; + + const bool noise = true; + simulateData(rng, nPoses, R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, noise, R_cam2gripper_true, t_cam2gripper_true); + + for (size_t idx = 0; idx < methods.size(); idx++) + { + Mat rvec_cam2gripper_true; + cv::Rodrigues(R_cam2gripper_true, rvec_cam2gripper_true); + + Mat R_cam2gripper_est, t_cam2gripper_est; + calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, R_cam2gripper_est, t_cam2gripper_est, methods[idx]); + + Mat rvec_cam2gripper_est; + cv::Rodrigues(R_cam2gripper_est, rvec_cam2gripper_est); + + double rvecDiff = cvtest::norm(rvec_cam2gripper_true, rvec_cam2gripper_est, NORM_L2); + double tvecDiff = cvtest::norm(t_cam2gripper_true, t_cam2gripper_est, NORM_L2); + + vec_rvec_diff_noise[idx].push_back(rvecDiff); + vec_tvec_diff_noise[idx].push_back(tvecDiff); + + const double epsilon_rvec = eps_rvec_noise[idx]; + const double epsilon_tvec = eps_tvec_noise[idx]; + + //Maybe a better accuracy test would be to compare the mean and std errors with some thresholds? 
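+                //The noisy case uses the looser per-method thresholds eps_rvec_noise/eps_tvec_noise, since
+                //simulateData() perturbs both the gripper->base and target->camera transformations.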
+ if (rvecDiff > epsilon_rvec || tvecDiff > epsilon_tvec) + { + ts->printf(cvtest::TS::LOG, "Invalid accuracy (noise) for method: %s, rvecDiff: %f, epsilon_rvec: %f, tvecDiff: %f, epsilon_tvec: %f\n", + getMethodName(methods[idx]).c_str(), rvecDiff, epsilon_rvec, tvecDiff, epsilon_tvec); + ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); + } + } + } + } + + for (size_t idx = 0; idx < methods.size(); idx++) + { + { + double max_rvec_diff = *std::max_element(vec_rvec_diff[idx].begin(), vec_rvec_diff[idx].end()); + double mean_rvec_diff = std::accumulate(vec_rvec_diff[idx].begin(), + vec_rvec_diff[idx].end(), 0.0) / vec_rvec_diff[idx].size(); + double sq_sum_rvec_diff = std::inner_product(vec_rvec_diff[idx].begin(), vec_rvec_diff[idx].end(), + vec_rvec_diff[idx].begin(), 0.0); + double std_rvec_diff = std::sqrt(sq_sum_rvec_diff / vec_rvec_diff[idx].size() - mean_rvec_diff * mean_rvec_diff); + + double max_tvec_diff = *std::max_element(vec_tvec_diff[idx].begin(), vec_tvec_diff[idx].end()); + double mean_tvec_diff = std::accumulate(vec_tvec_diff[idx].begin(), + vec_tvec_diff[idx].end(), 0.0) / vec_tvec_diff[idx].size(); + double sq_sum_tvec_diff = std::inner_product(vec_tvec_diff[idx].begin(), vec_tvec_diff[idx].end(), + vec_tvec_diff[idx].begin(), 0.0); + double std_tvec_diff = std::sqrt(sq_sum_tvec_diff / vec_tvec_diff[idx].size() - mean_tvec_diff * mean_tvec_diff); + + std::cout << "\nMethod " << getMethodName(methods[idx]) << ":\n" + << "Max rvec error: " << max_rvec_diff << ", Mean rvec error: " << mean_rvec_diff + << ", Std rvec error: " << std_rvec_diff << "\n" + << "Max tvec error: " << max_tvec_diff << ", Mean tvec error: " << mean_tvec_diff + << ", Std tvec error: " << std_tvec_diff << std::endl; + } + { + double max_rvec_diff = *std::max_element(vec_rvec_diff_noise[idx].begin(), vec_rvec_diff_noise[idx].end()); + double mean_rvec_diff = std::accumulate(vec_rvec_diff_noise[idx].begin(), + vec_rvec_diff_noise[idx].end(), 0.0) / vec_rvec_diff_noise[idx].size(); + double sq_sum_rvec_diff = std::inner_product(vec_rvec_diff_noise[idx].begin(), vec_rvec_diff_noise[idx].end(), + vec_rvec_diff_noise[idx].begin(), 0.0); + double std_rvec_diff = std::sqrt(sq_sum_rvec_diff / vec_rvec_diff_noise[idx].size() - mean_rvec_diff * mean_rvec_diff); + + double max_tvec_diff = *std::max_element(vec_tvec_diff_noise[idx].begin(), vec_tvec_diff_noise[idx].end()); + double mean_tvec_diff = std::accumulate(vec_tvec_diff_noise[idx].begin(), + vec_tvec_diff_noise[idx].end(), 0.0) / vec_tvec_diff_noise[idx].size(); + double sq_sum_tvec_diff = std::inner_product(vec_tvec_diff_noise[idx].begin(), vec_tvec_diff_noise[idx].end(), + vec_tvec_diff_noise[idx].begin(), 0.0); + double std_tvec_diff = std::sqrt(sq_sum_tvec_diff / vec_tvec_diff_noise[idx].size() - mean_tvec_diff * mean_tvec_diff); + + std::cout << "Method (noise) " << getMethodName(methods[idx]) << ":\n" + << "Max rvec error: " << max_rvec_diff << ", Mean rvec error: " << mean_rvec_diff + << ", Std rvec error: " << std_rvec_diff << "\n" + << "Max tvec error: " << max_tvec_diff << ", Mean tvec error: " << mean_tvec_diff + << ", Std tvec error: " << std_tvec_diff << std::endl; + } + } +} + +void CV_CalibrateHandEyeTest::generatePose(RNG& rng, double min_theta, double max_theta, + double min_tx, double max_tx, + double min_ty, double max_ty, + double min_tz, double max_tz, + Mat& R, Mat& tvec, + bool random_sign) +{ + Mat axis(3, 1, CV_64FC1); + for (int i = 0; i < 3; i++) + { + axis.at(i,0) = rng.uniform(-1.0, 1.0); + } + double theta = 
rng.uniform(min_theta, max_theta); + if (random_sign) + { + theta *= sign_double(rng.uniform(-1.0, 1.0)); + } + + Mat rvec(3, 1, CV_64FC1); + rvec.at(0,0) = theta*axis.at(0,0); + rvec.at(1,0) = theta*axis.at(1,0); + rvec.at(2,0) = theta*axis.at(2,0); + + tvec.create(3, 1, CV_64FC1); + tvec.at(0,0) = rng.uniform(min_tx, max_tx); + tvec.at(1,0) = rng.uniform(min_ty, max_ty); + tvec.at(2,0) = rng.uniform(min_tz, max_tz); + + if (random_sign) + { + tvec.at(0,0) *= sign_double(rng.uniform(-1.0, 1.0)); + tvec.at(1,0) *= sign_double(rng.uniform(-1.0, 1.0)); + tvec.at(2,0) *= sign_double(rng.uniform(-1.0, 1.0)); + } + + cv::Rodrigues(rvec, R); +} + +void CV_CalibrateHandEyeTest::simulateData(RNG& rng, int nPoses, + std::vector &R_gripper2base, std::vector &t_gripper2base, + std::vector &R_target2cam, std::vector &t_target2cam, + bool noise, Mat& R_cam2gripper, Mat& t_cam2gripper) +{ + //to avoid generating values close to zero, + //we use positive range values and randomize the sign + const bool random_sign = true; + generatePose(rng, 10.0*CV_PI/180.0, 50.0*CV_PI/180.0, + 0.05, 0.5, 0.05, 0.5, 0.05, 0.5, + R_cam2gripper, t_cam2gripper, random_sign); + + Mat R_target2base, t_target2base; + generatePose(rng, 5.0*CV_PI/180.0, 85.0*CV_PI/180.0, + 0.5, 3.5, 0.5, 3.5, 0.5, 3.5, + R_target2base, t_target2base, random_sign); + + for (int i = 0; i < nPoses; i++) + { + Mat R_gripper2base_, t_gripper2base_; + generatePose(rng, 5.0*CV_PI/180.0, 45.0*CV_PI/180.0, + 0.5, 1.5, 0.5, 1.5, 0.5, 1.5, + R_gripper2base_, t_gripper2base_, random_sign); + + R_gripper2base.push_back(R_gripper2base_); + t_gripper2base.push_back(t_gripper2base_); + + Mat T_cam2gripper = Mat::eye(4, 4, CV_64FC1); + R_cam2gripper.copyTo(T_cam2gripper(Rect(0, 0, 3, 3))); + t_cam2gripper.copyTo(T_cam2gripper(Rect(3, 0, 1, 3))); + + Mat T_gripper2base = Mat::eye(4, 4, CV_64FC1); + R_gripper2base_.copyTo(T_gripper2base(Rect(0, 0, 3, 3))); + t_gripper2base_.copyTo(T_gripper2base(Rect(3, 0, 1, 3))); + + Mat T_base2cam = homogeneousInverse(T_cam2gripper) * homogeneousInverse(T_gripper2base); + Mat T_target2base = Mat::eye(4, 4, CV_64FC1); + R_target2base.copyTo(T_target2base(Rect(0, 0, 3, 3))); + t_target2base.copyTo(T_target2base(Rect(3, 0, 1, 3))); + Mat T_target2cam = T_base2cam * T_target2base; + + if (noise) + { + //Add some noise for the transformation between the target and the camera + Mat R_target2cam_noise = T_target2cam(Rect(0, 0, 3, 3)); + Mat rvec_target2cam_noise; + cv::Rodrigues(R_target2cam_noise, rvec_target2cam_noise); + rvec_target2cam_noise.at(0,0) += rng.gaussian(0.002); + rvec_target2cam_noise.at(1,0) += rng.gaussian(0.002); + rvec_target2cam_noise.at(2,0) += rng.gaussian(0.002); + + cv::Rodrigues(rvec_target2cam_noise, R_target2cam_noise); + + Mat t_target2cam_noise = T_target2cam(Rect(3, 0, 1, 3)); + t_target2cam_noise.at(0,0) += rng.gaussian(0.005); + t_target2cam_noise.at(1,0) += rng.gaussian(0.005); + t_target2cam_noise.at(2,0) += rng.gaussian(0.005); + + //Add some noise for the transformation between the gripper and the robot base + Mat R_gripper2base_noise = T_gripper2base(Rect(0, 0, 3, 3)); + Mat rvec_gripper2base_noise; + cv::Rodrigues(R_gripper2base_noise, rvec_gripper2base_noise); + rvec_gripper2base_noise.at(0,0) += rng.gaussian(0.001); + rvec_gripper2base_noise.at(1,0) += rng.gaussian(0.001); + rvec_gripper2base_noise.at(2,0) += rng.gaussian(0.001); + + cv::Rodrigues(rvec_gripper2base_noise, R_gripper2base_noise); + + Mat t_gripper2base_noise = T_gripper2base(Rect(3, 0, 1, 3)); + t_gripper2base_noise.at(0,0) += 
rng.gaussian(0.001); + t_gripper2base_noise.at(1,0) += rng.gaussian(0.001); + t_gripper2base_noise.at(2,0) += rng.gaussian(0.001); + } + + R_target2cam.push_back(T_target2cam(Rect(0, 0, 3, 3))); + t_target2cam.push_back(T_target2cam(Rect(3, 0, 1, 3))); + } +} + +Mat CV_CalibrateHandEyeTest::homogeneousInverse(const Mat& T) +{ + CV_Assert( T.rows == 4 && T.cols == 4 ); + + Mat R = T(Rect(0, 0, 3, 3)); + Mat t = T(Rect(3, 0, 1, 3)); + Mat Rt = R.t(); + Mat tinv = -Rt * t; + Mat Tinv = Mat::eye(4, 4, T.type()); + Rt.copyTo(Tinv(Rect(0, 0, 3, 3))); + tinv.copyTo(Tinv(Rect(3, 0, 1, 3))); + + return Tinv; +} + +std::string CV_CalibrateHandEyeTest::getMethodName(HandEyeCalibrationMethod method) +{ + std::string method_name = ""; + switch (method) + { + case CALIB_HAND_EYE_TSAI: + method_name = "Tsai"; + break; + + case CALIB_HAND_EYE_PARK: + method_name = "Park"; + break; + + case CALIB_HAND_EYE_HORAUD: + method_name = "Horaud"; + break; + + case CALIB_HAND_EYE_ANDREFF: + method_name = "Andreff"; + break; + + case CALIB_HAND_EYE_DANIILIDIS: + method_name = "Daniilidis"; + break; + + default: + break; + } + + return method_name; +} + +double CV_CalibrateHandEyeTest::sign_double(double val) +{ + return (0 < val) - (val < 0); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +TEST(Calib3d_CalibrateHandEye, regression) { CV_CalibrateHandEyeTest test; test.safe_run(); } + +}} // namespace From 99b39aa5bd0906f3eb240735cfa87688b46bc33b Mon Sep 17 00:00:00 2001 From: Vitaly Tuzov Date: Tue, 5 Mar 2019 17:21:21 +0300 Subject: [PATCH 19/19] Fixed out of bound reading in LINEAR_EXACT resize for 8UC3 --- modules/imgproc/src/resize.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/imgproc/src/resize.cpp b/modules/imgproc/src/resize.cpp index 996f6977b1..2882f26341 100644 --- a/modules/imgproc/src/resize.cpp +++ b/modules/imgproc/src/resize.cpp @@ -485,7 +485,7 @@ void hlineResizeCn(uint8_t* src, int, int *o v_store(ofst3, vx_load(ofst + i) * vx_setall_s32(3)); v_uint8 v_src01, v_src23; v_uint16 v_src0, v_src1, v_src2, v_src3; - v_zip(vx_lut_quads(src, ofst3), vx_lut_quads(src+3, ofst3), v_src01, v_src23); + v_zip(vx_lut_quads(src, ofst3), v_reinterpret_as_u8(v_reinterpret_as_u32(vx_lut_quads(src+2, ofst3)) >> 8), v_src01, v_src23); v_expand(v_src01, v_src0, v_src1); v_expand(v_src23, v_src2, v_src3);