Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/14184/head
Alexander Alekhin 6 years ago
commit 7442100caa
  1. 8
      CMakeLists.txt
  2. 2
      doc/tutorials/dnn/dnn_googlenet/dnn_googlenet.markdown
  3. 152
      modules/core/include/opencv2/core/hal/intrin_avx.hpp
  4. 24
      modules/core/include/opencv2/core/matx.hpp
  5. 10
      modules/core/src/copy.cpp
  6. 199
      modules/core/src/lda.cpp
  7. 2
      modules/core/test/test_math.cpp
  8. 10
      modules/dnn/CMakeLists.txt
  9. 10
      modules/dnn/include/opencv2/dnn/dnn.hpp
  10. 43
      modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
  11. 5
      modules/dnn/perf/perf_net.cpp
  12. 7
      modules/dnn/src/dnn.cpp
  13. 10
      modules/dnn/src/layers/pooling_layer.cpp
  14. 124
      modules/dnn/src/op_inf_engine.cpp
  15. 16
      modules/dnn/src/op_inf_engine.hpp
  16. 16
      modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
  17. 3
      modules/dnn/src/tensorflow/tf_importer.cpp
  18. 84
      modules/dnn/test/test_backends.cpp
  19. 23
      modules/dnn/test/test_caffe_importer.cpp
  20. 293
      modules/dnn/test/test_common.cpp
  21. 309
      modules/dnn/test/test_common.hpp
  22. 55
      modules/dnn/test/test_darknet_importer.cpp
  23. 97
      modules/dnn/test/test_halide_layers.cpp
  24. 11
      modules/dnn/test/test_ie_models.cpp
  25. 33
      modules/dnn/test/test_layers.cpp
  26. 160
      modules/dnn/test/test_onnx_importer.cpp
  27. 153
      modules/dnn/test/test_tf_importer.cpp
  28. 20
      modules/dnn/test/test_torch_importer.cpp

@ -1185,7 +1185,13 @@ status(" 3rdparty dependencies:" ${deps_3rdparty})
# ========================== OpenCV modules ========================== # ========================== OpenCV modules ==========================
status("") status("")
status(" OpenCV modules:") status(" OpenCV modules:")
string(REPLACE "opencv_" "" OPENCV_MODULES_BUILD_ST "${OPENCV_MODULES_BUILD}") set(OPENCV_MODULES_BUILD_ST "")
foreach(the_module ${OPENCV_MODULES_BUILD})
if(NOT OPENCV_MODULE_${the_module}_CLASS STREQUAL "INTERNAL" OR the_module STREQUAL "opencv_ts")
list(APPEND OPENCV_MODULES_BUILD_ST "${the_module}")
endif()
endforeach()
string(REPLACE "opencv_" "" OPENCV_MODULES_BUILD_ST "${OPENCV_MODULES_BUILD_ST}")
string(REPLACE "opencv_" "" OPENCV_MODULES_DISABLED_USER_ST "${OPENCV_MODULES_DISABLED_USER}") string(REPLACE "opencv_" "" OPENCV_MODULES_DISABLED_USER_ST "${OPENCV_MODULES_DISABLED_USER}")
string(REPLACE "opencv_" "" OPENCV_MODULES_DISABLED_AUTO_ST "${OPENCV_MODULES_DISABLED_AUTO}") string(REPLACE "opencv_" "" OPENCV_MODULES_DISABLED_AUTO_ST "${OPENCV_MODULES_DISABLED_AUTO}")
string(REPLACE "opencv_" "" OPENCV_MODULES_DISABLED_FORCE_ST "${OPENCV_MODULES_DISABLED_FORCE}") string(REPLACE "opencv_" "" OPENCV_MODULES_DISABLED_FORCE_ST "${OPENCV_MODULES_DISABLED_FORCE}")

@ -25,7 +25,7 @@ Explanation
[bvlc_googlenet.caffemodel](http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel) [bvlc_googlenet.caffemodel](http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel)
Also you need file with names of [ILSVRC2012](http://image-net.org/challenges/LSVRC/2012/browse-synsets) classes: Also you need file with names of [ILSVRC2012](http://image-net.org/challenges/LSVRC/2012/browse-synsets) classes:
[classification_classes_ILSVRC2012.txt](https://github.com/opencv/opencv/tree/master/samples/dnn/classification_classes_ILSVRC2012.txt). [classification_classes_ILSVRC2012.txt](https://github.com/opencv/opencv/blob/master/samples/data/dnn/classification_classes_ILSVRC2012.txt).
Put these files into working dir of this program example. Put these files into working dir of this program example.

@ -2022,7 +2022,7 @@ inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& a, v_uint64x4& b
b = v_uint64x4(b0); b = v_uint64x4(b0);
} }
inline void v_load_deinterleave( const uchar* ptr, v_uint8x32& b, v_uint8x32& g, v_uint8x32& r ) inline void v_load_deinterleave( const uchar* ptr, v_uint8x32& a, v_uint8x32& b, v_uint8x32& c )
{ {
__m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr);
__m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 32)); __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 32));
@ -2051,12 +2051,12 @@ inline void v_load_deinterleave( const uchar* ptr, v_uint8x32& b, v_uint8x32& g,
g0 = _mm256_shuffle_epi8(g0, sh_g); g0 = _mm256_shuffle_epi8(g0, sh_g);
r0 = _mm256_shuffle_epi8(r0, sh_r); r0 = _mm256_shuffle_epi8(r0, sh_r);
b = v_uint8x32(b0); a = v_uint8x32(b0);
g = v_uint8x32(g0); b = v_uint8x32(g0);
r = v_uint8x32(r0); c = v_uint8x32(r0);
} }
inline void v_load_deinterleave( const ushort* ptr, v_uint16x16& b, v_uint16x16& g, v_uint16x16& r ) inline void v_load_deinterleave( const ushort* ptr, v_uint16x16& a, v_uint16x16& b, v_uint16x16& c )
{ {
__m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr);
__m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 16)); __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 16));
@ -2082,12 +2082,12 @@ inline void v_load_deinterleave( const ushort* ptr, v_uint16x16& b, v_uint16x16&
g0 = _mm256_shuffle_epi8(g0, sh_g); g0 = _mm256_shuffle_epi8(g0, sh_g);
r0 = _mm256_shuffle_epi8(r0, sh_r); r0 = _mm256_shuffle_epi8(r0, sh_r);
b = v_uint16x16(b0); a = v_uint16x16(b0);
g = v_uint16x16(g0); b = v_uint16x16(g0);
r = v_uint16x16(r0); c = v_uint16x16(r0);
} }
inline void v_load_deinterleave( const unsigned* ptr, v_uint32x8& b, v_uint32x8& g, v_uint32x8& r ) inline void v_load_deinterleave( const unsigned* ptr, v_uint32x8& a, v_uint32x8& b, v_uint32x8& c )
{ {
__m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr);
__m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 8)); __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 8));
@ -2104,12 +2104,12 @@ inline void v_load_deinterleave( const unsigned* ptr, v_uint32x8& b, v_uint32x8&
g0 = _mm256_shuffle_epi32(g0, 0xb1); g0 = _mm256_shuffle_epi32(g0, 0xb1);
r0 = _mm256_shuffle_epi32(r0, 0xc6); r0 = _mm256_shuffle_epi32(r0, 0xc6);
b = v_uint32x8(b0); a = v_uint32x8(b0);
g = v_uint32x8(g0); b = v_uint32x8(g0);
r = v_uint32x8(r0); c = v_uint32x8(r0);
} }
inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& b, v_uint64x4& g, v_uint64x4& r ) inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& a, v_uint64x4& b, v_uint64x4& c )
{ {
__m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr);
__m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 4)); __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 4));
@ -2122,12 +2122,12 @@ inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& b, v_uint64x4& g
__m256i g0 = _mm256_alignr_epi8(s12, s01, 8); __m256i g0 = _mm256_alignr_epi8(s12, s01, 8);
__m256i r0 = _mm256_unpackhi_epi64(s20r, s12); __m256i r0 = _mm256_unpackhi_epi64(s20r, s12);
b = v_uint64x4(b0); a = v_uint64x4(b0);
g = v_uint64x4(g0); b = v_uint64x4(g0);
r = v_uint64x4(r0); c = v_uint64x4(r0);
} }
inline void v_load_deinterleave( const uchar* ptr, v_uint8x32& b, v_uint8x32& g, v_uint8x32& r, v_uint8x32& a ) inline void v_load_deinterleave( const uchar* ptr, v_uint8x32& a, v_uint8x32& b, v_uint8x32& c, v_uint8x32& d )
{ {
__m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr);
__m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 32)); __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 32));
@ -2156,13 +2156,13 @@ inline void v_load_deinterleave( const uchar* ptr, v_uint8x32& b, v_uint8x32& g,
__m256i r0 = _mm256_unpacklo_epi32(phl, phh); __m256i r0 = _mm256_unpacklo_epi32(phl, phh);
__m256i a0 = _mm256_unpackhi_epi32(phl, phh); __m256i a0 = _mm256_unpackhi_epi32(phl, phh);
b = v_uint8x32(b0); a = v_uint8x32(b0);
g = v_uint8x32(g0); b = v_uint8x32(g0);
r = v_uint8x32(r0); c = v_uint8x32(r0);
a = v_uint8x32(a0); d = v_uint8x32(a0);
} }
inline void v_load_deinterleave( const ushort* ptr, v_uint16x16& b, v_uint16x16& g, v_uint16x16& r, v_uint16x16& a ) inline void v_load_deinterleave( const ushort* ptr, v_uint16x16& a, v_uint16x16& b, v_uint16x16& c, v_uint16x16& d )
{ {
__m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr);
__m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 16)); __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 16));
@ -2190,13 +2190,13 @@ inline void v_load_deinterleave( const ushort* ptr, v_uint16x16& b, v_uint16x16&
__m256i r0 = _mm256_unpacklo_epi32(phl, phh); __m256i r0 = _mm256_unpacklo_epi32(phl, phh);
__m256i a0 = _mm256_unpackhi_epi32(phl, phh); __m256i a0 = _mm256_unpackhi_epi32(phl, phh);
b = v_uint16x16(b0); a = v_uint16x16(b0);
g = v_uint16x16(g0); b = v_uint16x16(g0);
r = v_uint16x16(r0); c = v_uint16x16(r0);
a = v_uint16x16(a0); d = v_uint16x16(a0);
} }
inline void v_load_deinterleave( const unsigned* ptr, v_uint32x8& b, v_uint32x8& g, v_uint32x8& r, v_uint32x8& a ) inline void v_load_deinterleave( const unsigned* ptr, v_uint32x8& a, v_uint32x8& b, v_uint32x8& c, v_uint32x8& d )
{ {
__m256i p0 = _mm256_loadu_si256((const __m256i*)ptr); __m256i p0 = _mm256_loadu_si256((const __m256i*)ptr);
__m256i p1 = _mm256_loadu_si256((const __m256i*)(ptr + 8)); __m256i p1 = _mm256_loadu_si256((const __m256i*)(ptr + 8));
@ -2218,13 +2218,13 @@ inline void v_load_deinterleave( const unsigned* ptr, v_uint32x8& b, v_uint32x8&
__m256i r0 = _mm256_unpacklo_epi32(phl, phh); __m256i r0 = _mm256_unpacklo_epi32(phl, phh);
__m256i a0 = _mm256_unpackhi_epi32(phl, phh); __m256i a0 = _mm256_unpackhi_epi32(phl, phh);
b = v_uint32x8(b0); a = v_uint32x8(b0);
g = v_uint32x8(g0); b = v_uint32x8(g0);
r = v_uint32x8(r0); c = v_uint32x8(r0);
a = v_uint32x8(a0); d = v_uint32x8(a0);
} }
inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& b, v_uint64x4& g, v_uint64x4& r, v_uint64x4& a ) inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& a, v_uint64x4& b, v_uint64x4& c, v_uint64x4& d )
{ {
__m256i bgra0 = _mm256_loadu_si256((const __m256i*)ptr); __m256i bgra0 = _mm256_loadu_si256((const __m256i*)ptr);
__m256i bgra1 = _mm256_loadu_si256((const __m256i*)(ptr + 4)); __m256i bgra1 = _mm256_loadu_si256((const __m256i*)(ptr + 4));
@ -2241,10 +2241,10 @@ inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& b, v_uint64x4& g
__m256i r0 = _mm256_unpacklo_epi64(h02, h13); __m256i r0 = _mm256_unpacklo_epi64(h02, h13);
__m256i a0 = _mm256_unpackhi_epi64(h02, h13); __m256i a0 = _mm256_unpackhi_epi64(h02, h13);
b = v_uint64x4(b0); a = v_uint64x4(b0);
g = v_uint64x4(g0); b = v_uint64x4(g0);
r = v_uint64x4(r0); c = v_uint64x4(r0);
a = v_uint64x4(a0); d = v_uint64x4(a0);
} }
///////////////////////////// store interleave ///////////////////////////////////// ///////////////////////////// store interleave /////////////////////////////////////
@ -2353,7 +2353,7 @@ inline void v_store_interleave( uint64* ptr, const v_uint64x4& x, const v_uint64
} }
} }
inline void v_store_interleave( uchar* ptr, const v_uint8x32& b, const v_uint8x32& g, const v_uint8x32& r, inline void v_store_interleave( uchar* ptr, const v_uint8x32& a, const v_uint8x32& b, const v_uint8x32& c,
hal::StoreMode mode=hal::STORE_UNALIGNED ) hal::StoreMode mode=hal::STORE_UNALIGNED )
{ {
const __m256i sh_b = _mm256_setr_epi8( const __m256i sh_b = _mm256_setr_epi8(
@ -2366,9 +2366,9 @@ inline void v_store_interleave( uchar* ptr, const v_uint8x32& b, const v_uint8x3
10, 5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15, 10, 5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15,
10, 5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15); 10, 5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15);
__m256i b0 = _mm256_shuffle_epi8(b.val, sh_b); __m256i b0 = _mm256_shuffle_epi8(a.val, sh_b);
__m256i g0 = _mm256_shuffle_epi8(g.val, sh_g); __m256i g0 = _mm256_shuffle_epi8(b.val, sh_g);
__m256i r0 = _mm256_shuffle_epi8(r.val, sh_r); __m256i r0 = _mm256_shuffle_epi8(c.val, sh_r);
const __m256i m0 = _mm256_setr_epi8(0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, const __m256i m0 = _mm256_setr_epi8(0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0,
0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0); 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0);
@ -2403,7 +2403,7 @@ inline void v_store_interleave( uchar* ptr, const v_uint8x32& b, const v_uint8x3
} }
} }
inline void v_store_interleave( ushort* ptr, const v_uint16x16& b, const v_uint16x16& g, const v_uint16x16& r, inline void v_store_interleave( ushort* ptr, const v_uint16x16& a, const v_uint16x16& b, const v_uint16x16& c,
hal::StoreMode mode=hal::STORE_UNALIGNED ) hal::StoreMode mode=hal::STORE_UNALIGNED )
{ {
const __m256i sh_b = _mm256_setr_epi8( const __m256i sh_b = _mm256_setr_epi8(
@ -2416,9 +2416,9 @@ inline void v_store_interleave( ushort* ptr, const v_uint16x16& b, const v_uint1
4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15,
4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15); 4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15);
__m256i b0 = _mm256_shuffle_epi8(b.val, sh_b); __m256i b0 = _mm256_shuffle_epi8(a.val, sh_b);
__m256i g0 = _mm256_shuffle_epi8(g.val, sh_g); __m256i g0 = _mm256_shuffle_epi8(b.val, sh_g);
__m256i r0 = _mm256_shuffle_epi8(r.val, sh_r); __m256i r0 = _mm256_shuffle_epi8(c.val, sh_r);
const __m256i m0 = _mm256_setr_epi8(0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, const __m256i m0 = _mm256_setr_epi8(0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1,
0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0); 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0);
@ -2453,12 +2453,12 @@ inline void v_store_interleave( ushort* ptr, const v_uint16x16& b, const v_uint1
} }
} }
inline void v_store_interleave( unsigned* ptr, const v_uint32x8& b, const v_uint32x8& g, const v_uint32x8& r, inline void v_store_interleave( unsigned* ptr, const v_uint32x8& a, const v_uint32x8& b, const v_uint32x8& c,
hal::StoreMode mode=hal::STORE_UNALIGNED ) hal::StoreMode mode=hal::STORE_UNALIGNED )
{ {
__m256i b0 = _mm256_shuffle_epi32(b.val, 0x6c); __m256i b0 = _mm256_shuffle_epi32(a.val, 0x6c);
__m256i g0 = _mm256_shuffle_epi32(g.val, 0xb1); __m256i g0 = _mm256_shuffle_epi32(b.val, 0xb1);
__m256i r0 = _mm256_shuffle_epi32(r.val, 0xc6); __m256i r0 = _mm256_shuffle_epi32(c.val, 0xc6);
__m256i p0 = _mm256_blend_epi32(_mm256_blend_epi32(b0, g0, 0x92), r0, 0x24); __m256i p0 = _mm256_blend_epi32(_mm256_blend_epi32(b0, g0, 0x92), r0, 0x24);
__m256i p1 = _mm256_blend_epi32(_mm256_blend_epi32(g0, r0, 0x92), b0, 0x24); __m256i p1 = _mm256_blend_epi32(_mm256_blend_epi32(g0, r0, 0x92), b0, 0x24);
@ -2488,12 +2488,12 @@ inline void v_store_interleave( unsigned* ptr, const v_uint32x8& b, const v_uint
} }
} }
inline void v_store_interleave( uint64* ptr, const v_uint64x4& b, const v_uint64x4& g, const v_uint64x4& r, inline void v_store_interleave( uint64* ptr, const v_uint64x4& a, const v_uint64x4& b, const v_uint64x4& c,
hal::StoreMode mode=hal::STORE_UNALIGNED ) hal::StoreMode mode=hal::STORE_UNALIGNED )
{ {
__m256i s01 = _mm256_unpacklo_epi64(b.val, g.val); __m256i s01 = _mm256_unpacklo_epi64(a.val, b.val);
__m256i s12 = _mm256_unpackhi_epi64(g.val, r.val); __m256i s12 = _mm256_unpackhi_epi64(b.val, c.val);
__m256i s20 = _mm256_blend_epi32(r.val, b.val, 0xcc); __m256i s20 = _mm256_blend_epi32(c.val, a.val, 0xcc);
__m256i bgr0 = _mm256_permute2x128_si256(s01, s20, 0 + 2*16); __m256i bgr0 = _mm256_permute2x128_si256(s01, s20, 0 + 2*16);
__m256i bgr1 = _mm256_blend_epi32(s01, s12, 0x0f); __m256i bgr1 = _mm256_blend_epi32(s01, s12, 0x0f);
@ -2519,14 +2519,14 @@ inline void v_store_interleave( uint64* ptr, const v_uint64x4& b, const v_uint64
} }
} }
inline void v_store_interleave( uchar* ptr, const v_uint8x32& b, const v_uint8x32& g, inline void v_store_interleave( uchar* ptr, const v_uint8x32& a, const v_uint8x32& b,
const v_uint8x32& r, const v_uint8x32& a, const v_uint8x32& c, const v_uint8x32& d,
hal::StoreMode mode=hal::STORE_UNALIGNED ) hal::StoreMode mode=hal::STORE_UNALIGNED )
{ {
__m256i bg0 = _mm256_unpacklo_epi8(b.val, g.val); __m256i bg0 = _mm256_unpacklo_epi8(a.val, b.val);
__m256i bg1 = _mm256_unpackhi_epi8(b.val, g.val); __m256i bg1 = _mm256_unpackhi_epi8(a.val, b.val);
__m256i ra0 = _mm256_unpacklo_epi8(r.val, a.val); __m256i ra0 = _mm256_unpacklo_epi8(c.val, d.val);
__m256i ra1 = _mm256_unpackhi_epi8(r.val, a.val); __m256i ra1 = _mm256_unpackhi_epi8(c.val, d.val);
__m256i bgra0_ = _mm256_unpacklo_epi16(bg0, ra0); __m256i bgra0_ = _mm256_unpacklo_epi16(bg0, ra0);
__m256i bgra1_ = _mm256_unpackhi_epi16(bg0, ra0); __m256i bgra1_ = _mm256_unpackhi_epi16(bg0, ra0);
@ -2561,14 +2561,14 @@ inline void v_store_interleave( uchar* ptr, const v_uint8x32& b, const v_uint8x3
} }
} }
inline void v_store_interleave( ushort* ptr, const v_uint16x16& b, const v_uint16x16& g, inline void v_store_interleave( ushort* ptr, const v_uint16x16& a, const v_uint16x16& b,
const v_uint16x16& r, const v_uint16x16& a, const v_uint16x16& c, const v_uint16x16& d,
hal::StoreMode mode=hal::STORE_UNALIGNED ) hal::StoreMode mode=hal::STORE_UNALIGNED )
{ {
__m256i bg0 = _mm256_unpacklo_epi16(b.val, g.val); __m256i bg0 = _mm256_unpacklo_epi16(a.val, b.val);
__m256i bg1 = _mm256_unpackhi_epi16(b.val, g.val); __m256i bg1 = _mm256_unpackhi_epi16(a.val, b.val);
__m256i ra0 = _mm256_unpacklo_epi16(r.val, a.val); __m256i ra0 = _mm256_unpacklo_epi16(c.val, d.val);
__m256i ra1 = _mm256_unpackhi_epi16(r.val, a.val); __m256i ra1 = _mm256_unpackhi_epi16(c.val, d.val);
__m256i bgra0_ = _mm256_unpacklo_epi32(bg0, ra0); __m256i bgra0_ = _mm256_unpacklo_epi32(bg0, ra0);
__m256i bgra1_ = _mm256_unpackhi_epi32(bg0, ra0); __m256i bgra1_ = _mm256_unpackhi_epi32(bg0, ra0);
@ -2603,14 +2603,14 @@ inline void v_store_interleave( ushort* ptr, const v_uint16x16& b, const v_uint1
} }
} }
inline void v_store_interleave( unsigned* ptr, const v_uint32x8& b, const v_uint32x8& g, inline void v_store_interleave( unsigned* ptr, const v_uint32x8& a, const v_uint32x8& b,
const v_uint32x8& r, const v_uint32x8& a, const v_uint32x8& c, const v_uint32x8& d,
hal::StoreMode mode=hal::STORE_UNALIGNED ) hal::StoreMode mode=hal::STORE_UNALIGNED )
{ {
__m256i bg0 = _mm256_unpacklo_epi32(b.val, g.val); __m256i bg0 = _mm256_unpacklo_epi32(a.val, b.val);
__m256i bg1 = _mm256_unpackhi_epi32(b.val, g.val); __m256i bg1 = _mm256_unpackhi_epi32(a.val, b.val);
__m256i ra0 = _mm256_unpacklo_epi32(r.val, a.val); __m256i ra0 = _mm256_unpacklo_epi32(c.val, d.val);
__m256i ra1 = _mm256_unpackhi_epi32(r.val, a.val); __m256i ra1 = _mm256_unpackhi_epi32(c.val, d.val);
__m256i bgra0_ = _mm256_unpacklo_epi64(bg0, ra0); __m256i bgra0_ = _mm256_unpacklo_epi64(bg0, ra0);
__m256i bgra1_ = _mm256_unpackhi_epi64(bg0, ra0); __m256i bgra1_ = _mm256_unpackhi_epi64(bg0, ra0);
@ -2645,14 +2645,14 @@ inline void v_store_interleave( unsigned* ptr, const v_uint32x8& b, const v_uint
} }
} }
inline void v_store_interleave( uint64* ptr, const v_uint64x4& b, const v_uint64x4& g, inline void v_store_interleave( uint64* ptr, const v_uint64x4& a, const v_uint64x4& b,
const v_uint64x4& r, const v_uint64x4& a, const v_uint64x4& c, const v_uint64x4& d,
hal::StoreMode mode=hal::STORE_UNALIGNED ) hal::StoreMode mode=hal::STORE_UNALIGNED )
{ {
__m256i bg0 = _mm256_unpacklo_epi64(b.val, g.val); __m256i bg0 = _mm256_unpacklo_epi64(a.val, b.val);
__m256i bg1 = _mm256_unpackhi_epi64(b.val, g.val); __m256i bg1 = _mm256_unpackhi_epi64(a.val, b.val);
__m256i ra0 = _mm256_unpacklo_epi64(r.val, a.val); __m256i ra0 = _mm256_unpacklo_epi64(c.val, d.val);
__m256i ra1 = _mm256_unpackhi_epi64(r.val, a.val); __m256i ra1 = _mm256_unpackhi_epi64(c.val, d.val);
__m256i bgra0 = _mm256_permute2x128_si256(bg0, ra0, 0 + 2*16); __m256i bgra0 = _mm256_permute2x128_si256(bg0, ra0, 0 + 2*16);
__m256i bgra1 = _mm256_permute2x128_si256(bg1, ra1, 0 + 2*16); __m256i bgra1 = _mm256_permute2x128_si256(bg1, ra1, 0 + 2*16);

@ -163,7 +163,7 @@ public:
template<int m1, int n1> Matx<_Tp, m1, n1> reshape() const; template<int m1, int n1> Matx<_Tp, m1, n1> reshape() const;
//! extract part of the matrix //! extract part of the matrix
template<int m1, int n1> Matx<_Tp, m1, n1> get_minor(int i, int j) const; template<int m1, int n1> Matx<_Tp, m1, n1> get_minor(int base_row, int base_col) const;
//! extract the matrix row //! extract the matrix row
Matx<_Tp, 1, n> row(int i) const; Matx<_Tp, 1, n> row(int i) const;
@ -191,8 +191,8 @@ public:
Matx<_Tp, m, n> div(const Matx<_Tp, m, n>& a) const; Matx<_Tp, m, n> div(const Matx<_Tp, m, n>& a) const;
//! element access //! element access
const _Tp& operator ()(int i, int j) const; const _Tp& operator ()(int row, int col) const;
_Tp& operator ()(int i, int j); _Tp& operator ()(int row, int col);
//! 1D element access //! 1D element access
const _Tp& operator ()(int i) const; const _Tp& operator ()(int i) const;
@ -742,13 +742,13 @@ Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const
template<typename _Tp, int m, int n> template<typename _Tp, int m, int n>
template<int m1, int n1> inline template<int m1, int n1> inline
Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int i, int j) const Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int base_row, int base_col) const
{ {
CV_DbgAssert(0 <= i && i+m1 <= m && 0 <= j && j+n1 <= n); CV_DbgAssert(0 <= base_row && base_row+m1 <= m && 0 <= base_col && base_col+n1 <= n);
Matx<_Tp, m1, n1> s; Matx<_Tp, m1, n1> s;
for( int di = 0; di < m1; di++ ) for( int di = 0; di < m1; di++ )
for( int dj = 0; dj < n1; dj++ ) for( int dj = 0; dj < n1; dj++ )
s(di, dj) = (*this)(i+di, j+dj); s(di, dj) = (*this)(base_row+di, base_col+dj);
return s; return s;
} }
@ -779,17 +779,17 @@ typename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const
} }
template<typename _Tp, int m, int n> inline template<typename _Tp, int m, int n> inline
const _Tp& Matx<_Tp, m, n>::operator()(int i, int j) const const _Tp& Matx<_Tp, m, n>::operator()(int row_idx, int col_idx) const
{ {
CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); CV_DbgAssert( (unsigned)row_idx < (unsigned)m && (unsigned)col_idx < (unsigned)n );
return this->val[i*n + j]; return this->val[row_idx*n + col_idx];
} }
template<typename _Tp, int m, int n> inline template<typename _Tp, int m, int n> inline
_Tp& Matx<_Tp, m, n>::operator ()(int i, int j) _Tp& Matx<_Tp, m, n>::operator ()(int row_idx, int col_idx)
{ {
CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); CV_DbgAssert( (unsigned)row_idx < (unsigned)m && (unsigned)col_idx < (unsigned)n );
return val[i*n + j]; return val[row_idx*n + col_idx];
} }
template<typename _Tp, int m, int n> inline template<typename _Tp, int m, int n> inline

@ -918,7 +918,13 @@ int cv::borderInterpolate( int p, int len, int borderType )
{ {
CV_TRACE_FUNCTION_VERBOSE(); CV_TRACE_FUNCTION_VERBOSE();
CV_DbgAssert(len > 0);
#ifdef CV_STATIC_ANALYSIS
if(p >= 0 && p < len)
#else
if( (unsigned)p < (unsigned)len ) if( (unsigned)p < (unsigned)len )
#endif
; ;
else if( borderType == BORDER_REPLICATE ) else if( borderType == BORDER_REPLICATE )
p = p < 0 ? 0 : len - 1; p = p < 0 ? 0 : len - 1;
@ -934,7 +940,11 @@ int cv::borderInterpolate( int p, int len, int borderType )
else else
p = len - 1 - (p - len) - delta; p = len - 1 - (p - len) - delta;
} }
#ifdef CV_STATIC_ANALYSIS
while(p < 0 || p >= len);
#else
while( (unsigned)p >= (unsigned)len ); while( (unsigned)p >= (unsigned)len );
#endif
} }
else if( borderType == BORDER_WRAP ) else if( borderType == BORDER_WRAP )
{ {

@ -248,9 +248,6 @@ private:
// Holds the data dimension. // Holds the data dimension.
int n; int n;
// Stores real/imag part of a complex division.
double cdivr, cdivi;
// Pointer to internal memory. // Pointer to internal memory.
double *d, *e, *ort; double *d, *e, *ort;
double **V, **H; double **V, **H;
@ -297,8 +294,9 @@ private:
return arr; return arr;
} }
void cdiv(double xr, double xi, double yr, double yi) { static void complex_div(double xr, double xi, double yr, double yi, double& cdivr, double& cdivi) {
double r, dv; double r, dv;
CV_DbgAssert(std::abs(yr) + std::abs(yi) > 0.0);
if (std::abs(yr) > std::abs(yi)) { if (std::abs(yr) > std::abs(yi)) {
r = yi / yr; r = yi / yr;
dv = yr + r * yi; dv = yr + r * yi;
@ -324,24 +322,25 @@ private:
// Initialize // Initialize
const int max_iters_count = 1000 * this->n; const int max_iters_count = 1000 * this->n;
int nn = this->n; const int nn = this->n; CV_Assert(nn > 0);
int n1 = nn - 1; int n1 = nn - 1;
int low = 0; const int low = 0;
int high = nn - 1; const int high = nn - 1;
double eps = std::pow(2.0, -52.0); const double eps = std::numeric_limits<double>::epsilon();
double exshift = 0.0; double exshift = 0.0;
double p = 0, q = 0, r = 0, s = 0, z = 0, t, w, x, y;
// Store roots isolated by balanc and compute matrix norm // Store roots isolated by balanc and compute matrix norm
double norm = 0.0; double norm = 0.0;
for (int i = 0; i < nn; i++) { for (int i = 0; i < nn; i++) {
#if 0 // 'if' condition is always false
if (i < low || i > high) { if (i < low || i > high) {
d[i] = H[i][i]; d[i] = H[i][i];
e[i] = 0.0; e[i] = 0.0;
} }
#endif
for (int j = std::max(i - 1, 0); j < nn; j++) { for (int j = std::max(i - 1, 0); j < nn; j++) {
norm = norm + std::abs(H[i][j]); norm += std::abs(H[i][j]);
} }
} }
@ -355,7 +354,7 @@ private:
if (norm < FLT_EPSILON) { if (norm < FLT_EPSILON) {
break; break;
} }
s = std::abs(H[l - 1][l - 1]) + std::abs(H[l][l]); double s = std::abs(H[l - 1][l - 1]) + std::abs(H[l][l]);
if (s == 0.0) { if (s == 0.0) {
s = norm; s = norm;
} }
@ -366,29 +365,26 @@ private:
} }
// Check for convergence // Check for convergence
// One root found
if (l == n1) { if (l == n1) {
// One root found
H[n1][n1] = H[n1][n1] + exshift; H[n1][n1] = H[n1][n1] + exshift;
d[n1] = H[n1][n1]; d[n1] = H[n1][n1];
e[n1] = 0.0; e[n1] = 0.0;
n1--; n1--;
iter = 0; iter = 0;
// Two roots found
} else if (l == n1 - 1) { } else if (l == n1 - 1) {
w = H[n1][n1 - 1] * H[n1 - 1][n1]; // Two roots found
p = (H[n1 - 1][n1 - 1] - H[n1][n1]) / 2.0; double w = H[n1][n1 - 1] * H[n1 - 1][n1];
q = p * p + w; double p = (H[n1 - 1][n1 - 1] - H[n1][n1]) * 0.5;
z = std::sqrt(std::abs(q)); double q = p * p + w;
double z = std::sqrt(std::abs(q));
H[n1][n1] = H[n1][n1] + exshift; H[n1][n1] = H[n1][n1] + exshift;
H[n1 - 1][n1 - 1] = H[n1 - 1][n1 - 1] + exshift; H[n1 - 1][n1 - 1] = H[n1 - 1][n1 - 1] + exshift;
x = H[n1][n1]; double x = H[n1][n1];
// Real pair
if (q >= 0) { if (q >= 0) {
// Real pair
if (p >= 0) { if (p >= 0) {
z = p + z; z = p + z;
} else { } else {
@ -402,10 +398,10 @@ private:
e[n1 - 1] = 0.0; e[n1 - 1] = 0.0;
e[n1] = 0.0; e[n1] = 0.0;
x = H[n1][n1 - 1]; x = H[n1][n1 - 1];
s = std::abs(x) + std::abs(z); double s = std::abs(x) + std::abs(z);
p = x / s; p = x / s;
q = z / s; q = z / s;
r = std::sqrt(p * p + q * q); double r = std::sqrt(p * p + q * q);
p = p / r; p = p / r;
q = q / r; q = q / r;
@ -433,9 +429,8 @@ private:
V[i][n1] = q * V[i][n1] - p * z; V[i][n1] = q * V[i][n1] - p * z;
} }
// Complex pair
} else { } else {
// Complex pair
d[n1 - 1] = x + p; d[n1 - 1] = x + p;
d[n1] = x + p; d[n1] = x + p;
e[n1 - 1] = z; e[n1 - 1] = z;
@ -444,28 +439,25 @@ private:
n1 = n1 - 2; n1 = n1 - 2;
iter = 0; iter = 0;
// No convergence yet
} else { } else {
// No convergence yet
// Form shift // Form shift
double x = H[n1][n1];
x = H[n1][n1]; double y = 0.0;
y = 0.0; double w = 0.0;
w = 0.0;
if (l < n1) { if (l < n1) {
y = H[n1 - 1][n1 - 1]; y = H[n1 - 1][n1 - 1];
w = H[n1][n1 - 1] * H[n1 - 1][n1]; w = H[n1][n1 - 1] * H[n1 - 1][n1];
} }
// Wilkinson's original ad hoc shift // Wilkinson's original ad hoc shift
if (iter == 10) { if (iter == 10) {
exshift += x; exshift += x;
for (int i = low; i <= n1; i++) { for (int i = low; i <= n1; i++) {
H[i][i] -= x; H[i][i] -= x;
} }
s = std::abs(H[n1][n1 - 1]) + std::abs(H[n1 - 1][n1 - 2]); double s = std::abs(H[n1][n1 - 1]) + std::abs(H[n1 - 1][n1 - 2]);
x = y = 0.75 * s; x = y = 0.75 * s;
w = -0.4375 * s * s; w = -0.4375 * s * s;
} }
@ -473,14 +465,14 @@ private:
// MATLAB's new ad hoc shift // MATLAB's new ad hoc shift
if (iter == 30) { if (iter == 30) {
s = (y - x) / 2.0; double s = (y - x) * 0.5;
s = s * s + w; s = s * s + w;
if (s > 0) { if (s > 0) {
s = std::sqrt(s); s = std::sqrt(s);
if (y < x) { if (y < x) {
s = -s; s = -s;
} }
s = x - w / ((y - x) / 2.0 + s); s = x - w / ((y - x) * 0.5 + s);
for (int i = low; i <= n1; i++) { for (int i = low; i <= n1; i++) {
H[i][i] -= s; H[i][i] -= s;
} }
@ -493,12 +485,16 @@ private:
if (iter > max_iters_count) if (iter > max_iters_count)
CV_Error(Error::StsNoConv, "Algorithm doesn't converge (complex eigen values?)"); CV_Error(Error::StsNoConv, "Algorithm doesn't converge (complex eigen values?)");
double p = std::numeric_limits<double>::quiet_NaN();
double q = std::numeric_limits<double>::quiet_NaN();
double r = std::numeric_limits<double>::quiet_NaN();
// Look for two consecutive small sub-diagonal elements // Look for two consecutive small sub-diagonal elements
int m = n1 - 2; int m = n1 - 2;
while (m >= l) { while (m >= l) {
z = H[m][m]; double z = H[m][m];
r = x - z; r = x - z;
s = y - z; double s = y - z;
p = (r * s - w) / H[m + 1][m] + H[m][m + 1]; p = (r * s - w) / H[m + 1][m] + H[m][m + 1];
q = H[m + 1][m + 1] - z - r - s; q = H[m + 1][m + 1] - z - r - s;
r = H[m + 2][m + 1]; r = H[m + 2][m + 1];
@ -527,6 +523,7 @@ private:
// Double QR step involving rows l:n and columns m:n // Double QR step involving rows l:n and columns m:n
for (int k = m; k < n1; k++) { for (int k = m; k < n1; k++) {
bool notlast = (k != n1 - 1); bool notlast = (k != n1 - 1);
if (k != m) { if (k != m) {
p = H[k][k - 1]; p = H[k][k - 1];
@ -542,7 +539,7 @@ private:
if (x == 0.0) { if (x == 0.0) {
break; break;
} }
s = std::sqrt(p * p + q * q + r * r); double s = std::sqrt(p * p + q * q + r * r);
if (p < 0) { if (p < 0) {
s = -s; s = -s;
} }
@ -555,7 +552,7 @@ private:
p = p + s; p = p + s;
x = p / s; x = p / s;
y = q / s; y = q / s;
z = r / s; double z = r / s;
q = q / p; q = q / p;
r = r / p; r = r / p;
@ -567,8 +564,8 @@ private:
p = p + r * H[k + 2][j]; p = p + r * H[k + 2][j];
H[k + 2][j] = H[k + 2][j] - p * z; H[k + 2][j] = H[k + 2][j] - p * z;
} }
H[k][j] = H[k][j] - p * x; H[k][j] -= p * x;
H[k + 1][j] = H[k + 1][j] - p * y; H[k + 1][j] -= p * y;
} }
// Column modification // Column modification
@ -579,8 +576,8 @@ private:
p = p + z * H[i][k + 2]; p = p + z * H[i][k + 2];
H[i][k + 2] = H[i][k + 2] - p * r; H[i][k + 2] = H[i][k + 2] - p * r;
} }
H[i][k] = H[i][k] - p; H[i][k] -= p;
H[i][k + 1] = H[i][k + 1] - p * q; H[i][k + 1] -= p * q;
} }
// Accumulate transformations // Accumulate transformations
@ -606,17 +603,19 @@ private:
} }
for (n1 = nn - 1; n1 >= 0; n1--) { for (n1 = nn - 1; n1 >= 0; n1--) {
p = d[n1]; double p = d[n1];
q = e[n1]; double q = e[n1];
// Real vector
if (q == 0) { if (q == 0) {
// Real vector
double z = std::numeric_limits<double>::quiet_NaN();
double s = std::numeric_limits<double>::quiet_NaN();
int l = n1; int l = n1;
H[n1][n1] = 1.0; H[n1][n1] = 1.0;
for (int i = n1 - 1; i >= 0; i--) { for (int i = n1 - 1; i >= 0; i--) {
w = H[i][i] - p; double w = H[i][i] - p;
r = 0.0; double r = 0.0;
for (int j = l; j <= n1; j++) { for (int j = l; j <= n1; j++) {
r = r + H[i][j] * H[j][n1]; r = r + H[i][j] * H[j][n1];
} }
@ -631,34 +630,38 @@ private:
} else { } else {
H[i][n1] = -r / (eps * norm); H[i][n1] = -r / (eps * norm);
} }
// Solve real equations
} else { } else {
x = H[i][i + 1]; // Solve real equations
y = H[i + 1][i]; CV_DbgAssert(!cvIsNaN(z));
double x = H[i][i + 1];
double y = H[i + 1][i];
q = (d[i] - p) * (d[i] - p) + e[i] * e[i]; q = (d[i] - p) * (d[i] - p) + e[i] * e[i];
t = (x * s - z * r) / q; double t = (x * s - z * r) / q;
H[i][n1] = t; H[i][n1] = t;
if (std::abs(x) > std::abs(z)) { if (std::abs(x) > std::abs(z)) {
H[i + 1][n1] = (-r - w * t) / x; H[i + 1][n1] = (-r - w * t) / x;
} else { } else {
CV_DbgAssert(z != 0.0);
H[i + 1][n1] = (-s - y * t) / z; H[i + 1][n1] = (-s - y * t) / z;
} }
} }
// Overflow control // Overflow control
double t = std::abs(H[i][n1]);
t = std::abs(H[i][n1]);
if ((eps * t) * t > 1) { if ((eps * t) * t > 1) {
double inv_t = 1.0 / t;
for (int j = i; j <= n1; j++) { for (int j = i; j <= n1; j++) {
H[j][n1] = H[j][n1] / t; H[j][n1] *= inv_t;
} }
} }
} }
} }
// Complex vector
} else if (q < 0) { } else if (q < 0) {
// Complex vector
double z = std::numeric_limits<double>::quiet_NaN();
double r = std::numeric_limits<double>::quiet_NaN();
double s = std::numeric_limits<double>::quiet_NaN();
int l = n1 - 1; int l = n1 - 1;
// Last vector component imaginary so matrix is triangular // Last vector component imaginary so matrix is triangular
@ -667,9 +670,11 @@ private:
H[n1 - 1][n1 - 1] = q / H[n1][n1 - 1]; H[n1 - 1][n1 - 1] = q / H[n1][n1 - 1];
H[n1 - 1][n1] = -(H[n1][n1] - p) / H[n1][n1 - 1]; H[n1 - 1][n1] = -(H[n1][n1] - p) / H[n1][n1 - 1];
} else { } else {
cdiv(0.0, -H[n1 - 1][n1], H[n1 - 1][n1 - 1] - p, q); complex_div(
H[n1 - 1][n1 - 1] = cdivr; 0.0, -H[n1 - 1][n1],
H[n1 - 1][n1] = cdivi; H[n1 - 1][n1 - 1] - p, q,
H[n1 - 1][n1 - 1], H[n1 - 1][n1]
);
} }
H[n1][n1 - 1] = 0.0; H[n1][n1 - 1] = 0.0;
H[n1][n1] = 1.0; H[n1][n1] = 1.0;
@ -681,7 +686,7 @@ private:
ra = ra + H[i][j] * H[j][n1 - 1]; ra = ra + H[i][j] * H[j][n1 - 1];
sa = sa + H[i][j] * H[j][n1]; sa = sa + H[i][j] * H[j][n1];
} }
w = H[i][i] - p; double w = H[i][i] - p;
if (e[i] < 0.0) { if (e[i] < 0.0) {
z = w; z = w;
@ -690,41 +695,42 @@ private:
} else { } else {
l = i; l = i;
if (e[i] == 0) { if (e[i] == 0) {
cdiv(-ra, -sa, w, q); complex_div(
H[i][n1 - 1] = cdivr; -ra, -sa,
H[i][n1] = cdivi; w, q,
H[i][n1 - 1], H[i][n1]
);
} else { } else {
// Solve complex equations // Solve complex equations
x = H[i][i + 1]; double x = H[i][i + 1];
y = H[i + 1][i]; double y = H[i + 1][i];
vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q; vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
vi = (d[i] - p) * 2.0 * q; vi = (d[i] - p) * 2.0 * q;
if (vr == 0.0 && vi == 0.0) { if (vr == 0.0 && vi == 0.0) {
vr = eps * norm * (std::abs(w) + std::abs(q) + std::abs(x) vr = eps * norm * (std::abs(w) + std::abs(q) + std::abs(x)
+ std::abs(y) + std::abs(z)); + std::abs(y) + std::abs(z));
} }
cdiv(x * r - z * ra + q * sa, complex_div(
x * s - z * sa - q * ra, vr, vi); x * r - z * ra + q * sa, x * s - z * sa - q * ra,
H[i][n1 - 1] = cdivr; vr, vi,
H[i][n1] = cdivi; H[i][n1 - 1], H[i][n1]);
if (std::abs(x) > (std::abs(z) + std::abs(q))) { if (std::abs(x) > (std::abs(z) + std::abs(q))) {
H[i + 1][n1 - 1] = (-ra - w * H[i][n1 - 1] + q H[i + 1][n1 - 1] = (-ra - w * H[i][n1 - 1] + q
* H[i][n1]) / x; * H[i][n1]) / x;
H[i + 1][n1] = (-sa - w * H[i][n1] - q * H[i][n1 H[i + 1][n1] = (-sa - w * H[i][n1] - q * H[i][n1
- 1]) / x; - 1]) / x;
} else { } else {
cdiv(-r - y * H[i][n1 - 1], -s - y * H[i][n1], z, complex_div(
q); -r - y * H[i][n1 - 1], -s - y * H[i][n1],
H[i + 1][n1 - 1] = cdivr; z, q,
H[i + 1][n1] = cdivi; H[i + 1][n1 - 1], H[i + 1][n1]);
} }
} }
// Overflow control // Overflow control
t = std::max(std::abs(H[i][n1 - 1]), std::abs(H[i][n1])); double t = std::max(std::abs(H[i][n1 - 1]), std::abs(H[i][n1]));
if ((eps * t) * t > 1) { if ((eps * t) * t > 1) {
for (int j = i; j <= n1; j++) { for (int j = i; j <= n1; j++) {
H[j][n1 - 1] = H[j][n1 - 1] / t; H[j][n1 - 1] = H[j][n1 - 1] / t;
@ -738,6 +744,7 @@ private:
// Vectors of isolated roots // Vectors of isolated roots
#if 0 // 'if' condition is always false
for (int i = 0; i < nn; i++) { for (int i = 0; i < nn; i++) {
if (i < low || i > high) { if (i < low || i > high) {
for (int j = i; j < nn; j++) { for (int j = i; j < nn; j++) {
@ -745,14 +752,15 @@ private:
} }
} }
} }
#endif
// Back transformation to get eigenvectors of original matrix // Back transformation to get eigenvectors of original matrix
for (int j = nn - 1; j >= low; j--) { for (int j = nn - 1; j >= low; j--) {
for (int i = low; i <= high; i++) { for (int i = low; i <= high; i++) {
z = 0.0; double z = 0.0;
for (int k = low; k <= std::min(j, high); k++) { for (int k = low; k <= std::min(j, high); k++) {
z = z + V[i][k] * H[k][j]; z += V[i][k] * H[k][j];
} }
V[i][j] = z; V[i][j] = z;
} }
@ -852,15 +860,15 @@ private:
// Releases all internal working memory. // Releases all internal working memory.
void release() { void release() {
// releases the working data // releases the working data
delete[] d; delete[] d; d = NULL;
delete[] e; delete[] e; e = NULL;
delete[] ort; delete[] ort; ort = NULL;
for (int i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
delete[] H[i]; if (H) delete[] H[i];
delete[] V[i]; if (V) delete[] V[i];
} }
delete[] H; delete[] H; H = NULL;
delete[] V; delete[] V; V = NULL;
} }
// Computes the Eigenvalue Decomposition for a matrix given in H. // Computes the Eigenvalue Decomposition for a matrix given in H.
@ -870,7 +878,7 @@ private:
d = alloc_1d<double> (n); d = alloc_1d<double> (n);
e = alloc_1d<double> (n); e = alloc_1d<double> (n);
ort = alloc_1d<double> (n); ort = alloc_1d<double> (n);
try { {
// Reduce to Hessenberg form. // Reduce to Hessenberg form.
orthes(); orthes();
// Reduce Hessenberg to real Schur form. // Reduce Hessenberg to real Schur form.
@ -888,11 +896,6 @@ private:
// Deallocate the memory by releasing all internal working data. // Deallocate the memory by releasing all internal working data.
release(); release();
} }
catch (...)
{
release();
throw;
}
} }
public: public:
@ -900,7 +903,11 @@ public:
// given in src. This function is a port of the EigenvalueSolver in JAMA, // given in src. This function is a port of the EigenvalueSolver in JAMA,
// which has been released to public domain by The MathWorks and the // which has been released to public domain by The MathWorks and the
// National Institute of Standards and Technology (NIST). // National Institute of Standards and Technology (NIST).
EigenvalueDecomposition(InputArray src, bool fallbackSymmetric = true) { EigenvalueDecomposition(InputArray src, bool fallbackSymmetric = true) :
n(0),
d(NULL), e(NULL), ort(NULL),
V(NULL), H(NULL)
{
compute(src, fallbackSymmetric); compute(src, fallbackSymmetric);
} }
@ -938,7 +945,7 @@ public:
} }
} }
~EigenvalueDecomposition() {} ~EigenvalueDecomposition() { release(); }
// Returns the eigenvalues of the Eigenvalue Decomposition. // Returns the eigenvalues of the Eigenvalue Decomposition.
Mat eigenvalues() const { return _eigenvalues; } Mat eigenvalues() const { return _eigenvalues; }

@ -1020,7 +1020,7 @@ static void cvTsPerspectiveTransform( const CvArr* _src, CvArr* _dst, const CvMa
int i, j, cols; int i, j, cols;
int cn, depth, mat_depth; int cn, depth, mat_depth;
CvMat astub, bstub, *a, *b; CvMat astub, bstub, *a, *b;
double mat[16]; double mat[16] = {0.0};
a = cvGetMat( _src, &astub, 0, 0 ); a = cvGetMat( _src, &astub, 0, 0 );
b = cvGetMat( _dst, &bstub, 0, 0 ); b = cvGetMat( _dst, &bstub, 0, 0 );

@ -89,7 +89,15 @@ ocv_glob_module_sources(${sources_options} SOURCES ${fw_srcs})
ocv_create_module(${libs} ${INF_ENGINE_TARGET}) ocv_create_module(${libs} ${INF_ENGINE_TARGET})
ocv_add_samples() ocv_add_samples()
ocv_add_accuracy_tests(${INF_ENGINE_TARGET}) ocv_add_accuracy_tests(${INF_ENGINE_TARGET})
ocv_add_perf_tests(${INF_ENGINE_TARGET})
set(perf_path "${CMAKE_CURRENT_LIST_DIR}/perf")
file(GLOB_RECURSE perf_srcs "${perf_path}/*.cpp")
file(GLOB_RECURSE perf_hdrs "${perf_path}/*.hpp" "${perf_path}/*.h")
ocv_add_perf_tests(${INF_ENGINE_TARGET}
FILES test_common "${CMAKE_CURRENT_LIST_DIR}/test/test_common.cpp"
FILES Src ${perf_srcs}
FILES Include ${perf_hdrs}
)
ocv_option(${the_module}_PERF_CAFFE "Add performance tests of Caffe framework" OFF) ocv_option(${the_module}_PERF_CAFFE "Add performance tests of Caffe framework" OFF)
ocv_option(${the_module}_PERF_CLCAFFE "Add performance tests of clCaffe framework" OFF) ocv_option(${the_module}_PERF_CLCAFFE "Add performance tests of clCaffe framework" OFF)

@ -955,13 +955,6 @@ CV__DNN_INLINE_NS_BEGIN
CV_OUT std::vector<int>& indices, CV_OUT std::vector<int>& indices,
const float eta = 1.f, const int top_k = 0); const float eta = 1.f, const int top_k = 0);
/** @brief Release a Myriad device is binded by OpenCV.
*
* Single Myriad device cannot be shared across multiple processes which uses
* Inference Engine's Myriad plugin.
*/
CV_EXPORTS_W void resetMyriadDevice();
//! @} //! @}
CV__DNN_INLINE_NS_END CV__DNN_INLINE_NS_END
} }
@ -970,4 +963,7 @@ CV__DNN_INLINE_NS_END
#include <opencv2/dnn/layer.hpp> #include <opencv2/dnn/layer.hpp>
#include <opencv2/dnn/dnn.inl.hpp> #include <opencv2/dnn/dnn.inl.hpp>
/// @deprecated Include this header directly from application. Automatic inclusion will be removed
#include <opencv2/dnn/utils/inference_engine.hpp>
#endif /* OPENCV_DNN_DNN_HPP */ #endif /* OPENCV_DNN_DNN_HPP */

@ -0,0 +1,43 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#ifndef OPENCV_DNN_UTILS_INF_ENGINE_HPP
#define OPENCV_DNN_UTILS_INF_ENGINE_HPP
#include "../dnn.hpp"
namespace cv { namespace dnn {
CV__DNN_INLINE_NS_BEGIN
/** @brief Release a Myriad device (binded by OpenCV).
*
* Single Myriad device cannot be shared across multiple processes which uses
* Inference Engine's Myriad plugin.
*/
CV_EXPORTS_W void resetMyriadDevice();
/* Values for 'OPENCV_DNN_IE_VPU_TYPE' parameter */
#define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_UNSPECIFIED ""
/// Intel(R) Movidius(TM) Neural Compute Stick, NCS (USB 03e7:2150), Myriad2 (https://software.intel.com/en-us/movidius-ncs)
#define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2 "Myriad2"
/// Intel(R) Neural Compute Stick 2, NCS2 (USB 03e7:2485), MyriadX (https://software.intel.com/ru-ru/neural-compute-stick)
#define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X "MyriadX"
/** @brief Returns Inference Engine VPU type.
*
* See values of `CV_DNN_INFERENCE_ENGINE_VPU_TYPE_*` macros.
*/
CV_EXPORTS_W cv::String getInferenceEngineVPUType();
CV__DNN_INLINE_NS_END
}} // namespace
#endif // OPENCV_DNN_UTILS_INF_ENGINE_HPP

@ -185,6 +185,11 @@ PERF_TEST_P_(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
{ {
if (backend == DNN_BACKEND_HALIDE) if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException(""); throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
#endif
processNet("dnn/ssd_inception_v2_coco_2017_11_17.pb", "ssd_inception_v2_coco_2017_11_17.pbtxt", "", processNet("dnn/ssd_inception_v2_coco_2017_11_17.pb", "ssd_inception_v2_coco_2017_11_17.pbtxt", "",
Mat(cv::Size(300, 300), CV_32FC3)); Mat(cv::Size(300, 300), CV_32FC3));
} }

@ -2146,10 +2146,6 @@ struct Net::Impl
} }
} }
} }
if (preferableBackend != DNN_BACKEND_OPENCV)
continue; // Go to the next layer.
// the optimization #2. if there is no layer that takes max pooling layer's computed // the optimization #2. if there is no layer that takes max pooling layer's computed
// max indices (and only some semantical segmentation networks might need this; // max indices (and only some semantical segmentation networks might need this;
// many others only take the maximum values), then we switch the max pooling // many others only take the maximum values), then we switch the max pooling
@ -2170,6 +2166,9 @@ struct Net::Impl
} }
} }
if (preferableBackend != DNN_BACKEND_OPENCV)
continue; // Go to the next layer.
// the optimization #3. if there is concat layer that concatenates channels // the optimization #3. if there is concat layer that concatenates channels
// from the inputs together (i.e. axis == 1) then we make the inputs of // from the inputs together (i.e. axis == 1) then we make the inputs of
// the concat layer to write to the concatenation output buffer // the concat layer to write to the concatenation output buffer

@ -148,10 +148,18 @@ public:
{ {
if (backendId == DNN_BACKEND_INFERENCE_ENGINE) if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
{ {
if (preferableTarget == DNN_TARGET_MYRIAD) #ifdef HAVE_INF_ENGINE
if (preferableTarget == DNN_TARGET_MYRIAD) {
if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2) ) {
return !isMyriadX();
}
return type == MAX || type == AVE; return type == MAX || type == AVE;
}
else else
return type != STOCHASTIC; return type != STOCHASTIC;
#else
return false;
#endif
} }
else else
return backendId == DNN_BACKEND_OPENCV || return backendId == DNN_BACKEND_OPENCV ||

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory // It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
// //
// Copyright (C) 2018, Intel Corporation, all rights reserved. // Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
#include "precomp.hpp" #include "precomp.hpp"
@ -12,8 +12,14 @@
#ifdef HAVE_INF_ENGINE #ifdef HAVE_INF_ENGINE
#include <ie_extension.h> #include <ie_extension.h>
#include <ie_plugin_dispatcher.hpp> #include <ie_plugin_dispatcher.hpp>
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
#include <vpu/vpu_plugin_config.hpp>
#endif
#endif // HAVE_INF_ENGINE #endif // HAVE_INF_ENGINE
#include <opencv2/core/utils/configuration.private.hpp>
#include <opencv2/core/utils/logger.hpp>
namespace cv { namespace dnn { namespace cv { namespace dnn {
#ifdef HAVE_INF_ENGINE #ifdef HAVE_INF_ENGINE
@ -683,6 +689,64 @@ static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEngineP
return sharedPlugins; return sharedPlugins;
} }
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) && !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
static bool detectMyriadX_()
{
InferenceEngine::Builder::Network builder("");
InferenceEngine::idx_t inpId = builder.addLayer(
InferenceEngine::Builder::InputLayer().setPort(InferenceEngine::Port({1})));
#if INF_ENGINE_RELEASE <= 2018050000
InferenceEngine::idx_t clampId;
{
InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ClampLayer();
auto& blobs = l.getConstantData();
auto blob = InferenceEngine::make_shared_blob<int16_t>(
InferenceEngine::Precision::FP16,
InferenceEngine::Layout::C, {1});
blob->allocate();
blobs[""] = blob;
clampId = builder.addLayer({inpId}, l);
}
builder.addLayer({InferenceEngine::PortInfo(clampId)}, InferenceEngine::Builder::OutputLayer());
#else
InferenceEngine::idx_t clampId = builder.addLayer({inpId}, InferenceEngine::Builder::ClampLayer());
builder.addLayer({InferenceEngine::PortInfo(clampId)},
InferenceEngine::Builder::OutputLayer().setPort(InferenceEngine::Port({},
InferenceEngine::Precision::FP16)));
#endif
InferenceEngine::CNNNetwork cnn = InferenceEngine::CNNNetwork(
InferenceEngine::Builder::convertToICNNNetwork(builder.build()));
InferenceEngine::TargetDevice device = InferenceEngine::TargetDevice::eMYRIAD;
InferenceEngine::InferenceEnginePluginPtr enginePtr;
{
AutoLock lock(getInitializationMutex());
auto& sharedPlugins = getSharedPlugins();
auto pluginIt = sharedPlugins.find(device);
if (pluginIt != sharedPlugins.end()) {
enginePtr = pluginIt->second;
} else {
auto dispatcher = InferenceEngine::PluginDispatcher({""});
enginePtr = dispatcher.getSuitablePlugin(device);
sharedPlugins[device] = enginePtr;
}
}
auto plugin = InferenceEngine::InferencePlugin(enginePtr);
try
{
auto netExec = plugin.LoadNetwork(cnn, {{InferenceEngine::VPUConfigParams::KEY_VPU_PLATFORM,
InferenceEngine::VPUConfigParams::VPU_2480}});
auto infRequest = netExec.CreateInferRequest();
} catch(...) {
return false;
}
return true;
}
#endif // >= 2018R5
void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net) void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
{ {
CV_Assert(!isInitialized()); CV_Assert(!isInitialized());
@ -784,7 +848,11 @@ bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
std::vector<MatShape> &outputs, std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const std::vector<MatShape> &internals) const
{ {
#if INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R3)
InferenceEngine::ICNNNetwork::InputShapes inShapes = const_cast<InferenceEngine::CNNNetwork&>(t_net).getInputShapes();
#else
InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes(); InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
#endif
InferenceEngine::ICNNNetwork::InputShapes::iterator itr; InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
bool equal_flag = true; bool equal_flag = true;
size_t i = 0; size_t i = 0;
@ -875,5 +943,59 @@ void resetMyriadDevice()
#endif // HAVE_INF_ENGINE #endif // HAVE_INF_ENGINE
} }
#ifdef HAVE_INF_ENGINE
bool isMyriadX()
{
static bool myriadX = getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
return myriadX;
}
static std::string getInferenceEngineVPUType_()
{
static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_DNN_IE_VPU_TYPE", "");
if (param_vpu_type == "")
{
#if defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
param_vpu_type = OPENCV_DNN_IE_VPU_TYPE_DEFAULT;
#elif INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
CV_LOG_INFO(NULL, "OpenCV-DNN: running Inference Engine VPU autodetection: Myriad2/X. In case of other accelerator types specify 'OPENCV_DNN_IE_VPU_TYPE' parameter");
try {
bool isMyriadX_ = detectMyriadX_();
if (isMyriadX_)
{
param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
}
else
{
param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2;
}
}
catch (...)
{
CV_LOG_WARNING(NULL, "OpenCV-DNN: Failed Inference Engine VPU autodetection. Specify 'OPENCV_DNN_IE_VPU_TYPE' parameter.");
param_vpu_type.clear();
}
#else
CV_LOG_WARNING(NULL, "OpenCV-DNN: VPU auto-detection is not implemented. Consider specifying VPU type via 'OPENCV_DNN_IE_VPU_TYPE' parameter");
param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2;
#endif
}
CV_LOG_INFO(NULL, "OpenCV-DNN: Inference Engine VPU type='" << param_vpu_type << "'");
return param_vpu_type;
}
cv::String getInferenceEngineVPUType()
{
static cv::String vpu_type = getInferenceEngineVPUType_();
return vpu_type;
}
#else // HAVE_INF_ENGINE
cv::String getInferenceEngineVPUType()
{
CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
}
#endif // HAVE_INF_ENGINE
CV__DNN_INLINE_NS_END CV__DNN_INLINE_NS_END
}} // namespace dnn, namespace cv }} // namespace dnn, namespace cv

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory // It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
// //
// Copyright (C) 2018, Intel Corporation, all rights reserved. // Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__ #ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
@ -12,6 +12,8 @@
#include "opencv2/core/cvstd.hpp" #include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp" #include "opencv2/dnn.hpp"
#include "opencv2/dnn/utils/inference_engine.hpp"
#ifdef HAVE_INF_ENGINE #ifdef HAVE_INF_ENGINE
#if defined(__GNUC__) && __GNUC__ >= 5 #if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push //#pragma GCC diagnostic push
@ -114,10 +116,8 @@ public:
virtual size_t getBatchSize() const CV_NOEXCEPT CV_OVERRIDE; virtual size_t getBatchSize() const CV_NOEXCEPT CV_OVERRIDE;
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R2) virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) CV_NOEXCEPT CV_OVERRIDE;
virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) CV_NOEXCEPT; virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) CV_NOEXCEPT CV_OVERRIDE;
virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) CV_NOEXCEPT;
#endif
void init(int targetId); void init(int targetId);
@ -279,6 +279,12 @@ private:
InferenceEngine::CNNNetwork t_net; InferenceEngine::CNNNetwork t_net;
}; };
CV__DNN_INLINE_NS_BEGIN
bool isMyriadX();
CV__DNN_INLINE_NS_END
#endif // HAVE_INF_ENGINE #endif // HAVE_INF_ENGINE
bool haveInfEngine(); bool haveInfEngine();

@ -630,6 +630,21 @@ public:
} }
}; };
class SoftMaxSlimSubgraph : public Subgraph
{
public:
SoftMaxSlimSubgraph()
{
int input = addNodeToMatch("");
int shape = addNodeToMatch("Const");
int shapeOp = addNodeToMatch("Shape", input);
int reshape = addNodeToMatch("Reshape", input, shape);
int softmax = addNodeToMatch("Softmax", reshape);
addNodeToMatch("Reshape", softmax, shapeOp);
setFusedNode("Softmax", input);
}
};
void simplifySubgraphs(tensorflow::GraphDef& net) void simplifySubgraphs(tensorflow::GraphDef& net)
{ {
std::vector<Ptr<Subgraph> > subgraphs; std::vector<Ptr<Subgraph> > subgraphs;
@ -646,6 +661,7 @@ void simplifySubgraphs(tensorflow::GraphDef& net)
subgraphs.push_back(Ptr<Subgraph>(new ResizeBilinearSubgraph())); subgraphs.push_back(Ptr<Subgraph>(new ResizeBilinearSubgraph()));
subgraphs.push_back(Ptr<Subgraph>(new UpsamplingKerasSubgraph())); subgraphs.push_back(Ptr<Subgraph>(new UpsamplingKerasSubgraph()));
subgraphs.push_back(Ptr<Subgraph>(new ReshapeAsShapeSubgraph())); subgraphs.push_back(Ptr<Subgraph>(new ReshapeAsShapeSubgraph()));
subgraphs.push_back(Ptr<Subgraph>(new SoftMaxSlimSubgraph()));
int numNodes = net.node_size(); int numNodes = net.node_size();
std::vector<int> matchedNodesIds; std::vector<int> matchedNodesIds;

@ -661,7 +661,10 @@ void TFImporter::populateNet(Net dstNet)
RemoveIdentityOps(netTxt); RemoveIdentityOps(netTxt);
if (!netTxt.ByteSize()) if (!netTxt.ByteSize())
{
simplifySubgraphs(netBin); simplifySubgraphs(netBin);
sortByExecutionOrder(netBin);
}
std::set<String> layers_to_ignore; std::set<String> layers_to_ignore;

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory // It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
// //
// Copyright (C) 2018, Intel Corporation, all rights reserved. // Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
#include "test_precomp.hpp" #include "test_precomp.hpp"
@ -157,21 +157,29 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe)
throw SkipTestException(""); throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false)); Mat sample = imread(findDataFile("dnn/street.png", false));
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false); Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
float diffScores = (target == DNN_TARGET_OPENCL_FP16) ? 6e-3 : 0.0; float diffScores = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1.5e-2 : 0.0;
processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt", float diffSquares = (target == DNN_TARGET_MYRIAD) ? 0.063 : 0.0;
inp, "detection_out", "", diffScores); float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.252 : 0.0;
processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt",
inp, "detection_out", "", diffScores, diffSquares, detectionConfThresh);
} }
TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe_Different_Width_Height) TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe_Different_Width_Height)
{ {
if (backend == DNN_BACKEND_HALIDE) if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException(""); throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
#endif
Mat sample = imread(findDataFile("dnn/street.png", false)); Mat sample = imread(findDataFile("dnn/street.png", false));
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 560), Scalar(127.5, 127.5, 127.5), false); Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 560), Scalar(127.5, 127.5, 127.5), false);
float diffScores = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.029 : 0.0; float diffScores = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.029 : 0.0;
float diffSquares = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.09 : 0.0; float diffSquares = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.09 : 0.0;
processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt", processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt",
inp, "detection_out", "", diffScores, diffSquares); inp, "detection_out", "", diffScores, diffSquares);
} }
TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow) TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow)
@ -180,16 +188,22 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow)
throw SkipTestException(""); throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false)); Mat sample = imread(findDataFile("dnn/street.png", false));
Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false); Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.011 : 0.0; float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.095 : 0.0;
float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.06 : 0.0; float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.09 : 0.0;
float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.216 : 0.2;
processNet("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", "dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt", processNet("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", "dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt",
inp, "detection_out", "", l1, lInf); inp, "detection_out", "", l1, lInf, detectionConfThresh);
} }
TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow_Different_Width_Height) TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow_Different_Width_Height)
{ {
if (backend == DNN_BACKEND_HALIDE) if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException(""); throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
#endif
Mat sample = imread(findDataFile("dnn/street.png", false)); Mat sample = imread(findDataFile("dnn/street.png", false));
Mat inp = blobFromImage(sample, 1.0f, Size(300, 560), Scalar(), false); Mat inp = blobFromImage(sample, 1.0f, Size(300, 560), Scalar(), false);
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.012 : 0.0; float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.012 : 0.0;
@ -215,32 +229,54 @@ TEST_P(DNNTestNetwork, SSD_VGG16)
if (backend == DNN_BACKEND_HALIDE && target == DNN_TARGET_CPU) if (backend == DNN_BACKEND_HALIDE && target == DNN_TARGET_CPU)
throw SkipTestException(""); throw SkipTestException("");
double scoreThreshold = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0325 : 0.0; double scoreThreshold = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0325 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.032 : 0.0;
Mat sample = imread(findDataFile("dnn/street.png", false)); Mat sample = imread(findDataFile("dnn/street.png", false));
Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false); Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
processNet("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel", processNet("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel",
"dnn/ssd_vgg16.prototxt", inp, "detection_out", "", scoreThreshold); "dnn/ssd_vgg16.prototxt", inp, "detection_out", "", scoreThreshold, lInf);
} }
TEST_P(DNNTestNetwork, OpenPose_pose_coco) TEST_P(DNNTestNetwork, OpenPose_pose_coco)
{ {
if (backend == DNN_BACKEND_HALIDE) if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException(""); throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
#endif
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.0056 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.072 : 0.0;
processNet("dnn/openpose_pose_coco.caffemodel", "dnn/openpose_pose_coco.prototxt", processNet("dnn/openpose_pose_coco.caffemodel", "dnn/openpose_pose_coco.prototxt",
Size(46, 46)); Size(46, 46), "", "", l1, lInf);
} }
TEST_P(DNNTestNetwork, OpenPose_pose_mpi) TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
{ {
if (backend == DNN_BACKEND_HALIDE) if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException(""); throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
#endif
// output range: [-0.001, 0.97]
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.012 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.16 : 0.0;
processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi.prototxt", processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi.prototxt",
Size(46, 46)); Size(46, 46), "", "", l1, lInf);
} }
TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages) TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
{ {
if (backend == DNN_BACKEND_HALIDE) if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException(""); throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
#endif
// The same .caffemodel but modified .prototxt // The same .caffemodel but modified .prototxt
// See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp // See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp
processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi_faster_4_stages.prototxt", processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi_faster_4_stages.prototxt",
@ -250,17 +286,24 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
TEST_P(DNNTestNetwork, OpenFace) TEST_P(DNNTestNetwork, OpenFace)
{ {
#if defined(INF_ENGINE_RELEASE) #if defined(INF_ENGINE_RELEASE)
#if INF_ENGINE_RELEASE == 2018050000 #if INF_ENGINE_VER_MAJOR_EQ(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException(""); throw SkipTestException("Test is disabled for Myriad targets");
#elif INF_ENGINE_RELEASE < 2018040000 #elif INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX target");
#else
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("Test is enabled starts from OpenVINO 2018R4"); throw SkipTestException("Test has been fixed in OpenVINO 2018R4");
#endif #endif
#endif #endif
if (backend == DNN_BACKEND_HALIDE) if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException(""); throw SkipTestException("");
processNet("dnn/openface_nn4.small2.v1.t7", "", Size(96, 96), ""); const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.0024 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.0071 : 0.0;
processNet("dnn/openface_nn4.small2.v1.t7", "", Size(96, 96), "", "", l1, lInf);
} }
TEST_P(DNNTestNetwork, opencv_face_detector) TEST_P(DNNTestNetwork, opencv_face_detector)
@ -275,6 +318,11 @@ TEST_P(DNNTestNetwork, opencv_face_detector)
TEST_P(DNNTestNetwork, Inception_v2_SSD_TensorFlow) TEST_P(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
{ {
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
#endif
if (backend == DNN_BACKEND_HALIDE) if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException(""); throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false)); Mat sample = imread(findDataFile("dnn/street.png", false));
@ -289,7 +337,7 @@ TEST_P(DNNTestNetwork, DenseNet_121)
{ {
if (backend == DNN_BACKEND_HALIDE) if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException(""); throw SkipTestException("");
// Reference output values are in range [-3.807, 4.605]
float l1 = 0.0, lInf = 0.0; float l1 = 0.0, lInf = 0.0;
if (target == DNN_TARGET_OPENCL_FP16) if (target == DNN_TARGET_OPENCL_FP16)
{ {
@ -297,7 +345,7 @@ TEST_P(DNNTestNetwork, DenseNet_121)
} }
else if (target == DNN_TARGET_MYRIAD) else if (target == DNN_TARGET_MYRIAD)
{ {
l1 = 6e-2; lInf = 0.27; l1 = 0.1; lInf = 0.6;
} }
processNet("dnn/DenseNet_121.caffemodel", "dnn/DenseNet_121.prototxt", Size(224, 224), "", "", l1, lInf); processNet("dnn/DenseNet_121.caffemodel", "dnn/DenseNet_121.prototxt", Size(224, 224), "", "", l1, lInf);
} }

@ -376,6 +376,7 @@ TEST(Reproducibility_GoogLeNet_fp16, Accuracy)
TEST_P(Test_Caffe_nets, Colorization) TEST_P(Test_Caffe_nets, Colorization)
{ {
checkBackend(); checkBackend();
Mat inp = blobFromNPY(_tf("colorization_inp.npy")); Mat inp = blobFromNPY(_tf("colorization_inp.npy"));
Mat ref = blobFromNPY(_tf("colorization_out.npy")); Mat ref = blobFromNPY(_tf("colorization_out.npy"));
Mat kernel = blobFromNPY(_tf("colorization_pts_in_hull.npy")); Mat kernel = blobFromNPY(_tf("colorization_pts_in_hull.npy"));
@ -393,8 +394,12 @@ TEST_P(Test_Caffe_nets, Colorization)
Mat out = net.forward(); Mat out = net.forward();
// Reference output values are in range [-29.1, 69.5] // Reference output values are in range [-29.1, 69.5]
const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.25 : 4e-4; double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.25 : 4e-4;
const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 5.3 : 3e-3; double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 5.3 : 3e-3;
if (target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
{
l1 = 0.6; lInf = 15;
}
normAssert(out, ref, "", l1, lInf); normAssert(out, ref, "", l1, lInf);
} }
@ -423,7 +428,7 @@ TEST_P(Test_Caffe_nets, DenseNet_121)
} }
else if (target == DNN_TARGET_MYRIAD) else if (target == DNN_TARGET_MYRIAD)
{ {
l1 = 0.097; lInf = 0.52; l1 = 0.11; lInf = 0.5;
} }
normAssert(out, ref, "", l1, lInf); normAssert(out, ref, "", l1, lInf);
} }
@ -515,12 +520,14 @@ INSTANTIATE_TEST_CASE_P(Test_Caffe, opencv_face_detector,
TEST_P(Test_Caffe_nets, FasterRCNN_vgg16) TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
{ {
if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) #if defined(INF_ENGINE_RELEASE)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018030000 if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
|| (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) throw SkipTestException("Test is disabled for DLIE OpenCL targets"); // very slow
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
#endif #endif
)
throw SkipTestException("");
static Mat ref = (Mat_<float>(3, 7) << 0, 2, 0.949398, 99.2454, 210.141, 601.205, 462.849, static Mat ref = (Mat_<float>(3, 7) << 0, 2, 0.949398, 99.2454, 210.141, 601.205, 462.849,
0, 7, 0.997022, 481.841, 92.3218, 722.685, 175.953, 0, 7, 0.997022, 481.841, 92.3218, 722.685, 175.953,
0, 12, 0.993028, 133.221, 189.377, 350.994, 563.166); 0, 12, 0.993028, 133.221, 189.377, 350.994, 563.166);

@ -0,0 +1,293 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Used in perf tests too, disabled: #include "test_precomp.hpp"
#include "opencv2/ts.hpp"
#include "opencv2/ts/ts_perf.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/dnn.hpp"
#include "test_common.hpp"
#include <opencv2/core/utils/configuration.private.hpp>
#include <opencv2/core/utils/logger.hpp>
namespace cv { namespace dnn {
CV__DNN_INLINE_NS_BEGIN
void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
{
    // Map each known backend to its short tag. Deliberately no "default:" case:
    // the compiler then warns when a new Backend enumerator is not handled here.
    const char* tag = 0;
    switch (v) {
    case DNN_BACKEND_DEFAULT:          tag = "DEFAULT"; break;
    case DNN_BACKEND_HALIDE:           tag = "HALIDE"; break;
    case DNN_BACKEND_INFERENCE_ENGINE: tag = "DLIE"; break;
    case DNN_BACKEND_OPENCV:           tag = "OCV"; break;
    case DNN_BACKEND_VKCOM:            tag = "VKCOM"; break;
    }
    if (tag)
        *os << tag;
    else
        *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
}
void PrintTo(const cv::dnn::Target& v, std::ostream* os)
{
    // Map each known target to its short tag. Deliberately no "default:" case:
    // the compiler then warns when a new Target enumerator is not handled here.
    const char* tag = 0;
    switch (v) {
    case DNN_TARGET_CPU:         tag = "CPU"; break;
    case DNN_TARGET_OPENCL:      tag = "OCL"; break;
    case DNN_TARGET_OPENCL_FP16: tag = "OCL_FP16"; break;
    case DNN_TARGET_MYRIAD:      tag = "MYRIAD"; break;
    case DNN_TARGET_VULKAN:      tag = "VULKAN"; break;
    case DNN_TARGET_FPGA:        tag = "FPGA"; break;
    }
    if (tag)
        *os << tag;
    else
        *os << "DNN_TARGET_UNKNOWN(" << (int)v << ")";
}
void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os)
{
PrintTo(get<0>(v), os);
*os << "/";
PrintTo(get<1>(v), os);
}
CV__DNN_INLINE_NS_END
}} // namespace
namespace opencv_test {
void normAssert(
        cv::InputArray ref, cv::InputArray test, const char *comment /*= ""*/,
        double l1 /*= 0.00001*/, double lInf /*= 0.0001*/)
{
    // Compare two blobs: average absolute difference per element (L1) and
    // the largest single-element difference (Linf), each against its threshold.
    const double avgDiff = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
    const double maxDiff = cvtest::norm(ref, test, cv::NORM_INF);
    EXPECT_LE(avgDiff, l1) << comment;
    EXPECT_LE(maxDiff, lInf) << comment;
}
std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
{
    // Each row of m is [left, top, right, bottom] (CV_32F);
    // convert every row into an x/y/width/height rectangle.
    EXPECT_EQ(m.type(), CV_32FC1);
    EXPECT_EQ(m.dims, 2);
    EXPECT_EQ(m.cols, 4);
    std::vector<cv::Rect2d> boxes;
    boxes.reserve(m.rows);
    for (int row = 0; row < m.rows; ++row)
    {
        CV_Assert(m.row(row).isContinuous());
        const float* p = m.ptr<float>(row);
        const double left = p[0], top = p[1], right = p[2], bottom = p[3];
        boxes.push_back(cv::Rect2d(left, top, right - left, bottom - top));
    }
    return boxes;
}
/**
 * Compare predicted detections against reference detections.
 *
 * Every test detection with score above @p confThreshold must greedily match
 * exactly one unmatched reference detection: same class id, score within
 * @p scores_diff, and IoU within @p boxes_iou_diff of 1.0. Afterwards, every
 * unmatched reference detection above @p confThreshold is reported as a failure.
 * Unmatched entries are printed to stdout to simplify debugging.
 */
void normAssertDetections(
        const std::vector<int>& refClassIds,
        const std::vector<float>& refScores,
        const std::vector<cv::Rect2d>& refBoxes,
        const std::vector<int>& testClassIds,
        const std::vector<float>& testScores,
        const std::vector<cv::Rect2d>& testBoxes,
        const char *comment /*= ""*/, double confThreshold /*= 0.0*/,
        double scores_diff /*= 1e-5*/, double boxes_iou_diff /*= 1e-4*/)
{
    // Each reference box may be consumed by at most one test detection.
    std::vector<bool> matchedRefBoxes(refBoxes.size(), false);
    for (size_t i = 0; i < testBoxes.size(); ++i)  // size_t: avoid signed/unsigned mismatch
    {
        double testScore = testScores[i];
        if (testScore < confThreshold)
            continue;  // low-confidence predictions are ignored entirely

        int testClassId = testClassIds[i];
        const cv::Rect2d& testBox = testBoxes[i];
        bool matched = false;
        for (size_t j = 0; j < refBoxes.size() && !matched; ++j)
        {
            if (!matchedRefBoxes[j] && testClassId == refClassIds[j] &&
                std::abs(testScore - refScores[j]) < scores_diff)
            {
                // Intersection-over-union between predicted and reference boxes.
                double interArea = (testBox & refBoxes[j]).area();
                double iou = interArea / (testBox.area() + refBoxes[j].area() - interArea);
                if (std::abs(iou - 1.0) < boxes_iou_diff)
                {
                    matched = true;
                    matchedRefBoxes[j] = true;
                }
            }
        }
        if (!matched)
            std::cout << cv::format("Unmatched prediction: class %d score %f box ",
                                    testClassId, testScore) << testBox << std::endl;
        EXPECT_TRUE(matched) << comment;
    }

    // Check unmatched reference detections.
    for (size_t i = 0; i < refBoxes.size(); ++i)
    {
        if (!matchedRefBoxes[i] && refScores[i] > confThreshold)
        {
            std::cout << cv::format("Unmatched reference: class %d score %f box ",
                                    refClassIds[i], refScores[i]) << refBoxes[i] << std::endl;
            EXPECT_LE(refScores[i], confThreshold) << comment;
        }
    }
}
// For SSD-based object detection networks which produce output of shape 1x1xNx7
// where N is a number of detections and an every detection is represented by
// a vector [batchId, classId, confidence, left, top, right, bottom].
void normAssertDetections(
        cv::Mat ref, cv::Mat out, const char *comment /*= ""*/,
        double confThreshold /*= 0.0*/, double scores_diff /*= 1e-5*/,
        double boxes_iou_diff /*= 1e-4*/)
{
    // Flatten both blobs to one detection per row (7 values each).
    CV_Assert(ref.total() % 7 == 0);
    CV_Assert(out.total() % 7 == 0);
    ref = ref.reshape(1, ref.total() / 7);
    out = out.reshape(1, out.total() / 7);

    // Column 1 holds class ids, column 2 scores, columns 3..6 box coordinates.
    cv::Mat refClassIds, testClassIds;
    ref.col(1).convertTo(refClassIds, CV_32SC1);
    out.col(1).convertTo(testClassIds, CV_32SC1);
    std::vector<float> refScores(ref.col(2));
    std::vector<float> testScores(out.col(2));
    std::vector<cv::Rect2d> refBoxes = matToBoxes(ref.colRange(3, 7));
    std::vector<cv::Rect2d> testBoxes = matToBoxes(out.colRange(3, 7));

    // Delegate the actual matching to the vector-based overload.
    normAssertDetections(refClassIds, refScores, refBoxes, testClassIds, testScores,
                         testBoxes, comment, confThreshold, scores_diff, boxes_iou_diff);
}
/**
 * Read the whole file @p filename into @p content (binary mode).
 * @return false if the file cannot be opened, true otherwise.
 */
bool readFileInMemory(const std::string& filename, std::string& content)
{
    std::ios::openmode mode = std::ios::in | std::ios::binary;
    std::ifstream ifs(filename.c_str(), mode);
    if (!ifs.is_open())
        return false;

    content.clear();

    // Pre-allocate using the file size. tellg() returns -1 on failure
    // (e.g. a non-seekable stream); guard it so we never call
    // reserve((size_t)-1), which would throw std::length_error.
    ifs.seekg(0, std::ios::end);
    const std::streampos fileSize = ifs.tellg();
    if (fileSize > 0)
        content.reserve((size_t)fileSize);
    ifs.seekg(0, std::ios::beg);

    content.assign((std::istreambuf_iterator<char>(ifs)),
                   std::istreambuf_iterator<char>());
    return true;
}
// Build the list of (backend, target) pairs for parameterized DNN tests.
// Pairs are appended in a fixed order: Halide, Inference Engine, Vulkan,
// then OpenCV; MYRIAD targets are filtered out when VPU validation fails,
// and DNN_TARGET_CPU is filtered from the OpenCV backend when withCpuOCV
// is false. At least OCV/CPU is always present.
testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
        bool withInferenceEngine /*= true*/,
        bool withHalide /*= false*/,
        bool withCpuOCV /*= true*/,
        bool withVkCom /*= true*/
)
{
#ifdef HAVE_INF_ENGINE
    bool withVPU = validateVPUType();
#endif

    std::vector< tuple<Backend, Target> > cases;
    if (withHalide)
    {
        std::vector<Target> list = getAvailableTargets(DNN_BACKEND_HALIDE);
        for (size_t k = 0; k < list.size(); ++k)
            cases.push_back(make_tuple(DNN_BACKEND_HALIDE, list[k]));
    }
#ifdef HAVE_INF_ENGINE
    if (withInferenceEngine)
    {
        std::vector<Target> list = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
        for (size_t k = 0; k < list.size(); ++k)
        {
            // Skip MYRIAD unless the VPU configuration passed validation.
            if (list[k] == DNN_TARGET_MYRIAD && !withVPU)
                continue;
            cases.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, list[k]));
        }
    }
#else
    CV_UNUSED(withInferenceEngine);
#endif
    if (withVkCom)
    {
        std::vector<Target> list = getAvailableTargets(DNN_BACKEND_VKCOM);
        for (size_t k = 0; k < list.size(); ++k)
            cases.push_back(make_tuple(DNN_BACKEND_VKCOM, list[k]));
    }
    {
        std::vector<Target> list = getAvailableTargets(DNN_BACKEND_OPENCV);
        for (size_t k = 0; k < list.size(); ++k)
        {
            if (!withCpuOCV && list[k] == DNN_TARGET_CPU)
                continue;
            cases.push_back(make_tuple(DNN_BACKEND_OPENCV, list[k]));
        }
    }
    if (cases.empty()) // validate at least CPU mode
        cases.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
    return testing::ValuesIn(cases);
}
#ifdef HAVE_INF_ENGINE
// Returns the VPU type requested for testing via the
// 'OPENCV_TEST_DNN_IE_VPU_TYPE' configuration parameter (empty when unset).
// The parameter is read once and cached for all subsequent calls.
static std::string getTestInferenceEngineVPUType()
{
    static const std::string requested_vpu_type =
        utils::getConfigurationParameterString("OPENCV_TEST_DNN_IE_VPU_TYPE", "");
    return requested_vpu_type;
}
// Validates the VPU (MYRIAD) test configuration against what the DNN
// Inference Engine backend actually reports.
// Returns false when VPU testing is explicitly disabled via the
// 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter; terminates the whole test
// process (exit(1)) on a configuration mismatch; returns true otherwise.
static bool validateVPUType_()
{
    std::string test_vpu_type = getTestInferenceEngineVPUType();
    if (test_vpu_type == "DISABLED" || test_vpu_type == "disabled")
    {
        // User explicitly opted out of VPU tests.
        return false;
    }
    // Probe whether a MYRIAD target is actually available through DLIE.
    std::vector<Target> available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
    bool have_vpu_target = false;
    for (std::vector<Target>::const_iterator i = available.begin(); i != available.end(); ++i)
    {
        if (*i == DNN_TARGET_MYRIAD)
        {
            have_vpu_target = true;
            break;
        }
    }
    if (test_vpu_type.empty())
    {
        // No VPU type requested: proceed, just log a hint when hardware is present.
        if (have_vpu_target)
        {
            CV_LOG_INFO(NULL, "OpenCV-DNN-Test: VPU type for testing is not specified via 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter.")
        }
    }
    else
    {
        if (!have_vpu_target)
        {
            // A specific VPU type was requested but no VPU was detected: abort the run.
            CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter requires VPU of type = '" << test_vpu_type << "', but VPU is not detected. STOP.");
            exit(1);
        }
        std::string dnn_vpu_type = getInferenceEngineVPUType();
        if (dnn_vpu_type != test_vpu_type)
        {
            // Requested and detected VPU types disagree: abort the run.
            CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'testing' and 'detected' VPU types mismatch: '" << test_vpu_type << "' vs '" << dnn_vpu_type << "'. STOP.");
            exit(1);
        }
    }
    return true;
}
// Public wrapper: run the VPU-type validation exactly once and cache its
// verdict (C++11 guarantees thread-safe initialization of the local static).
bool validateVPUType()
{
    static const bool cached_verdict = validateVPUType_();
    return cached_verdict;
}
#endif // HAVE_INF_ENGINE
} // namespace

@ -1,266 +1,77 @@
/*M/////////////////////////////////////////////////////////////////////////////////////// // This file is part of OpenCV project.
// // It is subject to the license terms in the LICENSE file found in the top-level directory
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // of this distribution and at http://opencv.org/license.html.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_TEST_COMMON_HPP__ #ifndef __OPENCV_TEST_COMMON_HPP__
#define __OPENCV_TEST_COMMON_HPP__ #define __OPENCV_TEST_COMMON_HPP__
#include "opencv2/dnn/utils/inference_engine.hpp"
#ifdef HAVE_OPENCL #ifdef HAVE_OPENCL
#include "opencv2/core/ocl.hpp" #include "opencv2/core/ocl.hpp"
#endif #endif
namespace cv { namespace dnn { namespace cv { namespace dnn {
CV__DNN_INLINE_NS_BEGIN CV__DNN_INLINE_NS_BEGIN
static inline void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
{
switch (v) {
case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return;
case DNN_BACKEND_OPENCV: *os << "OCV"; return;
case DNN_BACKEND_VKCOM: *os << "VKCOM"; return;
} // don't use "default:" to emit compiler warnings
*os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
}
static inline void PrintTo(const cv::dnn::Target& v, std::ostream* os)
{
switch (v) {
case DNN_TARGET_CPU: *os << "CPU"; return;
case DNN_TARGET_OPENCL: *os << "OCL"; return;
case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return;
case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return;
case DNN_TARGET_VULKAN: *os << "VULKAN"; return;
case DNN_TARGET_FPGA: *os << "FPGA"; return;
} // don't use "default:" to emit compiler warnings
*os << "DNN_TARGET_UNKNOWN(" << (int)v << ")";
}
void PrintTo(const cv::dnn::Backend& v, std::ostream* os);
void PrintTo(const cv::dnn::Target& v, std::ostream* os);
using opencv_test::tuple; using opencv_test::tuple;
using opencv_test::get; using opencv_test::get;
static inline void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os) void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os);
{
PrintTo(get<0>(v), os);
*os << "/";
PrintTo(get<1>(v), os);
}
CV__DNN_INLINE_NS_END CV__DNN_INLINE_NS_END
}} // namespace }} // namespace cv::dnn
static inline const std::string &getOpenCVExtraDir()
{
return cvtest::TS::ptr()->get_data_path();
}
static inline void normAssert(cv::InputArray ref, cv::InputArray test, const char *comment = "", namespace opencv_test {
double l1 = 0.00001, double lInf = 0.0001)
{
double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
EXPECT_LE(normL1, l1) << comment;
double normInf = cvtest::norm(ref, test, cv::NORM_INF); using namespace cv::dnn;
EXPECT_LE(normInf, lInf) << comment;
}
static std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m) static inline const std::string &getOpenCVExtraDir()
{ {
EXPECT_EQ(m.type(), CV_32FC1); return cvtest::TS::ptr()->get_data_path();
EXPECT_EQ(m.dims, 2);
EXPECT_EQ(m.cols, 4);
std::vector<cv::Rect2d> boxes(m.rows);
for (int i = 0; i < m.rows; ++i)
{
CV_Assert(m.row(i).isContinuous());
const float* data = m.ptr<float>(i);
double l = data[0], t = data[1], r = data[2], b = data[3];
boxes[i] = cv::Rect2d(l, t, r - l, b - t);
}
return boxes;
} }
static inline void normAssertDetections(const std::vector<int>& refClassIds, void normAssert(
const std::vector<float>& refScores, cv::InputArray ref, cv::InputArray test, const char *comment = "",
const std::vector<cv::Rect2d>& refBoxes, double l1 = 0.00001, double lInf = 0.0001);
const std::vector<int>& testClassIds,
const std::vector<float>& testScores,
const std::vector<cv::Rect2d>& testBoxes,
const char *comment = "", double confThreshold = 0.0,
double scores_diff = 1e-5, double boxes_iou_diff = 1e-4)
{
std::vector<bool> matchedRefBoxes(refBoxes.size(), false);
for (int i = 0; i < testBoxes.size(); ++i)
{
double testScore = testScores[i];
if (testScore < confThreshold)
continue;
int testClassId = testClassIds[i]; std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m);
const cv::Rect2d& testBox = testBoxes[i];
bool matched = false;
for (int j = 0; j < refBoxes.size() && !matched; ++j)
{
if (!matchedRefBoxes[j] && testClassId == refClassIds[j] &&
std::abs(testScore - refScores[j]) < scores_diff)
{
double interArea = (testBox & refBoxes[j]).area();
double iou = interArea / (testBox.area() + refBoxes[j].area() - interArea);
if (std::abs(iou - 1.0) < boxes_iou_diff)
{
matched = true;
matchedRefBoxes[j] = true;
}
}
}
if (!matched)
std::cout << cv::format("Unmatched prediction: class %d score %f box ",
testClassId, testScore) << testBox << std::endl;
EXPECT_TRUE(matched) << comment;
}
// Check unmatched reference detections. void normAssertDetections(
for (int i = 0; i < refBoxes.size(); ++i) const std::vector<int>& refClassIds,
{ const std::vector<float>& refScores,
if (!matchedRefBoxes[i] && refScores[i] > confThreshold) const std::vector<cv::Rect2d>& refBoxes,
{ const std::vector<int>& testClassIds,
std::cout << cv::format("Unmatched reference: class %d score %f box ", const std::vector<float>& testScores,
refClassIds[i], refScores[i]) << refBoxes[i] << std::endl; const std::vector<cv::Rect2d>& testBoxes,
EXPECT_LE(refScores[i], confThreshold) << comment; const char *comment = "", double confThreshold = 0.0,
} double scores_diff = 1e-5, double boxes_iou_diff = 1e-4);
}
}
// For SSD-based object detection networks which produce output of shape 1x1xNx7 // For SSD-based object detection networks which produce output of shape 1x1xNx7
// where N is a number of detections and an every detection is represented by // where N is a number of detections and an every detection is represented by
// a vector [batchId, classId, confidence, left, top, right, bottom]. // a vector [batchId, classId, confidence, left, top, right, bottom].
static inline void normAssertDetections(cv::Mat ref, cv::Mat out, const char *comment = "", void normAssertDetections(
double confThreshold = 0.0, double scores_diff = 1e-5, cv::Mat ref, cv::Mat out, const char *comment = "",
double boxes_iou_diff = 1e-4) double confThreshold = 0.0, double scores_diff = 1e-5,
{ double boxes_iou_diff = 1e-4);
CV_Assert(ref.total() % 7 == 0);
CV_Assert(out.total() % 7 == 0);
ref = ref.reshape(1, ref.total() / 7);
out = out.reshape(1, out.total() / 7);
cv::Mat refClassIds, testClassIds;
ref.col(1).convertTo(refClassIds, CV_32SC1);
out.col(1).convertTo(testClassIds, CV_32SC1);
std::vector<float> refScores(ref.col(2)), testScores(out.col(2));
std::vector<cv::Rect2d> refBoxes = matToBoxes(ref.colRange(3, 7));
std::vector<cv::Rect2d> testBoxes = matToBoxes(out.colRange(3, 7));
normAssertDetections(refClassIds, refScores, refBoxes, testClassIds, testScores,
testBoxes, comment, confThreshold, scores_diff, boxes_iou_diff);
}
static inline bool readFileInMemory(const std::string& filename, std::string& content)
{
std::ios::openmode mode = std::ios::in | std::ios::binary;
std::ifstream ifs(filename.c_str(), mode);
if (!ifs.is_open())
return false;
content.clear(); bool readFileInMemory(const std::string& filename, std::string& content);
ifs.seekg(0, std::ios::end); #ifdef HAVE_INF_ENGINE
content.reserve(ifs.tellg()); bool validateVPUType();
ifs.seekg(0, std::ios::beg); #endif
content.assign((std::istreambuf_iterator<char>(ifs)),
std::istreambuf_iterator<char>());
return true;
}
namespace opencv_test {
using namespace cv::dnn;
static inline
testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets( testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
bool withInferenceEngine = true, bool withInferenceEngine = true,
bool withHalide = false, bool withHalide = false,
bool withCpuOCV = true, bool withCpuOCV = true,
bool withVkCom = true bool withVkCom = true
) );
{
std::vector< tuple<Backend, Target> > targets;
std::vector< Target > available;
if (withHalide)
{
available = getAvailableTargets(DNN_BACKEND_HALIDE);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
targets.push_back(make_tuple(DNN_BACKEND_HALIDE, *i));
}
if (withInferenceEngine)
{
available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, *i));
}
if (withVkCom)
{
available = getAvailableTargets(DNN_BACKEND_VKCOM);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
targets.push_back(make_tuple(DNN_BACKEND_VKCOM, *i));
}
{
available = getAvailableTargets(DNN_BACKEND_OPENCV);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
{
if (!withCpuOCV && *i == DNN_TARGET_CPU)
continue;
targets.push_back(make_tuple(DNN_BACKEND_OPENCV, *i));
}
}
if (targets.empty()) // validate at least CPU mode
targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
return testing::ValuesIn(targets);
}
} // namespace
namespace opencv_test {
using namespace cv::dnn;
class DNNTestLayer : public TestWithParam<tuple<Backend, Target> > class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
{ {
@ -276,29 +87,29 @@ public:
getDefaultThresholds(backend, target, &default_l1, &default_lInf); getDefaultThresholds(backend, target, &default_l1, &default_lInf);
} }
static void getDefaultThresholds(int backend, int target, double* l1, double* lInf) static void getDefaultThresholds(int backend, int target, double* l1, double* lInf)
{ {
if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
{ {
*l1 = 4e-3; *l1 = 4e-3;
*lInf = 2e-2; *lInf = 2e-2;
} }
else else
{ {
*l1 = 1e-5; *l1 = 1e-5;
*lInf = 1e-4; *lInf = 1e-4;
} }
} }
static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0) static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
{ {
if (inp && ref && inp->dims == 4 && ref->dims == 4 && if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
inp->size[0] != 1 && inp->size[0] != ref->size[0]) inp->size[0] != 1 && inp->size[0] != ref->size[0])
throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin"); throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin");
} }
} }
protected: protected:
void checkBackend(Mat* inp = 0, Mat* ref = 0) void checkBackend(Mat* inp = 0, Mat* ref = 0)
@ -309,4 +120,12 @@ protected:
} // namespace } // namespace
// src/op_inf_engine.hpp
#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
#endif #endif

@ -267,6 +267,16 @@ public:
TEST_P(Test_Darknet_nets, YoloVoc) TEST_P(Test_Darknet_nets, YoloVoc)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("Test is disabled");
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX (need to update check function)");
#endif
// batchId, classId, confidence, left, top, right, bottom // batchId, classId, confidence, left, top, right, bottom
Mat ref = (Mat_<float>(6, 7) << 0, 6, 0.750469f, 0.577374f, 0.127391f, 0.902949f, 0.300809f, // a car Mat ref = (Mat_<float>(6, 7) << 0, 6, 0.750469f, 0.577374f, 0.127391f, 0.902949f, 0.300809f, // a car
0, 1, 0.780879f, 0.270762f, 0.264102f, 0.732475f, 0.745412f, // a bicycle 0, 1, 0.780879f, 0.270762f, 0.264102f, 0.732475f, 0.745412f, // a bicycle
@ -282,15 +292,24 @@ TEST_P(Test_Darknet_nets, YoloVoc)
std::string config_file = "yolo-voc.cfg"; std::string config_file = "yolo-voc.cfg";
std::string weights_file = "yolo-voc.weights"; std::string weights_file = "yolo-voc.weights";
// batch size 1 {
SCOPED_TRACE("batch size 1");
testDarknetModel(config_file, weights_file, ref.rowRange(0, 3), scoreDiff, iouDiff); testDarknetModel(config_file, weights_file, ref.rowRange(0, 3), scoreDiff, iouDiff);
}
// batch size 2 {
SCOPED_TRACE("batch size 2");
testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff, 0.24, nmsThreshold); testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff, 0.24, nmsThreshold);
}
} }
TEST_P(Test_Darknet_nets, TinyYoloVoc) TEST_P(Test_Darknet_nets, TinyYoloVoc)
{ {
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX (need to update check function)");
#endif
// batchId, classId, confidence, left, top, right, bottom // batchId, classId, confidence, left, top, right, bottom
Mat ref = (Mat_<float>(4, 7) << 0, 6, 0.761967f, 0.579042f, 0.159161f, 0.894482f, 0.31994f, // a car Mat ref = (Mat_<float>(4, 7) << 0, 6, 0.761967f, 0.579042f, 0.159161f, 0.894482f, 0.31994f, // a car
0, 11, 0.780595f, 0.129696f, 0.386467f, 0.445275f, 0.920994f, // a dog 0, 11, 0.780595f, 0.129696f, 0.386467f, 0.445275f, 0.920994f, // a dog
@ -303,18 +322,29 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
std::string config_file = "tiny-yolo-voc.cfg"; std::string config_file = "tiny-yolo-voc.cfg";
std::string weights_file = "tiny-yolo-voc.weights"; std::string weights_file = "tiny-yolo-voc.weights";
// batch size 1 {
SCOPED_TRACE("batch size 1");
testDarknetModel(config_file, weights_file, ref.rowRange(0, 2), scoreDiff, iouDiff); testDarknetModel(config_file, weights_file, ref.rowRange(0, 2), scoreDiff, iouDiff);
}
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test with 'batch size 2' is disabled for Myriad target (fixed in 2018R5)");
#endif #endif
// batch size 2 {
SCOPED_TRACE("batch size 2");
testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff); testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff);
}
} }
TEST_P(Test_Darknet_nets, YOLOv3) TEST_P(Test_Darknet_nets, YOLOv3)
{ {
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
#endif
// batchId, classId, confidence, left, top, right, bottom // batchId, classId, confidence, left, top, right, bottom
Mat ref = (Mat_<float>(9, 7) << 0, 7, 0.952983f, 0.614622f, 0.150257f, 0.901369f, 0.289251f, // a truck Mat ref = (Mat_<float>(9, 7) << 0, 7, 0.952983f, 0.614622f, 0.150257f, 0.901369f, 0.289251f, // a truck
0, 1, 0.987908f, 0.150913f, 0.221933f, 0.742255f, 0.74626f, // a bicycle 0, 1, 0.987908f, 0.150913f, 0.221933f, 0.742255f, 0.74626f, // a bicycle
@ -332,13 +362,18 @@ TEST_P(Test_Darknet_nets, YOLOv3)
std::string config_file = "yolov3.cfg"; std::string config_file = "yolov3.cfg";
std::string weights_file = "yolov3.weights"; std::string weights_file = "yolov3.weights";
// batch size 1 {
SCOPED_TRACE("batch size 1");
testDarknetModel(config_file, weights_file, ref.rowRange(0, 3), scoreDiff, iouDiff); testDarknetModel(config_file, weights_file, ref.rowRange(0, 3), scoreDiff, iouDiff);
}
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
throw SkipTestException("Test with 'batch size 2' is disabled for DLIE/OpenCL target");
#endif
if ((backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_MYRIAD) &&
(backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_OPENCL))
{ {
// batch size 2 SCOPED_TRACE("batch size 2");
testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff); testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff);
} }
} }

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory // It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
// //
// Copyright (C) 2017, Intel Corporation, all rights reserved. // Copyright (C) 2017-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// This tests doesn't require any external data. They just compare outputs of // This tests doesn't require any external data. They just compare outputs of
@ -158,15 +158,26 @@ TEST_P(Deconvolution, Accuracy)
bool hasBias = get<6>(GetParam()); bool hasBias = get<6>(GetParam());
Backend backendId = get<0>(get<7>(GetParam())); Backend backendId = get<0>(get<7>(GetParam()));
Target targetId = get<1>(get<7>(GetParam())); Target targetId = get<1>(get<7>(GetParam()));
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && (targetId == DNN_TARGET_CPU || targetId == DNN_TARGET_MYRIAD) && if (backendId == DNN_BACKEND_INFERENCE_ENGINE && (targetId == DNN_TARGET_CPU || targetId == DNN_TARGET_MYRIAD) &&
dilation.width == 2 && dilation.height == 2) dilation.width == 2 && dilation.height == 2)
throw SkipTestException(""); throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU && #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018040000)
hasBias && group != 1) if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU
&& hasBias && group != 1)
throw SkipTestException("Test is disabled for OpenVINO 2018R4"); throw SkipTestException("Test is disabled for OpenVINO 2018R4");
#endif #endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
&& inChannels == 6 && outChannels == 4 && group == 1
&& kernel == Size(1, 3) && pad == Size(1, 0)
&& stride == Size(1, 1) && dilation == Size(1, 1))
throw SkipTestException("Test is disabled");
#endif
int sz[] = {inChannels, outChannels / group, kernel.height, kernel.width}; int sz[] = {inChannels, outChannels / group, kernel.height, kernel.width};
Mat weights(4, &sz[0], CV_32F); Mat weights(4, &sz[0], CV_32F);
randu(weights, -1.0f, 1.0f); randu(weights, -1.0f, 1.0f);
@ -270,10 +281,18 @@ TEST_P(AvePooling, Accuracy)
Size stride = get<3>(GetParam()); Size stride = get<3>(GetParam());
Backend backendId = get<0>(get<4>(GetParam())); Backend backendId = get<0>(get<4>(GetParam()));
Target targetId = get<1>(get<4>(GetParam())); Target targetId = get<1>(get<4>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018040000
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
&& kernel == Size(1, 1) && (stride == Size(1, 1) || stride == Size(2, 2)))
throw SkipTestException("Test is disabled for MyriadX target");
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2018040000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD && if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD &&
stride == Size(3, 2) && kernel == Size(3, 3) && outSize != Size(1, 1)) stride == Size(3, 2) && kernel == Size(3, 3) && outSize != Size(1, 1))
throw SkipTestException("Test is enabled starts from OpenVINO 2018R4"); throw SkipTestException("Test is fixed in OpenVINO 2018R4");
#endif #endif
const int inWidth = (outSize.width - 1) * stride.width + kernel.width; const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
@ -315,6 +334,32 @@ TEST_P(MaxPooling, Accuracy)
Backend backendId = get<0>(get<5>(GetParam())); Backend backendId = get<0>(get<5>(GetParam()));
Target targetId = get<1>(get<5>(GetParam())); Target targetId = get<1>(get<5>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& inSize == Size(7, 6) && kernel == Size(3, 2)
&& (stride == Size(1, 1) || stride == Size(2, 2))
&& (pad == Size(0, 1) || pad == Size(1, 1))
)
throw SkipTestException("Test is disabled in OpenVINO <= 2018R5");
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& (kernel == Size(2, 2) || kernel == Size(3, 2))
&& stride == Size(1, 1) && (pad == Size(0, 0) || pad == Size(0, 1))
)
throw SkipTestException("Problems with output dimension in OpenVINO 2018R5");
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
&& (stride == Size(1, 1) || stride == Size(2, 2))
&& (pad == Size(0, 1) || pad == Size(1, 1))
)
throw SkipTestException("Test is disabled for MyriadX target");
#endif
LayerParams lp; LayerParams lp;
lp.set("pool", "max"); lp.set("pool", "max");
lp.set("kernel_w", kernel.width); lp.set("kernel_w", kernel.width);
@ -516,6 +561,12 @@ TEST_P(ReLU, Accuracy)
float negativeSlope = get<0>(GetParam()); float negativeSlope = get<0>(GetParam());
Backend backendId = get<0>(get<1>(GetParam())); Backend backendId = get<0>(get<1>(GetParam()));
Target targetId = get<1>(get<1>(GetParam())); Target targetId = get<1>(get<1>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE
&& negativeSlope < 0
)
throw SkipTestException("Test is disabled");
#endif
LayerParams lp; LayerParams lp;
lp.set("negative_slope", negativeSlope); lp.set("negative_slope", negativeSlope);
@ -538,6 +589,13 @@ TEST_P(NoParamActivation, Accuracy)
LayerParams lp; LayerParams lp;
lp.type = get<0>(GetParam()); lp.type = get<0>(GetParam());
lp.name = "testLayer"; lp.name = "testLayer";
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE
&& lp.type == "AbsVal"
)
throw SkipTestException("Test is disabled");
#endif
testInPlaceActivation(lp, backendId, targetId); testInPlaceActivation(lp, backendId, targetId);
} }
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Combine( INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Combine(
@ -623,6 +681,20 @@ TEST_P(Concat, Accuracy)
Backend backendId = get<0>(get<2>(GetParam())); Backend backendId = get<0>(get<2>(GetParam()));
Target targetId = get<1>(get<2>(GetParam())); Target targetId = get<1>(get<2>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
)
throw SkipTestException("Test is disabled for Myriad target"); // crash
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU
&& inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
)
throw SkipTestException("Test is disabled for DLIE/CPU target");
#endif
Net net; Net net;
std::vector<int> convLayerIds; std::vector<int> convLayerIds;
@ -691,10 +763,15 @@ TEST_P(Eltwise, Accuracy)
Backend backendId = get<0>(get<4>(GetParam())); Backend backendId = get<0>(get<4>(GetParam()));
Target targetId = get<1>(get<4>(GetParam())); Target targetId = get<1>(get<4>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018050000 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD &&
(targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) inSize == Vec3i(1, 4, 5))
throw SkipTestException(""); throw SkipTestException("Test is disabled for Myriad target");
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && numConv > 1)
throw SkipTestException("Test is disabled for DLIE backend");
#endif #endif
Net net; Net net;

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory // It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
// //
// Copyright (C) 2018, Intel Corporation, all rights reserved. // Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
#include "test_precomp.hpp" #include "test_precomp.hpp"
@ -221,8 +221,15 @@ TEST_P(DNNTestOpenVINO, models)
{ {
auto dstIt = cvOutputsMap.find(srcIt.first); auto dstIt = cvOutputsMap.find(srcIt.first);
CV_Assert(dstIt != cvOutputsMap.end()); CV_Assert(dstIt != cvOutputsMap.end());
double normInfIE = cvtest::norm(srcIt.second, cv::NORM_INF);
double normInf = cvtest::norm(srcIt.second, dstIt->second, cv::NORM_INF); double normInf = cvtest::norm(srcIt.second, dstIt->second, cv::NORM_INF);
EXPECT_EQ(normInf, 0); double eps = 0;
if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
{
double fp16_eps = 1.0/1024;
eps = fp16_eps * 1/*ULP*/ * std::max(normInfIE, 1.0);
}
EXPECT_LE(normInf, eps) << "IE: " << normInfIE;
} }
} }

@ -236,9 +236,9 @@ TEST_P(Test_Caffe_layers, Dropout)
TEST_P(Test_Caffe_layers, Concat) TEST_P(Test_Caffe_layers, Concat)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018050000 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException(""); throw SkipTestException("Test is disabled for Myriad targets");
#endif #endif
testLayerUsingCaffeModels("layer_concat"); testLayerUsingCaffeModels("layer_concat");
testLayerUsingCaffeModels("layer_concat_optim", true, false); testLayerUsingCaffeModels("layer_concat_optim", true, false);
@ -247,14 +247,19 @@ TEST_P(Test_Caffe_layers, Concat)
TEST_P(Test_Caffe_layers, Fused_Concat) TEST_P(Test_Caffe_layers, Fused_Concat)
{ {
#if defined(INF_ENGINE_RELEASE) #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE) if (backend == DNN_BACKEND_INFERENCE_ENGINE)
{ throw SkipTestException("Test is disabled for DLIE due negative_slope parameter");
if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16 || #endif
(INF_ENGINE_RELEASE < 2018040000 && target == DNN_TARGET_CPU))
throw SkipTestException(""); #if defined(INF_ENGINE_RELEASE)
} if (backend == DNN_BACKEND_INFERENCE_ENGINE
&& (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16
|| (INF_ENGINE_RELEASE < 2018040000 && target == DNN_TARGET_CPU))
)
throw SkipTestException("Test is disabled for DLIE");
#endif #endif
checkBackend(); checkBackend();
// Test case // Test case
@ -312,7 +317,10 @@ TEST_P(Test_Caffe_layers, layer_prelu_fc)
{ {
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException(""); throw SkipTestException("");
testLayerUsingCaffeModels("layer_prelu_fc", true, false); // Reference output values are in range [-0.0001, 10.3906]
double l1 = (target == DNN_TARGET_MYRIAD) ? 0.005 : 0.0;
double lInf = (target == DNN_TARGET_MYRIAD) ? 0.021 : 0.0;
testLayerUsingCaffeModels("layer_prelu_fc", true, false, l1, lInf);
} }
//template<typename XMat> //template<typename XMat>
@ -358,6 +366,11 @@ TEST_P(Test_Caffe_layers, Reshape_Split_Slice)
TEST_P(Test_Caffe_layers, Conv_Elu) TEST_P(Test_Caffe_layers, Conv_Elu)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE <= 2018050000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("");
#endif
Net net = readNetFromTensorflow(_tf("layer_elu_model.pb")); Net net = readNetFromTensorflow(_tf("layer_elu_model.pb"));
ASSERT_FALSE(net.empty()); ASSERT_FALSE(net.empty());
@ -938,7 +951,7 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
Mat out = net.forward(); Mat out = net.forward();
double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.4e-3 : 1e-5; double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.5e-3 : 1e-5;
double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.8e-2 : 1e-4; double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.8e-2 : 1e-4;
normAssert(outDefault, out, "", l1, lInf); normAssert(outDefault, out, "", l1, lInf);

@ -2,14 +2,13 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory // It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2018, Intel Corporation, all rights reserved. // Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
#include "test_precomp.hpp" #include "test_precomp.hpp"
#include "npy_blob.hpp" #include "npy_blob.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
namespace opencv_test { namespace { namespace opencv_test { namespace {
template<typename TString> template<typename TString>
@ -28,7 +27,8 @@ public:
pb pb
}; };
void testONNXModels(const String& basename, const Extension ext = npy, const double l1 = 0, const float lInf = 0) void testONNXModels(const String& basename, const Extension ext = npy,
const double l1 = 0, const float lInf = 0, const bool useSoftmax = false)
{ {
String onnxmodel = _tf("models/" + basename + ".onnx"); String onnxmodel = _tf("models/" + basename + ".onnx");
Mat inp, ref; Mat inp, ref;
@ -51,7 +51,21 @@ public:
net.setPreferableTarget(target); net.setPreferableTarget(target);
net.setInput(inp); net.setInput(inp);
Mat out = net.forward(); Mat out = net.forward("");
if (useSoftmax)
{
LayerParams lp;
Net netSoftmax;
netSoftmax.addLayerToPrev("softmaxLayer", "SoftMax", lp);
netSoftmax.setPreferableBackend(DNN_BACKEND_OPENCV);
netSoftmax.setInput(out);
out = netSoftmax.forward();
netSoftmax.setInput(ref);
ref = netSoftmax.forward();
}
normAssert(ref, out, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf); normAssert(ref, out, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
} }
}; };
@ -65,6 +79,18 @@ TEST_P(Test_ONNX_layers, MaxPooling)
TEST_P(Test_ONNX_layers, Convolution) TEST_P(Test_ONNX_layers, Convolution)
{ {
testONNXModels("convolution"); testONNXModels("convolution");
}
TEST_P(Test_ONNX_layers, Two_convolution)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX"); // 2018R5+ is failed
#endif
// Reference output values are in range [-0.855, 0.611]
testONNXModels("two_convolution"); testONNXModels("two_convolution");
} }
@ -134,6 +160,11 @@ TEST_P(Test_ONNX_layers, Multiplication)
TEST_P(Test_ONNX_layers, Constant) TEST_P(Test_ONNX_layers, Constant)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
#endif
testONNXModels("constant"); testONNXModels("constant");
} }
@ -244,7 +275,8 @@ TEST_P(Test_ONNX_nets, CaffeNet)
TEST_P(Test_ONNX_nets, RCNN_ILSVRC13) TEST_P(Test_ONNX_nets, RCNN_ILSVRC13)
{ {
testONNXModels("rcnn_ilsvrc13", pb); // Reference output values are in range [-4.992, -1.161]
testONNXModels("rcnn_ilsvrc13", pb, 0.0045);
} }
#ifdef OPENCV_32BIT_CONFIGURATION #ifdef OPENCV_32BIT_CONFIGURATION
@ -253,21 +285,8 @@ TEST_P(Test_ONNX_nets, DISABLED_VGG16) // memory usage >2Gb
TEST_P(Test_ONNX_nets, VGG16) TEST_P(Test_ONNX_nets, VGG16)
#endif #endif
{ {
double l1 = default_l1; // output range: [-69; 72], after Softmax [0; 0.96]
double lInf = default_lInf; testONNXModels("vgg16", pb, default_l1, default_lInf, true);
// output range: [-69; 72]
if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) {
l1 = 0.087;
lInf = 0.585;
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL) {
lInf = 1.2e-4;
}
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018050000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
l1 = 0.131;
#endif
testONNXModels("vgg16", pb, l1, lInf);
} }
#ifdef OPENCV_32BIT_CONFIGURATION #ifdef OPENCV_32BIT_CONFIGURATION
@ -276,19 +295,9 @@ TEST_P(Test_ONNX_nets, DISABLED_VGG16_bn) // memory usage >2Gb
TEST_P(Test_ONNX_nets, VGG16_bn) TEST_P(Test_ONNX_nets, VGG16_bn)
#endif #endif
{ {
double l1 = default_l1; // output range: [-16; 27], after Softmax [0; 0.67]
double lInf = default_lInf; const double lInf = (target == DNN_TARGET_MYRIAD) ? 0.038 : default_lInf;
// output range: [-16; 27] testONNXModels("vgg16-bn", pb, default_l1, lInf, true);
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) {
l1 = 0.0086;
lInf = 0.037;
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)) {
l1 = 0.031;
lInf = 0.2;
}
testONNXModels("vgg16-bn", pb, l1, lInf);
} }
TEST_P(Test_ONNX_nets, ZFNet) TEST_P(Test_ONNX_nets, ZFNet)
@ -298,56 +307,62 @@ TEST_P(Test_ONNX_nets, ZFNet)
TEST_P(Test_ONNX_nets, ResNet18v1) TEST_P(Test_ONNX_nets, ResNet18v1)
{ {
// output range: [-16; 22] // output range: [-16; 22], after Softmax [0, 0.51]
const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.022 : default_l1; testONNXModels("resnet18v1", pb, default_l1, default_lInf, true);
const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.12 : default_lInf;
testONNXModels("resnet18v1", pb, l1, lInf);
} }
TEST_P(Test_ONNX_nets, ResNet50v1) TEST_P(Test_ONNX_nets, ResNet50v1)
{ {
// output range: [-67; 75] // output range: [-67; 75], after Softmax [0, 0.98]
const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.6 : 1.25e-5; testONNXModels("resnet50v1", pb, default_l1, default_lInf, true);
const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.51 : 1.2e-4;
testONNXModels("resnet50v1", pb, l1, lInf);
} }
TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC) TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC)
{ {
if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
|| target == DNN_TARGET_MYRIAD) { if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException(""); throw SkipTestException("Test is disabled for DLIE targets");
} #endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
#endif
if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL)
throw SkipTestException("Test is disabled for OpenCL targets");
testONNXModels("resnet101_duc_hdc", pb); testONNXModels("resnet101_duc_hdc", pb);
} }
TEST_P(Test_ONNX_nets, TinyYolov2) TEST_P(Test_ONNX_nets, TinyYolov2)
{ {
if (cvtest::skipUnstableTests || if (cvtest::skipUnstableTests)
(backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))) { throw SkipTestException("Skip unstable test");
throw SkipTestException(""); #if defined(INF_ENGINE_RELEASE)
} if (backend == DNN_BACKEND_INFERENCE_ENGINE
&& (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
)
throw SkipTestException("Test is disabled for DLIE OpenCL targets");
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
// output range: [-11; 8] // output range: [-11; 8]
const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.017 : default_l1; double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.017 : default_l1;
const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.14 : default_lInf; double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.14 : default_lInf;
testONNXModels("tiny_yolo2", pb, l1, lInf); testONNXModels("tiny_yolo2", pb, l1, lInf);
} }
TEST_P(Test_ONNX_nets, CNN_MNIST) TEST_P(Test_ONNX_nets, CNN_MNIST)
{ {
// output range: [-1952; 6574] // output range: [-1952; 6574], after Softmax [0; 1]
const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 3.82 : 4.4e-4; testONNXModels("cnn_mnist", pb, default_l1, default_lInf, true);
const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 13.5 : 2e-3;
testONNXModels("cnn_mnist", pb, l1, lInf);
} }
TEST_P(Test_ONNX_nets, MobileNet_v2) TEST_P(Test_ONNX_nets, MobileNet_v2)
{ {
// output range: [-166; 317] // output range: [-166; 317], after Softmax [0; 1]
const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.4 : 7e-5; testONNXModels("mobilenetv2", pb, default_l1, default_lInf, true);
const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 2.87 : 5e-4;
testONNXModels("mobilenetv2", pb, l1, lInf);
} }
TEST_P(Test_ONNX_nets, LResNet100E_IR) TEST_P(Test_ONNX_nets, LResNet100E_IR)
@ -372,9 +387,17 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
TEST_P(Test_ONNX_nets, Emotion_ferplus) TEST_P(Test_ONNX_nets, Emotion_ferplus)
{ {
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
double l1 = default_l1; double l1 = default_l1;
double lInf = default_lInf; double lInf = default_lInf;
// Output values are in range [-2.01109, 2.11111]
// Output values are in range [-2.011, 2.111]
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
l1 = 0.007; l1 = 0.007;
else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
@ -391,25 +414,20 @@ TEST_P(Test_ONNX_nets, Emotion_ferplus)
TEST_P(Test_ONNX_nets, Inception_v2) TEST_P(Test_ONNX_nets, Inception_v2)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE) testONNXModels("inception_v2", pb, default_l1, default_lInf, true);
throw SkipTestException("");
testONNXModels("inception_v2", pb);
} }
TEST_P(Test_ONNX_nets, DenseNet121) TEST_P(Test_ONNX_nets, DenseNet121)
{ {
// output range: [-87; 138] // output range: [-87; 138], after Softmax [0; 1]
const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.12 : 2.2e-5; testONNXModels("densenet121", pb, default_l1, default_lInf, true);
const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.74 : 1.23e-4;
testONNXModels("densenet121", pb, l1, lInf);
} }
TEST_P(Test_ONNX_nets, Inception_v1) TEST_P(Test_ONNX_nets, Inception_v1)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018050000 #if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for OpenVINO 2018R5"); throw SkipTestException("Test is disabled for Myriad targets");
#endif #endif
testONNXModels("inception_v1", pb); testONNXModels("inception_v1", pb);
} }

@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory // It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2017, Intel Corporation, all rights reserved. // Copyright (C) 2017-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
/* /*
@ -133,12 +133,27 @@ TEST_P(Test_TensorFlow_layers, conv)
TEST_P(Test_TensorFlow_layers, padding) TEST_P(Test_TensorFlow_layers, padding)
{ {
runTensorFlowNet("padding_same");
runTensorFlowNet("padding_valid"); runTensorFlowNet("padding_valid");
runTensorFlowNet("spatial_padding"); runTensorFlowNet("spatial_padding");
runTensorFlowNet("keras_pad_concat"); runTensorFlowNet("keras_pad_concat");
} }
TEST_P(Test_TensorFlow_layers, padding_same)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE");
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
// Reference output values are in range [0.0006, 2.798]
runTensorFlowNet("padding_same");
}
TEST_P(Test_TensorFlow_layers, eltwise) TEST_P(Test_TensorFlow_layers, eltwise)
{ {
runTensorFlowNet("eltwise_add_mul"); runTensorFlowNet("eltwise_add_mul");
@ -181,6 +196,13 @@ TEST_P(Test_TensorFlow_layers, pooling)
// TODO: fix tests and replace to pooling // TODO: fix tests and replace to pooling
TEST_P(Test_TensorFlow_layers, ave_pool_same) TEST_P(Test_TensorFlow_layers, ave_pool_same)
{ {
// Reference output values are in range [-0.519531, 0.112976]
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
runTensorFlowNet("ave_pool_same"); runTensorFlowNet("ave_pool_same");
} }
@ -200,8 +222,11 @@ TEST_P(Test_TensorFlow_layers, matmul)
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException(""); throw SkipTestException("");
runTensorFlowNet("matmul"); runTensorFlowNet("matmul");
runTensorFlowNet("nhwc_reshape_matmul");
runTensorFlowNet("nhwc_transpose_reshape_matmul"); runTensorFlowNet("nhwc_transpose_reshape_matmul");
// Reference output values are in range [-5.688, 4.484]
double l1 = target == DNN_TARGET_MYRIAD ? 6.1e-3 : default_l1;
runTensorFlowNet("nhwc_reshape_matmul", false, l1);
} }
TEST_P(Test_TensorFlow_layers, reshape) TEST_P(Test_TensorFlow_layers, reshape)
@ -216,26 +241,36 @@ TEST_P(Test_TensorFlow_layers, reshape)
TEST_P(Test_TensorFlow_layers, flatten) TEST_P(Test_TensorFlow_layers, flatten)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE && #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
(target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)) if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException(""); throw SkipTestException("Test is disabled for DLIE");
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
)
throw SkipTestException("Test is disabled for Myriad2");
#endif
runTensorFlowNet("flatten", true); runTensorFlowNet("flatten", true);
} }
TEST_P(Test_TensorFlow_layers, unfused_flatten) TEST_P(Test_TensorFlow_layers, unfused_flatten)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE && #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
(target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)) if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException(""); throw SkipTestException("Test is disabled for DLIE");
#endif
runTensorFlowNet("unfused_flatten"); runTensorFlowNet("unfused_flatten");
runTensorFlowNet("unfused_flatten_unknown_batch"); runTensorFlowNet("unfused_flatten_unknown_batch");
} }
TEST_P(Test_TensorFlow_layers, leaky_relu) TEST_P(Test_TensorFlow_layers, leaky_relu)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018050000 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
throw SkipTestException(""); throw SkipTestException("Test is disabled for DLIE/OCL target (OpenVINO 2018R5)");
#endif #endif
runTensorFlowNet("leaky_relu_order1"); runTensorFlowNet("leaky_relu_order1");
runTensorFlowNet("leaky_relu_order2"); runTensorFlowNet("leaky_relu_order2");
@ -244,14 +279,30 @@ TEST_P(Test_TensorFlow_layers, leaky_relu)
TEST_P(Test_TensorFlow_layers, l2_normalize) TEST_P(Test_TensorFlow_layers, l2_normalize)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
runTensorFlowNet("l2_normalize"); runTensorFlowNet("l2_normalize");
} }
// TODO: fix it and add to l2_normalize // TODO: fix it and add to l2_normalize
TEST_P(Test_TensorFlow_layers, l2_normalize_3d) TEST_P(Test_TensorFlow_layers, l2_normalize_3d)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
throw SkipTestException(""); if (backend == DNN_BACKEND_INFERENCE_ENGINE
&& (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
)
throw SkipTestException("Test is disabled for DLIE for OpenCL targets");
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
#endif
runTensorFlowNet("l2_normalize_3d"); runTensorFlowNet("l2_normalize_3d");
} }
@ -300,6 +351,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
TEST_P(Test_TensorFlow_nets, Inception_v2_SSD) TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
{ {
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
checkBackend(); checkBackend();
std::string proto = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pbtxt", false); std::string proto = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pbtxt", false);
std::string model = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pb", false); std::string model = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pb", false);
@ -320,6 +378,7 @@ TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
0, 3, 0.75838411, 0.44668293, 0.45907149, 0.49459291, 0.52197015, 0, 3, 0.75838411, 0.44668293, 0.45907149, 0.49459291, 0.52197015,
0, 10, 0.95932811, 0.38349164, 0.32528657, 0.40387636, 0.39165527, 0, 10, 0.95932811, 0.38349164, 0.32528657, 0.40387636, 0.39165527,
0, 10, 0.93973452, 0.66561931, 0.37841269, 0.68074018, 0.42907384); 0, 10, 0.93973452, 0.66561931, 0.37841269, 0.68074018, 0.42907384);
double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0097 : default_l1; double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0097 : default_l1;
double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.09 : default_lInf; double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.09 : default_lInf;
normAssertDetections(ref, out, "", 0.5, scoreDiff, iouDiff); normAssertDetections(ref, out, "", 0.5, scoreDiff, iouDiff);
@ -329,6 +388,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
{ {
checkBackend(); checkBackend();
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
std::string model = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", false); std::string model = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", false);
std::string proto = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt", false); std::string proto = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt", false);
@ -354,7 +420,7 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
"faster_rcnn_resnet50_coco_2018_01_28"}; "faster_rcnn_resnet50_coco_2018_01_28"};
checkBackend(); checkBackend();
if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) || if ((backend == DNN_BACKEND_INFERENCE_ENGINE) ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)) (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException(""); throw SkipTestException("");
@ -380,10 +446,11 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN) TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018050000 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)) if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("Unstable test case"); throw SkipTestException("Test is disabled for DLIE OpenCL targets in OpenVINO 2018R5");
#endif #endif
checkBackend(); checkBackend();
std::string proto = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pbtxt", false); std::string proto = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pbtxt", false);
std::string model = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pb", false); std::string model = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pb", false);
@ -399,9 +466,9 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
net.setInput(blob); net.setInput(blob);
Mat out = net.forward(); Mat out = net.forward();
double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.011 : 1.1e-5; double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.048 : 1.1e-5;
double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.021 : default_lInf; double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.058 : default_lInf;
normAssertDetections(ref, out, "", 0.4, scoreDiff, iouDiff); normAssertDetections(ref, out, "", 0.45, scoreDiff, iouDiff);
} }
TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8) TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
@ -444,7 +511,13 @@ TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
// np.save('east_text_detection.geometry.npy', geometry) // np.save('east_text_detection.geometry.npy', geometry)
TEST_P(Test_TensorFlow_nets, EAST_text_detection) TEST_P(Test_TensorFlow_nets, EAST_text_detection)
{ {
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
#endif
checkBackend(); checkBackend();
std::string netPath = findDataFile("dnn/frozen_east_text_detection.pb", false); std::string netPath = findDataFile("dnn/frozen_east_text_detection.pb", false);
std::string imgPath = findDataFile("cv/ximgproc/sources/08.png", false); std::string imgPath = findDataFile("cv/ximgproc/sources/08.png", false);
std::string refScoresPath = findDataFile("dnn/east_text_detection.scores.npy", false); std::string refScoresPath = findDataFile("dnn/east_text_detection.scores.npy", false);
@ -478,8 +551,8 @@ TEST_P(Test_TensorFlow_nets, EAST_text_detection)
} }
else if (target == DNN_TARGET_MYRIAD) else if (target == DNN_TARGET_MYRIAD)
{ {
lInf_scores = 0.214; lInf_scores = 0.41;
l1_geometry = 0.47; lInf_geometry = 15.34; l1_geometry = 0.28; lInf_geometry = 5.94;
} }
else else
{ {
@ -493,17 +566,40 @@ INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_nets, dnnBackendsAndTargets());
TEST_P(Test_TensorFlow_layers, fp16_weights) TEST_P(Test_TensorFlow_layers, fp16_weights)
{ {
const float l1 = 0.00071; float l1 = 0.00078;
const float lInf = 0.012; float lInf = 0.012;
runTensorFlowNet("fp16_single_conv", false, l1, lInf); runTensorFlowNet("fp16_single_conv", false, l1, lInf);
runTensorFlowNet("fp16_deconvolution", false, l1, lInf);
runTensorFlowNet("fp16_max_pool_odd_same", false, l1, lInf); runTensorFlowNet("fp16_max_pool_odd_same", false, l1, lInf);
runTensorFlowNet("fp16_padding_valid", false, l1, lInf);
runTensorFlowNet("fp16_eltwise_add_mul", false, l1, lInf); runTensorFlowNet("fp16_eltwise_add_mul", false, l1, lInf);
runTensorFlowNet("fp16_max_pool_odd_valid", false, l1, lInf);
runTensorFlowNet("fp16_max_pool_even", false, l1, lInf);
runTensorFlowNet("fp16_padding_same", false, l1, lInf);
runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf); runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf);
runTensorFlowNet("fp16_padding_valid", false, l1, lInf);
// Reference output values are in range [0.0889, 1.651]
runTensorFlowNet("fp16_max_pool_even", false, (target == DNN_TARGET_MYRIAD) ? 0.003 : l1, lInf);
if (target == DNN_TARGET_MYRIAD) {
l1 = 0.0041;
lInf = 0.024;
}
// Reference output values are in range [0, 10.75]
runTensorFlowNet("fp16_deconvolution", false, l1, lInf);
// Reference output values are in range [0.418, 2.297]
runTensorFlowNet("fp16_max_pool_odd_valid", false, l1, lInf);
}
TEST_P(Test_TensorFlow_layers, fp16_padding_same)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE");
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
// Reference output values are in range [-3.504, -0.002]
runTensorFlowNet("fp16_padding_same", false, 6e-4, 4e-3);
} }
TEST_P(Test_TensorFlow_layers, defun) TEST_P(Test_TensorFlow_layers, defun)
@ -549,6 +645,7 @@ TEST_P(Test_TensorFlow_layers, slice)
TEST_P(Test_TensorFlow_layers, softmax) TEST_P(Test_TensorFlow_layers, softmax)
{ {
runTensorFlowNet("keras_softmax"); runTensorFlowNet("keras_softmax");
runTensorFlowNet("slim_softmax");
} }
TEST_P(Test_TensorFlow_layers, relu6) TEST_P(Test_TensorFlow_layers, relu6)

@ -148,8 +148,8 @@ TEST_P(Test_Torch_layers, run_reshape_single_sample)
{ {
// Reference output values in range [14.4586, 18.4492]. // Reference output values in range [14.4586, 18.4492].
runTorchNet("net_reshape_single_sample", "", false, false, true, runTorchNet("net_reshape_single_sample", "", false, false, true,
(target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.0073 : default_l1, (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.033 : default_l1,
(target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.025 : default_lInf); (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.05 : default_lInf);
} }
TEST_P(Test_Torch_layers, run_linear) TEST_P(Test_Torch_layers, run_linear)
@ -272,9 +272,9 @@ class Test_Torch_nets : public DNNTestLayer {};
TEST_P(Test_Torch_nets, OpenFace_accuracy) TEST_P(Test_Torch_nets, OpenFace_accuracy)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000 #if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException(""); throw SkipTestException("Test is disabled for Myriad targets");
#endif #endif
checkBackend(); checkBackend();
@ -295,8 +295,12 @@ TEST_P(Test_Torch_nets, OpenFace_accuracy)
net.setInput(inputBlob); net.setInput(inputBlob);
Mat out = net.forward(); Mat out = net.forward();
// Reference output values are in range [-0.17212, 0.263492]
// on Myriad problem layer: l4_Pooling - does not use pads_begin
float l1 = (target == DNN_TARGET_OPENCL_FP16) ? 4e-4 : 1e-5;
float lInf = (target == DNN_TARGET_OPENCL_FP16) ? 1.5e-3 : 1e-3;
Mat outRef = readTorchBlob(_tf("net_openface_output.dat"), true); Mat outRef = readTorchBlob(_tf("net_openface_output.dat"), true);
normAssert(out, outRef, "", default_l1, default_lInf); normAssert(out, outRef, "", l1, lInf);
} }
static Mat getSegmMask(const Mat& scores) static Mat getSegmMask(const Mat& scores)
@ -393,6 +397,12 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
// -model models/instance_norm/feathers.t7 // -model models/instance_norm/feathers.t7
TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy) TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
#endif
checkBackend(); checkBackend();
#if defined(INF_ENGINE_RELEASE) #if defined(INF_ENGINE_RELEASE)

Loading…
Cancel
Save