@@ -84,6 +84,10 @@ public:
    void runTensorFlowNet(const std::string& prefix, bool hasText = false,
                          double l1 = 0.0, double lInf = 0.0, bool memoryLoad = false, const std::string& groupPrefix = "")
    {
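        // Print the name of the model under test when test debug output is enabled.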
        if (cvtest::debugLevel > 0)
        {
            std::cout << prefix << groupPrefix << std::endl;
        }
        std::string netPath = path(prefix + groupPrefix + "_net.pb");
        std::string netConfig = (hasText ? path(prefix + groupPrefix + "_net.pbtxt") : "");
        std::string inpPath = path(prefix + "_in.npy");
@@ -119,6 +123,16 @@ public:
        net.setInput(input);
        cv::Mat output = net.forward();
        normAssert(ref, output, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
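        // On failure (or when debug output is enabled), dump the input, the reference and the actual output tensors.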
        if (cvtest::debugLevel > 0 || HasFailure())
        {
            std::cout << "input: " << input.size << std::endl;
            std::cout << input.reshape(1, 1) << std::endl;
            std::cout << "ref " << ref.size << std::endl;
            std::cout << ref.reshape(1, 1) << std::endl;
            std::cout << "output: " << output.size << std::endl;
            std::cout << output.reshape(1, 1) << std::endl;
        }
    }
};
@@ -133,7 +147,7 @@ TEST_P(Test_TensorFlow_layers, reduce_max)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    runTensorFlowNet("max_pool_by_axis");
    runTensorFlowNet("max_pool_by_axis", false, 0.0f, 0.0f);
}

TEST_P(Test_TensorFlow_layers, reduce_sum)
@@ -145,7 +159,11 @@ TEST_P(Test_TensorFlow_layers, reduce_sum)
TEST_P(Test_TensorFlow_layers, reduce_max_channel)
{
    runTensorFlowNet("reduce_max_channel");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)  // incorrect result
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    runTensorFlowNet("reduce_max_channel", false, 0.0f, 0.0f);
}

TEST_P(Test_TensorFlow_layers, reduce_sum_channel)
@@ -155,6 +173,10 @@ TEST_P(Test_TensorFlow_layers, reduce_sum_channel)
TEST_P(Test_TensorFlow_layers, reduce_max_channel_keep_dims)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)  // incorrect result
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    runTensorFlowNet("reduce_max_channel", false, 0.0, 0.0, false, "_keep_dims");
}
@@ -221,13 +243,49 @@ TEST_P(Test_TensorFlow_layers, padding)
runTensorFlowNet ( " keras_pad_concat " ) ;
}
TEST_P(Test_TensorFlow_layers, padding_asymmetric)
TEST_P(Test_TensorFlow_layers, padding_asymmetric_1)
{
    runTensorFlowNet("conv2d_asymmetric_pads_nchw");
}

TEST_P(Test_TensorFlow_layers, padding_asymmetric_2)
{
    runTensorFlowNet("conv2d_asymmetric_pads_nhwc");
}
TEST_P(Test_TensorFlow_layers, padding_asymmetric_3)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)  // Exception: Unsupported pad value
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)  // Exception: Unsupported pad value
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    runTensorFlowNet("max_pool2d_asymmetric_pads_nchw");
}
TEST_P(Test_TensorFlow_layers, padding_asymmetric_4)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)  // Exception: Unsupported pad value
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)  // Exception: Unsupported pad value
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    runTensorFlowNet("max_pool2d_asymmetric_pads_nhwc");
}
TEST_P(Test_TensorFlow_layers, padding_asymmetric_5)
{
    runTensorFlowNet("conv2d_backprop_input_asymmetric_pads_nchw");
}

TEST_P(Test_TensorFlow_layers, padding_asymmetric_6)
{
    runTensorFlowNet("conv2d_backprop_input_asymmetric_pads_nhwc");
}
@@ -268,6 +326,13 @@ TEST_P(Test_TensorFlow_layers, pad_and_concat)
TEST_P(Test_TensorFlow_layers, concat_axis_1)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    // IE Exception: Ngraph operation Transpose with name Flatten_1/flatten/Reshape/nhwc has dynamic output shape on 0 port, but CPU plug-in supports only static shape
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
        );
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // exception
@@ -423,19 +488,77 @@ TEST_P(Test_TensorFlow_layers, pooling_reduce_sum)
runTensorFlowNet ( " reduce_sum " ) ; // a SUM pooling over all spatial dimensions.
}
TEST_P(Test_TensorFlow_layers, pooling_reduce_sum2)
TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_0_false)
{
    int axises[] = {0, 1, 2, 3};
    for (int keepdims = 0; keepdims <= 1; ++keepdims)
    runTensorFlowNet("reduce_sum_0_False");
}

TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_false)
{
    runTensorFlowNet("reduce_sum_1_False");
}

TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_2_false)
{
    runTensorFlowNet("reduce_sum_2_False");
}

TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_3_false)
{
    runTensorFlowNet("reduce_sum_3_False");
}
TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_2_false)
{
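    // Reduced-precision targets (MYRIAD, OpenCL FP16) need a looser L1 tolerance for this reduction.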
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
    {
        for (int i = 0; i < sizeof(axises) / sizeof(axises[0]); ++i)
        {
            runTensorFlowNet(cv::format("reduce_sum_%d_%s", axises[i], (keepdims ? "True" : "False")));
        }
        runTensorFlowNet(cv::format("reduce_sum_1_2_%s", keepdims ? "True" : "False"));
        default_l1 = 0.01f;
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
    {
        default_l1 = 0.01f;
    }
#endif
    runTensorFlowNet("reduce_sum_1_2_False");
}
TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_0_true)
{
    runTensorFlowNet("reduce_sum_0_True");
}

TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_true)
{
    runTensorFlowNet("reduce_sum_1_True");
}

TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_2_true)
{
    runTensorFlowNet("reduce_sum_2_True");
}

TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_3_true)
{
    runTensorFlowNet("reduce_sum_3_True");
}
TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_2_true)
{
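    // Same relaxation as the keepdims=False variant: FP16-based targets need a looser L1 tolerance.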
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
    {
        default_l1 = 0.01f;
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
    {
        default_l1 = 0.01f;
    }
#endif
    runTensorFlowNet("reduce_sum_1_2_True");
}
TEST_P(Test_TensorFlow_layers, max_pool_grad)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@@ -715,13 +838,14 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
    double scoreDiff = default_l1, iouDiff = default_lInf;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
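        // FP16 and MYRIAD targets: relax the detection score and IoU tolerances.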
        scoreDiff = 0.0043;
        iouDiff = 0.037;
        scoreDiff = 0.01;
        iouDiff = 0.1;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        iouDiff = 0.04;
    }

    normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2019010000
    expectNoFallbacksFromIE(net);
@@ -815,16 +939,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
    expectNoFallbacksFromIE(net);
}
TEST_P(Test_TensorFlow_nets, Faster_RCNN)
TEST_P(Test_TensorFlow_nets, Faster_RCNN_inception_v2_coco_2018_01_28)
{
    // FIXIT split test
    applyTestTag(
        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
        CV_TEST_TAG_LONG,
        CV_TEST_TAG_DEBUG_VERYLONG
    );
    static std::string names[] = { "faster_rcnn_inception_v2_coco_2018_01_28",
                                   "faster_rcnn_resnet50_coco_2018_01_28" };
#ifdef INF_ENGINE_RELEASE
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
@@ -835,13 +956,82 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
        backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
    // segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
    // Assertion `prior_height > 0' failed.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);

    checkBackend();

    double scoresDiff = 1e-5;
    double iouDiff = 1e-4;
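    // Inference Engine backends produce slightly different detections; relax the score and IoU thresholds.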
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        scoresDiff = 0.02;
        iouDiff = 0.1;
    }
    std::string name = "faster_rcnn_inception_v2_coco_2018_01_28";
    {
        std::string proto = findDataFile("dnn/" + name + ".pbtxt");
        std::string model = findDataFile("dnn/" + name + ".pb", false);

        Net net = readNetFromTensorflow(model, proto);
        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);

        Mat img = imread(findDataFile("dnn/dog416.png"));
        Mat blob = blobFromImage(img, 1.0f, Size(800, 600), Scalar(), true, false);
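        // blobFromImage: scale 1.0, resize to 800x600, swap BGR->RGB, no cropping.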
        net.setInput(blob);
        Mat out = net.forward();

        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + name + ".detection_out.npy"));

        // accuracy (both OpenCV & IE)
        if (target == DNN_TARGET_OPENCL_FP16)
            applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);

        normAssertDetections(ref, out, name.c_str(), 0.3, scoresDiff, iouDiff);
    }
}
TEST_P(Test_TensorFlow_nets, Faster_RCNN_resnet50_coco_2018_01_28)
{
    applyTestTag(
        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
        CV_TEST_TAG_LONG,
        CV_TEST_TAG_DEBUG_VERYLONG
    );
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    // IE exception: Ngraph operation Transpose with name FirstStageBoxPredictor/ClassPredictor/reshape_1/nhwc has dynamic output shape on 0 port, but CPU plug-in supports only static shape
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
        );
#endif
#ifdef INF_ENGINE_RELEASE
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
        (INF_ENGINE_VER_MAJOR_LT(2019020000) || target != DNN_TARGET_CPU))
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);

    if (INF_ENGINE_VER_MAJOR_GT(2019030000) &&
        backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
    // segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
    // Assertion `prior_height > 0' failed.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
    if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
@@ -856,10 +1046,11 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
        scoresDiff = 0.06;
        iouDiff = 0.08;
    }

    for (int i = 0; i < 2; ++i)
    std::string name = "faster_rcnn_resnet50_coco_2018_01_28";
    {
        std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt");
        std::string model = findDataFile("dnn/" + names[i] + ".pb", false);
        std::string proto = findDataFile("dnn/" + name + ".pbtxt");
        std::string model = findDataFile("dnn/" + name + ".pb", false);

        Net net = readNetFromTensorflow(model, proto);
        net.setPreferableBackend(backend);
@@ -870,8 +1061,13 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
        net.setInput(blob);
        Mat out = net.forward();

        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + names[i] + ".detection_out.npy"));
        normAssertDetections(ref, out, names[i].c_str(), 0.3, scoresDiff, iouDiff);
        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + name + ".detection_out.npy"));

        // accuracy
        if (target == DNN_TARGET_OPENCL_FP16)
            applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);

        normAssertDetections(ref, out, name.c_str(), 0.3, scoresDiff, iouDiff);
    }
}
@@ -1282,6 +1478,10 @@ TEST_P(Test_TensorFlow_layers, resize_bilinear_down)
TEST_P(Test_TensorFlow_layers, resize_concat_optimization)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)  // Exception: Function contains several inputs and outputs with one friendly name! (HETERO bug?)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    runTensorFlowNet("resize_concat_optimization");
}
@@ -1406,7 +1606,7 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)
    Mat outDetections = outs[0];
    Mat outMasks = outs[1];
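    // OpenCL FP16 and MYRIAD targets: relaxed tolerances for detection scores and box overlap.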
    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.019 : 2e-5;
    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.2 : 2e-5;
    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.018 : default_lInf;

    normAssertDetections(refDetections, outDetections, "", /*threshold for zero confidence*/ 1e-5, scoreDiff, iouDiff);
@@ -1440,7 +1640,7 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)
            double inter = cv::countNonZero(masks & refMasks);
            double area = cv::countNonZero(masks | refMasks);
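            // Mask IoU must reach 0.99 (0.98 for reduced-precision targets).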
            EXPECT_GE(inter / area, 0.99);
            EXPECT_GE(inter / area, (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.98 : 0.99);

    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        expectNoFallbacks(net);