@ -83,6 +83,10 @@ public:
void runTensorFlowNet ( const std : : string & prefix , bool hasText = false ,
double l1 = 0.0 , double lInf = 0.0 , bool memoryLoad = false , const std : : string & groupPrefix = " " )
{
if ( cvtest : : debugLevel > 0 )
{
std : : cout < < prefix < < groupPrefix < < std : : endl ;
}
std : : string netPath = path ( prefix + groupPrefix + " _net.pb " ) ;
std : : string netConfig = ( hasText ? path ( prefix + groupPrefix + " _net.pbtxt " ) : " " ) ;
std : : string inpPath = path ( prefix + " _in.npy " ) ;
@ -118,6 +122,16 @@ public:
net . setInput ( input ) ;
cv : : Mat output = net . forward ( ) ;
normAssert ( ref , output , " " , l1 ? l1 : default_l1 , lInf ? lInf : default_lInf ) ;
if ( cvtest : : debugLevel > 0 | | HasFailure ( ) )
{
std : : cout < < " input: " < < input . size < < std : : endl ;
std : : cout < < input . reshape ( 1 , 1 ) < < std : : endl ;
std : : cout < < " ref " < < ref . size < < std : : endl ;
std : : cout < < ref . reshape ( 1 , 1 ) < < std : : endl ;
std : : cout < < " output: " < < output . size < < std : : endl ;
std : : cout < < output . reshape ( 1 , 1 ) < < std : : endl ;
}
}
} ;
@ -132,7 +146,7 @@ TEST_P(Test_TensorFlow_layers, reduce_max)
{
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER ) ;
runTensorFlowNet ( " max_pool_by_axis " ) ;
runTensorFlowNet ( " max_pool_by_axis " , false , 0.0f , 0.0f ) ;
}
TEST_P ( Test_TensorFlow_layers , reduce_sum )
@ -144,7 +158,11 @@ TEST_P(Test_TensorFlow_layers, reduce_sum)
TEST_P ( Test_TensorFlow_layers , reduce_max_channel )
{
runTensorFlowNet ( " reduce_max_channel " ) ;
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_MYRIAD ) // incorrect result
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_MYRIAD , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH , CV_TEST_TAG_DNN_SKIP_IE_VERSION ) ;
# endif
runTensorFlowNet ( " reduce_max_channel " , false , 0.0f , 0.0f ) ;
}
TEST_P ( Test_TensorFlow_layers , reduce_sum_channel )
@ -154,6 +172,10 @@ TEST_P(Test_TensorFlow_layers, reduce_sum_channel)
TEST_P ( Test_TensorFlow_layers , reduce_max_channel_keep_dims )
{
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_MYRIAD ) // incorrect result
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_MYRIAD , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH , CV_TEST_TAG_DNN_SKIP_IE_VERSION ) ;
# endif
runTensorFlowNet ( " reduce_max_channel " , false , 0.0 , 0.0 , false , " _keep_dims " ) ;
}
@ -220,13 +242,49 @@ TEST_P(Test_TensorFlow_layers, padding)
runTensorFlowNet ( " keras_pad_concat " ) ;
}
TEST_P ( Test_TensorFlow_layers , padding_asymmetric )
TEST_P ( Test_TensorFlow_layers , padding_asymmetric_1 )
{
runTensorFlowNet ( " conv2d_asymmetric_pads_nchw " ) ;
}
TEST_P ( Test_TensorFlow_layers , padding_asymmetric_2 )
{
runTensorFlowNet ( " conv2d_asymmetric_pads_nhwc " ) ;
}
TEST_P ( Test_TensorFlow_layers , padding_asymmetric_3 )
{
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_CPU ) // Exception: Unsupported pad value
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_CPU , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH , CV_TEST_TAG_DNN_SKIP_IE_VERSION ) ;
# endif
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_MYRIAD ) // Exception: Unsupported pad value
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_MYRIAD , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH , CV_TEST_TAG_DNN_SKIP_IE_VERSION ) ;
# endif
runTensorFlowNet ( " max_pool2d_asymmetric_pads_nchw " ) ;
}
TEST_P ( Test_TensorFlow_layers , padding_asymmetric_4 )
{
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_CPU ) // Exception: Unsupported pad value
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_CPU , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH , CV_TEST_TAG_DNN_SKIP_IE_VERSION ) ;
# endif
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_MYRIAD ) // Exception: Unsupported pad value
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_MYRIAD , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH , CV_TEST_TAG_DNN_SKIP_IE_VERSION ) ;
# endif
runTensorFlowNet ( " max_pool2d_asymmetric_pads_nhwc " ) ;
}
TEST_P ( Test_TensorFlow_layers , padding_asymmetric_5 )
{
runTensorFlowNet ( " conv2d_backprop_input_asymmetric_pads_nchw " ) ;
}
TEST_P ( Test_TensorFlow_layers , padding_asymmetric_6 )
{
runTensorFlowNet ( " conv2d_backprop_input_asymmetric_pads_nhwc " ) ;
}
@ -267,6 +325,13 @@ TEST_P(Test_TensorFlow_layers, pad_and_concat)
TEST_P ( Test_TensorFlow_layers , concat_axis_1 )
{
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
// IE Exception: Ngraph operation Transpose with name Flatten_1/flatten/Reshape/nhwc has dynamic output shape on 0 port, but CPU plug-in supports only static shape
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & ( target = = DNN_TARGET_OPENCL | | target = = DNN_TARGET_OPENCL_FP16 ) )
applyTestTag ( target = = DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16 ,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH , CV_TEST_TAG_DNN_SKIP_IE_VERSION
) ;
# endif
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_OPENCL )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_OPENCL , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH ) ; // exception
@ -413,19 +478,77 @@ TEST_P(Test_TensorFlow_layers, pooling_reduce_sum)
runTensorFlowNet ( " reduce_sum " ) ; // a SUM pooling over all spatial dimensions.
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum2 )
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_0_false )
{
int axises [ ] = { 0 , 1 , 2 , 3 } ;
for ( int keepdims = 0 ; keepdims < = 1 ; + + keepdims )
runTensorFlowNet ( " reduce_sum_0_False " ) ;
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_1_false )
{
runTensorFlowNet ( " reduce_sum_1_False " ) ;
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_2_false )
{
runTensorFlowNet ( " reduce_sum_2_False " ) ;
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_3_false )
{
runTensorFlowNet ( " reduce_sum_3_False " ) ;
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_1_2_false )
{
# if defined(INF_ENGINE_RELEASE)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_MYRIAD )
{
for ( int i = 0 ; i < sizeof ( axises ) / sizeof ( axises [ 0 ] ) ; + + i )
{
runTensorFlowNet ( cv : : format ( " reduce_sum_%d_%s " , axises [ i ] , ( keepdims ? " True " : " False " ) ) ) ;
}
runTensorFlowNet ( cv : : format ( " reduce_sum_1_2_%s " , keepdims ? " True " : " False " ) ) ;
default_l1 = 0.01f ;
}
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_OPENCL_FP16 )
{
default_l1 = 0.01f ;
}
# endif
runTensorFlowNet ( " reduce_sum_1_2_False " ) ;
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_0_true )
{
runTensorFlowNet ( " reduce_sum_0_True " ) ;
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_1_true )
{
runTensorFlowNet ( " reduce_sum_1_True " ) ;
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_2_true )
{
runTensorFlowNet ( " reduce_sum_2_True " ) ;
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_3_true )
{
runTensorFlowNet ( " reduce_sum_3_True " ) ;
}
TEST_P ( Test_TensorFlow_layers , pooling_reduce_sum_1_2_true )
{
# if defined(INF_ENGINE_RELEASE)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_MYRIAD )
{
default_l1 = 0.01f ;
}
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_OPENCL_FP16 )
{
default_l1 = 0.01f ;
}
# endif
runTensorFlowNet ( " reduce_sum_1_2_True " ) ;
}
TEST_P ( Test_TensorFlow_layers , max_pool_grad )
{
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 )
@ -642,8 +765,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
net . setInput ( inp ) ;
Mat out = net . forward ( ) ;
double scoreDiff = ( target = = DNN_TARGET_OPENCL_FP16 | | target = = DNN_TARGET_MYRIAD ) ? 0.0043 : default_l1 ;
double iouDiff = ( target = = DNN_TARGET_OPENCL_FP16 | | target = = DNN_TARGET_MYRIAD ) ? 0.037 : default_lInf ;
double scoreDiff = default_l1 , iouDiff = default_lInf ;
if ( target = = DNN_TARGET_OPENCL_FP16 | | target = = DNN_TARGET_MYRIAD )
{
scoreDiff = 0.01 ;
iouDiff = 0.1 ;
}
normAssertDetections ( ref , out , " " , 0.2 , scoreDiff , iouDiff ) ;
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2019010000
expectNoFallbacksFromIE ( net ) ;
@ -720,16 +848,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
expectNoFallbacksFromIE ( net ) ;
}
TEST_P ( Test_TensorFlow_nets , Faster_RCNN )
TEST_P ( Test_TensorFlow_nets , Faster_RCNN_inception_v2_coco_2018_01_28 )
{
// FIXIT split test
applyTestTag (
( target = = DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB ) ,
CV_TEST_TAG_LONG ,
CV_TEST_TAG_DEBUG_VERYLONG
) ;
static std : : string names [ ] = { " faster_rcnn_inception_v2_coco_2018_01_28 " ,
" faster_rcnn_resnet50_coco_2018_01_28 " } ;
# ifdef INF_ENGINE_RELEASE
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 & &
@ -740,21 +865,89 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_MYRIAD )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_MYRIAD , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH ) ;
# endif
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
// segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
// Assertion `prior_height > 0' failed.
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_OPENCL_FP16 )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16 , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH ) ;
# endif
if ( backend = = DNN_BACKEND_OPENCV & & target = = DNN_TARGET_OPENCL_FP16 )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_OPENCL_FP16 ) ;
checkBackend ( ) ;
double scoresDiff = 1e-5 ;
double iouDiff = 1e-4 ;
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 | | backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH )
{
scoresDiff = 0.02 ;
iouDiff = 0.1 ;
}
std : : string name = " faster_rcnn_inception_v2_coco_2018_01_28 " ;
{
std : : string proto = findDataFile ( " dnn/ " + name + " .pbtxt " ) ;
std : : string model = findDataFile ( " dnn/ " + name + " .pb " , false ) ;
Net net = readNetFromTensorflow ( model , proto ) ;
net . setPreferableBackend ( backend ) ;
net . setPreferableTarget ( target ) ;
Mat img = imread ( findDataFile ( " dnn/dog416.png " ) ) ;
Mat blob = blobFromImage ( img , 1.0f , Size ( 800 , 600 ) , Scalar ( ) , true , false ) ;
net . setInput ( blob ) ;
Mat out = net . forward ( ) ;
Mat ref = blobFromNPY ( findDataFile ( " dnn/tensorflow/ " + name + " .detection_out.npy " ) ) ;
// accuracy (both OpenCV & IE)
if ( target = = DNN_TARGET_OPENCL_FP16 )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_OPENCL_FP16 ) ;
normAssertDetections ( ref , out , name . c_str ( ) , 0.3 , scoresDiff , iouDiff ) ;
}
}
TEST_P ( Test_TensorFlow_nets , Faster_RCNN_resnet50_coco_2018_01_28 )
{
applyTestTag (
( target = = DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB ) ,
CV_TEST_TAG_LONG ,
CV_TEST_TAG_DEBUG_VERYLONG
) ;
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
// IE exception: Ngraph operation Transpose with name FirstStageBoxPredictor/ClassPredictor/reshape_1/nhwc has dynamic output shape on 0 port, but CPU plug-in supports only static shape
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & ( target = = DNN_TARGET_OPENCL | | target = = DNN_TARGET_OPENCL_FP16 ) )
applyTestTag ( target = = DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16 ,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH , CV_TEST_TAG_DNN_SKIP_IE_VERSION
) ;
# endif
# ifdef INF_ENGINE_RELEASE
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 & &
( INF_ENGINE_VER_MAJOR_LT ( 2019020000 ) | | target ! = DNN_TARGET_CPU ) )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER , CV_TEST_TAG_DNN_SKIP_IE_VERSION ) ;
if ( INF_ENGINE_VER_MAJOR_GT ( 2019030000 ) & &
backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_MYRIAD )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_MYRIAD , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH ) ;
# endif
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
// segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
// Assertion `prior_height > 0' failed.
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target = = DNN_TARGET_OPENCL_FP16 )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16 , CV_TEST_TAG_DNN_SKIP_IE_NGRAPH ) ;
# endif
checkBackend ( ) ;
double scoresDiff = backend = = DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ? 2.9e-5 : 1e-5 ;
for ( int i = 0 ; i < 2 ; + + i )
double iouDiff = 1e-4 ;
std : : string name = " faster_rcnn_resnet50_coco_2018_01_28 " ;
{
std : : string proto = findDataFile ( " dnn/ " + names [ i ] + " .pbtxt " ) ;
std : : string model = findDataFile ( " dnn/ " + names [ i ] + " .pb " , false ) ;
std : : string proto = findDataFile ( " dnn/ " + name + " .pbtxt " ) ;
std : : string model = findDataFile ( " dnn/ " + name + " .pb " , false ) ;
Net net = readNetFromTensorflow ( model , proto ) ;
net . setPreferableBackend ( backend ) ;
@ -765,8 +958,13 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
net . setInput ( blob ) ;
Mat out = net . forward ( ) ;
Mat ref = blobFromNPY ( findDataFile ( " dnn/tensorflow/ " + names [ i ] + " .detection_out.npy " ) ) ;
normAssertDetections ( ref , out , names [ i ] . c_str ( ) , 0.3 , scoresDiff ) ;
Mat ref = blobFromNPY ( findDataFile ( " dnn/tensorflow/ " + name + " .detection_out.npy " ) ) ;
// accuracy
if ( target = = DNN_TARGET_OPENCL_FP16 )
applyTestTag ( CV_TEST_TAG_DNN_SKIP_OPENCL_FP16 ) ;
normAssertDetections ( ref , out , name . c_str ( ) , 0.3 , scoresDiff , iouDiff ) ;
}
}
@ -1152,6 +1350,10 @@ TEST_P(Test_TensorFlow_layers, resize_bilinear_down)
TEST_P ( Test_TensorFlow_layers , resize_concat_optimization )
{
# if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH & & target ! = DNN_TARGET_CPU ) // Exception: Function contains several inputs and outputs with one friendly name! (HETERO bug?)
applyTestTag ( CV_TEST_TAG_DNN_SKIP_IE_NGRAPH , CV_TEST_TAG_DNN_SKIP_IE_VERSION ) ;
# endif
runTensorFlowNet ( " resize_concat_optimization " ) ;
}
@ -1271,7 +1473,7 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)
Mat outDetections = outs [ 0 ] ;
Mat outMasks = outs [ 1 ] ;
double scoreDiff = ( target = = DNN_TARGET_OPENCL_FP16 | | target = = DNN_TARGET_MYRIAD ) ? 0.019 : 2e-5 ;
double scoreDiff = ( target = = DNN_TARGET_OPENCL_FP16 | | target = = DNN_TARGET_MYRIAD ) ? 0.2 : 2e-5 ;
double iouDiff = ( target = = DNN_TARGET_OPENCL_FP16 | | target = = DNN_TARGET_MYRIAD ) ? 0.018 : default_lInf ;
normAssertDetections ( refDetections , outDetections , " " , /*threshold for zero confidence*/ 1e-5 , scoreDiff , iouDiff ) ;
@ -1305,7 +1507,7 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)
double inter = cv : : countNonZero ( masks & refMasks ) ;
double area = cv : : countNonZero ( masks | refMasks ) ;
EXPECT_GE ( inter / area , 0.99 ) ;
EXPECT_GE ( inter / area , ( target = = DNN_TARGET_OPENCL_FP16 | | target = = DNN_TARGET_MYRIAD ) ? 0.98 : 0.99 ) ;
if ( backend = = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH )
expectNoFallbacks ( net ) ;