Introduce relaxed accuracy thresholds for CL target in some dnn tests.

Partially addresses #9821
Branch: pull/19774/head
Author: Aaron Greig (4 years ago)
Parent: 3df6bc58e9
Commit: f59917bea1
Changed files:
  1. modules/dnn/test/test_halide_layers.cpp (26 changed lines)
  2. modules/dnn/test/test_layers.cpp (13 changed lines)
  3. modules/dnn/test/test_torch_importer.cpp (18 changed lines)
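
All three files apply the same pattern: the test helpers grow optional l1/lInf parameters that default to 0.0, where 0.0 means "fall back to the per-backend default thresholds", and the LRN-related tests override them with relaxed values only for the plain OpenCL target. A minimal, self-contained sketch of that pattern follows; the names (Target, getDefaultThresholds, checkAccuracy) and the default values are illustrative stand-ins, not the actual OpenCV test helpers.

#include <iostream>

// Illustrative stand-ins only; not the real OpenCV DNN test enums or defaults.
enum Target { TARGET_CPU, TARGET_OPENCL, TARGET_OPENCL_FP16 };

static void getDefaultThresholds(Target target, double* l1, double* lInf)
{
    // Placeholder per-target defaults, purely for demonstration.
    *l1   = (target == TARGET_OPENCL_FP16) ? 4e-3 : 1e-5;
    *lInf = (target == TARGET_OPENCL_FP16) ? 2e-2 : 1e-4;
}

// l1/lInf default to 0.0, which means "use the per-target defaults".
static bool checkAccuracy(Target target, double maxAbsDiff,
                          double l1 = 0.0, double lInf = 0.0)
{
    double default_l1, default_lInf;
    getDefaultThresholds(target, &default_l1, &default_lInf);
    if (l1 == 0.0)
        l1 = default_l1;
    if (lInf == 0.0)
        lInf = default_lInf;
    return maxAbsDiff <= lInf;  // a real check would also compare an L1 norm
}

int main()
{
    double l1 = 0.0, lInf = 0.0;
    Target target = TARGET_OPENCL;
    // The OpenCL kernels may use native_ math functions whose accuracy is
    // implementation defined, so relax the thresholds for that target only.
    if (target == TARGET_OPENCL)
    {
        l1 = 0.01;
        lInf = 0.01;
    }
    std::cout << (checkAccuracy(target, /*maxAbsDiff=*/0.004, l1, lInf)
                      ? "OK" : "FAIL") << std::endl;
    return 0;
}

Because 0.0 (or simply omitting the arguments) still selects the defaults, existing call sites keep working unchanged; only the LRN tests below pass explicit values.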

--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -16,7 +16,7 @@ using namespace cv;
 using namespace cv::dnn;
 using namespace testing;
 
-static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool skipCheck = false, bool randInput = true)
+static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool skipCheck = false, bool randInput = true, double l1 = 0.0, double lInf = 0.0)
 {
     DNNTestLayer::checkBackend(backendId, targetId);
     if (randInput)
@@ -33,8 +33,12 @@ static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool
     if (skipCheck)
         return;
 
-    double l1, lInf;
-    DNNTestLayer::getDefaultThresholds(backendId, targetId, &l1, &lInf);
+    double default_l1, default_lInf;
+    DNNTestLayer::getDefaultThresholds(backendId, targetId, &default_l1, &default_lInf);
+    if (l1 == 0.0)
+        l1 = default_l1;
+    if (lInf == 0.0)
+        lInf = default_lInf;
 #if 0
     std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
     std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
@@ -43,11 +47,11 @@ static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool
     normAssert(outputDefault, outputHalide, "", l1, lInf);
 }
 
-static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false)
+static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false, double l1 = 0.0, double lInf = 0.0)
 {
     Net net;
     net.addLayerToPrev(params.name, params.type, params);
-    test(input, net, backendId, targetId, skipCheck);
+    test(input, net, backendId, targetId, skipCheck, true, l1, lInf);
 }
 
 static inline testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsWithHalide()
@@ -251,7 +255,17 @@ TEST_P(LRN, Accuracy)
     int sz[] = {1, inChannels, inSize.height, inSize.width};
     Mat input(4, &sz[0], CV_32F);
-    test(lp, input, backendId, targetId);
+
+    double l1 = 0.0, lInf = 0.0;
+    // The OpenCL kernels use the native_ math functions which have
+    // implementation defined accuracy, so we use relaxed thresholds. See
+    // https://github.com/opencv/opencv/issues/9821 for more details.
+    if (targetId == DNN_TARGET_OPENCL)
+    {
+        l1 = 0.01;
+        lInf = 0.01;
+    }
+    test(lp, input, backendId, targetId, false, l1, lInf);
 }
 
 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, LRN, Combine(

--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -169,8 +169,17 @@ TEST_P(Test_Caffe_layers, Softmax)
 TEST_P(Test_Caffe_layers, LRN)
 {
-    testLayerUsingCaffeModels("layer_lrn_spatial");
-    testLayerUsingCaffeModels("layer_lrn_channels");
+    double l1 = 0.0, lInf = 0.0;
+    // The OpenCL kernels use the native_ math functions which have
+    // implementation defined accuracy, so we use relaxed thresholds. See
+    // https://github.com/opencv/opencv/issues/9821 for more details.
+    if (target == DNN_TARGET_OPENCL)
+    {
+        l1 = 0.01;
+        lInf = 0.01;
+    }
+    testLayerUsingCaffeModels("layer_lrn_spatial", false, true, l1, lInf);
+    testLayerUsingCaffeModels("layer_lrn_channels", false, true, l1, lInf);
 }
 
 TEST_P(Test_Caffe_layers, Convolution)

--- a/modules/dnn/test/test_torch_importer.cpp
+++ b/modules/dnn/test/test_torch_importer.cpp
@@ -218,9 +218,21 @@ TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
-    runTorchNet("net_conv_gemm_lrn", "", false, true, true,
-                target == DNN_TARGET_OPENCL_FP16 ? 0.046 : 0.0,
-                target == DNN_TARGET_OPENCL_FP16 ? 0.023 : 0.0);
+    double l1 = 0.0, lInf = 0.0;
+    if (target == DNN_TARGET_OPENCL_FP16)
+    {
+        l1 = 0.046;
+        lInf = 0.023;
+    }
+    // The OpenCL kernels use the native_ math functions which have
+    // implementation defined accuracy, so we use relaxed thresholds. See
+    // https://github.com/opencv/opencv/issues/9821 for more details.
+    else if (target == DNN_TARGET_OPENCL)
+    {
+        l1 = 0.02;
+        lInf = 0.02;
+    }
+    runTorchNet("net_conv_gemm_lrn", "", false, true, true, l1, lInf);
 }
 
 TEST_P(Test_Torch_layers, net_inception_block)
