allow multiple inputs to resize, fix tests

pull/17528/head
YashasSamaga 5 years ago
parent 55ca0fcc27
commit 265acccd56
  1. 3
      modules/dnn/src/cuda4dnn/primitives/resize.hpp
  2. 5
      modules/dnn/test/test_darknet_importer.cpp
  3. 2
      modules/dnn/test/test_onnx_importer.cpp
  4. 7
      modules/dnn/test/test_tf_importer.cpp

@@ -35,7 +35,8 @@ namespace cv { namespace dnn { namespace cuda4dnn {
const std::vector<cv::Ptr<BackendWrapper>>& outputs,
csl::Workspace& workspace) override
{
CV_Assert(inputs.size() == 1 && outputs.size() == 1);
// sometimes the target shape is taken from the second input; we don't use it however
CV_Assert((inputs.size() == 1 || inputs.size() == 2) && outputs.size() == 1);
auto input_wrapper = inputs[0].dynamicCast<wrapper_type>();
auto input = input_wrapper->getView();

@@ -574,6 +574,11 @@ TEST_P(Test_Darknet_nets, YOLOv4)
double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.006 : 8e-5;
double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.042 : 3e-4;
if (target == DNN_TARGET_CUDA_FP16)
{
scoreDiff = 0.008;
iouDiff = 0.03;
}
std::string config_file = "yolov4.cfg";
std::string weights_file = "yolov4.weights";

@@ -355,6 +355,8 @@ TEST_P(Test_ONNX_layers, MatMul)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_CUDA)
applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
testONNXModels("matmul_2d");
testONNXModels("matmul_3d");

@@ -1067,6 +1067,8 @@ TEST_P(Test_TensorFlow_layers, tf2_prelu)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
if (backend == DNN_BACKEND_CUDA)
applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported; only across channels is supported
runTensorFlowNet("tf2_prelu");
}
@@ -1239,6 +1241,11 @@ TEST_P(Test_TensorFlow_nets, EfficientDet)
0, 7, 0.8039304, 0.6118435263633728, 0.13175517320632935, 0.9065558314323425, 0.2943994700908661);
double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 4e-3 : 1e-5;
double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 2e-3 : 1e-4;
if (target == DNN_TARGET_CUDA_FP16)
{
scoreDiff = 0.002;
iouDiff = 0.003;
}
normAssertDetections(ref, out, "", 0.5, scoreDiff, iouDiff);
expectNoFallbacksFromIE(net);
}

Loading…
Cancel
Save