Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/14904/head
Alexander Alekhin 6 years ago
commit b95e93c20a
43 changed files:
  1. modules/calib3d/CMakeLists.txt (4)
  2. modules/core/include/opencv2/core/hal/intrin_neon.hpp (18)
  3. modules/core/src/kmeans.cpp (2)
  4. modules/dnn/perf/perf_caffe.cpp (2)
  5. modules/dnn/perf/perf_net.cpp (6)
  6. modules/dnn/src/caffe/caffe_io.cpp (7)
  7. modules/dnn/src/layers/elementwise_layers.cpp (30)
  8. modules/dnn/src/layers/eltwise_layer.cpp (8)
  9. modules/dnn/src/op_inf_engine.cpp (52)
  10. modules/dnn/src/opencl/activations.cl (3)
  11. modules/dnn/src/tensorflow/tf_importer.cpp (10)
  12. modules/dnn/test/test_backends.cpp (84)
  13. modules/dnn/test/test_caffe_importer.cpp (71)
  14. modules/dnn/test/test_common.hpp (20)
  15. modules/dnn/test/test_common.impl.hpp (47)
  16. modules/dnn/test/test_darknet_importer.cpp (21)
  17. modules/dnn/test/test_googlenet.cpp (18)
  18. modules/dnn/test/test_halide_layers.cpp (29)
  19. modules/dnn/test/test_layers.cpp (43)
  20. modules/dnn/test/test_main.cpp (19)
  21. modules/dnn/test/test_misc.cpp (12)
  22. modules/dnn/test/test_onnx_importer.cpp (116)
  23. modules/dnn/test/test_tf_importer.cpp (106)
  24. modules/dnn/test/test_torch_importer.cpp (30)
  25. modules/flann/include/opencv2/flann.hpp (6)
  26. modules/flann/include/opencv2/flann/dist.h (2)
  27. modules/imgproc/src/filter.dispatch.cpp (24)
  28. modules/imgproc/src/grabcut.cpp (24)
  29. modules/imgproc/test/test_filter.cpp (61)
  30. modules/js/src/embindgen.py (7)
  31. modules/js/test/test_calib3d.js (43)
  32. modules/js/test/test_imgproc.js (83)
  33. modules/js/test/tests.html (1)
  34. modules/js/test/tests.js (4)
  35. modules/objdetect/test/test_qrcode.cpp (2)
  36. modules/ts/include/opencv2/ts.hpp (32)
  37. modules/ts/src/ts.cpp (28)
  38. modules/ts/src/ts_tags.cpp (72)
  39. modules/videoio/src/cap_mfx_common.cpp (2)
  40. platforms/ios/cmake/Toolchains/common-ios-toolchain.cmake (4)
  41. platforms/js/build_js.py (2)
  42. samples/_winpack_build_sample.cmd (88)
  43. samples/dnn/js_face_recognition.html (7)

@ -3,4 +3,6 @@ set(debug_modules "")
if(DEBUG_opencv_calib3d)
list(APPEND debug_modules opencv_highgui)
endif()
ocv_define_module(calib3d opencv_imgproc opencv_features2d opencv_flann ${debug_modules} WRAP java python)
ocv_define_module(calib3d opencv_imgproc opencv_features2d opencv_flann ${debug_modules}
WRAP java python js
)

@ -875,13 +875,27 @@ OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_int64x2, s64)
OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_float64x2, f64)
#endif
#if defined(__clang__) && defined(__aarch64__)
// avoid LD2 instruction. details: https://github.com/opencv/opencv/issues/14863
#define OPENCV_HAL_IMPL_NEON_LOAD_LOW_OP(_Tpvec, _Tp, suffix) \
inline _Tpvec v_load_low(const _Tp* ptr) \
{ \
typedef uint64 CV_DECL_ALIGNED(1) unaligned_uint64; \
uint64 v = *(unaligned_uint64*)ptr; \
return _Tpvec(v_reinterpret_as_##suffix(v_uint64x2(v, (uint64)123456))); \
}
#else
#define OPENCV_HAL_IMPL_NEON_LOAD_LOW_OP(_Tpvec, _Tp, suffix) \
inline _Tpvec v_load_low(const _Tp* ptr) \
{ return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr), vdup_n_##suffix((_Tp)0))); }
#endif
#define OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(_Tpvec, _Tp, suffix) \
inline _Tpvec v_load(const _Tp* ptr) \
{ return _Tpvec(vld1q_##suffix(ptr)); } \
inline _Tpvec v_load_aligned(const _Tp* ptr) \
{ return _Tpvec(vld1q_##suffix(ptr)); } \
inline _Tpvec v_load_low(const _Tp* ptr) \
{ return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr), vdup_n_##suffix((_Tp)0))); } \
OPENCV_HAL_IMPL_NEON_LOAD_LOW_OP(_Tpvec, _Tp, suffix) \
inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
{ return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr0), vld1_##suffix(ptr1))); } \
inline void v_store(_Tp* ptr, const _Tpvec& a) \
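The hunk above swaps the vld1-based v_load_low for a plain 64-bit integer load on clang/aarch64, so the compiler sees an ordinary scalar load and does not fuse two adjacent v_load_low calls into a single LD2 instruction (issue 14863 linked in the comment). A standalone plain-C++ sketch of the same idea, using memcpy as the portable stand-in for the CV_DECL_ALIGNED(1) typedef; this is illustration only, not the intrinsics code itself:

#include <cstdint>
#include <cstring>

// Load the low 64 bits of a 128-bit lane through a scalar integer load.
// memcpy is well-defined for unaligned pointers and has no aliasing UB.
static uint64_t load_low_bits(const void* ptr)
{
    uint64_t v;
    std::memcpy(&v, ptr, sizeof(v));
    return v;
}

int main()
{
    unsigned char buf[16] = {1, 2, 3, 4, 5, 6, 7, 8};
    return load_low_bits(buf) != 0 ? 0 : 1;   // only the low half of the block is read
}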

@ -238,7 +238,7 @@ double cv::kmeans( InputArray _data, int K,
attempts = std::max(attempts, 1);
CV_Assert( data0.dims <= 2 && type == CV_32F && K > 0 );
CV_Assert( N >= K );
CV_CheckGE(N, K, "Number of clusters should be more than number of elements");
Mat data(N, dims, CV_32F, data0.ptr(), isrow ? dims * sizeof(float) : static_cast<size_t>(data0.step));

@ -38,7 +38,7 @@ namespace opencv_test {
static caffe::Net<float>* initNet(std::string proto, std::string weights)
{
proto = findDataFile(proto, false);
proto = findDataFile(proto);
weights = findDataFile(weights, false);
#ifdef HAVE_CLCAFFE

@ -35,7 +35,7 @@ public:
weights = findDataFile(weights, false);
if (!proto.empty())
proto = findDataFile(proto, false);
proto = findDataFile(proto);
if (backend == DNN_BACKEND_HALIDE)
{
if (halide_scheduler == "disabled")
@ -198,10 +198,10 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv3)
{
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/dog416.png", false));
Mat sample = imread(findDataFile("dnn/dog416.png"));
Mat inp;
sample.convertTo(inp, CV_32FC3);
processNet("dnn/yolov3.cfg", "dnn/yolov3.weights", "", inp / 255);
processNet("dnn/yolov3.weights", "dnn/yolov3.cfg", "", inp / 255);
}
PERF_TEST_P_(DNNTestNetwork, EAST_text_detection)

@ -1137,7 +1137,12 @@ bool ReadProtoFromBinaryFile(const char* filename, Message* proto) {
bool ReadProtoFromTextBuffer(const char* data, size_t len, Message* proto) {
ArrayInputStream input(data, len);
return google::protobuf::TextFormat::Parse(&input, proto);
#ifndef OPENCV_DNN_EXTERNAL_PROTOBUF
return google::protobuf::TextFormat::Parser(true).Parse(&input, proto);
#else
return google::protobuf::TextFormat::Parser().Parse(&input, proto);
#endif
}

@ -831,7 +831,8 @@ struct BNLLFunctor
for( int i = 0; i < len; i++ )
{
float x = srcptr[i];
dstptr[i] = log(1.f + exp(-abs(x)));
// https://github.com/BVLC/caffe/blame/1.0/src/caffe/layers/bnll_layer.cpp#L17
dstptr[i] = x > 0 ? x + log(1. + exp(-x)) : log(1. + exp(x));
}
}
}
@ -839,8 +840,28 @@ struct BNLLFunctor
#ifdef HAVE_OPENCL
bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
// TODO: implement OCL version
return false;
std::vector<UMat> inputs;
std::vector<UMat> outputs;
inps.getUMatVector(inputs);
outs.getUMatVector(outputs);
String buildopt = oclGetTMacro(inputs[0]);
for (size_t i = 0; i < inputs.size(); i++)
{
UMat& src = inputs[i];
UMat& dst = outputs[i];
ocl::Kernel kernel("BNLLForward", ocl::dnn::activations_oclsrc, buildopt);
kernel.set(0, (int)src.total());
kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
size_t gSize = src.total();
CV_Assert(kernel.run(1, &gSize, NULL, false));
}
return true;
}
#endif
@ -848,7 +869,8 @@ struct BNLLFunctor
void attachHalide(const Halide::Expr& input, Halide::Func& top)
{
Halide::Var x("x"), y("y"), c("c"), n("n");
top(x, y, c, n) = log(1.0f + exp(-abs(input)));
// https://github.com/BVLC/caffe/blame/1.0/src/caffe/layers/bnll_layer.cpp#L17
top(x, y, c, n) = max(input, 0) + log(1.0f + exp(-abs(input)));
}
#endif // HAVE_HALIDE
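The rewritten expression is the numerically stable form of softplus, matching the Caffe reference linked in the comment: for large positive x the naive log(1 + exp(x)) overflows, while x + log(1 + exp(-x)) stays finite. A standalone sketch (not OpenCV code) showing the difference:

#include <cmath>
#include <cstdio>

static float bnll_naive(float x)  { return std::log(1.f + std::exp(x)); }
static float bnll_stable(float x) { return x > 0 ? x + std::log(1.f + std::exp(-x))
                                                 : std::log(1.f + std::exp(x)); }

int main()
{
    const float x = 100.f;
    std::printf("naive:  %f\n", bnll_naive(x));   // inf: exp(100) overflows float
    std::printf("stable: %f\n", bnll_stable(x));  // ~100: softplus(x) ~= x for large x
    return 0;
}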

@ -140,7 +140,7 @@ public:
const std::vector<float>& coeffs, EltwiseOp op,
const ActivationLayer* activ, int nstripes)
{
CV_Check(dst.dims, 1 < dst.dims && dst.dims <= 4, ""); CV_CheckTypeEQ(dst.type(), CV_32FC1, ""); CV_Assert(dst.isContinuous());
CV_Check(dst.dims, 1 < dst.dims && dst.dims <= 5, ""); CV_CheckTypeEQ(dst.type(), CV_32FC1, ""); CV_Assert(dst.isContinuous());
CV_Assert(coeffs.empty() || coeffs.size() == (size_t)nsrcs);
for( int i = 0; i < nsrcs; i++ )
@ -156,9 +156,9 @@ public:
p.dst = &dst;
p.op = op;
p.nstripes = nstripes;
p.channels = (dst.dims == 4 ? dst.size[1] : 1);
p.planeSize = (dst.dims >= 3 ? dst.size[dst.dims - 1] * dst.size[dst.dims - 2] :
dst.size[dst.dims - 1]);
p.channels = (dst.dims >= 4 ? dst.size[1] : 1);
p.planeSize = dst.total(dst.dims >= 4 ? 2 : 1);
CV_Assert(dst.total() == dst.size[0] * p.channels * p.planeSize);
bool simpleCoeffs = true;
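The change above generalizes the 4-D-only indexing to blobs with up to 5 dimensions: channels stays size[1] and planeSize becomes the product of everything from axis 2 onward (e.g. a 2x3x4x8x8 blob gives channels = 3 and planeSize = 4*8*8 = 256). A small worked example with assumed shapes, independent of the layer code:

#include <cstdio>

int main()
{
    const int size4[] = {2, 3, 8, 8};      // N, C, H, W
    const int size5[] = {2, 3, 4, 8, 8};   // N, C, D, H, W
    int plane4 = size4[2] * size4[3];                   // 64
    int plane5 = size5[2] * size5[3] * size5[4];        // 256
    std::printf("4-D: channels=%d planeSize=%d\n", size4[1], plane4);
    std::printf("5-D: channels=%d planeSize=%d\n", size5[1], plane5);
    return 0;
}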

@ -410,6 +410,14 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
enginePtr = dispatcher.getSuitablePlugin(targetDevice);
sharedPlugins[targetDevice] = enginePtr;
std::vector<std::string> candidates;
std::string param_pluginPath = utils::getConfigurationParameterString("OPENCV_DNN_IE_EXTRA_PLUGIN_PATH", "");
if (!param_pluginPath.empty())
{
candidates.push_back(param_pluginPath);
}
if (targetDevice == InferenceEngine::TargetDevice::eCPU ||
targetDevice == InferenceEngine::TargetDevice::eFPGA)
{
@ -423,24 +431,36 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
{
if (!haveFeature[i])
continue;
#ifdef _WIN32
std::string libName = "cpu_extension" + suffixes[i] + ".dll";
#elif defined(__APPLE__)
std::string libName = "libcpu_extension" + suffixes[i] + ".dylib";
#else
std::string libName = "libcpu_extension" + suffixes[i] + ".so";
#endif // _WIN32
try
{
InferenceEngine::IExtensionPtr extension =
InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(libName);
enginePtr->AddExtension(extension, 0);
break;
}
catch(...) {}
#ifdef _WIN32
candidates.push_back("cpu_extension" + suffixes[i] + ".dll");
#elif defined(__APPLE__)
candidates.push_back("libcpu_extension" + suffixes[i] + ".so"); // built as loadable module
candidates.push_back("libcpu_extension" + suffixes[i] + ".dylib"); // built as shared library
#else
candidates.push_back("libcpu_extension" + suffixes[i] + ".so");
#endif // _WIN32
}
// Some of networks can work without a library of extra layers.
}
bool found = false;
for (size_t i = 0; i != candidates.size(); ++i)
{
const std::string& libName = candidates[i];
try
{
InferenceEngine::IExtensionPtr extension =
InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(libName);
enginePtr->AddExtension(extension, 0);
CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
found = true;
break;
}
catch(...) {}
}
if (!found && !candidates.empty())
{
CV_LOG_WARNING(NULL, "DNN-IE: Can't load extension plugin (extra layers for some networks). Specify path via OPENCV_DNN_IE_EXTRA_PLUGIN_PATH parameter");
}
// Some of networks can work without a library of extra layers.
}
plugin = InferenceEngine::InferencePlugin(enginePtr);
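The rewritten block collects candidate library names (an optional user-supplied path from OPENCV_DNN_IE_EXTRA_PLUGIN_PATH first, then the default cpu_extension names per platform) and loads the first one that succeeds, logging a warning instead of failing when none loads. A generic sketch of that control flow; loadExtension here is a hypothetical stand-in for make_so_pointer + AddExtension, and the library names are placeholders:

#include <cstdio>
#include <cstdlib>
#include <stdexcept>
#include <string>
#include <vector>

static void loadExtension(const std::string& name)
{
    if (name != "libcpu_extension.so")            // pretend only this one can be loaded
        throw std::runtime_error("cannot load " + name);
}

int main()
{
    std::vector<std::string> candidates;
    if (const char* extra = std::getenv("OPENCV_DNN_IE_EXTRA_PLUGIN_PATH"))
        candidates.push_back(extra);              // user-supplied path is tried first
    candidates.push_back("libcpu_extension_avx2.so");
    candidates.push_back("libcpu_extension.so");

    for (const std::string& name : candidates)
    {
        try { loadExtension(name); std::printf("loaded %s\n", name.c_str()); return 0; }
        catch (...) { /* try the next candidate */ }
    }
    std::printf("no extension loaded (optional for many networks)\n");
    return 0;
}

In practice this means a custom extension build can be picked up by exporting OPENCV_DNN_IE_EXTRA_PLUGIN_PATH with the full library path (path shown here is hypothetical) before running the tests.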

@ -98,7 +98,8 @@ __kernel void SigmoidForward(const int count, __global const T* in, __global T*
__kernel void BNLLForward(const int n, __global const T* in, __global T* out) {
int index = get_global_id(0);
if (index < n) {
out[index] = in[index] > 0 ? in[index] + log(1.0f + exp(-in[index])) : log(1.0f + exp(in[index]));
T x = in[index];
out[index] = x > 0 ? x + log(1.0f + exp(-x)) : log(1.0f + exp(x));
}
}

@ -792,7 +792,7 @@ void TFImporter::populateNet(Net dstNet)
int predictedLayout = predictOutputDataLayout(net, layer, data_layouts);
data_layouts[name] = predictedLayout;
if (type == "Conv2D" || type == "SpaceToBatchND" || type == "DepthwiseConv2dNative" || type == "Pad" || type == "Conv3D")
if (type == "Conv2D" || type == "SpaceToBatchND" || type == "DepthwiseConv2dNative" || type == "Pad" || type == "MirrorPad" || type == "Conv3D")
{
// The first node of dilated convolution subgraph.
// Extract input node, dilation rate and paddings.
@ -804,6 +804,7 @@ void TFImporter::populateNet(Net dstNet)
if (next_layers.empty())
next_layers = getNextLayers(net, name, "DepthwiseConv2dNative");
}
if (type == "SpaceToBatchND")
{
// op: "SpaceToBatchND"
@ -830,7 +831,7 @@ void TFImporter::populateNet(Net dstNet)
name = layer.name();
type = layer.op();
}
else if (type == "Pad")
else if (type == "Pad" || type == "MirrorPad")
{
Mat paddings = getTensorContent(getConstBlob(layer, value_id, 1));
CV_Assert(paddings.type() == CV_32SC1);
@ -848,12 +849,15 @@ void TFImporter::populateNet(Net dstNet)
// N C H W
// 0 1 2 3 4 5 6 7
}
if (next_layers.empty() || paddings.total() != 8 ||
paddings.at<int32_t>(4) != paddings.at<int32_t>(5) ||
paddings.at<int32_t>(6) != paddings.at<int32_t>(7))
paddings.at<int32_t>(6) != paddings.at<int32_t>(7) || type == "MirrorPad")
{
// Just a single padding layer.
layerParams.set("paddings", DictValue::arrayInt<int*>((int*)paddings.data, paddings.total()));
if (type == "MirrorPad")
layerParams.set("type", "reflect");
int id = dstNet.addLayer(name, "Padding", layerParams);
layer_id[name] = id;
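MirrorPad nodes are now routed to the existing Padding layer with type "reflect". A small standalone illustration (not importer code) of what reflect padding produces for a 1-D row, mirroring the border without repeating the edge element:

#include <cstdio>
#include <vector>

int main()
{
    const std::vector<int> src = {1, 2, 3, 4};
    const int pad = 2;
    std::vector<int> dst;
    for (int i = pad; i >= 1; --i) dst.push_back(src[i]);               // left pad: 3 2
    dst.insert(dst.end(), src.begin(), src.end());                      // body: 1 2 3 4
    for (int i = (int)src.size() - 2; i >= (int)src.size() - 1 - pad; --i)
        dst.push_back(src[i]);                                          // right pad: 3 2
    for (int v : dst) std::printf("%d ", v);                            // 3 2 1 2 3 4 3 2
    std::printf("\n");
    return 0;
}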

@ -37,7 +37,7 @@ public:
weights = findDataFile(weights, false);
if (!proto.empty())
proto = findDataFile(proto, false);
proto = findDataFile(proto);
// Create two networks - with default backend and target and a tested one.
Net netDefault = readNet(weights, proto);
@ -51,7 +51,7 @@ public:
net.setPreferableTarget(target);
if (backend == DNN_BACKEND_HALIDE && !halideScheduler.empty())
{
halideScheduler = findDataFile(halideScheduler, false);
halideScheduler = findDataFile(halideScheduler);
net.setHalideScheduler(halideScheduler);
}
Mat out = net.forward(outputLayer).clone();
@ -157,9 +157,10 @@ TEST_P(DNNTestNetwork, Inception_5h)
TEST_P(DNNTestNetwork, ENet)
{
applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);
if ((backend == DNN_BACKEND_INFERENCE_ENGINE) ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
processNet("dnn/Enet-model-best.net", "", Size(512, 512), "l367_Deconvolution",
target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_enet.yml" :
"dnn/halide_scheduler_enet.yml",
@ -170,8 +171,8 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
Mat sample = imread(findDataFile("dnn/street.png"));
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
float diffScores = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1.5e-2 : 0.0;
float diffSquares = (target == DNN_TARGET_MYRIAD) ? 0.063 : 0.0;
@ -184,13 +185,13 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe)
TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe_Different_Width_Height)
{
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
Mat sample = imread(findDataFile("dnn/street.png", false));
Mat sample = imread(findDataFile("dnn/street.png"));
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 560), Scalar(127.5, 127.5, 127.5), false);
float diffScores = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.029 : 0.0;
float diffSquares = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.09 : 0.0;
@ -203,8 +204,8 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow)
{
applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
Mat sample = imread(findDataFile("dnn/street.png"));
Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.095 : 0.0;
float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.09 : 0.0;
@ -217,13 +218,13 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow)
TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow_Different_Width_Height)
{
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
Mat sample = imread(findDataFile("dnn/street.png", false));
Mat sample = imread(findDataFile("dnn/street.png"));
Mat inp = blobFromImage(sample, 1.0f, Size(300, 560), Scalar(), false);
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.012 : 0.0;
float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.06 : 0.0;
@ -236,8 +237,8 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
Mat sample = imread(findDataFile("dnn/street.png"));
Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 2e-5;
float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.062 : 0.0;
@ -251,10 +252,10 @@ TEST_P(DNNTestNetwork, SSD_VGG16)
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
CV_TEST_TAG_DEBUG_VERYLONG);
if (backend == DNN_BACKEND_HALIDE && target == DNN_TARGET_CPU)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE); // TODO HALIDE_CPU
double scoreThreshold = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0325 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.032 : 0.0;
Mat sample = imread(findDataFile("dnn/street.png", false));
Mat sample = imread(findDataFile("dnn/street.png"));
Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
processNet("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel",
"dnn/ssd_vgg16.prototxt", inp, "detection_out", "", scoreThreshold, lInf);
@ -264,13 +265,13 @@ TEST_P(DNNTestNetwork, SSD_VGG16)
TEST_P(DNNTestNetwork, OpenPose_pose_coco)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
CV_TEST_TAG_DEBUG_VERYLONG);
CV_TEST_TAG_DEBUG_LONG);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.0056 : 0.0;
@ -285,11 +286,11 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
CV_TEST_TAG_DEBUG_VERYLONG);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
// output range: [-0.001, 0.97]
@ -304,11 +305,11 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
{
applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_MEMORY_1GB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
// The same .caffemodel but modified .prototxt
@ -323,11 +324,11 @@ TEST_P(DNNTestNetwork, OpenFace)
#if defined(INF_ENGINE_RELEASE)
#if INF_ENGINE_VER_MAJOR_EQ(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
#endif
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.0024 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.0071 : 0.0;
processNet("dnn/openface_nn4.small2.v1.t7", "", Size(96, 96), "", "", l1, lInf);
@ -336,8 +337,8 @@ TEST_P(DNNTestNetwork, OpenFace)
TEST_P(DNNTestNetwork, opencv_face_detector)
{
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
Mat img = imread(findDataFile("gpu/lbpcascade/er.png"));
Mat inp = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);
processNet("dnn/opencv_face_detector.caffemodel", "dnn/opencv_face_detector.prototxt",
inp, "detection_out");
@ -353,11 +354,11 @@ TEST_P(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
Mat sample = imread(findDataFile("dnn/street.png"));
Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.015 : 0.0;
float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0731 : 0.0;
@ -370,7 +371,7 @@ TEST_P(DNNTestNetwork, DenseNet_121)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
// Reference output values are in range [-3.807, 4.605]
float l1 = 0.0, lInf = 0.0;
if (target == DNN_TARGET_OPENCL_FP16)
@ -389,24 +390,25 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB, CV_TEST_TAG_DEBUG_VERYLONG);
if (backend == DNN_BACKEND_HALIDE ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
if (backend == DNN_BACKEND_HALIDE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#if defined(INF_ENGINE_RELEASE)
#if INF_ENGINE_RELEASE <= 2018050000
#if INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
#endif
Mat img = imread(findDataFile("dnn/googlenet_1.png", false));
Mat img = imread(findDataFile("dnn/googlenet_1.png"));
Mat inp = blobFromImage(img, 1.0, Size(320, 240), Scalar(103.939, 116.779, 123.68), false, false);
// Output image has values in range [-143.526, 148.539].
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.4 : 4e-5;
float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.45 : 2e-3;
processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
#if defined(HAVE_INF_ENGINE) && INF_ENGINE_RELEASE >= 2019010000
#if defined(HAVE_INF_ENGINE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
expectNoFallbacksFromIE(net);
#endif
}

@ -48,7 +48,7 @@ namespace opencv_test { namespace {
template<typename TString>
static std::string _tf(TString filename)
{
return (getOpenCVExtraDir() + "/dnn/") + filename;
return findDataFile(std::string("dnn/") + filename);
}
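Throughout these tests, prototxt/config files switch from findDataFile(path, false) to the required form findDataFile(path), while large downloadable weights keep required=false. A rough sketch of the contract these call sites appear to assume (the real helper lives in modules/ts; "testdata/" is a placeholder search root, and the exception type stands in for the test-skip mechanism):

#include <fstream>
#include <stdexcept>
#include <string>

static bool fileExists(const std::string& p) { return std::ifstream(p).good(); }

static std::string findDataFileSketch(const std::string& relative, bool required = true)
{
    const std::string candidate = "testdata/" + relative;   // assumed search root
    if (fileExists(candidate))
        return candidate;
    if (required)
        throw std::runtime_error("skip: missing test data " + relative);
    return candidate;   // optional files: return the expected location anyway
}

So with the required form, a missing small file marks the test as skipped instead of letting it fail later on a bad path.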
class Test_Caffe_nets : public DNNTestLayer
@ -58,11 +58,11 @@ public:
double scoreDiff = 0.0, double iouDiff = 0.0)
{
checkBackend();
Net net = readNetFromCaffe(findDataFile("dnn/" + proto, false),
Net net = readNetFromCaffe(findDataFile("dnn/" + proto),
findDataFile("dnn/" + model, false));
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
Mat img = imread(findDataFile("dnn/dog416.png", false));
Mat img = imread(findDataFile("dnn/dog416.png"));
resize(img, img, Size(800, 600));
Mat blob = blobFromImage(img, 1.0, Size(), Scalar(102.9801, 115.9465, 122.7717), false, false);
Mat imInfo = (Mat_<float>(1, 3) << img.rows, img.cols, 1.6f);
@ -80,11 +80,12 @@ public:
TEST(Test_Caffe, memory_read)
{
const string proto = findDataFile("dnn/bvlc_googlenet.prototxt", false);
const string proto = findDataFile("dnn/bvlc_googlenet.prototxt");
const string model = findDataFile("dnn/bvlc_googlenet.caffemodel", false);
std::vector<char> dataProto;
readFileContent(proto, dataProto);
std::vector<char> dataModel;
readFileContent(model, dataModel);
@ -112,7 +113,7 @@ TEST(Test_Caffe, read_googlenet)
TEST_P(Test_Caffe_nets, Axpy)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
String proto = _tf("axpy.prototxt");
Net net = readNetFromCaffe(proto);
@ -157,13 +158,12 @@ TEST_P(Reproducibility_AlexNet, Accuracy)
{
Target targetId = get<1>(GetParam());
applyTestTag(targetId == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
if (!ocl::useOpenCL() && targetId != DNN_TARGET_CPU)
throw SkipTestException("OpenCL is disabled");
ASSERT_TRUE(ocl::useOpenCL() || targetId == DNN_TARGET_CPU);
bool readFromMemory = get<0>(GetParam());
Net net;
{
const string proto = findDataFile("dnn/bvlc_alexnet.prototxt", false);
const string proto = findDataFile("dnn/bvlc_alexnet.prototxt");
const string model = findDataFile("dnn/bvlc_alexnet.caffemodel", false);
if (readFromMemory)
{
@ -196,7 +196,7 @@ TEST_P(Reproducibility_AlexNet, Accuracy)
}
INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_AlexNet, Combine(testing::Bool(),
Values(DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16)));
testing::ValuesIn(getAvailableTargets(DNN_BACKEND_OPENCV))));
TEST(Reproducibility_FCN, Accuracy)
{
@ -204,8 +204,8 @@ TEST(Reproducibility_FCN, Accuracy)
Net net;
{
const string proto = findDataFile("dnn/fcn8s-heavy-pascal.prototxt", false);
const string model = findDataFile("dnn/fcn8s-heavy-pascal.caffemodel", false);
const string proto = findDataFile("dnn/fcn8s-heavy-pascal.prototxt");
const string model = findDataFile("dnn/fcn8s-heavy-pascal.caffemodel");
net = readNetFromCaffe(proto, model);
ASSERT_FALSE(net.empty());
}
@ -233,7 +233,7 @@ TEST(Reproducibility_SSD, Accuracy)
applyTestTag(CV_TEST_TAG_MEMORY_512MB, CV_TEST_TAG_DEBUG_LONG);
Net net;
{
const string proto = findDataFile("dnn/ssd_vgg16.prototxt", false);
const string proto = findDataFile("dnn/ssd_vgg16.prototxt");
const string model = findDataFile("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel", false);
net = readNetFromCaffe(proto, model);
ASSERT_FALSE(net.empty());
@ -328,10 +328,9 @@ TEST_P(Reproducibility_ResNet50, Accuracy)
{
Target targetId = GetParam();
applyTestTag(targetId == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
if (!ocl::useOpenCL() && targetId != DNN_TARGET_CPU)
throw SkipTestException("OpenCL is disabled");
ASSERT_TRUE(ocl::useOpenCL() || targetId == DNN_TARGET_CPU);
Net net = readNetFromCaffe(findDataFile("dnn/ResNet-50-deploy.prototxt", false),
Net net = readNetFromCaffe(findDataFile("dnn/ResNet-50-deploy.prototxt"),
findDataFile("dnn/ResNet-50-model.caffemodel", false));
net.setPreferableBackend(DNN_BACKEND_OPENCV);
@ -361,15 +360,15 @@ TEST_P(Reproducibility_ResNet50, Accuracy)
}
}
INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_ResNet50,
Values(DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16));
testing::ValuesIn(getAvailableTargets(DNN_BACKEND_OPENCV)));
typedef testing::TestWithParam<Target> Reproducibility_SqueezeNet_v1_1;
TEST_P(Reproducibility_SqueezeNet_v1_1, Accuracy)
{
int targetId = GetParam();
if(targetId == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/squeezenet_v1.1.prototxt", false),
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
Net net = readNetFromCaffe(findDataFile("dnn/squeezenet_v1.1.prototxt"),
findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
@ -400,18 +399,18 @@ TEST(Reproducibility_AlexNet_fp16, Accuracy)
const float l1 = 1e-5;
const float lInf = 3e-3;
const string proto = findDataFile("dnn/bvlc_alexnet.prototxt", false);
const string proto = findDataFile("dnn/bvlc_alexnet.prototxt");
const string model = findDataFile("dnn/bvlc_alexnet.caffemodel", false);
shrinkCaffeModel(model, "bvlc_alexnet.caffemodel_fp16");
Net net = readNetFromCaffe(proto, "bvlc_alexnet.caffemodel_fp16");
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat sample = imread(findDataFile("dnn/grace_hopper_227.png", false));
Mat sample = imread(findDataFile("dnn/grace_hopper_227.png"));
net.setInput(blobFromImage(sample, 1.0f, Size(227, 227), Scalar(), false));
net.setInput(blobFromImage(sample, 1.0f, Size(227, 227), Scalar()));
Mat out = net.forward();
Mat ref = blobFromNPY(findDataFile("dnn/caffe_alexnet_prob.npy", false));
Mat ref = blobFromNPY(findDataFile("dnn/caffe_alexnet_prob.npy"));
normAssert(ref, out, "", l1, lInf);
}
@ -420,7 +419,7 @@ TEST(Reproducibility_GoogLeNet_fp16, Accuracy)
const float l1 = 1e-5;
const float lInf = 3e-3;
const string proto = findDataFile("dnn/bvlc_googlenet.prototxt", false);
const string proto = findDataFile("dnn/bvlc_googlenet.prototxt");
const string model = findDataFile("dnn/bvlc_googlenet.caffemodel", false);
shrinkCaffeModel(model, "bvlc_googlenet.caffemodel_fp16");
@ -506,7 +505,7 @@ TEST_P(Test_Caffe_nets, DenseNet_121)
TEST(Test_Caffe, multiple_inputs)
{
const string proto = findDataFile("dnn/layers/net_input.prototxt", false);
const string proto = findDataFile("dnn/layers/net_input.prototxt");
Net net = readNetFromCaffe(proto);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
@ -534,8 +533,8 @@ TEST(Test_Caffe, multiple_inputs)
TEST(Test_Caffe, shared_weights)
{
const string proto = findDataFile("dnn/layers/shared_weights.prototxt", false);
const string model = findDataFile("dnn/layers/shared_weights.caffemodel", false);
const string proto = findDataFile("dnn/layers/shared_weights.prototxt");
const string model = findDataFile("dnn/layers/shared_weights.caffemodel");
Net net = readNetFromCaffe(proto, model);
@ -563,7 +562,7 @@ TEST_P(opencv_face_detector, Accuracy)
dnn::Target targetId = (dnn::Target)(int)get<1>(GetParam());
Net net = readNetFromCaffe(proto, model);
Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
Mat img = imread(findDataFile("gpu/lbpcascade/er.png"));
Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
@ -599,10 +598,10 @@ TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("Test is disabled for DLIE OpenCL targets"); // very slow
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
static Mat ref = (Mat_<float>(3, 7) << 0, 2, 0.949398, 99.2454, 210.141, 601.205, 462.849,
@ -617,9 +616,10 @@ TEST_P(Test_Caffe_nets, FasterRCNN_zf)
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
CV_TEST_TAG_DEBUG_LONG
);
if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
static Mat ref = (Mat_<float>(3, 7) << 0, 2, 0.90121, 120.407, 115.83, 570.586, 528.395,
0, 7, 0.988779, 469.849, 75.1756, 718.64, 186.762,
0, 12, 0.967198, 138.588, 206.843, 329.766, 553.176);
@ -633,9 +633,10 @@ TEST_P(Test_Caffe_nets, RFCN)
CV_TEST_TAG_LONG,
CV_TEST_TAG_DEBUG_VERYLONG
);
if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
double scoreDiff = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 4e-3 : default_l1;
double iouDiff = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 8e-2 : default_lInf;
static Mat ref = (Mat_<float>(2, 7) << 0, 7, 0.991359, 491.822, 81.1668, 702.573, 178.234,

@ -11,6 +11,21 @@
#include "opencv2/core/ocl.hpp"
#endif
#define CV_TEST_TAG_DNN_SKIP_HALIDE "dnn_skip_halide"
#define CV_TEST_TAG_DNN_SKIP_OPENCL "dnn_skip_ocl"
#define CV_TEST_TAG_DNN_SKIP_OPENCL_FP16 "dnn_skip_ocl_fp16"
#define CV_TEST_TAG_DNN_SKIP_IE "dnn_skip_ie"
#define CV_TEST_TAG_DNN_SKIP_IE_2018R5 "dnn_skip_ie_2018r5"
#define CV_TEST_TAG_DNN_SKIP_IE_2019R1 "dnn_skip_ie_2019r1"
#define CV_TEST_TAG_DNN_SKIP_IE_2019R1_1 "dnn_skip_ie_2019r1_1"
#define CV_TEST_TAG_DNN_SKIP_IE_OPENCL "dnn_skip_ie_ocl"
#define CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16 "dnn_skip_ie_ocl_fp16"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2 "dnn_skip_ie_myriad2"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X "dnn_skip_ie_myriadx"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
#define CV_TEST_TAG_DNN_SKIP_VULKAN "dnn_skip_vulkan"
namespace cv { namespace dnn {
CV__DNN_INLINE_NS_BEGIN
@ -28,6 +43,8 @@ CV__DNN_INLINE_NS_END
namespace opencv_test {
void initDNNTests();
using namespace cv::dnn;
static inline const std::string &getOpenCVExtraDir()
@ -107,7 +124,10 @@ public:
{
if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
inp->size[0] != 1 && inp->size[0] != ref->size[0])
{
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin");
}
}
}
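The new CV_TEST_TAG_DNN_SKIP_* tags replace ad-hoc SkipTestException throws across the DNN tests: a test declares why it may be skipped via applyTestTag, and the runner decides through globally registered skip tags whether that reason actually skips it. A conceptual model of the mechanism (a simplified sketch, not the actual ts implementation):

#include <iostream>
#include <set>
#include <stdexcept>
#include <string>

static std::set<std::string> g_skipTags;

static void registerGlobalSkipTag(const std::string& tag) { g_skipTags.insert(tag); }

struct SkipTestException : std::runtime_error
{
    explicit SkipTestException(const std::string& msg) : std::runtime_error(msg) {}
};

// Skip the current test only if its tag is in the registered skip set.
static void applyTestTag(const std::string& tag)
{
    if (g_skipTags.count(tag))
        throw SkipTestException("skipped by tag: " + tag);
}

int main()
{
    registerGlobalSkipTag("dnn_skip_halide");     // e.g. requested by the test runner
    try
    {
        applyTestTag("dnn_skip_halide");          // test body would follow here
        std::cout << "test ran\n";
    }
    catch (const SkipTestException& e)
    {
        std::cout << e.what() << "\n";            // reported as skipped, not failed
    }
    return 0;
}

This keeps the skip reason queryable per test instead of hiding it in a free-form exception message.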

@ -279,6 +279,14 @@ static bool validateVPUType_()
exit(1);
}
}
if (have_vpu_target)
{
std::string dnn_vpu_type = getInferenceEngineVPUType();
if (dnn_vpu_type == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2)
registerGlobalSkipTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
if (dnn_vpu_type == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
registerGlobalSkipTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
}
return true;
}
@ -289,4 +297,43 @@ bool validateVPUType()
}
#endif // HAVE_INF_ENGINE
void initDNNTests()
{
const char* extraTestDataPath =
#ifdef WINRT
NULL;
#else
getenv("OPENCV_DNN_TEST_DATA_PATH");
#endif
if (extraTestDataPath)
cvtest::addDataSearchPath(extraTestDataPath);
registerGlobalSkipTag(
CV_TEST_TAG_DNN_SKIP_HALIDE,
CV_TEST_TAG_DNN_SKIP_OPENCL, CV_TEST_TAG_DNN_SKIP_OPENCL_FP16
);
#if defined(INF_ENGINE_RELEASE)
registerGlobalSkipTag(
#if INF_ENGINE_VER_MAJOR_EQ(2018050000)
CV_TEST_TAG_DNN_SKIP_IE_2018R5,
#elif INF_ENGINE_VER_MAJOR_EQ(2019010000)
CV_TEST_TAG_DNN_SKIP_IE_2019R1,
#elif INF_ENGINE_VER_MAJOR_EQ(2019010100)
CV_TEST_TAG_DNN_SKIP_IE_2019R1_1,
#endif
CV_TEST_TAG_DNN_SKIP_IE
);
#endif
registerGlobalSkipTag(
// see validateVPUType(): CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16
);
#ifdef HAVE_VULKAN
registerGlobalSkipTag(
CV_TEST_TAG_DNN_SKIP_VULKAN
);
#endif
}
} // namespace

@ -82,7 +82,7 @@ TEST(Test_Darknet, read_yolo_voc_stream)
Mat ref;
Mat sample = imread(_tf("dog416.png"));
Mat inp = blobFromImage(sample, 1.0/255, Size(416, 416), Scalar(), true, false);
const std::string cfgFile = findDataFile("dnn/yolo-voc.cfg", false);
const std::string cfgFile = findDataFile("dnn/yolo-voc.cfg");
const std::string weightsFile = findDataFile("dnn/yolo-voc.weights", false);
// Import by paths.
{
@ -110,12 +110,13 @@ class Test_Darknet_layers : public DNNTestLayer
public:
void testDarknetLayer(const std::string& name, bool hasWeights = false)
{
std::string cfg = findDataFile("dnn/darknet/" + name + ".cfg", false);
Mat inp = blobFromNPY(findDataFile("dnn/darknet/" + name + "_in.npy"));
Mat ref = blobFromNPY(findDataFile("dnn/darknet/" + name + "_out.npy"));
std::string cfg = findDataFile("dnn/darknet/" + name + ".cfg");
std::string model = "";
if (hasWeights)
model = findDataFile("dnn/darknet/" + name + ".weights", false);
Mat inp = blobFromNPY(findDataFile("dnn/darknet/" + name + "_in.npy", false));
Mat ref = blobFromNPY(findDataFile("dnn/darknet/" + name + "_out.npy", false));
checkBackend(&inp, &ref);
@ -152,7 +153,7 @@ public:
Mat inp = blobFromImages(samples, 1.0/255, Size(416, 416), Scalar(), true, false);
Net net = readNet(findDataFile("dnn/" + cfg, false),
Net net = readNet(findDataFile("dnn/" + cfg),
findDataFile("dnn/" + weights, false));
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
@ -272,12 +273,12 @@ TEST_P(Test_Darknet_nets, YoloVoc)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("Test is disabled");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX (need to update check function)");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X); // need to update check function
#endif
// batchId, classId, confidence, left, top, right, bottom
@ -313,7 +314,7 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX (need to update check function)");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X); // need to update check function
#endif
// batchId, classId, confidence, left, top, right, bottom
Mat ref = (Mat_<float>(4, 7) << 0, 6, 0.761967f, 0.579042f, 0.159161f, 0.894482f, 0.31994f, // a car
@ -345,7 +346,7 @@ TEST_P(Test_Darknet_nets, YOLOv3)
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
// batchId, classId, confidence, left, top, right, bottom
@ -372,7 +373,7 @@ TEST_P(Test_Darknet_nets, YOLOv3)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
throw SkipTestException("Test with 'batch size 2' is disabled for DLIE/OpenCL target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL); // Test with 'batch size 2' is disabled for DLIE/OpenCL target
#endif
{

@ -56,9 +56,9 @@ typedef testing::TestWithParam<Target> Reproducibility_GoogLeNet;
TEST_P(Reproducibility_GoogLeNet, Batching)
{
const int targetId = GetParam();
if(targetId == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
if (targetId == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt"),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
@ -87,9 +87,9 @@ TEST_P(Reproducibility_GoogLeNet, Batching)
TEST_P(Reproducibility_GoogLeNet, IntermediateBlobs)
{
const int targetId = GetParam();
if(targetId == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
if (targetId == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt"),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
@ -118,9 +118,9 @@ TEST_P(Reproducibility_GoogLeNet, IntermediateBlobs)
TEST_P(Reproducibility_GoogLeNet, SeveralCalls)
{
const int targetId = GetParam();
if(targetId == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
if (targetId == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt"),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

@ -34,6 +34,11 @@ static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool
double l1, lInf;
DNNTestLayer::getDefaultThresholds(backendId, targetId, &l1, &lInf);
#if 0
std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
std::cout << outputHalide.reshape(1, outputDefault.total()).t() << std::endl;
#endif
normAssert(outputDefault, outputHalide, "", l1, lInf);
}
@ -165,7 +170,7 @@ TEST_P(Deconvolution, Accuracy)
&& inChannels == 6 && outChannels == 4 && group == 1
&& kernel == Size(1, 3) && pad == Size(1, 0)
&& stride == Size(1, 1) && dilation == Size(1, 1))
throw SkipTestException("Test is disabled");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
int sz[] = {inChannels, outChannels / group, kernel.height, kernel.width};
@ -231,7 +236,7 @@ TEST_P(LRN, Accuracy)
if ((inSize.width == 5 || inSize.height == 5) && targetId == DNN_TARGET_MYRIAD &&
nrmType == "ACROSS_CHANNELS")
throw SkipTestException("This test case is disabled");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
LayerParams lp;
lp.set("norm_region", nrmType);
@ -276,7 +281,7 @@ TEST_P(AvePooling, Accuracy)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
&& kernel == Size(1, 1) && (stride == Size(1, 1) || stride == Size(2, 2)))
throw SkipTestException("Test is disabled for MyriadX target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
@ -324,7 +329,7 @@ TEST_P(MaxPooling, Accuracy)
&& (stride == Size(1, 1) || stride == Size(2, 2))
&& (pad == Size(0, 1) || pad == Size(1, 1))
)
throw SkipTestException("Test is disabled in OpenVINO <= 2018R5");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
@ -332,7 +337,7 @@ TEST_P(MaxPooling, Accuracy)
&& (kernel == Size(2, 2) || kernel == Size(3, 2))
&& stride == Size(1, 1) && (pad == Size(0, 0) || pad == Size(0, 1))
)
throw SkipTestException("Problems with output dimension in OpenVINO 2018R5");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
@ -341,7 +346,7 @@ TEST_P(MaxPooling, Accuracy)
&& (stride == Size(1, 1) || stride == Size(2, 2))
&& (pad == Size(0, 1) || pad == Size(1, 1))
)
throw SkipTestException("Test is disabled for MyriadX target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
#endif
LayerParams lp;
@ -382,7 +387,7 @@ TEST_P(FullyConnected, Accuracy)
Backend backendId = get<0>(get<4>(GetParam()));
Target targetId = get<1>(get<4>(GetParam()));
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
Mat weights(outChannels, inChannels * inSize.height * inSize.width, CV_32F);
randu(weights, -1.0f, 1.0f);
@ -440,7 +445,7 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, SoftMax, Combine(
TEST_P(Test_Halide_layers, MaxPoolUnpool)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
LayerParams pool;
pool.set("pool", "max");
@ -656,14 +661,14 @@ TEST_P(Concat, Accuracy)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
)
throw SkipTestException("Test is disabled for Myriad target"); // crash
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5); // crash
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU
&& inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
)
throw SkipTestException("Test is disabled for DLIE/CPU target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1); // TODO: IE_CPU
#endif
Net net;
@ -737,12 +742,12 @@ TEST_P(Eltwise, Accuracy)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD &&
inSize == Vec3i(1, 4, 5))
throw SkipTestException("Test is disabled for Myriad target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && numConv > 1)
throw SkipTestException("Test is disabled for DLIE backend");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
#endif
Net net;

@ -142,15 +142,16 @@ TEST_P(Test_Caffe_layers, Convolution)
TEST_P(Test_Caffe_layers, DeConvolution)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU)
throw SkipTestException("Test is disabled for DLIE/CPU");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE); // TODO IE_CPU
testLayerUsingCaffeModels("layer_deconvolution", true, false);
}
TEST_P(Test_Caffe_layers, InnerProduct)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
testLayerUsingCaffeModels("layer_inner_product", true);
}
@ -236,7 +237,7 @@ TEST_P(Test_Caffe_layers, Concat)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
#endif
testLayerUsingCaffeModels("layer_concat");
testLayerUsingCaffeModels("layer_concat_optim", true, false);
@ -246,15 +247,13 @@ TEST_P(Test_Caffe_layers, Concat)
TEST_P(Test_Caffe_layers, Fused_Concat)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE due negative_slope parameter");
if (backend == DNN_BACKEND_INFERENCE_ENGINE) // Test is disabled for DLIE due negative_slope parameter
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE
&& (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
)
throw SkipTestException("Test is disabled for DLIE");
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
#endif
checkBackend();
@ -300,7 +299,7 @@ TEST_P(Test_Caffe_layers, Fused_Concat)
TEST_P(Test_Caffe_layers, Eltwise)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
testLayerUsingCaffeModels("layer_eltwise");
}
@ -313,7 +312,7 @@ TEST_P(Test_Caffe_layers, PReLU)
TEST_P(Test_Caffe_layers, layer_prelu_fc)
{
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
// Reference output values are in range [-0.0001, 10.3906]
double l1 = (target == DNN_TARGET_MYRIAD) ? 0.005 : 0.0;
double lInf = (target == DNN_TARGET_MYRIAD) ? 0.021 : 0.0;
@ -343,7 +342,7 @@ TEST_P(Test_Caffe_layers, layer_prelu_fc)
TEST_P(Test_Caffe_layers, Reshape_Split_Slice)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
Net net = readNetFromCaffe(_tf("reshape_and_slice_routines.prototxt"));
ASSERT_FALSE(net.empty());
@ -365,7 +364,7 @@ TEST_P(Test_Caffe_layers, Conv_Elu)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE <= 2018050000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
Net net = readNetFromTensorflow(_tf("layer_elu_model.pb"));
@ -548,9 +547,11 @@ TEST(Layer_Test_ROIPooling, Accuracy)
TEST_P(Test_Caffe_layers, FasterRCNN_Proposal)
{
if ((backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ||
backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
Net net = readNetFromCaffe(_tf("net_faster_rcnn_proposal.prototxt"));
Mat scores = blobFromNPY(_tf("net_faster_rcnn_proposal.scores.npy"));
@ -774,7 +775,8 @@ TEST_P(Test_Caffe_layers, Average_pooling_kernel_area)
TEST_P(Test_Caffe_layers, PriorBox_squares)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
LayerParams lp;
lp.name = "testPriorBox";
lp.type = "PriorBox";
@ -1307,7 +1309,8 @@ TEST_P(Test_Caffe_layers, DISABLED_Interp) // requires patched protobuf (availa
#endif
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
// Test a custom layer.
CV_DNN_REGISTER_LAYER_CLASS(Interp, CustomInterpLayer);
try

@ -1,24 +1,7 @@
#include "test_precomp.hpp"
static const char* extraTestDataPath =
#ifdef WINRT
NULL;
#else
getenv("OPENCV_DNN_TEST_DATA_PATH");
#endif
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
CV_TEST_MAIN("",
extraTestDataPath ? (void)cvtest::addDataSearchPath(extraTestDataPath) : (void)0
)
namespace opencv_test
{
using namespace cv;
using namespace cv::dnn;
}
CV_TEST_MAIN("", initDNNTests());

@ -62,18 +62,18 @@ TEST(imagesFromBlob, Regression)
TEST(readNet, Regression)
{
Net net = readNet(findDataFile("dnn/squeezenet_v1.1.prototxt", false),
Net net = readNet(findDataFile("dnn/squeezenet_v1.1.prototxt"),
findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
EXPECT_FALSE(net.empty());
net = readNet(findDataFile("dnn/opencv_face_detector.caffemodel", false),
findDataFile("dnn/opencv_face_detector.prototxt", false));
findDataFile("dnn/opencv_face_detector.prototxt"));
EXPECT_FALSE(net.empty());
net = readNet(findDataFile("dnn/openface_nn4.small2.v1.t7", false));
EXPECT_FALSE(net.empty());
net = readNet(findDataFile("dnn/tiny-yolo-voc.cfg", false),
net = readNet(findDataFile("dnn/tiny-yolo-voc.cfg"),
findDataFile("dnn/tiny-yolo-voc.weights", false));
EXPECT_FALSE(net.empty());
net = readNet(findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false),
net = readNet(findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt"),
findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false));
EXPECT_FALSE(net.empty());
}
@ -158,9 +158,9 @@ TEST_P(setInput, normalization)
const bool kSwapRB = true;
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16 && dtype != CV_32F)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
if (backend == DNN_BACKEND_VKCOM && dtype != CV_32F)
throw SkipTestException("");
throw SkipTestException(CV_TEST_TAG_DNN_SKIP_VULKAN);
Mat inp(5, 5, CV_8UC3);
randu(inp, 0, 255);

@ -12,15 +12,18 @@
namespace opencv_test { namespace {
template<typename TString>
static std::string _tf(TString filename)
static std::string _tf(TString filename, bool required = true)
{
String rootFolder = "dnn/onnx/";
return findDataFile(rootFolder + filename, false);
return findDataFile(std::string("dnn/onnx/") + filename, required);
}
class Test_ONNX_layers : public DNNTestLayer
{
public:
bool required;
Test_ONNX_layers() : required(true) { }
enum Extension
{
npy,
@ -31,7 +34,7 @@ public:
const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
bool checkNoFallbacks = true)
{
String onnxmodel = _tf("models/" + basename + ".onnx");
String onnxmodel = _tf("models/" + basename + ".onnx", required);
Mat inp, ref;
if (ext == npy) {
inp = blobFromNPY(_tf("data/input_" + basename + ".npy"));
@ -101,7 +104,7 @@ TEST_P(Test_ONNX_layers, Two_convolution)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX"); // 2018R5+ is failed
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
// Reference output values are in range [-0.855, 0.611]
testONNXModels("two_convolution");
@ -124,7 +127,7 @@ TEST_P(Test_ONNX_layers, Dropout)
TEST_P(Test_ONNX_layers, Linear)
{
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
testONNXModels("linear");
}
@ -140,12 +143,25 @@ TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
TEST_P(Test_ONNX_layers, Concatenation)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL || target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
}
testONNXModels("concatenation");
}
TEST_P(Test_ONNX_layers, Eltwise3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
throw SkipTestException("Test is enabled starts from 2019R1");
#endif
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU on DLIE backend is supported");
testONNXModels("eltwise3d");
}
TEST_P(Test_ONNX_layers, AveragePooling)
{
testONNXModels("average_pooling");
@ -178,24 +194,32 @@ TEST_P(Test_ONNX_layers, BatchNormalization)
TEST_P(Test_ONNX_layers, BatchNormalization3D)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
}
testONNXModels("batch_norm_3d");
}
TEST_P(Test_ONNX_layers, Transpose)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL || target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
}
testONNXModels("transpose");
}
TEST_P(Test_ONNX_layers, Multiplication)
{
if ((backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
testONNXModels("mul");
}
@ -204,7 +228,7 @@ TEST_P(Test_ONNX_layers, Constant)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
testONNXModels("constant");
}
@ -248,8 +272,11 @@ TEST_P(Test_ONNX_layers, MultyInputs)
TEST_P(Test_ONNX_layers, DynamicReshape)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
}
testONNXModels("dynamic_reshape");
}
@ -275,11 +302,16 @@ TEST_P(Test_ONNX_layers, Softmax)
INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
class Test_ONNX_nets : public Test_ONNX_layers {};
class Test_ONNX_nets : public Test_ONNX_layers
{
public:
Test_ONNX_nets() { required = false; }
};
TEST_P(Test_ONNX_nets, Alexnet)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
const String model = _tf("models/alexnet.onnx");
const String model = _tf("models/alexnet.onnx", false);
Net net = readNetFromONNX(model);
ASSERT_FALSE(net.empty());
@ -307,9 +339,9 @@ TEST_P(Test_ONNX_nets, Squeezenet)
TEST_P(Test_ONNX_nets, Googlenet)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
const String model = _tf("models/googlenet.onnx");
const String model = _tf("models/googlenet.onnx", false);
Net net = readNetFromONNX(model);
ASSERT_FALSE(net.empty());
@ -391,14 +423,18 @@ TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL)
{
if (backend == DNN_BACKEND_OPENCV)
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_OPENCL : CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
throw SkipTestException("Test is disabled for OpenCL targets");
}
testONNXModels("resnet101_duc_hdc", pb);
}
@ -412,12 +448,12 @@ TEST_P(Test_ONNX_nets, TinyYolov2)
if (backend == DNN_BACKEND_INFERENCE_ENGINE
&& (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
)
throw SkipTestException("Test is disabled for DLIE OpenCL targets");
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
// output range: [-11; 8]
@ -444,9 +480,12 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
CV_TEST_TAG_DEBUG_LONG
);
if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL || target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
}
double l1 = default_l1;
double lInf = default_lInf;
@ -468,7 +507,7 @@ TEST_P(Test_ONNX_nets, Emotion_ferplus)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
double l1 = default_l1;
@ -506,16 +545,19 @@ TEST_P(Test_ONNX_nets, Inception_v1)
{
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
testONNXModels("inception_v1", pb);
}
TEST_P(Test_ONNX_nets, Shufflenet)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL || target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
}
testONNXModels("shufflenet", pb);
}
@ -527,7 +569,7 @@ TEST_P(Test_ONNX_nets, Resnet34_kinetics)
if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
throw SkipTestException("Only DLIE backend on CPU is supported");
String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx");
String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
Mat image0 = imread(findDataFile("dnn/dog416.png"));
Mat image1 = imread(findDataFile("dnn/street.png"));

@ -74,7 +74,7 @@ TEST(Test_TensorFlow, inception_accuracy)
static std::string path(const std::string& file)
{
return findDataFile("dnn/tensorflow/" + file, false);
return findDataFile("dnn/tensorflow/" + file);
}
class Test_TensorFlow_layers : public DNNTestLayer
@ -146,6 +146,7 @@ TEST_P(Test_TensorFlow_layers, padding)
runTensorFlowNet("padding_valid");
runTensorFlowNet("spatial_padding");
runTensorFlowNet("keras_pad_concat");
runTensorFlowNet("mirror_pad");
}
TEST_P(Test_TensorFlow_layers, padding_same)
@ -154,7 +155,7 @@ TEST_P(Test_TensorFlow_layers, padding_same)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
// Reference output values are in range [0.0006, 2.798]
runTensorFlowNet("padding_same");
@ -196,14 +197,19 @@ TEST_P(Test_TensorFlow_layers, batch_norm)
TEST_P(Test_TensorFlow_layers, batch_norm3D)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
throw SkipTestException("");
}
runTensorFlowNet("batch_norm3d");
}
TEST_P(Test_TensorFlow_layers, slim_batch_norm)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
// Output values range: [-40.0597, 207.827]
double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.041 : default_l1;
double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.33 : default_lInf;
@ -226,7 +232,7 @@ TEST_P(Test_TensorFlow_layers, ave_pool_same)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
runTensorFlowNet("ave_pool_same");
}
@ -266,7 +272,7 @@ TEST_P(Test_TensorFlow_layers, deconvolution)
TEST_P(Test_TensorFlow_layers, matmul)
{
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
runTensorFlowNet("matmul");
runTensorFlowNet("nhwc_transpose_reshape_matmul");
// Reference output values are in range [-5.688, 4.484]
@ -278,7 +284,7 @@ TEST_P(Test_TensorFlow_layers, matmul)
TEST_P(Test_TensorFlow_layers, reshape)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
runTensorFlowNet("shift_reshape_no_reorder");
runTensorFlowNet("reshape_no_reorder");
runTensorFlowNet("reshape_reduce");
@ -291,7 +297,7 @@ TEST_P(Test_TensorFlow_layers, flatten)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
)
throw SkipTestException("Test is disabled for Myriad2");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
#endif
runTensorFlowNet("flatten", true);
@ -307,7 +313,7 @@ TEST_P(Test_TensorFlow_layers, leaky_relu)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
throw SkipTestException("Test is disabled for DLIE/OCL target (OpenVINO 2018R5)");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
runTensorFlowNet("leaky_relu_order1");
runTensorFlowNet("leaky_relu_order2");
@ -320,7 +326,7 @@ TEST_P(Test_TensorFlow_layers, l2_normalize)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
runTensorFlowNet("l2_normalize");
@ -333,11 +339,11 @@ TEST_P(Test_TensorFlow_layers, l2_normalize_3d)
if (backend == DNN_BACKEND_INFERENCE_ENGINE
&& (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
)
throw SkipTestException("Test is disabled for DLIE for OpenCL targets");
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
runTensorFlowNet("l2_normalize_3d");
@ -352,23 +358,23 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
{
#if INF_ENGINE_VER_MAJOR_GE(2019010000)
if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#else
throw SkipTestException("Test is disabled for Myriad");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
}
#endif
checkBackend();
std::string imgPath = findDataFile("dnn/street.png");
std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt");
std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
std::string imgPath = findDataFile("dnn/street.png", false);
Mat inp;
resize(imread(imgPath), inp, Size(300, 300));
inp = blobFromImage(inp, 1.0f / 127.5, Size(), Scalar(127.5, 127.5, 127.5), true);
Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco.detection_out.npy", false));
Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco.detection_out.npy"));
Net net = readNetFromTensorflow(netPath, netConfig);
net.setPreferableBackend(backend);
@ -393,15 +399,15 @@ TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
checkBackend();
std::string proto = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pbtxt", false);
Mat img = imread(findDataFile("dnn/street.png"));
std::string proto = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pbtxt");
std::string model = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pb", false);
Net net = readNetFromTensorflow(model, proto);
Mat img = imread(findDataFile("dnn/street.png", false));
Mat blob = blobFromImage(img, 1.0f, Size(300, 300), Scalar(), true, false);
net.setPreferableBackend(backend);
@ -431,14 +437,14 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
std::string proto = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt");
std::string model = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", false);
std::string proto = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt", false);
Net net = readNetFromTensorflow(model, proto);
Mat img = imread(findDataFile("dnn/dog416.png", false));
Mat img = imread(findDataFile("dnn/dog416.png"));
Mat blob = blobFromImage(img, 1.0f, Size(300, 300), Scalar(), true, false);
net.setPreferableBackend(backend);
@ -466,20 +472,21 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
"faster_rcnn_resnet50_coco_2018_01_28"};
checkBackend();
if ((backend == DNN_BACKEND_INFERENCE_ENGINE) ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
double scoresDiff = backend == DNN_BACKEND_INFERENCE_ENGINE ? 2.9e-5 : 1e-5;
for (int i = 0; i < 2; ++i)
{
std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt", false);
std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt");
std::string model = findDataFile("dnn/" + names[i] + ".pb", false);
Net net = readNetFromTensorflow(model, proto);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
Mat img = imread(findDataFile("dnn/dog416.png", false));
Mat img = imread(findDataFile("dnn/dog416.png"));
Mat blob = blobFromImage(img, 1.0f, Size(800, 600), Scalar(), true, false);
net.setInput(blob);
@ -494,16 +501,16 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("Test is disabled for DLIE OpenCL targets in OpenVINO 2018R5");
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
#endif
checkBackend();
std::string proto = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pbtxt", false);
std::string proto = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pbtxt");
std::string model = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pb", false);
Net net = readNetFromTensorflow(model, proto);
Mat img = imread(findDataFile("dnn/dog416.png", false));
Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_ppn_coco.detection_out.npy", false));
Mat img = imread(findDataFile("dnn/dog416.png"));
Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_ppn_coco.detection_out.npy"));
Mat blob = blobFromImage(img, 1.0f, Size(300, 300), Scalar(), true, false);
net.setPreferableBackend(backend);
@ -521,11 +528,11 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
{
checkBackend();
std::string proto = findDataFile("dnn/opencv_face_detector.pbtxt", false);
std::string proto = findDataFile("dnn/opencv_face_detector.pbtxt");
std::string model = findDataFile("dnn/opencv_face_detector_uint8.pb", false);
Net net = readNetFromTensorflow(model, proto);
Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
Mat img = imread(findDataFile("gpu/lbpcascade/er.png"));
Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);
net.setPreferableBackend(backend);
@ -566,17 +573,17 @@ TEST_P(Test_TensorFlow_nets, EAST_text_detection)
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
checkBackend();
std::string netPath = findDataFile("dnn/frozen_east_text_detection.pb", false);
std::string imgPath = findDataFile("cv/ximgproc/sources/08.png", false);
std::string refScoresPath = findDataFile("dnn/east_text_detection.scores.npy", false);
std::string refGeometryPath = findDataFile("dnn/east_text_detection.geometry.npy", false);
std::string imgPath = findDataFile("cv/ximgproc/sources/08.png");
std::string refScoresPath = findDataFile("dnn/east_text_detection.scores.npy");
std::string refGeometryPath = findDataFile("dnn/east_text_detection.geometry.npy");
Net net = readNet(findDataFile("dnn/frozen_east_text_detection.pb", false));
Net net = readNet(netPath);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
@ -645,7 +652,7 @@ TEST_P(Test_TensorFlow_layers, fp16_padding_same)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
// Reference output values are in range [-3.504, -0.002]
@ -664,9 +671,10 @@ TEST_P(Test_TensorFlow_layers, quantized)
TEST_P(Test_TensorFlow_layers, lstm)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
runTensorFlowNet("lstm", true);
runTensorFlowNet("lstm", true, 0.0, 0.0, true);
}
@ -674,7 +682,7 @@ TEST_P(Test_TensorFlow_layers, lstm)
TEST_P(Test_TensorFlow_layers, split)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
runTensorFlowNet("split_equals");
}
@ -688,7 +696,7 @@ TEST_P(Test_TensorFlow_layers, slice)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
runTensorFlowNet("slice_4d");
runTensorFlowNet("strided_slice");
}
@ -705,7 +713,7 @@ TEST_P(Test_TensorFlow_layers, slim_softmax_v2)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
)
throw SkipTestException("Test is disabled for Myriad2");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
#endif
runTensorFlowNet("slim_softmax_v2");
}
@ -719,7 +727,7 @@ TEST_P(Test_TensorFlow_layers, relu6)
TEST_P(Test_TensorFlow_layers, subpixel)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
runTensorFlowNet("subpixel");
}
@ -740,7 +748,7 @@ TEST_P(Test_TensorFlow_layers, squeeze)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
)
throw SkipTestException("Test is disabled for Myriad2");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
#endif
int inpShapes[][4] = {{1, 3, 4, 2}, {1, 3, 1, 2}, {1, 3, 4, 1}, {1, 3, 4, 1}}; // TensorFlow's shape (NHWC)
int outShapes[][3] = {{3, 4, 2}, {1, 3, 2}, {1, 3, 4}, {1, 3, 4}};
@ -793,11 +801,11 @@ TEST(Test_TensorFlow, two_inputs)
TEST(Test_TensorFlow, Mask_RCNN)
{
applyTestTag(CV_TEST_TAG_MEMORY_1GB, CV_TEST_TAG_DEBUG_VERYLONG);
std::string proto = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt", false);
Mat img = imread(findDataFile("dnn/street.png"));
std::string proto = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt");
std::string model = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pb", false);
Net net = readNetFromTensorflow(model, proto);
Mat img = imread(findDataFile("dnn/street.png", false));
Mat refDetections = blobFromNPY(path("mask_rcnn_inception_v2_coco_2018_01_28.detection_out.npy"));
Mat refMasks = blobFromNPY(path("mask_rcnn_inception_v2_coco_2018_01_28.detection_masks.npy"));
Mat blob = blobFromImage(img, 1.0f, Size(800, 800), Scalar(), true, false);

@ -53,13 +53,13 @@ using namespace cv;
using namespace cv::dnn;
template<typename TStr>
static std::string _tf(TStr filename, bool inTorchDir = true)
static std::string _tf(TStr filename, bool inTorchDir = true, bool required = true)
{
String path = "dnn/";
if (inTorchDir)
path += "torch/";
path += filename;
return findDataFile(path, false);
return findDataFile(path, required);
}
TEST(Torch_Importer, simple_read)
@ -120,7 +120,7 @@ TEST_P(Test_Torch_layers, run_convolution)
TEST_P(Test_Torch_layers, run_pool_max)
{
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
runTorchNet("net_pool_max", "", true);
}
@ -137,7 +137,7 @@ TEST_P(Test_Torch_layers, run_reshape_change_batch_size)
TEST_P(Test_Torch_layers, run_reshape)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
runTorchNet("net_reshape_batch");
runTorchNet("net_reshape_channels", "", false, true);
}
@ -153,7 +153,7 @@ TEST_P(Test_Torch_layers, run_reshape_single_sample)
TEST_P(Test_Torch_layers, run_linear)
{
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
runTorchNet("net_linear_2d");
}
@ -210,7 +210,7 @@ TEST_P(Test_Torch_layers, net_lp_pooling)
TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
runTorchNet("net_conv_gemm_lrn", "", false, true, true,
target == DNN_TARGET_OPENCL_FP16 ? 0.046 : 0.0,
target == DNN_TARGET_OPENCL_FP16 ? 0.023 : 0.0);
@ -237,14 +237,14 @@ TEST_P(Test_Torch_layers, net_non_spatial)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
runTorchNet("net_non_spatial", "", false, true);
}
TEST_P(Test_Torch_layers, run_paralel)
{
if (backend != DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
throw SkipTestException("");
throw SkipTestException(""); // TODO: Check this
runTorchNet("net_parallel", "l5_torchMerge");
}
@ -253,7 +253,7 @@ TEST_P(Test_Torch_layers, net_residual)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL ||
target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("Test is disabled for OpenVINO 2018R5");
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
#endif
runTorchNet("net_residual", "", false, true);
}
@ -264,7 +264,7 @@ TEST_P(Test_Torch_nets, OpenFace_accuracy)
{
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
checkBackend();
@ -274,7 +274,7 @@ TEST_P(Test_Torch_nets, OpenFace_accuracy)
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
Mat sample = imread(findDataFile("cv/shared/lena.png", false));
Mat sample = imread(findDataFile("cv/shared/lena.png"));
Mat sampleF32(sample.size(), CV_32FC3);
sample.convertTo(sampleF32, sampleF32.type());
sampleF32 /= 255;
@ -339,7 +339,7 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
checkBackend();
if (backend == DNN_BACKEND_INFERENCE_ENGINE ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
Net net;
{
@ -391,7 +391,7 @@ TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
#if defined INF_ENGINE_RELEASE
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX target");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
checkBackend();
@ -399,7 +399,7 @@ TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
#if defined(INF_ENGINE_RELEASE)
#if INF_ENGINE_RELEASE <= 2018050000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
throw SkipTestException("");
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
#endif
@ -415,7 +415,7 @@ TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
Mat img = imread(findDataFile("dnn/googlenet_1.png", false));
Mat img = imread(findDataFile("dnn/googlenet_1.png"));
Mat inputBlob = blobFromImage(img, 1.0, Size(), Scalar(103.939, 116.779, 123.68), false);
net.setInput(inputBlob);

@ -289,7 +289,7 @@ public:
int veclen() const { return nnIndex->veclen(); }
int size() const { return nnIndex->size(); }
int size() const { return (int)nnIndex->size(); }
::cvflann::IndexParams getParameters() { return nnIndex->getParameters(); }
@ -297,6 +297,7 @@ public:
private:
::cvflann::Index<Distance>* nnIndex;
Mat _dataset;
};
//! @cond IGNORED
@ -312,10 +313,11 @@ private:
template <typename Distance>
GenericIndex<Distance>::GenericIndex(const Mat& dataset, const ::cvflann::IndexParams& params, Distance distance)
: _dataset(dataset)
{
CV_Assert(dataset.type() == CvType<ElementType>::type());
CV_Assert(dataset.isContinuous());
::cvflann::Matrix<ElementType> m_dataset((ElementType*)dataset.ptr<ElementType>(0), dataset.rows, dataset.cols);
::cvflann::Matrix<ElementType> m_dataset((ElementType*)_dataset.ptr<ElementType>(0), _dataset.rows, _dataset.cols);
nnIndex = new ::cvflann::Index<Distance>(m_dataset, params, distance);

@ -114,7 +114,7 @@ struct L2_Simple
ResultType result = ResultType();
ResultType diff;
for(size_t i = 0; i < size; ++i ) {
diff = *a++ - *b++;
diff = (ResultType)(*a++ - *b++);
result += diff*diff;
}
return result;

@ -1112,6 +1112,7 @@ static bool ippFilter2D(int stype, int dtype, int kernel_type,
static bool dftFilter2D(int stype, int dtype, int kernel_type,
uchar * src_data, size_t src_step,
uchar * dst_data, size_t dst_step,
int width, int height,
int full_width, int full_height,
int offset_x, int offset_y,
uchar * kernel_data, size_t kernel_step,
@ -1125,13 +1126,23 @@ static bool dftFilter2D(int stype, int dtype, int kernel_type,
int dft_filter_size = checkHardwareSupport(CV_CPU_SSE3) && ((sdepth == CV_8U && (ddepth == CV_8U || ddepth == CV_16S)) || (sdepth == CV_32F && ddepth == CV_32F)) ? 130 : 50;
if (kernel_width * kernel_height < dft_filter_size)
return false;
// detect roi case
if( (offset_x != 0) || (offset_y != 0) )
{
return false;
}
if( (width != full_width) || (height != full_height) )
{
return false;
}
}
Point anchor = Point(anchor_x, anchor_y);
Mat kernel = Mat(Size(kernel_width, kernel_height), kernel_type, kernel_data, kernel_step);
Mat src(Size(full_width-offset_x, full_height-offset_y), stype, src_data, src_step);
Mat dst(Size(full_width, full_height), dtype, dst_data, dst_step);
Mat src(Size(width, height), stype, src_data, src_step);
Mat dst(Size(width, height), dtype, dst_data, dst_step);
Mat temp;
int src_channels = CV_MAT_CN(stype);
int dst_channels = CV_MAT_CN(dtype);
@ -1144,10 +1155,10 @@ static bool dftFilter2D(int stype, int dtype, int kernel_type,
// we just use that.
int corrDepth = ddepth;
if ((ddepth == CV_32F || ddepth == CV_64F) && src_data != dst_data) {
temp = Mat(Size(full_width, full_height), dtype, dst_data, dst_step);
temp = Mat(Size(width, height), dtype, dst_data, dst_step);
} else {
corrDepth = ddepth == CV_64F ? CV_64F : CV_32F;
temp.create(Size(full_width, full_height), CV_MAKETYPE(corrDepth, dst_channels));
temp.create(Size(width, height), CV_MAKETYPE(corrDepth, dst_channels));
}
crossCorr(src, kernel, temp, src.size(),
CV_MAKETYPE(corrDepth, src_channels),
@ -1158,9 +1169,9 @@ static bool dftFilter2D(int stype, int dtype, int kernel_type,
}
} else {
if (src_data != dst_data)
temp = Mat(Size(full_width, full_height), dtype, dst_data, dst_step);
temp = Mat(Size(width, height), dtype, dst_data, dst_step);
else
temp.create(Size(full_width, full_height), dtype);
temp.create(Size(width, height), dtype);
crossCorr(src, kernel, temp, src.size(),
CV_MAKETYPE(ddepth, src_channels),
anchor, delta, borderType);
@ -1293,6 +1304,7 @@ void filter2D(int stype, int dtype, int kernel_type,
res = dftFilter2D(stype, dtype, kernel_type,
src_data, src_step,
dst_data, dst_step,
width, height,
full_width, full_height,
offset_x, offset_y,
kernel_data, kernel_step,

@ -46,6 +46,8 @@
using namespace cv;
using namespace detail;
namespace {
/*
This is implementation of image segmentation algorithm GrabCut described in
"GrabCut - Interactive Foreground Extraction using Iterated Graph Cuts".
@ -229,6 +231,8 @@ void GMM::calcInverseCovAndDeterm(int ci, const double singularFix)
}
}
} // namespace
/*
Calculate beta - parameter of GrabCut algorithm.
beta = 1/(2*avg(sqr(||color[i] - color[j]||)))
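For readability, the beta definition quoted in the comment above, written out in LaTeX (same formula, nothing added):
\beta = \frac{1}{2\,\operatorname{avg}\left(\lVert \mathrm{color}_i - \mathrm{color}_j \rVert^{2}\right)}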
@ -380,12 +384,20 @@ static void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM
}
}
CV_Assert( !bgdSamples.empty() && !fgdSamples.empty() );
Mat _bgdSamples( (int)bgdSamples.size(), 3, CV_32FC1, &bgdSamples[0][0] );
kmeans( _bgdSamples, GMM::componentsCount, bgdLabels,
TermCriteria( CV_TERMCRIT_ITER, kMeansItCount, 0.0), 0, kMeansType );
Mat _fgdSamples( (int)fgdSamples.size(), 3, CV_32FC1, &fgdSamples[0][0] );
kmeans( _fgdSamples, GMM::componentsCount, fgdLabels,
TermCriteria( CV_TERMCRIT_ITER, kMeansItCount, 0.0), 0, kMeansType );
{
Mat _bgdSamples( (int)bgdSamples.size(), 3, CV_32FC1, &bgdSamples[0][0] );
int num_clusters = GMM::componentsCount;
num_clusters = std::min(num_clusters, (int)bgdSamples.size());
kmeans( _bgdSamples, num_clusters, bgdLabels,
TermCriteria( CV_TERMCRIT_ITER, kMeansItCount, 0.0), 0, kMeansType );
}
{
Mat _fgdSamples( (int)fgdSamples.size(), 3, CV_32FC1, &fgdSamples[0][0] );
int num_clusters = GMM::componentsCount;
num_clusters = std::min(num_clusters, (int)fgdSamples.size());
kmeans( _fgdSamples, num_clusters, fgdLabels,
TermCriteria( CV_TERMCRIT_ITER, kMeansItCount, 0.0), 0, kMeansType );
}
bgdGMM.initLearning();
for( int i = 0; i < (int)bgdSamples.size(); i++ )

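Side note on the initGMMs() change above: the patch clamps the requested cluster count to the number of collected samples before calling kmeans, since kmeans cannot form more clusters than it has samples. A minimal standalone sketch of that idea (illustrative names, plain OpenCV core API, not part of the patch):
#include <opencv2/core.hpp>
#include <algorithm>
// samples: one CV_32F row per sample, as in initGMMs(); labels receives a cluster index per row.
static void clusterSamplesSafely(const cv::Mat& samples, int requestedClusters, cv::Mat& labels)
{
    int K = std::min(requestedClusters, samples.rows);  // never ask for more clusters than samples
    cv::kmeans(samples, K, labels,
               cv::TermCriteria(cv::TermCriteria::COUNT, 10, 0.0),
               /*attempts=*/1, cv::KMEANS_PP_CENTERS);
}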
@ -2200,6 +2200,67 @@ TEST(Imgproc_Filter2D, dftFilter2d_regression_10683)
EXPECT_LE(cvtest::norm(dst, expected, NORM_INF), 2);
}
TEST(Imgproc_Filter2D, dftFilter2d_regression_13179)
{
uchar src_[24*24] = {
0, 40, 0, 0, 255, 0, 0, 78, 131, 0, 196, 0, 255, 0, 0, 0, 0, 255, 70, 0, 255, 0, 0, 0,
0, 0, 255, 204, 0, 0, 255, 93, 255, 0, 0, 255, 12, 0, 0, 0, 255, 121, 0, 255, 0, 0, 0, 255,
0, 178, 0, 25, 67, 0, 165, 0, 255, 0, 0, 181, 151, 175, 0, 0, 32, 0, 0, 255, 165, 93, 0, 255,
255, 255, 0, 0, 255, 126, 0, 0, 0, 0, 133, 29, 9, 0, 220, 255, 0, 142, 255, 255, 255, 0, 255, 0,
255, 32, 255, 0, 13, 237, 0, 0, 0, 0, 0, 19, 90, 0, 0, 85, 122, 62, 95, 29, 255, 20, 0, 0,
0, 0, 166, 41, 0, 48, 70, 0, 68, 0, 255, 0, 139, 7, 63, 144, 0, 204, 0, 0, 0, 98, 114, 255,
105, 0, 0, 0, 0, 255, 91, 0, 73, 0, 255, 0, 0, 0, 255, 198, 21, 0, 0, 0, 255, 43, 153, 128,
0, 98, 26, 0, 101, 0, 0, 0, 255, 0, 0, 0, 255, 77, 56, 0, 241, 0, 169, 132, 0, 255, 186, 255,
255, 87, 0, 1, 0, 0, 10, 39, 120, 0, 23, 69, 207, 0, 0, 0, 0, 84, 0, 0, 0, 0, 255, 0,
255, 0, 0, 136, 255, 77, 247, 0, 67, 0, 15, 255, 0, 143, 0, 243, 255, 0, 0, 238, 255, 0, 255, 8,
42, 0, 0, 255, 29, 0, 0, 0, 255, 255, 255, 75, 0, 0, 0, 255, 0, 0, 255, 38, 197, 0, 255, 87,
0, 123, 17, 0, 234, 0, 0, 149, 0, 0, 255, 16, 0, 0, 0, 255, 0, 255, 0, 38, 0, 114, 255, 76,
0, 0, 8, 0, 255, 0, 0, 0, 220, 0, 11, 255, 0, 0, 55, 98, 0, 0, 0, 255, 0, 175, 255, 110,
235, 0, 175, 0, 255, 227, 38, 206, 0, 0, 255, 246, 0, 0, 123, 183, 255, 0, 0, 255, 0, 156, 0, 54,
0, 255, 0, 202, 0, 0, 0, 0, 157, 0, 255, 63, 0, 0, 0, 0, 0, 255, 132, 0, 255, 0, 0, 0,
0, 0, 0, 255, 0, 0, 128, 126, 0, 243, 46, 7, 0, 211, 108, 166, 0, 0, 162, 227, 0, 204, 0, 51,
255, 216, 0, 0, 43, 0, 255, 40, 188, 188, 255, 0, 0, 255, 34, 0, 0, 168, 0, 0, 0, 35, 0, 0,
0, 80, 131, 255, 0, 255, 10, 0, 0, 0, 180, 255, 209, 255, 173, 34, 0, 66, 0, 49, 0, 255, 83, 0,
0, 204, 0, 91, 0, 0, 0, 205, 84, 0, 0, 0, 92, 255, 91, 0, 126, 0, 185, 145, 0, 0, 9, 0,
255, 0, 0, 255, 255, 0, 0, 255, 0, 0, 216, 0, 187, 221, 0, 0, 141, 0, 0, 209, 0, 0, 255, 0,
255, 0, 0, 154, 150, 0, 0, 0, 148, 0, 201, 255, 0, 255, 16, 0, 0, 160, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 255, 0, 255, 0, 255, 0, 255, 198, 255, 147, 131, 0, 255, 202, 0, 0, 0, 0, 255, 0,
0, 0, 0, 164, 181, 0, 0, 0, 69, 255, 31, 0, 255, 195, 0, 0, 255, 164, 109, 0, 0, 202, 0, 206,
0, 0, 61, 235, 33, 255, 77, 0, 0, 0, 0, 85, 0, 228, 0, 0, 0, 0, 255, 0, 0, 5, 255, 255
};
cv::Mat_<uchar> src(24, 24, src_);
uchar expected_[16*16] = {
0,255, 0, 0,255, 0, 0,255, 0, 0,255,255, 0,255, 0, 0,
0,255, 0, 0,255, 0, 0,255, 0, 0,255,255, 0,255, 0, 0,
0,255, 0, 0,255, 0, 0,255, 70, 0,255,255, 0,255, 0, 0,
0,234,138, 0,255, 0, 0,255, 8, 0,255,255, 0,255, 0, 0,
0, 0,255, 0,255,228, 0,255,255, 0,255,255, 0,255, 0, 5,
0, 0,255, 0,255, 0, 0,255, 0, 0,255,255, 0,255, 0, 0,
0,253, 0, 0,255, 0, 0,255, 0, 0,255,255, 0,255, 0, 0,
0,255, 0, 0,255, 0, 0,255, 0, 0,255, 93, 0,255, 0,255,
0,255, 0, 0,255, 0,182,255, 0, 0,255, 0, 0,255, 0, 0,
0, 0,253, 0,228, 0,255,255, 0, 0,255, 0, 0, 0, 0, 75,
0, 0,255, 0, 0, 0,255,255, 0,255,206, 0, 1,162, 0,255,
0, 0,255, 0, 0, 0,255,255, 0,255,255, 0, 0,255, 0,255,
0, 0,255, 0, 0, 0,255,255, 0,255,255, 0,255,255, 0,255,
0, 0,255,255, 0, 0,255, 0, 0,255,255, 0,255,168, 0,255,
0, 0,255,255, 0, 0,255, 26, 0,255,255, 0,255,255, 0,255,
0, 0,255,255, 0, 0,255, 0, 0,255,255, 0,255,255, 0,255,
};
cv::Mat_<uchar> expected(16, 16, expected_);
cv::Mat kernel = cv::getGaborKernel(cv::Size(13, 13), 8, 0, 3, 0.25);
cv::Mat roi(src, cv::Rect(0, 0, 16, 16));
cv::Mat filtered(16, 16, roi.type());
cv::filter2D(roi, filtered, -1, kernel);
EXPECT_LE(cvtest::norm(filtered, expected, cv::NORM_INF), 2);
}
TEST(Imgproc_MedianBlur, hires_regression_13409)
{
Mat src(2048, 2048, CV_8UC1), dst_hires, dst_ref;

@ -112,7 +112,8 @@ imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP
'goodFeaturesToTrack','grabCut','initUndistortRectifyMap', 'integral','integral2', 'isContourConvex', 'line', \
'matchShapes', 'matchTemplate','medianBlur', 'minAreaRect', 'minEnclosingCircle', 'moments', 'morphologyEx', \
'pointPolygonTest', 'putText','pyrDown','pyrUp','rectangle','remap', 'resize','sepFilter2D','threshold', \
'undistort','warpAffine','warpPerspective','watershed'],
'undistort','warpAffine','warpPerspective','watershed', \
'fillPoly', 'fillConvexPoly'],
'CLAHE': ['apply', 'collectGarbage', 'getClipLimit', 'getTilesGridSize', 'setClipLimit', 'setTilesGridSize']}
objdetect = {'': ['groupRectangles'],
@ -170,6 +171,8 @@ aruco = {'': ['detectMarkers', 'drawDetectedMarkers', 'drawAxis', 'estimatePoseS
'aruco_CharucoBoard': ['create', 'draw'],
}
calib3d = {'': ['findHomography']}
def makeWhiteList(module_list):
wl = {}
for m in module_list:
@ -180,7 +183,7 @@ def makeWhiteList(module_list):
wl[k] = m[k]
return wl
white_list = makeWhiteList([core, imgproc, objdetect, video, dnn, features2d, photo, aruco])
white_list = makeWhiteList([core, imgproc, objdetect, video, dnn, features2d, photo, aruco, calib3d])
# Features to be exported
export_enums = False

@ -0,0 +1,43 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
if (typeof module !== 'undefined' && module.exports) {
// The environment is Node.js
var cv = require('./opencv.js'); // eslint-disable-line no-var
}
QUnit.module('Camera Calibration and 3D Reconstruction', {});
QUnit.test('constants', function(assert) {
assert.strictEqual(typeof cv.LMEDS, 'number');
assert.strictEqual(typeof cv.RANSAC, 'number');
assert.strictEqual(typeof cv.RHO, 'number');
});
QUnit.test('findHomography', function(assert) {
let srcPoints = cv.matFromArray(4, 1, cv.CV_32FC2, [
56,
65,
368,
52,
28,
387,
389,
390,
]);
let dstPoints = cv.matFromArray(4, 1, cv.CV_32FC2, [
0,
0,
300,
0,
0,
300,
300,
300,
]);
const mat = cv.findHomography(srcPoints, dstPoints);
assert.ok(mat instanceof cv.Mat);
});

@ -201,6 +201,89 @@ QUnit.test('test_imgProc', function(assert) {
expected_img.delete();
compare_result.delete();
}
// fillPoly
{
let img_width = 6;
let img_height = 6;
let img = new cv.Mat.zeros(img_height, img_width, cv.CV_8UC1);
let npts = 4;
let square_point_data = new Uint8Array([
1, 1,
4, 1,
4, 4,
1, 4]);
let square_points = cv.matFromArray(npts, 1, cv.CV_32SC2, square_point_data);
let pts = new cv.MatVector();
pts.push_back (square_points);
let color = new cv.Scalar (255);
let expected_img_data = new Uint8Array([
0, 0, 0, 0, 0, 0,
0, 255, 255, 255, 255, 0,
0, 255, 255, 255, 255, 0,
0, 255, 255, 255, 255, 0,
0, 255, 255, 255, 255, 0,
0, 0, 0, 0, 0, 0]);
let expected_img = cv.matFromArray(img_height, img_width, cv.CV_8UC1, expected_img_data);
cv.fillPoly(img, pts, color);
let compare_result = new cv.Mat(img_height, img_width, cv.CV_8UC1);
cv.compare (img, expected_img, compare_result, cv.CMP_EQ);
// expect every pixel to be the same.
assert.equal (cv.countNonZero(compare_result), img.total());
img.delete();
square_points.delete();
pts.delete();
expected_img.delete();
compare_result.delete();
}
// fillConvexPoly
{
let img_width = 6;
let img_height = 6;
let img = new cv.Mat.zeros(img_height, img_width, cv.CV_8UC1);
let npts = 4;
let square_point_data = new Uint8Array([
1, 1,
4, 1,
4, 4,
1, 4]);
let square_points = cv.matFromArray(npts, 1, cv.CV_32SC2, square_point_data);
let color = new cv.Scalar (255);
let expected_img_data = new Uint8Array([
0, 0, 0, 0, 0, 0,
0, 255, 255, 255, 255, 0,
0, 255, 255, 255, 255, 0,
0, 255, 255, 255, 255, 0,
0, 255, 255, 255, 255, 0,
0, 0, 0, 0, 0, 0]);
let expected_img = cv.matFromArray(img_height, img_width, cv.CV_8UC1, expected_img_data);
cv.fillConvexPoly(img, square_points, color);
let compare_result = new cv.Mat(img_height, img_width, cv.CV_8UC1);
cv.compare (img, expected_img, compare_result, cv.CMP_EQ);
// expect every pixel to be the same.
assert.equal (cv.countNonZero(compare_result), img.total());
img.delete();
square_points.delete();
expected_img.delete();
compare_result.delete();
}
});
QUnit.test('test_segmentation', function(assert) {

@ -30,6 +30,7 @@
<script type="application/javascript" src="test_video.js"></script>
<script type="application/javascript" src="test_photo.js"></script>
<script type="application/javascript" src="test_features2d.js"></script>
<script type="application/javascript" src="test_calib3d.js"></script>
<script type='text/javascript'>
QUnit.config.autostart = false;

@ -46,7 +46,9 @@ testrunner.run(
code: 'opencv.js',
tests: ['test_mat.js', 'test_utils.js', 'test_imgproc.js',
'test_objdetect.js', 'test_video.js', 'test_features2d.js',
'test_photo.js'],
'test_photo.js',
'test_calib3d.js'
],
},
function(err, report) {
console.log(report.failed + ' failed, ' + report.passed + ' passed');

@ -75,7 +75,7 @@ TEST_P(Objdetect_QRCode, regression)
ASSERT_TRUE(qrcode.detect(src, corners));
#endif
const std::string dataset_config = findDataFile(root + "dataset_config.json", false);
const std::string dataset_config = findDataFile(root + "dataset_config.json");
FileStorage file_config(dataset_config, FileStorage::READ);
ASSERT_TRUE(file_config.isOpened()) << "Can't read validation data: " << dataset_config;
{

@ -212,6 +212,36 @@ static inline void applyTestTag(const std::string& tag1, const std::string& tag2
{ applyTestTag_(tag1); applyTestTag_(tag2); applyTestTag_(tag3); applyTestTag_(tag4); checkTestTags(); }
/** Append global skip test tags
*/
void registerGlobalSkipTag(const std::string& skipTag);
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2)
{ registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); }
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3)
{ registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); }
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4)
{ registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); registerGlobalSkipTag(tag4); }
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4,
const std::string& tag5)
{
registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); registerGlobalSkipTag(tag4);
registerGlobalSkipTag(tag5);
}
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4,
const std::string& tag5, const std::string& tag6)
{
registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); registerGlobalSkipTag(tag4);
registerGlobalSkipTag(tag5); registerGlobalSkipTag(tag6);
}
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4,
const std::string& tag5, const std::string& tag6, const std::string& tag7)
{
registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); registerGlobalSkipTag(tag4);
registerGlobalSkipTag(tag5); registerGlobalSkipTag(tag6); registerGlobalSkipTag(tag7);
}
class TS;
int64 readSeed(const char* str);
@ -758,7 +788,7 @@ int main(int argc, char **argv) \
{ \
CV_TRACE_FUNCTION(); \
{ CV_TRACE_REGION("INIT"); \
using namespace cvtest; \
using namespace cvtest; using namespace opencv_test; \
TS* ts = TS::ptr(); \
ts->init(resourcesubdir); \
__CV_TEST_EXEC_ARGS(CV_TEST_INIT0_ ## INIT0) \

@ -911,25 +911,35 @@ void addDataSearchSubDirectory(const std::string& subdir)
static std::string findData(const std::string& relative_path, bool required, bool findDirectory)
{
#define TEST_TRY_FILE_WITH_PREFIX(prefix) \
#define CHECK_FILE_WITH_PREFIX(prefix, result) \
{ \
result.clear(); \
std::string path = path_join(prefix, relative_path); \
/*printf("Trying %s\n", path.c_str());*/ \
if (findDirectory) \
{ \
if (isDirectory(path)) \
return path; \
result = path; \
} \
else \
{ \
FILE* f = fopen(path.c_str(), "rb"); \
if(f) { \
fclose(f); \
return path; \
result = path; \
} \
} \
}
#define TEST_TRY_FILE_WITH_PREFIX(prefix) \
{ \
std::string result__; \
CHECK_FILE_WITH_PREFIX(prefix, result__); \
if (!result__.empty()) \
return result__; \
}
const std::vector<std::string>& search_path = TS::ptr()->data_search_path;
for(size_t i = search_path.size(); i > 0; i--)
{
@ -956,7 +966,17 @@ static std::string findData(const std::string& relative_path, bool required, boo
{
const std::string& subdir = search_subdir[i - 1];
std::string prefix = path_join(datapath, subdir);
TEST_TRY_FILE_WITH_PREFIX(prefix);
std::string result_;
CHECK_FILE_WITH_PREFIX(prefix, result_);
#if 1 // check for misused 'optional' mode
if (!required && !result_.empty())
{
std::cout << "TEST ERROR: Don't use 'optional' findData() for " << relative_path << std::endl;
CV_Assert(required || result_.empty());
}
#endif
if (!result_.empty())
return result_;
}
}
}

@ -13,6 +13,30 @@ static bool printTestTag = false;
static std::vector<std::string> currentDirectTestTags, currentImpliedTestTags;
static std::vector<const ::testing::TestInfo*> skipped_tests;
static std::map<std::string, int>& getTestTagsSkipCounts()
{
static std::map<std::string, int> testTagsSkipCounts;
return testTagsSkipCounts;
}
static std::map<std::string, int>& getTestTagsSkipExtraCounts()
{
static std::map<std::string, int> testTagsSkipExtraCounts;
return testTagsSkipExtraCounts;
}
static void increaseTagsSkipCount(const std::string& tag, bool isMain)
{
std::map<std::string, int>& counts = isMain ? getTestTagsSkipCounts() : getTestTagsSkipExtraCounts();
std::map<std::string, int>::iterator i = counts.find(tag);
if (i == counts.end())
{
counts[tag] = 1;
}
else
{
i->second++;
}
}
static std::vector<std::string>& getTestTagsSkipList()
{
static std::vector<std::string> testSkipWithTags;
@ -33,6 +57,17 @@ static std::vector<std::string>& getTestTagsSkipList()
return testSkipWithTags;
}
void registerGlobalSkipTag(const std::string& skipTag)
{
std::vector<std::string>& skipTags = getTestTagsSkipList();
for (size_t i = 0; i < skipTags.size(); ++i)
{
if (skipTag == skipTags[i])
return; // duplicate
}
skipTags.push_back(skipTag);
}
static std::vector<std::string>& getTestTagsForceList()
{
static std::vector<std::string> getTestTagsForceList;
@ -156,7 +191,27 @@ public:
{
if (!skipped_tests.empty())
{
std::cout << "[ SKIP ] " << skipped_tests.size() << " tests via tags" << std::endl;
std::cout << "[ SKIPSTAT ] " << skipped_tests.size() << " tests via tags" << std::endl;
const std::vector<std::string>& skipTags = getTestTagsSkipList();
const std::map<std::string, int>& counts = getTestTagsSkipCounts();
const std::map<std::string, int>& countsExtra = getTestTagsSkipExtraCounts();
for (std::vector<std::string>::const_iterator i = skipTags.begin(); i != skipTags.end(); ++i)
{
int c1 = 0;
std::map<std::string, int>::const_iterator i1 = counts.find(*i);
if (i1 != counts.end()) c1 = i1->second;
int c2 = 0;
std::map<std::string, int>::const_iterator i2 = countsExtra.find(*i);
if (i2 != countsExtra.end()) c2 = i2->second;
if (c2 > 0)
{
std::cout << "[ SKIPSTAT ] TAG='" << *i << "' skip " << c1 << " tests (" << c2 << " times in extra skip list)" << std::endl;
}
else if (c1 > 0)
{
std::cout << "[ SKIPSTAT ] TAG='" << *i << "' skip " << c1 << " tests" << std::endl;
}
}
}
skipped_tests.clear();
}
@ -255,13 +310,14 @@ void checkTestTags()
if (isTestTagForced(testTag))
return;
}
std::string skip_message;
for (size_t i = 0; i < testTags.size(); ++i)
{
const std::string& testTag = testTags[i];
if (isTestTagSkipped(testTag, skipTag))
{
skipped_tests.push_back(::testing::UnitTest::GetInstance()->current_test_info());
throw SkipTestException("Test with tag '" + testTag + "' is skipped ('" + skipTag + "' is in skip list)");
increaseTagsSkipCount(skipTag, skip_message.empty());
if (skip_message.empty()) skip_message = "Test with tag '" + testTag + "' is skipped ('" + skipTag + "' is in skip list)";
}
}
const std::vector<std::string>& testTagsImplied = currentImpliedTestTags;
@ -270,10 +326,16 @@ void checkTestTags()
const std::string& testTag = testTagsImplied[i];
if (isTestTagSkipped(testTag, skipTag))
{
skipped_tests.push_back(::testing::UnitTest::GetInstance()->current_test_info());
throw SkipTestException("Test with tag '" + testTag + "' is skipped ('" + skipTag + "' is in skip list)");
increaseTagsSkipCount(skipTag, skip_message.empty());
if (skip_message.empty()) skip_message = "Test with tag '" + testTag + "' is skipped (implied '" + skipTag + "' is in skip list)";
}
}
if (!skip_message.empty())
{
skipped_tests.push_back(::testing::UnitTest::GetInstance()->current_test_info());
throw SkipTestException(skip_message);
}
}
static bool applyTestTagImpl(const std::string& tag, bool direct = false)
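To summarize the accounting introduced above: the first skip-list tag that matches a test is counted as the main skip reason, further matches count as extra hits, and per-tag totals are printed in the [ SKIPSTAT ] summary. A self-contained sketch of that counting logic (plain C++, hypothetical tag names, not the OpenCV test API):
#include <iostream>
#include <map>
#include <string>
#include <vector>
int main()
{
    std::map<std::string, int> mainCounts, extraCounts;
    // Each skipped test lists the skip-list tags it matched, in match order.
    std::vector<std::vector<std::string> > skippedTests;
    skippedTests.push_back(std::vector<std::string>(1, "dnn_skip_ie_myriad"));
    skippedTests.push_back(std::vector<std::string>(1, "mem_2gb"));
    skippedTests.back().push_back("verylong");
    for (size_t t = 0; t < skippedTests.size(); ++t)
        for (size_t i = 0; i < skippedTests[t].size(); ++i)
            (i == 0 ? mainCounts : extraCounts)[skippedTests[t][i]]++;  // first match = main reason
    for (std::map<std::string, int>::const_iterator it = mainCounts.begin(); it != mainCounts.end(); ++it)
    {
        std::cout << "[ SKIPSTAT ] TAG='" << it->first << "' skip " << it->second << " tests";
        if (extraCounts[it->first] > 0)
            std::cout << " (" << extraCounts[it->first] << " times in extra skip list)";
        std::cout << std::endl;
    }
    return 0;
}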

@ -17,7 +17,7 @@ using namespace cv;
bool DeviceHandler::init(MFXVideoSession &session)
{
mfxStatus res = MFX_ERR_NONE;
mfxIMPL impl = MFX_IMPL_AUTO;
mfxIMPL impl = MFX_IMPL_AUTO_ANY;
mfxVersion ver = { {19, 1} };
res = session.Init(impl, &ver);

@ -112,9 +112,9 @@ if(NOT __IN_TRY_COMPILE)
message(FATAL_ERROR "Can't prepare xcodebuild_wrapper")
endif()
if(APPLE_FRAMEWORK AND BUILD_SHARED_LIBS)
set(XCODEBUILD_EXTRA_ARGS "${XCODEBUILD_EXTRA_ARGS} IPHONEOS_DEPLOYMENT_TARGET=${IPHONEOS_DEPLOYMENT_TARGET} -sdk ${CMAKE_OSX_SYSROOT}")
set(XCODEBUILD_EXTRA_ARGS "${XCODEBUILD_EXTRA_ARGS} IPHONEOS_DEPLOYMENT_TARGET=${IPHONEOS_DEPLOYMENT_TARGET} CODE_SIGN_IDENTITY='' CODE_SIGNING_REQUIRED=NO -sdk ${CMAKE_OSX_SYSROOT}")
else()
set(XCODEBUILD_EXTRA_ARGS "${XCODEBUILD_EXTRA_ARGS} IPHONEOS_DEPLOYMENT_TARGET=${IPHONEOS_DEPLOYMENT_TARGET} ARCHS=${IOS_ARCH} -sdk ${CMAKE_OSX_SYSROOT}")
set(XCODEBUILD_EXTRA_ARGS "${XCODEBUILD_EXTRA_ARGS} IPHONEOS_DEPLOYMENT_TARGET=${IPHONEOS_DEPLOYMENT_TARGET} CODE_SIGN_IDENTITY='' CODE_SIGNING_REQUIRED=NO ARCHS=${IOS_ARCH} -sdk ${CMAKE_OSX_SYSROOT}")
endif()
configure_file("${CMAKE_CURRENT_LIST_DIR}/xcodebuild_wrapper.in" "${_xcodebuild_wrapper_tmp}" @ONLY)
file(COPY "${_xcodebuild_wrapper_tmp}" DESTINATION ${CMAKE_BINARY_DIR} FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)

@ -116,7 +116,7 @@ class Builder:
"-DWITH_QUIRC=OFF",
"-DBUILD_ZLIB=ON",
"-DBUILD_opencv_apps=OFF",
"-DBUILD_opencv_calib3d=ON", # No bindings provided. This module is used as a dependency for other modules.
"-DBUILD_opencv_calib3d=ON",
"-DBUILD_opencv_dnn=ON",
"-DBUILD_opencv_features2d=ON",
"-DBUILD_opencv_flann=ON", # No bindings provided. This module is used as a dependency for other modules.

@ -40,34 +40,42 @@ set "PATH=%PATH%;%SCRIPTDIR%\..\..\build\bin\"
:: Detect compiler
cl /? >NUL 2>NUL <NUL
if %ERRORLEVEL% == 0 (
goto detect_cmake
)
PUSHD %CD%
CALL :try_call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\VC\Auxiliary\Build\vcvars64.bat"
IF ERRORLEVEL 1 (
CALL :try_call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
)
IF ERRORLEVEL 1 (
CALL :try_call "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvars64.bat"
)
IF ERRORLEVEL 1 (
CALL :try_call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\VC\Auxiliary\Build\vcvars64.bat"
)
IF ERRORLEVEL 1 (
CALL :try_call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
)
IF ERRORLEVEL 1 (
CALL :try_call "C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Auxiliary\Build\vcvars64.bat"
)
IF ERRORLEVEL 1 (
CALL :try_call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
)
POPD
cl /? >NUL 2>NUL <NUL
if %ERRORLEVEL% NEQ 0 (
PUSHD %CD%
if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\VC\Auxiliary\Build\vcvars64.bat" (
CALL "C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\VC\Auxiliary\Build\vcvars64.bat"
goto check_msvc
)
if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\VC\Auxiliary\Build\vcvars64.bat" (
CALL "C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
goto check_msvc
)
if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Auxiliary\Build\vcvars64.bat" (
CALL "C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Auxiliary\Build\vcvars64.bat"
goto check_msvc
)
if exist "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" (
CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
goto check_msvc
)
:check_msvc
POPD
cl /? >NUL 2>NUL <NUL
if %ERRORLEVEL% NEQ 0 (
set "MSG=Can't detect Microsoft Visial Studio C++ compiler (cl.exe). MSVS 2015/2017 are supported only from standard locations"
goto die
)
set "MSG=Can't detect Microsoft Visial Studio C++ compiler (cl.exe). MSVS 2015/2017/2019 are supported only from standard locations"
goto die
)
:: Detect CMake
:detect_cmake
cmake --version >NUL 2>NUL
if %ERRORLEVEL% EQU 0 GOTO :CMAKE_FOUND
@ -84,6 +92,7 @@ goto die
set CMAKE_FOUND=1
call :execute cmake --version
echo CMake is detected
where cmake
:: Detect available MSVS version
if NOT DEFINED VisualStudioVersion (
@ -91,19 +100,30 @@ if NOT DEFINED VisualStudioVersion (
goto die
)
if "%VisualStudioVersion%" == "14.0" (
set CMAKE_GENERATOR="Visual Studio 14 Win64"
set "CMAKE_GENERATOR=-G^"Visual Studio 14 Win64^""
set "BUILD_DIR_SUFFIX=.vc14"
set "PATH=%PATH%;%SCRIPTDIR%\..\..\build\x64\vc14\bin\"
) else (
if "%VisualStudioVersion%" == "15.0" (
set CMAKE_GENERATOR="Visual Studio 15 Win64"
set "CMAKE_GENERATOR=-G^"Visual Studio 15 Win64^""
set "BUILD_DIR_SUFFIX=.vc15"
set "PATH=%PATH%;%SCRIPTDIR%\..\..\build\x64\vc15\bin\"
) else (
set "MSG=Unsupported MSVS version. VisualStudioVersion=%VisualStudioVersion%"
goto die
if "%VisualStudioVersion%" == "16.0" (
echo.==========================================
echo.* Note: MSVS 2019 requires CMake 3.14+ *
echo.==========================================
set "CMAKE_GENERATOR=-G^"Visual Studio 16 2019^" -A x64"
set "BUILD_DIR_SUFFIX=.vc16"
set "PATH=%PATH%;%SCRIPTDIR%\..\..\build\x64\vc15\bin\"
) else (
set "MSG=Unsupported MSVS version. VisualStudioVersion=%VisualStudioVersion%"
goto die
)
)
)
set "BUILD_DIR=%SRC_DIR%\build_%SRC_NAME%"
set "BUILD_DIR=%SRC_DIR%\build_%SRC_NAME%%BUILD_DIR_SUFFIX%"
call :set_title Create build directory
if NOT exist "%BUILD_DIR%" ( call :execute md "%BUILD_DIR%" )
PUSHD "%BUILD_DIR%"
@ -111,7 +131,7 @@ if NOT exist "%BUILD_DIR%/sample" ( call :execute md "%BUILD_DIR%/sample" )
call :execute copy /Y "%SCRIPTDIR%/CMakeLists.example.in" "%BUILD_DIR%/sample/CMakeLists.txt"
call :set_title Configuring via CMake
call :execute cmake -G%CMAKE_GENERATOR% "%BUILD_DIR%\sample" -DEXAMPLE_NAME=%SRC_NAME% "-DEXAMPLE_FILE=%SRC_FILENAME%" "-DOpenCV_DIR=%SCRIPTDIR%\..\..\build"
call :execute cmake %CMAKE_GENERATOR% "%BUILD_DIR%\sample" -DEXAMPLE_NAME=%SRC_NAME% "-DEXAMPLE_FILE=%SRC_FILENAME%" "-DOpenCV_DIR=%SCRIPTDIR%\..\..\build"
if %ERRORLEVEL% NEQ 0 (
set "MSG=CMake configuration step failed: %BUILD_DIR%"
goto die
@ -176,6 +196,14 @@ exit /B 0
endlocal & set %2=%_dir%
EXIT /B 0
:try_call
IF EXIST %1 (
CALL %*
EXIT /B
) ELSE (
EXIT /B 1
)
:: 'goto die' instead of 'call'
:die
TITLE OpenCV sample: ERROR: %MSG%

@ -3,12 +3,12 @@
<html>
<head>
<script async src="../../opencv.js" type="text/javascript"></script>
<script src="../../utils.js" type="text/javascript"></script>
<script type='text/javascript'>
var netDet = undefined, netRecogn = undefined;
var persons = {};
var utils = new Utils('');
//! [Run face detection model]
function detectFaces(img) {
@ -68,6 +68,7 @@ function recognize(face) {
//! [Recognize]
function loadModels(callback) {
var utils = new Utils('');
var proto = 'https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt';
var weights = 'https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel';
var recognModel = 'https://raw.githubusercontent.com/pyannote/pyannote-data/master/openface.nn4.small2.v1.t7';
@ -187,9 +188,9 @@ function main() {
};
// Load opencv.js
utils.loadOpenCv(() => {
cv['onRuntimeInitialized']=()=>{
main();
});
};
</script>
</head>
