Merge pull request #18220 from Omar-AE:hddl-supported

* added HDDL VPU support

* changed to return true in one line if any device is connected

* dnn: use releaseHDDLPlugin()

* dnn(hddl): fix conditions
Omar Alzaibaq authored 4 years ago; committed by GitHub
parent 2c6a2f0381
commit a316b11aaa
21 changed files:

 1. modules/dnn/include/opencv2/dnn/dnn.hpp (4 changes)
 2. modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp (5 changes)
 3. modules/dnn/perf/perf_net.cpp (4 changes)
 4. modules/dnn/src/dnn.cpp (19 changes)
 5. modules/dnn/src/ie_ngraph.cpp (5 changes)
 6. modules/dnn/src/layers/blank_layer.cpp (2 changes)
 7. modules/dnn/src/layers/convolution_layer.cpp (5 changes)
 8. modules/dnn/src/layers/mvn_layer.cpp (5 changes)
 9. modules/dnn/src/layers/normalize_bbox_layer.cpp (3 changes)
10. modules/dnn/src/layers/padding_layer.cpp (5 changes)
11. modules/dnn/src/layers/pooling_layer.cpp (2 changes)
12. modules/dnn/src/layers/proposal_layer.cpp (10 changes)
13. modules/dnn/src/layers/slice_layer.cpp (2 changes)
14. modules/dnn/src/op_inf_engine.cpp (48 changes)
15. modules/dnn/test/test_common.impl.hpp (11 changes)
16. modules/dnn/test/test_ie_models.cpp (2 changes)
17. samples/dnn/classification.py (5 changes)
18. samples/dnn/human_parsing.py (5 changes)
19. samples/dnn/object_detection.py (5 changes)
20. samples/dnn/segmentation.py (5 changes)
21. samples/dnn/virtual_try_on.py (5 changes)

@@ -93,7 +93,8 @@ CV__DNN_INLINE_NS_BEGIN
DNN_TARGET_VULKAN,
DNN_TARGET_FPGA, //!< FPGA device with CPU fallbacks using Inference Engine's Heterogeneous plugin.
DNN_TARGET_CUDA,
- DNN_TARGET_CUDA_FP16
+ DNN_TARGET_CUDA_FP16,
+ DNN_TARGET_HDDL
};
CV_EXPORTS std::vector< std::pair<Backend, Target> > getAvailableBackends();
@@ -571,6 +572,7 @@ CV__DNN_INLINE_NS_BEGIN
* | DNN_TARGET_FPGA | | + | | |
* | DNN_TARGET_CUDA | | | | + |
* | DNN_TARGET_CUDA_FP16 | | | | + |
+ * | DNN_TARGET_HDDL | | + | | |
*/
CV_WRAP void setPreferableTarget(int targetId);
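For reference, a minimal usage sketch of the new target from Python (the model and image paths are placeholders; assumes an Inference Engine build of OpenCV with HDDL hardware attached):

    import cv2 as cv

    # Load a model (placeholder paths).
    net = cv.dnn.readNet('model.xml', 'model.bin')

    # Route execution to the Inference Engine backend and the HDDL VPUs.
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_HDDL)

    blob = cv.dnn.blobFromImage(cv.imread('input.jpg'), size=(224, 224))
    net.setInput(blob)
    out = net.forward()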

@@ -58,6 +58,11 @@ CV_EXPORTS_W void resetMyriadDevice();
CV_EXPORTS_W cv::String getInferenceEngineVPUType();
+ /** @brief Release a HDDL plugin.
+ */
+ CV_EXPORTS_W void releaseHDDLPlugin();
CV__DNN_INLINE_NS_END
}} // namespace
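A sketch of the intended call pattern, assuming the CV_EXPORTS_W wrapper is exposed to Python as cv.dnn.releaseHDDLPlugin:

    import cv2 as cv

    # ... run inference with net.setPreferableTarget(cv.dnn.DNN_TARGET_HDDL) ...

    # Release the shared HDDL plugin so another process can use the device,
    # analogous to resetMyriadDevice() for a single Myriad stick.
    cv.dnn.releaseHDDLPlugin()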

@@ -130,7 +130,7 @@ PERF_TEST_P_(DNNTestNetwork, OpenFace)
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_HDDL))
throw SkipTestException("");
#endif
processNet("dnn/openface_nn4.small2.v1.t7", "", "",
@@ -172,7 +172,7 @@ PERF_TEST_P_(DNNTestNetwork, DenseNet_121)
PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
{
if (backend == DNN_BACKEND_HALIDE ||
- (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD))
+ (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_HDDL)))
throw SkipTestException("");
// The same .caffemodel but modified .prototxt
// See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp

@@ -122,6 +122,8 @@ public:
{
if (std::string::npos != i->find("MYRIAD") && target == DNN_TARGET_MYRIAD)
return true;
+ if (std::string::npos != i->find("HDDL") && target == DNN_TARGET_HDDL)
+ return true;
else if (std::string::npos != i->find("FPGA") && target == DNN_TARGET_FPGA)
return true;
else if (std::string::npos != i->find("CPU") && target == DNN_TARGET_CPU)
@@ -184,6 +186,14 @@ private:
#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_MYRIAD));
#endif
}
+ if (checkIETarget(DNN_TARGET_HDDL)) {
+ #ifdef HAVE_DNN_IE_NN_BUILDER_2019
+ backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_HDDL));
+ #endif
+ #ifdef HAVE_DNN_NGRAPH
+ backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_HDDL));
+ #endif
+ }
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
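With this registration, DNN_TARGET_HDDL is reported by the availability queries whenever checkIETarget(DNN_TARGET_HDDL) succeeds at runtime; a quick check from Python (sketch):

    import cv2 as cv

    # Lists only targets that this build supports and that probe successfully.
    targets = cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
    print(cv.dnn.DNN_TARGET_HDDL in targets)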
@@ -1379,6 +1389,7 @@ struct Net::Impl : public detail::NetImplBase
preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16 ||
preferableTarget == DNN_TARGET_MYRIAD ||
+ preferableTarget == DNN_TARGET_HDDL ||
preferableTarget == DNN_TARGET_FPGA
);
}
@@ -1813,7 +1824,7 @@ struct Net::Impl : public detail::NetImplBase
INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
supportsCPUFallback;
// TODO: there is a bug in Myriad plugin with custom layers shape infer.
- if (preferableTarget == DNN_TARGET_MYRIAD)
+ if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
{
for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
{
@@ -1823,6 +1834,7 @@ struct Net::Impl : public detail::NetImplBase
// TODO: fix these workarounds
if (preferableTarget == DNN_TARGET_MYRIAD ||
+ preferableTarget == DNN_TARGET_HDDL ||
preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16)
customizable &= ld.type != "Concat";
@@ -1910,6 +1922,7 @@ struct Net::Impl : public detail::NetImplBase
// Convert weights in FP16 for specific targets.
if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
preferableTarget == DNN_TARGET_MYRIAD ||
+ preferableTarget == DNN_TARGET_HDDL ||
preferableTarget == DNN_TARGET_FPGA) && !fused)
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
@@ -2104,7 +2117,7 @@ struct Net::Impl : public detail::NetImplBase
bool customizable = ld.id != 0 && supportsCPUFallback;
// TODO: there is a bug in Myriad plugin with custom layers shape infer.
- if (preferableTarget == DNN_TARGET_MYRIAD)
+ if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
{
for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
{
@@ -2114,6 +2127,7 @@ struct Net::Impl : public detail::NetImplBase
// TODO: fix these workarounds
if (preferableTarget == DNN_TARGET_MYRIAD ||
+ preferableTarget == DNN_TARGET_HDDL ||
preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16)
customizable &= ld.type != "Concat";
@@ -4541,6 +4555,7 @@ string Net::Impl::dump()
case DNN_TARGET_OPENCL: out << "OCL"; colorId = 1; break;
case DNN_TARGET_OPENCL_FP16: out << "OCL_FP16"; colorId = 2; break;
case DNN_TARGET_MYRIAD: out << "MYRIAD"; colorId = 3; break;
+ case DNN_TARGET_HDDL: out << "HDDL"; colorId = 8; break;
case DNN_TARGET_VULKAN: out << "VULKAN"; colorId = 7; break;
case DNN_TARGET_FPGA: out << "FPGA"; colorId = 4; break;
case DNN_TARGET_CUDA: out << "CUDA"; colorId = 5; break;

@@ -556,6 +556,9 @@ void InfEngineNgraphNet::init(Target targetId)
case DNN_TARGET_MYRIAD:
device_name = "MYRIAD";
break;
+ case DNN_TARGET_HDDL:
+ device_name = "HDDL";
+ break;
case DNN_TARGET_FPGA:
device_name = "FPGA";
break;
@@ -683,7 +686,7 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
#endif
}
std::map<std::string, std::string> config;
- if (device_name == "MYRIAD") {
+ if (device_name == "MYRIAD" || device_name == "HDDL") {
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
config.emplace("MYRIAD_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
#else

@@ -125,7 +125,7 @@ public:
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
- if (preferableTarget == DNN_TARGET_MYRIAD)
+ if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
{
ieLayer.setType("Copy");
}

@@ -325,9 +325,10 @@ public:
return false;
if (ksize == 3)
return preferableTarget == DNN_TARGET_CPU;
- if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
+ if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || !isMyriad) && blobs.empty())
return false;
- return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
+ return (!isMyriad || dilation.width == dilation.height);
}
#endif
if (backendId == DNN_BACKEND_OPENCV)

@@ -126,7 +126,10 @@ public:
{
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- return !zeroDev && (preferableTarget != DNN_TARGET_MYRIAD || eps <= 1e-7f);
+ {
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
+ return !zeroDev && (!isMyriad || eps <= 1e-7f);
+ }
#endif
#ifdef HAVE_DNN_NGRAPH
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)

@@ -75,7 +75,8 @@ public:
if (pnorm != 2)
return false;
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && preferableTarget == DNN_TARGET_MYRIAD)
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && isMyriad)
return !acrossSpatial;
return startAxis == 1;

@@ -103,9 +103,12 @@ public:
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ {
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
- (preferableTarget != DNN_TARGET_MYRIAD ||
+ (!isMyriad ||
(dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
+ }
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||

@@ -202,7 +202,7 @@ public:
return false;
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
- if (preferableTarget == DNN_TARGET_MYRIAD) {
+ if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL) {
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2) ) {
return !isMyriadX();

@@ -95,8 +95,14 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && preferableTarget != DNN_TARGET_MYRIAD);
+ #ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ {
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
+ return !isMyriad;
+ }
+ #endif
+ return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,

@@ -492,7 +492,7 @@ public:
std::vector<size_t> axes, offsets, dims;
int from, to, step;
int numDims = finalSliceRanges[0].size();
- if (preferableTarget == DNN_TARGET_MYRIAD)
+ if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
{
from = axis;
to = numDims;

@@ -367,6 +367,7 @@ void InfEngineBackendNet::init(Target targetId)
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
// Inference Engine determines network precision by ports.
InferenceEngine::Precision p = (targetId == DNN_TARGET_MYRIAD ||
+ targetId == DNN_TARGET_HDDL ||
targetId == DNN_TARGET_OPENCL_FP16) ?
InferenceEngine::Precision::FP16 :
InferenceEngine::Precision::FP32;
@@ -391,6 +392,9 @@ void InfEngineBackendNet::init(Target targetId)
case DNN_TARGET_MYRIAD:
device_name = "MYRIAD";
break;
+ case DNN_TARGET_HDDL:
+ device_name = "HDDL";
+ break;
case DNN_TARGET_FPGA:
device_name = "FPGA";
break;
@@ -652,20 +656,20 @@ InferenceEngine::Core& getCore(const std::string& id)
#endif
#if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
- static bool detectMyriadX_()
+ static bool detectMyriadX_(std::string device)
{
AutoLock lock(getInitializationMutex());
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R3)
// Lightweight detection
- InferenceEngine::Core& ie = getCore("MYRIAD");
+ InferenceEngine::Core& ie = getCore(device);
const std::vector<std::string> devices = ie.GetAvailableDevices();
for (std::vector<std::string>::const_iterator i = devices.begin(); i != devices.end(); ++i)
{
- if (i->find("MYRIAD") != std::string::npos)
+ if (i->find(device) != std::string::npos)
{
const std::string name = ie.GetMetric(*i, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
CV_LOG_INFO(NULL, "Myriad device: " << name);
- return name.find("MyriadX") != std::string::npos || name.find("Myriad X") != std::string::npos;
+ return name.find("MyriadX") != std::string::npos || name.find("Myriad X") != std::string::npos || name.find("HDDL") != std::string::npos;
}
}
return false;
@@ -702,13 +706,13 @@ static bool detectMyriadX_()
InferenceEngine::InferenceEnginePluginPtr enginePtr;
{
auto& sharedPlugins = getSharedPlugins();
- auto pluginIt = sharedPlugins.find("MYRIAD");
+ auto pluginIt = sharedPlugins.find(device);
if (pluginIt != sharedPlugins.end()) {
enginePtr = pluginIt->second;
} else {
auto dispatcher = InferenceEngine::PluginDispatcher({""});
- enginePtr = dispatcher.getPluginByDevice("MYRIAD");
- sharedPlugins["MYRIAD"] = enginePtr;
+ enginePtr = dispatcher.getPluginByDevice(device);
+ sharedPlugins[device] = enginePtr;
}
}
auto plugin = InferenceEngine::InferencePlugin(enginePtr);
@@ -719,9 +723,9 @@ static bool detectMyriadX_()
try
{
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
- auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}});
+ auto netExec = getCore(device).LoadNetwork(cnn, device, {{"VPU_PLATFORM", "VPU_2480"}});
#else
- auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_MYRIAD_PLATFORM", "VPU_MYRIAD_2480"}});
+ auto netExec = getCore(device).LoadNetwork(cnn, device, {{"VPU_MYRIAD_PLATFORM", "VPU_MYRIAD_2480"}});
#endif
#endif
auto infRequest = netExec.CreateInferRequest();
@@ -1155,6 +1159,25 @@ void resetMyriadDevice()
#endif // HAVE_INF_ENGINE
}
+ void releaseHDDLPlugin()
+ {
+ #ifdef HAVE_INF_ENGINE
+ AutoLock lock(getInitializationMutex());
+ #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
+ getSharedPlugins().erase("HDDL");
+ #else
+ // Unregister both "HDDL" and "HETERO:HDDL,CPU" plugins
+ InferenceEngine::Core& ie = getCore("HDDL");
+ try
+ {
+ ie.UnregisterPlugin("HDDL");
+ ie.UnregisterPlugin("HETERO");
+ }
+ catch (...) {}
+ #endif
+ #endif // HAVE_INF_ENGINE
+ }
#ifdef HAVE_INF_ENGINE
bool isMyriadX()
{
@@ -1170,10 +1193,11 @@ static std::string getInferenceEngineVPUType_()
#if defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
param_vpu_type = OPENCV_DNN_IE_VPU_TYPE_DEFAULT;
#else
- CV_LOG_INFO(NULL, "OpenCV-DNN: running Inference Engine VPU autodetection: Myriad2/X. In case of other accelerator types specify 'OPENCV_DNN_IE_VPU_TYPE' parameter");
+ CV_LOG_INFO(NULL, "OpenCV-DNN: running Inference Engine VPU autodetection: Myriad2/X or HDDL. In case of other accelerator types specify 'OPENCV_DNN_IE_VPU_TYPE' parameter");
try {
- bool isMyriadX_ = detectMyriadX_();
- if (isMyriadX_)
+ bool isMyriadX_ = detectMyriadX_("MYRIAD");
+ bool isHDDL_ = detectMyriadX_("HDDL");
+ if (isMyriadX_ || isHDDL_)
{
param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
}
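Autodetection now probes both the MYRIAD and HDDL plugins, and an HDDL device is treated as Myriad X. The detected type can be inspected from Python (sketch; setting the OPENCV_DNN_IE_VPU_TYPE environment variable bypasses detection):

    import cv2 as cv

    # Prints the detected VPU type string; per the change above, HDDL
    # devices are reported the same way as Myriad X.
    print(cv.dnn.getInferenceEngineVPUType())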

@@ -40,6 +40,7 @@ void PrintTo(const cv::dnn::Target& v, std::ostream* os)
case DNN_TARGET_OPENCL: *os << "OCL"; return;
case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return;
case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return;
+ case DNN_TARGET_HDDL: *os << "HDDL"; return;
case DNN_TARGET_VULKAN: *os << "VULKAN"; return;
case DNN_TARGET_FPGA: *os << "FPGA"; return;
case DNN_TARGET_CUDA: *os << "CUDA"; return;
@@ -221,7 +222,7 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
{
- if (*i == DNN_TARGET_MYRIAD && !withVPU)
+ if ((*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL) && !withVPU)
continue;
targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
}
@@ -231,7 +232,7 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
{
- if (*i == DNN_TARGET_MYRIAD && !withVPU)
+ if ((*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL) && !withVPU)
continue;
targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, *i));
}
@@ -281,7 +282,7 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
{
- if (*i == DNN_TARGET_MYRIAD && !withVPU)
+ if ((*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL) && !withVPU)
continue;
targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
}
@@ -291,7 +292,7 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
{
- if (*i == DNN_TARGET_MYRIAD && !withVPU)
+ if ((*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL) && !withVPU)
continue;
targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, *i));
}
@@ -323,7 +324,7 @@ static bool validateVPUType_()
bool have_vpu_target = false;
for (std::vector<Target>::const_iterator i = available.begin(); i != available.end(); ++i)
{
- if (*i == DNN_TARGET_MYRIAD)
+ if (*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL)
{
have_vpu_target = true;
break;

@@ -340,6 +340,8 @@ TEST_P(DNNTestOpenVINO, models)
// Single Myriad device cannot be shared across multiple processes.
if (targetId == DNN_TARGET_MYRIAD)
resetMyriadDevice();
+ if (targetId == DNN_TARGET_HDDL)
+ releaseHDDLPlugin();
EXPECT_NO_THROW(runIE(targetId, xmlPath, binPath, inputsMap, ieOutputsMap)) << "runIE";
EXPECT_NO_THROW(runCV(backendId, targetId, xmlPath, binPath, inputsMap, cvOutputsMap)) << "runCV";

@@ -5,7 +5,7 @@ import numpy as np
from common import *
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
- targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+ targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
@@ -25,7 +25,8 @@ parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU,
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
- '%d: VPU' % targets)
+ '%d: NCS2 VPU, '
+ '%d: HDDL VPU' % targets)
args, _ = parser.parse_known_args()
add_preproc_args(args.zoo, parser, 'classification')
parser = argparse.ArgumentParser(parents=[parser],
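The same tuple and help-string change is repeated in the remaining samples below. The numeric id to pass as --target can be read from the enum rather than hard-coded (a minimal sketch):

    import cv2 as cv

    # Value to pass to the samples' --target option for HDDL.
    print(int(cv.dnn.DNN_TARGET_HDDL))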

@@ -46,7 +46,7 @@ import cv2 as cv
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
- targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+ targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
def preprocess(image):
@@ -168,7 +168,8 @@ if __name__ == '__main__':
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
- '%d: VPU' % targets)
+ '%d: NCS2 VPU, '
+ '%d: HDDL VPU' % targets)
args, _ = parser.parse_known_args()
if not os.path.isfile(args.model):

@@ -15,7 +15,7 @@ from tf_text_graph_ssd import createSSDGraph
from tf_text_graph_faster_rcnn import createFasterRCNNGraph
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
- targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+ targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
@@ -41,7 +41,8 @@ parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU,
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
- '%d: VPU' % targets)
+ '%d: NCS2 VPU, '
+ '%d: HDDL VPU' % targets)
parser.add_argument('--async', type=int, default=0,
dest='asyncN',
help='Number of asynchronous forwards at the same time. '

@@ -6,7 +6,7 @@ import sys
from common import *
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
- targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+ targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
@@ -28,7 +28,8 @@ parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU,
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
- '%d: VPU' % targets)
+ '%d: NCS2 VPU, '
+ '%d: HDDL VPU' % targets)
args, _ = parser.parse_known_args()
add_preproc_args(args.zoo, parser, 'segmentation')
parser = argparse.ArgumentParser(parents=[parser],

@@ -17,7 +17,7 @@ from common import findFile
from human_parsing import parse_human
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
- targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+ targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
parser = argparse.ArgumentParser(description='Use this script to run virtial try-on using CP-VTON',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -39,7 +39,8 @@ parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU,
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
- '%d: VPU' % targets)
+ '%d: NCS2 VPU, '
+ '%d: HDDL VPU' % targets)
args, _ = parser.parse_known_args()
