@@ -225,7 +225,7 @@ void imagesFromBlob(const cv::Mat& blob_, OutputArrayOfArrays images_)
 class OpenCLBackendWrapper : public BackendWrapper
 {
 public:
-    OpenCLBackendWrapper(Mat& m) : BackendWrapper(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL)
+    OpenCLBackendWrapper(Mat& m) : BackendWrapper(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL)
     {
         m.copyTo(umat);
         host = &m;
@@ -233,7 +233,7 @@ public:
     }
 
     OpenCLBackendWrapper(const Ptr<BackendWrapper>& baseBuffer, Mat& m)
-        : BackendWrapper(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL)
+        : BackendWrapper(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL)
     {
         Ptr<OpenCLBackendWrapper> base = baseBuffer.dynamicCast<OpenCLBackendWrapper>();
         CV_Assert(!base.empty());
@@ -654,7 +654,7 @@ private:
 
 static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
 {
-    if (backendId == DNN_BACKEND_DEFAULT)
+    if (backendId == DNN_BACKEND_OPENCV)
     {
         if (targetId == DNN_TARGET_CPU)
             return Ptr<BackendWrapper>();
@@ -727,7 +727,7 @@ struct Net::Impl
 
     Ptr<BackendWrapper> wrap(Mat& host)
    {
-        if (preferableBackend == DNN_BACKEND_DEFAULT && preferableTarget == DNN_TARGET_CPU)
+        if (preferableBackend == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU)
             return Ptr<BackendWrapper>();
 
         MatShape shape(host.dims);
@@ -738,7 +738,7 @@ struct Net::Impl
         if (backendWrappers.find(data) != backendWrappers.end())
         {
             Ptr<BackendWrapper> baseBuffer = backendWrappers[data];
-            if (preferableBackend == DNN_BACKEND_DEFAULT)
+            if (preferableBackend == DNN_BACKEND_OPENCV)
             {
                 CV_Assert(IS_DNN_OPENCL_TARGET(preferableTarget));
                 return OpenCLBackendWrapper::create(baseBuffer, host);
@@ -850,9 +850,27 @@ struct Net::Impl
     {
         CV_TRACE_FUNCTION();
 
+        if (preferableBackend == DNN_BACKEND_DEFAULT)
+#ifdef HAVE_INF_ENGINE
+            preferableBackend = DNN_BACKEND_INFERENCE_ENGINE;
+#else
+            preferableBackend = DNN_BACKEND_OPENCV;
+#endif
+        CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
+                  preferableTarget == DNN_TARGET_CPU ||
+                  preferableTarget == DNN_TARGET_OPENCL ||
+                  preferableTarget == DNN_TARGET_OPENCL_FP16);
+        CV_Assert(preferableBackend != DNN_BACKEND_HALIDE ||
+                  preferableTarget == DNN_TARGET_CPU ||
+                  preferableTarget == DNN_TARGET_OPENCL);
+        CV_Assert(preferableBackend != DNN_BACKEND_INFERENCE_ENGINE ||
+                  preferableTarget == DNN_TARGET_CPU ||
+                  preferableTarget == DNN_TARGET_OPENCL ||
+                  preferableTarget == DNN_TARGET_OPENCL_FP16 ||
+                  preferableTarget == DNN_TARGET_MYRIAD);
         if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
         {
-            if (preferableBackend == DNN_BACKEND_DEFAULT && IS_DNN_OPENCL_TARGET(preferableTarget))
+            if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
 #ifndef HAVE_OPENCL
             {
                 CV_LOG_WARNING(NULL, "DNN: OpenCL target is not available in this OpenCV build, switching to CPU.");
@@ -1036,7 +1054,7 @@ struct Net::Impl
     void initBackend()
     {
         CV_TRACE_FUNCTION();
-        if (preferableBackend == DNN_BACKEND_DEFAULT)
+        if (preferableBackend == DNN_BACKEND_OPENCV)
             CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
         else if (preferableBackend == DNN_BACKEND_HALIDE)
             initHalideBackend();
@@ -1375,7 +1393,7 @@ struct Net::Impl
         std::vector<LayerPin> pinsForInternalBlobs;
         blobManager.allocateBlobsForLayer(ld, layerShapesIt->second, pinsForInternalBlobs,
                                           preferableBackend == DNN_BACKEND_INFERENCE_ENGINE,
-                                          preferableBackend == DNN_BACKEND_DEFAULT &&
+                                          preferableBackend == DNN_BACKEND_OPENCV &&
                                           preferableTarget == DNN_TARGET_OPENCL_FP16);
         ld.outputBlobsWrappers.resize(ld.outputBlobs.size());
         for (int i = 0; i < ld.outputBlobs.size(); ++i)
@@ -1418,7 +1436,7 @@ struct Net::Impl
 
     void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
     {
-        if( !fusion || preferableBackend != DNN_BACKEND_DEFAULT &&
+        if( !fusion || preferableBackend != DNN_BACKEND_OPENCV &&
                        preferableBackend != DNN_BACKEND_INFERENCE_ENGINE)
             return;
 
@@ -1446,7 +1464,7 @@ struct Net::Impl
             // some other layers.
 
             // TODO: OpenCL target support more fusion styles.
-            if ( preferableBackend == DNN_BACKEND_DEFAULT && IS_DNN_OPENCL_TARGET(preferableTarget) &&
+            if ( preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget) &&
                  (!cv::ocl::useOpenCL() || (ld.layerInstance->type != "Convolution" &&
                  ld.layerInstance->type != "MVN")) )
                 continue;
@@ -1481,7 +1499,7 @@ struct Net::Impl
                         break;
                 }
 
-                if (preferableBackend != DNN_BACKEND_DEFAULT)
+                if (preferableBackend != DNN_BACKEND_OPENCV)
                     continue; // Go to the next layer.
 
                 // For now, OpenCL target support fusion with activation of ReLU/ChannelsPReLU/Power/Tanh
@@ -1624,7 +1642,7 @@ struct Net::Impl
                 }
             }
 
-            if (preferableBackend != DNN_BACKEND_DEFAULT)
+            if (preferableBackend != DNN_BACKEND_OPENCV)
                 continue; // Go to the next layer.
 
             // the optimization #2. if there is no layer that takes max pooling layer's computed
@@ -1735,7 +1753,7 @@ struct Net::Impl
         {
             CV_Assert(layers[0].outputBlobs[i].total());
             if (layers[0].outputBlobs[i].depth() == CV_32F &&
-                preferableBackend == DNN_BACKEND_DEFAULT &&
+                preferableBackend == DNN_BACKEND_OPENCV &&
                 preferableTarget == DNN_TARGET_OPENCL_FP16)
             {
                 Mat mat = layers[0].outputBlobs[i].clone();
@@ -1781,12 +1799,12 @@ struct Net::Impl
         TickMeter tm;
         tm.start();
 
-        if (preferableBackend == DNN_BACKEND_DEFAULT ||
+        if (preferableBackend == DNN_BACKEND_OPENCV ||
             !layer->supportBackend(preferableBackend))
         {
             if( !ld.skip )
             {
-                if (preferableBackend == DNN_BACKEND_DEFAULT && IS_DNN_OPENCL_TARGET(preferableTarget))
+                if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
                 {
                     std::vector<UMat> umat_outputBlobs = OpenCLBackendWrapper::getUMatVector(ld.outputBlobsWrappers);
                     layer->forward(OpenCLBackendWrapper::getUMatVector(ld.inputBlobsWrappers),
@@ -2132,7 +2150,7 @@ void Net::forward(OutputArrayOfArrays outputBlobs, const String& outputName)
     {
         std::vector<UMat> & outputvec = *(std::vector<UMat> *)outputBlobs.getObj();
 
-        if (impl->preferableBackend == DNN_BACKEND_DEFAULT &&
+        if (impl->preferableBackend == DNN_BACKEND_OPENCV &&
             IS_DNN_OPENCL_TARGET(impl->preferableTarget))
         {
             if (impl->preferableTarget == DNN_TARGET_OPENCL)
@@ -2270,7 +2288,7 @@ void Net::setInput(InputArray blob, const String& name)
     ld.outputBlobsWrappers.resize(ld.outputBlobs.size());
     MatShape prevShape = shape(ld.outputBlobs[pin.oid]);
     Mat blob_;
-    if (impl->preferableBackend == DNN_BACKEND_DEFAULT &&
+    if (impl->preferableBackend == DNN_BACKEND_OPENCV &&
         impl->preferableTarget == DNN_TARGET_OPENCL_FP16)
     {
         Mat blob_mat = blob.getMat();
@@ -2664,7 +2682,7 @@ int Layer::outputNameToIndex(const String&)
 
 bool Layer::supportBackend(int backendId)
 {
-    return backendId == DNN_BACKEND_DEFAULT;
+    return backendId == DNN_BACKEND_OPENCV;
 }
 
 Ptr<BackendNode> Layer::initHalide(const std::vector<Ptr<BackendWrapper> > &)
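
Note (not part of the patch): a minimal C++ usage sketch of the renamed enum. After this change, DNN_BACKEND_DEFAULT only selects a backend in setUpNet() (Inference Engine when the build defines HAVE_INF_ENGINE, otherwise the built-in implementation), so code that wants the built-in OpenCV backend should request DNN_BACKEND_OPENCV explicitly. The model file names below are placeholders, and the input blob is dummy data.

#include <opencv2/dnn.hpp>
using namespace cv;
using namespace cv::dnn;

int main()
{
    // "model.prototxt"/"model.caffemodel" are placeholder paths, not files from this patch.
    Net net = readNetFromCaffe("model.prototxt", "model.caffemodel");

    // Force the built-in implementation instead of letting DNN_BACKEND_DEFAULT
    // dispatch to Inference Engine; pair it with a CPU or OpenCL target, which
    // matches the CV_Assert added to setUpNet() above.
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_OPENCL);

    Mat blob = blobFromImage(Mat::zeros(224, 224, CV_8UC3));  // dummy 1x3x224x224 input
    net.setInput(blob);
    Mat out = net.forward();
    return 0;
}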