MVN layer using Intel's Inference Engine backend

Branch: pull/12130/head
Author: Dmitry Kurtaev (6 years ago)
Commit: be08730cd6 (parent 47e3e89e30)
8 changed files:

  1. modules/dnn/include/opencv2/dnn/all_layers.hpp (2 changed lines)
  2. modules/dnn/src/dnn.cpp (49 changed lines)
  3. modules/dnn/src/layers/batch_norm_layer.cpp (30 changed lines)
  4. modules/dnn/src/layers/convolution_layer.cpp (3 changed lines)
  5. modules/dnn/src/layers/eltwise_layer.cpp (5 changed lines)
  6. modules/dnn/src/layers/fully_connected_layer.cpp (5 changed lines)
  7. modules/dnn/src/layers/mvn_layer.cpp (52 changed lines)
  8. modules/dnn/test/test_tf_importer.cpp (6 changed lines)

modules/dnn/include/opencv2/dnn/all_layers.hpp

@@ -489,7 +489,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         static Ptr<EltwiseLayer> create(const LayerParams &params);
     };

-    class CV_EXPORTS BatchNormLayer : public Layer
+    class CV_EXPORTS BatchNormLayer : public ActivationLayer
     {
     public:
         bool hasWeights, hasBias;

modules/dnn/src/dnn.cpp

@@ -1471,6 +1471,8 @@ struct Net::Impl
             {
                 node = layer->initInfEngine(ld.inputBlobsWrappers);
             }
+            else if (node.empty())
+                continue;

             CV_Assert(!node.empty());
             ld.backendNodes[preferableBackend] = node;
@@ -1715,40 +1717,41 @@ struct Net::Impl
                 if (preferableBackend != DNN_BACKEND_OPENCV)
                     continue;  // Go to the next layer.

-                // For now, OpenCL target support fusion with activation of ReLU/ChannelsPReLU/Power/Tanh
-                if ( !IS_DNN_OPENCL_TARGET(preferableTarget) ||
-                     (IS_DNN_OPENCL_TARGET(preferableTarget) &&
-                      nextData &&
-                      ((nextData->type == "ReLU") ||
-                       (nextData->type == "ChannelsPReLU") ||
-                       (nextData->type == "ReLU6") ||
-                       (nextData->type == "TanH") ||
-                       (nextData->type == "Power"))) )
+                while (nextData)
                 {
-                    Ptr<ActivationLayer> nextActivLayer;
-
-                    if( nextData )
-                        nextActivLayer = nextData->layerInstance.dynamicCast<ActivationLayer>();
+                    // For now, OpenCL target support fusion with activation of ReLU/ChannelsPReLU/Power/Tanh
+                    if (IS_DNN_OPENCL_TARGET(preferableTarget) &&
+                        nextData->type != "ReLU" &&
+                        nextData->type != "ChannelsPReLU" &&
+                        nextData->type != "ReLU6" &&
+                        nextData->type != "TanH" &&
+                        nextData->type != "Power")
+                        break;

-                    if( !nextActivLayer.empty() && pinsToKeep.count(lpNext) == 0
-                            && currLayer->setActivation(nextActivLayer) )
+                    Ptr<ActivationLayer> nextActivLayer = nextData->layerInstance.dynamicCast<ActivationLayer>();
+                    if (nextActivLayer.empty())
+                        break;
+
+                    if (currLayer->setActivation(nextActivLayer))
                     {
-                        LayerData *activData = nextData;
                         printf_(("\tfused with %s\n", nextActivLayer->name.c_str()));
-                        activData->skip = true;
+                        nextData->skip = true;
                         ld.outputBlobs = layers[lpNext.lid].outputBlobs;
                         ld.outputBlobsWrappers = layers[lpNext.lid].outputBlobsWrappers;

-                        if ( IS_DNN_OPENCL_TARGET(preferableTarget) )
-                        {
-                            if ( !activData->consumers.empty() )
-                            {
-                                nextData = &layers[activData->consumers[0].lid];
-                                lpNext = LayerPin(activData->consumers[0].lid, 0);
-                            }
-                        }
+                        if (nextData->consumers.size() == 1)
+                        {
+                            int nextLayerId = nextData->consumers[0].lid;
+                            nextData = &layers[nextLayerId];
+                            lpNext = LayerPin(nextLayerId, 0);
+                        }
+                        else
+                        {
+                            nextData = 0;
+                            break;
+                        }
                     }
+                    else
+                        break;
                 }

                 // fuse convolution layer followed by eltwise + relu
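Note on the hunk above: the one-shot fusion attempt becomes a while (nextData) loop, so a layer keeps absorbing consecutive activations (for example Conv, then BatchNorm, then ReLU, now that BatchNormLayer derives from ActivationLayer) until setActivation() refuses. The following standalone toy sketch is not OpenCV code and uses made-up names; it only mirrors that control flow.

// Standalone toy sketch (not OpenCV code; all names are made up) of the chained
// fusion control flow introduced above: keep handing the next layer to
// setActivation() until it refuses, then stop.
#include <cstdio>
#include <string>
#include <vector>

struct ToyLayer
{
    std::string name;
    bool hasScaleShift = false;   // toy stand-in for a fused BatchNorm
    bool hasReLU = false;         // toy stand-in for a fused ReLU

    // Toy acceptance rule: one scale/shift, then one ReLU, then refuse.
    bool setActivation(const std::string& type)
    {
        if (type == "BatchNorm" && !hasScaleShift && !hasReLU) { hasScaleShift = true; return true; }
        if (type == "ReLU" && !hasReLU)                        { hasReLU = true;       return true; }
        return false;
    }
};

int main()
{
    ToyLayer conv;
    conv.name = "conv1";
    std::vector<std::string> followers;
    followers.push_back("BatchNorm");
    followers.push_back("ReLU");
    followers.push_back("ReLU");   // the third candidate is refused, ending the chain

    size_t next = 0;
    while (next < followers.size())                 // analogous to while (nextData)
    {
        if (!conv.setActivation(followers[next]))   // analogous to currLayer->setActivation(...)
            break;
        std::printf("\tfused with %s\n", followers[next].c_str());
        ++next;
    }
    return 0;
}

The setActivation() guards added below in the convolution, eltwise, fully-connected and MVN layers are what produce that refusal once an activation is already attached.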

modules/dnn/src/layers/batch_norm_layer.cpp

@@ -268,6 +268,36 @@ public:
         }
     }

+    void forwardSlice(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const CV_OVERRIDE
+    {
+        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
+        {
+            int i = 0;
+            float w = weights_.at<float>(cn);
+            float b = bias_.at<float>(cn);
+#if CV_SIMD128
+            v_float32x4 wV = v_setall_f32(w), bV = v_setall_f32(b);
+            for( ; i <= len - 16; i += 16 )
+            {
+                v_float32x4 x0 = v_load(srcptr + i);
+                v_float32x4 x1 = v_load(srcptr + i + 4);
+                v_float32x4 x2 = v_load(srcptr + i + 8);
+                v_float32x4 x3 = v_load(srcptr + i + 12);
+                x0 = v_muladd(x0, wV, bV);
+                x1 = v_muladd(x1, wV, bV);
+                x2 = v_muladd(x2, wV, bV);
+                x3 = v_muladd(x3, wV, bV);
+                v_store(dstptr + i, x0);
+                v_store(dstptr + i + 4, x1);
+                v_store(dstptr + i + 8, x2);
+                v_store(dstptr + i + 12, x3);
+            }
+#endif
+            for( ; i < len; i++ )
+                dstptr[i] = w * srcptr[i] + b;
+        }
+    }
+
     virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
     {
         switch (node->backendId)
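The new forwardSlice() applies the per-channel affine map dst = w*src + b that batch normalization reduces to at inference time; the SIMD path merely processes 16 floats per iteration. A minimal scalar sketch of the same computation, with illustrative parameter names (not the layer's actual fields) and the standard folding of (gamma, beta, mean, var, eps) into (w, b):

// Minimal scalar sketch of what the new forwardSlice() computes per channel.
#include <cmath>
#include <cstddef>
#include <vector>

void foldAndApply(const std::vector<float>& src, std::vector<float>& dst,
                  float gamma, float beta, float mean, float var, float eps)
{
    const float w = gamma / std::sqrt(var + eps);  // per-channel scale
    const float b = beta - w * mean;               // per-channel shift
    dst.resize(src.size());
    for (std::size_t i = 0; i < src.size(); ++i)
        dst[i] = w * src[i] + b;                   // same math as the v_muladd path
}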

modules/dnn/src/layers/convolution_layer.cpp

@@ -296,6 +296,9 @@ public:
     bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
     {
+        if (!activ.empty() && !layer.empty())
+            return false;
+
         activ = layer;
         if (activ.empty())
             reluslope.clear();

modules/dnn/src/layers/eltwise_layer.cpp

@@ -451,10 +451,15 @@ public:
     }

     bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
+    {
+        if (activ.empty() || layer.empty())
     {
         activ = layer;
         return !activ.empty();
     }
+    else
+        return false;
+    }

     Ptr<ActivationLayer> activ;
 };

modules/dnn/src/layers/fully_connected_layer.cpp

@@ -134,10 +134,15 @@ public:
     }

     virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
+    {
+        if (activ.empty() || layer.empty())
     {
         activ = layer;
         return !activ.empty();
     }
+    else
+        return false;
+    }

     class FullyConnected : public ParallelLoopBody
     {

modules/dnn/src/layers/mvn_layer.cpp

@@ -42,6 +42,7 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
+#include "../op_inf_engine.hpp"
 #include <opencv2/dnn/shape_utils.hpp>

 #ifdef HAVE_OPENCL
@@ -66,27 +67,25 @@ public:
         fuse_batch_norm = false;
         fuse_relu = false;
         relu_slope = 0.f;
+        zeroDev = false;
     }

     Mat scale, shift;
     bool fuse_batch_norm;

-    virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
+    Ptr<ReLULayer> activ_relu;
+    float relu_slope;
+    bool fuse_relu;
+    bool zeroDev;  // TODO: not taken into account by Intel's Inference Engine backend.
+
+    bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
     {
-        if (!fuse_batch_norm)
+        if (!layer.empty() && !fuse_relu && !fuse_batch_norm)
         {
-            top->getScaleShift(scale, shift);
+            layer->getScaleShift(scale, shift);
             fuse_batch_norm = !scale.empty() || !shift.empty();
             return fuse_batch_norm;
         }
-        return false;
-    }
-
-    Ptr<ReLULayer> activ_relu;
-    float relu_slope;
-    bool fuse_relu;
-
-    bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
-    {
+
         if (!layer.empty() && preferableTarget == DNN_TARGET_OPENCL)
         {
             activ_relu = layer.dynamicCast<ReLULayer>();
@@ -97,6 +96,23 @@ public:
         return fuse_relu;
     }

+    void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
+    {
+        int splitDim = (acrossChannels) ? 1 : 2;
+        int i, newRows = 1;
+        for( i = 0; i < splitDim; i++ )
+            newRows *= inputs[0]->size[i];
+        zeroDev = inputs[0]->total() == newRows;
+    }
+
+    virtual bool supportBackend(int backendId) CV_OVERRIDE
+    {
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+            return !zeroDev && (preferableTarget == DNN_TARGET_CPU || eps <= 1e-7f);
+        else
+            return backendId == DNN_BACKEND_OPENCV;
+    }
+
 #ifdef HAVE_OPENCL
     bool fast_forward_ocl(std::vector<UMat> &inputs, std::vector<UMat> &outputs)
     {
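finalize() above flags the degenerate case where each normalized slice contains a single element, so its variance is zero, and supportBackend() then keeps such models off the Inference Engine backend. A small standalone illustration of that check, assuming an NCHW input of 2x8x1x1 with acrossChannels == false:

// Standalone illustration of the zeroDev test added in finalize(); the shape is an
// assumed example, not taken from this PR.
#include <cstdio>

int main()
{
    const int dims[4] = {2, 8, 1, 1};        // N, C, H, W (example shape)
    const bool acrossChannels = false;

    const int splitDim = acrossChannels ? 1 : 2;
    long newRows = 1, total = 1;
    for (int i = 0; i < splitDim; ++i) newRows *= dims[i];
    for (int i = 0; i < 4; ++i)        total   *= dims[i];

    const bool zeroDev = (total == newRows);  // one element per slice -> zero variance
    std::printf("zeroDev = %s\n", zeroDev ? "true" : "false");  // prints "true"
    return 0;
}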
@@ -324,6 +340,22 @@ public:
         }
     }

+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    {
+#ifdef HAVE_INF_ENGINE
+        InferenceEngine::LayerParams lp;
+        lp.name = name;
+        lp.type = "MVN";
+        lp.precision = InferenceEngine::Precision::FP32;
+        std::shared_ptr<InferenceEngine::MVNLayer> ieLayer(new InferenceEngine::MVNLayer(lp));
+        ieLayer->params["across_channels"] = acrossChannels ? "1" : "0";
+        ieLayer->params["normalize_variance"] = normVariance ? "1" : "0";
+        ieLayer->params["eps"] = format("%f", eps);
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif  // HAVE_INF_ENGINE
+        return Ptr<BackendNode>();
+    }
+
     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                            const std::vector<MatShape> &outputs) const CV_OVERRIDE
     {
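With initInfEngine() in place, the layer is expressed as an IE "MVN" primitive, so a network containing MVN can be scheduled on DNN_BACKEND_INFERENCE_ENGINE like any other supported layer, subject to the supportBackend() conditions above (on non-CPU targets only when eps <= 1e-7f, and never when zeroDev is set). A hypothetical usage sketch; the model file name and input shape are placeholders, not files from this PR:

// Hypothetical usage sketch: run a TensorFlow model that contains an MVN layer on the
// Inference Engine backend. The model path and blob size below are placeholders.
#include <opencv2/dnn.hpp>

int main()
{
    cv::dnn::Net net = cv::dnn::readNetFromTensorflow("model_with_mvn.pb");  // placeholder model
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

    const int blobShape[] = {1, 3, 224, 224};           // NCHW, example size
    cv::Mat blob(4, blobShape, CV_32F, cv::Scalar(0));  // dummy input, just to run forward()
    net.setInput(blob);
    cv::Mat out = net.forward();
    return 0;
}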

modules/dnn/test/test_tf_importer.cpp

@@ -165,12 +165,6 @@ TEST_P(Test_TensorFlow_layers, batch_norm)
     runTensorFlowNet("unfused_batch_norm");
     runTensorFlowNet("fused_batch_norm_no_gamma");
     runTensorFlowNet("unfused_batch_norm_no_gamma");
-}
-
-TEST_P(Test_TensorFlow_layers, mvn_batch_norm)
-{
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("");
     runTensorFlowNet("mvn_batch_norm");
     runTensorFlowNet("mvn_batch_norm_1x1");
 }
