Fix IE backend considering future changes.

Branch: pull/13841/head
Author: Dmitry Kurtaev, 6 years ago
Parent: 3c70d966cb
Commit: ca5976e3d4
Changed files:
  1. modules/dnn/src/dnn.cpp (20 changed lines)
  2. modules/dnn/src/layers/batch_norm_layer.cpp (7 changed lines)
  3. modules/dnn/src/layers/blank_layer.cpp (4 changed lines)
  4. modules/dnn/src/layers/convolution_layer.cpp (15 changed lines)
  5. modules/dnn/src/layers/elementwise_layers.cpp (6 changed lines)
  6. modules/dnn/src/layers/fully_connected_layer.cpp (7 changed lines)
  7. modules/dnn/src/layers/normalize_bbox_layer.cpp (2 changed lines)
  8. modules/dnn/src/layers/prior_box_layer.cpp (6 changed lines)
  9. modules/dnn/src/layers/scale_layer.cpp (10 changed lines)
  10. modules/dnn/src/op_inf_engine.cpp (75 changed lines)
  11. modules/dnn/src/op_inf_engine.hpp (6 changed lines)
  12. modules/dnn/test/test_halide_layers.cpp (3 changed lines)
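
All of the layer hunks below apply one pattern: the typed Builder setters (setWeights()/setBiases()), which do not survive the move past Inference Engine 2018R5, are replaced by a generic InferenceEngine::Builder::Layer plus the new addConstantData() helper introduced in op_inf_engine.cpp at the end of this diff. A rough sketch of the pattern follows; it assumes the helpers declared in modules/dnn/src/op_inf_engine.hpp, and the Mat, channel count and layer name are placeholders, not values from the diff.

```cpp
// Illustrative sketch only, not part of the diff. Assumes the helpers declared
// in modules/dnn/src/op_inf_engine.hpp; the Mat, size and name are placeholders.
cv::Mat weightsMat = cv::Mat::ones(1, 16, CV_32F);   // placeholder weights
const size_t numChannels = 16;

// A typed builder is still used to create the layer, but it is kept as a generic Layer...
InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer("scale1");

// ...and constants go through the helper instead of ieLayer.setWeights(...):
// on 2018R5 it forwards to Layer::addConstantData(), on newer releases it stores
// the blob in the layer parameters for InfEngineBackendNet::addLayer() to pick up.
addConstantData("weights",
                wrapToInfEngineBlob(weightsMat, {numChannels}, InferenceEngine::Layout::C),
                ieLayer);
```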

@@ -730,9 +730,9 @@ struct DataLayer : public Layer
     biases->set(biasesVec);
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-    InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
-    ieLayer.setWeights(weights);
-    ieLayer.setBiases(biases);
+    InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
+    addConstantData("weights", weights, ieLayer);
+    addConstantData("biases", biases, ieLayer);
 #else
     InferenceEngine::LayerParams lp;
     lp.name = name;
@@ -1638,25 +1638,15 @@ struct Net::Impl
     preferableTarget == DNN_TARGET_FPGA) && !fused)
 {
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
-    bool hasWeights = false;
     for (const std::string& name : {"weights", "biases"})
     {
         auto it = ieNode->layer.getParameters().find(name);
         if (it != ieNode->layer.getParameters().end())
         {
-            InferenceEngine::Blob::CPtr bp = it->second.as<InferenceEngine::Blob::CPtr>();
-            it->second = (InferenceEngine::Blob::CPtr)convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
-            hasWeights = true;
+            InferenceEngine::Blob::Ptr bp = it->second.as<InferenceEngine::Blob::Ptr>();
+            it->second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
         }
     }
-    if (!hasWeights)
-    {
-        InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<int16_t>(
-            InferenceEngine::Precision::FP16,
-            InferenceEngine::Layout::C, {1});
-        blob->allocate();
-        ieNode->layer.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)blob;
-    }
 #else
     auto& blobs = ieNode->layer.getConstantData();
     if (blobs.empty())
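
On IE releases newer than 2018R5, the FP16 path for DNN_TARGET_OPENCL_FP16 and DNN_TARGET_MYRIAD becomes simpler: constant blobs now sit in the layer parameters as Blob::Ptr and are converted in place with convertFp16(), and the dummy one-element FP16 "weights" blob that was previously attached to layers without constants is dropped, apparently because precision is now set explicitly on the output ports (see the op_inf_engine.cpp hunk below that builds FP16 ports for those targets). A minimal sketch of convertFp16() on its own, with placeholder data:

```cpp
// Minimal sketch (placeholder data); convertFp16() is declared in op_inf_engine.hpp
// and, per its comment there, allocates memory for a new blob.
cv::Mat m = cv::Mat::ones(1, 8, CV_32F);
InferenceEngine::Blob::Ptr fp32 = wrapToInfEngineBlob(m, {8}, InferenceEngine::Layout::C);
InferenceEngine::Blob::Ptr fp16 = convertFp16(fp32);  // new FP16 blob holding the same values
```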

@@ -350,11 +350,10 @@ public:
 {
 #ifdef HAVE_INF_ENGINE
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-    InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+    InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
     const size_t numChannels = weights_.total();
-    ieLayer.setWeights(wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C));
-    ieLayer.setBiases(wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C));
+    addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
+    addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
     return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #else
     InferenceEngine::LayerParams lp;

@@ -125,7 +125,9 @@ public:
         ieLayer.getParameters()["axis"] = input->dims.size() - 1;
         ieLayer.getParameters()["out_sizes"] = input->dims[0];
     }
-    ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+    std::vector<size_t> shape(input->dims);
+    std::reverse(shape.begin(), shape.end());
+    ieLayer.setInputPorts({InferenceEngine::Port(shape)});
     ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
     return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #else
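
The blank (identity) layer now gives its input port an explicit shape instead of a default-constructed port. The dims vector is reversed before building the Port, mirroring what InfEngineBackendNet::connect() already does for input layers further down in this diff; the stored dims order is evidently the opposite of the order Port expects. A worked example with made-up sizes (the exact storage order is an assumption, not stated in the diff):

```cpp
// Worked example with made-up sizes. Assumption: for a 1x3x224x224 input the
// stored dims read {224, 224, 3, 1}, so reversing yields the natural NCHW order.
std::vector<size_t> dims = {224, 224, 3, 1};
std::vector<size_t> shape(dims);
std::reverse(shape.begin(), shape.end());   // {1, 3, 224, 224}
InferenceEngine::Port inputPort(shape);     // same call as ieLayer.setInputPorts({...}) above
```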

@@ -493,11 +493,11 @@ public:
     ieLayer.setGroup((size_t)group);
     ieLayer.setOutDepth((size_t)outCn);
-    ieLayer.setWeights(ieWeights);
+    InferenceEngine::Builder::Layer l = ieLayer;
+    addConstantData("weights", ieWeights, l);
     if (ieBiases)
-        ieLayer.setBiases(ieBiases);
+        addConstantData("biases", ieBiases, l);
-    InferenceEngine::Builder::Layer l = ieLayer;
     if (!padMode.empty())
         l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
@@ -1725,12 +1725,11 @@ public:
     ieLayer.setGroup((size_t)group);
     ieLayer.setOutDepth((size_t)numOutput);
-    ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW));
+    InferenceEngine::Builder::Layer l = ieLayer;
+    addConstantData("weights", wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW), l);
     if (hasBias())
-    {
-        ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C));
-    }
-    return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+        addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C), l);
+    return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
     const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
     const int group = numOutput / outGroupCn;

@@ -1134,10 +1134,10 @@ struct ChannelsPReLUFunctor
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
     InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
     {
-        InferenceEngine::Builder::PReLULayer ieLayer("");
+        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
         const size_t numChannels = scale.total();
-        ieLayer.setWeights(wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C));
-        return ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
+        return l;
     }
 #else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)

@@ -448,11 +448,12 @@ public:
     const int outNum = blobs[0].size[0];
     ieLayer.setOutputNum(outNum);
-    ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW));
+    InferenceEngine::Builder::Layer l = ieLayer;
+    addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW), l);
     if (blobs.size() > 1)
-        ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C));
+        addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l);
-    return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+    return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
     InferenceEngine::LayerParams lp;
     lp.name = name;

@@ -295,7 +295,7 @@ public:
         l.getParameters()["channel_shared"] = blobs[0].total() == 1;
     }
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
-    l.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)weights;
+    l.getParameters()["weights"] = weights;
 #else
     l.addConstantData("weights", weights);
 #endif

@@ -524,12 +524,12 @@ public:
     if (_stepX == _stepY)
     {
         l.getParameters()["step"] = _stepX;
-        l.getParameters()["step_h"] = 0.0;
-        l.getParameters()["step_w"] = 0.0;
+        l.getParameters()["step_h"] = 0.0f;
+        l.getParameters()["step_w"] = 0.0f;
     }
     else
     {
-        l.getParameters()["step"] = 0.0;
+        l.getParameters()["step"] = 0.0f;
         l.getParameters()["step_h"] = _stepY;
         l.getParameters()["step_w"] = _stepX;
     }
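
The only change in this hunk is 0.0 becoming 0.0f. InferenceEngine::Parameter keeps the exact static type of the value assigned to it, so step values written as double literals would not match the float type of _stepX and _stepY and, presumably, could not be read back as float by the consumer on newer releases. A hypothetical illustration (the as<float>() read is an assumption about how the value is consumed, not something shown in the diff):

```cpp
// Hypothetical illustration of why the literal type matters; Parameter::as<T>()
// is used the same way in the dnn.cpp FP16 hunk above.
InferenceEngine::Parameter step = 0.0;   // holds a double
// step.as<float>()                      // would not see a float here (type mismatch)
step = 0.0f;                             // holds a float, matching _stepX / _stepY
float s = step.as<float>();              // reads back cleanly
(void)s;
```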

@@ -198,13 +198,13 @@ public:
 {
 #ifdef HAVE_INF_ENGINE
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-    InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+    InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
     CV_Assert(!blobs.empty());
     const size_t numChannels = blobs[0].total();
     if (hasWeights)
     {
-        ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C));
+        addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l);
     }
     else
     {
@@ -214,11 +214,11 @@ public:
         std::vector<float> ones(numChannels, 1);
         weights->set(ones);
-        ieLayer.setWeights(weights);
+        addConstantData("weights", weights, l);
     }
     if (hasBias)
-        ieLayer.setBiases(wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C));
-    return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+        addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
+    return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
     InferenceEngine::LayerParams lp;
     lp.name = name;

@@ -18,6 +18,11 @@ namespace cv { namespace dnn {
 #ifdef HAVE_INF_ENGINE
+// For networks with input layer which has an empty name, IE generates a name id[some_number].
+// OpenCV lets users use an empty input name and to prevent unexpected naming,
+// we can use some predefined name.
+static std::string kDefaultInpLayerName = "empty_inp_layer_name";
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
 InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
     : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
@@ -90,7 +95,7 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
     it = layers.find(inpName);
     if (it == layers.end())
     {
-        InferenceEngine::Builder::InputLayer inpLayer(inpName);
+        InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName);
         std::vector<size_t> shape(inp->blob->dims());
         std::reverse(shape.begin(), shape.end());
@@ -119,6 +124,14 @@ void InfEngineBackendNet::init(int targetId)
     for (int id : unconnectedLayersIds)
     {
         InferenceEngine::Builder::OutputLayer outLayer("myconv1");
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+        // Inference Engine determines network precision by ports.
+        InferenceEngine::Precision p = (targetId == DNN_TARGET_MYRIAD ||
+                                        targetId == DNN_TARGET_OPENCL_FP16) ?
+                                       InferenceEngine::Precision::FP16 :
+                                       InferenceEngine::Precision::FP32;
+        outLayer.setPort(InferenceEngine::Port({}, p));
+#endif
         netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
     }
     cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build()));
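
Since the converted network's precision is now driven by these output ports, the target passed through the public OpenCV API is what ultimately decides between FP16 and FP32. A caller-side sketch (the model files are placeholders):

```cpp
#include <opencv2/dnn.hpp>

int main()
{
    // Caller-side sketch with placeholder model files: the target set here reaches
    // init(targetId) above and selects FP16 ports for MYRIAD and OPENCL_FP16.
    cv::dnn::Net net = cv::dnn::readNetFromCaffe("deploy.prototxt", "weights.caffemodel");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_MYRIAD);  // DNN_TARGET_CPU would keep FP32 ports
    return 0;
}
```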
@@ -167,12 +180,56 @@ void InfEngineBackendNet::init(int targetId)
     initPlugin(cnn);
 }
-void InfEngineBackendNet::addLayer(const InferenceEngine::Builder::Layer& layer)
+void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer)
 {
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    // Add weights to network and connect them after input blobs.
+    std::map<std::string, InferenceEngine::Parameter>& params = layer.getParameters();
+    std::vector<int> blobsIds;
+    std::vector<int> portIds;
+    for (const std::string& name : {"weights", "biases"})
+    {
+        bool asInput = false;
+        int portId = 0;
+        for (int i = 0; i < layer.getInputPorts().size(); ++i)
+        {
+            const auto& port = layer.getInputPorts()[i];
+            auto it = port.getParameters().find("type");
+            if (it != port.getParameters().end() && it->second == name)
+            {
+                portId = i;
+                asInput = true;
+                break;
+            }
+        }
+        if (!asInput)
+            continue;
+        auto it = params.find(name);
+        if (it != params.end())
+        {
+            InferenceEngine::Blob::Ptr blob = it->second.as<InferenceEngine::Blob::Ptr>();
+            params.erase(it);
+            int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer(name).setData(blob));
+            blobsIds.push_back(blobId);
+            portIds.push_back(portId);
+        }
+    }
+#endif
     int id = netBuilder.addLayer(layer);
     const std::string& layerName = layer.getName();
     CV_Assert(layers.insert({layerName, id}).second);
     unconnectedLayersIds.insert(id);
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    // By default, all the weights are connected to last ports ids.
+    for (int i = 0; i < blobsIds.size(); ++i)
+    {
+        netBuilder.connect((size_t)blobsIds[i], {(size_t)id, portIds[i]});
+    }
+#endif
 }
 void InfEngineBackendNet::addOutput(const std::string& name)
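
On releases after 2018R5, constants are no longer properties of a layer: they have to be separate ConstLayers connected to dedicated input ports. The new addLayer() code above takes the blobs that addConstantData() stashed in the layer parameters, registers each as a ConstLayer, and connects it to the input port whose "type" parameter names it. A condensed sketch of that wiring in isolation; the Builder::Network, the convolution layer, the blob contents and the port index are all placeholders or assumptions, not values from the diff:

```cpp
// Condensed sketch of the wiring done by addLayer() above; names, sizes and the
// port index are assumptions for illustration only.
InferenceEngine::Builder::Network netBuilder("sketch");
InferenceEngine::Builder::Layer conv = InferenceEngine::Builder::ConvolutionLayer("conv1");

InferenceEngine::Blob::Ptr weights = InferenceEngine::make_shared_blob<float>(
    InferenceEngine::Precision::FP32, InferenceEngine::Layout::C, {16});
weights->allocate();

// 1. The constant becomes its own layer in the graph.
int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer("weights").setData(weights));
// 2. The consumer layer is added as usual.
int convId = netBuilder.addLayer(conv);
// 3. The constant is connected to the port that declares type "weights" (index assumed).
netBuilder.connect((size_t)blobId, {(size_t)convId, 1});
```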
@@ -705,7 +762,7 @@ void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs
 {
     std::string name = wrapper->dataPtr->name;
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-    name = name.empty() ? "id1" : name; // TODO: drop the magic input name.
+    name = name.empty() ? kDefaultInpLayerName : name;
 #endif
     allBlobs.insert({name, wrapper->blob});
 }
@@ -776,6 +833,18 @@ InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
     return halfs;
 }
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
+                     InferenceEngine::Builder::Layer& l)
+{
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    l.getParameters()[name] = data;
+#else
+    l.addConstantData(name, data);
+#endif
+}
+#endif
 #endif // HAVE_INF_ENGINE
 bool haveInfEngine()

@@ -162,7 +162,7 @@ public:
     InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
-    void addLayer(const InferenceEngine::Builder::Layer& layer);
+    void addLayer(InferenceEngine::Builder::Layer& layer);
     void addOutput(const std::string& name);
@@ -255,6 +255,10 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
 // Allocates memory for a new blob.
 InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
+#endif
 // This is a fake class to run networks from Model Optimizer. Objects of that
 // class simulate responses of layers are imported by OpenCV and supported by
 // Inference Engine. The main difference is that they do not perform forward pass.

@@ -695,7 +695,8 @@ TEST_P(Eltwise, Accuracy)
     Target targetId = get<1>(get<4>(GetParam()));
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018050000
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_OPENCL)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE &&
+        (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
 #endif
