@@ -701,12 +701,6 @@ struct DataLayer : public Layer
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "ScaleShift";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
-
         CV_CheckEQ(inputsData.size(), (size_t)1, "");
         CV_CheckEQ(inputsData[0].dims, 4, "");
         const size_t numChannels = inputsData[0].size[1];
@@ -717,7 +711,6 @@ struct DataLayer : public Layer
                                                                 {numChannels});
         weights->allocate();
         weights->set(std::vector<float>(numChannels, scaleFactors[0]));
-        ieLayer->_weights = weights;
 
         // Mean subtraction
         auto biases = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
@@ -729,8 +722,21 @@ struct DataLayer : public Layer
             biasesVec[i] = -means[0][i] * scaleFactors[0];
         }
         biases->set(biasesVec);
-        ieLayer->_biases = biases;
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+        ieLayer.setWeights(weights);
+        ieLayer.setBiases(biases);
+#else
+        InferenceEngine::LayerParams lp;
+        lp.name = name;
+        lp.type = "ScaleShift";
+        lp.precision = InferenceEngine::Precision::FP32;
+        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
+
+        ieLayer->_weights = weights;
+        ieLayer->_biases = biases;
+#endif
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();
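
Note on the hunk above: DataLayer now folds input preprocessing into one IE
ScaleShift layer, built through the 2018R5 Builder API, with per-channel
weights = scaleFactors[0] and biases = -means[0][i] * scaleFactors[0], so the
layer computes w*x + b = scale * (x - mean). A minimal standalone check of
that folding (not part of the patch):

    #include <cstdio>

    int main()
    {
        const float scale = 1.0f / 255.0f, mean = 127.5f, x = 200.0f;
        const float w = scale;          // ScaleShift weight for one channel
        const float b = -mean * scale;  // ScaleShift bias for one channel
        std::printf("%f == %f\n", w * x + b, scale * (x - mean));
        return 0;
    }
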
@@ -1451,7 +1457,11 @@ struct Net::Impl
                 if (layerNet != ieInpNode->net)
                 {
                     // layerNet is empty or nodes are from different graphs.
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+                    ieInpNode->net->addOutput(ieInpNode->layer.getName());
+#else
                     ieInpNode->net->addOutput(ieInpNode->layer->name);
+#endif
                 }
             }
         }
@ -1527,7 +1537,7 @@ struct Net::Impl |
|
|
|
|
|
|
|
|
|
// Build Inference Engine networks from sets of layers that support this
|
|
|
|
|
// backend. Split a whole model on several Inference Engine networks if
|
|
|
|
|
// some of layers is not implemented.
|
|
|
|
|
// some of layers are not implemented.
|
|
|
|
|
|
|
|
|
|
// Set of all input and output blobs wrappers for current network.
|
|
|
|
|
std::map<LayerPin, Ptr<BackendWrapper> > netBlobsWrappers; |
|
|
|
@@ -1543,7 +1553,7 @@ struct Net::Impl
             {
                 addInfEngineNetOutputs(ld);
                 net = Ptr<InfEngineBackendNet>();
-                netBlobsWrappers.clear();
+                netBlobsWrappers.clear();  // Is not used for R5 release but we don't wrap it to #ifdef.
                 layer->preferableTarget = DNN_TARGET_CPU;
                 continue;
             }
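
The hunk above is the per-layer CPU fallback that implements the splitting
described earlier: a layer the IE backend cannot run terminates the current
IE subgraph and is pinned to DNN_TARGET_CPU. A reduced sketch of that control
flow (identifiers as in the patch; the surrounding layer loop is omitted and
the guarding condition is assumed, not quoted):

    if (!layer->supportBackend(preferableBackend))
    {
        addInfEngineNetOutputs(ld);        // expose outputs of the finished subgraph
        net = Ptr<InfEngineBackendNet>();  // a new IE network starts after this layer
        layer->preferableTarget = DNN_TARGET_CPU;
        continue;
    }
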
@@ -1561,12 +1571,13 @@ struct Net::Impl
                 if (ieInpNode->net != net)
                 {
                     net = Ptr<InfEngineBackendNet>();
-                    netBlobsWrappers.clear();
+                    netBlobsWrappers.clear();  // Is not used for R5 release but we don't wrap it to #ifdef.
                     break;
                 }
             }
         }
 
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
         // The same blobs wrappers cannot be shared between two Inference Engine
         // networks because of explicit references between layers and blobs.
         // So we need to rewrap all the external blobs.
@@ -1583,6 +1594,7 @@ struct Net::Impl
                 ld.inputBlobsWrappers[i] = it->second;
             }
             netBlobsWrappers[LayerPin(ld.id, 0)] = ld.outputBlobsWrappers[0];
+#endif  // IE < R5
 
             Ptr<BackendNode> node;
             if (!net.empty())
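
For context on the two hunks above: in the pre-R5 path a wrapper holds
explicit references into one IE network, so a blob that feeds layers of a new
network must be wrapped again, and netBlobsWrappers memoizes the rewrapped
instance per LayerPin. A hand-wavy sketch of that memoization, where
rewrapForCurrentNet is a hypothetical stand-in for the wrapper duplication
the patch performs:

    LayerPin inPin = ld.inputBlobsId[i];
    auto it = netBlobsWrappers.find(inPin);
    if (it == netBlobsWrappers.end())
        it = netBlobsWrappers.insert({inPin, rewrapForCurrentNet(ld.inputBlobsWrappers[i])}).first;
    ld.inputBlobsWrappers[i] = it->second;  // all consumers share one wrapper per network
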
@@ -1613,6 +1625,40 @@ struct Net::Impl
             CV_Assert(!ieNode.empty());
             ieNode->net = net;
 
+            // Convert weights in FP16 for specific targets.
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+            if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
+                 preferableTarget == DNN_TARGET_MYRIAD ||
+                 preferableTarget == DNN_TARGET_FPGA) && !fused)
+            {
+                auto& blobs = ieNode->layer.getConstantData();
+                if (blobs.empty())
+                {
+                    // In case of non weightable layer we have to specify
+                    // it's precision adding dummy blob.
+                    auto blob = InferenceEngine::make_shared_blob<int16_t>(
+                                    InferenceEngine::Precision::FP16,
+                                    InferenceEngine::Layout::C, {1});
+                    blob->allocate();
+                    blobs[""] = blob;
+                }
+                else
+                {
+                    for (auto& it : blobs)
+                        it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
+                }
+            }
+
+            if (!fused)
+                net->addLayer(ieNode->layer);
+
+            net->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers, ieNode->layer.getName());
+            net->addBlobs(ld.inputBlobsWrappers);
+            net->addBlobs(ld.outputBlobsWrappers);
+            addInfEngineNetOutputs(ld);
+
+#else // IE >= R5
+
             auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(ieNode->layer);
             if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
                  preferableTarget == DNN_TARGET_MYRIAD ||
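
The FP16 branch above downcasts every constant blob of a layer for the
OPENCL_FP16, MYRIAD and FPGA targets (with a dummy FP16 blob marking the
precision of weightless layers). The convertFp16 used there is a dnn-internal
helper for IE blobs; the same conversion on a plain cv::Mat via the public
API of that era looks like this (illustrative sketch only, not the patch):

    #include <opencv2/core.hpp>

    int main()
    {
        cv::Mat weights32 = cv::Mat::ones(3, 3, CV_32F) * 0.5f;
        cv::Mat weights16;
        cv::convertFp16(weights32, weights16);  // FP32 -> FP16 (stored as 16-bit)
        return 0;
    }
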
@@ -1650,10 +1696,10 @@ struct Net::Impl
             if (!fused)
                 net->addLayer(ieNode->layer);
             addInfEngineNetOutputs(ld);
+#endif  // IE >= R5
         }
 
         // Initialize all networks.
-        std::set<InfEngineBackendNet> initializedNets;
         for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
         {
             LayerData &ld = it->second;
@@ -2546,7 +2592,11 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
     Net cvNet;
     cvNet.setInputsNames(inputsNames);
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
+#else
     Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(0));
+#endif
     backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
     for (auto& it : ieNet.getOutputsInfo())
     {
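
The hunk above gives readFromModelOptimizer a Builder-API placeholder node
under 2018R5. For reference, how this entry point is reached from user code
(usage sketch; the model file names are placeholders):

    #include <opencv2/dnn.hpp>

    int main()
    {
        cv::dnn::Net net = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        // net.setInput(blob); net.forward();
        return 0;
    }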