dnn: fixed IE support on Windows

Branch: pull/11575/head
Author: Maksim Shabunin
parent 2f9b4439af
commit 895e10c317
  1. modules/dnn/src/layers/batch_norm_layer.cpp (2 changes)
  2. modules/dnn/src/layers/convolution_layer.cpp (2 changes)
  3. modules/dnn/src/layers/fully_connected_layer.cpp (4 changes)
  4. modules/dnn/src/layers/normalize_bbox_layer.cpp (2 changes)
  5. modules/dnn/src/layers/scale_layer.cpp (2 changes)
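All five hunks make the same fix. wrapToInfEngineBlob takes its shape argument as a braced initializer list for an Inference Engine dimension vector whose elements are size_t, and list-initializing a size_t element from a runtime int is a narrowing conversion: GCC demotes it to a warning by default, but MSVC rejects it as a hard error, which is what broke the Windows build of the Inference Engine backend. The commit either re-declares the count as size_t or casts at the call site. Below is a minimal self-contained sketch of the failure mode and both fix styles; the SizeVector alias and the channelCount() helper are stand-ins for illustration, not Inference Engine API.

    #include <cstddef>
    #include <vector>

    // Stand-in for InferenceEngine::SizeVector (a std::vector<size_t>), so
    // the sketch compiles without the Inference Engine headers.
    typedef std::vector<std::size_t> SizeVector;

    // Hypothetical stand-in for Mat::total(), whose result the layers were
    // storing in an int before this commit.
    int channelCount() { return 3; }

    int main()
    {
        int numChannels = channelCount();  // runtime int, not a constant expression

        // Before the fix: initializing a size_t element from a runtime int is
        // a narrowing conversion. MSVC rejects it (error C2398), while GCC
        // only warns by default -- hence the Windows-only breakage.
        //SizeVector bad{numChannels};     // error C2398 under MSVC

        // Fix style 1 (batch_norm, normalize_bbox, scale): keep the count in
        // a size_t so the initializer list needs no conversion at all.
        const std::size_t numChannelsSz = channelCount();
        SizeVector fixedByType{numChannelsSz};

        // Fix style 2 (convolution, fully_connected): cast at the call site
        // when the surrounding code still needs the int.
        SizeVector fixedByCast{(std::size_t)numChannels};

        return (fixedByType == fixedByCast) ? 0 : 1;
    }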

modules/dnn/src/layers/batch_norm_layer.cpp
@@ -283,7 +283,7 @@ public:
         lp.precision = InferenceEngine::Precision::FP32;
         std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
-        const int numChannels = weights_.total();
+        const size_t numChannels = weights_.total();
         ieLayer->_weights = wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C);
         ieLayer->_biases = wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C);

modules/dnn/src/layers/convolution_layer.cpp
@@ -456,7 +456,7 @@ public:
         if (hasBias() || fusedBias)
         {
             Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
-            ieLayer->_biases = wrapToInfEngineBlob(biasesMat, {outCn}, InferenceEngine::Layout::C);
+            ieLayer->_biases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
         }
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #endif  // HAVE_INF_ENGINE

modules/dnn/src/layers/fully_connected_layer.cpp
@@ -427,9 +427,9 @@ public:
         std::shared_ptr<InferenceEngine::FullyConnectedLayer> ieLayer(new InferenceEngine::FullyConnectedLayer(lp));
         ieLayer->_out_num = blobs[0].size[0];
-        ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {blobs[0].size[0], blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW);
+        ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW);
         if (blobs.size() > 1)
-            ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {ieLayer->_out_num}, InferenceEngine::Layout::C);
+            ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)ieLayer->_out_num}, InferenceEngine::Layout::C);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();

modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -254,7 +254,7 @@ public:
         ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0";
         ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0";
-        const int numChannels = blobs[0].total();
+        const size_t numChannels = blobs[0].total();
         ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #endif  // HAVE_INF_ENGINE

modules/dnn/src/layers/scale_layer.cpp
@@ -178,7 +178,7 @@ public:
         lp.precision = InferenceEngine::Precision::FP32;
         std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
-        const int numChannels = blobs[0].total();
+        const size_t numChannels = blobs[0].total();
         ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
         if (hasBias)
             ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {numChannels}, InferenceEngine::Layout::C);
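A note on the two fix styles above: where the count is a fresh local (numChannels), the declaration itself becomes size_t, which also drops the implicit truncation of Mat::total()'s size_t result into an int; where the value comes from pre-existing code (outCn, blobs[0].size[...], ieLayer->_out_num), a (size_t) cast at the call site keeps the rest of the function untouched. Both are behavior-preserving for the nonnegative counts involved; only the compile-time narrowing diagnostic goes away.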
