@@ -46,14 +46,14 @@ public:
     {
         std::vector<Mat> inputs;
         inputs_arr.getMatVector(inputs);
-        hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
+        hasWeights = blobs.size() == 2 || (blobs.size() <= 1 && !hasBias);
         CV_Assert((inputs.size() == 2 && blobs.empty()) || blobs.size() == (int)hasWeights + (int)hasBias);
     }

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
         return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1) ||
+               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1 && !blobs.empty()) ||
                (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && axis > 0);
     }
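
Note: the rewritten condition also has to hold when blobs is empty, i.e. when the scale
arrives as a second network input rather than as trainable weights; that is also why the
NN Builder branch now additionally requires !blobs.empty(). A minimal stand-alone sketch
of the predicate (computeHasWeights is a hypothetical helper, not part of the patch):

    #include <cstdio>

    // Mirrors: hasWeights = blobs.size() == 2 || (blobs.size() <= 1 && !hasBias);
    static bool computeHasWeights(size_t numBlobs, bool hasBias)
    {
        return numBlobs == 2 || (numBlobs <= 1 && !hasBias);
    }

    int main()
    {
        // numBlobs == 0 is the new case: weights come from a second input.
        std::printf("%d\n", computeHasWeights(2, true));   // 1: weights + bias blobs
        std::printf("%d\n", computeHasWeights(1, true));   // 0: the single blob is the bias
        std::printf("%d\n", computeHasWeights(1, false));  // 1: the single blob is the weights
        std::printf("%d\n", computeHasWeights(0, false));  // 1: weights arrive at run time
        return 0;
    }
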
@@ -78,10 +78,9 @@ public:
         Mat &outBlob = outputs[0];
         // There is a mode when we multiply a first blob by a second one
         // instead of trainable weights.
-        Mat weights = blobs.empty() ? inputs[1] : (hasWeights ? blobs[0] : Mat());
-        Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
-        if (!weights.empty())
-            weights = weights.reshape(1, 1);
+        Mat weights = hasWeights ? (blobs.empty() ? inputs[1] : blobs[0]).reshape(1, 1) : Mat();
+        Mat bias = hasBias ? (blobs.empty() ? inputs[1] : blobs.back()).reshape(1, 1) : Mat();
         MatShape inpShape = shape(inpBlob);
         const int numWeights = !weights.empty() ? weights.total() : bias.total();
         CV_Assert(numWeights != 0);
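
For context, the forward path this hunk touches reduces to a per-channel out = in * w + b,
with w and b flattened to a single row by reshape(1, 1) and broadcast along the channel
axis. A stand-alone sketch of that arithmetic (hypothetical names, NCHW layout with
axis == 1 assumed):

    #include <cstdio>
    #include <vector>

    // Channel-wise scale/shift over an NCHW buffer: out = in * w[c] + b[c].
    // Empty w or b falls back to identity scale / zero shift, as in the layer.
    static void scaleShift(std::vector<float>& data, int n, int c, int hw,
                           const std::vector<float>& w, const std::vector<float>& b)
    {
        for (int i = 0; i < n; ++i)
            for (int j = 0; j < c; ++j)
            {
                const float wj = w.empty() ? 1.f : w[j];
                const float bj = b.empty() ? 0.f : b[j];
                float* p = &data[(size_t)(i * c + j) * hw];
                for (int k = 0; k < hw; ++k)
                    p[k] = p[k] * wj + bj;
            }
    }

    int main()
    {
        std::vector<float> data(2 * 4, 1.f);               // N=1, C=2, H*W=4
        scaleShift(data, 1, 2, 4, {2.f, 3.f}, {0.5f, 0.25f});
        std::printf("%g %g\n", data[0], data[4]);          // 2.5 3.25
        return 0;
    }
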
@@ -229,28 +228,40 @@ public:
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(!blobs.empty());
-        const size_t numChannels = blobs[0].total();
-        auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto ieInpNode0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto ieInpNode1 = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
+
+        size_t numChannels = 1;
+        if (blobs.empty())
+            for (const size_t& dim : ieInpNode1->get_shape())
+                numChannels *= dim;
+        else
+            numChannels = blobs[0].total();

-        std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
+        std::vector<size_t> shape(ieInpNode0->get_shape().size(), 1);
         int cAxis = clamp(axis, shape.size());
         shape[cAxis] = numChannels;

-        auto node = ieInpNode;
+        auto node = ieInpNode0;
         if (hasWeights)
         {
-            auto weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                                 ngraph::Shape(shape), blobs[0].data);
+            auto weight = blobs.empty() ? ieInpNode1 :
+                          std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
             node = std::make_shared<ngraph::op::v1::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
         }
         if (hasBias || !hasWeights)
         {
-            auto bias = hasBias ?
-                        std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                               ngraph::Shape(shape), blobs.back().data) :
-                        std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                               ngraph::Shape(shape), std::vector<float>(numChannels, 0).data());
+            std::shared_ptr<ngraph::Node> bias;
+            if (hasBias)
+            {
+                bias = blobs.empty() ? ieInpNode1 :
+                       std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                              ngraph::Shape(shape), blobs.back().data);
+            }
+            else
+                bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                              ngraph::Shape(shape), std::vector<float>(numChannels, 0).data());
             node = std::make_shared<ngraph::op::v1::Add>(node, bias, ngraph::op::AutoBroadcastType::NUMPY);
         }
         return Ptr<BackendNode>(new InfEngineNgraphNode(node));
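
The shape handling above is what makes the nGraph constants broadcast per channel: a
rank-matched vector of ones with numChannels placed at the clamped axis (e.g. {1, 16, 1, 1}
for NCHW with axis == 1), after which AutoBroadcastType::NUMPY does the rest. A sketch of
that computation in isolation (constantShape is hypothetical, and the negative-axis line
only approximates the layer's clamp helper):

    #include <cstdio>
    #include <vector>

    // Hypothetical analogue of the constant-shape computation in initNgraph.
    static std::vector<size_t> constantShape(size_t rank, int axis, size_t numChannels)
    {
        const int cAxis = axis < 0 ? axis + (int)rank : axis;  // clamp() analogue
        std::vector<size_t> shape(rank, 1);
        shape[cAxis] = numChannels;
        return shape;
    }

    int main()
    {
        for (size_t d : constantShape(4, 1, 16))  // 4-D input, axis 1, 16 channels
            std::printf("%zu ", d);               // prints: 1 16 1 1
        std::printf("\n");
        return 0;
    }
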
@@ -259,8 +270,8 @@ public:

     void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
     {
-        scale = hasWeights ? blobs[0] : Mat();
-        shift = hasBias ? blobs.back() : Mat();
+        scale = (hasWeights && !blobs.empty()) ? blobs[0] : Mat();
+        shift = (hasBias && !blobs.empty()) ? blobs.back() : Mat();
     }

     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
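
getScaleShift feeds the fusion logic: a preceding layer may fold a constant scale/shift
into its own weights. When blobs is empty the weights only exist at inference time, so the
guarded version reports empty Mats (nothing to fuse) instead of dereferencing blobs[0].
A hypothetical caller-side sketch of how such a result is typically consumed:

    #include <opencv2/core.hpp>
    #include <cstdio>

    // Hypothetical consumer: fuse only when constant scale/shift data exists.
    static bool canFuseScaleShift(const cv::Mat& scale, const cv::Mat& shift)
    {
        return !scale.empty() || !shift.empty();
    }

    int main()
    {
        cv::Mat scale = cv::Mat::ones(1, 16, CV_32F), shift;  // constant weights, no bias
        std::printf("fusable: %d\n", canFuseScaleShift(scale, shift));  // 1

        scale.release();  // blobs.empty() case: weights arrive as a second input
        std::printf("fusable: %d\n", canFuseScaleShift(scale, shift));  // 0
        return 0;
    }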