dnn cann backend: add hardswish, layernorm and instance norm for cann and bug fix (#24462)

* add hardswish for cann

* gemm cann bug fix

* fix indentation

* cann: add layer norm

* cann: add instance norm

* add supportBackend

* cann: layer norm does not support axis=-1 due to 1d mat issue

* disable instance norm for now

* fix doc

* remove tensor desc initialization for 1D tensor
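
As a quick usage sketch (not part of this commit; the file name and input shape are placeholders), the new CANN paths are exercised through the usual backend/target selection:

    // Hypothetical usage: run an ONNX model on the CANN backend (Ascend NPU).
    // "model.onnx" and the 1x3x224x224 input are placeholders; layers without
    // CANN support fall back to the default OpenCV implementation.
    #include <opencv2/dnn.hpp>

    int main()
    {
        cv::dnn::Net net = cv::dnn::readNetFromONNX("model.onnx");
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_CANN);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_NPU);

        const int sz[] = {1, 3, 224, 224};
        cv::Mat input = cv::Mat::zeros(4, sz, CV_32F);
        net.setInput(input);
        cv::Mat out = net.forward();
        return 0;
    }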
Author: Yuantao Feng (committed via GitHub)
parent ec97c38ff9
commit 024dfd54af
Changed files:
  1. modules/dnn/src/layers/elementwise_layers.cpp (25 lines changed)
  2. modules/dnn/src/layers/gemm_layer.cpp (1 line changed)
  3. modules/dnn/src/layers/instance_norm_layer.cpp (49 lines changed)
  4. modules/dnn/src/layers/layer_norm.cpp (59 lines changed)

@@ -1890,7 +1890,9 @@ struct HardSwishFunctor : public BaseDefaultFunctor<HardSwishFunctor>
     bool supportBackend(int backendId, int)
     {
-        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
+        return backendId == DNN_BACKEND_OPENCV ||
+               backendId == DNN_BACKEND_CUDA ||
+               backendId == DNN_BACKEND_CANN;
     }

     inline float calculate(float x) const
@@ -1905,6 +1907,27 @@ struct HardSwishFunctor : public BaseDefaultFunctor<HardSwishFunctor>
     }
 #endif

+#ifdef HAVE_CANN
+    Ptr<BackendNode> initCannOp(const std::string& name,
+                                const std::vector<Ptr<BackendWrapper> >& inputs,
+                                const std::vector<Ptr<BackendNode> >& nodes)
+    {
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
+
+        auto op = std::make_shared<ge::op::HardSwish>(name);
+
+        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+        op->set_input_x_by_name(*op_x, x->name.c_str());
+        auto x_desc = x->getTensorDesc();
+        op->update_input_desc_x(*x_desc);
+
+        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_y(*output_desc);
+
+        return Ptr<BackendNode>(new CannBackendNode(op));
+    }
+#endif
+
     int64 getFLOPSPerElement() const { return 1; }
 };
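
For reference, the elementwise definition the new CANN HardSwish node must match (the calculate() shown in context above) is hardswish(x) = x * clamp(x + 3, 0, 6) / 6. A minimal standalone check of that formula, illustrative rather than OpenCV's actual code:

    #include <algorithm>
    #include <cassert>
    #include <cmath>

    // Reference HardSwish: x * clamp(x + 3, 0, 6) / 6.
    static float hardswish_ref(float x)
    {
        return x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) / 6.0f;
    }

    int main()
    {
        assert(hardswish_ref(-4.0f) == 0.0f);   // fully gated off below -3
        assert(hardswish_ref(4.0f) == 4.0f);    // identity above +3
        assert(std::fabs(hardswish_ref(1.0f) - 2.0f / 3.0f) < 1e-6f);
        return 0;
    }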

@@ -274,6 +274,7 @@ public:
         op->update_input_desc_bias(*(op_const_C->getTensorDesc()));

         // set outputs
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_y(*output_desc);

         return Ptr<BackendNode>(new CannBackendNode(op));
     }
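
The single added line above is the "gemm cann bug fix" from the commit message: the output ge::TensorDesc was constructed but, before this change, never attached to the operator, so the Gemm node entered graph construction without a registered output descriptor. Calling update_output_desc_y(*output_desc) now applies it.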

@@ -6,6 +6,9 @@
 #include <opencv2/dnn/shape_utils.hpp>
 #include "./cpu_kernels/fast_norm.hpp"

+// CANN backend
+#include "../op_cann.hpp"
+
 // OpenVINO backend
 #include "../op_inf_engine.hpp"
 #include "../ie_ngraph.hpp"
@@ -41,6 +44,7 @@ public:
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA;
+               // backendId == DNN_BACKEND_CANN; // not supported due to 1d mat shape issue
     }

     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -169,6 +173,51 @@ public:
     }
 #endif

+#ifdef HAVE_CANN
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> >& inputs,
+                                      const std::vector<Ptr<BackendWrapper> >& outputs,
+                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE {
+        auto input_tensor_wrapper = inputs[0].dynamicCast<CannBackendWrapper>();
+        auto input_tensor_desc = input_tensor_wrapper->getTensorDesc();
+
+        auto scale_tensor_wrapper = inputs[1].dynamicCast<CannBackendWrapper>();
+        auto scale_tensor_desc = scale_tensor_wrapper->getTensorDesc();
+
+        auto bias_tensor_wrapper = inputs[2].dynamicCast<CannBackendWrapper>();
+        auto bias_tensor_desc = bias_tensor_wrapper->getTensorDesc();
+
+        auto last_node = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+        auto scale_node = nodes[1].dynamicCast<CannBackendNode>()->getOp();
+        auto bias_node = nodes[2].dynamicCast<CannBackendNode>()->getOp();
+
+        auto op = std::make_shared<ge::op::InstanceNorm>(name);
+
+        // set attrs
+        op->set_attr_epsilon(epsilon);
+
+        // set inputs : x
+        op->set_input_x_by_name(*last_node, input_tensor_wrapper->name.c_str());
+        op->update_input_desc_x(*input_tensor_desc);
+        // set inputs : gamma
+        op->set_input_gamma_by_name(*scale_node, scale_tensor_wrapper->name.c_str());
+        op->update_input_desc_gamma(*scale_tensor_desc);
+        // set inputs : beta
+        op->set_input_beta_by_name(*bias_node, bias_tensor_wrapper->name.c_str());
+        op->update_input_desc_beta(*bias_tensor_desc);
+
+        // set outputs
+        auto output_desc_y = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_y(*output_desc_y);
+        auto output_desc_mean = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_mean(*output_desc_mean);
+        auto output_desc_var = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_variance(*output_desc_var);
+
+        return Ptr<BackendNode>(new CannBackendNode(op));
+    }
+#endif // HAVE_CANN
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE {
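
A hedged reference for what the ge::op::InstanceNorm node above is configured to compute, under the standard NCHW definition (this mirrors the math, not the CANN kernel): each (n, c) slice is normalized over its spatial extent, then scaled by gamma[c] and shifted by beta[c]:

    #include <cmath>

    // Reference sketch of instance normalization over an NCHW float tensor.
    // All names here are illustrative; epsilon matches the op attribute above.
    void instance_norm_ref(const float* x, const float* gamma, const float* beta,
                           float* y, int N, int C, int H, int W, float epsilon)
    {
        const int hw = H * W;
        for (int n = 0; n < N; ++n)
            for (int c = 0; c < C; ++c)
            {
                const float* src = x + (n * C + c) * hw;
                float* dst = y + (n * C + c) * hw;
                float mean = 0.f, var = 0.f;
                for (int i = 0; i < hw; ++i) mean += src[i];
                mean /= hw;
                for (int i = 0; i < hw; ++i) var += (src[i] - mean) * (src[i] - mean);
                var /= hw;
                const float inv_std = 1.f / std::sqrt(var + epsilon);
                for (int i = 0; i < hw; ++i)
                    dst[i] = gamma[c] * (src[i] - mean) * inv_std + beta[c];
            }
    }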

@@ -6,6 +6,9 @@
 #include "layers_common.hpp"
 #include "cpu_kernels/fast_norm.hpp"

+// CANN backend
+#include "../op_cann.hpp"
+
 namespace cv { namespace dnn {

 class LayerNormLayerImpl CV_FINAL : public LayerNormLayer
@ -22,7 +25,8 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV;
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_CANN && axis != -1); // axis=-1 not supported due to 1d mat shape problem
}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
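
The axis != -1 guard above reflects the commit message's "1d mat shape problem": axis = -1 denotes the last axis, and normalizing over the last axis leaves the scale/bias Mats 1-D, which the CANN tensor descriptors apparently cannot yet represent. A standalone sketch of the usual negative-axis normalization (OpenCV ships a similar normalize_axis helper in opencv2/dnn/shape_utils.hpp):

    #include <cassert>

    // Illustrative re-implementation of negative-axis normalization.
    static int normalize_axis_ref(int axis, int dims)
    {
        return axis < 0 ? axis + dims : axis;
    }

    int main()
    {
        // axis = -1 on a 4-D (NCHW) input means axis 3, the last one;
        // that is exactly the case the CANN path above refuses.
        assert(normalize_axis_ref(-1, 4) == 3);
        assert(normalize_axis_ref(1, 4) == 1);
        return 0;
    }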
@@ -90,6 +94,59 @@ public:
             fastNorm(input, scale, output, epsilon, static_cast<size_t>(axis));
         }
     }

+#ifdef HAVE_CANN
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> >& inputs,
+                                      const std::vector<Ptr<BackendWrapper> >& outputs,
+                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE {
+        CV_CheckEQ(inputs.size(), static_cast<size_t>(3), "LayerNorm/CANN: requires three input wrappers");
+        CV_CheckEQ(nodes.size(), static_cast<size_t>(3), "LayerNorm/CANN: requires three input nodes");
+
+        auto input_tensor_wrapper = inputs[0].dynamicCast<CannBackendWrapper>();
+        auto input_tensor_desc = input_tensor_wrapper->getTensorDesc();
+        CV_CheckNE(axis, static_cast<int>(input_tensor_desc->GetShape().GetDimNum() - 1),
+                   "LayerNorm: CANN does not support axis set as last axis due to 1D mat compatibility issue");
+
+        auto scale_tensor_wrapper = inputs[1].dynamicCast<CannBackendWrapper>();
+        auto scale_tensor_desc = scale_tensor_wrapper->getTensorDesc();
+
+        auto bias_tensor_wrapper = inputs[2].dynamicCast<CannBackendWrapper>();
+        auto bias_tensor_desc = bias_tensor_wrapper->getTensorDesc();
+
+        auto last_node = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+        auto scale_node = nodes[1].dynamicCast<CannBackendNode>()->getOp();
+        auto bias_node = nodes[2].dynamicCast<CannBackendNode>()->getOp();
+
+        auto op = std::make_shared<ge::op::LayerNorm>(name);
+
+        // set attrs
+        op->set_attr_begin_norm_axis(axis);
+        op->set_attr_begin_params_axis(axis);
+        op->set_attr_epsilon(epsilon);
+
+        // set inputs : x
+        op->set_input_x_by_name(*last_node, input_tensor_wrapper->name.c_str());
+        op->update_input_desc_x(*input_tensor_desc);
+        // set inputs : gamma
+        op->set_input_gamma_by_name(*scale_node, scale_tensor_wrapper->name.c_str());
+        op->update_input_desc_gamma(*scale_tensor_desc);
+        // set inputs : beta
+        op->set_input_beta_by_name(*bias_node, bias_tensor_wrapper->name.c_str());
+        op->update_input_desc_beta(*bias_tensor_desc);
+
+        // set outputs
+        auto output_desc_y = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_y(*output_desc_y);
+        auto output_desc_mean = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_mean(*output_desc_mean);
+        auto output_desc_var = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_variance(*output_desc_var);
+
+        return Ptr<BackendNode>(new CannBackendNode(op));
+    }
+#endif // HAVE_CANN
 };

 Ptr<LayerNormLayer> LayerNormLayer::create(const LayerParams& params)
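
For completeness, a hedged CPU sketch of the reduction ge::op::LayerNorm is configured for above (begin_norm_axis = begin_params_axis = axis): dims from axis onward are flattened into one inner group that is normalized, scaled, and shifted. Illustrative only, not the CANN kernel:

    #include <cmath>

    // Reference layer normalization over a flattened [outer, inner] view,
    // where inner is the product of dims from `axis` onward.
    void layer_norm_ref(const float* x, const float* gamma, const float* beta,
                        float* y, int outer, int inner, float epsilon)
    {
        for (int o = 0; o < outer; ++o)
        {
            const float* src = x + o * inner;
            float* dst = y + o * inner;
            float mean = 0.f, var = 0.f;
            for (int i = 0; i < inner; ++i) mean += src[i];
            mean /= inner;
            for (int i = 0; i < inner; ++i) var += (src[i] - mean) * (src[i] - mean);
            var /= inner;
            const float inv_std = 1.f / std::sqrt(var + epsilon);
            for (int i = 0; i < inner; ++i)
                dst[i] = gamma[i] * (src[i] - mean) * inv_std + beta[i];
        }
    }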
