Merge pull request #26079 from Abdurrahheem:ash/hardmax-support

Add Support for Hardmax Layer #26079

This PR adds support for the `Hardmax` layer, which was previously listed in the conformance deny list. 

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
pull/26100/head
Abduragim Shtanchaev 3 months ago committed by GitHub
parent 8def9f75c8
commit 0f8bbf4677
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 6
      modules/dnn/include/opencv2/dnn/all_layers.hpp
  2. 1
      modules/dnn/src/init.cpp
  3. 140
      modules/dnn/src/layers/harmax.cpp
  4. 8
      modules/dnn/src/onnx/onnx_importer.cpp
  5. 7
      modules/dnn/test/test_onnx_conformance_layer_parser_denylist.inl.hpp

@ -274,6 +274,12 @@ CV__DNN_INLINE_NS_BEGIN
static Ptr<EinsumLayer> create(const LayerParams& params); static Ptr<EinsumLayer> create(const LayerParams& params);
}; };
/** @brief Hardmax layer (ONNX `Hardmax` operator).
 *
 * Writes 1 at the position of the maximum element along the reduction axis
 * and 0 everywhere else. The axis is read from LayerParams ("axis", default -1).
 */
class CV_EXPORTS HardmaxLayer : public Layer
{
public:
static Ptr<HardmaxLayer> create(const LayerParams& params);
};
class CV_EXPORTS BaseConvolutionLayer : public Layer class CV_EXPORTS BaseConvolutionLayer : public Layer
{ {
public: public:

@ -196,6 +196,7 @@ void initializeLayerFactory()
CV_DNN_REGISTER_LAYER_CLASS(GRU, GRULayer); CV_DNN_REGISTER_LAYER_CLASS(GRU, GRULayer);
CV_DNN_REGISTER_LAYER_CLASS(CumSum, CumSumLayer); CV_DNN_REGISTER_LAYER_CLASS(CumSum, CumSumLayer);
CV_DNN_REGISTER_LAYER_CLASS(Einsum, EinsumLayer); CV_DNN_REGISTER_LAYER_CLASS(Einsum, EinsumLayer);
CV_DNN_REGISTER_LAYER_CLASS(Hardmax, HardmaxLayer);
CV_DNN_REGISTER_LAYER_CLASS(Scatter, ScatterLayer); CV_DNN_REGISTER_LAYER_CLASS(Scatter, ScatterLayer);
CV_DNN_REGISTER_LAYER_CLASS(ScatterND, ScatterNDLayer); CV_DNN_REGISTER_LAYER_CLASS(ScatterND, ScatterNDLayer);

@ -0,0 +1,140 @@
#include <inttypes.h>
#include <opencv2/dnn/shape_utils.hpp>
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../ie_ngraph.hpp"
namespace cv
{
namespace dnn
{
// CPU implementation of the ONNX Hardmax operator: along `axis`, the first
// maximal element of each 1-D slice is set to 1 and all other elements to 0.
class LayerHardmaxImpl CV_FINAL : public HardmaxLayer
{
public:
    int axis;  // reduction axis; may be negative, normalized in forward()

    LayerHardmaxImpl(const LayerParams& params)
    {
        // NOTE(review): other dnn layers call setParamsFrom() so that
        // `name` (used by tracing below) is populated — confirm intended.
        setParamsFrom(params);
        axis = params.get<int>("axis", -1);
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        // Only the plain OpenCV CPU backend is implemented.
        return backendId == DNN_BACKEND_OPENCV;
    }

    void getTypes(const std::vector<MatType>& inputs,
                  const int requiredOutputs,
                  const int requiredInternals,
                  std::vector<MatType>& outputs,
                  std::vector<MatType>& internals) const CV_OVERRIDE
    {
        CV_Assert(inputs.size());
        for (auto input : inputs)
        {
            CV_CheckType(input, input == CV_32F || input == CV_8S || input == CV_8U || input == CV_32S || input == CV_64S || input == CV_Bool, "");
        }
        // Output has the same element type as the input.
        outputs.assign(requiredOutputs, inputs[0]);
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        CV_CheckEQ(inputs.size(), (size_t)1, "Hardmax: one input is expected");
        // Shape is preserved: Hardmax is an element-selection, not a reduction.
        outputs.resize(1);
        outputs[0] = inputs[0];
        return false;
    }

    void forward(InputArrayOfArrays inputs_arr,
                 OutputArrayOfArrays outputs_arr,
                 OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        // FP16 is handled by converting to FP32 in the generic fallback.
        if (inputs_arr.depth() == CV_16F)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        Mat src = inputs[0];
        Mat dst = outputs[0];

        axis = normalize_axis(axis, src.dims);

        // Zero the whole output; only the argmax positions are written below.
        memset(dst.ptr(), 0, dst.total() * dst.elemSize());

        switch (src.depth())
        {
            case CV_8U:   hardmaxApply<uchar>(src, dst, axis); break;
            case CV_8S:   hardmaxApply<schar>(src, dst, axis); break;
            case CV_16U:  hardmaxApply<ushort>(src, dst, axis); break;
            case CV_16S:  hardmaxApply<short>(src, dst, axis); break;
            case CV_32S:  hardmaxApply<int>(src, dst, axis); break;
            // CV_64S / CV_Bool are accepted by getTypes(), so they must be
            // dispatched here as well (previously fell through to the error).
            case CV_64S:  hardmaxApply<int64_t>(src, dst, axis); break;
            case CV_Bool: hardmaxApply<uchar>(src, dst, axis); break;
            case CV_32F:  hardmaxApply<float>(src, dst, axis); break;
            case CV_64F:  hardmaxApply<double>(src, dst, axis); break;
            default:
                CV_Error(Error::StsUnsupportedFormat, "Unsupported input data type");
        }
    }

    /** Applies Hardmax on contiguous data of element type T.
     *
     * The tensor is viewed as [outer_size, mid_size, inner_size] with the
     * reduction running over mid_size; outer slices are processed in parallel.
     */
    template <typename T>
    void hardmaxApply(const cv::Mat& src, cv::Mat& dst, const int axis)
    {
        const auto *src_ptr = src.ptr<const T>();
        auto *dst_ptr = dst.ptr<T>();

        const size_t outer_size = src.total(0, axis);
        const auto mid_size = static_cast<size_t>(src.size[axis]);
        const size_t inner_size = src.total(axis + 1);
        const size_t outer_step = mid_size * inner_size;

        double nstripes = (double) outer_size * inner_size / 1024.0;
        parallel_for_(Range(0, (int)outer_size), [&](const Range& range) {
            for (int outer = range.start; outer < range.end; ++outer)
            {
                const size_t outer_offset = (size_t)outer * outer_step;
                for (size_t inner = 0; inner < inner_size; ++inner)
                {
                    // Seed with the slice's own first element: a sentinel of
                    // numeric_limits<T>::lowest() with max_idx = 0 would mark
                    // absolute index 0 when every element equals lowest().
                    size_t max_idx = outer_offset + inner;
                    T max_val = src_ptr[max_idx];
                    // Find the first maximum along the reduction axis.
                    for (size_t mid = 1; mid < mid_size; ++mid)
                    {
                        const size_t src_idx = outer_offset + mid * inner_size + inner;
                        if (src_ptr[src_idx] > max_val)
                        {
                            max_val = src_ptr[src_idx];
                            max_idx = src_idx;
                        }
                    }
                    // Set 1 for max, 0 (from the memset) for others.
                    dst_ptr[max_idx] = 1;
                }
            }
        }, nstripes);
    }
};
// Factory: builds the CPU implementation of the Hardmax layer.
Ptr<HardmaxLayer> HardmaxLayer::create(const LayerParams& params)
{
    return makePtr<LayerHardmaxImpl>(params);
}
}}

@ -198,6 +198,7 @@ private:
void parseTopK (LayerParams& LayerParams, const opencv_onnx::NodeProto& node_proto); void parseTopK (LayerParams& LayerParams, const opencv_onnx::NodeProto& node_proto);
void parseSimpleLayers (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto); void parseSimpleLayers (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
void parseEinsum (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto); void parseEinsum (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
void parseHardmax (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
// Domain: com.microsoft // Domain: com.microsoft
// URL: https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md // URL: https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md
@ -3206,6 +3207,12 @@ void ONNXImporter::parseSimpleLayers(LayerParams& layerParams, const opencv_onnx
addLayer(layerParams, node_proto); addLayer(layerParams, node_proto);
} }
// Maps the ONNX `Hardmax` node onto the dnn "Hardmax" layer; the `axis`
// attribute is carried through unchanged inside layerParams.
void ONNXImporter::parseHardmax(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
{
layerParams.type = "Hardmax";
addLayer(layerParams, node_proto);
}
void ONNXImporter::parseEinsum(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto) void ONNXImporter::parseEinsum(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
{ {
std::vector<MatShape> einsumInpShapes; std::vector<MatShape> einsumInpShapes;
@ -3998,6 +4005,7 @@ void ONNXImporter::buildDispatchMap_ONNX_AI(int opset_version)
dispatch["Where"] = &ONNXImporter::parseElementWise; dispatch["Where"] = &ONNXImporter::parseElementWise;
dispatch["Range"] = &ONNXImporter::parseRange; dispatch["Range"] = &ONNXImporter::parseRange;
dispatch["Einsum"] = &ONNXImporter::parseEinsum; dispatch["Einsum"] = &ONNXImporter::parseEinsum;
dispatch["Hardmax"] = &ONNXImporter::parseHardmax;
std::vector<std::string> simpleLayers{"Acos", "Acosh", "Asin", "Asinh", "Atan", "Atanh", "Ceil", "Celu", "Cos", std::vector<std::string> simpleLayers{"Acos", "Acosh", "Asin", "Asinh", "Atan", "Atanh", "Ceil", "Celu", "Cos",
"Cosh", "Dropout", "Erf", "Exp", "Floor", "HardSigmoid", "HardSwish", "Cosh", "Dropout", "Erf", "Exp", "Floor", "HardSigmoid", "HardSwish",

@ -128,13 +128,6 @@
"test_gru_defaults", // ---- same as above --- "test_gru_defaults", // ---- same as above ---
"test_gru_seq_length", // ---- same as above --- "test_gru_seq_length", // ---- same as above ---
"test_gru_with_initial_bias", // ---- same as above --- "test_gru_with_initial_bias", // ---- same as above ---
"test_hardmax_axis_0", // Issues::Layer::Can't create layer "onnx_node_output_0!y" of type "Hardmax" in function 'getLayerInstance'
"test_hardmax_axis_1", // ---- same as above ---
"test_hardmax_axis_2", // ---- same as above ---
"test_hardmax_default_axis", // ---- same as above ---
"test_hardmax_example", // ---- same as above ---
"test_hardmax_negative_axis", // ---- same as above ---
"test_hardmax_one_hot", // ---- same as above ---
"test_identity_opt", // 23221 illegal hardware instruction "test_identity_opt", // 23221 illegal hardware instruction
"test_identity_sequence", // Issue:: Unkonwn error "test_identity_sequence", // Issue:: Unkonwn error
"test_if", // Issue::'Graph' is not supported in function 'getLayerParams' "test_if", // Issue::'Graph' is not supported in function 'getLayerParams'

Loading…
Cancel
Save