Merge pull request #19428 from alalek:dnn_drop_misbehaved_clamp

Alexander Alekhin 4 years ago
commit 4f08bb5046
 modules/dnn/include/opencv2/dnn/shape_utils.hpp   | 26
 modules/dnn/src/dnn.cpp                           |  2
 modules/dnn/src/layers/concat_layer.cpp           | 10
 modules/dnn/src/layers/flatten_layer.cpp          | 12
 modules/dnn/src/layers/fully_connected_layer.cpp  |  6
 modules/dnn/src/layers/normalize_bbox_layer.cpp   |  8
 modules/dnn/src/layers/reshape_layer.cpp          |  9
 modules/dnn/src/layers/scale_layer.cpp            |  2
 modules/dnn/src/layers/slice_layer.cpp            |  8
 modules/dnn/src/layers/softmax_layer.cpp          | 10
 modules/dnn/src/onnx/onnx_importer.cpp            | 16
 11 files changed, 109 lines touched

--- a/modules/dnn/include/opencv2/dnn/shape_utils.hpp
+++ b/modules/dnn/include/opencv2/dnn/shape_utils.hpp
@@ -205,21 +205,33 @@ static inline std::ostream& operator<<(std::ostream &out, const MatShape& shape)
     return out;
 }
 
-inline int clamp(int ax, int dims)
+/// @brief Converts axis from `[-dims; dims)` (similar to Python's slice notation) to `[0; dims)` range.
+static inline
+int normalize_axis(int axis, int dims)
 {
-    return ax < 0 ? ax + dims : ax;
+    CV_Check(axis, axis >= -dims && axis < dims, "");
+    axis = (axis < 0) ? (dims + axis) : axis;
+    CV_DbgCheck(axis, axis >= 0 && axis < dims, "");
+    return axis;
 }
 
-inline int clamp(int ax, const MatShape& shape)
+static inline
+int normalize_axis(int axis, const MatShape& shape)
 {
-    return clamp(ax, (int)shape.size());
+    return normalize_axis(axis, (int)shape.size());
 }
 
-inline Range clamp(const Range& r, int axisSize)
+static inline
+Range normalize_axis_range(const Range& r, int axisSize)
 {
-    Range clamped(std::max(r.start, 0),
-                  r.end > 0 ? std::min(r.end, axisSize) : axisSize + r.end + 1);
-    CV_Assert_N(clamped.start < clamped.end, clamped.end <= axisSize);
+    if (r == Range::all())
+        return Range(0, axisSize);
+    CV_CheckGE(r.start, 0, "");
+    Range clamped(r.start,
+                  r.end > 0 ? std::min(r.end, axisSize) : axisSize + r.end + 1);
+    CV_DbgCheckGE(clamped.start, 0, "");
+    CV_CheckLT(clamped.start, clamped.end, "");
+    CV_CheckLE(clamped.end, axisSize, "");
     return clamped;
 }
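
Note: the behavioural change is easiest to see in a small usage sketch (not part of the patch; shape values chosen for illustration). The old clamp() only shifted negative axes and silently passed out-of-range values through; normalize_axis() validates its input:

    #include <opencv2/dnn/shape_utils.hpp>
    #include <iostream>

    int main()
    {
        cv::dnn::MatShape shape = {2, 3, 4, 5};   // a 4-D blob shape

        std::cout << cv::dnn::normalize_axis(-1, shape) << "\n";  // 3 (last axis)
        std::cout << cv::dnn::normalize_axis(1, shape) << "\n";   // 1

        // Out-of-range axes now fail the CV_Check above instead of leaking a
        // bogus index: the old clamp(-5, 4) returned -1 and clamp(7, 4) returned 7.
        // cv::dnn::normalize_axis(-5, 4);  // would throw cv::Exception
        return 0;
    }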

--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2598,7 +2598,7 @@ struct Net::Impl : public detail::NetImplBase
             // the concatenation optimization is applied with batch_size > 1.
             // so, for now, we only apply this optimization in the most popular
             // case batch_size == 1.
-            int axis = clamp(concatLayer->axis, output.dims);
+            int axis = normalize_axis(concatLayer->axis, output.dims);
             if( output.total(0, axis) == 1 )
             {
                 size_t i, ninputs = ld.inputBlobsId.size();

--- a/modules/dnn/src/layers/concat_layer.cpp
+++ b/modules/dnn/src/layers/concat_layer.cpp
@@ -72,7 +72,7 @@ public:
     {
         CV_Assert(inputs.size() > 0);
         outputs.resize(1, inputs[0]);
-        int cAxis = clamp(axis, inputs[0]);
+        int cAxis = normalize_axis(axis, inputs[0]);
 
         int axisSum = 0;
         for (size_t i = 0; i < inputs.size(); i++)
@@ -192,7 +192,7 @@ public:
        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);
 
-        int cAxis = clamp(axis, inputs[0].dims);
+        int cAxis = normalize_axis(axis, inputs[0].dims);
        if (padding)
            return false;
 
@@ -246,7 +246,7 @@ public:
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
 
-        int cAxis = clamp(axis, inputs[0].dims);
+        int cAxis = normalize_axis(axis, inputs[0].dims);
        Mat& outMat = outputs[0];
 
        if (padding)
@@ -306,7 +306,7 @@ public:
        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
 
        InferenceEngine::Builder::ConcatLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axis, input->getDims().size()));
+        ieLayer.setAxis(normalize_axis(axis, input->getDims().size()));
        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
    }
@@ -319,7 +319,7 @@ public:
    {
        InferenceEngine::DataPtr data = ngraphDataNode(inputs[0]);
        const int numDims = data->getDims().size();
-        const int cAxis = clamp(axis, numDims);
+        const int cAxis = normalize_axis(axis, numDims);
        std::vector<size_t> maxDims(numDims, 0);
 
        CV_Assert(inputs.size() == nodes.size());

--- a/modules/dnn/src/layers/flatten_layer.cpp
+++ b/modules/dnn/src/layers/flatten_layer.cpp
@@ -82,8 +82,8 @@ public:
        }
 
        int numAxes = inputs[0].size();
-        int startAxis = clamp(_startAxis, numAxes);
-        int endAxis = clamp(_endAxis, numAxes);
+        int startAxis = normalize_axis(_startAxis, numAxes);
+        int endAxis = normalize_axis(_endAxis, numAxes);
 
        CV_Assert(startAxis >= 0);
        CV_Assert(endAxis >= startAxis && endAxis < (int)numAxes);
@@ -113,8 +113,8 @@ public:
        inputs_arr.getMatVector(inputs);
 
        int numAxes = inputs[0].dims;
-        _startAxis = clamp(_startAxis, numAxes);
-        _endAxis = clamp(_endAxis, numAxes);
+        _startAxis = normalize_axis(_startAxis, numAxes);
+        _endAxis = normalize_axis(_endAxis, numAxes);
    }
 
 #ifdef HAVE_OPENCL
@@ -186,8 +186,8 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
        std::vector<size_t> dims = ieInpNode->get_shape();
 
        int numAxes = dims.size();
-        int startAxis = clamp(_startAxis, numAxes);
-        int endAxis = clamp(_endAxis, numAxes);
+        int startAxis = normalize_axis(_startAxis, numAxes);
+        int endAxis = normalize_axis(_endAxis, numAxes);
 
        CV_Assert(startAxis >= 0);
        CV_Assert(endAxis >= startAxis && endAxis < numAxes);
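
The usual client of this path is a Flatten layer with a negative end axis; a small worked sketch, continuing the example above (values assumed for illustration):

    // Input shape {2, 3, 4, 5}, Caffe-style Flatten with axis=1, end_axis=-1:
    int numAxes   = 4;
    int startAxis = cv::dnn::normalize_axis(1, numAxes);   // 1
    int endAxis   = cv::dnn::normalize_axis(-1, numAxes);  // 3
    // The layer collapses axes [startAxis, endAxis] into one:
    // output shape becomes {2, 3*4*5} = {2, 60}.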

--- a/modules/dnn/src/layers/fully_connected_layer.cpp
+++ b/modules/dnn/src/layers/fully_connected_layer.cpp
@@ -129,7 +129,7 @@ public:
            CV_CheckEQ(blobs[0].dims, 2, "");
            numOutput = blobs[0].size[0];
            CV_Assert(!bias || (size_t)numOutput == blobs[1].total());
-            cAxis = clamp(axis, inputs[0]);
+            cAxis = normalize_axis(axis, inputs[0]);
        }
 
        MatShape outShape(cAxis + 1);
@@ -352,7 +352,7 @@ public:
                return true;
            }
 
-            int axisCan = clamp(axis, inputs[0].dims);
+            int axisCan = normalize_axis(axis, inputs[0].dims);
            int numOutput = blobs[0].size[0];
            int innerSize = blobs[0].size[1];
            int outerSize = total(shape(inputs[0]), 0, axisCan);
@@ -473,7 +473,7 @@ public:
 
        if (!blobs.empty())
        {
-            int axisCan = clamp(axis, input[0].dims);
+            int axisCan = normalize_axis(axis, input[0].dims);
            int outerSize = input[0].total(0, axisCan);
 
            for (size_t i = 0; i < input.size(); i++)

--- a/modules/dnn/src/layers/normalize_bbox_layer.cpp
+++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -118,8 +118,8 @@ public:
        const UMat& inp0 = inputs[0];
        UMat& buffer = internals[0];
-        startAxis = clamp(startAxis, inp0.dims);
-        endAxis = clamp(endAxis, inp0.dims);
+        startAxis = normalize_axis(startAxis, inp0.dims);
+        endAxis = normalize_axis(endAxis, inp0.dims);
 
        size_t num = total(shape(inp0.size), 0, startAxis);
        size_t numPlanes = total(shape(inp0.size), startAxis, endAxis + 1);
@@ -203,8 +203,8 @@ public:
        const Mat& inp0 = inputs[0];
        Mat& buffer = internals[0];
-        startAxis = clamp(startAxis, inp0.dims);
-        endAxis = clamp(endAxis, inp0.dims);
+        startAxis = normalize_axis(startAxis, inp0.dims);
+        endAxis = normalize_axis(endAxis, inp0.dims);
 
        const float* inpData = inp0.ptr<float>();
        float* outData = outputs[0].ptr<float>();

--- a/modules/dnn/src/layers/reshape_layer.cpp
+++ b/modules/dnn/src/layers/reshape_layer.cpp
@@ -60,14 +60,7 @@ static void computeShapeByReshapeMask(const MatShape &srcShape,
     int srcShapeSize = (int)srcShape.size();
     int maskShapeSize = (int)maskShape.size();
 
-    if (srcRange == Range::all())
-        srcRange = Range(0, srcShapeSize);
-    else
-    {
-        int sz = srcRange.size();
-        srcRange.start = clamp(srcRange.start, srcShapeSize);
-        srcRange.end = srcRange.end == INT_MAX ? srcShapeSize : srcRange.start + sz;
-    }
+    srcRange = normalize_axis_range(srcRange, srcShapeSize);
 
     bool explicitMask = !maskShape.empty();  // All mask values are positive.
     for (int i = 0, n = maskShape.size(); i < n && explicitMask; ++i)
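
The old eight-line normalization block folds into one call to normalize_axis_range(), which also tightens the checks. A sketch of the resulting behaviour, assuming the patched shape_utils.hpp above:

    cv::Range r0 = cv::dnn::normalize_axis_range(cv::Range::all(), 10);    // [0, 10)
    cv::Range r1 = cv::dnn::normalize_axis_range(cv::Range(2, -1), 10);    // [2, 10): negative end counts from the back
    // A negative start is no longer silently clamped to 0:
    // cv::dnn::normalize_axis_range(cv::Range(-3, 5), 10) fails CV_CheckGE(r.start, 0).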

--- a/modules/dnn/src/layers/scale_layer.cpp
+++ b/modules/dnn/src/layers/scale_layer.cpp
@@ -240,7 +240,7 @@ public:
        numChannels = blobs[0].total();
 
        std::vector<size_t> shape(ieInpNode0->get_shape().size(), 1);
-        int cAxis = clamp(axis, shape.size());
+        int cAxis = normalize_axis(axis, shape.size());
        shape[cAxis] = numChannels;
 
        auto node = ieInpNode0;

--- a/modules/dnn/src/layers/slice_layer.cpp
+++ b/modules/dnn/src/layers/slice_layer.cpp
@@ -146,7 +146,7 @@ public:
            for (int j = 0; j < sliceRanges[i].size(); ++j)
            {
                if (shapesInitialized || inpShape[j] > 0)
-                    outputs[i][j] = clamp(sliceRanges[i][j], inpShape[j]).size();
+                    outputs[i][j] = normalize_axis_range(sliceRanges[i][j], inpShape[j]).size();
            }
        }
    }
@@ -209,7 +209,7 @@ public:
            // Clamp.
            for (int j = 0; j < finalSliceRanges[i].size(); ++j)
            {
-                finalSliceRanges[i][j] = clamp(finalSliceRanges[i][j], inpShape[j]);
+                finalSliceRanges[i][j] = normalize_axis_range(finalSliceRanges[i][j], inpShape[j]);
            }
        }
 
@@ -601,7 +601,7 @@ public:
        CV_Assert(inputs.size() == 2);
        MatShape dstShape = inputs[0];
-        int start = clamp(axis, dstShape);
+        int start = normalize_axis(axis, dstShape);
        for (int i = start; i < dstShape.size(); i++)
        {
            dstShape[i] = inputs[1][i];
        }
@@ -620,7 +620,7 @@ public:
 
        const Mat &inpSzBlob = inputs[1];
        int dims = inpBlob.dims;
-        int start_axis = clamp(axis, dims);
+        int start_axis = normalize_axis(axis, dims);
 
        std::vector<int> offset_final(dims, 0);
        if (offset.size() == 1)

--- a/modules/dnn/src/layers/softmax_layer.cpp
+++ b/modules/dnn/src/layers/softmax_layer.cpp
@@ -82,7 +82,7 @@ public:
    {
        bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
        MatShape shape = inputs[0];
-        int cAxis = clamp(axisRaw, shape.size());
+        int cAxis = normalize_axis(axisRaw, shape.size());
        shape[cAxis] = 1;
        internals.assign(1, shape);
        return inplace;
@@ -115,7 +115,7 @@ public:
 
        UMat& src = inputs[0];
        UMat& dstMat = outputs[0];
-        int axis = clamp(axisRaw, src.dims);
+        int axis = normalize_axis(axisRaw, src.dims);
 
        if (softmaxOp.empty())
        {
@@ -207,7 +207,7 @@ public:
 
        const Mat &src = inputs[0];
        Mat &dst = outputs[0];
-        int axis = clamp(axisRaw, src.dims);
+        int axis = normalize_axis(axisRaw, src.dims);
        size_t outerSize = src.total(0, axis), channels = src.size[axis],
               innerSize = src.total(axis + 1);
 
@@ -318,7 +318,7 @@ public:
 
        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
        InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axisRaw, input->getDims().size()));
+        ieLayer.setAxis(normalize_axis(axisRaw, input->getDims().size()));
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
    }
@@ -329,7 +329,7 @@ public:
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        int axis = clamp(axisRaw, ieInpNode->get_shape().size());
+        int axis = normalize_axis(axisRaw, ieInpNode->get_shape().size());
        auto softmax = std::make_shared<ngraph::op::v1::Softmax>(ieInpNode, axis);
        if (logSoftMax)
            return Ptr<BackendNode>(new InfEngineNgraphNode(std::make_shared<ngraph::op::v0::Log>(softmax)));
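
As a worked example of how the normalized axis drives the CPU path at @@ -207 (shape values assumed for illustration, continuing the sketch above):

    // src shape {2, 3, 4, 5}, axisRaw = -3:
    int axis = cv::dnn::normalize_axis(-3, 4);  // 1
    // The loop above then partitions the blob as:
    //   outerSize = src.total(0, axis)   = 2
    //   channels  = src.size[axis]       = 3
    //   innerSize = src.total(axis + 1)  = 4 * 5 = 20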

--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -503,7 +503,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                MatShape targetShape;
                std::vector<bool> shouldDelete(inpShape.size(), false);
                for (int i = 0; i < axes.size(); i++) {
-                    int axis = clamp(axes.get<int>(i), inpShape.size());
+                    int axis = normalize_axis(axes.get<int>(i), inpShape.size());
                    shouldDelete[axis] = true;
                }
                for (int axis = 0; axis < inpShape.size(); ++axis){
@@ -515,7 +515,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                if (inpShape.size() == 3 && axes.size() <= 2)
                {
-                    int axis = clamp(axes.get<int>(0), inpShape.size());
+                    int axis = normalize_axis(axes.get<int>(0), inpShape.size());
                    CV_CheckNE(axis, 0, "");
 
                    LayerParams reshapeLp;
 
@@ -539,8 +539,8 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                    avgLp.set("pool", pool);
                    if (axes.size() == 2)
                    {
-                        CV_CheckEQ(clamp(axes.get<int>(0), inpShape.size()), 1, "Unsupported mode");
-                        CV_CheckEQ(clamp(axes.get<int>(1), inpShape.size()), 2, "Unsupported mode");
+                        CV_CheckEQ(normalize_axis(axes.get<int>(0), inpShape.size()), 1, "Unsupported mode");
+                        CV_CheckEQ(normalize_axis(axes.get<int>(1), inpShape.size()), 2, "Unsupported mode");
                        avgLp.set("global_pooling", true);
                    }
                    else
@@ -560,9 +560,9 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                    CV_Assert(axes.size() <= inpShape.size() - 2);
                    std::vector<int> kernel_size(inpShape.size() - 2, 1);
-                    if (axes.size() == 1 && (clamp(axes.get<int>(0), inpShape.size()) <= 1))
+                    if (axes.size() == 1 && (normalize_axis(axes.get<int>(0), inpShape.size()) <= 1))
                    {
-                        int axis = clamp(axes.get<int>(0), inpShape.size());
+                        int axis = normalize_axis(axes.get<int>(0), inpShape.size());
                        MatShape newShape = inpShape;
                        newShape[axis + 1] = total(newShape, axis + 1);
                        newShape.resize(axis + 2);
 
@@ -584,7 +584,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                    else
                    {
                        for (int i = 0; i < axes.size(); i++) {
-                            int axis = clamp(axes.get<int>(i), inpShape.size());
+                            int axis = normalize_axis(axes.get<int>(i), inpShape.size());
                            CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
                            kernel_size[axis - 2] = inpShape[axis];
                        }
@@ -1376,7 +1376,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat input = getBlob(node_proto, 0);
-                int axis = clamp(layerParams.get<int>("axis", 1), input.dims);
+                int axis = normalize_axis(layerParams.get<int>("axis", 1), input.dims);
 
                std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
                out_size.push_back(input.total(axis));
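
ONNX explicitly allows negative axis attributes, so the importer is the main beneficiary of the new range check; a sketch with illustrative values, continuing the examples above:

    // Flatten import at @@ -1376 with a 4-D constant input and axis = -1:
    int axis = cv::dnn::normalize_axis(-1, 4);  // 3, per ONNX negative-axis semantics
    // The old clamp(-5, 4) would have produced -1 and corrupted out_size;
    // normalize_axis(-5, 4) now fails the CV_Check instead.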
