diff --git a/modules/dnn/src/darknet/darknet_io.cpp b/modules/dnn/src/darknet/darknet_io.cpp
index 99715df829..11aad453e3 100644
--- a/modules/dnn/src/darknet/darknet_io.cpp
+++ b/modules/dnn/src/darknet/darknet_io.cpp
@@ -376,7 +376,7 @@ namespace cv {
                 int begin[] = {0, split_size * group_id, 0, 0};
                 cv::dnn::DictValue paramBegin = cv::dnn::DictValue::arrayInt(begin, 4);
 
-                int end[] = {-1, begin[1] + split_size, -1, -1};
+                int end[] = {INT_MAX, begin[1] + split_size, INT_MAX, INT_MAX};
                 cv::dnn::DictValue paramEnd = cv::dnn::DictValue::arrayInt(end, 4);
 
                 darknet::LayerParameter lp;
diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp
index 4370e566a5..aa44e4a5b9 100644
--- a/modules/dnn/src/layers/slice_layer.cpp
+++ b/modules/dnn/src/layers/slice_layer.cpp
@@ -64,12 +64,32 @@ namespace cv
 namespace dnn
 {
 
-void sliceRangesFromShape(const MatShape& inpShape, int& axis, std::vector<std::vector<Range> >& sliceRanges)
+Range normalizeRange(const Range& input_range, int n)
 {
+    Range range = input_range;
+
+    range.start = std::min(std::max(range.start, -n), n - 1);
+    if (range.start < 0)
+    {
+        range.start += n;
+    }
+
+    range.end = std::min(std::max(range.end, -n), n);
+    if (range.end < 0)
+    {
+        range.end += n;
+    }
+
+    return range;
+}
+
+std::vector<std::vector<Range> > finalizeSliceRange(const MatShape& inpShape, int& axis,
+                                                    const std::vector<std::vector<Range> >& inputSliceRanges)
+{
+    std::vector<std::vector<Range> > sliceRanges = inputSliceRanges;
     CV_Assert(inpShape.size() > 0);
     bool axisNeg = (axis < 0);
     axis = (axis + static_cast<int>(inpShape.size())) % inpShape.size();
-    int n = inpShape[axis];
 
     for (size_t i = 0; i < sliceRanges.size(); ++i){
         std::vector<Range>& ranges = sliceRanges[i];
@@ -77,16 +97,20 @@ void sliceRangesFromShape(const MatShape& inpShape, int& axis, std::vector<std:
         {
             ranges.insert(ranges.begin(), axis, Range::all());
         }
-        Range& range = ranges.back();
 
-        if (range.start >= 0)
+        for (size_t j = 0; j < ranges.size(); ++j)
         {
-            continue;
-        }
+            int n = inpShape[j];
+            if (n <= 0)
+            {
+                continue;
+            }
 
-        CV_Assert(n != 0);
-        range.start = (n + range.start) % n;
+            ranges[j] = normalizeRange(ranges[j], n);
+        }
     }
+
+    return sliceRanges;
 }
 
 class SliceLayerImpl : public SliceLayer
@@ -136,7 +160,7 @@ public:
             {
                 int size = sizeOrEnd;
                 CV_Assert(size == -1 || size > 0);  // -1 value means range [start, axis_size).
-                sliceRanges[0][i].end = size > 0 ? (start + size) : -1;  // We'll finalize a negative value later.
+                sliceRanges[0][i].end = size > 0 ? (start + size) : INT_MAX;  // We'll finalize a negative value later.
             }
             else
             {
@@ -186,8 +210,7 @@ public:
         MatShape inpShape = inputs[0];
 
         int axis_rw = axis;
-        std::vector<std::vector<Range> > sliceRanges_rw = sliceRanges;
-        sliceRangesFromShape(inpShape, axis_rw, sliceRanges_rw);
+        std::vector<std::vector<Range> > sliceRanges_rw = finalizeSliceRange(inpShape, axis_rw, sliceRanges);
 
         if (!sliceRanges_rw.empty())
         {
@@ -198,7 +221,7 @@ public:
                 for (int j = 0; j < sliceRanges_rw[i].size(); ++j)
                 {
                     if (shapesInitialized || inpShape[j] > 0)
-                        outputs[i][j] = normalize_axis_range(sliceRanges_rw[i][j], inpShape[j]).size();
+                        outputs[i][j] = normalizeRange(sliceRanges_rw[i][j], inpShape[j]).size();
 
                     if (!sliceSteps.empty() && (i < sliceSteps.size()) && (j < sliceSteps[i].size()) && (sliceSteps[i][j] > 1))
                         outputs[i][j] = (outputs[i][j] + sliceSteps[i][j] - 1) / sliceSteps[i][j];
@@ -235,8 +258,7 @@ public:
         CV_Assert(inputs.size() == 1);
         const MatSize& inpShape = inputs[0].size;
 
-        sliceRangesFromShape(shape(inputs[0]), axis, sliceRanges);
-        finalSliceRanges = sliceRanges;
+        finalSliceRanges = finalizeSliceRange(shape(inputs[0]), axis, sliceRanges);
 
         if (sliceRanges.empty())
         {
@@ -266,7 +288,7 @@ public:
                 // Clamp.
                 for (int j = 0; j < finalSliceRanges[i].size(); ++j)
                 {
-                    finalSliceRanges[i][j] = normalize_axis_range(finalSliceRanges[i][j], inpShape[j]);
+                    finalSliceRanges[i][j] = normalizeRange(finalSliceRanges[i][j], inpShape[j]);
                 }
             }
 
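Note on the semantics introduced here (commentary, not part of the patch): `normalizeRange` clamps `start` into `[-n, n-1]` and `end` into `[-n, n]`, then wraps negatives, so `INT_MAX` acts as an "up to the end of the axis" sentinel and a negative `end` keeps its count-from-the-back meaning. A minimal standalone sketch of that rule, using a local `Range` stand-in rather than `cv::Range` and hypothetical test values:

```cpp
#include <algorithm>
#include <climits>
#include <cstdio>

struct Range { int start, end; };  // local stand-in for cv::Range

static Range normalizeRange(Range r, int n)
{
    // Clamp start into [-n, n-1] and end into [-n, n], then wrap negatives.
    r.start = std::min(std::max(r.start, -n), n - 1);
    if (r.start < 0) r.start += n;
    r.end = std::min(std::max(r.end, -n), n);
    if (r.end < 0) r.end += n;
    return r;
}

int main()
{
    const int n = 5;  // hypothetical axis size
    const Range tests[] = {
        {0, INT_MAX},  // whole-axis sentinel      -> [0, 5)
        {1, -1},       // end counts from the back -> [1, 4)
        {-2, n},       // so does a negative start -> [3, 5)
    };
    for (const Range& t : tests)
    {
        Range r = normalizeRange(t, n);
        std::printf("[%d, %d) -> [%d, %d)\n", t.start, t.end, r.start, r.end);
    }
    return 0;
}
```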
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 7cfc546b12..62569d8b50 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -1275,13 +1275,12 @@ void ONNXImporter::parseSlice(LayerParams& layerParams, const opencv_onnx::NodeP
         if (axis > 0) {
             CV_CheckLE(axis, 1024, "Slice layer can't have more than 1024 axes"); // arbitrary limit
             begin.resize(axis, 0);
-            end.resize(axis, -1);
+            end.resize(axis, INT_MAX);
         }
         for (int i = 0; i < starts.size(); ++i)
         {
             begin.push_back(starts.get<int>(i));
-            int finish = ends.get<int>(i);
-            end.push_back((finish < 0) ? --finish : finish); // numpy doesn't include last dim
+            end.push_back(ends.get<int>(i));
         }
     } else { // inp_size > 1
         CV_Assert(inp_size >= 3);
@@ -1305,14 +1304,10 @@ void ONNXImporter::parseSlice(LayerParams& layerParams, const opencv_onnx::NodeP
         const int* ends = end_blob.ptr<int>();
         if (axis > 0) {
             begin.resize(axis, 0);
-            end.resize(axis, -1);
+            end.resize(axis, INT_MAX);
         }
         std::copy(starts, starts + start_blob.total(), std::back_inserter(begin));
-        for (int i = 0; i < end_blob.total(); ++i)
-        {
-            int finish = ends[i];
-            end.push_back((finish < 0) ? --finish : finish); // numpy doesn't include last dim
-        }
+        std::copy(ends, ends + end_blob.total(), std::back_inserter(end));
 
         if (inp_size == 5) {
             CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end());
@@ -2485,9 +2480,15 @@ void ONNXImporter::parseExpand(LayerParams& layerParams, const opencv_onnx::Node
 
     if (!haveVariables)
     {
-        if (broadcast_axes.size() != 1)
+        if (broadcast_axes.size() > 1)
             CV_Error(Error::StsNotImplemented, "Expand op doesn't support multiple axes for constant input");
 
+        if (broadcast_axes.empty())
+        {
+            addConstant(output_name, getBlob(node_proto, 0));
+            return;
+        }
+
         Mat input = getBlob(node_proto, 0);
         input = input.reshape(0, total(inpShape, 0, broadcast_axes[0]));
         Mat output = cv::repeat(input, 1, targetShape[broadcast_axes[0]]);
@@ -2708,7 +2709,7 @@ void ONNXImporter::parseGather(LayerParams& layerParams, const opencv_onnx::Node
         sliceLp.type = "Slice";
         sliceLp.name = inpShape.size() > 1 ? layerParams.name + "/slice" : layerParams.name;
         std::vector<int> begin(inpShape.size(), 0);
-        std::vector<int> end(inpShape.size(), -1);
+        std::vector<int> end(inpShape.size(), INT_MAX);
         begin[axis] = index;
         end[axis] = index + 1;
 
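Why the importer can now forward ONNX `ends` verbatim (an illustration; `resolveEnd` below is a hypothetical scalar reduction of `normalizeRange`'s end handling, not a function from the patch): the removed `--finish` existed because `end == -1` used to double as the whole-axis sentinel, so genuinely negative ONNX ends had to be shifted by one. With `INT_MAX` as the sentinel, a negative end keeps its ONNX/numpy meaning of "count from the back, exclusive" directly:

```cpp
#include <cassert>
#include <climits>

// Hypothetical scalar form of the end clamping performed by normalizeRange.
static int resolveEnd(int end, int n)
{
    if (end > n) end = n;
    if (end < -n) end = -n;
    return end < 0 ? end + n : end;
}

int main()
{
    const int n = 4;  // hypothetical axis size
    assert(resolveEnd(-1, n) == 3);       // ONNX ends=-1: stop before the last element
    assert(resolveEnd(INT_MAX, n) == 4);  // padded axes: take the whole axis
    assert(resolveEnd(2, n) == 2);        // positive ends pass through unchanged
    return 0;
}
```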
diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp
index 763abf3b4d..b6d6a73013 100644
--- a/modules/dnn/src/tensorflow/tf_importer.cpp
+++ b/modules/dnn/src/tensorflow/tf_importer.cpp
@@ -1681,10 +1681,8 @@ void TFImporter::parseStridedSlice(tensorflow::GraphDef& net, const tensorflow::
     int end_mask = getLayerAttr(layer, "end_mask").i();
     for (int i = 0; i < num; ++i)
     {
-        if (ends.at<int>(i) < 0)
-            ends.at<int>(i) -= 1;
         if (end_mask & (1 << i))
-            ends.at<int>(i) = -1;
+            ends.at<int>(i) = INT_MAX;
         if (strides.at<int>(i) != 1)
             CV_Error(Error::StsNotImplemented,
                      format("StridedSlice with stride %d", strides.at<int>(i)));
@@ -1982,15 +1980,16 @@ void TFImporter::parseConv2DBackpropInput(tensorflow::GraphDef& net, const tenso
     int64_t pads[8];
     bool explicit_pads = getExplicitPadding(layerParams, layer, pads);
     int64_t begs[4] = {};
-    int64_t ends[4] = {-1, -1, -1, -1};
+    int64_t ends[4] = {};
     if (explicit_pads)
     {
         name += "/deconv";
         layerParams.set("pad_mode", "VALID");
+        ends[0] = ends[1] = INT_MAX;
         for (int i = 2; i < 4; ++i) // begins=[0, 0, a, b], ends=[-1, -1, c, d]
         {
             begs[i] = pads[2*i];
-            ends[i] = -1 - pads[2*i + 1];
+            ends[i] = -pads[2*i + 1];
         }
     }
 
@@ -2010,8 +2009,8 @@ void TFImporter::parseConv2DBackpropInput(tensorflow::GraphDef& net, const tenso
         const int strideX = layerParams.get<int>("stride_w");
         Mat outShape = getTensorContent(getConstBlob(layer, value_id, 0));
         int shift = (getDataLayout(layer) == DATA_LAYOUT_NCHW);
-        const int outH = outShape.at<int>(1 + shift) + begs[2] - 1 - ends[2];
-        const int outW = outShape.at<int>(2 + shift) + begs[3] - 1 - ends[3];
+        const int outH = outShape.at<int>(1 + shift) + begs[2] - ends[2];
+        const int outW = outShape.at<int>(2 + shift) + begs[3] - ends[3];
         if (layerParams.get<String>("pad_mode") == "SAME")
         {
             layerParams.set("adj_w", (outW - 1) % strideX);
diff --git a/modules/dnn/src/torch/torch_importer.cpp b/modules/dnn/src/torch/torch_importer.cpp
index 57a624d541..3a46c8f7c0 100644
--- a/modules/dnn/src/torch/torch_importer.cpp
+++ b/modules/dnn/src/torch/torch_importer.cpp
@@ -954,7 +954,7 @@ struct TorchImporter
             int size = scalarParams.get<int>("size");
 
             int begins[] = {0, 0, size, size};
-            int ends[] = {-1, -1, -size - 1, -size - 1};
+            int ends[] = {INT_MAX, INT_MAX, -size, -size};
 
             newModule->apiType = "Slice";
             layerParams.set("begin", DictValue::arrayInt(&begins[0], 4));
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 3f4a437637..32cfb0397a 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -2082,7 +2082,7 @@ TEST_P(Layer_Test_Slice, variable_input_shape)
    int targetId = get<1>(GetParam());
 
    int begin[] = {0, 0, 0, 0};
-   int end[] = {-1, -1, -1, -1};
+   int end[] = {INT_MAX, INT_MAX, INT_MAX, INT_MAX};
 
    Net net;
    LayerParams lp;
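A quick arithmetic check of the `Conv2DBackpropInput` change (hypothetical numbers, not from the patch): the old encoding stored `ends[i] = -1 - pad` and recovered the full size as `cropped + beg - 1 - end`; storing `ends[i] = -pad` lets the `-1` terms cancel, so `cropped + beg - end` yields the same result:

```cpp
#include <cassert>

int main()
{
    const int cropped = 7;  // hypothetical height after the crop slice
    const int pad_before = 1, pad_after = 2;

    // Old encoding: end = -1 - pad_after, size = cropped + beg - 1 - end.
    const int full_old = cropped + pad_before - 1 - (-1 - pad_after);

    // New encoding: end = -pad_after, size = cropped + beg - end.
    const int full_new = cropped + pad_before - (-pad_after);

    assert(full_old == full_new);
    assert(full_new == cropped + pad_before + pad_after);  // padded size restored
    return 0;
}
```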
diff --git a/modules/photo/src/seamless_cloning.hpp b/modules/photo/src/seamless_cloning.hpp
index 92b24e7b09..4d43970d2d 100644
--- a/modules/photo/src/seamless_cloning.hpp
+++ b/modules/photo/src/seamless_cloning.hpp
@@ -53,7 +53,7 @@ namespace cv
     class Cloning
     {
         public:
-            void normalClone(const cv::Mat& destination, const cv::Mat &mask, const cv::Mat &wmask, cv::Mat &cloned, int flag);
+            void normalClone(const cv::Mat& destination, const cv::Mat &mask, cv::Mat &wmask, cv::Mat &cloned, int flag);
             void illuminationChange(cv::Mat &I, cv::Mat &mask, cv::Mat &wmask, cv::Mat &cloned, float alpha, float beta);
             void localColorChange(cv::Mat &I, cv::Mat &mask, cv::Mat &wmask, cv::Mat &cloned, float red_mul, float green_mul, float blue_mul);
             void textureFlatten(cv::Mat &I, cv::Mat &mask, cv::Mat &wmask, float low_threshold, float high_threhold, int kernel_size, cv::Mat &cloned);
@@ -61,10 +61,10 @@ namespace cv
 
         protected:
 
            void initVariables(const cv::Mat &destination, const cv::Mat &binaryMask);
-           void computeDerivatives(const cv::Mat &destination, const cv::Mat &patch, const cv::Mat &binaryMask);
+           void computeDerivatives(const cv::Mat &destination, const cv::Mat &patch, cv::Mat &binaryMask);
            void scalarProduct(cv::Mat mat, float r, float g, float b);
            void poisson(const cv::Mat &destination);
-           void evaluate(const cv::Mat &I, const cv::Mat &wmask, const cv::Mat &cloned);
+           void evaluate(const cv::Mat &I, cv::Mat &wmask, const cv::Mat &cloned);
            void dst(const Mat& src, Mat& dest, bool invert = false);
            void solve(const Mat &img, Mat& mod_diff, Mat &result);
diff --git a/modules/photo/src/seamless_cloning_impl.cpp b/modules/photo/src/seamless_cloning_impl.cpp
index 8fd4bc7865..4b3258a1d9 100644
--- a/modules/photo/src/seamless_cloning_impl.cpp
+++ b/modules/photo/src/seamless_cloning_impl.cpp
@@ -246,7 +246,7 @@ void Cloning::initVariables(const Mat &destination, const Mat &binaryMask)
         filter_Y[j] = 2.0f * (float)std::cos(scale * (j + 1));
 }
 
-void Cloning::computeDerivatives(const Mat& destination, const Mat &patch, const Mat &binaryMask)
+void Cloning::computeDerivatives(const Mat& destination, const Mat &patch, Mat &binaryMask)
 {
     initVariables(destination, binaryMask);
 
@@ -306,7 +306,7 @@ void Cloning::poisson(const Mat &destination)
     }
 }
 
-void Cloning::evaluate(const Mat &I, const Mat &wmask, const Mat &cloned)
+void Cloning::evaluate(const Mat &I, Mat &wmask, const Mat &cloned)
 {
     bitwise_not(wmask,wmask);
 
@@ -320,7 +320,7 @@ void Cloning::evaluate(const Mat &I, const Mat &wmask, const Mat &cloned)
     merge(output,cloned);
 }
 
-void Cloning::normalClone(const Mat &destination, const Mat &patch, const Mat &binaryMask, Mat &cloned, int flag)
+void Cloning::normalClone(const Mat &destination, const Mat &patch, Mat &binaryMask, Mat &cloned, int flag)
 {
     const int w = destination.cols;
     const int h = destination.rows;
diff --git a/modules/python/test/test_misc.py b/modules/python/test/test_misc.py
index 9e83cd6856..fd21656d83 100644
--- a/modules/python/test/test_misc.py
+++ b/modules/python/test/test_misc.py
@@ -643,6 +643,16 @@ class Arguments(NewOpenCVTests):
                          msg="Classes from submodules and global module don't refer "
                          "to the same type")
 
+    def test_class_from_submodule_has_global_alias(self):
+        self.assertTrue(hasattr(cv.ml, "Boost"),
+                        msg="Class is not registered in the submodule")
+        self.assertTrue(hasattr(cv, "ml_Boost"),
+                        msg="Class from submodule doesn't have alias in the "
+                        "global module")
+        self.assertEqual(cv.ml.Boost, cv.ml_Boost,
+                         msg="Classes from submodules and global module don't refer "
+                         "to the same type")
+
     def test_inner_class_has_global_alias(self):
         self.assertTrue(hasattr(cv.SimpleBlobDetector, "Params"),
                         msg="Class is not registered as inner class")
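Background on the seamless-cloning signature changes (a sketch of the underlying issue, assuming OpenCV is available to build against): `evaluate()` flips `wmask` in place via `bitwise_not(wmask, wmask)`, and that call compiles even for a `const Mat&` parameter because `cv::_OutputArray` can be constructed from a `const Mat&`, so the old `const` qualifiers promised an immutability the code never provided. Dropping them makes the signatures match the actual behavior.

```cpp
#include <opencv2/core.hpp>
#include <cassert>

// Mirrors the old evaluate() signature: const&, yet the pixel data is mutated.
static void evaluateLike(const cv::Mat& wmask)
{
    cv::bitwise_not(wmask, wmask);  // compiles, and flips wmask's pixels in place
}

int main()
{
    cv::Mat mask = cv::Mat::zeros(2, 2, CV_8U);
    evaluateLike(mask);
    assert(mask.at<uchar>(0, 0) == 255);  // caller's buffer changed despite const&
    return 0;
}
```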