diff --git a/3rdparty/ittnotify/src/ittnotify/ittnotify_config.h b/3rdparty/ittnotify/src/ittnotify/ittnotify_config.h
index ca86ab40e9..d68d0e152e 100644
--- a/3rdparty/ittnotify/src/ittnotify/ittnotify_config.h
+++ b/3rdparty/ittnotify/src/ittnotify/ittnotify_config.h
@@ -196,6 +196,10 @@
 # define ITT_ARCH_PPC64 5
 #endif /* ITT_ARCH_PPC64 */
 
+#ifndef ITT_ARCH_AARCH64 /* 64-bit ARM */
+# define ITT_ARCH_AARCH64 6
+#endif /* ITT_ARCH_AARCH64 */
+
 #ifndef ITT_ARCH
 # if defined _M_IX86 || defined __i386__
 # define ITT_ARCH ITT_ARCH_IA32
@@ -205,6 +209,8 @@
 # define ITT_ARCH ITT_ARCH_IA64
 # elif defined _M_ARM || defined __arm__
 # define ITT_ARCH ITT_ARCH_ARM
+# elif defined __aarch64__
+# define ITT_ARCH ITT_ARCH_AARCH64
 # elif defined __powerpc64__
 # define ITT_ARCH ITT_ARCH_PPC64
 # endif
@@ -359,7 +365,7 @@ ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)
                           : "memory");
     return result;
 }
-#elif ITT_ARCH==ITT_ARCH_ARM || ITT_ARCH==ITT_ARCH_PPC64
+#elif ITT_ARCH==ITT_ARCH_ARM || ITT_ARCH==ITT_ARCH_AARCH64 || ITT_ARCH==ITT_ARCH_PPC64
 #define __TBB_machine_fetchadd4(addr, val) __sync_fetch_and_add(addr, val)
 #endif /* ITT_ARCH==ITT_ARCH_IA64 */
 #ifndef ITT_SIMPLE_INIT
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 957c563d18..c820746785 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -221,7 +221,10 @@ OCV_OPTION(BUILD_OPENEXR "Build openexr from source" (((WIN3
 OCV_OPTION(BUILD_WEBP "Build WebP from source" (((WIN32 OR ANDROID OR APPLE) AND NOT WINRT) OR OPENCV_FORCE_3RDPARTY_BUILD) )
 OCV_OPTION(BUILD_TBB "Download and build TBB from source" (ANDROID OR OPENCV_FORCE_3RDPARTY_BUILD) )
 OCV_OPTION(BUILD_IPP_IW "Build IPP IW from source" (NOT MINGW OR OPENCV_FORCE_3RDPARTY_BUILD) IF (X86_64 OR X86) AND NOT WINRT )
-OCV_OPTION(BUILD_ITT "Build Intel ITT from source" (NOT MINGW OR OPENCV_FORCE_3RDPARTY_BUILD) IF (X86_64 OR X86) AND NOT WINRT AND NOT APPLE_FRAMEWORK )
+OCV_OPTION(BUILD_ITT "Build Intel ITT from source"
+    (NOT MINGW OR OPENCV_FORCE_3RDPARTY_BUILD)
+    IF (X86_64 OR X86 OR ARM OR AARCH64 OR PPC64 OR PPC64LE) AND NOT WINRT AND NOT APPLE_FRAMEWORK
+)
 
 # Optional 3rd party components
 # ===================================================
diff --git a/modules/calib3d/src/undistort.avx2.cpp b/modules/calib3d/src/undistort.avx2.cpp
index 69998be39b..9b6608a783 100644
--- a/modules/calib3d/src/undistort.avx2.cpp
+++ b/modules/calib3d/src/undistort.avx2.cpp
@@ -124,7 +124,7 @@ int initUndistortRectifyMapLine_AVX(float* m1f, float* m2f, short* m1, ushort* m
             _mm256_mul_pd(__matTilt_20, __xd), _mm256_mul_pd(__matTilt_21, __yd)), __matTilt_22);
 #endif
         __m256d __invProj = _mm256_blendv_pd(
-            __one, _mm256_div_pd(__one, __vecTilt2),
+            _mm256_div_pd(__one, __vecTilt2), __one,
             _mm256_cmp_pd(__vecTilt2, _mm256_setzero_pd(), _CMP_EQ_OQ));
 #if CV_FMA3
diff --git a/modules/calib3d/test/test_undistort.cpp b/modules/calib3d/test/test_undistort.cpp
index 2258f67572..24924c8df7 100644
--- a/modules/calib3d/test/test_undistort.cpp
+++ b/modules/calib3d/test/test_undistort.cpp
@@ -1469,4 +1469,34 @@ TEST(Calib3d_UndistortPoints, outputShape)
     }
 }
 
+TEST(Calib3d_initUndistortRectifyMap, regression_14467)
+{
+    Size size_w_h(512 + 3, 512);
+    Matx33f k(
+        6200, 0, size_w_h.width / 2.0f,
+        0, 6200, size_w_h.height / 2.0f,
+        0, 0, 1
+    );
+
+    Mat mesh_uv(size_w_h, CV_32FC2);
+    for (int i = 0; i < size_w_h.height; i++)
+    {
+        for (int j = 0; j < size_w_h.width; j++)
+        {
+            mesh_uv.at<Vec2f>(i, j) = Vec2f((float)j, (float)i);
+        }
+    }
+
+    Matx<double, 1, 14> d(
+        0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0,
+        0.09, 0.0
+    );
+    Mat mapxy, dst;
+    initUndistortRectifyMap(k, d, noArray(), k, size_w_h, CV_32FC2, mapxy, noArray());
+    undistortPoints(mapxy.reshape(2, (int)mapxy.total()), dst, k, d, noArray(), k);
+    dst = dst.reshape(2, mapxy.rows);
+    EXPECT_LE(cvtest::norm(dst, mesh_uv, NORM_INF), 1e-3);
+}
+
 }} // namespace
diff --git a/modules/dnn/src/layers/max_unpooling_layer.cpp b/modules/dnn/src/layers/max_unpooling_layer.cpp
index b9c1f2da73..2978509d5c 100644
--- a/modules/dnn/src/layers/max_unpooling_layer.cpp
+++ b/modules/dnn/src/layers/max_unpooling_layer.cpp
@@ -43,12 +43,18 @@ public:
                          std::vector<MatShape> &outputs,
                          std::vector<MatShape> &internals) const CV_OVERRIDE
     {
-        CV_Assert(inputs.size() == 2);
+        CV_Assert(inputs.size() == 2 || inputs.size() == 3);
         CV_Assert(total(inputs[0]) == total(inputs[1]));
 
-        MatShape outShape = inputs[0];
-        outShape[2] = (outShape[2] - 1) * poolStride.height + poolKernel.height - 2 * poolPad.height;
-        outShape[3] = (outShape[3] - 1) * poolStride.width + poolKernel.width - 2 * poolPad.width;
+        MatShape outShape;
+        if (inputs.size() == 2)
+        {
+            outShape = inputs[0];
+            outShape[2] = (outShape[2] - 1) * poolStride.height + poolKernel.height - 2 * poolPad.height;
+            outShape[3] = (outShape[3] - 1) * poolStride.width + poolKernel.width - 2 * poolPad.width;
+        }
+        else
+            outShape = inputs[2];
 
         outputs.clear();
         outputs.push_back(outShape);
@@ -71,7 +77,7 @@ public:
         inputs_arr.getMatVector(inputs);
         outputs_arr.getMatVector(outputs);
 
-        CV_Assert(inputs.size() == 2);
+        CV_Assert(inputs.size() == 2 || inputs.size() == 3);
 
         Mat& input = inputs[0];
         Mat& indices = inputs[1];
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 6c70210c2c..5945e8db11 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -530,6 +530,13 @@ void ONNXImporter::populateNet(Net dstNet)
                 layerParams.type = "Power";
             }
         }
+        else if (layer_type == "Clip")
+        {
+            layerParams.type = "ReLU6";
+            replaceLayerParam(layerParams, "min", "min_value");
+            replaceLayerParam(layerParams, "max", "max_value");
+
+        }
         else if (layer_type == "LeakyRelu")
        {
             layerParams.type = "ReLU";
diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp
index e94533b712..cc45c3f425 100644
--- a/modules/dnn/src/tensorflow/tf_importer.cpp
+++ b/modules/dnn/src/tensorflow/tf_importer.cpp
@@ -1370,6 +1370,24 @@ void TFImporter::populateNet(Net dstNet)
 
             connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, layer.input_size());
         }
+        else if (type == "MaxPoolGrad")
+        {
+            CV_Assert(layer.input_size() == 3);
+
+            layerParams.set("pool_k_h", 0);
+            layerParams.set("pool_k_w", 0);
+            layerParams.set("pool_stride_h", 0);
+            layerParams.set("pool_stride_w", 0);
+            layerParams.set("pool_pad_h", 0);
+            layerParams.set("pool_pad_w", 0);
+
+            int id = dstNet.addLayer(name, "MaxUnpool", layerParams);
+            layer_id[name] = id;
+
+            connect(layer_id, dstNet, parsePin(layer.input(2)), id, 0);
+            connect(layer_id, dstNet, parsePin(layer.input(1) + ":1"), id, 1);
+            connect(layer_id, dstNet, parsePin(layer.input(0)), id, 2);
+        }
         else if (type == "Placeholder")
         {
             if (!hasLayerAttr(layer, "dtype") ||
diff --git a/modules/dnn/test/test_caffe_importer.cpp b/modules/dnn/test/test_caffe_importer.cpp
index a5cae50621..9588015a1e 100644
--- a/modules/dnn/test/test_caffe_importer.cpp
+++ b/modules/dnn/test/test_caffe_importer.cpp
@@ -205,7 +205,7 @@ TEST(Reproducibility_FCN, Accuracy)
     Net net;
     {
         const string proto = findDataFile("dnn/fcn8s-heavy-pascal.prototxt");
-        const string model = findDataFile("dnn/fcn8s-heavy-pascal.caffemodel");
+        const string model = findDataFile("dnn/fcn8s-heavy-pascal.caffemodel", false);
         net = readNetFromCaffe(proto, model);
         ASSERT_FALSE(net.empty());
     }
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index c99b8cf431..186239494f 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -136,6 +136,11 @@ TEST_P(Test_ONNX_layers, ReLU)
     testONNXModels("ReLU");
 }
 
+TEST_P(Test_ONNX_layers, Clip)
+{
+    testONNXModels("clip", npy);
+}
+
 TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
 {
     testONNXModels("maxpooling_sigmoid");
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index dd5d871d71..0687c0ba68 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -218,6 +218,13 @@ TEST_P(Test_TensorFlow_layers, pooling)
     runTensorFlowNet("reduce_mean");  // an average pooling over all spatial dimensions.
 }
 
+TEST_P(Test_TensorFlow_layers, max_pool_grad)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    runTensorFlowNet("max_pool_grad");
+}
+
 // TODO: fix tests and replace to pooling
 TEST_P(Test_TensorFlow_layers, ave_pool_same)
 {
diff --git a/modules/ts/src/ts.cpp b/modules/ts/src/ts.cpp
index b0ca04179b..8a7dafba9e 100644
--- a/modules/ts/src/ts.cpp
+++ b/modules/ts/src/ts.cpp
@@ -968,13 +968,15 @@ static std::string findData(const std::string& relative_path, bool required, boo
         std::string prefix = path_join(datapath, subdir);
         std::string result_;
         CHECK_FILE_WITH_PREFIX(prefix, result_);
-#if 1 // check for misused 'optional' mode
         if (!required && !result_.empty())
         {
             std::cout << "TEST ERROR: Don't use 'optional' findData() for " << relative_path << std::endl;
-            CV_Assert(required || result_.empty());
+            static bool checkOptionalFlag = cv::utils::getConfigurationParameterBool("OPENCV_TEST_CHECK_OPTIONAL_DATA", false);
+            if (checkOptionalFlag)
+            {
+                CV_Assert(required || result_.empty());
+            }
         }
-#endif
         if (!result_.empty())
             return result_;
     }
diff --git a/modules/videoio/src/cap_gstreamer.cpp b/modules/videoio/src/cap_gstreamer.cpp
index 79515d5df6..5b559bd9be 100644
--- a/modules/videoio/src/cap_gstreamer.cpp
+++ b/modules/videoio/src/cap_gstreamer.cpp
@@ -85,6 +85,11 @@ static void handleMessage(GstElement * pipeline);
 
 namespace {
 
+#if defined __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wunused-function"
+#endif
+
 template<typename T> static inline void GSafePtr_addref(T* ptr)
 {
     if (ptr)
@@ -109,6 +114,10 @@ template<> inline void GSafePtr_release(GstEncoding
 template<> inline void GSafePtr_addref<char>(char* pPtr);  // declaration only. not defined. should not be used
 template<> inline void GSafePtr_release<char>(char** pPtr) { if (pPtr) { g_free(*pPtr); *pPtr = NULL; } }
 
+#if defined __clang__
+# pragma clang diagnostic pop
+#endif
+
 template<typename T>
 class GSafePtr
 {
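
Reviewer note on the undistort.avx2.cpp hunk (sketch only, not part of the patch):
_mm256_blendv_pd(a, b, mask) takes lanes from its second argument where the mask
sign bit is set and from its first argument otherwise, so the lanes where
__vecTilt2 == 0 have to come from __one; that is why the first two arguments are
swapped. A minimal stand-alone illustration, with the helper name safe_inv_proj
invented for this note:

    #include <immintrin.h>

    // Scalar reference the corrected blend should reproduce:
    //   invProj = (vecTilt2 == 0.0) ? 1.0 : 1.0 / vecTilt2;
    static inline __m256d safe_inv_proj(__m256d vecTilt2)
    {
        const __m256d one    = _mm256_set1_pd(1.0);
        const __m256d isZero = _mm256_cmp_pd(vecTilt2, _mm256_setzero_pd(), _CMP_EQ_OQ);
        // blendv picks 'one' (2nd operand) in the isZero lanes and the
        // reciprocal (1st operand) everywhere else.
        return _mm256_blendv_pd(_mm256_div_pd(one, vecTilt2), one, isZero);
    }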
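
Reviewer note on the onnx_importer.cpp hunk (illustrative only): ONNX Clip saturates
its input between a lower and an upper bound, which is the same computation OpenCV's
ReLU6 layer performs with its min_value/max_value parameters, so the importer only
needs to rename the attributes. A hypothetical scalar reference (the 0/6 defaults
are an assumption mirroring ReLU6, not importer code):

    #include <algorithm>

    static inline float clip_as_relu6(float x, float min_value = 0.f, float max_value = 6.f)
    {
        // Equivalent to ONNX Clip(x, min, max).
        return std::min(std::max(x, min_value), max_value);
    }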
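
Reviewer note on the MaxPoolGrad / MaxUnpool hunks (assumed example, not from the
patch): the closed-form unpooled size (in - 1) * stride + kernel - 2 * pad cannot
always recover the original extent, which is presumably why the importer wires the
forward-pass tensor in as a third input and getMemoryShapes takes the output shape
from it directly:

    // Pooling 9 elements with kernel 2, stride 2, pad 0 gives 4 (floor) or
    // 5 (ceil) outputs, but inverting the formula yields 8 or 10 -- never 9.
    static int unpooledSize(int pooled, int stride, int kernel, int pad)
    {
        return (pooled - 1) * stride + kernel - 2 * pad;
    }
    // unpooledSize(4, 2, 2, 0) == 8, unpooledSize(5, 2, 2, 0) == 10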