Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/14980/head
Alexander Alekhin 5 years ago
commit 43eba3d750
  1. 8
      3rdparty/ittnotify/src/ittnotify/ittnotify_config.h
  2. 5
      CMakeLists.txt
  3. 2
      modules/calib3d/src/undistort.avx2.cpp
  4. 30
      modules/calib3d/test/test_undistort.cpp
  5. 12
      modules/dnn/src/layers/max_unpooling_layer.cpp
  6. 7
      modules/dnn/src/onnx/onnx_importer.cpp
  7. 18
      modules/dnn/src/tensorflow/tf_importer.cpp
  8. 2
      modules/dnn/test/test_caffe_importer.cpp
  9. 5
      modules/dnn/test/test_onnx_importer.cpp
  10. 7
      modules/dnn/test/test_tf_importer.cpp
  11. 6
      modules/ts/src/ts.cpp
  12. 9
      modules/videoio/src/cap_gstreamer.cpp

@@ -196,6 +196,10 @@
# define ITT_ARCH_PPC64 5
#endif /* ITT_ARCH_PPC64 */
#ifndef ITT_ARCH_AARCH64 /* 64-bit ARM */
# define ITT_ARCH_AARCH64 6
#endif /* ITT_ARCH_AARCH64 */
#ifndef ITT_ARCH
# if defined _M_IX86 || defined __i386__
# define ITT_ARCH ITT_ARCH_IA32
@@ -205,6 +209,8 @@
# define ITT_ARCH ITT_ARCH_IA64
# elif defined _M_ARM || defined __arm__
# define ITT_ARCH ITT_ARCH_ARM
# elif defined __aarch64__
# define ITT_ARCH ITT_ARCH_AARCH64
# elif defined __powerpc64__
# define ITT_ARCH ITT_ARCH_PPC64
# endif
@@ -359,7 +365,7 @@ ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)
: "memory");
return result;
}
#elif ITT_ARCH==ITT_ARCH_ARM || ITT_ARCH==ITT_ARCH_PPC64
#elif ITT_ARCH==ITT_ARCH_ARM || ITT_ARCH==ITT_ARCH_AARCH64 || ITT_ARCH==ITT_ARCH_PPC64
#define __TBB_machine_fetchadd4(addr, val) __sync_fetch_and_add(addr, val)
#endif /* ITT_ARCH==ITT_ARCH_IA64 */
#ifndef ITT_SIMPLE_INIT

@@ -221,7 +221,10 @@ OCV_OPTION(BUILD_OPENEXR "Build openexr from source" (((WIN3
OCV_OPTION(BUILD_WEBP "Build WebP from source" (((WIN32 OR ANDROID OR APPLE) AND NOT WINRT) OR OPENCV_FORCE_3RDPARTY_BUILD) )
OCV_OPTION(BUILD_TBB "Download and build TBB from source" (ANDROID OR OPENCV_FORCE_3RDPARTY_BUILD) )
OCV_OPTION(BUILD_IPP_IW "Build IPP IW from source" (NOT MINGW OR OPENCV_FORCE_3RDPARTY_BUILD) IF (X86_64 OR X86) AND NOT WINRT )
OCV_OPTION(BUILD_ITT "Build Intel ITT from source" (NOT MINGW OR OPENCV_FORCE_3RDPARTY_BUILD) IF (X86_64 OR X86) AND NOT WINRT AND NOT APPLE_FRAMEWORK )
OCV_OPTION(BUILD_ITT "Build Intel ITT from source"
(NOT MINGW OR OPENCV_FORCE_3RDPARTY_BUILD)
IF (X86_64 OR X86 OR ARM OR AARCH64 OR PPC64 OR PPC64LE) AND NOT WINRT AND NOT APPLE_FRAMEWORK
)
# Optional 3rd party components
# ===================================================

@@ -124,7 +124,7 @@ int initUndistortRectifyMapLine_AVX(float* m1f, float* m2f, short* m1, ushort* m
_mm256_mul_pd(__matTilt_20, __xd), _mm256_mul_pd(__matTilt_21, __yd)), __matTilt_22);
#endif
__m256d __invProj = _mm256_blendv_pd(
__one, _mm256_div_pd(__one, __vecTilt2),
_mm256_div_pd(__one, __vecTilt2), __one,
_mm256_cmp_pd(__vecTilt2, _mm256_setzero_pd(), _CMP_EQ_OQ));
#if CV_FMA3

@@ -1469,4 +1469,34 @@ TEST(Calib3d_UndistortPoints, outputShape)
}
}
TEST(Calib3d_initUndistortRectifyMap, regression_14467)
{
Size size_w_h(512 + 3, 512);
Matx33f k(
6200, 0, size_w_h.width / 2.0f,
0, 6200, size_w_h.height / 2.0f,
0, 0, 1
);
Mat mesh_uv(size_w_h, CV_32FC2);
for (int i = 0; i < size_w_h.height; i++)
{
for (int j = 0; j < size_w_h.width; j++)
{
mesh_uv.at<Vec2f>(i, j) = Vec2f((float)j, (float)i);
}
}
Matx<double, 1, 14> d(
0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0.09, 0.0
);
Mat mapxy, dst;
initUndistortRectifyMap(k, d, noArray(), k, size_w_h, CV_32FC2, mapxy, noArray());
undistortPoints(mapxy.reshape(2, (int)mapxy.total()), dst, k, d, noArray(), k);
dst = dst.reshape(2, mapxy.rows);
EXPECT_LE(cvtest::norm(dst, mesh_uv, NORM_INF), 1e-3);
}
}} // namespace

@@ -43,12 +43,18 @@ public:
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 2);
CV_Assert(inputs.size() == 2 || inputs.size() == 3);
CV_Assert(total(inputs[0]) == total(inputs[1]));
MatShape outShape = inputs[0];
MatShape outShape;
if (inputs.size() == 2)
{
outShape = inputs[0];
outShape[2] = (outShape[2] - 1) * poolStride.height + poolKernel.height - 2 * poolPad.height;
outShape[3] = (outShape[3] - 1) * poolStride.width + poolKernel.width - 2 * poolPad.width;
}
else
outShape = inputs[2];
outputs.clear();
outputs.push_back(outShape);
@@ -71,7 +77,7 @@ public:
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() == 2);
CV_Assert(inputs.size() == 2 || inputs.size() == 3);
Mat& input = inputs[0];
Mat& indices = inputs[1];

@@ -530,6 +530,13 @@ void ONNXImporter::populateNet(Net dstNet)
layerParams.type = "Power";
}
}
else if (layer_type == "Clip")
{
layerParams.type = "ReLU6";
replaceLayerParam(layerParams, "min", "min_value");
replaceLayerParam(layerParams, "max", "max_value");
}
else if (layer_type == "LeakyRelu")
{
layerParams.type = "ReLU";

@@ -1370,6 +1370,24 @@ void TFImporter::populateNet(Net dstNet)
connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, layer.input_size());
}
else if (type == "MaxPoolGrad")
{
CV_Assert(layer.input_size() == 3);
layerParams.set("pool_k_h", 0);
layerParams.set("pool_k_w", 0);
layerParams.set("pool_stride_h", 0);
layerParams.set("pool_stride_w", 0);
layerParams.set("pool_pad_h", 0);
layerParams.set("pool_pad_w", 0);
int id = dstNet.addLayer(name, "MaxUnpool", layerParams);
layer_id[name] = id;
connect(layer_id, dstNet, parsePin(layer.input(2)), id, 0);
connect(layer_id, dstNet, parsePin(layer.input(1) + ":1"), id, 1);
connect(layer_id, dstNet, parsePin(layer.input(0)), id, 2);
}
else if (type == "Placeholder")
{
if (!hasLayerAttr(layer, "dtype") ||

@@ -205,7 +205,7 @@ TEST(Reproducibility_FCN, Accuracy)
Net net;
{
const string proto = findDataFile("dnn/fcn8s-heavy-pascal.prototxt");
const string model = findDataFile("dnn/fcn8s-heavy-pascal.caffemodel");
const string model = findDataFile("dnn/fcn8s-heavy-pascal.caffemodel", false);
net = readNetFromCaffe(proto, model);
ASSERT_FALSE(net.empty());
}

@@ -136,6 +136,11 @@ TEST_P(Test_ONNX_layers, ReLU)
testONNXModels("ReLU");
}
TEST_P(Test_ONNX_layers, Clip)
{
testONNXModels("clip", npy);
}
TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
{
testONNXModels("maxpooling_sigmoid");

@@ -218,6 +218,13 @@ TEST_P(Test_TensorFlow_layers, pooling)
runTensorFlowNet("reduce_mean"); // an average pooling over all spatial dimensions.
}
TEST_P(Test_TensorFlow_layers, max_pool_grad)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
runTensorFlowNet("max_pool_grad");
}
// TODO: fix tests and replace to pooling
TEST_P(Test_TensorFlow_layers, ave_pool_same)
{

@@ -968,13 +968,15 @@ static std::string findData(const std::string& relative_path, bool required, boo
std::string prefix = path_join(datapath, subdir);
std::string result_;
CHECK_FILE_WITH_PREFIX(prefix, result_);
#if 1 // check for misused 'optional' mode
if (!required && !result_.empty())
{
std::cout << "TEST ERROR: Don't use 'optional' findData() for " << relative_path << std::endl;
static bool checkOptionalFlag = cv::utils::getConfigurationParameterBool("OPENCV_TEST_CHECK_OPTIONAL_DATA", false);
if (checkOptionalFlag)
{
CV_Assert(required || result_.empty());
}
#endif
}
if (!result_.empty())
return result_;
}

@@ -85,6 +85,11 @@ static void handleMessage(GstElement * pipeline);
namespace {
#if defined __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wunused-function"
#endif
template<typename T> static inline void GSafePtr_addref(T* ptr)
{
if (ptr)
@@ -109,6 +114,10 @@ template<> inline void GSafePtr_release<GstEncodingContainerProfile>(GstEncoding
template<> inline void GSafePtr_addref<char>(char* pPtr); // declaration only. not defined. should not be used
template<> inline void GSafePtr_release<char>(char** pPtr) { if (pPtr) { g_free(*pPtr); *pPtr = NULL; } }
#if defined __clang__
# pragma clang diagnostic pop
#endif
template <typename T>
class GSafePtr
{

Loading…
Cancel
Save