diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 611b9f7fcb..84c9b4dbbc 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2233,7 +2233,10 @@ struct Net::Impl
         if (isAsync)
             CV_Error(Error::StsNotImplemented, "Default implementation fallbacks in asynchronous mode");

-        CV_Assert(layer->supportBackend(DNN_BACKEND_OPENCV));
+        if (!layer->supportBackend(DNN_BACKEND_OPENCV))
+            CV_Error(Error::StsNotImplemented, format("Layer \"%s\" of type \"%s\" unsupported on OpenCV backend",
+                                                      ld.name.c_str(), ld.type.c_str()));
+
         if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
         {
             std::vector<UMat> umat_inputBlobs = OpenCLBackendWrapper::getUMatVector(ld.inputBlobsWrappers);
diff --git a/modules/dnn/src/layers/layers_common.cpp b/modules/dnn/src/layers/layers_common.cpp
index 2f5f486155..f119c12ac0 100644
--- a/modules/dnn/src/layers/layers_common.cpp
+++ b/modules/dnn/src/layers/layers_common.cpp
@@ -148,13 +148,12 @@ void getPoolingKernelParams(const LayerParams &params, std::vector<size_t>& kern
                             std::vector<size_t>& pads_begin, std::vector<size_t>& pads_end,
                             std::vector<size_t>& strides, cv::String &padMode)
 {
-    util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode);
-
     globalPooling = params.has("global_pooling") &&
                     params.get<bool>("global_pooling");

     if (globalPooling)
     {
+        util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode);
         if(params.has("kernel_h") || params.has("kernel_w") || params.has("kernel_size"))
         {
             CV_Error(cv::Error::StsBadArg, "In global_pooling mode, kernel_size (or kernel_h and kernel_w) cannot be specified");
@@ -171,6 +170,7 @@ void getPoolingKernelParams(const LayerParams &params, std::vector<size_t>& kern
     else
     {
         util::getKernelSize(params, kernel);
+        util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode, kernel.size());
     }
 }

diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index edda69add1..af896a5f2a 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -397,11 +397,33 @@ void ONNXImporter::populateNet(Net dstNet)
             layerParams.set("ceil_mode", layerParams.has("pad_mode"));
             layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
         }
-        else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool")
+        else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool" || layer_type == "ReduceMean")
         {
+            CV_Assert(node_proto.input_size() == 1);
             layerParams.type = "Pooling";
-            layerParams.set("pool", layer_type == "GlobalAveragePool" ? "AVE" : "MAX");
-            layerParams.set("global_pooling", true);
+            layerParams.set("pool", layer_type == "GlobalMaxPool"? "MAX" : "AVE");
+            layerParams.set("global_pooling", layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool");
+
+            if (layer_type == "ReduceMean")
+            {
+                if (layerParams.get<int>("keepdims") == 0 || !layerParams.has("axes"))
+                    CV_Error(Error::StsNotImplemented, "Unsupported mode of ReduceMean operation.");
+
+                MatShape inpShape = outShapes[node_proto.input(0)];
+                if (inpShape.size() != 4 && inpShape.size() != 5)
+                    CV_Error(Error::StsNotImplemented, "Unsupported input shape of reduce_mean operation.");
+
+                DictValue axes = layerParams.get("axes");
+                CV_Assert(axes.size() <= inpShape.size() - 2);
+                std::vector<int> kernel_size(inpShape.size() - 2, 1);
+                for (int i = 0; i < axes.size(); i++) {
+                    int axis = axes.get<int>(i);
+                    CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
+                    kernel_size[axis - 2] = inpShape[axis];
+                }
+
+                layerParams.set("kernel_size", DictValue::arrayInt(&kernel_size[0], kernel_size.size()));
+            }
         }
         else if (layer_type == "Slice")
         {
@@ -747,11 +769,13 @@ void ONNXImporter::populateNet(Net dstNet)
             if (axes.size() != 1)
                 CV_Error(Error::StsNotImplemented, "Multidimensional unsqueeze");

-            int dims[] = {1, -1};
+            MatShape inpShape = outShapes[node_proto.input(0)];
+            int axis = axes.getIntValue(0);
+            CV_Assert(0 <= axis && axis <= inpShape.size());
+            std::vector<int> outShape = inpShape;
+            outShape.insert(outShape.begin() + axis, 1);
             layerParams.type = "Reshape";
-            layerParams.set("axis", axes.getIntValue(0));
-            layerParams.set("num_axes", 1);
-            layerParams.set("dim", DictValue::arrayInt(&dims[0], 2));
+            layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
         }
         else if (layer_type == "Reshape")
         {
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index ce77e867a2..aeceb9ac69 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -162,6 +162,18 @@ TEST_P(Test_ONNX_layers, Clip)
     testONNXModels("clip", npy);
 }

+TEST_P(Test_ONNX_layers, ReduceMean)
+{
+    testONNXModels("reduce_mean");
+}
+
+TEST_P(Test_ONNX_layers, ReduceMean3D)
+{
+    if (target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");
+    testONNXModels("reduce_mean3d");
+}
+
 TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
 {
     testONNXModels("maxpooling_sigmoid");
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index 0357b8ecc5..8fcf81245a 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -350,11 +350,6 @@ TEST_P(Test_TensorFlow_layers, l2_normalize_3d)
     runTensorFlowNet("l2_normalize_3d");
 }

-TEST_P(Test_TensorFlow_layers, Split)
-{
-    runTensorFlowNet("split");
-}
-
 class Test_TensorFlow_nets : public DNNTestLayer {};

 TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
@@ -682,6 +677,9 @@ TEST_P(Test_TensorFlow_layers, lstm)

 TEST_P(Test_TensorFlow_layers, split)
 {
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
+    runTensorFlowNet("split");
     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
     runTensorFlowNet("split_equals");
diff --git a/modules/js/src/embindgen.py b/modules/js/src/embindgen.py
index 88d2eb7023..a5bada6332 100644
--- a/modules/js/src/embindgen.py
+++ b/modules/js/src/embindgen.py
@@ -141,7 +141,7 @@ features2d = {'Feature2D': ['detect', 'compute', 'detectAndCompute', 'descriptor
               'AKAZE': ['create', 'setDescriptorType', 'getDescriptorType', 'setDescriptorSize', 'getDescriptorSize', 'setDescriptorChannels', 'getDescriptorChannels', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
               'DescriptorMatcher': ['add', 'clear', 'empty', 'isMaskSupported', 'train', 'match', 'knnMatch', 'radiusMatch', 'clone', 'create'],
               'BFMatcher': ['isMaskSupported', 'create'],
-              '': ['drawKeypoints', 'drawMatches']}
+              '': ['drawKeypoints', 'drawMatches', 'drawMatchesKnn']}

 photo = {'': ['createAlignMTB', 'createCalibrateDebevec', 'createCalibrateRobertson', \
               'createMergeDebevec', 'createMergeMertens', 'createMergeRobertson', \
@@ -590,7 +590,7 @@ class JSWrapperGenerator(object):
             match = re.search(r'const std::vector<(.*)>&', arg_type)
             if match:
                 type_in_vect = match.group(1)
-                if type_in_vect != 'cv::Mat':
+                if type_in_vect in ['int', 'float', 'double', 'char', 'uchar', 'String', 'std::string']:
                     casted_arg_name = 'emscripten::vecFromJSArray<' + type_in_vect + '>(' + arg_name + ')'
                     arg_type = re.sub(r'std::vector<(.*)>', 'emscripten::val', arg_type)
                 w_signature.append(arg_type + ' ' + arg_name)
diff --git a/modules/js/test/test_features2d.js b/modules/js/test/test_features2d.js
index 21982f65f8..173ada0ded 100644
--- a/modules/js/test/test_features2d.js
+++ b/modules/js/test/test_features2d.js
@@ -80,3 +80,36 @@ QUnit.test('BFMatcher', function(assert) {

   assert.equal(dm.size(), 67);
 });
+
+QUnit.test('Drawing', function(assert) {
+  // Generate key points.
+  let image = generateTestFrame();
+
+  let kp = new cv.KeyPointVector();
+  let descriptors = new cv.Mat();
+  let orb = new cv.ORB();
+  orb.detectAndCompute(image, new cv.Mat(), kp, descriptors);
+  assert.equal(kp.size(), 67);
+
+  let dst = new cv.Mat();
+  cv.drawKeypoints(image, kp, dst);
+  assert.equal(dst.rows, image.rows);
+  assert.equal(dst.cols, image.cols);
+
+  // Run a matcher.
+  let dm = new cv.DMatchVector();
+  let matcher = new cv.BFMatcher();
+  matcher.match(descriptors, descriptors, dm);
+  assert.equal(dm.size(), 67);
+
+  cv.drawMatches(image, kp, image, kp, dm, dst);
+  assert.equal(dst.rows, image.rows);
+  assert.equal(dst.cols, 2 * image.cols);
+
+  dm = new cv.DMatchVectorVector();
+  matcher.knnMatch(descriptors, descriptors, dm, 2);
+  assert.equal(dm.size(), 67);
+  cv.drawMatchesKnn(image, kp, image, kp, dm, dst);
+  assert.equal(dst.rows, image.rows);
+  assert.equal(dst.cols, 2 * image.cols);
+});
diff --git a/modules/videoio/src/cap_gstreamer.cpp b/modules/videoio/src/cap_gstreamer.cpp
index 5b559bd9be..981ce1dd60 100644
--- a/modules/videoio/src/cap_gstreamer.cpp
+++ b/modules/videoio/src/cap_gstreamer.cpp
@@ -141,14 +141,14 @@ public:
     inline operator T* () CV_NOEXCEPT { return ptr; }
     inline operator /*const*/ T* () const CV_NOEXCEPT { return (T*)ptr; } // there is no const correctness in Gst C API

-    inline T* get() CV_NOEXCEPT { return ptr; }
-    inline /*const*/ T* get() const CV_NOEXCEPT { CV_Assert(ptr); return (T*)ptr; } // there is no const correctness in Gst C API
+    T* get() { CV_Assert(ptr); return ptr; }
+    /*const*/ T* get() const { CV_Assert(ptr); return (T*)ptr; } // there is no const correctness in Gst C API

-    inline const T* operator -> () const { CV_Assert(ptr); return ptr; }
+    const T* operator -> () const { CV_Assert(ptr); return ptr; }
     inline operator bool () const CV_NOEXCEPT { return ptr != NULL; }
     inline bool operator ! () const CV_NOEXCEPT { return ptr == NULL; }

-    inline T** getRef() { CV_Assert(ptr == NULL); return &ptr; }
+    T** getRef() { CV_Assert(ptr == NULL); return &ptr; }

     inline GSafePtr& reset(T* p) CV_NOEXCEPT // pass result of functions with "transfer floating" ownership
     {
@@ -1221,7 +1221,21 @@ public:
         num_frames(0), framerate(0)
     {
     }
-    virtual ~CvVideoWriter_GStreamer() CV_OVERRIDE { close(); }
+    virtual ~CvVideoWriter_GStreamer() CV_OVERRIDE
+    {
+        try
+        {
+            close();
+        }
+        catch (const std::exception& e)
+        {
+            CV_WARN("C++ exception in writer destructor: " << e.what());
+        }
+        catch (...)
+        {
+            CV_WARN("Unknown exception in writer destructor. Ignore");
+        }
+    }

     int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_GSTREAMER; }

@@ -1253,7 +1267,11 @@ void CvVideoWriter_GStreamer::close_()
 {
     handleMessage(pipeline);

-    if (gst_app_src_end_of_stream(GST_APP_SRC(source.get())) != GST_FLOW_OK)
+    if (!(bool)source)
+    {
+        CV_WARN("No source in GStreamer pipeline. Ignore");
+    }
+    else if (gst_app_src_end_of_stream(GST_APP_SRC(source.get())) != GST_FLOW_OK)
     {
         CV_WARN("Cannot send EOS to GStreamer pipeline");
     }
diff --git a/modules/videoio/test/test_video_io.cpp b/modules/videoio/test/test_video_io.cpp
index 08deff523f..f55802b4ba 100644
--- a/modules/videoio/test/test_video_io.cpp
+++ b/modules/videoio/test/test_video_io.cpp
@@ -506,4 +506,63 @@ TEST(Videoio, exceptions)
     EXPECT_THROW(cap.open("this_does_not_exist.avi", CAP_OPENCV_MJPEG), Exception);
 }

+
+typedef Videoio_Writer Videoio_Writer_bad_fourcc;
+
+TEST_P(Videoio_Writer_bad_fourcc, nocrash)
+{
+    if (!isBackendAvailable(apiPref, cv::videoio_registry::getStreamBackends()))
+        throw SkipTestException(cv::String("Backend is not available/disabled: ") + cv::videoio_registry::getBackendName(apiPref));
+
+    VideoWriter writer;
+    EXPECT_NO_THROW(writer.open(video_file, apiPref, fourcc, fps, frame_size, true));
+    ASSERT_FALSE(writer.isOpened());
+    EXPECT_NO_THROW(writer.release());
+}
+
+static vector<Ext_Fourcc_API> generate_Ext_Fourcc_API_nocrash()
+{
+    static const Ext_Fourcc_API params[] = {
+#ifdef HAVE_MSMF_DISABLED // MSMF opens writer stream
+    {"wmv", "aaaa", CAP_MSMF},
+    {"mov", "aaaa", CAP_MSMF},
+#endif
+
+#ifdef HAVE_QUICKTIME
+    {"mov", "aaaa", CAP_QT},
+    {"avi", "aaaa", CAP_QT},
+    {"mkv", "aaaa", CAP_QT},
+#endif
+
+#ifdef HAVE_AVFOUNDATION
+    {"mov", "aaaa", CAP_AVFOUNDATION},
+    {"mp4", "aaaa", CAP_AVFOUNDATION},
+    {"m4v", "aaaa", CAP_AVFOUNDATION},
+#endif
+
+#ifdef HAVE_FFMPEG
+    {"avi", "aaaa", CAP_FFMPEG},
+    {"mkv", "aaaa", CAP_FFMPEG},
+#endif
+
+#ifdef HAVE_GSTREAMER
+    {"avi", "aaaa", CAP_GSTREAMER},
+    {"mkv", "aaaa", CAP_GSTREAMER},
+#endif
+    {"avi", "aaaa", CAP_OPENCV_MJPEG},
+};
+
+    const size_t N = sizeof(params)/sizeof(params[0]);
+    vector<Ext_Fourcc_API> result; result.reserve(N);
+    for (size_t i = 0; i < N; i++)
+    {
+        const Ext_Fourcc_API& src = params[i];
+        Ext_Fourcc_API e = { src.ext, src.fourcc, src.api };
+        result.push_back(e);
+    }
+    return result;
+}
+
+INSTANTIATE_TEST_CASE_P(videoio, Videoio_Writer_bad_fourcc, testing::ValuesIn(generate_Ext_Fourcc_API_nocrash()));
+
 } // namespace