Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/15112/head
Alexander Alekhin 5 years ago
commit 426482e05b
Files changed (with per-file changed-line counts):
  1. modules/dnn/src/dnn.cpp (5)
  2. modules/dnn/src/layers/layers_common.cpp (4)
  3. modules/dnn/src/onnx/onnx_importer.cpp (38)
  4. modules/dnn/test/test_onnx_importer.cpp (12)
  5. modules/dnn/test/test_tf_importer.cpp (8)
  6. modules/js/src/embindgen.py (4)
  7. modules/js/test/test_features2d.js (33)
  8. modules/videoio/src/cap_gstreamer.cpp (30)
  9. modules/videoio/test/test_video_io.cpp (59)

@ -2233,7 +2233,10 @@ struct Net::Impl
if (isAsync)
CV_Error(Error::StsNotImplemented, "Default implementation fallbacks in asynchronous mode");
CV_Assert(layer->supportBackend(DNN_BACKEND_OPENCV));
if (!layer->supportBackend(DNN_BACKEND_OPENCV))
CV_Error(Error::StsNotImplemented, format("Layer \"%s\" of type \"%s\" unsupported on OpenCV backend",
ld.name.c_str(), ld.type.c_str()));
if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
{
std::vector<UMat> umat_inputBlobs = OpenCLBackendWrapper::getUMatVector(ld.inputBlobsWrappers);

@ -148,13 +148,12 @@ void getPoolingKernelParams(const LayerParams &params, std::vector<size_t>& kern
std::vector<size_t>& pads_begin, std::vector<size_t>& pads_end,
std::vector<size_t>& strides, cv::String &padMode)
{
util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode);
globalPooling = params.has("global_pooling") &&
params.get<bool>("global_pooling");
if (globalPooling)
{
util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode);
if(params.has("kernel_h") || params.has("kernel_w") || params.has("kernel_size"))
{
CV_Error(cv::Error::StsBadArg, "In global_pooling mode, kernel_size (or kernel_h and kernel_w) cannot be specified");
@ -171,6 +170,7 @@ void getPoolingKernelParams(const LayerParams &params, std::vector<size_t>& kern
else
{
util::getKernelSize(params, kernel);
util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode, kernel.size());
}
}

@ -397,11 +397,33 @@ void ONNXImporter::populateNet(Net dstNet)
layerParams.set("ceil_mode", layerParams.has("pad_mode"));
layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
}
else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool")
else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool" || layer_type == "ReduceMean")
{
CV_Assert(node_proto.input_size() == 1);
layerParams.type = "Pooling";
layerParams.set("pool", layer_type == "GlobalAveragePool" ? "AVE" : "MAX");
layerParams.set("global_pooling", true);
layerParams.set("pool", layer_type == "GlobalMaxPool"? "MAX" : "AVE");
layerParams.set("global_pooling", layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool");
if (layer_type == "ReduceMean")
{
if (layerParams.get<int>("keepdims") == 0 || !layerParams.has("axes"))
CV_Error(Error::StsNotImplemented, "Unsupported mode of ReduceMean operation.");
MatShape inpShape = outShapes[node_proto.input(0)];
if (inpShape.size() != 4 && inpShape.size() != 5)
CV_Error(Error::StsNotImplemented, "Unsupported input shape of reduce_mean operation.");
DictValue axes = layerParams.get("axes");
CV_Assert(axes.size() <= inpShape.size() - 2);
std::vector<int> kernel_size(inpShape.size() - 2, 1);
for (int i = 0; i < axes.size(); i++) {
int axis = axes.get<int>(i);
CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
kernel_size[axis - 2] = inpShape[axis];
}
layerParams.set("kernel_size", DictValue::arrayInt(&kernel_size[0], kernel_size.size()));
}
}
else if (layer_type == "Slice")
{
@ -747,11 +769,13 @@ void ONNXImporter::populateNet(Net dstNet)
if (axes.size() != 1)
CV_Error(Error::StsNotImplemented, "Multidimensional unsqueeze");
int dims[] = {1, -1};
MatShape inpShape = outShapes[node_proto.input(0)];
int axis = axes.getIntValue(0);
CV_Assert(0 <= axis && axis <= inpShape.size());
std::vector<int> outShape = inpShape;
outShape.insert(outShape.begin() + axis, 1);
layerParams.type = "Reshape";
layerParams.set("axis", axes.getIntValue(0));
layerParams.set("num_axes", 1);
layerParams.set("dim", DictValue::arrayInt(&dims[0], 2));
layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
}
else if (layer_type == "Reshape")
{

@ -162,6 +162,18 @@ TEST_P(Test_ONNX_layers, Clip)
testONNXModels("clip", npy);
}
// Regression test for importing the ONNX ReduceMean operator
// (mapped onto a Pooling layer by the ONNX importer).
TEST_P(Test_ONNX_layers, ReduceMean)
{
    testONNXModels("reduce_mean");
}
// ReduceMean on a 5-D (3D-spatial) input; skipped on non-CPU targets
// because only the CPU implementation supports this case.
TEST_P(Test_ONNX_layers, ReduceMean3D)
{
    if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("reduce_mean3d");
}
TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
{
testONNXModels("maxpooling_sigmoid");

@ -350,11 +350,6 @@ TEST_P(Test_TensorFlow_layers, l2_normalize_3d)
runTensorFlowNet("l2_normalize_3d");
}
// Runs the stored TensorFlow 'split' network through the TF importer.
TEST_P(Test_TensorFlow_layers, Split)
{
    runTensorFlowNet("split");
}
class Test_TensorFlow_nets : public DNNTestLayer {};
TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
@ -682,6 +677,9 @@ TEST_P(Test_TensorFlow_layers, lstm)
TEST_P(Test_TensorFlow_layers, split)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
runTensorFlowNet("split");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
runTensorFlowNet("split_equals");

@ -141,7 +141,7 @@ features2d = {'Feature2D': ['detect', 'compute', 'detectAndCompute', 'descriptor
'AKAZE': ['create', 'setDescriptorType', 'getDescriptorType', 'setDescriptorSize', 'getDescriptorSize', 'setDescriptorChannels', 'getDescriptorChannels', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
'DescriptorMatcher': ['add', 'clear', 'empty', 'isMaskSupported', 'train', 'match', 'knnMatch', 'radiusMatch', 'clone', 'create'],
'BFMatcher': ['isMaskSupported', 'create'],
'': ['drawKeypoints', 'drawMatches']}
'': ['drawKeypoints', 'drawMatches', 'drawMatchesKnn']}
photo = {'': ['createAlignMTB', 'createCalibrateDebevec', 'createCalibrateRobertson', \
'createMergeDebevec', 'createMergeMertens', 'createMergeRobertson', \
@ -590,7 +590,7 @@ class JSWrapperGenerator(object):
match = re.search(r'const std::vector<(.*)>&', arg_type)
if match:
type_in_vect = match.group(1)
if type_in_vect != 'cv::Mat':
if type_in_vect in ['int', 'float', 'double', 'char', 'uchar', 'String', 'std::string']:
casted_arg_name = 'emscripten::vecFromJSArray<' + type_in_vect + '>(' + arg_name + ')'
arg_type = re.sub(r'std::vector<(.*)>', 'emscripten::val', arg_type)
w_signature.append(arg_type + ' ' + arg_name)

@ -80,3 +80,36 @@ QUnit.test('BFMatcher', function(assert) {
assert.equal(dm.size(), 67);
});
// Exercises the drawing helpers exposed to JS: drawKeypoints, drawMatches
// and drawMatchesKnn, using ORB keypoints/descriptors from a test frame.
QUnit.test('Drawing', function(assert) {
    // Generate key points.
    let image = generateTestFrame();
    let kp = new cv.KeyPointVector();
    let descriptors = new cv.Mat();
    let orb = new cv.ORB();
    orb.detectAndCompute(image, new cv.Mat(), kp, descriptors);
    assert.equal(kp.size(), 67);

    // drawKeypoints: output has the same size as the input image.
    let dst = new cv.Mat();
    cv.drawKeypoints(image, kp, dst);
    assert.equal(dst.rows, image.rows);
    assert.equal(dst.cols, image.cols);

    // Run a matcher.
    let dm = new cv.DMatchVector();
    let matcher = new cv.BFMatcher();
    matcher.match(descriptors, descriptors, dm);
    assert.equal(dm.size(), 67);

    // drawMatches: the two images are placed side by side, so the
    // output is twice as wide as a single input.
    cv.drawMatches(image, kp, image, kp, dm, dst);
    assert.equal(dst.rows, image.rows);
    assert.equal(dst.cols, 2 * image.cols);

    // knnMatch returns a vector-of-vectors; drawMatchesKnn consumes it
    // and produces the same side-by-side layout.
    dm = new cv.DMatchVectorVector();
    matcher.knnMatch(descriptors, descriptors, dm, 2);
    assert.equal(dm.size(), 67);
    cv.drawMatchesKnn(image, kp, image, kp, dm, dst);
    assert.equal(dst.rows, image.rows);
    assert.equal(dst.cols, 2 * image.cols);
});

@ -141,14 +141,14 @@ public:
inline operator T* () CV_NOEXCEPT { return ptr; }
inline operator /*const*/ T* () const CV_NOEXCEPT { return (T*)ptr; } // there is no const correctness in Gst C API
inline T* get() CV_NOEXCEPT { return ptr; }
inline /*const*/ T* get() const CV_NOEXCEPT { CV_Assert(ptr); return (T*)ptr; } // there is no const correctness in Gst C API
T* get() { CV_Assert(ptr); return ptr; }
/*const*/ T* get() const { CV_Assert(ptr); return (T*)ptr; } // there is no const correctness in Gst C API
inline const T* operator -> () const { CV_Assert(ptr); return ptr; }
const T* operator -> () const { CV_Assert(ptr); return ptr; }
inline operator bool () const CV_NOEXCEPT { return ptr != NULL; }
inline bool operator ! () const CV_NOEXCEPT { return ptr == NULL; }
inline T** getRef() { CV_Assert(ptr == NULL); return &ptr; }
T** getRef() { CV_Assert(ptr == NULL); return &ptr; }
inline GSafePtr& reset(T* p) CV_NOEXCEPT // pass result of functions with "transfer floating" ownership
{
@ -1221,7 +1221,21 @@ public:
num_frames(0), framerate(0)
{
}
virtual ~CvVideoWriter_GStreamer() CV_OVERRIDE { close(); }
virtual ~CvVideoWriter_GStreamer() CV_OVERRIDE
{
    // Destructors must never throw: close() interacts with the GStreamer
    // pipeline and may raise, so swallow any exception and only warn.
    try
    {
        close();
    }
    catch (const std::exception& e)
    {
        CV_WARN("C++ exception in writer destructor: " << e.what());
    }
    catch (...)
    {
        CV_WARN("Unknown exception in writer destructor. Ignore");
    }
}
int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_GSTREAMER; }
@ -1253,7 +1267,11 @@ void CvVideoWriter_GStreamer::close_()
{
handleMessage(pipeline);
if (gst_app_src_end_of_stream(GST_APP_SRC(source.get())) != GST_FLOW_OK)
if (!(bool)source)
{
CV_WARN("No source in GStreamer pipeline. Ignore");
}
else if (gst_app_src_end_of_stream(GST_APP_SRC(source.get())) != GST_FLOW_OK)
{
CV_WARN("Cannot send EOS to GStreamer pipeline");
}

@ -506,4 +506,63 @@ TEST(Videoio, exceptions)
EXPECT_THROW(cap.open("this_does_not_exist.avi", CAP_OPENCV_MJPEG), Exception);
}
typedef Videoio_Writer Videoio_Writer_bad_fourcc;
// A deliberately invalid fourcc must make VideoWriter::open() fail
// gracefully: no throw, no crash, writer stays unopened, and release()
// on the never-opened writer is also safe.
TEST_P(Videoio_Writer_bad_fourcc, nocrash)
{
    if (!isBackendAvailable(apiPref, cv::videoio_registry::getStreamBackends()))
        throw SkipTestException(cv::String("Backend is not available/disabled: ") + cv::videoio_registry::getBackendName(apiPref));
    VideoWriter writer;
    EXPECT_NO_THROW(writer.open(video_file, apiPref, fourcc, fps, frame_size, true));
    ASSERT_FALSE(writer.isOpened());
    EXPECT_NO_THROW(writer.release());
}
// Builds the parameter list for Videoio_Writer_bad_fourcc: container /
// fourcc / backend triples where the fourcc ("aaaa") is intentionally
// bogus, so each backend's writer must refuse to open without crashing.
static vector<Ext_Fourcc_API> generate_Ext_Fourcc_API_nocrash()
{
    static const Ext_Fourcc_API params[] = {
#ifdef HAVE_MSMF_DISABLED // MSMF opens writer stream
        {"wmv", "aaaa", CAP_MSMF},
        {"mov", "aaaa", CAP_MSMF},
#endif

#ifdef HAVE_QUICKTIME
        {"mov", "aaaa", CAP_QT},
        {"avi", "aaaa", CAP_QT},
        {"mkv", "aaaa", CAP_QT},
#endif

#ifdef HAVE_AVFOUNDATION
        {"mov", "aaaa", CAP_AVFOUNDATION},
        {"mp4", "aaaa", CAP_AVFOUNDATION},
        {"m4v", "aaaa", CAP_AVFOUNDATION},
#endif

#ifdef HAVE_FFMPEG
        {"avi", "aaaa", CAP_FFMPEG},
        {"mkv", "aaaa", CAP_FFMPEG},
#endif

#ifdef HAVE_GSTREAMER
        {"avi", "aaaa", CAP_GSTREAMER},
        {"mkv", "aaaa", CAP_GSTREAMER},
#endif
        {"avi", "aaaa", CAP_OPENCV_MJPEG},
    };
    // Copy the static table straight into the vector ValuesIn() consumes;
    // the range constructor preserves element order exactly.
    return vector<Ext_Fourcc_API>(params, params + sizeof(params) / sizeof(params[0]));
}
INSTANTIATE_TEST_CASE_P(videoio, Videoio_Writer_bad_fourcc, testing::ValuesIn(generate_Ext_Fourcc_API_nocrash()));
} // namespace

Loading…
Cancel
Save