Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/14857/head
Alexander Alekhin 6 years ago
commit f33f88de31
10 changed files:

 modules/calib3d/src/calibration.cpp                      |  2
 modules/core/include/opencv2/core/hal/intrin_avx512.hpp  | 10
 modules/dnn/src/layers/batch_norm_layer.cpp              |  1
 modules/dnn/src/layers/mvn_layer.cpp                     |  2
 modules/dnn/src/layers/reshape_layer.cpp                 |  1
 modules/dnn/src/onnx/onnx_importer.cpp                   |  2
 modules/imgproc/src/sumpixels.avx512_skx.cpp             |  3
 modules/python/package/cv2/__init__.py                   |  2
 modules/videoio/src/cap_mfx_writer.cpp                   |  2
 samples/dnn/js_face_recognition.html                     | 10

@@ -2528,6 +2528,8 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
     double c = _t[idx], nt = cvNorm(&t, 0, CV_L2);
     _uu[idx] = c > 0 ? 1 : -1;
+    CV_Assert(nt > 0.0);
     // calculate global Z rotation
     cvCrossProduct(&t,&uu,&ww);
     double nw = cvNorm(&ww, 0, CV_L2);
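The added assertion rejects a degenerate stereo pair whose translation between the cameras has zero length, which would otherwise be normalized by dividing by zero further down. For context, a standalone sketch of the same guard written against the C++ API (illustrative only; the function above uses the legacy CvMat code):

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // Translation between the two cameras; a zero vector means the cameras
    // coincide and rectification is undefined.
    cv::Vec3d t(0.10, 0.00, 0.02);
    double nt = cv::norm(t, cv::NORM_L2);
    CV_Assert(nt > 0.0);               // same check the hunk adds: fail early and loudly
    cv::Vec3d t_unit = t * (1.0 / nt); // only normalize once the norm is known to be positive
    std::printf("unit baseline: %f %f %f\n", t_unit[0], t_unit[1], t_unit[2]);
    return 0;
}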

@@ -5,6 +5,12 @@
 #ifndef OPENCV_HAL_INTRIN_AVX512_HPP
 #define OPENCV_HAL_INTRIN_AVX512_HPP
+#if defined(_MSC_VER) && (_MSC_VER < 1920/*MSVS2019*/)
+# pragma warning(disable:4146) // unary minus operator applied to unsigned type, result still unsigned
+# pragma warning(disable:4309) // 'argument': truncation of constant value
+# pragma warning(disable:4310) // cast truncates constant value
+#endif
 #define CVT_ROUND_MODES_IMPLEMENTED 0
 #define CV_SIMD512 1
@@ -1599,13 +1605,13 @@ inline v_float64x8 v_lut(const double* tab, const v_int32x16& idxvec)
 inline void v_lut_deinterleave(const float* tab, const v_int32x16& idxvec, v_float32x16& x, v_float32x16& y)
 {
     x.val = _mm512_i32gather_ps(idxvec.val, tab, 4);
-    y.val = _mm512_i32gather_ps(idxvec.val, tab + 1, 4);
+    y.val = _mm512_i32gather_ps(idxvec.val, &tab[1], 4);
 }
 inline void v_lut_deinterleave(const double* tab, const v_int32x16& idxvec, v_float64x8& x, v_float64x8& y)
 {
     x.val = _mm512_i32gather_pd(_v512_extract_low(idxvec.val), tab, 8);
-    y.val = _mm512_i32gather_pd(_v512_extract_low(idxvec.val), tab + 1, 8);
+    y.val = _mm512_i32gather_pd(_v512_extract_low(idxvec.val), &tab[1], 8);
 }
 inline v_int8x64 v_interleave_pairs(const v_int8x64& vec)
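Two independent fixes meet in this file: the new #if defined(_MSC_VER) && (_MSC_VER < 1920) block silences three spurious warnings that pre-VS2019 MSVC emits for these intrinsics, and tab + 1 becomes &tab[1] in the gather calls. The two expressions denote the same address, so the rewrite is presumably a workaround for a compiler quirk rather than a behavioral change. For context, a standalone sketch of what v_lut_deinterleave does, written against the 128-bit universal intrinsics so it runs on any SSE2/NEON build (the hunk above is only the AVX-512 specialization):

#include <opencv2/core.hpp>
#include <opencv2/core/hal/intrin.hpp>
#include <cstdio>

int main()
{
#if CV_SIMD128
    using namespace cv;
    // Interleaved (x, y) pairs, the layout v_lut_deinterleave expects.
    float tab[] = { 0.f, 10.f,  1.f, 11.f,  2.f, 12.f,  3.f, 13.f };
    int   idx[] = { 0, 2, 4, 6 };          // index of each pair's x element
    v_int32x4   vidx = v_load(idx);
    v_float32x4 x, y;
    v_lut_deinterleave(tab, vidx, x, y);   // x <- tab[idx[i]], y <- tab[idx[i] + 1]
    float bx[4], by[4];
    v_store(bx, x);
    v_store(by, y);
    std::printf("x: %g %g %g %g\n", bx[0], bx[1], bx[2], bx[3]);
    std::printf("y: %g %g %g %g\n", by[0], by[1], by[2], by[3]);
#endif
    return 0;
}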

@@ -33,6 +33,7 @@ public:
     BatchNormLayerImpl(const LayerParams& params)
         : dims(-1)
     {
+        setParamsFrom(params);
         CV_Assert(blobs.size() >= 2);
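setParamsFrom(params) copies the layer's name, type and, crucially, params.blobs into the Layer instance, so the blobs.size() >= 2 check now sees the statistics blobs supplied by the importer. A standalone sketch of the effect, creating the layer through the public factory (illustrative only; the blob shapes and values are made up):

#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    cv::dnn::LayerParams params;
    params.name = "bn1";
    params.type = "BatchNorm";
    params.blobs.push_back(cv::Mat::zeros(1, 4, CV_32F)); // first required blob (mean)
    params.blobs.push_back(cv::Mat::ones(1, 4, CV_32F));  // second required blob (variance/stddev,
                                                          // depending on the source framework)

    cv::Ptr<cv::dnn::Layer> bn =
        cv::dnn::LayerFactory::createLayerInstance(params.type, params);
    CV_Assert(bn && bn->blobs.size() == 2);  // setParamsFrom() copied the blobs into the layer
    std::cout << bn->name << " holds " << bn->blobs.size() << " blobs" << std::endl;
    return 0;
}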

@@ -147,6 +147,7 @@ public:
         UMat &inpMat = inputs[inpIdx];
         UMat &outMat = outputs[inpIdx];
         int newRows = total(shape(inpMat), 0, splitDim);
+        CV_Assert(newRows != 0);
         MatShape s = shape(newRows, inpMat.total() / newRows);
         UMat meanMat = UMat(s[0], 1, (use_half) ? CV_16S : CV_32F);
@@ -221,6 +222,7 @@ public:
         UMat &inpMat = inputs[inpIdx];
         UMat &outMat = outputs[inpIdx];
         int newRows = total(shape(inpMat), 0, splitDim);
+        CV_Assert(newRows != 0);
         MatShape s = shape(newRows, inpMat.total() / newRows);
         UMat oneMat = UMat::ones(s[1], 1, CV_32F);

@@ -138,6 +138,7 @@ static void computeShapeByReshapeMask(const MatShape &srcShape,
     size_t srcTotal = total(srcShape);
     size_t dstTotal = total(dstShape);
+    CV_Assert(dstTotal != 0);
     if (inferDim != -1)
     {
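The mvn and reshape hunks above add the same kind of guard: if a blob has a zero-sized dimension, the computed row count or total is zero, and the division that follows would be undefined, so the divisor is now asserted first. A standalone sketch of the shape arithmetic involved (illustrative only; shape() and total() come from opencv2/dnn/shape_utils.hpp):

#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>
#include <iostream>

int main()
{
    using namespace cv::dnn;

    int sz[] = { 2, 3, 8, 8 };                  // NCHW-style blob dimensions
    cv::Mat blob(4, sz, CV_32F);

    MatShape s = shape(blob);                   // {2, 3, 8, 8}
    int splitDim = 2;
    int newRows = total(s, 0, splitDim);        // 2 * 3 = 6

    CV_Assert(newRows != 0);                    // the added guard: a zero-sized leading
                                                // dimension would make this 0 and the
                                                // division below undefined
    size_t newCols = blob.total() / newRows;    // 384 / 6 = 64
    std::cout << newRows << " x " << newCols << std::endl;
    return 0;
}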

@@ -162,6 +162,8 @@ void runLayer(LayerParams& params, const std::vector<Mat>& inputs,
               std::vector<Mat>& outputs)
 {
     Ptr<Layer> layer = LayerFactory::createLayerInstance(params.type, params);
+    CV_Assert((bool)layer);
     std::vector<MatShape> inpShapes(inputs.size());
     int ddepth = CV_32F;
     for (size_t i = 0; i < inputs.size(); ++i)
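LayerFactory::createLayerInstance() returns an empty Ptr when the requested type is not registered, so without the new assertion an unsupported ONNX node would surface later as a null-pointer dereference instead of a clear error. A standalone sketch of the behaviour being guarded against (illustrative only; "NoSuchLayer" is a deliberately unregistered type name):

#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    cv::dnn::LayerParams params;

    params.type = "ReLU";                         // a built-in, registered layer type
    cv::Ptr<cv::dnn::Layer> good =
        cv::dnn::LayerFactory::createLayerInstance(params.type, params);

    params.type = "NoSuchLayer";                  // hypothetical, not registered
    cv::Ptr<cv::dnn::Layer> bad =
        cv::dnn::LayerFactory::createLayerInstance(params.type, params);

    std::cout << "ReLU:        " << (good ? "created" : "empty") << std::endl;
    std::cout << "NoSuchLayer: " << (bad ? "created" : "empty") << std::endl;
    // The importer's new CV_Assert((bool)layer) fires in the second case,
    // reporting the unsupported node instead of crashing later.
    return 0;
}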

@@ -6,6 +6,9 @@
 #include "precomp.hpp"
 #include "sumpixels.hpp"
+#include "opencv2/core/hal/intrin.hpp"
 namespace cv {
 namespace { // Anonymous namespace to avoid exposing the implementation classes

@@ -39,7 +39,7 @@ def bootstrap():
     l_vars = locals()
     if sys.version_info[:2] < (3, 0):
-        from cv2.load_config_py2 import exec_file_wrapper
+        from . load_config_py2 import exec_file_wrapper
     else:
         from . load_config_py3 import exec_file_wrapper

@@ -31,7 +31,7 @@ inline mfxU32 codecIdByFourCC(int fourcc)
 }
 VideoWriter_IntelMFX::VideoWriter_IntelMFX(const String &filename, int _fourcc, double fps, Size frameSize_, bool)
-    : session(0), plugin(0), deviceHandler(0), bs(0), encoder(0), pool(0), frameSize(frameSize_), good(false)
+    : session(0), plugin(0), deviceHandler(0), bs(0), encoder(0), pool(0), outSurface(NULL), frameSize(frameSize_), good(false)
 {
     mfxStatus res = MFX_ERR_NONE;
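The only change here is adding outSurface(NULL) to the constructor's initializer list: a raw-pointer member that is omitted from the list is left uninitialized, so any read before the first assignment is undefined behaviour. A standalone sketch of the pitfall (illustrative only; the struct and member names are stand-ins, not the MFX types):

#include <cstddef>

// Stand-in for VideoWriter_IntelMFX: two raw-pointer members, only one of
// which is mentioned in the initializer list.
struct Writer
{
    void* encoder;
    void* outSurface;

    Writer()
        : encoder(NULL)          // initialized
                                 // outSurface intentionally missing, as before the fix
    {}
};

int main()
{
    Writer w;
    // w.encoder is guaranteed to be NULL here.
    // w.outSurface holds an indeterminate value; reading it (e.g. "if (w.outSurface)")
    // before assigning it is undefined behaviour. Listing it as outSurface(NULL),
    // as the one-line change above does, removes that hazard.
    return 0;
}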

@@ -3,12 +3,12 @@
 <html>
 <head>
 <script async src="../../opencv.js" type="text/javascript"></script>
+<script src="../../utils.js" type="text/javascript"></script>
 <script type='text/javascript'>
 var netDet = undefined, netRecogn = undefined;
 var persons = {};
+var utils = new Utils('');
 //! [Run face detection model]
 function detectFaces(img) {
@@ -68,7 +68,6 @@ function recognize(face) {
 //! [Recognize]
 function loadModels(callback) {
-  var utils = new Utils('');
   var proto = 'https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt';
   var weights = 'https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel';
   var recognModel = 'https://raw.githubusercontent.com/pyannote/pyannote-data/master/openface.nn4.small2.v1.t7';
@@ -186,11 +185,16 @@ function main() {
   document.getElementById('startStopButton').disabled = false;
 };
+// Load opencv.js
+utils.loadOpenCv(() => {
+  main();
+});
 </script>
 </head>
-<body onload="main()">
+<body>
 <button id="startStopButton" type="button" disabled="true">Start</button>
 <div id="status"></div>
 <canvas id="output" width=640 height=480 style="max-width: 100%"></canvas>
