Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/14663/head
Alexander Alekhin authored 6 years ago; committed by Alexander Alekhin
commit 43467a2ac7
  1. 15
      CMakeLists.txt
  2. 6
      cmake/OpenCVDetectTBB.cmake
  3. 2
      doc/py_tutorials/py_tutorials.markdown
  4. BIN
      doc/py_tutorials/py_video/images/background.jpg
  5. BIN
      doc/py_tutorials/py_video/images/camshift.jpg
  6. BIN
      doc/py_tutorials/py_video/images/lucas.jpg
  7. BIN
      doc/py_tutorials/py_video/images/opticalflow.jpeg
  8. BIN
      doc/py_tutorials/py_video/py_bg_subtraction/images/resframe.jpg
  9. BIN
      doc/py_tutorials/py_video/py_bg_subtraction/images/resgmg.jpg
  10. BIN
      doc/py_tutorials/py_video/py_bg_subtraction/images/resmog.jpg
  11. BIN
      doc/py_tutorials/py_video/py_bg_subtraction/images/resmog2.jpg
  12. 171
      doc/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.markdown
  13. 14
      doc/py_tutorials/py_video/py_table_of_contents_video.markdown
  14. 8
      modules/calib3d/include/opencv2/calib3d.hpp
  15. 15
      modules/calib3d/misc/java/gen_dict.json
  16. 34
      modules/calib3d/misc/java/test/Calib3dTest.java
  17. 4
      modules/calib3d/perf/perf_pnp.cpp
  18. 6
      modules/calib3d/src/ap3p.cpp
  19. 7
      modules/calib3d/src/calibration.cpp
  20. 42
      modules/calib3d/src/epnp.cpp
  21. 17
      modules/calib3d/src/p3p.cpp
  22. 13
      modules/calib3d/src/solvepnp.cpp
  23. 12
      modules/calib3d/src/undistort.cpp
  24. 177
      modules/calib3d/test/test_cameracalibration.cpp
  25. 372
      modules/calib3d/test/test_solvepnp_ransac.cpp
  26. 176
      modules/calib3d/test/test_undistort.cpp
  27. 2
      modules/calib3d/test/test_undistort_badarg.cpp
  28. 10
      modules/core/include/opencv2/core/hal/intrin_avx.hpp
  29. 4
      modules/core/misc/java/test/MatTest.java
  30. 28
      modules/core/misc/java/test/RotatedRectTest.java
  31. 9
      modules/core/test/test_intrin_utils.hpp
  32. 2
      modules/dnn/include/opencv2/dnn/all_layers.hpp
  33. 3
      modules/dnn/perf/perf_net.cpp
  34. 81
      modules/dnn/src/dnn.cpp
  35. 66
      modules/dnn/src/layers/slice_layer.cpp
  36. 14
      modules/dnn/test/test_backends.cpp
  37. 28
      modules/dnn/test/test_caffe_importer.cpp
  38. 7
      modules/dnn/test/test_onnx_importer.cpp
  39. 14
      modules/dnn/test/test_tf_importer.cpp
  40. 4
      modules/imgcodecs/misc/java/test/ImgcodecsTest.java
  41. 77
      modules/imgcodecs/src/grfmt_tiff.cpp
  42. 1369
      modules/imgproc/src/color_hsv.simd.hpp
  43. 27
      modules/imgproc/src/color_lab.cpp
  44. 2
      modules/java/test/android_test/src/org/opencv/test/OpenCVTestCase.java
  45. 2
      modules/java/test/android_test/src/org/opencv/test/OpenCVTestRunner.java
  46. 6
      modules/java/test/android_test/src/org/opencv/test/android/UtilsTest.java
  47. 9
      modules/java/test/pure_test/build.xml
  48. 2
      modules/java/test/pure_test/src/org/opencv/test/OpenCVTestCase.java
  49. 44
      modules/js/src/core_bindings.cpp
  50. 2
      modules/js/src/embindgen.py
  51. 54
      modules/js/test/test_imgproc.js
  52. 6
      modules/objdetect/misc/java/test/CascadeClassifierTest.java
  53. 2
      modules/ts/misc/run.py
  54. 5
      modules/ts/misc/run_suite.py
  55. 55
      modules/videoio/src/cap_dshow.cpp
  56. 213
      modules/videoio/src/cap_images.cpp

@@ -1527,11 +1527,16 @@ if(WITH_INF_ENGINE OR INF_ENGINE_TARGET)
if(INF_ENGINE_TARGET)
set(__msg "YES (${INF_ENGINE_RELEASE} / ${INF_ENGINE_VERSION})")
get_target_property(_lib ${INF_ENGINE_TARGET} IMPORTED_LOCATION)
if(NOT _lib)
get_target_property(_lib_rel ${INF_ENGINE_TARGET} IMPORTED_IMPLIB_RELEASE)
get_target_property(_lib_dbg ${INF_ENGINE_TARGET} IMPORTED_IMPLIB_DEBUG)
set(_lib "${_lib_rel} / ${_lib_dbg}")
endif()
get_target_property(_lib_imp_rel ${INF_ENGINE_TARGET} IMPORTED_IMPLIB_RELEASE)
get_target_property(_lib_imp_dbg ${INF_ENGINE_TARGET} IMPORTED_IMPLIB_DEBUG)
get_target_property(_lib_rel ${INF_ENGINE_TARGET} IMPORTED_LOCATION_RELEASE)
get_target_property(_lib_dbg ${INF_ENGINE_TARGET} IMPORTED_LOCATION_DEBUG)
ocv_build_features_string(_lib
IF _lib THEN "${_lib}"
IF _lib_imp_rel AND _lib_imp_dbg THEN "${_lib_imp_rel} / ${_lib_imp_dbg}"
IF _lib_rel AND _lib_dbg THEN "${_lib_rel} / ${_lib_dbg}"
ELSE "unknown"
)
get_target_property(_inc ${INF_ENGINE_TARGET} INTERFACE_INCLUDE_DIRECTORIES)
status(" Inference Engine:" "${__msg}")
status(" libs:" "${_lib}")

@@ -70,9 +70,13 @@ function(ocv_tbb_env_guess _found)
add_library(tbb UNKNOWN IMPORTED)
set_target_properties(tbb PROPERTIES
IMPORTED_LOCATION "${TBB_ENV_LIB}"
IMPORTED_LOCATION_DEBUG "${TBB_ENV_LIB_DEBUG}"
INTERFACE_INCLUDE_DIRECTORIES "${TBB_ENV_INCLUDE}"
)
if (TBB_ENV_LIB_DEBUG)
set_target_properties(tbb PROPERTIES
IMPORTED_LOCATION_DEBUG "${TBB_ENV_LIB_DEBUG}"
)
endif()
# workaround: system TBB library is used for linking instead of provided
if(CV_GCC)
get_filename_component(_dir "${TBB_ENV_LIB}" DIRECTORY)

@@ -25,7 +25,7 @@ OpenCV-Python Tutorials {#tutorial_py_root}
In this section
you will learn about feature detectors and descriptors
- @subpage tutorial_py_table_of_contents_video
- @ref tutorial_table_of_content_video
In this section you
will learn different techniques to work with videos like object tracking etc.

Binary files not shown. Eight images removed (each listed as "Before" only): background.jpg (4.2 KiB), camshift.jpg (5.4 KiB), lucas.jpg (5.6 KiB), opticalflow.jpeg (6.3 KiB), resframe.jpg (20 KiB), resgmg.jpg (11 KiB), resmog.jpg (6.6 KiB), resmog2.jpg (11 KiB).

@@ -1,173 +1,4 @@
Background Subtraction {#tutorial_py_bg_subtraction}
======================
Goal
----
In this chapter,
- We will familiarize ourselves with the background subtraction methods available in OpenCV.
Basics
------
Background subtraction is a major preprocessing step in many vision-based applications. For
example, consider the case of a visitor counter where a static camera counts the number of visitors
entering or leaving a room, or a traffic camera extracting information about vehicles, etc. In
all these cases, you first need to extract the person or vehicles alone. Technically, you need to
extract the moving foreground from the static background.

If you have an image of the background alone, like an image of the room without visitors or of the
road without vehicles, it is an easy job: just subtract the new image from the background and you get
the foreground objects alone. But in most cases you may not have such an image, so we need
to extract the background from whatever images we have. It becomes more complicated when there are
shadows of the vehicles: since shadows also move, simple subtraction will mark them as
foreground too.
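To make the naive approach concrete, below is a minimal sketch (the filenames are hypothetical; it assumes you already have a background-only image and a current frame of the same size). The shadow problem described above is exactly what this version suffers from:
@code{.py}
import cv2 as cv

# Hypothetical inputs: a background-only image and a current frame of the same size.
background = cv.imread('background.jpg', cv.IMREAD_GRAYSCALE)
frame = cv.imread('frame.jpg', cv.IMREAD_GRAYSCALE)

# Naive subtraction: pixels that differ strongly from the background count as foreground.
diff = cv.absdiff(frame, background)
_, fgmask = cv.threshold(diff, 30, 255, cv.THRESH_BINARY)

cv.imshow('foreground mask', fgmask)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode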
Several algorithms were introduced for this purpose. OpenCV has implemented three such algorithms
which are very easy to use. We will see them one-by-one.
### BackgroundSubtractorMOG
It is a Gaussian Mixture-based Background/Foreground Segmentation Algorithm. It was introduced in
the paper "An improved adaptive background mixture model for real-time tracking with shadow
detection" by P. KadewTraKuPong and R. Bowden in 2001. It uses a method to model each background
pixel by a mixture of K Gaussian distributions (K = 3 to 5). The weights of the mixture represent
the time proportions that those colours stay in the scene. The probable background colours are the
ones that stay longer and are more static.

While coding, we need to create a background subtractor object using the function
**cv.bgsegm.createBackgroundSubtractorMOG()**. It has some optional parameters like the length of the
history, the number of Gaussian mixtures, the threshold, etc., all set to sensible default values.
Then, inside the video loop, use the backgroundsubtractor.apply() method to get the foreground mask.
See a simple example below:
@code{.py}
import numpy as np
import cv2 as cv

cap = cv.VideoCapture('vtest.avi')

fgbg = cv.bgsegm.createBackgroundSubtractorMOG()

while(1):
    ret, frame = cap.read()

    fgmask = fgbg.apply(frame)

    cv.imshow('frame',fgmask)
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv.destroyAllWindows()
@endcode
(All the results are shown at the end for comparison.)
### BackgroundSubtractorMOG2
It is also a Gaussian Mixture-based Background/Foreground Segmentation Algorithm. It is based on two
papers by Z.Zivkovic, "Improved adaptive Gaussian mixture model for background subtraction" in 2004
and "Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction"
in 2006. One important feature of this algorithm is that it selects the appropriate number of
Gaussian distributions for each pixel. (Remember, in the last case, we took K Gaussian distributions
throughout the algorithm.) It provides better adaptability to varying scenes due to illumination
changes, etc.
As in the previous case, we have to create a background subtractor object. Here, you have the option
of detecting shadows or not. If detectShadows = True (which is the default), it
detects and marks shadows, but this decreases the speed. Shadows will be marked in gray.
@code{.py}
import numpy as np
import cv2 as cv

cap = cv.VideoCapture('vtest.avi')

fgbg = cv.createBackgroundSubtractorMOG2()

while(1):
    ret, frame = cap.read()

    fgmask = fgbg.apply(frame)

    cv.imshow('frame',fgmask)
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv.destroyAllWindows()
@endcode
(Results are given at the end.)
### BackgroundSubtractorGMG
This algorithm combines statistical background image estimation and per-pixel Bayesian segmentation.
It was introduced by Andrew B. Godbehere, Akihiro Matsukawa, and Ken Goldberg in their paper "Visual
Tracking of Human Visitors under Variable-Lighting Conditions for a Responsive Audio Art
Installation" in 2012. As per the paper, the system ran a successful interactive audio art
installation called “Are We There Yet?” from March 31 to July 31, 2011, at the Contemporary Jewish
Museum in San Francisco, California.
It uses the first few frames (120 by default) for background modelling. It employs a probabilistic
foreground segmentation algorithm that identifies possible foreground objects using Bayesian
inference. The estimates are adaptive; newer observations are weighted more heavily than older
observations to accommodate variable illumination. Several morphological filtering operations like
closing and opening are done to remove unwanted noise. You will get a black window during the first
few frames.

It is better to apply morphological opening to the result to remove the noise.
@code{.py}
import numpy as np
import cv2 as cv

cap = cv.VideoCapture('vtest.avi')

kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(3,3))
fgbg = cv.bgsegm.createBackgroundSubtractorGMG()

while(1):
    ret, frame = cap.read()

    fgmask = fgbg.apply(frame)
    fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)

    cv.imshow('frame',fgmask)
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv.destroyAllWindows()
@endcode
Results
-------
**Original Frame**
The image below shows the 200th frame of a video.
![image](images/resframe.jpg)
**Result of BackgroundSubtractorMOG**
![image](images/resmog.jpg)
**Result of BackgroundSubtractorMOG2**
The gray regions show shadows.
![image](images/resmog2.jpg)
**Result of BackgroundSubtractorGMG**
Noise is removed with morphological opening.
![image](images/resgmg.jpg)
Additional Resources
--------------------
Exercises
---------
Tutorial content has been moved: @ref tutorial_background_subtraction

@@ -1,16 +1,4 @@
Video Analysis {#tutorial_py_table_of_contents_video}
==============
- @ref tutorial_meanshift
We have already seen an example of color-based tracking, which is simpler. This time, we look at
significantly better algorithms: "Meanshift" and its upgraded version, "Camshift", to find and track
objects.
- @ref tutorial_optical_flow
Now let's discuss an important concept, "Optical Flow", which is related to videos and has many applications.
- @subpage tutorial_py_bg_subtraction
In several applications, we need to extract the foreground for further operations like object tracking. Background subtraction is a well-known method for those cases.
Content has been moved: @ref tutorial_table_of_content_video

@@ -586,7 +586,7 @@ vector\<Point3f\> ), where N is the number of points in the view.
@param distCoeffs Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
@param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
@param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
vector\<Point2f\> .
@param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
points with respect to components of the rotation vector, translation vector, focal lengths,
@@ -3000,9 +3000,9 @@ point coordinates out of the normalized distorted point coordinates ("normalized
coordinates do not depend on the camera matrix).
The function can be used for both a stereo camera head or a monocular camera (when R is empty).
@param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2).
@param dst Output ideal point coordinates after undistortion and reverse perspective
@param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
vector\<Point2f\> ).
@param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
@param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
@param distCoeffs Input vector of distortion coefficients

@@ -2,21 +2,6 @@
"class_ignore_list": [
"CirclesGridFinderParameters"
],
"missing_consts" : {
"Calib3d": {
"public" : [
["CALIB_USE_INTRINSIC_GUESS", "1"],
["CALIB_RECOMPUTE_EXTRINSIC", "2"],
["CALIB_CHECK_COND", "4"],
["CALIB_FIX_SKEW", "8"],
["CALIB_FIX_K1", "16"],
["CALIB_FIX_K2", "32"],
["CALIB_FIX_K3", "64"],
["CALIB_FIX_K4", "128"],
["CALIB_FIX_INTRINSIC", "256"]
]
}
},
"namespaces_dict": {
"cv.fisheye": "fisheye"
},

@@ -187,7 +187,7 @@ public class Calib3dTest extends OpenCVTestCase {
Size patternSize = new Size(9, 6);
MatOfPoint2f corners = new MatOfPoint2f();
Calib3d.findChessboardCorners(grayChess, patternSize, corners);
assertTrue(!corners.empty());
assertFalse(corners.empty());
}
public void testFindChessboardCornersMatSizeMatInt() {
@@ -195,7 +195,7 @@ public class Calib3dTest extends OpenCVTestCase {
MatOfPoint2f corners = new MatOfPoint2f();
Calib3d.findChessboardCorners(grayChess, patternSize, corners, Calib3d.CALIB_CB_ADAPTIVE_THRESH + Calib3d.CALIB_CB_NORMALIZE_IMAGE
+ Calib3d.CALIB_CB_FAST_CHECK);
assertTrue(!corners.empty());
assertFalse(corners.empty());
}
public void testFind4QuadCornerSubpix() {
@@ -204,7 +204,7 @@ public class Calib3dTest extends OpenCVTestCase {
Size region_size = new Size(5, 5);
Calib3d.findChessboardCorners(grayChess, patternSize, corners);
Calib3d.find4QuadCornerSubpix(grayChess, corners, region_size);
assertTrue(!corners.empty());
assertFalse(corners.empty());
}
public void testFindCirclesGridMatSizeMat() {
@@ -622,6 +622,34 @@ public class Calib3dTest extends OpenCVTestCase {
assertMatEqual(truth, lines, EPS);
}
public void testConstants()
{
// calib3d.hpp: some constants have conflict with constants from 'fisheye' namespace
assertEquals(1, Calib3d.CALIB_USE_INTRINSIC_GUESS);
assertEquals(2, Calib3d.CALIB_FIX_ASPECT_RATIO);
assertEquals(4, Calib3d.CALIB_FIX_PRINCIPAL_POINT);
assertEquals(8, Calib3d.CALIB_ZERO_TANGENT_DIST);
assertEquals(16, Calib3d.CALIB_FIX_FOCAL_LENGTH);
assertEquals(32, Calib3d.CALIB_FIX_K1);
assertEquals(64, Calib3d.CALIB_FIX_K2);
assertEquals(128, Calib3d.CALIB_FIX_K3);
assertEquals(0x0800, Calib3d.CALIB_FIX_K4);
assertEquals(0x1000, Calib3d.CALIB_FIX_K5);
assertEquals(0x2000, Calib3d.CALIB_FIX_K6);
assertEquals(0x4000, Calib3d.CALIB_RATIONAL_MODEL);
assertEquals(0x8000, Calib3d.CALIB_THIN_PRISM_MODEL);
assertEquals(0x10000, Calib3d.CALIB_FIX_S1_S2_S3_S4);
assertEquals(0x40000, Calib3d.CALIB_TILTED_MODEL);
assertEquals(0x80000, Calib3d.CALIB_FIX_TAUX_TAUY);
assertEquals(0x100000, Calib3d.CALIB_USE_QR);
assertEquals(0x200000, Calib3d.CALIB_FIX_TANGENT_DIST);
assertEquals(0x100, Calib3d.CALIB_FIX_INTRINSIC);
assertEquals(0x200, Calib3d.CALIB_SAME_FOCAL_LENGTH);
assertEquals(0x400, Calib3d.CALIB_ZERO_DISPARITY);
assertEquals((1 << 17), Calib3d.CALIB_USE_LU);
assertEquals((1 << 22), Calib3d.CALIB_USE_EXTRINSIC_GUESS);
}
public void testGetDefaultNewCameraMatrixMat() {
Mat mtx = Calib3d.getDefaultNewCameraMatrix(gray0);

@@ -12,8 +12,8 @@ typedef perf::TestBaseWithParam<PointsNum_Algo_t> PointsNum_Algo;
typedef perf::TestBaseWithParam<int> PointsNum;
PERF_TEST_P(PointsNum_Algo, solvePnP,
testing::Combine(
testing::Values(5, 3*9, 7*13), //TODO: find why results on 4 points are too unstable
testing::Combine( //When non planar, DLT needs at least 6 points for SOLVEPNP_ITERATIVE flag
testing::Values(6, 3*9, 7*13), //TODO: find why results on 4 points are too unstable
testing::Values((int)SOLVEPNP_ITERATIVE, (int)SOLVEPNP_EPNP, (int)SOLVEPNP_UPNP, (int)SOLVEPNP_DLS)
)
)

@@ -328,7 +328,7 @@ int ap3p::computePoses(const double featureVectors[3][4],
bool ap3p::solve(cv::Mat &R, cv::Mat &tvec, const cv::Mat &opoints, const cv::Mat &ipoints) {
CV_INSTRUMENT_REGION();
double rotation_matrix[3][3], translation[3];
double rotation_matrix[3][3] = {}, translation[3] = {};
std::vector<double> points;
if (opoints.depth() == ipoints.depth()) {
if (opoints.depth() == CV_32F)
@@ -353,7 +353,7 @@ bool ap3p::solve(cv::Mat &R, cv::Mat &tvec, const cv::Mat &opoints, const cv::Ma
int ap3p::solve(std::vector<cv::Mat> &Rs, std::vector<cv::Mat> &tvecs, const cv::Mat &opoints, const cv::Mat &ipoints) {
CV_INSTRUMENT_REGION();
double rotation_matrix[4][3][3], translation[4][3];
double rotation_matrix[4][3][3] = {}, translation[4][3] = {};
std::vector<double> points;
if (opoints.depth() == ipoints.depth()) {
if (opoints.depth() == CV_32F)
@@ -391,7 +391,7 @@ ap3p::solve(double R[3][3], double t[3],
double mu1, double mv1, double X1, double Y1, double Z1,
double mu2, double mv2, double X2, double Y2, double Z2,
double mu3, double mv3, double X3, double Y3, double Z3) {
double Rs[4][3][3], ts[4][3];
double Rs[4][3][3] = {}, ts[4][3] = {};
const bool p4p = true;
int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2, mu3, mv3, X3, Y3, Z3, p4p);

@@ -1168,6 +1168,7 @@ CV_IMPL void cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
else
{
// non-planar structure. Use DLT method
CV_CheckGE(count, 6, "DLT algorithm needs at least 6 points for pose estimation from 3D-2D point correspondences.");
double* L;
double LL[12*12], LW[12], LV[12*12], sc;
CvMat _LL = cvMat( 12, 12, CV_64F, LL );
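The CV_CheckGE guard above turns an under-determined DLT call into an explicit error. A minimal sketch of how this surfaces through the Python bindings (assuming the failed check is raised as cv.error; the point values below are made up for illustration):
@code{.py}
import numpy as np
import cv2 as cv

K = np.eye(3)
# Five non-coplanar 3D points: enough for solvePnP's input checks,
# but one short of what the non-planar DLT branch now requires.
obj = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.],
                [0., 0., 1.], [1., 1., 1.]])
img = np.array([[10., 10.], [20., 10.], [10., 20.],
                [12., 12.], [18., 18.]])
try:
    cv.solvePnP(obj, img, K, None, flags=cv.SOLVEPNP_ITERATIVE)
except cv.error:
    print('DLT needs at least 6 points')
@endcode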
@@ -3560,8 +3561,14 @@ void cv::projectPoints( InputArray _opoints,
{
Mat opoints = _opoints.getMat();
int npoints = opoints.checkVector(3), depth = opoints.depth();
if (npoints < 0)
opoints = opoints.t();
npoints = opoints.checkVector(3);
CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F));
if (opoints.cols == 3)
opoints = opoints.reshape(3);
CvMat dpdrot, dpdt, dpdf, dpdc, dpddist;
CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0;
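The hunk above lets cv::projectPoints transpose a 3xN 1-channel input when checkVector(3) fails, and reshape Nx3 input to 3 channels. A short sketch of the resulting shape tolerance, assuming the Python bindings forward these layouts unchanged:
@code{.py}
import numpy as np
import cv2 as cv

rvec = np.zeros(3)
tvec = np.array([0., 0., 1.])
K = np.eye(3)

pts_Nx3 = np.array([[-0.1, 0.1, 0.], [0.1, 0.1, 0.]])  # 2x3: one point per row
pts_3xN = pts_Nx3.T                                     # 3x2: one point per column

img1, _ = cv.projectPoints(pts_Nx3, rvec, tvec, K, None)
img2, _ = cv.projectPoints(pts_3xN, rvec, tvec, K, None)
assert np.allclose(img1, img2)  # both layouts project identically
@endcode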

@@ -60,7 +60,7 @@ void epnp::choose_control_points(void)
// Take C1, C2, and C3 from PCA on the reference points:
CvMat * PW0 = cvCreateMat(number_of_correspondences, 3, CV_64F);
double pw0tpw0[3 * 3], dc[3] = {0}, uct[3 * 3] = {0};
double pw0tpw0[3 * 3] = {}, dc[3] = {}, uct[3 * 3] = {};
CvMat PW0tPW0 = cvMat(3, 3, CV_64F, pw0tpw0);
CvMat DC = cvMat(3, 1, CV_64F, dc);
CvMat UCt = cvMat(3, 3, CV_64F, uct);
@@ -83,7 +83,7 @@ void epnp::choose_control_points(void)
void epnp::compute_barycentric_coordinates(void)
{
double cc[3 * 3], cc_inv[3 * 3];
double cc[3 * 3] = {}, cc_inv[3 * 3] = {};
CvMat CC = cvMat(3, 3, CV_64F, cc);
CvMat CC_inv = cvMat(3, 3, CV_64F, cc_inv);
@@ -98,10 +98,12 @@ void epnp::compute_barycentric_coordinates(void)
double * a = &alphas[0] + 4 * i;
for(int j = 0; j < 3; j++)
{
a[1 + j] =
ci[3 * j ] * (pi[0] - cws[0][0]) +
ci[3 * j + 1] * (pi[1] - cws[0][1]) +
ci[3 * j + 2] * (pi[2] - cws[0][2]);
ci[3 * j ] * (pi[0] - cws[0][0]) +
ci[3 * j + 1] * (pi[1] - cws[0][1]) +
ci[3 * j + 2] * (pi[2] - cws[0][2]);
}
a[0] = 1.0f - a[1] - a[2] - a[3];
}
}
@@ -132,7 +134,7 @@ void epnp::compute_ccs(const double * betas, const double * ut)
const double * v = ut + 12 * (11 - i);
for(int j = 0; j < 4; j++)
for(int k = 0; k < 3; k++)
ccs[j][k] += betas[i] * v[3 * j + k];
ccs[j][k] += betas[i] * v[3 * j + k];
}
}
@@ -157,7 +159,7 @@ void epnp::compute_pose(Mat& R, Mat& t)
for(int i = 0; i < number_of_correspondences; i++)
fill_M(M, 2 * i, &alphas[0] + 4 * i, us[2 * i], us[2 * i + 1]);
double mtm[12 * 12], d[12], ut[12 * 12];
double mtm[12 * 12] = {}, d[12] = {}, ut[12 * 12] = {};
CvMat MtM = cvMat(12, 12, CV_64F, mtm);
CvMat D = cvMat(12, 1, CV_64F, d);
CvMat Ut = cvMat(12, 12, CV_64F, ut);
@@ -166,15 +168,15 @@ void epnp::compute_pose(Mat& R, Mat& t)
cvSVD(&MtM, &D, &Ut, 0, CV_SVD_MODIFY_A | CV_SVD_U_T);
cvReleaseMat(&M);
double l_6x10[6 * 10], rho[6];
double l_6x10[6 * 10] = {}, rho[6] = {};
CvMat L_6x10 = cvMat(6, 10, CV_64F, l_6x10);
CvMat Rho = cvMat(6, 1, CV_64F, rho);
compute_L_6x10(ut, l_6x10);
compute_rho(rho);
double Betas[4][4], rep_errors[4];
double Rs[4][3][3], ts[4][3];
double Betas[4][4] = {}, rep_errors[4] = {};
double Rs[4][3][3] = {}, ts[4][3] = {};
find_betas_approx_1(&L_6x10, &Rho, Betas[1]);
gauss_newton(&L_6x10, &Rho, Betas[1]);
@@ -221,7 +223,7 @@ double epnp::dot(const double * v1, const double * v2)
void epnp::estimate_R_and_t(double R[3][3], double t[3])
{
double pc0[3], pw0[3];
double pc0[3] = {}, pw0[3] = {};
pc0[0] = pc0[1] = pc0[2] = 0.0;
pw0[0] = pw0[1] = pw0[2] = 0.0;
@@ -240,7 +242,7 @@ void epnp::estimate_R_and_t(double R[3][3], double t[3])
pw0[j] /= number_of_correspondences;
}
double abt[3 * 3] = {0}, abt_d[3], abt_u[3 * 3], abt_v[3 * 3];
double abt[3 * 3] = {}, abt_d[3] = {}, abt_u[3 * 3] = {}, abt_v[3 * 3] = {};
CvMat ABt = cvMat(3, 3, CV_64F, abt);
CvMat ABt_D = cvMat(3, 1, CV_64F, abt_d);
CvMat ABt_U = cvMat(3, 3, CV_64F, abt_u);
@@ -284,7 +286,7 @@ void epnp::solve_for_sign(void)
if (pcs[2] < 0.0) {
for(int i = 0; i < 4; i++)
for(int j = 0; j < 3; j++)
ccs[i][j] = -ccs[i][j];
ccs[i][j] = -ccs[i][j];
for(int i = 0; i < number_of_correspondences; i++) {
pcs[3 * i ] = -pcs[3 * i];
@@ -332,7 +334,7 @@ double epnp::reprojection_error(const double R[3][3], const double t[3])
void epnp::find_betas_approx_1(const CvMat * L_6x10, const CvMat * Rho,
double * betas)
{
double l_6x4[6 * 4], b4[4] = {0};
double l_6x4[6 * 4] = {}, b4[4] = {};
CvMat L_6x4 = cvMat(6, 4, CV_64F, l_6x4);
CvMat B4 = cvMat(4, 1, CV_64F, b4);
@@ -364,7 +366,7 @@ void epnp::find_betas_approx_1(const CvMat * L_6x10, const CvMat * Rho,
void epnp::find_betas_approx_2(const CvMat * L_6x10, const CvMat * Rho,
double * betas)
{
double l_6x3[6 * 3], b3[3] = {0};
double l_6x3[6 * 3] = {}, b3[3] = {};
CvMat L_6x3 = cvMat(6, 3, CV_64F, l_6x3);
CvMat B3 = cvMat(3, 1, CV_64F, b3);
@@ -396,7 +398,7 @@ void epnp::find_betas_approx_2(const CvMat * L_6x10, const CvMat * Rho,
void epnp::find_betas_approx_3(const CvMat * L_6x10, const CvMat * Rho,
double * betas)
{
double l_6x5[6 * 5], b5[5] = {0};
double l_6x5[6 * 5] = {}, b5[5] = {};
CvMat L_6x5 = cvMat(6, 5, CV_64F, l_6x5);
CvMat B5 = cvMat(5, 1, CV_64F, b5);
@@ -431,7 +433,7 @@ void epnp::compute_L_6x10(const double * ut, double * l_6x10)
v[2] = ut + 12 * 9;
v[3] = ut + 12 * 8;
double dv[4][6][3];
double dv[4][6][3] = {};
for(int i = 0; i < 4; i++) {
int a = 0, b = 1;
@@ -442,8 +444,8 @@ void epnp::compute_L_6x10(const double * ut, double * l_6x10)
b++;
if (b > 3) {
a++;
b = a + 1;
a++;
b = a + 1;
}
}
}
@@ -506,7 +508,7 @@ void epnp::gauss_newton(const CvMat * L_6x10, const CvMat * Rho, double betas[4]
{
const int iterations_number = 5;
double a[6*4], b[6], x[4] = {0};
double a[6*4] = {}, b[6] = {}, x[4] = {};
CvMat A = cvMat(6, 4, CV_64F, a);
CvMat B = cvMat(6, 1, CV_64F, b);
CvMat X = cvMat(4, 1, CV_64F, x);

@@ -35,7 +35,7 @@ bool p3p::solve(cv::Mat& R, cv::Mat& tvec, const cv::Mat& opoints, const cv::Mat
{
CV_INSTRUMENT_REGION();
double rotation_matrix[3][3], translation[3];
double rotation_matrix[3][3] = {}, translation[3] = {};
std::vector<double> points;
if (opoints.depth() == ipoints.depth())
{
@@ -63,7 +63,7 @@ int p3p::solve(std::vector<cv::Mat>& Rs, std::vector<cv::Mat>& tvecs, const cv::
{
CV_INSTRUMENT_REGION();
double rotation_matrix[4][3][3], translation[4][3];
double rotation_matrix[4][3][3] = {}, translation[4][3] = {};
std::vector<double> points;
if (opoints.depth() == ipoints.depth())
{
@@ -103,7 +103,7 @@ bool p3p::solve(double R[3][3], double t[3],
double mu2, double mv2, double X2, double Y2, double Z2,
double mu3, double mv3, double X3, double Y3, double Z3)
{
double Rs[4][3][3], ts[4][3];
double Rs[4][3][3] = {}, ts[4][3] = {};
const bool p4p = true;
int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2, mu3, mv3, X3, Y3, Z3, p4p);
@@ -159,7 +159,7 @@ int p3p::solve(double R[4][3][3], double t[4][3],
cosines[1] = mu0 * mu2 + mv0 * mv2 + mk0 * mk2;
cosines[2] = mu0 * mu1 + mv0 * mv1 + mk0 * mk1;
double lengths[4][3];
double lengths[4][3] = {};
int n = solve_for_lengths(lengths, distances, cosines);
int nb_solutions = 0;
@@ -319,21 +319,21 @@ bool p3p::align(double M_end[3][3],
double R[3][3], double T[3])
{
// Centroids:
double C_start[3], C_end[3];
double C_start[3] = {}, C_end[3] = {};
for(int i = 0; i < 3; i++) C_end[i] = (M_end[0][i] + M_end[1][i] + M_end[2][i]) / 3;
C_start[0] = (X0 + X1 + X2) / 3;
C_start[1] = (Y0 + Y1 + Y2) / 3;
C_start[2] = (Z0 + Z1 + Z2) / 3;
// Covariance matrix s:
double s[3 * 3];
double s[3 * 3] = {};
for(int j = 0; j < 3; j++) {
s[0 * 3 + j] = (X0 * M_end[0][j] + X1 * M_end[1][j] + X2 * M_end[2][j]) / 3 - C_end[j] * C_start[0];
s[1 * 3 + j] = (Y0 * M_end[0][j] + Y1 * M_end[1][j] + Y2 * M_end[2][j]) / 3 - C_end[j] * C_start[1];
s[2 * 3 + j] = (Z0 * M_end[0][j] + Z1 * M_end[1][j] + Z2 * M_end[2][j]) / 3 - C_end[j] * C_start[2];
}
double Qs[16], evs[4], U[16];
double Qs[16] = {}, evs[4] = {}, U[16] = {};
Qs[0 * 4 + 0] = s[0 * 3 + 0] + s[1 * 3 + 1] + s[2 * 3 + 2];
Qs[1 * 4 + 1] = s[0 * 3 + 0] - s[1 * 3 + 1] - s[2 * 3 + 2];
@@ -386,7 +386,7 @@ bool p3p::align(double M_end[3][3],
bool p3p::jacobi_4x4(double * A, double * D, double * U)
{
double B[4], Z[4];
double B[4] = {}, Z[4] = {};
double Id[16] = {1., 0., 0., 0.,
0., 1., 0., 0.,
0., 0., 1., 0.,
@@ -396,7 +396,6 @@ bool p3p::jacobi_4x4(double * A, double * D, double * U)
B[0] = A[0]; B[1] = A[5]; B[2] = A[10]; B[3] = A[15];
memcpy(D, B, 4 * sizeof(double));
memset(Z, 0, 4 * sizeof(double));
for(int iter = 0; iter < 50; iter++) {
double sum = fabs(A[1]) + fabs(A[2]) + fabs(A[3]) + fabs(A[6]) + fabs(A[7]) + fabs(A[11]);

@@ -245,6 +245,9 @@ bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
if( model_points == npoints )
{
opoints = opoints.reshape(3);
ipoints = ipoints.reshape(2);
bool result = solvePnP(opoints, ipoints, cameraMatrix, distCoeffs, _rvec, _tvec, useExtrinsicGuess, ransac_kernel_method);
if(!result)
@@ -350,6 +353,11 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,
CV_Assert( npoints == 3 || npoints == 4 );
CV_Assert( flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P );
if (opoints.cols == 3)
opoints = opoints.reshape(3);
if (ipoints.cols == 2)
ipoints = ipoints.reshape(2);
Mat cameraMatrix0 = _cameraMatrix.getMat();
Mat distCoeffs0 = _distCoeffs.getMat();
Mat cameraMatrix = Mat_<double>(cameraMatrix0);
@@ -745,6 +753,11 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
&& npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
if (opoints.cols == 3)
opoints = opoints.reshape(3);
if (ipoints.cols == 2)
ipoints = ipoints.reshape(2);
if( flags != SOLVEPNP_ITERATIVE )
useExtrinsicGuess = false;
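These reshape calls are what let solveP3P and solvePnPGeneric consume Nx3 and Nx2 1-channel matrices directly (the shape issue tracked in https://github.com/opencv/opencv/issues/14423 and exercised by the tests below). A minimal sketch through the Python bindings, assuming they pass the shapes straight through:
@code{.py}
import numpy as np
import cv2 as cv

K = np.eye(3)
# Six non-coplanar cube corners as an Nx3 1-channel array.
obj = np.array([[-0.5, -0.5,  0.5], [ 0.5, -0.5,  0.5],
                [ 0.5,  0.5,  0.5], [-0.5,  0.5,  0.5],
                [-0.5, -0.5, -0.5], [ 0.5, -0.5, -0.5]])
img, _ = cv.projectPoints(obj, np.zeros(3), np.array([0., 0., 3.]), K, None)
img = img.reshape(-1, 2)  # Nx2 1-channel image points

ok, rvec, tvec = cv.solvePnP(obj, img, K, None)
assert ok and np.allclose(tvec.ravel(), [0., 0., 3.], atol=1e-4)
@endcode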

@@ -565,10 +565,16 @@ void undistortPoints(InputArray _src, OutputArray _dst,
Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat();
CV_Assert( src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) &&
((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2));
int npoints = src.checkVector(2), depth = src.depth();
if (npoints < 0)
src = src.t();
npoints = src.checkVector(2);
CV_Assert(npoints >= 0 && src.isContinuous() && (depth == CV_32F || depth == CV_64F));
_dst.create(src.size(), src.type(), -1, true);
if (src.cols == 2)
src = src.reshape(2);
_dst.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
Mat dst = _dst.getMat();
CvMat _csrc = cvMat(src), _cdst = cvMat(dst), _ccameraMatrix = cvMat(cameraMatrix);
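With the rigid CV_Assert replaced by a transpose fallback, undistortPoints now accepts 2xN as well as Nx2 1-channel input and always allocates an Nx1 2-channel result. A short sketch, assuming the Python bindings forward these shapes as-is (identity camera matrix and no distortion, so output equals input):
@code{.py}
import numpy as np
import cv2 as cv

K = np.eye(3)
pts_Nx2 = np.array([[320., 240.], [0., 240.], [320., 0.]])  # Nx2 1-channel
pts_2xN = pts_Nx2.T                                         # 2xN 1-channel

n1 = cv.undistortPoints(pts_Nx2, K, None)
n2 = cv.undistortPoints(pts_2xN, K, None)
assert n1.shape == n2.shape == (3, 1, 2)  # output is always Nx1 2-channel
assert np.allclose(n1, n2)
@endcode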

@@ -1829,6 +1829,183 @@ void CV_StereoCalibrationTest_CPP::correct( const Mat& F,
TEST(Calib3d_CalibrateCamera_CPP, regression) { CV_CameraCalibrationTest_CPP test; test.safe_run(); }
TEST(Calib3d_CalibrationMatrixValues_CPP, accuracy) { CV_CalibrationMatrixValuesTest_CPP test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_CPP, regression) { CV_ProjectPointsTest_CPP test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_CPP, inputShape)
{
Matx31d rvec = Matx31d::zeros();
Matx31d tvec(0, 0, 1);
Matx33d cameraMatrix = Matx33d::eye();
const float L = 0.1f;
{
//3xN 1-channel
Mat objectPoints = (Mat_<float>(3, 2) << -L, L,
L, L,
0, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.cols, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//Nx2 1-channel
Mat objectPoints = (Mat_<float>(2, 3) << -L, L, 0,
L, L, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.rows, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//1xN 3-channel
Mat objectPoints(1, 2, CV_32FC3);
objectPoints.at<Vec3f>(0,0) = Vec3f(-L, L, 0);
objectPoints.at<Vec3f>(0,1) = Vec3f(L, L, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.cols, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//Nx1 3-channel
Mat objectPoints(2, 1, CV_32FC3);
objectPoints.at<Vec3f>(0,0) = Vec3f(-L, L, 0);
objectPoints.at<Vec3f>(1,0) = Vec3f(L, L, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.rows, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//vector<Point3f>
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f(L, L, 0));
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//vector<Point3d>
vector<Point3d> objectPoints;
objectPoints.push_back(Point3d(-L, L, 0));
objectPoints.push_back(Point3d(L, L, 0));
vector<Point2d> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<double>::epsilon());
}
}
TEST(Calib3d_ProjectPoints_CPP, outputShape)
{
Matx31d rvec = Matx31d::zeros();
Matx31d tvec(0, 0, 1);
Matx33d cameraMatrix = Matx33d::eye();
const float L = 0.1f;
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f( L, L, 0));
objectPoints.push_back(Point3f( L, -L, 0));
//Mat --> will be Nx1 2-channel
Mat imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(static_cast<int>(objectPoints.size()), imagePoints.rows);
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(0), -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(1), -L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f( L, L, 0));
objectPoints.push_back(Point3f( L, -L, 0));
//Nx1 2-channel
Mat imagePoints(3,1,CV_32FC2);
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(static_cast<int>(objectPoints.size()), imagePoints.rows);
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(0), -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(1), -L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f( L, L, 0));
objectPoints.push_back(Point3f( L, -L, 0));
//1xN 2-channel
Mat imagePoints(1,3,CV_32FC2);
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(static_cast<int>(objectPoints.size()), imagePoints.cols);
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(0), -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,1)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,1)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,2)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,2)(1), -L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f(L, L, 0));
//vector<Point2f>
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3d> objectPoints;
objectPoints.push_back(Point3d(-L, L, 0));
objectPoints.push_back(Point3d(L, L, 0));
//vector<Point2d>
vector<Point2d> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<double>::epsilon());
}
}
TEST(Calib3d_StereoCalibrate_CPP, regression) { CV_StereoCalibrationTest_CPP test; test.safe_run(); }
TEST(Calib3d_StereoCalibrateCorner, regression) { CV_StereoCalibrationCornerTest test; test.safe_run(); }

@@ -1211,6 +1211,7 @@ TEST(Calib3d_SolvePnP, translation)
p3d.push_back(Point3f(0,10,10));
p3d.push_back(Point3f(10,10,10));
p3d.push_back(Point3f(2,5,5));
p3d.push_back(Point3f(-4,8,6));
vector<Point2f> p2d;
projectPoints(p3d, crvec, ctvec, cameraIntrinsic, noArray(), p2d);
@@ -1845,4 +1846,375 @@ TEST(Calib3d_SolvePnP, refine)
}
}
TEST(Calib3d_SolvePnPRansac, minPoints)
{
//https://github.com/opencv/opencv/issues/14423
Mat matK = Mat::eye(3,3,CV_64FC1);
Mat distCoeff = Mat::zeros(1,5,CV_64FC1);
Matx31d true_rvec(0.9072420896651262, 0.09226497171882152, 0.8880772883671504);
Matx31d true_tvec(7.376333362427632, 8.434449036856979, 13.79801619778456);
{
//nb points = 5 --> ransac_kernel_method = SOLVEPNP_EPNP
Mat keypoints13D = (Mat_<float>(5, 3) << 12.00604, -2.8654366, 18.472504,
7.6863389, 4.9355154, 11.146358,
14.260933, 2.8320458, 12.582781,
3.4562225, 8.2668982, 11.300434,
15.316854, 3.7486348, 12.491116);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, 2, CV_32FC1);
vector<Point3f> objectPoints;
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<float>(i,0) = imagesPoints[i].x;
keypoints22D.at<float>(i,1) = imagesPoints[i].y;
objectPoints.push_back(Point3f(keypoints13D.at<float>(i,0), keypoints13D.at<float>(i,1), keypoints13D.at<float>(i,2)));
}
Mat rvec = Mat::zeros(1,3,CV_64FC1);
Mat Tvec = Mat::zeros(1,3,CV_64FC1);
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
Mat rvec2, Tvec2;
solvePnP(objectPoints, imagesPoints, matK, distCoeff, rvec2, Tvec2, false, SOLVEPNP_EPNP);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-4);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-4);
EXPECT_LE(cvtest::norm(rvec, rvec2, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(Tvec, Tvec2, NORM_INF), 1e-6);
}
{
//nb points = 4 --> ransac_kernel_method = SOLVEPNP_P3P
Mat keypoints13D = (Mat_<float>(4, 3) << 12.00604, -2.8654366, 18.472504,
7.6863389, 4.9355154, 11.146358,
14.260933, 2.8320458, 12.582781,
3.4562225, 8.2668982, 11.300434);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, 2, CV_32FC1);
vector<Point3f> objectPoints;
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<float>(i,0) = imagesPoints[i].x;
keypoints22D.at<float>(i,1) = imagesPoints[i].y;
objectPoints.push_back(Point3f(keypoints13D.at<float>(i,0), keypoints13D.at<float>(i,1), keypoints13D.at<float>(i,2)));
}
Mat rvec = Mat::zeros(1,3,CV_64FC1);
Mat Tvec = Mat::zeros(1,3,CV_64FC1);
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
Mat rvec2, Tvec2;
solvePnP(objectPoints, imagesPoints, matK, distCoeff, rvec2, Tvec2, false, SOLVEPNP_P3P);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-4);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-4);
EXPECT_LE(cvtest::norm(rvec, rvec2, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(Tvec, Tvec2, NORM_INF), 1e-6);
}
}
TEST(Calib3d_SolvePnPRansac, inputShape)
{
//https://github.com/opencv/opencv/issues/14423
Mat matK = Mat::eye(3,3,CV_64FC1);
Mat distCoeff = Mat::zeros(1,5,CV_64FC1);
Matx31d true_rvec(0.9072420896651262, 0.09226497171882152, 0.8880772883671504);
Matx31d true_tvec(7.376333362427632, 8.434449036856979, 13.79801619778456);
{
//Nx3 1-channel
Mat keypoints13D = (Mat_<float>(6, 3) << 12.00604, -2.8654366, 18.472504,
7.6863389, 4.9355154, 11.146358,
14.260933, 2.8320458, 12.582781,
3.4562225, 8.2668982, 11.300434,
10.00604, 2.8654366, 15.472504,
-4.6863389, 5.9355154, 13.146358);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, 2, CV_32FC1);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<float>(i,0) = imagesPoints[i].x;
keypoints22D.at<float>(i,1) = imagesPoints[i].y;
}
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
{
//1xN 3-channel
Mat keypoints13D(1, 6, CV_32FC3);
keypoints13D.at<Vec3f>(0,0) = Vec3f(12.00604f, -2.8654366f, 18.472504f);
keypoints13D.at<Vec3f>(0,1) = Vec3f(7.6863389f, 4.9355154f, 11.146358f);
keypoints13D.at<Vec3f>(0,2) = Vec3f(14.260933f, 2.8320458f, 12.582781f);
keypoints13D.at<Vec3f>(0,3) = Vec3f(3.4562225f, 8.2668982f, 11.300434f);
keypoints13D.at<Vec3f>(0,4) = Vec3f(10.00604f, 2.8654366f, 15.472504f);
keypoints13D.at<Vec3f>(0,5) = Vec3f(-4.6863389f, 5.9355154f, 13.146358f);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, keypoints13D.cols, CV_32FC2);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<Vec2f>(0,i) = Vec2f(imagesPoints[i].x, imagesPoints[i].y);
}
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
{
//Nx1 3-channel
Mat keypoints13D(6, 1, CV_32FC3);
keypoints13D.at<Vec3f>(0,0) = Vec3f(12.00604f, -2.8654366f, 18.472504f);
keypoints13D.at<Vec3f>(1,0) = Vec3f(7.6863389f, 4.9355154f, 11.146358f);
keypoints13D.at<Vec3f>(2,0) = Vec3f(14.260933f, 2.8320458f, 12.582781f);
keypoints13D.at<Vec3f>(3,0) = Vec3f(3.4562225f, 8.2668982f, 11.300434f);
keypoints13D.at<Vec3f>(4,0) = Vec3f(10.00604f, 2.8654366f, 15.472504f);
keypoints13D.at<Vec3f>(5,0) = Vec3f(-4.6863389f, 5.9355154f, 13.146358f);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, keypoints13D.cols, CV_32FC2);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<Vec2f>(i,0) = Vec2f(imagesPoints[i].x, imagesPoints[i].y);
}
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
{
//vector<Point3f>
vector<Point3f> keypoints13D;
keypoints13D.push_back(Point3f(12.00604f, -2.8654366f, 18.472504f));
keypoints13D.push_back(Point3f(7.6863389f, 4.9355154f, 11.146358f));
keypoints13D.push_back(Point3f(14.260933f, 2.8320458f, 12.582781f));
keypoints13D.push_back(Point3f(3.4562225f, 8.2668982f, 11.300434f));
keypoints13D.push_back(Point3f(10.00604f, 2.8654366f, 15.472504f));
keypoints13D.push_back(Point3f(-4.6863389f, 5.9355154f, 13.146358f));
vector<Point2f> keypoints22D;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, keypoints22D);
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
{
//vector<Point3d>
vector<Point3d> keypoints13D;
keypoints13D.push_back(Point3d(12.00604f, -2.8654366f, 18.472504f));
keypoints13D.push_back(Point3d(7.6863389f, 4.9355154f, 11.146358f));
keypoints13D.push_back(Point3d(14.260933f, 2.8320458f, 12.582781f));
keypoints13D.push_back(Point3d(3.4562225f, 8.2668982f, 11.300434f));
keypoints13D.push_back(Point3d(10.00604f, 2.8654366f, 15.472504f));
keypoints13D.push_back(Point3d(-4.6863389f, 5.9355154f, 13.146358f));
vector<Point2d> keypoints22D;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, keypoints22D);
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
}
TEST(Calib3d_SolvePnP, inputShape)
{
//https://github.com/opencv/opencv/issues/14423
Mat matK = Mat::eye(3,3,CV_64FC1);
Mat distCoeff = Mat::zeros(1,5,CV_64FC1);
Matx31d true_rvec(0.407, 0.092, 0.88);
Matx31d true_tvec(0.576, -0.43, 1.3798);
vector<Point3d> objectPoints;
const double L = 0.5;
objectPoints.push_back(Point3d(-L, -L, L));
objectPoints.push_back(Point3d( L, -L, L));
objectPoints.push_back(Point3d( L, L, L));
objectPoints.push_back(Point3d(-L, L, L));
objectPoints.push_back(Point3d(-L, -L, -L));
objectPoints.push_back(Point3d( L, -L, -L));
const int methodsCount = 6;
int methods[] = {SOLVEPNP_ITERATIVE, SOLVEPNP_EPNP, SOLVEPNP_P3P, SOLVEPNP_AP3P, SOLVEPNP_IPPE, SOLVEPNP_IPPE_SQUARE};
for (int method = 0; method < methodsCount; method++)
{
if (methods[method] == SOLVEPNP_IPPE_SQUARE)
{
objectPoints[0] = Point3d(-L, L, 0);
objectPoints[1] = Point3d( L, L, 0);
objectPoints[2] = Point3d( L, -L, 0);
objectPoints[3] = Point3d(-L, -L, 0);
}
{
//Nx3 1-channel
Mat keypoints13D;
if (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE)
{
keypoints13D = Mat(4, 3, CV_32FC1);
}
else
{
keypoints13D = Mat(6, 3, CV_32FC1);
}
for (int i = 0; i < keypoints13D.rows; i++)
{
keypoints13D.at<float>(i,0) = static_cast<float>(objectPoints[i].x);
keypoints13D.at<float>(i,1) = static_cast<float>(objectPoints[i].y);
keypoints13D.at<float>(i,2) = static_cast<float>(objectPoints[i].z);
}
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, 2, CV_32FC1);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<float>(i,0) = imagesPoints[i].x;
keypoints22D.at<float>(i,1) = imagesPoints[i].y;
}
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
{
//1xN 3-channel
Mat keypoints13D;
if (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE)
{
keypoints13D = Mat(1, 4, CV_32FC3);
}
else
{
keypoints13D = Mat(1, 6, CV_32FC3);
}
for (int i = 0; i < keypoints13D.cols; i++)
{
keypoints13D.at<Vec3f>(0,i) = Vec3f(static_cast<float>(objectPoints[i].x),
static_cast<float>(objectPoints[i].y),
static_cast<float>(objectPoints[i].z));
}
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, keypoints13D.cols, CV_32FC2);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<Vec2f>(0,i) = Vec2f(imagesPoints[i].x, imagesPoints[i].y);
}
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
{
//Nx1 3-channel
Mat keypoints13D;
if (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE)
{
keypoints13D = Mat(4, 1, CV_32FC3);
}
else
{
keypoints13D = Mat(6, 1, CV_32FC3);
}
for (int i = 0; i < keypoints13D.rows; i++)
{
keypoints13D.at<Vec3f>(i,0) = Vec3f(static_cast<float>(objectPoints[i].x),
static_cast<float>(objectPoints[i].y),
static_cast<float>(objectPoints[i].z));
}
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, keypoints13D.cols, CV_32FC2);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<Vec2f>(i,0) = Vec2f(imagesPoints[i].x, imagesPoints[i].y);
}
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
{
//vector<Point3f>
vector<Point3f> keypoints13D;
const int nbPts = (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE) ? 4 : 6;
for (int i = 0; i < nbPts; i++)
{
keypoints13D.push_back(Point3f(static_cast<float>(objectPoints[i].x),
static_cast<float>(objectPoints[i].y),
static_cast<float>(objectPoints[i].z)));
}
vector<Point2f> keypoints22D;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, keypoints22D);
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
{
//vector<Point3d>
vector<Point3d> keypoints13D;
const int nbPts = (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE) ? 4 : 6;
for (int i = 0; i < nbPts; i++)
{
keypoints13D.push_back(objectPoints[i]);
}
vector<Point2d> keypoints22D;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, keypoints22D);
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
}
}
}} // namespace

@@ -1293,4 +1293,180 @@ void CV_UndistortMapTest::prepare_to_validation( int )
TEST(Calib3d_Undistort, accuracy) { CV_UndistortTest test; test.safe_run(); }
TEST(Calib3d_InitUndistortMap, accuracy) { CV_UndistortMapTest test; test.safe_run(); }
TEST(Calib3d_UndistortPoints, inputShape)
{
//https://github.com/opencv/opencv/issues/14423
Matx33d cameraMatrix = Matx33d::eye();
{
//2xN 1-channel
Mat imagePoints(2, 3, CV_32FC1);
imagePoints.at<float>(0,0) = 320; imagePoints.at<float>(1,0) = 240;
imagePoints.at<float>(0,1) = 0; imagePoints.at<float>(1,1) = 240;
imagePoints.at<float>(0,2) = 320; imagePoints.at<float>(1,2) = 0;
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.cols);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<float>(0,i), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<float>(1,i), std::numeric_limits<float>::epsilon());
}
}
{
//Nx2 1-channel
Mat imagePoints(3, 2, CV_32FC1);
imagePoints.at<float>(0,0) = 320; imagePoints.at<float>(0,1) = 240;
imagePoints.at<float>(1,0) = 0; imagePoints.at<float>(1,1) = 240;
imagePoints.at<float>(2,0) = 320; imagePoints.at<float>(2,1) = 0;
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.rows);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<float>(i,0), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<float>(i,1), std::numeric_limits<float>::epsilon());
}
}
{
//1xN 2-channel
Mat imagePoints(1, 3, CV_32FC2);
imagePoints.at<Vec2f>(0,0) = Vec2f(320, 240);
imagePoints.at<Vec2f>(0,1) = Vec2f(0, 240);
imagePoints.at<Vec2f>(0,2) = Vec2f(320, 0);
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.cols);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<Vec2f>(0,i)(0), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<Vec2f>(0,i)(1), std::numeric_limits<float>::epsilon());
}
}
{
//Nx1 2-channel
Mat imagePoints(3, 1, CV_32FC2);
imagePoints.at<Vec2f>(0,0) = Vec2f(320, 240);
imagePoints.at<Vec2f>(1,0) = Vec2f(0, 240);
imagePoints.at<Vec2f>(2,0) = Vec2f(320, 0);
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.rows);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<Vec2f>(i,0)(0), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<Vec2f>(i,0)(1), std::numeric_limits<float>::epsilon());
}
}
{
//vector<Point2f>
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(normalized.size(), imagePoints.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
//vector<Point2d>
vector<Point2d> imagePoints;
imagePoints.push_back(Point2d(320, 240));
imagePoints.push_back(Point2d(0, 240));
imagePoints.push_back(Point2d(320, 0));
vector<Point2d> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(normalized.size(), imagePoints.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<double>::epsilon());
}
}
}
TEST(Calib3d_UndistortPoints, outputShape)
{
Matx33d cameraMatrix = Matx33d::eye();
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//Mat --> will be Nx1 2-channel
Mat normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(imagePoints.size()), normalized.rows);
for (int i = 0; i < normalized.rows; i++) {
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(0), imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(1), imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//Nx1 2-channel
Mat normalized(static_cast<int>(imagePoints.size()), 1, CV_32FC2);
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(imagePoints.size()), normalized.rows);
for (int i = 0; i < normalized.rows; i++) {
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(0), imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(1), imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//1xN 2-channel
Mat normalized(1, static_cast<int>(imagePoints.size()), CV_32FC2);
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(imagePoints.size()), normalized.cols);
for (int i = 0; i < normalized.rows; i++) {
EXPECT_NEAR(normalized.at<Vec2f>(0,i)(0), imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized.at<Vec2f>(0,i)(1), imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//vector<Point2f>
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(imagePoints.size(), normalized.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2d> imagePoints;
imagePoints.push_back(Point2d(320, 240));
imagePoints.push_back(Point2d(0, 240));
imagePoints.push_back(Point2d(320, 0));
//vector<Point2d>
vector<Point2d> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(imagePoints.size(), normalized.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<double>::epsilon());
}
}
}
}} // namespace
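These tests use an identity camera matrix and no distortion, so the normalized output must equal the input. A minimal sketch of the general case, assuming hypothetical intrinsics: undistortPoints maps a pixel (u, v) to normalized coordinates ((u - cx)/fx, (v - cy)/fy).

// a sketch, assuming fx = fy = 500, cx = 320, cy = 240:
Matx33d K(500,   0, 320,
            0, 500, 240,
            0,   0,   1);
vector<Point2f> pixels;
pixels.push_back(Point2f(320, 240));
pixels.push_back(Point2f(820, 240));
vector<Point2f> norm;
undistortPoints(pixels, norm, K, noArray());
// norm[0] ~= (0, 0) and norm[1] ~= (1, 0)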

@ -105,7 +105,7 @@ void CV_UndistortPointsBadArgTest::run(int)
R = cv::cvarrToMat(&_R_orig);
src_points = cv::cvarrToMat(&_src_points_orig);
src_points.create(2, 2, CV_32F);
src_points.create(2, 2, CV_32FC2);
errcount += run_test_case( CV_StsAssert, "Invalid input data matrix size" );
src_points = cv::cvarrToMat(&_src_points_orig);

@ -1227,18 +1227,14 @@ inline int v_signmask(const v_uint8x32& a)
{ return v_signmask(v_reinterpret_as_s8(a)); }
inline int v_signmask(const v_int16x16& a)
{
v_int8x32 v = v_int8x32(_mm256_packs_epi16(a.val, a.val));
return v_signmask(v) & 255;
}
{ return v_signmask(v_pack(a, a)) & 0xFFFF; }
inline int v_signmask(const v_uint16x16& a)
{ return v_signmask(v_reinterpret_as_s16(a)); }
inline int v_signmask(const v_int32x8& a)
{
__m256i a16 = _mm256_packs_epi32(a.val, a.val);
v_int8x32 v = v_int8x32(_mm256_packs_epi16(a16, a16));
return v_signmask(v) & 15;
v_int16x16 a16 = v_pack(a, a);
return v_signmask(v_pack(a16, a16)) & 0xFF;
}
inline int v_signmask(const v_uint32x8& a)
{ return v_signmask(v_reinterpret_as_s32(a)); }
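The rewrite replaces the raw AVX2 pack intrinsics with the universal v_pack: _mm256_packs_epi16/epi32 pack within each 128-bit lane, so the packed sign bits were not in lane order, and the old `& 255` truncated a 16-lane mask to 8 bits. A scalar model of what v_signmask computes, as a sketch (not the SIMD path):

int signmask16(const short a[16])
{
    int mask = 0;
    for (int i = 0; i < 16; i++)
        mask |= (a[i] < 0 ? 1 : 0) << i;  // one bit per lane, set when the lane is negative
    return mask;
}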

@ -266,7 +266,7 @@ public class MatTest extends OpenCVTestCase {
public void testEmpty() {
assertTrue(dst.empty());
assertTrue(!gray0.empty());
assertFalse(gray0.empty());
}
public void testEyeIntIntInt() {
@ -1194,7 +1194,7 @@ public class MatTest extends OpenCVTestCase {
}
public void testToString() {
assertTrue(null != gray0.toString());
assertNotNull(gray0.toString());
}
public void testTotal() {

@ -48,7 +48,7 @@ public class RotatedRectTest extends OpenCVTestCase {
RotatedRect rrect = new RotatedRect(center, size, angle);
RotatedRect clone = rrect.clone();
assertTrue(clone != null);
assertNotNull(clone);
assertTrue(rrect.center.equals(clone.center));
assertTrue(rrect.size.equals(clone.size));
assertTrue(rrect.angle == clone.angle);
@ -66,24 +66,24 @@ public class RotatedRectTest extends OpenCVTestCase {
RotatedRect clone2 = rrect2.clone();
assertTrue(rrect1.equals(rrect3));
assertTrue(!rrect1.equals(rrect2));
assertFalse(rrect1.equals(rrect2));
assertTrue(rrect2.equals(clone2));
clone2.angle = 10;
assertTrue(!rrect2.equals(clone2));
assertFalse(rrect2.equals(clone2));
assertTrue(rrect1.equals(clone1));
clone1.center.x += 1;
assertTrue(!rrect1.equals(clone1));
assertFalse(rrect1.equals(clone1));
clone1.center.x -= 1;
assertTrue(rrect1.equals(clone1));
clone1.size.width += 1;
assertTrue(!rrect1.equals(clone1));
assertFalse(rrect1.equals(clone1));
assertTrue(!rrect1.equals(size));
assertFalse(rrect1.equals(size));
}
public void testHashCode() {
@ -140,10 +140,10 @@ public class RotatedRectTest extends OpenCVTestCase {
public void testRotatedRect() {
RotatedRect rr = new RotatedRect();
assertTrue(rr != null);
assertTrue(rr.center != null);
assertTrue(rr.size != null);
assertTrue(rr.angle == 0.0);
assertNotNull(rr);
assertNotNull(rr.center);
assertNotNull(rr.size);
assertEquals(0.0, rr.angle);
}
public void testRotatedRectDoubleArray() {
@ -161,10 +161,10 @@ public class RotatedRectTest extends OpenCVTestCase {
public void testRotatedRectPointSizeDouble() {
RotatedRect rr = new RotatedRect(center, size, 40);
assertTrue(rr != null);
assertTrue(rr.center != null);
assertTrue(rr.size != null);
assertTrue(rr.angle == 40.0);
assertNotNull(rr);
assertNotNull(rr.center);
assertNotNull(rr.size);
assertEquals(40.0, rr.angle);
}
public void testSet() {

@ -32,8 +32,7 @@ template <> struct initializer<64>
return R(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15],
d[16], d[17], d[18], d[19], d[20], d[21], d[22], d[23], d[24], d[25], d[26], d[27], d[28], d[29], d[30], d[31],
d[32], d[33], d[34], d[35], d[36], d[37], d[38], d[39], d[40], d[41], d[42], d[43], d[44], d[45], d[46], d[47],
d[48], d[49], d[50], d[51], d[52], d[53], d[54], d[55], d[56], d[57], d[58], d[59], d[50], d[51], d[52], d[53],
d[54], d[55], d[56], d[57], d[58], d[59], d[60], d[61], d[62], d[63]);
d[48], d[49], d[50], d[51], d[52], d[53], d[54], d[55], d[56], d[57], d[58], d[59], d[60], d[61], d[62], d[63]);
}
};
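The replaced initializer repeated d[50]…d[59] instead of continuing to d[60]…d[63], so the tail lanes of 64-element vectors were filled from the wrong array slots; the fixed line enumerates d[48]…d[63] in order.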
@ -660,7 +659,7 @@ template<typename R> struct TheTest
{
SCOPED_TRACE(cv::format("i=%d", i));
EXPECT_COMPARE_EQ((float)std::sqrt(dataA[i]), (float)resB[i]);
EXPECT_COMPARE_EQ(1/(float)std::sqrt(dataA[i]), (float)resC[i]);
EXPECT_COMPARE_EQ((float)(1/std::sqrt(dataA[i])), (float)resC[i]);
EXPECT_COMPARE_EQ((float)abs(dataA[i]), (float)resE[i]);
}
@ -808,8 +807,8 @@ template<typename R> struct TheTest
dataC *= (LaneType)-1;
R a = dataA, b = dataB, c = dataC, d = dataD, e = dataE;
int m = v_signmask(a);
EXPECT_EQ(2, m);
EXPECT_EQ(2, v_signmask(a));
EXPECT_EQ(2 | (1 << (R::nlanes / 2)) | (1 << (R::nlanes - 1)), v_signmask(b));
EXPECT_EQ(false, v_check_all(a));
EXPECT_EQ(false, v_check_all(b));

@ -608,7 +608,7 @@ CV__DNN_INLINE_NS_BEGIN
};
/**
* @brief Bilinear resize layer from https://github.com/cdmh/deeplab-public
* @brief Bilinear resize layer from https://github.com/cdmh/deeplab-public-ver2
*
* It differs from @ref ResizeLayer in output shape and resize scales computations.
*/

@ -214,8 +214,7 @@ PERF_TEST_P_(DNNTestNetwork, EAST_text_detection)
PERF_TEST_P_(DNNTestNetwork, FastNeuralStyle_eccv16)
{
if (backend == DNN_BACKEND_HALIDE ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", "", Mat(cv::Size(320, 240), CV_32FC3));
}

@ -3056,6 +3056,23 @@ int Net::getLayerId(const String &layer)
return impl->getLayerId(layer);
}
String parseLayerParams(const String& name, const LayerParams& lp) {
DictValue param = lp.get(name);
std::ostringstream out;
out << name << " ";
switch (param.size()) {
case 1: out << ": "; break;
case 2: out << "(HxW): "; break;
case 3: out << "(DxHxW): "; break;
default: CV_Error(Error::StsNotImplemented, format("Unsupported %s size = %d", name.c_str(), param.size()));
}
for (size_t i = 0; i < param.size() - 1; i++) {
out << param.get<int>(i) << " x ";
}
out << param.get<int>(param.size() - 1) << "\\l";
return out.str();
}
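For a two-element parameter the helper renders an HxW pair; e.g. a hypothetical LayerParams carrying kernel_size = {3, 3} gives:

// parseLayerParams("kernel_size", lp) -> "kernel_size (HxW): 3 x 3\l"
// ("\l" is Graphviz's left-justified line break; Net::dump produces a DOT graph)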
String Net::dump()
{
CV_Assert(!empty());
@ -3141,39 +3158,47 @@ String Net::dump()
out << " | ";
}
out << lp.name << "\\n" << lp.type << "\\n";
if (lp.has("kernel_size")) {
DictValue size = lp.get("kernel_size");
out << "kernel (HxW): " << size << " x " << size << "\\l";
} else if (lp.has("kernel_h") && lp.has("kernel_w")) {
DictValue h = lp.get("kernel_h");
DictValue w = lp.get("kernel_w");
out << "kernel (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("stride")) {
DictValue stride = lp.get("stride");
out << "stride (HxW): " << stride << " x " << stride << "\\l";
} else if (lp.has("stride_h") && lp.has("stride_w")) {
DictValue h = lp.get("stride_h");
DictValue w = lp.get("stride_w");
out << "stride (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("dilation")) {
DictValue dilation = lp.get("dilation");
out << "dilation (HxW): " << dilation << " x " << dilation << "\\l";
} else if (lp.has("dilation_h") && lp.has("dilation_w")) {
DictValue h = lp.get("dilation_h");
DictValue w = lp.get("dilation_w");
out << "dilation (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("pad")) {
DictValue pad = lp.get("pad");
out << "pad (LxTxRxB): " << pad << " x " << pad << " x " << pad << " x " << pad << "\\l";
if (lp.has("kernel_size")) {
String kernel = parseLayerParams("kernel_size", lp);
out << kernel;
} else if (lp.has("kernel_h") && lp.has("kernel_w")) {
DictValue h = lp.get("kernel_h");
DictValue w = lp.get("kernel_w");
out << "kernel (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("stride")) {
String stride = parseLayerParams("stride", lp);
out << stride;
} else if (lp.has("stride_h") && lp.has("stride_w")) {
DictValue h = lp.get("stride_h");
DictValue w = lp.get("stride_w");
out << "stride (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("dilation")) {
String dilation = parseLayerParams("dilation", lp);
out << dilation;
} else if (lp.has("dilation_h") && lp.has("dilation_w")) {
DictValue h = lp.get("dilation_h");
DictValue w = lp.get("dilation_w");
out << "dilation (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("pad")) {
DictValue pad = lp.get("pad");
out << "pad ";
switch (pad.size()) {
case 1: out << ": " << pad << "\\l"; break;
case 2: out << "(HxW): (" << pad.get<int>(0) << " x " << pad.get<int>(1) << ")" << "\\l"; break;
case 4: out << "(HxW): (" << pad.get<int>(0) << ", " << pad.get<int>(2) << ") x (" << pad.get<int>(1) << ", " << pad.get<int>(3) << ")" << "\\l"; break;
case 6: out << "(DxHxW): (" << pad.get<int>(0) << ", " << pad.get<int>(3) << ") x (" << pad.get<int>(1) << ", " << pad.get<int>(4)
<< ") x (" << pad.get<int>(2) << ", " << pad.get<int>(5) << ")" << "\\l"; break;
default: CV_Error(Error::StsNotImplemented, format("Unsupported pad size = %d", pad.size()));
}
} else if (lp.has("pad_l") && lp.has("pad_t") && lp.has("pad_r") && lp.has("pad_b")) {
DictValue l = lp.get("pad_l");
DictValue t = lp.get("pad_t");
DictValue r = lp.get("pad_r");
DictValue b = lp.get("pad_b");
out << "pad (LxTxRxB): " << l << " x " << t << " x " << r << " x " << b << "\\l";
out << "pad (HxW): (" << t << ", " << b << ") x (" << l << ", " << r << ")" << "\\l";
}
else if (lp.has("pooled_w") || lp.has("pooled_h")) {
DictValue h = lp.get("pooled_h");

@ -110,15 +110,9 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
{
return INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) &&
sliceRanges.size() == 1 && sliceRanges[0].size() == 4;
}
else
#endif
return backendId == DNN_BACKEND_OPENCV;
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE &&
sliceRanges.size() == 1 && sliceRanges[0].size() == 4);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -264,39 +258,65 @@ public:
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Crop";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CropLayer> ieLayer(new InferenceEngine::CropLayer(lp));
CV_Assert(sliceRanges.size() == 1);
std::vector<size_t> axes, offsets, dims;
int from, to, step;
int numDims = sliceRanges[0].size();
if (preferableTarget == DNN_TARGET_MYRIAD)
{
from = 1;
to = sliceRanges[0].size() + 1;
to = numDims;
step = 1;
}
else
{
from = sliceRanges[0].size() - 1;
from = numDims - 1;
to = -1;
step = -1;
}
for (int i = from; i != to; i += step)
{
ieLayer->axis.push_back(i);
ieLayer->offset.push_back(sliceRanges[0][i].start);
ieLayer->dim.push_back(sliceRanges[0][i].end - sliceRanges[0][i].start);
axes.push_back(i);
offsets.push_back(sliceRanges[0][i].start);
dims.push_back(sliceRanges[0][i].size());
}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
std::vector<size_t> outShape(numDims);
for (int i = 0; i < numDims; ++i)
outShape[numDims - 1 - i] = sliceRanges[0][i].size();
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Crop");
ieLayer.getParameters()["axis"] = axes;
ieLayer.getParameters()["dim"] = dims;
ieLayer.getParameters()["offset"] = offsets;
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.getInputPorts()[1].setParameter("type", "weights");
// Fake blob which will be moved to inputs (as weights).
auto shapeSource = InferenceEngine::make_shared_blob<float>(
InferenceEngine::Precision::FP32,
InferenceEngine::Layout::ANY, outShape);
shapeSource->allocate();
addConstantData("weights", shapeSource, ieLayer);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
return Ptr<BackendNode>();
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Crop";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CropLayer> ieLayer(new InferenceEngine::CropLayer(lp));
ieLayer->axis = axes;
ieLayer->offset = offsets;
ieLayer->dim = dims;
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // IE < R5
return Ptr<BackendNode>();
}
#endif
};
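How a slice configuration is unrolled into the Crop parameters, assuming a hypothetical 4-D case (values illustrative only):

// sliceRanges[0] = { Range(0,1), Range(2,5), Range(0,10), Range(0,10) }
// non-MYRIAD targets walk the axes in reverse (from = numDims - 1, step = -1):
//   axes    = {3, 2, 1, 0}
//   offsets = {0, 0, 2, 0}
//   dims    = {10, 10, 3, 1}   // Range::size() == end - start
// MYRIAD walks forward over axes 1..numDims-1, skipping the batch axis.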

@ -110,7 +110,10 @@ TEST_P(DNNTestNetwork, AlexNet)
TEST_P(DNNTestNetwork, ResNet_50)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
applyTestTag(
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
CV_TEST_TAG_DEBUG_LONG
);
processNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
Size(224, 224), "prob",
target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_resnet_50.yml" :
@ -344,7 +347,10 @@ TEST_P(DNNTestNetwork, opencv_face_detector)
TEST_P(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
applyTestTag(
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
CV_TEST_TAG_DEBUG_LONG
);
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
@ -382,6 +388,8 @@ TEST_P(DNNTestNetwork, DenseNet_121)
TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB, CV_TEST_TAG_DEBUG_VERYLONG);
if (backend == DNN_BACKEND_HALIDE ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
@ -396,7 +404,7 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
Mat img = imread(findDataFile("dnn/googlenet_1.png", false));
Mat inp = blobFromImage(img, 1.0, Size(320, 240), Scalar(103.939, 116.779, 123.68), false, false);
// Output image has values in range [-143.526, 148.539].
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.3 : 4e-5;
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.4 : 4e-5;
float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.28 : 2e-3;
processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
}

@ -114,6 +114,9 @@ TEST_P(Reproducibility_AlexNet, Accuracy)
{
Target targetId = get<1>(GetParam());
applyTestTag(targetId == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
if (!ocl::useOpenCL() && targetId != DNN_TARGET_CPU)
throw SkipTestException("OpenCL is disabled");
bool readFromMemory = get<0>(GetParam());
Net net;
{
@ -154,7 +157,8 @@ INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_AlexNet, Combine(testing::Bool(),
TEST(Reproducibility_FCN, Accuracy)
{
applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_MEMORY_2GB);
applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_DEBUG_VERYLONG, CV_TEST_TAG_MEMORY_2GB);
Net net;
{
const string proto = findDataFile("dnn/fcn8s-heavy-pascal.prototxt", false);
@ -183,7 +187,7 @@ TEST(Reproducibility_FCN, Accuracy)
TEST(Reproducibility_SSD, Accuracy)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
applyTestTag(CV_TEST_TAG_MEMORY_512MB, CV_TEST_TAG_DEBUG_LONG);
Net net;
{
const string proto = findDataFile("dnn/ssd_vgg16.prototxt", false);
@ -281,6 +285,9 @@ TEST_P(Reproducibility_ResNet50, Accuracy)
{
Target targetId = GetParam();
applyTestTag(targetId == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
if (!ocl::useOpenCL() && targetId != DNN_TARGET_CPU)
throw SkipTestException("OpenCL is disabled");
Net net = readNetFromCaffe(findDataFile("dnn/ResNet-50-deploy.prototxt", false),
findDataFile("dnn/ResNet-50-model.caffemodel", false));
@ -541,7 +548,11 @@ INSTANTIATE_TEST_CASE_P(Test_Caffe, opencv_face_detector,
TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB));
applyTestTag(
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
CV_TEST_TAG_LONG,
CV_TEST_TAG_DEBUG_VERYLONG
);
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
@ -559,7 +570,10 @@ TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
TEST_P(Test_Caffe_nets, FasterRCNN_zf)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
applyTestTag(
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
CV_TEST_TAG_DEBUG_LONG
);
if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
@ -571,7 +585,11 @@ TEST_P(Test_Caffe_nets, FasterRCNN_zf)
TEST_P(Test_Caffe_nets, RFCN)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_2GB));
applyTestTag(
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_2GB),
CV_TEST_TAG_LONG,
CV_TEST_TAG_DEBUG_VERYLONG
);
if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");

@ -343,7 +343,7 @@ TEST_P(Test_ONNX_nets, VGG16_bn)
TEST_P(Test_ONNX_nets, ZFNet)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
applyTestTag(CV_TEST_TAG_MEMORY_2GB);
testONNXModels("zfnet512", pb);
}
@ -418,7 +418,10 @@ TEST_P(Test_ONNX_nets, MobileNet_v2)
TEST_P(Test_ONNX_nets, LResNet100E_IR)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
applyTestTag(
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
CV_TEST_TAG_DEBUG_LONG
);
if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL || target == DNN_TARGET_MYRIAD))
throw SkipTestException("");

@ -437,7 +437,12 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
TEST_P(Test_TensorFlow_nets, Faster_RCNN)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB)); // FIXIT split test
// FIXIT split test
applyTestTag(
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
CV_TEST_TAG_LONG,
CV_TEST_TAG_DEBUG_VERYLONG
);
static std::string names[] = {"faster_rcnn_inception_v2_coco_2018_01_28",
"faster_rcnn_resnet50_coco_2018_01_28"};
@ -535,7 +540,10 @@ TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
// np.save('east_text_detection.geometry.npy', geometry)
TEST_P(Test_TensorFlow_nets, EAST_text_detection)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
applyTestTag(
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
CV_TEST_TAG_DEBUG_LONG
);
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
@ -765,7 +773,7 @@ TEST(Test_TensorFlow, two_inputs)
TEST(Test_TensorFlow, Mask_RCNN)
{
applyTestTag(CV_TEST_TAG_MEMORY_1GB);
applyTestTag(CV_TEST_TAG_MEMORY_1GB, CV_TEST_TAG_DEBUG_VERYLONG);
std::string proto = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt", false);
std::string model = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pb", false);

@ -38,7 +38,7 @@ public class ImgcodecsTest extends OpenCVTestCase {
public void testImreadString() {
dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH);
assertTrue(!dst.empty());
assertFalse(dst.empty());
assertEquals(3, dst.channels());
assertTrue(512 == dst.cols());
assertTrue(512 == dst.rows());
@ -46,7 +46,7 @@ public class ImgcodecsTest extends OpenCVTestCase {
public void testImreadStringInt() {
dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, 0);
assertTrue(!dst.empty());
assertFalse(dst.empty());
assertEquals(1, dst.channels());
assertTrue(512 == dst.cols());
assertTrue(512 == dst.rows());

@ -334,6 +334,77 @@ bool TiffDecoder::nextPage()
readHeader();
}
static void fixOrientationPartial(Mat &img, uint16 orientation)
{
switch(orientation) {
case ORIENTATION_RIGHTTOP:
case ORIENTATION_LEFTBOT:
flip(img, img, -1);
/* fall through */
case ORIENTATION_LEFTTOP:
case ORIENTATION_RIGHTBOT:
transpose(img, img);
break;
}
}
static void fixOrientationFull(Mat &img, int orientation)
{
switch(orientation) {
case ORIENTATION_TOPRIGHT:
flip(img, img, 1);
break;
case ORIENTATION_BOTRIGHT:
flip(img, img, -1);
break;
case ORIENTATION_BOTLEFT:
flip(img, img, 0);
break;
case ORIENTATION_LEFTTOP:
transpose(img, img);
break;
case ORIENTATION_RIGHTTOP:
transpose(img, img);
flip(img, img, 1);
break;
case ORIENTATION_RIGHTBOT:
transpose(img, img);
flip(img, img, -1);
break;
case ORIENTATION_LEFTBOT:
transpose(img, img);
flip(img, img, 0);
break;
}
}
/**
* Fix the orientation defined in TIFF tag 274.
* For 8-bit images some corrections are already done by TIFFReadRGBAStrip/Tile;
* not so for 16/32/64-bit images.
*/
static void fixOrientation(Mat &img, uint16 orientation, int dst_bpp)
{
switch(dst_bpp) {
case 8:
fixOrientationPartial(img, orientation);
break;
case 16:
case 32:
case 64:
fixOrientationFull(img, orientation);
break;
}
}
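One composite case sketched out: transpose followed by a horizontal flip is a 90° clockwise rotation, so the ORIENTATION_RIGHTTOP branch is equivalent to cv::rotate:

// same effect as the ORIENTATION_RIGHTTOP branch above:
transpose(img, img);
flip(img, img, 1);
// ...which matches cv::rotate(img, img, cv::ROTATE_90_CLOCKWISE)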
bool TiffDecoder::readData( Mat& img )
{
int type = img.type();
@ -363,10 +434,11 @@ bool TiffDecoder::readData( Mat& img )
CV_TIFF_CHECK_CALL_DEBUG(TIFFGetField(tif, TIFFTAG_SAMPLESPERPIXEL, &ncn));
uint16 img_orientation = ORIENTATION_TOPLEFT;
CV_TIFF_CHECK_CALL_DEBUG(TIFFGetField(tif, TIFFTAG_ORIENTATION, &img_orientation));
bool vert_flip = (img_orientation == ORIENTATION_BOTRIGHT) || (img_orientation == ORIENTATION_RIGHTBOT) ||
(img_orientation == ORIENTATION_BOTLEFT) || (img_orientation == ORIENTATION_LEFTBOT);
const int bitsPerByte = 8;
int dst_bpp = (int)(img.elemSize1() * bitsPerByte);
bool vert_flip = dst_bpp == 8 &&
(img_orientation == ORIENTATION_BOTRIGHT || img_orientation == ORIENTATION_RIGHTBOT ||
img_orientation == ORIENTATION_BOTLEFT || img_orientation == ORIENTATION_LEFTBOT);
int wanted_channels = normalizeChannelsNumber(img.channels());
if (dst_bpp == 8)
@ -579,6 +651,7 @@ bool TiffDecoder::readData( Mat& img )
} // for x
} // for y
}
fixOrientation(img, img_orientation, dst_bpp);
}
if (m_hdr && depth >= CV_32F)

File diff suppressed because it is too large

@ -3266,7 +3266,7 @@ struct RGB2Luv_b
return;
}
int i, j, scn = srccn;
int scn = srccn;
#if CV_SIMD
float CV_DECL_ALIGNED(CV_SIMD_WIDTH) buf[bufChannels*BLOCK_SIZE];
#else
@ -3295,16 +3295,16 @@ struct RGB2Luv_b
}
#endif
for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*bufChannels )
for(int i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*bufChannels )
{
int dn = std::min(n - i, (int)BLOCK_SIZE);
j = 0;
static const softfloat f255inv = softfloat::one()/f255;
#if CV_SIMD
v_float32 v255inv = vx_setall_f32((float)f255inv);
if(scn == 4)
{
int j = 0;
static const int nBlock = fsize*4;
for( ; j <= dn*bufChannels - nBlock*3;
j += nBlock*3, src += nBlock*4)
@ -3334,9 +3334,16 @@ struct RGB2Luv_b
v_store_interleave(buf + j + k*3*fsize, f[0*4+k], f[1*4+k], f[2*4+k]);
}
}
for( ; j < dn*bufChannels; j += bufChannels, src += 4 )
{
buf[j ] = (float)(src[0]*((float)f255inv));
buf[j+1] = (float)(src[1]*((float)f255inv));
buf[j+2] = (float)(src[2]*((float)f255inv));
}
}
else // scn == 3
{
int j = 0;
static const int nBlock = fsize*2;
for( ; j <= dn*bufChannels - nBlock;
j += nBlock, src += nBlock)
@ -3348,17 +3355,23 @@ struct RGB2Luv_b
v_store_aligned(buf + j + 0*fsize, v_cvt_f32(q0)*v255inv);
v_store_aligned(buf + j + 1*fsize, v_cvt_f32(q1)*v255inv);
}
for( ; j < dn*bufChannels; j++, src++ )
{
buf[j] = (float)(src[0]*((float)f255inv));
}
}
#endif
for( ; j < dn*bufChannels; j += bufChannels, src += scn )
#else
for(int j = 0; j < dn*bufChannels; j += bufChannels, src += scn )
{
buf[j ] = (float)(src[0]*((float)f255inv));
buf[j+1] = (float)(src[1]*((float)f255inv));
buf[j+2] = (float)(src[2]*((float)f255inv));
}
#endif
fcvt(buf, buf, dn);
j = 0;
int j = 0;
#if CV_SIMD
for( ; j <= dn*3 - fsize*3*4; j += fsize*3*4)
@ -3389,7 +3402,7 @@ struct RGB2Luv_b
#endif
for( ; j < dn*3; j += 3 )
{
dst[j] = saturate_cast<uchar>(buf[j]*(float)fL);
dst[j+0] = saturate_cast<uchar>(buf[j+0]*(float)fL);
dst[j+1] = saturate_cast<uchar>(buf[j+1]*(float)fu + (float)su);
dst[j+2] = saturate_cast<uchar>(buf[j+2]*(float)fv + (float)sv);
}
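The restructuring keeps the usual vector-body-plus-scalar-tail shape; a generic sketch (process_simd and process_scalar are placeholders, not OpenCV APIs):

int j = 0;
for (; j + step <= n; j += step)
    process_simd(buf + j);    // vector body, 'step' elements per iteration
for (; j < n; j++)
    process_scalar(buf + j);  // scalar tail finishes the remainder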

@ -581,7 +581,7 @@ public class OpenCVTestCase extends TestCase {
message = TAG + " :: " + "could not instantiate " + cname + "! Exception: " + ex.getMessage();
}
assertTrue(message, instance!=null);
assertNotNull(message, instance);
return instance;
}

@ -96,7 +96,7 @@ public class OpenCVTestRunner extends InstrumentationTestRunner {
}
context = getContext();
Assert.assertTrue("Context can't be 'null'", context != null);
Assert.assertNotNull("Context can't be 'null'", context);
LENA_PATH = Utils.exportResource(context, R.drawable.lena);
CHESS_PATH = Utils.exportResource(context, R.drawable.chessboard);
LBPCASCADE_FRONTALFACE_PATH = Utils.exportResource(context, R.raw.lbpcascade_frontalface);

@ -74,7 +74,7 @@ public class UtilsTest extends OpenCVTestCase {
// RGBA
Mat imgRGBA = new Mat();
Imgproc.cvtColor(imgBGR, imgRGBA, Imgproc.COLOR_BGR2RGBA);
assertTrue(!imgRGBA.empty() && imgRGBA.channels() == 4);
assertFalse(imgRGBA.empty() && imgRGBA.channels() == 4);
bmp16.eraseColor(Color.BLACK); m16.setTo(s0);
Utils.matToBitmap(imgRGBA, bmp16); Utils.bitmapToMat(bmp16, m16);
@ -92,7 +92,7 @@ public class UtilsTest extends OpenCVTestCase {
// RGB
Mat imgRGB = new Mat();
Imgproc.cvtColor(imgBGR, imgRGB, Imgproc.COLOR_BGR2RGB);
assertTrue(!imgRGB.empty() && imgRGB.channels() == 3);
assertFalse(imgRGB.empty() && imgRGB.channels() == 3);
bmp16.eraseColor(Color.BLACK); m16.setTo(s0);
Utils.matToBitmap(imgRGB, bmp16); Utils.bitmapToMat(bmp16, m16);
@ -110,7 +110,7 @@ public class UtilsTest extends OpenCVTestCase {
// Gray
Mat imgGray = new Mat();
Imgproc.cvtColor(imgBGR, imgGray, Imgproc.COLOR_BGR2GRAY);
assertTrue(!imgGray.empty() && imgGray.channels() == 1);
assertFalse(imgGray.empty() && imgGray.channels() == 1);
Mat tmp = new Mat();
bmp16.eraseColor(Color.BLACK); m16.setTo(s0);

@ -4,6 +4,9 @@
<property name="test.dir" value="testResults"/>
<property name="build.dir" value="build"/>
<property name="opencv.test.package" value="*"/>
<property name="opencv.test.class" value="*"/>
<path id="master-classpath">
<fileset dir="lib">
<include name="*.jar"/>
@ -21,7 +24,7 @@
<target name="compile">
<mkdir dir="build/classes"/>
<javac sourcepath="" srcdir="src" destdir="build/classes" includeantruntime="false" >
<javac sourcepath="" srcdir="src" destdir="build/classes" debug="on" includeantruntime="false" >
<include name="**/*.java"/>
<classpath refid="master-classpath"/>
</javac>
@ -38,7 +41,7 @@
<target name="test" depends="jar">
<mkdir dir="${test.dir}"/>
<junit printsummary="true" haltonfailure="false" haltonerror="false" showoutput="true" logfailedtests="true" maxmemory="256m">
<junit printsummary="withOutAndErr" haltonfailure="false" haltonerror="false" showoutput="true" logfailedtests="true" maxmemory="256m">
<sysproperty key="java.library.path" path="${opencv.lib.path}"/>
<env key="PATH" path="${opencv.lib.path}:${env.PATH}:${env.Path}"/>
<env key="DYLD_LIBRARY_PATH" path="${env.OPENCV_SAVED_DYLD_LIBRARY_PATH}"/> <!-- https://github.com/opencv/opencv/issues/14353 -->
@ -50,7 +53,7 @@
<formatter type="xml"/>
<batchtest fork="yes" todir="${test.dir}">
<zipfileset src="build/jar/opencv-test.jar" includes="**/*.class" excludes="**/OpenCVTest*">
<zipfileset src="build/jar/opencv-test.jar" includes="**/${opencv.test.package}/${opencv.test.class}.class" excludes="**/OpenCVTest*">
<exclude name="**/*$*.class"/>
</zipfileset>
</batchtest>

@ -607,7 +607,7 @@ public class OpenCVTestCase extends TestCase {
message = TAG + " :: " + "could not instantiate " + cname + "! Exception: " + ex.getMessage();
}
assertTrue(message, instance!=null);
assertNotNull(message, instance);
return instance;
}

@ -296,6 +296,40 @@ namespace binding_utils
cv::minEnclosingCircle(points, circle.center, circle.radius);
return circle;
}
int floodFill_withRect_helper(cv::Mat& arg1, cv::Mat& arg2, Point arg3, Scalar arg4, emscripten::val arg5, Scalar arg6 = Scalar(), Scalar arg7 = Scalar(), int arg8 = 4)
{
cv::Rect rect;
int rc = cv::floodFill(arg1, arg2, arg3, arg4, &rect, arg6, arg7, arg8);
arg5.set("x", emscripten::val(rect.x));
arg5.set("y", emscripten::val(rect.y));
arg5.set("width", emscripten::val(rect.width));
arg5.set("height", emscripten::val(rect.height));
return rc;
}
int floodFill_wrapper(cv::Mat& arg1, cv::Mat& arg2, Point arg3, Scalar arg4, emscripten::val arg5, Scalar arg6, Scalar arg7, int arg8) {
return floodFill_withRect_helper(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
}
int floodFill_wrapper_1(cv::Mat& arg1, cv::Mat& arg2, Point arg3, Scalar arg4, emscripten::val arg5, Scalar arg6, Scalar arg7) {
return floodFill_withRect_helper(arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
int floodFill_wrapper_2(cv::Mat& arg1, cv::Mat& arg2, Point arg3, Scalar arg4, emscripten::val arg5, Scalar arg6) {
return floodFill_withRect_helper(arg1, arg2, arg3, arg4, arg5, arg6);
}
int floodFill_wrapper_3(cv::Mat& arg1, cv::Mat& arg2, Point arg3, Scalar arg4, emscripten::val arg5) {
return floodFill_withRect_helper(arg1, arg2, arg3, arg4, arg5);
}
int floodFill_wrapper_4(cv::Mat& arg1, cv::Mat& arg2, Point arg3, Scalar arg4) {
return cv::floodFill(arg1, arg2, arg3, arg4);
}
#endif
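floodFill needs hand-written bindings because the native signature returns the repainted region through a cv::Rect* output parameter; the helper calls the C++ overload with a local rect and copies x/y/width/height onto the JS object, and the numbered wrappers emulate the default arguments for each arity.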
#ifdef HAVE_OPENCV_VIDEO
@ -557,6 +591,16 @@ EMSCRIPTEN_BINDINGS(binding_utils)
#ifdef HAVE_OPENCV_IMGPROC
function("minEnclosingCircle", select_overload<binding_utils::Circle(const cv::Mat&)>(&binding_utils::minEnclosingCircle));
function("floodFill", select_overload<int(cv::Mat&, cv::Mat&, Point, Scalar, emscripten::val, Scalar, Scalar, int)>(&binding_utils::floodFill_wrapper));
function("floodFill", select_overload<int(cv::Mat&, cv::Mat&, Point, Scalar, emscripten::val, Scalar, Scalar)>(&binding_utils::floodFill_wrapper_1));
function("floodFill", select_overload<int(cv::Mat&, cv::Mat&, Point, Scalar, emscripten::val, Scalar)>(&binding_utils::floodFill_wrapper_2));
function("floodFill", select_overload<int(cv::Mat&, cv::Mat&, Point, Scalar, emscripten::val)>(&binding_utils::floodFill_wrapper_3));
function("floodFill", select_overload<int(cv::Mat&, cv::Mat&, Point, Scalar)>(&binding_utils::floodFill_wrapper_4));
#endif
function("minMaxLoc", select_overload<binding_utils::MinMaxLoc(const cv::Mat&, const cv::Mat&)>(&binding_utils::minMaxLoc));

@ -84,7 +84,7 @@ ignore_list = ['locate', #int&
'minEnclosingCircle', #float&
'checkRange',
'minMaxLoc', #double*
'floodFill',
'floodFill', # special case, implemented in core_bindings.cpp
'phaseCorrelate',
'randShuffle',
'calibrationMatrixValues', #double&

@ -147,6 +147,60 @@ QUnit.test('test_imgProc', function(assert) {
dest.delete();
source.delete();
}
// floodFill
{
let center = new cv.Point(5, 5);
let rect = new cv.Rect(0, 0, 0, 0);
let img = new cv.Mat.zeros(10, 10, cv.CV_8UC1);
let color = new cv.Scalar (255);
cv.circle(img, center, 3, color, 1);
let edge = new cv.Mat();
cv.Canny(img, edge, 100, 255);
cv.copyMakeBorder(edge, edge, 1, 1, 1, 1, cv.BORDER_REPLICATE);
let expected_img_data = new Uint8Array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 255, 0, 0, 0, 0,
0, 0, 0, 255, 255, 255, 255, 255, 0, 0,
0, 0, 0, 255, 0, 255, 0, 255, 0, 0,
0, 0, 255, 255, 255, 255, 0, 0, 255, 0,
0, 0, 0, 255, 0, 0, 0, 255, 0, 0,
0, 0, 0, 255, 255, 0, 255, 255, 0, 0,
0, 0, 0, 0, 0, 255, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let img_elem = 10*10*1;
let expected_img_data_ptr = cv._malloc(img_elem);
let expected_img_data_heap = new Uint8Array(cv.HEAPU8.buffer,
expected_img_data_ptr,
img_elem);
expected_img_data_heap.set(new Uint8Array(expected_img_data.buffer));
let expected_img = new cv.Mat( 10, 10, cv.CV_8UC1, expected_img_data_ptr, 0);
let expected_rect = new cv.Rect(3,3,3,3);
let compare_result = new cv.Mat(10, 10, cv.CV_8UC1);
cv.floodFill(img, edge, center, color, rect);
cv.compare (img, expected_img, compare_result, cv.CMP_EQ);
// expect every pixel to be the same.
assert.equal (cv.countNonZero(compare_result), img.total());
assert.equal (rect.x, expected_rect.x);
assert.equal (rect.y, expected_rect.y);
assert.equal (rect.width, expected_rect.width);
assert.equal (rect.height, expected_rect.height);
img.delete();
edge.delete();
expected_img.delete();
compare_result.delete();
}
});
QUnit.test('test_segmentation', function(assert) {

@ -22,12 +22,12 @@ public class CascadeClassifierTest extends OpenCVTestCase {
public void testCascadeClassifier() {
cc = new CascadeClassifier();
assertTrue(null != cc);
assertNotNull(cc);
}
public void testCascadeClassifierString() {
cc = new CascadeClassifier(OpenCVTestRunner.LBPCASCADE_FRONTALFACE_PATH);
assertTrue(null != cc);
assertNotNull(cc);
}
public void testDetectMultiScaleMatListOfRect() {
@ -98,7 +98,7 @@ public class CascadeClassifierTest extends OpenCVTestCase {
public void testLoad() {
cc = new CascadeClassifier();
cc.load(OpenCVTestRunner.LBPCASCADE_FRONTALFACE_PATH);
assertTrue(!cc.empty());
assertFalse(cc.empty());
}
}

@ -50,7 +50,7 @@ if __name__ == "__main__":
parser.add_argument("--android_env", action='append', help="Android: add environment variable (NAME=VALUE)")
parser.add_argument("--android_propagate_opencv_env", action="store_true", default=False, help="Android: propagate OPENCV* environment variables")
parser.add_argument("--serial", metavar="serial number", default="", help="Android: directs command to the USB device or emulator with the given serial number")
parser.add_argument("--package", metavar="package", default="", help="Android: run jUnit tests for specified package")
parser.add_argument("--package", metavar="package", default="", help="Java: run JUnit tests for specified module or Android package")
parser.add_argument("--trace", action="store_true", default=False, help="Trace: enable OpenCV tracing")
parser.add_argument("--trace_dump", metavar="trace_dump", default=-1, help="Trace: dump highlight calls (specify max entries count, 0 - dump all)")

@ -112,7 +112,10 @@ class TestSuite(object):
args = args[:]
exe = os.path.abspath(path)
if module == "java":
cmd = [self.cache.ant_executable, "-Dopencv.build.type=%s" % self.cache.build_type, "buildAndTest"]
cmd = [self.cache.ant_executable, "-Dopencv.build.type=%s" % self.cache.build_type]
if self.options.package:
cmd += ["-Dopencv.test.package=%s" % self.options.package]
cmd += ["buildAndTest"]
ret = execute(cmd, cwd=self.cache.java_test_dir)
return None, ret
elif module in ['python2', 'python3']:
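With this change a single Java test package can be selected from the runner, e.g. --package org.opencv.imgproc is forwarded to Ant as -Dopencv.test.package=org.opencv.imgproc, which the build.xml filter above turns into a zipfileset include pattern (invocation illustrative; other run.py arguments omitted).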

@ -528,6 +528,8 @@ class videoInput{
int getFourcc(int deviceID) const;
double getFPS(int deviceID) const;
int getChannel(int deviceID) const;
// RGB conversion setting
bool getConvertRGB(int deviceID);
bool setConvertRGB(int deviceID, bool enable);
@ -963,6 +965,16 @@ videoDevice::~videoDevice(){
HRESULT HR = NOERROR;
//Check to see if the graph is running, if so stop it.
if( (pControl) )
{
HR = pControl->Pause();
if (FAILED(HR)) DebugPrintOut("ERROR - Could not pause pControl\n");
HR = pControl->Stop();
if (FAILED(HR)) DebugPrintOut("ERROR - Could not stop pControl\n");
}
//Stop the callback and free it
if( (sgCallback) && (pGrabber) )
{
@ -979,16 +991,6 @@ videoDevice::~videoDevice(){
delete sgCallback;
}
//Check to see if the graph is running, if so stop it.
if( (pControl) )
{
HR = pControl->Pause();
if (FAILED(HR)) DebugPrintOut("ERROR - Could not pause pControl\n");
HR = pControl->Stop();
if (FAILED(HR)) DebugPrintOut("ERROR - Could not stop pControl\n");
}
//Disconnect filters from capture device
if( (pVideoInputFilter) )NukeDownstream(pVideoInputFilter);
@ -1480,6 +1482,12 @@ double videoInput::getFPS(int id) const
}
int videoInput::getChannel(int deviceID) const
{
if (!isDeviceSetup(deviceID))
return 0;
return VDList[deviceID]->storeConn;
}
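getChannel returns the stored crossbar connection index (storeConn) for a set-up device, or 0 otherwise; it backs the new CAP_PROP_CHANNEL case in getProperty below.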
// ----------------------------------------------------------------------
//
@ -3342,11 +3350,15 @@ double VideoCapture_DShow::getProperty(int propIdx) const
return g_VI.getFourcc(m_index);
case CV_CAP_PROP_FPS:
return g_VI.getFPS(m_index);
case CV_CAP_PROP_CONVERT_RGB:
return g_VI.getConvertRGB(m_index);
case CAP_PROP_CHANNEL:
return g_VI.getChannel(m_index);
case CV_CAP_PROP_AUTOFOCUS:
// Flags indicate whether or not autofocus is enabled
if (g_VI.getVideoSettingCamera(m_index, CameraControl_Focus, min_value, max_value, stepping_delta, current_value, flags, defaultValue))
return (double)flags;
return -1;
break;
// video filter properties
case CV_CAP_PROP_BRIGHTNESS:
@ -3361,7 +3373,7 @@ double VideoCapture_DShow::getProperty(int propIdx) const
case CV_CAP_PROP_GAIN:
if (g_VI.getVideoSettingFilter(m_index, g_VI.getVideoPropertyFromCV(propIdx), min_value, max_value, stepping_delta, current_value, flags, defaultValue))
return (double)current_value;
return -1;
break;
// camera properties
case CV_CAP_PROP_PAN:
@ -3373,14 +3385,12 @@ double VideoCapture_DShow::getProperty(int propIdx) const
case CV_CAP_PROP_FOCUS:
if (g_VI.getVideoSettingCamera(m_index, g_VI.getCameraPropertyFromCV(propIdx), min_value, max_value, stepping_delta, current_value, flags, defaultValue))
return (double)current_value;
return -1;
}
if (propIdx == CV_CAP_PROP_SETTINGS )
{
break;
case CV_CAP_PROP_SETTINGS:
return g_VI.property_window_count(m_index);
default:
break;
}
// unknown parameter or value not available
return -1;
}
@ -3488,12 +3498,6 @@ bool VideoCapture_DShow::setProperty(int propIdx, double propVal)
return true;
}
// show video/camera filter dialog
if (propIdx == CV_CAP_PROP_SETTINGS )
{
return g_VI.showSettingsWindow(m_index);
}
//video Filter properties
switch (propIdx)
{
@ -3521,6 +3525,9 @@ bool VideoCapture_DShow::setProperty(int propIdx, double propVal)
case CV_CAP_PROP_IRIS:
case CV_CAP_PROP_FOCUS:
return g_VI.setVideoSettingCamera(m_index, g_VI.getCameraPropertyFromCV(propIdx), (long)propVal);
// show video/camera filter dialog
case CV_CAP_PROP_SETTINGS:
return g_VI.showSettingsWindow(m_index);
}
return false;

@ -51,27 +51,23 @@
#include "precomp.hpp"
#include "opencv2/imgcodecs.hpp"
#include <sys/stat.h>
#ifdef NDEBUG
#include "opencv2/core/utils/filesystem.hpp"
#if 0
#define CV_WARN(message)
#else
#define CV_WARN(message) fprintf(stderr, "warning: %s (%s:%d)\n", message, __FILE__, __LINE__)
#define CV_WARN(message) CV_LOG_INFO(NULL, "CAP_IMAGES warning: %s (%s:%d)" << message)
#endif
#ifndef _MAX_PATH
#define _MAX_PATH 1024
#endif
namespace cv
{
namespace cv {
class CvCapture_Images: public IVideoCapture
{
public:
void init()
{
filename.clear();
filename_pattern.clear();
frame.release();
currentframe = firstframe = 0;
length = 0;
@ -101,8 +97,7 @@ public:
bool open(const String&);
void close();
protected:
std::string filename; // actually a printf-pattern
std::string filename_pattern; // actually a printf-pattern
unsigned currentframe;
unsigned firstframe; // number of first frame
unsigned length; // length of sequence
@ -118,12 +113,8 @@ void CvCapture_Images::close()
bool CvCapture_Images::grabFrame()
{
char str[_MAX_PATH];
if( filename.empty() )
return false;
sprintf(str, filename.c_str(), firstframe + currentframe);
cv::String filename = cv::format(filename_pattern.c_str(), (int)(firstframe + currentframe));
CV_Assert(!filename.empty());
if (grabbedInOpen)
{
@ -133,7 +124,7 @@ bool CvCapture_Images::grabFrame()
return !frame.empty();
}
frame = imread(str, IMREAD_UNCHANGED);
frame = imread(filename, IMREAD_UNCHANGED);
if( !frame.empty() )
currentframe++;
@ -152,7 +143,7 @@ double CvCapture_Images::getProperty(int id) const
switch(id)
{
case CV_CAP_PROP_POS_MSEC:
CV_WARN("collections of images don't have framerates\n");
CV_WARN("collections of images don't have framerates");
return 0;
case CV_CAP_PROP_POS_FRAMES:
return currentframe;
@ -165,10 +156,10 @@ double CvCapture_Images::getProperty(int id) const
case CV_CAP_PROP_FRAME_HEIGHT:
return frame.rows;
case CV_CAP_PROP_FPS:
CV_WARN("collections of images don't have framerates\n");
CV_WARN("collections of images don't have framerates");
return 1;
case CV_CAP_PROP_FOURCC:
CV_WARN("collections of images don't have 4-character codes\n");
CV_WARN("collections of images don't have 4-character codes");
return 0;
}
return 0;
@ -181,11 +172,11 @@ bool CvCapture_Images::setProperty(int id, double value)
case CV_CAP_PROP_POS_MSEC:
case CV_CAP_PROP_POS_FRAMES:
if(value < 0) {
CV_WARN("seeking to negative positions does not work - clamping\n");
CV_WARN("seeking to negative positions does not work - clamping");
value = 0;
}
if(value >= length) {
CV_WARN("seeking beyond end of sequence - clamping\n");
CV_WARN("seeking beyond end of sequence - clamping");
value = length - 1;
}
currentframe = cvRound(value);
@ -194,10 +185,10 @@ bool CvCapture_Images::setProperty(int id, double value)
return true;
case CV_CAP_PROP_POS_AVI_RATIO:
if(value > 1) {
CV_WARN("seeking beyond end of sequence - clamping\n");
CV_WARN("seeking beyond end of sequence - clamping");
value = 1;
} else if(value < 0) {
CV_WARN("seeking to negative positions does not work - clamping\n");
CV_WARN("seeking to negative positions does not work - clamping");
value = 0;
}
currentframe = cvRound((length - 1) * value);
@ -205,61 +196,92 @@ bool CvCapture_Images::setProperty(int id, double value)
grabbedInOpen = false; // grabbed frame is not valid anymore
return true;
}
CV_WARN("unknown/unhandled property\n");
CV_WARN("unknown/unhandled property");
return false;
}
static std::string extractPattern(const std::string& filename, unsigned& offset)
static
std::string icvExtractPattern(const std::string& filename, unsigned *offset)
{
std::string name;
size_t len = filename.size();
CV_Assert(!filename.empty());
CV_Assert(offset);
if( filename.empty() )
return std::string();
*offset = 0;
// check whether this is a valid image sequence filename
char *at = strchr((char*)filename.c_str(), '%');
if(at)
std::string::size_type pos = filename.find('%');
if (pos != std::string::npos)
{
unsigned int dummy;
if(sscanf(at + 1, "%ud", &dummy) != 1)
return std::string();
name = filename;
pos++; CV_Assert(pos < len);
if (filename[pos] == '0') // optional zero prefix
{
pos++; CV_Assert(pos < len);
}
if (filename[pos] >= '1' && filename[pos] <= '9') // optional numeric size (1..9) (one symbol only)
{
pos++; CV_Assert(pos < len);
}
if (filename[pos] == 'd' || filename[pos] == 'u')
{
pos++;
if (pos == len)
return filename; // end of string '...%5d'
CV_Assert(pos < len);
if (filename.find('%', pos) == std::string::npos)
return filename; // no more patterns
CV_Error_(Error::StsBadArg, ("CAP_IMAGES: invalid multiple patterns: %s", filename.c_str()));
}
CV_Error_(Error::StsBadArg, ("CAP_IMAGES: error, expected '0?[1-9][du]' pattern, got: %s", filename.c_str()));
}
else // no pattern filename was given - extract the pattern
{
at = (char*)filename.c_str();
// ignore directory names
char *slash = strrchr(at, '/');
if (slash) at = slash + 1;
pos = filename.rfind('/');
#ifdef _WIN32
slash = strrchr(at, '\\');
if (slash) at = slash + 1;
if (pos == std::string::npos)
pos = filename.rfind('\\');
#endif
if (pos != std::string::npos)
pos++;
else
pos = 0;
while (*at && !isdigit(*at)) at++;
if(!*at)
return std::string();
while (pos < len && !isdigit(filename[pos])) pos++;
sscanf(at, "%u", &offset);
if (pos == len)
{
CV_Error_(Error::StsBadArg, ("CAP_IMAGES: can't find starting number (in the name of file): %s", filename.c_str()));
}
name = filename.substr(0, at - filename.c_str());
name += "%0";
std::string::size_type pos0 = pos;
int i;
char *extension;
for(i = 0, extension = at; isdigit(at[i]); i++, extension++)
;
char places[13] = {0};
sprintf(places, "%dd", i);
const int64_t max_number = 1000000000;
CV_Assert(max_number < INT_MAX); // offset is 'int'
name += places;
name += extension;
int number_str_size = 0;
uint64_t number = 0;
while (pos < len && isdigit(filename[pos]))
{
char ch = filename[pos];
number = (number * 10) + (uint64_t)((int)ch - (int)'0');
CV_Assert(number < max_number);
number_str_size++;
CV_Assert(number_str_size <= 64); // don't allow huge zero prefixes
pos++;
}
CV_Assert(number_str_size > 0);
*offset = (int)number;
std::string result;
if (pos0 > 0)
result += filename.substr(0, pos0);
result += cv::format("%%0%dd", number_str_size);
if (pos < len)
result += filename.substr(pos);
CV_LOG_INFO(NULL, "Pattern: " << result << " @ " << number);
return result;
}
return name;
}
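The two modes of the extractor, sketched; return values inferred from the code above:

unsigned offset = 0;
// explicit pattern: validated against '%0?[1-9]?[du]' and returned unchanged
std::string p1 = icvExtractPattern("frame_%05d.png", &offset); // "frame_%05d.png", offset == 0
// plain filename: the first digit run becomes the format, its value the offset
std::string p2 = icvExtractPattern("img0042.jpg", &offset);    // "img%04d.jpg", offset == 42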
@ -268,33 +290,34 @@ bool CvCapture_Images::open(const std::string& _filename)
unsigned offset = 0;
close();
filename = extractPattern(_filename, offset);
if( filename.empty() )
return false;
CV_Assert(!_filename.empty());
filename_pattern = icvExtractPattern(_filename, &offset);
CV_Assert(!filename_pattern.empty());
// determine the length of the sequence
length = 0;
char str[_MAX_PATH];
for(;;)
for (length = 0; ;)
{
sprintf(str, filename.c_str(), offset + length);
struct stat s;
if(stat(str, &s))
cv::String filename = cv::format(filename_pattern.c_str(), (int)(offset + length));
if (!utils::fs::exists(filename))
{
if(length == 0 && offset == 0) // allow starting with 0 or 1
if (length == 0 && offset == 0) // allow starting with 0 or 1
{
offset++;
continue;
}
break;
}
if(!haveImageReader(str))
if(!haveImageReader(filename))
{
CV_LOG_INFO(NULL, "CAP_IMAGES: Stop scanning. Can't read image file: " << filename);
break;
}
length++;
}
if(length == 0)
if (length == 0)
{
close();
return false;
@ -312,7 +335,7 @@ bool CvCapture_Images::open(const std::string& _filename)
bool CvCapture_Images::isOpened() const
{
return !filename.empty();
return !filename_pattern.empty();
}
Ptr<IVideoCapture> create_Images_capture(const std::string &filename)
@ -330,7 +353,7 @@ class CvVideoWriter_Images CV_FINAL : public CvVideoWriter
public:
CvVideoWriter_Images()
{
filename.clear();
filename_pattern.clear();
currentframe = 0;
}
virtual ~CvVideoWriter_Images() { close(); }
@ -342,20 +365,23 @@ public:
int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_IMAGES; }
protected:
std::string filename;
std::string filename_pattern;
unsigned currentframe;
std::vector<int> params;
};
bool CvVideoWriter_Images::writeFrame( const IplImage* image )
{
char str[_MAX_PATH];
sprintf(str, filename.c_str(), currentframe);
CV_Assert(!filename_pattern.empty());
cv::String filename = cv::format(filename_pattern.c_str(), (int)currentframe);
CV_Assert(!filename.empty());
std::vector<int> image_params = params;
image_params.push_back(0); // append parameters 'stop' mark
image_params.push_back(0);
cv::Mat img = cv::cvarrToMat(image);
bool ret = cv::imwrite(str, img, image_params);
bool ret = cv::imwrite(filename, img, image_params);
currentframe++;
@ -364,7 +390,7 @@ bool CvVideoWriter_Images::writeFrame( const IplImage* image )
void CvVideoWriter_Images::close()
{
filename.clear();
filename_pattern.clear();
currentframe = 0;
params.clear();
}
@ -373,16 +399,14 @@ void CvVideoWriter_Images::close()
bool CvVideoWriter_Images::open( const char* _filename )
{
unsigned offset = 0;
close();
filename = cv::extractPattern(_filename, offset);
if(filename.empty())
return false;
CV_Assert(_filename);
filename_pattern = icvExtractPattern(_filename, &offset);
CV_Assert(!filename_pattern.empty());
char str[_MAX_PATH];
sprintf(str, filename.c_str(), 0);
if(!cv::haveImageWriter(str))
cv::String filename = cv::format(filename_pattern.c_str(), (int)currentframe);
if (!cv::haveImageWriter(filename))
{
close();
return false;
@ -409,10 +433,17 @@ Ptr<IVideoWriter> create_Images_writer(const std::string &filename, int, double,
{
CvVideoWriter_Images *writer = new CvVideoWriter_Images;
if( writer->open( filename.c_str() ))
return makePtr<LegacyWriter>(writer);
try
{
if( writer->open( filename.c_str() ))
return makePtr<LegacyWriter>(writer);
}
catch (...)
{
delete writer;
throw;
}
delete writer;
return 0;
}
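Note: icvExtractPattern now reports malformed patterns via CV_Error, so writer->open() can throw; the added try/catch releases the raw CvVideoWriter_Images pointer before rethrowing instead of leaking it.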
