diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp index 709c21b1af..9bba16a85e 100644 --- a/modules/calib3d/include/opencv2/calib3d.hpp +++ b/modules/calib3d/include/opencv2/calib3d.hpp @@ -2222,6 +2222,209 @@ public: int mode = StereoSGBM::MODE_SGBM); }; + +//! cv::undistort mode +enum UndistortTypes +{ + PROJ_SPHERICAL_ORTHO = 0, + PROJ_SPHERICAL_EQRECT = 1 +}; + +/** @brief Transforms an image to compensate for lens distortion. + +The function transforms an image to compensate for radial and tangential lens distortion. + +The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap +(with bilinear interpolation). See the former function for details of the transformation being +performed. + +Those pixels in the destination image, for which there are no corresponding pixels in the source +image, are filled with zeros (black color). + +A particular subset of the source image that will be visible in the corrected image can be regulated +by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate +newCameraMatrix depending on your requirements. + +The camera matrix and the distortion parameters can be determined using #calibrateCamera. If +the resolution of images is different from the resolution used at the calibration stage, \f$f_x, +f_y, c_x\f$ and \f$c_y\f$ need to be scaled accordingly, while the distortion coefficients remain +the same. + +@param src Input (distorted) image. +@param dst Output (corrected) image that has the same size and type as src . +@param cameraMatrix Input camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ +of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. +@param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as +cameraMatrix but you may additionally scale and shift the result by using a different matrix. + */ +CV_EXPORTS_W void undistort( InputArray src, OutputArray dst, + InputArray cameraMatrix, + InputArray distCoeffs, + InputArray newCameraMatrix = noArray() ); + +/** @brief Computes the undistortion and rectification transformation map. + +The function computes the joint undistortion and rectification transformation and represents the +result in the form of maps for remap. The undistorted image looks like the original, as if it is +captured with a camera using the camera matrix =newCameraMatrix and zero distortion. In case of a +monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by +#getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera, +newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify . + +Also, this new camera is oriented differently in the coordinate space, according to R. That, for +example, helps to align two heads of a stereo camera so that the epipolar lines on both images +become horizontal and have the same y-coordinate (in case of a horizontally aligned stereo camera). + +The function actually builds the maps for the inverse mapping algorithm that is used by remap. That +is, for each pixel \f$(u, v)\f$ in the destination (corrected and rectified) image, the function +computes the corresponding coordinates in the source image (that is, in the original image from camera). 
The following process is applied: +\f[ +\begin{array}{l} +x \leftarrow (u - {c'}_x)/{f'}_x \\ +y \leftarrow (v - {c'}_y)/{f'}_y \\ +{[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\ +x' \leftarrow X/W \\ +y' \leftarrow Y/W \\ +r^2 \leftarrow x'^2 + y'^2 \\ +x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} ++ 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4\\ +y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} ++ p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\ +s\vecthree{x'''}{y'''}{1} = +\vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)} +{0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)} +{0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\ +map_x(u,v) \leftarrow x''' f_x + c_x \\ +map_y(u,v) \leftarrow y''' f_y + c_y +\end{array} +\f] +where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ +are the distortion coefficients. + +In case of a stereo camera, this function is called twice: once for each camera head, after +stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera +was not calibrated, it is still possible to compute the rectification transformations directly from +the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes +homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D +space. R can be computed from H as +\f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f] +where cameraMatrix can be chosen arbitrarily. + +@param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ +of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. +@param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 , +computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation +is assumed. In cvInitUndistortMap R is assumed to be an identity matrix. +@param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$. +@param size Undistorted image size. +@param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps +@param map1 The first output map. +@param map2 The second output map. + */ +CV_EXPORTS_W +void initUndistortRectifyMap(InputArray cameraMatrix, InputArray distCoeffs, + InputArray R, InputArray newCameraMatrix, + Size size, int m1type, OutputArray map1, OutputArray map2); + +//! initializes maps for #remap for wide-angle +CV_EXPORTS +float initWideAngleProjMap(InputArray cameraMatrix, InputArray distCoeffs, + Size imageSize, int destImageWidth, + int m1type, OutputArray map1, OutputArray map2, + enum UndistortTypes projType = PROJ_SPHERICAL_EQRECT, double alpha = 0); +static inline +float initWideAngleProjMap(InputArray cameraMatrix, InputArray distCoeffs, + Size imageSize, int destImageWidth, + int m1type, OutputArray map1, OutputArray map2, + int projType, double alpha = 0) +{ + return initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, destImageWidth, + m1type, map1, map2, (UndistortTypes)projType, alpha); +} + +/** @brief Returns the default new camera matrix. 
+ +The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when +centerPrincipalPoint=false ), or the modified one (when centerPrincipalPoint=true). + +In the latter case, the new camera matrix will be: + +\f[\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,\f] + +where \f$f_x\f$ and \f$f_y\f$ are \f$(0,0)\f$ and \f$(1,1)\f$ elements of cameraMatrix, respectively. + +By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not +move the principal point. However, when you work with stereo, it is important to move the principal +points in both views to the same y-coordinate (which is required by most stereo correspondence +algorithms), and possibly to the same x-coordinate too. So, you can form the new camera matrix for +each view where the principal points are located at the center. + +@param cameraMatrix Input camera matrix. +@param imgsize Camera view image size in pixels. +@param centerPrincipalPoint Location of the principal point in the new camera matrix. The +parameter indicates whether this location should be at the image center or not. + */ +CV_EXPORTS_W +Mat getDefaultNewCameraMatrix(InputArray cameraMatrix, Size imgsize = Size(), + bool centerPrincipalPoint = false); + +/** @brief Computes the ideal point coordinates from the observed point coordinates. + +The function is similar to #undistort and #initUndistortRectifyMap but it operates on a +sparse set of points instead of a raster image. Also the function performs a reverse transformation +to projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a +planar object, it does, up to a translation vector, if the proper R is specified. + +For each observed point coordinate \f$(u, v)\f$ the function computes: +\f[ +\begin{array}{l} +x^{"} \leftarrow (u - c_x)/f_x \\ +y^{"} \leftarrow (v - c_y)/f_y \\ +(x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\ +{[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\ +x \leftarrow X/W \\ +y \leftarrow Y/W \\ +\text{only performed if P is specified:} \\ +u' \leftarrow x {f'}_x + {c'}_x \\ +v' \leftarrow y {f'}_y + {c'}_y +\end{array} +\f] + +where *undistort* is an approximate iterative algorithm that estimates the normalized original +point coordinates out of the normalized distorted point coordinates ("normalized" means that the +coordinates do not depend on the camera matrix). + +The function can be used for either a stereo camera head or a monocular camera (when R is empty). + +@param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2). +@param dst Output ideal point coordinates after undistortion and reverse perspective +transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates. +@param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ +of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. +@param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by +#stereoRectify can be passed here. If the matrix is empty, the identity transformation is used. 
+@param P New camera matrix (3x3) or new projection matrix (3x4) \f$\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}\f$. P1 or P2 computed by +#stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used. + */ +CV_EXPORTS_W +void undistortPoints(InputArray src, OutputArray dst, + InputArray cameraMatrix, InputArray distCoeffs, + InputArray R = noArray(), InputArray P = noArray()); +/** @overload + @note Default version of #undistortPoints does 5 iterations to compute undistorted points. + */ +CV_EXPORTS_AS(undistortPointsIter) +void undistortPoints(InputArray src, OutputArray dst, + InputArray cameraMatrix, InputArray distCoeffs, + InputArray R, InputArray P, TermCriteria criteria); + //! @} calib3d /** @brief The methods in this namespace use a so-called fisheye camera model. diff --git a/modules/calib3d/include/opencv2/calib3d/calib3d_c.h b/modules/calib3d/include/opencv2/calib3d/calib3d_c.h index 8ec6390d70..680d7d64fd 100644 --- a/modules/calib3d/include/opencv2/calib3d/calib3d_c.h +++ b/modules/calib3d/include/opencv2/calib3d/calib3d_c.h @@ -379,6 +379,39 @@ CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage, CvArr* _3dImage, const CvMat* Q, int handleMissingValues CV_DEFAULT(0) ); +/** @brief Transforms the input image to compensate for lens distortion +@see cv::undistort +*/ +CVAPI(void) cvUndistort2( const CvArr* src, CvArr* dst, + const CvMat* camera_matrix, + const CvMat* distortion_coeffs, + const CvMat* new_camera_matrix CV_DEFAULT(0) ); + +/** @brief Computes transformation map from intrinsic camera parameters + that can be used by cvRemap +*/ +CVAPI(void) cvInitUndistortMap( const CvMat* camera_matrix, + const CvMat* distortion_coeffs, + CvArr* mapx, CvArr* mapy ); + +/** @brief Computes undistortion+rectification map for a head of stereo camera +@see cv::initUndistortRectifyMap +*/ +CVAPI(void) cvInitUndistortRectifyMap( const CvMat* camera_matrix, + const CvMat* dist_coeffs, + const CvMat *R, const CvMat* new_camera_matrix, + CvArr* mapx, CvArr* mapy ); + +/** @brief Computes the original (undistorted) feature coordinates + from the observed (distorted) coordinates +@see cv::undistortPoints +*/ +CVAPI(void) cvUndistortPoints( const CvMat* src, CvMat* dst, + const CvMat* camera_matrix, + const CvMat* dist_coeffs, + const CvMat* R CV_DEFAULT(0), + const CvMat* P CV_DEFAULT(0)); + /** @} calib3d_c */ #ifdef __cplusplus diff --git a/modules/calib3d/misc/java/test/Calib3dTest.java b/modules/calib3d/misc/java/test/Calib3dTest.java index 67193d9586..98b10c8383 100644 --- a/modules/calib3d/misc/java/test/Calib3dTest.java +++ b/modules/calib3d/misc/java/test/Calib3dTest.java @@ -1,6 +1,7 @@ package org.opencv.test.calib3d; import org.opencv.calib3d.Calib3d; +import org.opencv.core.Core; import org.opencv.core.CvType; import org.opencv.core.Mat; import org.opencv.core.MatOfDouble; @@ -14,6 +15,15 @@ import org.opencv.imgproc.Imgproc; public class Calib3dTest extends OpenCVTestCase { + Size size; + + @Override + protected void setUp() throws Exception { + super.setUp(); + + size = new Size(3, 3); + } + public void testCalibrateCameraListOfMatListOfMatSizeMatMatListOfMatListOfMat() { fail("Not yet implemented"); } @@ -602,4 +612,131 @@ public class Calib3dTest extends OpenCVTestCase { Calib3d.computeCorrespondEpilines(left, 1, fundamental, lines); assertMatEqual(truth, lines, EPS); } + + public void testGetDefaultNewCameraMatrixMat() { + Mat mtx = Calib3d.getDefaultNewCameraMatrix(gray0); + 
+ assertFalse(mtx.empty()); + assertEquals(0, Core.countNonZero(mtx)); + } + + public void testGetDefaultNewCameraMatrixMatSizeBoolean() { + Mat mtx = Calib3d.getDefaultNewCameraMatrix(gray0, size, true); + + assertFalse(mtx.empty()); + assertFalse(0 == Core.countNonZero(mtx)); + // TODO_: write better test + } + + public void testInitUndistortRectifyMap() { + fail("Not yet implemented"); + Mat cameraMatrix = new Mat(3, 3, CvType.CV_32F); + cameraMatrix.put(0, 0, 1, 0, 1); + cameraMatrix.put(1, 0, 0, 1, 1); + cameraMatrix.put(2, 0, 0, 0, 1); + + Mat R = new Mat(3, 3, CvType.CV_32F, new Scalar(2)); + Mat newCameraMatrix = new Mat(3, 3, CvType.CV_32F, new Scalar(3)); + + Mat distCoeffs = new Mat(); + Mat map1 = new Mat(); + Mat map2 = new Mat(); + + // TODO: complete this test + Calib3d.initUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, size, CvType.CV_32F, map1, map2); + } + + public void testInitWideAngleProjMapMatMatSizeIntIntMatMat() { + fail("Not yet implemented"); + Mat cameraMatrix = new Mat(3, 3, CvType.CV_32F); + Mat distCoeffs = new Mat(1, 4, CvType.CV_32F); + // Size imageSize = new Size(2, 2); + + cameraMatrix.put(0, 0, 1, 0, 1); + cameraMatrix.put(1, 0, 0, 1, 2); + cameraMatrix.put(2, 0, 0, 0, 1); + + distCoeffs.put(0, 0, 1, 3, 2, 4); + truth = new Mat(3, 3, CvType.CV_32F); + truth.put(0, 0, 0, 0, 0); + truth.put(1, 0, 0, 0, 0); + truth.put(2, 0, 0, 3, 0); + // TODO: No documentation for this function + // Calib3d.initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, + // 5, m1type, truthput1, truthput2); + } + + public void testInitWideAngleProjMapMatMatSizeIntIntMatMatInt() { + fail("Not yet implemented"); + } + + public void testInitWideAngleProjMapMatMatSizeIntIntMatMatIntDouble() { + fail("Not yet implemented"); + } + + public void testUndistortMatMatMatMat() { + Mat src = new Mat(3, 3, CvType.CV_32F, new Scalar(3)); + Mat cameraMatrix = new Mat(3, 3, CvType.CV_32F) { + { + put(0, 0, 1, 0, 1); + put(1, 0, 0, 1, 2); + put(2, 0, 0, 0, 1); + } + }; + Mat distCoeffs = new Mat(1, 4, CvType.CV_32F) { + { + put(0, 0, 1, 3, 2, 4); + } + }; + + Calib3d.undistort(src, dst, cameraMatrix, distCoeffs); + + truth = new Mat(3, 3, CvType.CV_32F) { + { + put(0, 0, 0, 0, 0); + put(1, 0, 0, 0, 0); + put(2, 0, 0, 3, 0); + } + }; + assertMatEqual(truth, dst, EPS); + } + + public void testUndistortMatMatMatMatMat() { + Mat src = new Mat(3, 3, CvType.CV_32F, new Scalar(3)); + Mat cameraMatrix = new Mat(3, 3, CvType.CV_32F) { + { + put(0, 0, 1, 0, 1); + put(1, 0, 0, 1, 2); + put(2, 0, 0, 0, 1); + } + }; + Mat distCoeffs = new Mat(1, 4, CvType.CV_32F) { + { + put(0, 0, 2, 1, 4, 5); + } + }; + Mat newCameraMatrix = new Mat(3, 3, CvType.CV_32F, new Scalar(1)); + + Calib3d.undistort(src, dst, cameraMatrix, distCoeffs, newCameraMatrix); + + truth = new Mat(3, 3, CvType.CV_32F, new Scalar(3)); + assertMatEqual(truth, dst, EPS); + } + + //undistortPoints(List src, List dst, Mat cameraMatrix, Mat distCoeffs) + public void testUndistortPointsListOfPointListOfPointMatMat() { + MatOfPoint2f src = new MatOfPoint2f(new Point(1, 2), new Point(3, 4), new Point(-1, -1)); + MatOfPoint2f dst = new MatOfPoint2f(); + Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64FC1); + Mat distCoeffs = new Mat(8, 1, CvType.CV_64FC1, new Scalar(0)); + + Calib3d.undistortPoints(src, dst, cameraMatrix, distCoeffs); + + assertEquals(src.size(), dst.size()); + for(int i=0; i #include diff --git a/modules/imgproc/include/opencv2/imgproc/detail/distortion_model.hpp b/modules/calib3d/src/distortion_model.hpp similarity 
index 100% rename from modules/imgproc/include/opencv2/imgproc/detail/distortion_model.hpp rename to modules/calib3d/src/distortion_model.hpp diff --git a/modules/imgproc/src/undistort.avx2.cpp b/modules/calib3d/src/undistort.avx2.cpp similarity index 100% rename from modules/imgproc/src/undistort.avx2.cpp rename to modules/calib3d/src/undistort.avx2.cpp diff --git a/modules/imgproc/src/undistort.cpp b/modules/calib3d/src/undistort.cpp similarity index 95% rename from modules/imgproc/src/undistort.cpp rename to modules/calib3d/src/undistort.cpp index 14e5d37d13..2cdfe2ab59 100644 --- a/modules/imgproc/src/undistort.cpp +++ b/modules/calib3d/src/undistort.cpp @@ -41,9 +41,11 @@ //M*/ #include "precomp.hpp" -#include "opencv2/imgproc/detail/distortion_model.hpp" +#include "distortion_model.hpp" #include "undistort.hpp" +#include "opencv2/calib3d/calib3d_c.h" + cv::Mat cv::getDefaultNewCameraMatrix( InputArray _cameraMatrix, Size imgsize, bool centerPrincipalPoint ) { @@ -534,29 +536,31 @@ static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvM } } -void cvUndistortPoints( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix, - const CvMat* _distCoeffs, - const CvMat* matR, const CvMat* matP ) +void cvUndistortPoints(const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix, + const CvMat* _distCoeffs, + const CvMat* matR, const CvMat* matP) { cvUndistortPointsInternal(_src, _dst, _cameraMatrix, _distCoeffs, matR, matP, cv::TermCriteria(cv::TermCriteria::COUNT, 5, 0.01)); } -void cv::undistortPoints( InputArray _src, OutputArray _dst, - InputArray _cameraMatrix, - InputArray _distCoeffs, - InputArray _Rmat, - InputArray _Pmat ) +namespace cv { + +void undistortPoints(InputArray _src, OutputArray _dst, + InputArray _cameraMatrix, + InputArray _distCoeffs, + InputArray _Rmat, + InputArray _Pmat) { undistortPoints(_src, _dst, _cameraMatrix, _distCoeffs, _Rmat, _Pmat, TermCriteria(TermCriteria::MAX_ITER, 5, 0.01)); } -void cv::undistortPoints( InputArray _src, OutputArray _dst, - InputArray _cameraMatrix, - InputArray _distCoeffs, - InputArray _Rmat, - InputArray _Pmat, - TermCriteria criteria) +void undistortPoints(InputArray _src, OutputArray _dst, + InputArray _cameraMatrix, + InputArray _distCoeffs, + InputArray _Rmat, + InputArray _Pmat, + TermCriteria criteria) { Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat(); Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat(); @@ -578,10 +582,7 @@ void cv::undistortPoints( InputArray _src, OutputArray _dst, cvUndistortPointsInternal(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP, criteria); } -namespace cv -{ - -static Point2f mapPointSpherical(const Point2f& p, float alpha, Vec4d* J, int projType) +static Point2f mapPointSpherical(const Point2f& p, float alpha, Vec4d* J, enum UndistortTypes projType) { double x = p.x, y = p.y; double beta = 1 + 2*alpha; @@ -613,11 +614,11 @@ static Point2f mapPointSpherical(const Point2f& p, float alpha, Vec4d* J, int pr } return Point2f((float)asin(x1), (float)asin(y1)); } - CV_Error(CV_StsBadArg, "Unknown projection type"); + CV_Error(Error::StsBadArg, "Unknown projection type"); } -static Point2f invMapPointSpherical(Point2f _p, float alpha, int projType) +static Point2f invMapPointSpherical(Point2f _p, float alpha, enum UndistortTypes projType) { double eps = 1e-12; Vec2d p(_p.x, _p.y), q(_p.x, _p.y), err; @@ -646,11 +647,10 @@ static Point2f invMapPointSpherical(Point2f _p, float alpha, int projType) return i < maxiter ? 
Point2f((float)q[0], (float)q[1]) : Point2f(-FLT_MAX, -FLT_MAX); } -} - -float cv::initWideAngleProjMap( InputArray _cameraMatrix0, InputArray _distCoeffs0, - Size imageSize, int destImageWidth, int m1type, - OutputArray _map1, OutputArray _map2, int projType, double _alpha ) +float initWideAngleProjMap(InputArray _cameraMatrix0, InputArray _distCoeffs0, + Size imageSize, int destImageWidth, int m1type, + OutputArray _map1, OutputArray _map2, + enum UndistortTypes projType, double _alpha) { Mat cameraMatrix0 = _cameraMatrix0.getMat(), distCoeffs0 = _distCoeffs0.getMat(); double k[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}, M[9]={0,0,0,0,0,0,0,0,0}; @@ -735,4 +735,5 @@ float cv::initWideAngleProjMap( InputArray _cameraMatrix0, InputArray _distCoeff return scale; } +} // namespace /* End of file */ diff --git a/modules/imgproc/src/undistort.hpp b/modules/calib3d/src/undistort.hpp similarity index 95% rename from modules/imgproc/src/undistort.hpp rename to modules/calib3d/src/undistort.hpp index 02c1774a47..26633d0fb5 100644 --- a/modules/imgproc/src/undistort.hpp +++ b/modules/calib3d/src/undistort.hpp @@ -40,8 +40,8 @@ // //M*/ -#ifndef OPENCV_IMGPROC_UNDISTORT_HPP -#define OPENCV_IMGPROC_UNDISTORT_HPP +#ifndef OPENCV_CALIB3D_UNDISTORT_HPP +#define OPENCV_CALIB3D_UNDISTORT_HPP namespace cv { @@ -54,6 +54,6 @@ namespace cv #endif } -#endif +#endif // OPENCV_CALIB3D_UNDISTORT_HPP /* End of file */ diff --git a/modules/calib3d/test/test_undistort.cpp b/modules/calib3d/test/test_undistort.cpp index e4fe4fe1f3..80fab69a6c 100644 --- a/modules/calib3d/test/test_undistort.cpp +++ b/modules/calib3d/test/test_undistort.cpp @@ -42,6 +42,7 @@ #include "test_precomp.hpp" #include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/calib3d/calib3d_c.h" namespace opencv_test { namespace { @@ -938,4 +939,584 @@ TEST(Calib3d_DefaultNewCameraMatrix, accuracy) { CV_DefaultNewCameraMatrixTest t TEST(Calib3d_UndistortPoints, accuracy) { CV_UndistortPointsTest test; test.safe_run(); } TEST(Calib3d_InitUndistortRectifyMap, accuracy) { CV_InitUndistortRectifyMapTest test; test.safe_run(); } +////////////////////////////// undistort ///////////////////////////////// + +static void test_remap( const Mat& src, Mat& dst, const Mat& mapx, const Mat& mapy, + Mat* mask=0, int interpolation=CV_INTER_LINEAR ) +{ + int x, y, k; + int drows = dst.rows, dcols = dst.cols; + int srows = src.rows, scols = src.cols; + const uchar* sptr0 = src.ptr(); + int depth = src.depth(), cn = src.channels(); + int elem_size = (int)src.elemSize(); + int step = (int)(src.step / CV_ELEM_SIZE(depth)); + int delta; + + if( interpolation != CV_INTER_CUBIC ) + { + delta = 0; + scols -= 1; srows -= 1; + } + else + { + delta = 1; + scols = MAX(scols - 3, 0); + srows = MAX(srows - 3, 0); + } + + int scols1 = MAX(scols - 2, 0); + int srows1 = MAX(srows - 2, 0); + + if( mask ) + *mask = Scalar::all(0); + + for( y = 0; y < drows; y++ ) + { + uchar* dptr = dst.ptr(y); + const float* mx = mapx.ptr(y); + const float* my = mapy.ptr(y); + uchar* m = mask ? 
mask->ptr(y) : 0; + + for( x = 0; x < dcols; x++, dptr += elem_size ) + { + float xs = mx[x]; + float ys = my[x]; + int ixs = cvFloor(xs); + int iys = cvFloor(ys); + + if( (unsigned)(ixs - delta - 1) >= (unsigned)scols1 || + (unsigned)(iys - delta - 1) >= (unsigned)srows1 ) + { + if( m ) + m[x] = 1; + if( (unsigned)(ixs - delta) >= (unsigned)scols || + (unsigned)(iys - delta) >= (unsigned)srows ) + continue; + } + + xs -= ixs; + ys -= iys; + + switch( depth ) + { + case CV_8U: + { + const uchar* sptr = sptr0 + iys*step + ixs*cn; + for( k = 0; k < cn; k++ ) + { + float v00 = sptr[k]; + float v01 = sptr[cn + k]; + float v10 = sptr[step + k]; + float v11 = sptr[step + cn + k]; + + v00 = v00 + xs*(v01 - v00); + v10 = v10 + xs*(v11 - v10); + v00 = v00 + ys*(v10 - v00); + dptr[k] = (uchar)cvRound(v00); + } + } + break; + case CV_16U: + { + const ushort* sptr = (const ushort*)sptr0 + iys*step + ixs*cn; + for( k = 0; k < cn; k++ ) + { + float v00 = sptr[k]; + float v01 = sptr[cn + k]; + float v10 = sptr[step + k]; + float v11 = sptr[step + cn + k]; + + v00 = v00 + xs*(v01 - v00); + v10 = v10 + xs*(v11 - v10); + v00 = v00 + ys*(v10 - v00); + ((ushort*)dptr)[k] = (ushort)cvRound(v00); + } + } + break; + case CV_32F: + { + const float* sptr = (const float*)sptr0 + iys*step + ixs*cn; + for( k = 0; k < cn; k++ ) + { + float v00 = sptr[k]; + float v01 = sptr[cn + k]; + float v10 = sptr[step + k]; + float v11 = sptr[step + cn + k]; + + v00 = v00 + xs*(v01 - v00); + v10 = v10 + xs*(v11 - v10); + v00 = v00 + ys*(v10 - v00); + ((float*)dptr)[k] = (float)v00; + } + } + break; + default: + assert(0); + } + } + } +} + +class CV_ImgWarpBaseTest : public cvtest::ArrayTest +{ +public: + CV_ImgWarpBaseTest( bool warp_matrix ); + +protected: + int read_params( CvFileStorage* fs ); + int prepare_test_case( int test_case_idx ); + void get_test_array_types_and_sizes( int test_case_idx, vector >& sizes, vector >& types ); + void get_minmax_bounds( int i, int j, int type, Scalar& low, Scalar& high ); + void fill_array( int test_case_idx, int i, int j, Mat& arr ); + + int interpolation; + int max_interpolation; + double spatial_scale_zoom, spatial_scale_decimate; +}; + + +CV_ImgWarpBaseTest::CV_ImgWarpBaseTest( bool warp_matrix ) +{ + test_array[INPUT].push_back(NULL); + if( warp_matrix ) + test_array[INPUT].push_back(NULL); + test_array[INPUT_OUTPUT].push_back(NULL); + test_array[REF_INPUT_OUTPUT].push_back(NULL); + max_interpolation = 5; + interpolation = 0; + element_wise_relative_error = false; + spatial_scale_zoom = 0.01; + spatial_scale_decimate = 0.005; +} + + +int CV_ImgWarpBaseTest::read_params( CvFileStorage* fs ) +{ + int code = cvtest::ArrayTest::read_params( fs ); + return code; +} + + +void CV_ImgWarpBaseTest::get_minmax_bounds( int i, int j, int type, Scalar& low, Scalar& high ) +{ + cvtest::ArrayTest::get_minmax_bounds( i, j, type, low, high ); + if( CV_MAT_DEPTH(type) == CV_32F ) + { + low = Scalar::all(-10.); + high = Scalar::all(10); + } +} + + +void CV_ImgWarpBaseTest::get_test_array_types_and_sizes( int test_case_idx, + vector >& sizes, vector >& types ) +{ + RNG& rng = ts->get_rng(); + int depth = cvtest::randInt(rng) % 3; + int cn = cvtest::randInt(rng) % 3 + 1; + cvtest::ArrayTest::get_test_array_types_and_sizes( test_case_idx, sizes, types ); + depth = depth == 0 ? CV_8U : depth == 1 ? 
CV_16U : CV_32F; + cn += cn == 2; + + types[INPUT][0] = types[INPUT_OUTPUT][0] = types[REF_INPUT_OUTPUT][0] = CV_MAKETYPE(depth, cn); + if( test_array[INPUT].size() > 1 ) + types[INPUT][1] = cvtest::randInt(rng) & 1 ? CV_32FC1 : CV_64FC1; + + interpolation = cvtest::randInt(rng) % max_interpolation; +} + + +void CV_ImgWarpBaseTest::fill_array( int test_case_idx, int i, int j, Mat& arr ) +{ + if( i != INPUT || j != 0 ) + cvtest::ArrayTest::fill_array( test_case_idx, i, j, arr ); +} + +int CV_ImgWarpBaseTest::prepare_test_case( int test_case_idx ) +{ + int code = cvtest::ArrayTest::prepare_test_case( test_case_idx ); + Mat& img = test_mat[INPUT][0]; + int i, j, cols = img.cols; + int type = img.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); + double scale = depth == CV_16U ? 1000. : 255.*0.5; + double space_scale = spatial_scale_decimate; + vector buffer(img.cols*cn); + + if( code <= 0 ) + return code; + + if( test_mat[INPUT_OUTPUT][0].cols >= img.cols && + test_mat[INPUT_OUTPUT][0].rows >= img.rows ) + space_scale = spatial_scale_zoom; + + for( i = 0; i < img.rows; i++ ) + { + uchar* ptr = img.ptr(i); + switch( cn ) + { + case 1: + for( j = 0; j < cols; j++ ) + buffer[j] = (float)((sin((i+1)*space_scale)*sin((j+1)*space_scale)+1.)*scale); + break; + case 2: + for( j = 0; j < cols; j++ ) + { + buffer[j*2] = (float)((sin((i+1)*space_scale)+1.)*scale); + buffer[j*2+1] = (float)((sin((i+j)*space_scale)+1.)*scale); + } + break; + case 3: + for( j = 0; j < cols; j++ ) + { + buffer[j*3] = (float)((sin((i+1)*space_scale)+1.)*scale); + buffer[j*3+1] = (float)((sin(j*space_scale)+1.)*scale); + buffer[j*3+2] = (float)((sin((i+j)*space_scale)+1.)*scale); + } + break; + case 4: + for( j = 0; j < cols; j++ ) + { + buffer[j*4] = (float)((sin((i+1)*space_scale)+1.)*scale); + buffer[j*4+1] = (float)((sin(j*space_scale)+1.)*scale); + buffer[j*4+2] = (float)((sin((i+j)*space_scale)+1.)*scale); + buffer[j*4+3] = (float)((sin((i-j)*space_scale)+1.)*scale); + } + break; + default: + assert(0); + } + + /*switch( depth ) + { + case CV_8U: + for( j = 0; j < cols*cn; j++ ) + ptr[j] = (uchar)cvRound(buffer[j]); + break; + case CV_16U: + for( j = 0; j < cols*cn; j++ ) + ((ushort*)ptr)[j] = (ushort)cvRound(buffer[j]); + break; + case CV_32F: + for( j = 0; j < cols*cn; j++ ) + ((float*)ptr)[j] = (float)buffer[j]; + break; + default: + assert(0); + }*/ + cv::Mat src(1, cols*cn, CV_32F, &buffer[0]); + cv::Mat dst(1, cols*cn, depth, ptr); + src.convertTo(dst, dst.type()); + } + + return code; +} + + +class CV_UndistortTest : public CV_ImgWarpBaseTest +{ +public: + CV_UndistortTest(); + +protected: + void get_test_array_types_and_sizes( int test_case_idx, vector >& sizes, vector >& types ); + void run_func(); + int prepare_test_case( int test_case_idx ); + void prepare_to_validation( int /*test_case_idx*/ ); + double get_success_error_level( int test_case_idx, int i, int j ); + void fill_array( int test_case_idx, int i, int j, Mat& arr ); + +private: + bool useCPlus; + cv::Mat input0; + cv::Mat input1; + cv::Mat input2; + cv::Mat input_new_cam; + cv::Mat input_output; + + bool zero_new_cam; + bool zero_distortion; +}; + + +CV_UndistortTest::CV_UndistortTest() : CV_ImgWarpBaseTest( false ) +{ + //spatial_scale_zoom = spatial_scale_decimate; + test_array[INPUT].push_back(NULL); + test_array[INPUT].push_back(NULL); + test_array[INPUT].push_back(NULL); + + spatial_scale_decimate = spatial_scale_zoom; +} + + +void CV_UndistortTest::get_test_array_types_and_sizes( int test_case_idx, vector >& sizes, vector >& types ) 
+{ + RNG& rng = ts->get_rng(); + CV_ImgWarpBaseTest::get_test_array_types_and_sizes( test_case_idx, sizes, types ); + int type = types[INPUT][0]; + type = CV_MAKETYPE( CV_8U, CV_MAT_CN(type) ); + types[INPUT][0] = types[INPUT_OUTPUT][0] = types[REF_INPUT_OUTPUT][0] = type; + types[INPUT][1] = cvtest::randInt(rng)%2 ? CV_64F : CV_32F; + types[INPUT][2] = cvtest::randInt(rng)%2 ? CV_64F : CV_32F; + sizes[INPUT][1] = cvSize(3,3); + sizes[INPUT][2] = cvtest::randInt(rng)%2 ? cvSize(4,1) : cvSize(1,4); + types[INPUT][3] = types[INPUT][1]; + sizes[INPUT][3] = sizes[INPUT][1]; + interpolation = CV_INTER_LINEAR; +} + + +void CV_UndistortTest::fill_array( int test_case_idx, int i, int j, Mat& arr ) +{ + if( i != INPUT ) + CV_ImgWarpBaseTest::fill_array( test_case_idx, i, j, arr ); +} + + +void CV_UndistortTest::run_func() +{ + if (!useCPlus) + { + CvMat a = cvMat(test_mat[INPUT][1]), k = cvMat(test_mat[INPUT][2]); + cvUndistort2( test_array[INPUT][0], test_array[INPUT_OUTPUT][0], &a, &k); + } + else + { + if (zero_distortion) + { + cv::undistort(input0,input_output,input1,cv::Mat()); + } + else + { + cv::undistort(input0,input_output,input1,input2); + } + } +} + + +double CV_UndistortTest::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ ) +{ + int depth = test_mat[INPUT][0].depth(); + return depth == CV_8U ? 16 : depth == CV_16U ? 1024 : 5e-2; +} + + +int CV_UndistortTest::prepare_test_case( int test_case_idx ) +{ + RNG& rng = ts->get_rng(); + int code = CV_ImgWarpBaseTest::prepare_test_case( test_case_idx ); + + const Mat& src = test_mat[INPUT][0]; + double k[4], a[9] = {0,0,0,0,0,0,0,0,1}; + double new_cam[9] = {0,0,0,0,0,0,0,0,1}; + double sz = MAX(src.rows, src.cols); + + Mat& _new_cam0 = test_mat[INPUT][3]; + Mat _new_cam(test_mat[INPUT][3].rows,test_mat[INPUT][3].cols,CV_64F,new_cam); + Mat& _a0 = test_mat[INPUT][1]; + Mat _a(3,3,CV_64F,a); + Mat& _k0 = test_mat[INPUT][2]; + Mat _k(_k0.rows,_k0.cols, CV_MAKETYPE(CV_64F,_k0.channels()),k); + + if( code <= 0 ) + return code; + + double aspect_ratio = cvtest::randReal(rng)*0.6 + 0.7; + a[2] = (src.cols - 1)*0.5 + cvtest::randReal(rng)*10 - 5; + a[5] = (src.rows - 1)*0.5 + cvtest::randReal(rng)*10 - 5; + a[0] = sz/(0.9 - cvtest::randReal(rng)*0.6); + a[4] = aspect_ratio*a[0]; + k[0] = cvtest::randReal(rng)*0.06 - 0.03; + k[1] = cvtest::randReal(rng)*0.06 - 0.03; + if( k[0]*k[1] > 0 ) + k[1] = -k[1]; + if( cvtest::randInt(rng)%4 != 0 ) + { + k[2] = cvtest::randReal(rng)*0.004 - 0.002; + k[3] = cvtest::randReal(rng)*0.004 - 0.002; + } + else + k[2] = k[3] = 0; + + new_cam[0] = a[0] + (cvtest::randReal(rng) - (double)0.5)*0.2*a[0]; //10% + new_cam[4] = a[4] + (cvtest::randReal(rng) - (double)0.5)*0.2*a[4]; //10% + new_cam[2] = a[2] + (cvtest::randReal(rng) - (double)0.5)*0.3*test_mat[INPUT][0].rows; //15% + new_cam[5] = a[5] + (cvtest::randReal(rng) - (double)0.5)*0.3*test_mat[INPUT][0].cols; //15% + + _a.convertTo(_a0, _a0.depth()); + + zero_distortion = (cvtest::randInt(rng)%2) == 0 ? false : true; + _k.convertTo(_k0, _k0.depth()); + + zero_new_cam = (cvtest::randInt(rng)%2) == 0 ? 
false : true; + _new_cam.convertTo(_new_cam0, _new_cam0.depth()); + + //Testing C++ code + useCPlus = ((cvtest::randInt(rng) % 2)!=0); + if (useCPlus) + { + input0 = test_mat[INPUT][0]; + input1 = test_mat[INPUT][1]; + input2 = test_mat[INPUT][2]; + input_new_cam = test_mat[INPUT][3]; + } + + return code; +} + + +void CV_UndistortTest::prepare_to_validation( int /*test_case_idx*/ ) +{ + if (useCPlus) + { + Mat& output = test_mat[INPUT_OUTPUT][0]; + input_output.convertTo(output, output.type()); + } + Mat& src = test_mat[INPUT][0]; + Mat& dst = test_mat[REF_INPUT_OUTPUT][0]; + Mat& dst0 = test_mat[INPUT_OUTPUT][0]; + Mat mapx, mapy; + cvtest::initUndistortMap( test_mat[INPUT][1], test_mat[INPUT][2], dst.size(), mapx, mapy ); + Mat mask( dst.size(), CV_8U ); + test_remap( src, dst, mapx, mapy, &mask, interpolation ); + dst.setTo(Scalar::all(0), mask); + dst0.setTo(Scalar::all(0), mask); +} + + +class CV_UndistortMapTest : public cvtest::ArrayTest +{ +public: + CV_UndistortMapTest(); + +protected: + void get_test_array_types_and_sizes( int test_case_idx, vector >& sizes, vector >& types ); + void run_func(); + int prepare_test_case( int test_case_idx ); + void prepare_to_validation( int /*test_case_idx*/ ); + double get_success_error_level( int test_case_idx, int i, int j ); + void fill_array( int test_case_idx, int i, int j, Mat& arr ); + +private: + bool dualChannel; +}; + + +CV_UndistortMapTest::CV_UndistortMapTest() +{ + test_array[INPUT].push_back(NULL); + test_array[INPUT].push_back(NULL); + test_array[OUTPUT].push_back(NULL); + test_array[OUTPUT].push_back(NULL); + test_array[REF_OUTPUT].push_back(NULL); + test_array[REF_OUTPUT].push_back(NULL); + + element_wise_relative_error = false; +} + + +void CV_UndistortMapTest::get_test_array_types_and_sizes( int test_case_idx, vector >& sizes, vector >& types ) +{ + RNG& rng = ts->get_rng(); + cvtest::ArrayTest::get_test_array_types_and_sizes( test_case_idx, sizes, types ); + int depth = cvtest::randInt(rng)%2 ? CV_64F : CV_32F; + + Size sz = sizes[OUTPUT][0]; + types[INPUT][0] = types[INPUT][1] = depth; + dualChannel = cvtest::randInt(rng)%2 == 0; + types[OUTPUT][0] = types[OUTPUT][1] = + types[REF_OUTPUT][0] = types[REF_OUTPUT][1] = dualChannel ? CV_32FC2 : CV_32F; + sizes[INPUT][0] = cvSize(3,3); + sizes[INPUT][1] = cvtest::randInt(rng)%2 ? 
cvSize(4,1) : cvSize(1,4); + + sz.width = MAX(sz.width,16); + sz.height = MAX(sz.height,16); + sizes[OUTPUT][0] = sizes[OUTPUT][1] = + sizes[REF_OUTPUT][0] = sizes[REF_OUTPUT][1] = sz; +} + + +void CV_UndistortMapTest::fill_array( int test_case_idx, int i, int j, Mat& arr ) +{ + if( i != INPUT ) + cvtest::ArrayTest::fill_array( test_case_idx, i, j, arr ); +} + + +void CV_UndistortMapTest::run_func() +{ + CvMat a = cvMat(test_mat[INPUT][0]), k = cvMat(test_mat[INPUT][1]); + + if (!dualChannel ) + cvInitUndistortMap( &a, &k, test_array[OUTPUT][0], test_array[OUTPUT][1] ); + else + cvInitUndistortMap( &a, &k, test_array[OUTPUT][0], 0 ); +} + + +double CV_UndistortMapTest::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ ) +{ + return 1e-3; +} + + +int CV_UndistortMapTest::prepare_test_case( int test_case_idx ) +{ + RNG& rng = ts->get_rng(); + int code = cvtest::ArrayTest::prepare_test_case( test_case_idx ); + const Mat& mapx = test_mat[OUTPUT][0]; + double k[4], a[9] = {0,0,0,0,0,0,0,0,1}; + double sz = MAX(mapx.rows, mapx.cols); + Mat& _a0 = test_mat[INPUT][0], &_k0 = test_mat[INPUT][1]; + Mat _a(3,3,CV_64F,a); + Mat _k(_k0.rows,_k0.cols, CV_MAKETYPE(CV_64F,_k0.channels()),k); + + if( code <= 0 ) + return code; + + double aspect_ratio = cvtest::randReal(rng)*0.6 + 0.7; + a[2] = (mapx.cols - 1)*0.5 + cvtest::randReal(rng)*10 - 5; + a[5] = (mapx.rows - 1)*0.5 + cvtest::randReal(rng)*10 - 5; + a[0] = sz/(0.9 - cvtest::randReal(rng)*0.6); + a[4] = aspect_ratio*a[0]; + k[0] = cvtest::randReal(rng)*0.06 - 0.03; + k[1] = cvtest::randReal(rng)*0.06 - 0.03; + if( k[0]*k[1] > 0 ) + k[1] = -k[1]; + k[2] = cvtest::randReal(rng)*0.004 - 0.002; + k[3] = cvtest::randReal(rng)*0.004 - 0.002; + + _a.convertTo(_a0, _a0.depth()); + _k.convertTo(_k0, _k0.depth()); + + if (dualChannel) + { + test_mat[REF_OUTPUT][1] = Scalar::all(0); + test_mat[OUTPUT][1] = Scalar::all(0); + } + + return code; +} + + +void CV_UndistortMapTest::prepare_to_validation( int ) +{ + Mat mapx, mapy; + cvtest::initUndistortMap( test_mat[INPUT][0], test_mat[INPUT][1], test_mat[REF_OUTPUT][0].size(), mapx, mapy ); + if( !dualChannel ) + { + mapx.copyTo(test_mat[REF_OUTPUT][0]); + mapy.copyTo(test_mat[REF_OUTPUT][1]); + } + else + { + Mat p[2] = {mapx, mapy}; + cv::merge(p, 2, test_mat[REF_OUTPUT][0]); + } +} + +TEST(Calib3d_Undistort, accuracy) { CV_UndistortTest test; test.safe_run(); } +TEST(Calib3d_InitUndistortMap, accuracy) { CV_UndistortMapTest test; test.safe_run(); } + }} // namespace diff --git a/modules/calib3d/test/test_undistort_badarg.cpp b/modules/calib3d/test/test_undistort_badarg.cpp index 4e4587d3c6..11de8c91ae 100644 --- a/modules/calib3d/test/test_undistort_badarg.cpp +++ b/modules/calib3d/test/test_undistort_badarg.cpp @@ -40,7 +40,7 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/calib3d/calib3d_c.h" namespace opencv_test { namespace { diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 67d363057b..6ff7abb97c 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -329,12 +329,6 @@ enum AdaptiveThresholdTypes { ADAPTIVE_THRESH_GAUSSIAN_C = 1 }; -//! cv::undistort mode -enum UndistortTypes { - PROJ_SPHERICAL_ORTHO = 0, - PROJ_SPHERICAL_EQRECT = 1 - }; - //! 
class of the pixel in GrabCut algorithm enum GrabCutClasses { GC_BGD = 0, //!< an obvious background pixels @@ -2977,193 +2971,6 @@ CV_EXPORTS void buildPyramid( InputArray src, OutputArrayOfArrays dst, //! @} imgproc_filter -//! @addtogroup imgproc_transform -//! @{ - -/** @brief Transforms an image to compensate for lens distortion. - -The function transforms an image to compensate radial and tangential lens distortion. - -The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap -(with bilinear interpolation). See the former function for details of the transformation being -performed. - -Those pixels in the destination image, for which there is no correspondent pixels in the source -image, are filled with zeros (black color). - -A particular subset of the source image that will be visible in the corrected image can be regulated -by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate -newCameraMatrix depending on your requirements. - -The camera matrix and the distortion parameters can be determined using #calibrateCamera. If -the resolution of images is different from the resolution used at the calibration stage, \f$f_x, -f_y, c_x\f$ and \f$c_y\f$ need to be scaled accordingly, while the distortion coefficients remain -the same. - -@param src Input (distorted) image. -@param dst Output (corrected) image that has the same size and type as src . -@param cameraMatrix Input camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . -@param distCoeffs Input vector of distortion coefficients -\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ -of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. -@param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as -cameraMatrix but you may additionally scale and shift the result by using a different matrix. - */ -CV_EXPORTS_W void undistort( InputArray src, OutputArray dst, - InputArray cameraMatrix, - InputArray distCoeffs, - InputArray newCameraMatrix = noArray() ); - -/** @brief Computes the undistortion and rectification transformation map. - -The function computes the joint undistortion and rectification transformation and represents the -result in the form of maps for remap. The undistorted image looks like original, as if it is -captured with a camera using the camera matrix =newCameraMatrix and zero distortion. In case of a -monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by -#getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera, -newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify . - -Also, this new camera is oriented differently in the coordinate space, according to R. That, for -example, helps to align two heads of a stereo camera so that the epipolar lines on both images -become horizontal and have the same y- coordinate (in case of a horizontally aligned stereo camera). - -The function actually builds the maps for the inverse mapping algorithm that is used by remap. That -is, for each pixel \f$(u, v)\f$ in the destination (corrected and rectified) image, the function -computes the corresponding coordinates in the source image (that is, in the original image from -camera). 
The following process is applied: -\f[ -\begin{array}{l} -x \leftarrow (u - {c'}_x)/{f'}_x \\ -y \leftarrow (v - {c'}_y)/{f'}_y \\ -{[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\ -x' \leftarrow X/W \\ -y' \leftarrow Y/W \\ -r^2 \leftarrow x'^2 + y'^2 \\ -x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} -+ 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4\\ -y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} -+ p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\ -s\vecthree{x'''}{y'''}{1} = -\vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}((\tau_x, \tau_y)} -{0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)} -{0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\ -map_x(u,v) \leftarrow x''' f_x + c_x \\ -map_y(u,v) \leftarrow y''' f_y + c_y -\end{array} -\f] -where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ -are the distortion coefficients. - -In case of a stereo camera, this function is called twice: once for each camera head, after -stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera -was not calibrated, it is still possible to compute the rectification transformations directly from -the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes -homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D -space. R can be computed from H as -\f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f] -where cameraMatrix can be chosen arbitrarily. - -@param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . -@param distCoeffs Input vector of distortion coefficients -\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ -of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. -@param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 , -computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation -is assumed. In cvInitUndistortMap R assumed to be an identity matrix. -@param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$. -@param size Undistorted image size. -@param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps -@param map1 The first output map. -@param map2 The second output map. - */ -CV_EXPORTS_W void initUndistortRectifyMap( InputArray cameraMatrix, InputArray distCoeffs, - InputArray R, InputArray newCameraMatrix, - Size size, int m1type, OutputArray map1, OutputArray map2 ); - -//! initializes maps for #remap for wide-angle -CV_EXPORTS_W float initWideAngleProjMap( InputArray cameraMatrix, InputArray distCoeffs, - Size imageSize, int destImageWidth, - int m1type, OutputArray map1, OutputArray map2, - int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0); - -/** @brief Returns the default new camera matrix. - -The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when -centerPrinicipalPoint=false ), or the modified one (when centerPrincipalPoint=true). 
- -In the latter case, the new camera matrix will be: - -\f[\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,\f] - -where \f$f_x\f$ and \f$f_y\f$ are \f$(0,0)\f$ and \f$(1,1)\f$ elements of cameraMatrix, respectively. - -By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not -move the principal point. However, when you work with stereo, it is important to move the principal -points in both views to the same y-coordinate (which is required by most of stereo correspondence -algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for -each view where the principal points are located at the center. - -@param cameraMatrix Input camera matrix. -@param imgsize Camera view image size in pixels. -@param centerPrincipalPoint Location of the principal point in the new camera matrix. The -parameter indicates whether this location should be at the image center or not. - */ -CV_EXPORTS_W Mat getDefaultNewCameraMatrix( InputArray cameraMatrix, Size imgsize = Size(), - bool centerPrincipalPoint = false ); - -/** @brief Computes the ideal point coordinates from the observed point coordinates. - -The function is similar to #undistort and #initUndistortRectifyMap but it operates on a -sparse set of points instead of a raster image. Also the function performs a reverse transformation -to projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a -planar object, it does, up to a translation vector, if the proper R is specified. - -For each observed point coordinate \f$(u, v)\f$ the function computes: -\f[ -\begin{array}{l} -x^{"} \leftarrow (u - c_x)/f_x \\ -y^{"} \leftarrow (v - c_y)/f_y \\ -(x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\ -{[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\ -x \leftarrow X/W \\ -y \leftarrow Y/W \\ -\text{only performed if P is specified:} \\ -u' \leftarrow x {f'}_x + {c'}_x \\ -v' \leftarrow y {f'}_y + {c'}_y -\end{array} -\f] - -where *undistort* is an approximate iterative algorithm that estimates the normalized original -point coordinates out of the normalized distorted point coordinates ("normalized" means that the -coordinates do not depend on the camera matrix). - -The function can be used for both a stereo camera head or a monocular camera (when R is empty). - -@param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2). -@param dst Output ideal point coordinates after undistortion and reverse perspective -transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates. -@param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . -@param distCoeffs Input vector of distortion coefficients -\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ -of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. -@param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by -#stereoRectify can be passed here. If the matrix is empty, the identity transformation is used. -@param P New camera matrix (3x3) or new projection matrix (3x4) \f$\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}\f$. P1 or P2 computed by -#stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used. 
- */ -CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst, - InputArray cameraMatrix, InputArray distCoeffs, - InputArray R = noArray(), InputArray P = noArray()); -/** @overload - @note Default version of #undistortPoints does 5 iterations to compute undistorted points. - - */ -CV_EXPORTS_AS(undistortPointsIter) void undistortPoints( InputArray src, OutputArray dst, - InputArray cameraMatrix, InputArray distCoeffs, - InputArray R, InputArray P, TermCriteria criteria); - -//! @} imgproc_transform - //! @addtogroup imgproc_hist //! @{ diff --git a/modules/imgproc/include/opencv2/imgproc/imgproc_c.h b/modules/imgproc/include/opencv2/imgproc/imgproc_c.h index cec0f3653a..9f7131f7b8 100644 --- a/modules/imgproc/include/opencv2/imgproc/imgproc_c.h +++ b/modules/imgproc/include/opencv2/imgproc/imgproc_c.h @@ -273,39 +273,6 @@ CVAPI(void) cvLinearPolar( const CvArr* src, CvArr* dst, CvPoint2D32f center, double maxRadius, int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS)); -/** @brief Transforms the input image to compensate lens distortion -@see cv::undistort -*/ -CVAPI(void) cvUndistort2( const CvArr* src, CvArr* dst, - const CvMat* camera_matrix, - const CvMat* distortion_coeffs, - const CvMat* new_camera_matrix CV_DEFAULT(0) ); - -/** @brief Computes transformation map from intrinsic camera parameters - that can used by cvRemap -*/ -CVAPI(void) cvInitUndistortMap( const CvMat* camera_matrix, - const CvMat* distortion_coeffs, - CvArr* mapx, CvArr* mapy ); - -/** @brief Computes undistortion+rectification map for a head of stereo camera -@see cv::initUndistortRectifyMap -*/ -CVAPI(void) cvInitUndistortRectifyMap( const CvMat* camera_matrix, - const CvMat* dist_coeffs, - const CvMat *R, const CvMat* new_camera_matrix, - CvArr* mapx, CvArr* mapy ); - -/** @brief Computes the original (undistorted) feature coordinates - from the observed (distorted) coordinates -@see cv::undistortPoints -*/ -CVAPI(void) cvUndistortPoints( const CvMat* src, CvMat* dst, - const CvMat* camera_matrix, - const CvMat* dist_coeffs, - const CvMat* R CV_DEFAULT(0), - const CvMat* P CV_DEFAULT(0)); - /** @brief Returns a structuring element of the specified size and shape for morphological operations. 
@note the created structuring element IplConvKernel\* element must be released in the end using diff --git a/modules/imgproc/misc/java/test/ImgprocTest.java b/modules/imgproc/misc/java/test/ImgprocTest.java index fc9d92ed9b..5534905159 100644 --- a/modules/imgproc/misc/java/test/ImgprocTest.java +++ b/modules/imgproc/misc/java/test/ImgprocTest.java @@ -891,21 +891,6 @@ public class ImgprocTest extends OpenCVTestCase { assertMatEqual(truth, transform, EPS); } - public void testGetDefaultNewCameraMatrixMat() { - Mat mtx = Imgproc.getDefaultNewCameraMatrix(gray0); - - assertFalse(mtx.empty()); - assertEquals(0, Core.countNonZero(mtx)); - } - - public void testGetDefaultNewCameraMatrixMatSizeBoolean() { - Mat mtx = Imgproc.getDefaultNewCameraMatrix(gray0, size, true); - - assertFalse(mtx.empty()); - assertFalse(0 == Core.countNonZero(mtx)); - // TODO_: write better test - } - public void testGetDerivKernelsMatMatIntIntInt() { Mat kx = new Mat(imgprocSz, imgprocSz, CvType.CV_32F); Mat ky = new Mat(imgprocSz, imgprocSz, CvType.CV_32F); @@ -1139,52 +1124,6 @@ public class ImgprocTest extends OpenCVTestCase { fail("Not yet implemented"); } - public void testInitUndistortRectifyMap() { - fail("Not yet implemented"); - Mat cameraMatrix = new Mat(3, 3, CvType.CV_32F); - cameraMatrix.put(0, 0, 1, 0, 1); - cameraMatrix.put(1, 0, 0, 1, 1); - cameraMatrix.put(2, 0, 0, 0, 1); - - Mat R = new Mat(3, 3, CvType.CV_32F, new Scalar(2)); - Mat newCameraMatrix = new Mat(3, 3, CvType.CV_32F, new Scalar(3)); - - Mat distCoeffs = new Mat(); - Mat map1 = new Mat(); - Mat map2 = new Mat(); - - // TODO: complete this test - Imgproc.initUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, size, CvType.CV_32F, map1, map2); - } - - public void testInitWideAngleProjMapMatMatSizeIntIntMatMat() { - fail("Not yet implemented"); - Mat cameraMatrix = new Mat(3, 3, CvType.CV_32F); - Mat distCoeffs = new Mat(1, 4, CvType.CV_32F); - // Size imageSize = new Size(2, 2); - - cameraMatrix.put(0, 0, 1, 0, 1); - cameraMatrix.put(1, 0, 0, 1, 2); - cameraMatrix.put(2, 0, 0, 0, 1); - - distCoeffs.put(0, 0, 1, 3, 2, 4); - truth = new Mat(3, 3, CvType.CV_32F); - truth.put(0, 0, 0, 0, 0); - truth.put(1, 0, 0, 0, 0); - truth.put(2, 0, 0, 3, 0); - // TODO: No documentation for this function - // Imgproc.initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, - // 5, m1type, truthput1, truthput2); - } - - public void testInitWideAngleProjMapMatMatSizeIntIntMatMatInt() { - fail("Not yet implemented"); - } - - public void testInitWideAngleProjMapMatMatSizeIntIntMatMatIntDouble() { - fail("Not yet implemented"); - } - public void testIntegral2MatMatMat() { Mat src = new Mat(3, 3, CvType.CV_32F, new Scalar(3)); Mat expSum = new Mat(4, 4, CvType.CV_64F); @@ -1748,72 +1687,6 @@ public class ImgprocTest extends OpenCVTestCase { assertMatEqual(makeMask(gray255.clone(), 0), dst); } - public void testUndistortMatMatMatMat() { - Mat src = new Mat(3, 3, CvType.CV_32F, new Scalar(3)); - Mat cameraMatrix = new Mat(3, 3, CvType.CV_32F) { - { - put(0, 0, 1, 0, 1); - put(1, 0, 0, 1, 2); - put(2, 0, 0, 0, 1); - } - }; - Mat distCoeffs = new Mat(1, 4, CvType.CV_32F) { - { - put(0, 0, 1, 3, 2, 4); - } - }; - - Imgproc.undistort(src, dst, cameraMatrix, distCoeffs); - - truth = new Mat(3, 3, CvType.CV_32F) { - { - put(0, 0, 0, 0, 0); - put(1, 0, 0, 0, 0); - put(2, 0, 0, 3, 0); - } - }; - assertMatEqual(truth, dst, EPS); - } - - public void testUndistortMatMatMatMatMat() { - Mat src = new Mat(3, 3, CvType.CV_32F, new Scalar(3)); - Mat cameraMatrix = 
new Mat(3, 3, CvType.CV_32F) { - { - put(0, 0, 1, 0, 1); - put(1, 0, 0, 1, 2); - put(2, 0, 0, 0, 1); - } - }; - Mat distCoeffs = new Mat(1, 4, CvType.CV_32F) { - { - put(0, 0, 2, 1, 4, 5); - } - }; - Mat newCameraMatrix = new Mat(3, 3, CvType.CV_32F, new Scalar(1)); - - Imgproc.undistort(src, dst, cameraMatrix, distCoeffs, newCameraMatrix); - - truth = new Mat(3, 3, CvType.CV_32F, new Scalar(3)); - assertMatEqual(truth, dst, EPS); - } - - //undistortPoints(List src, List dst, Mat cameraMatrix, Mat distCoeffs) - public void testUndistortPointsListOfPointListOfPointMatMat() { - MatOfPoint2f src = new MatOfPoint2f(new Point(1, 2), new Point(3, 4), new Point(-1, -1)); - MatOfPoint2f dst = new MatOfPoint2f(); - Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64FC1); - Mat distCoeffs = new Mat(8, 1, CvType.CV_64FC1, new Scalar(0)); - - Imgproc.undistortPoints(src, dst, cameraMatrix, distCoeffs); - - assertEquals(src.size(), dst.size()); - for(int i=0; i