Fix issue with solvePnPRansac and Nx3 1-channel input when the number of points is 5. Try to make the input shapes of projectPoints and undistortPoints uniform.

pull/14447/head
catree 6 years ago
parent a433394870
commit 7ed858e38e
  1. modules/calib3d/include/opencv2/calib3d.hpp (2 changes)
  2. modules/calib3d/perf/perf_pnp.cpp (4 changes)
  3. modules/calib3d/src/calibration.cpp (7 changes)
  4. modules/calib3d/src/solvepnp.cpp (13 changes)
  5. modules/calib3d/test/test_cameracalibration.cpp (177 changes)
  6. modules/calib3d/test/test_solvepnp_ransac.cpp (372 changes)
  7. modules/calib3d/test/test_undistort.cpp (176 changes)
  8. modules/imgproc/include/opencv2/imgproc.hpp (12 changes)
  9. modules/imgproc/src/undistort.cpp (12 changes)
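For context, a minimal repro sketch of the failure this commit fixes (https://github.com/opencv/opencv/issues/14423): with exactly 5 points, solvePnPRansac runs its EPnP kernel on the whole point set, and an Nx3 1-channel objectPoints matrix used to make that call fail. The point values are copied from the regression test added below; the rest of the program is illustrative and not part of the patch.

    // repro sketch for issue 14423 (not part of the commit)
    #include <opencv2/calib3d.hpp>
    #include <vector>
    using namespace cv;

    int main()
    {
        // 5 object points in the Nx3 1-channel layout that used to fail.
        Mat objectPoints = (Mat_<float>(5, 3) << 12.00604f, -2.8654366f, 18.472504f,
                                                 7.6863389f, 4.9355154f, 11.146358f,
                                                 14.260933f, 2.8320458f, 12.582781f,
                                                 3.4562225f, 8.2668982f, 11.300434f,
                                                 15.316854f, 3.7486348f, 12.491116f);
        Matx31d rvecGT(0.9072420896651262, 0.09226497171882152, 0.8880772883671504);
        Matx31d tvecGT(7.376333362427632, 8.434449036856979, 13.79801619778456);
        Mat K = Mat::eye(3, 3, CV_64FC1);

        // Synthesize consistent image points, stored Nx2 1-channel.
        std::vector<Point2f> projected;
        projectPoints(objectPoints, rvecGT, tvecGT, K, noArray(), projected);
        Mat imagePoints(5, 2, CV_32FC1);
        for (int i = 0; i < 5; i++)
        {
            imagePoints.at<float>(i, 0) = projected[i].x;
            imagePoints.at<float>(i, 1) = projected[i].y;
        }

        Mat rvec, tvec;
        // This call failed before the commit for 5-point Nx3 1-channel input.
        bool ok = solvePnPRansac(objectPoints, imagePoints, K, noArray(), rvec, tvec);
        return ok ? 0 : 1;
    }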

@@ -516,7 +516,7 @@ vector\<Point3f\> ), where N is the number of points in the view.
@param distCoeffs Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
-@param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
+@param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
vector\<Point2f\> .
@param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
points with respect to components of the rotation vector, translation vector, focal lengths,
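A quick usage sketch matching the documented shapes (illustrative, not from the patch): object points in, one Point2f per point out.

    // assumes #include <opencv2/calib3d.hpp>
    std::vector<cv::Point3f> obj = { {-0.1f, 0.1f, 0.f}, {0.1f, 0.1f, 0.f} };
    std::vector<cv::Point2f> img;                  // receives one Point2f per object point
    cv::Matx31d rvec(0, 0, 0), tvec(0, 0, 1);      // camera one unit in front of the plane
    cv::projectPoints(obj, rvec, tvec, cv::Matx33d::eye(), cv::noArray(), img);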

@@ -12,8 +12,8 @@ typedef perf::TestBaseWithParam<PointsNum_Algo_t> PointsNum_Algo;
typedef perf::TestBaseWithParam<int> PointsNum;
PERF_TEST_P(PointsNum_Algo, solvePnP,
-testing::Combine(
-    testing::Values(5, 3*9, 7*13), //TODO: find why results on 4 points are too unstable
+testing::Combine( //When non planar, DLT needs at least 6 points for SOLVEPNP_ITERATIVE flag
+    testing::Values(6, 3*9, 7*13), //TODO: find why results on 4 points are too unstable
testing::Values((int)SOLVEPNP_ITERATIVE, (int)SOLVEPNP_EPNP, (int)SOLVEPNP_UPNP, (int)SOLVEPNP_DLS)
)
)

@@ -1094,6 +1094,7 @@ CV_IMPL void cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
else
{
// non-planar structure. Use DLT method
+CV_CheckGE(count, 6, "DLT algorithm needs at least 6 points for pose estimation from 3D-2D point correspondences.");
double* L;
double LL[12*12], LW[12], LV[12*12], sc;
CvMat _LL = cvMat( 12, 12, CV_64F, LL );
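The bound checked above follows from the DLT system itself: each 3D-2D correspondence (X_i, Y_i, Z_i) <-> (u_i, v_i) contributes two linear equations in the 12 entries of the 3x4 projection matrix (11 unknowns up to scale), so 2N >= 11, i.e. N >= 6. In LaTeX form, the two equations per correspondence are:

    \begin{aligned}
    u_i\,(p_{31}X_i + p_{32}Y_i + p_{33}Z_i + p_{34}) &= p_{11}X_i + p_{12}Y_i + p_{13}Z_i + p_{14}\\
    v_i\,(p_{31}X_i + p_{32}Y_i + p_{33}Z_i + p_{34}) &= p_{21}X_i + p_{22}Y_i + p_{23}Z_i + p_{24}
    \end{aligned}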
@@ -3314,8 +3315,14 @@ void cv::projectPoints( InputArray _opoints,
{
Mat opoints = _opoints.getMat();
int npoints = opoints.checkVector(3), depth = opoints.depth();
+if (npoints < 0)
+    opoints = opoints.t();
+npoints = opoints.checkVector(3);
CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F));
+if (opoints.cols == 3)
+    opoints = opoints.reshape(3);
CvMat dpdrot, dpdt, dpdf, dpdc, dpddist;
CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0;
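The shape-normalization idiom added above, extracted as a standalone sketch (normalizeObjectPoints is a hypothetical helper, not part of the patch; it relies on checkVector(3) returning N when the Mat can be read as N 3-element items and -1 otherwise, which is its documented behavior):

    // assumes #include <opencv2/core.hpp>
    // Accept Nx3/3xN 1-channel as well as 1xN/Nx1 3-channel point arrays.
    static cv::Mat normalizeObjectPoints(const cv::Mat& in)
    {
        cv::Mat pts = in;
        int n = pts.checkVector(3);    // N if readable as N 3-element items, else -1
        if (n < 0)
        {
            pts = pts.t();             // points may be laid out 3xN; retry transposed
            n = pts.checkVector(3);
        }
        CV_Assert(n >= 0);
        if (pts.cols == 3)
            pts = pts.reshape(3);      // Nx3 1-channel -> Nx1 3-channel
        return pts;
    }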

@@ -245,6 +245,9 @@ bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
if( model_points == npoints )
{
+opoints = opoints.reshape(3);
+ipoints = ipoints.reshape(2);
bool result = solvePnP(opoints, ipoints, cameraMatrix, distCoeffs, _rvec, _tvec, useExtrinsicGuess, ransac_kernel_method);
if(!result)
@@ -350,6 +353,11 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,
CV_Assert( npoints == 3 || npoints == 4 );
CV_Assert( flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P );
+if (opoints.cols == 3)
+    opoints = opoints.reshape(3);
+if (ipoints.cols == 2)
+    ipoints = ipoints.reshape(2);
Mat cameraMatrix0 = _cameraMatrix.getMat();
Mat distCoeffs0 = _distCoeffs.getMat();
Mat cameraMatrix = Mat_<double>(cameraMatrix0);
@@ -745,6 +753,11 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
&& npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
+if (opoints.cols == 3)
+    opoints = opoints.reshape(3);
+if (ipoints.cols == 2)
+    ipoints = ipoints.reshape(2);
if( flags != SOLVEPNP_ITERATIVE )
useExtrinsicGuess = false;

@@ -2071,6 +2071,183 @@ TEST(Calib3d_CalibrationMatrixValues_C, accuracy) { CV_CalibrationMatrixValuesTest_C test; test.safe_run(); }
TEST(Calib3d_CalibrationMatrixValues_CPP, accuracy) { CV_CalibrationMatrixValuesTest_CPP test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_C, accuracy) { CV_ProjectPointsTest_C test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_CPP, regression) { CV_ProjectPointsTest_CPP test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_CPP, inputShape)
{
Matx31d rvec = Matx31d::zeros();
Matx31d tvec(0, 0, 1);
Matx33d cameraMatrix = Matx33d::eye();
const float L = 0.1f;
{
//3xN 1-channel
Mat objectPoints = (Mat_<float>(3, 2) << -L, L,
L, L,
0, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.cols, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//Nx3 1-channel
Mat objectPoints = (Mat_<float>(2, 3) << -L, L, 0,
L, L, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.rows, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//1xN 3-channel
Mat objectPoints(1, 2, CV_32FC3);
objectPoints.at<Vec3f>(0,0) = Vec3f(-L, L, 0);
objectPoints.at<Vec3f>(0,1) = Vec3f(L, L, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.cols, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//Nx1 3-channel
Mat objectPoints(2, 1, CV_32FC3);
objectPoints.at<Vec3f>(0,0) = Vec3f(-L, L, 0);
objectPoints.at<Vec3f>(1,0) = Vec3f(L, L, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.rows, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//vector<Point3f>
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f(L, L, 0));
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//vector<Point3d>
vector<Point3d> objectPoints;
objectPoints.push_back(Point3d(-L, L, 0));
objectPoints.push_back(Point3d(L, L, 0));
vector<Point2d> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<double>::epsilon());
}
}
TEST(Calib3d_ProjectPoints_CPP, outputShape)
{
Matx31d rvec = Matx31d::zeros();
Matx31d tvec(0, 0, 1);
Matx33d cameraMatrix = Matx33d::eye();
const float L = 0.1f;
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f( L, L, 0));
objectPoints.push_back(Point3f( L, -L, 0));
//Mat --> will be Nx1 2-channel
Mat imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(static_cast<int>(objectPoints.size()), imagePoints.rows);
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(0), -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(1), -L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f( L, L, 0));
objectPoints.push_back(Point3f( L, -L, 0));
//Nx1 2-channel
Mat imagePoints(3,1,CV_32FC2);
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(static_cast<int>(objectPoints.size()), imagePoints.rows);
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(0), -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(1), -L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f( L, L, 0));
objectPoints.push_back(Point3f( L, -L, 0));
//1xN 2-channel
Mat imagePoints(1,3,CV_32FC2);
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(static_cast<int>(objectPoints.size()), imagePoints.cols);
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(0), -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,1)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,1)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,2)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,2)(1), -L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f(L, L, 0));
//vector<Point2f>
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3d> objectPoints;
objectPoints.push_back(Point3d(-L, L, 0));
objectPoints.push_back(Point3d(L, L, 0));
//vector<Point2d>
vector<Point2d> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<double>::epsilon());
}
}
TEST(Calib3d_StereoCalibrate_C, regression) { CV_StereoCalibrationTest_C test; test.safe_run(); }
TEST(Calib3d_StereoCalibrate_CPP, regression) { CV_StereoCalibrationTest_CPP test; test.safe_run(); }
TEST(Calib3d_StereoCalibrateCorner, regression) { CV_StereoCalibrationCornerTest test; test.safe_run(); }

@@ -1211,6 +1211,7 @@ TEST(Calib3d_SolvePnP, translation)
p3d.push_back(Point3f(0,10,10));
p3d.push_back(Point3f(10,10,10));
p3d.push_back(Point3f(2,5,5));
+p3d.push_back(Point3f(-4,8,6));
vector<Point2f> p2d;
projectPoints(p3d, crvec, ctvec, cameraIntrinsic, noArray(), p2d);
@@ -1845,4 +1846,375 @@ TEST(Calib3d_SolvePnP, refine)
}
}
TEST(Calib3d_SolvePnPRansac, minPoints)
{
//https://github.com/opencv/opencv/issues/14423
Mat matK = Mat::eye(3,3,CV_64FC1);
Mat distCoeff = Mat::zeros(1,5,CV_64FC1);
Matx31d true_rvec(0.9072420896651262, 0.09226497171882152, 0.8880772883671504);
Matx31d true_tvec(7.376333362427632, 8.434449036856979, 13.79801619778456);
{
//nb points = 5 --> ransac_kernel_method = SOLVEPNP_EPNP
Mat keypoints13D = (Mat_<float>(5, 3) << 12.00604, -2.8654366, 18.472504,
7.6863389, 4.9355154, 11.146358,
14.260933, 2.8320458, 12.582781,
3.4562225, 8.2668982, 11.300434,
15.316854, 3.7486348, 12.491116);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, 2, CV_32FC1);
vector<Point3f> objectPoints;
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<float>(i,0) = imagesPoints[i].x;
keypoints22D.at<float>(i,1) = imagesPoints[i].y;
objectPoints.push_back(Point3f(keypoints13D.at<float>(i,0), keypoints13D.at<float>(i,1), keypoints13D.at<float>(i,2)));
}
Mat rvec = Mat::zeros(1,3,CV_64FC1);
Mat Tvec = Mat::zeros(1,3,CV_64FC1);
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
Mat rvec2, Tvec2;
solvePnP(objectPoints, imagesPoints, matK, distCoeff, rvec2, Tvec2, false, SOLVEPNP_EPNP);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-4);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-4);
EXPECT_LE(cvtest::norm(rvec, rvec2, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(Tvec, Tvec2, NORM_INF), 1e-6);
}
{
//nb points = 4 --> ransac_kernel_method = SOLVEPNP_P3P
Mat keypoints13D = (Mat_<float>(4, 3) << 12.00604, -2.8654366, 18.472504,
7.6863389, 4.9355154, 11.146358,
14.260933, 2.8320458, 12.582781,
3.4562225, 8.2668982, 11.300434);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, 2, CV_32FC1);
vector<Point3f> objectPoints;
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<float>(i,0) = imagesPoints[i].x;
keypoints22D.at<float>(i,1) = imagesPoints[i].y;
objectPoints.push_back(Point3f(keypoints13D.at<float>(i,0), keypoints13D.at<float>(i,1), keypoints13D.at<float>(i,2)));
}
Mat rvec = Mat::zeros(1,3,CV_64FC1);
Mat Tvec = Mat::zeros(1,3,CV_64FC1);
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
Mat rvec2, Tvec2;
solvePnP(objectPoints, imagesPoints, matK, distCoeff, rvec2, Tvec2, false, SOLVEPNP_P3P);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-4);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-4);
EXPECT_LE(cvtest::norm(rvec, rvec2, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(Tvec, Tvec2, NORM_INF), 1e-6);
}
}
TEST(Calib3d_SolvePnPRansac, inputShape)
{
//https://github.com/opencv/opencv/issues/14423
Mat matK = Mat::eye(3,3,CV_64FC1);
Mat distCoeff = Mat::zeros(1,5,CV_64FC1);
Matx31d true_rvec(0.9072420896651262, 0.09226497171882152, 0.8880772883671504);
Matx31d true_tvec(7.376333362427632, 8.434449036856979, 13.79801619778456);
{
//Nx3 1-channel
Mat keypoints13D = (Mat_<float>(6, 3) << 12.00604, -2.8654366, 18.472504,
7.6863389, 4.9355154, 11.146358,
14.260933, 2.8320458, 12.582781,
3.4562225, 8.2668982, 11.300434,
10.00604, 2.8654366, 15.472504,
-4.6863389, 5.9355154, 13.146358);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, 2, CV_32FC1);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<float>(i,0) = imagesPoints[i].x;
keypoints22D.at<float>(i,1) = imagesPoints[i].y;
}
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
{
//1xN 3-channel
Mat keypoints13D(1, 6, CV_32FC3);
keypoints13D.at<Vec3f>(0,0) = Vec3f(12.00604f, -2.8654366f, 18.472504f);
keypoints13D.at<Vec3f>(0,1) = Vec3f(7.6863389f, 4.9355154f, 11.146358f);
keypoints13D.at<Vec3f>(0,2) = Vec3f(14.260933f, 2.8320458f, 12.582781f);
keypoints13D.at<Vec3f>(0,3) = Vec3f(3.4562225f, 8.2668982f, 11.300434f);
keypoints13D.at<Vec3f>(0,4) = Vec3f(10.00604f, 2.8654366f, 15.472504f);
keypoints13D.at<Vec3f>(0,5) = Vec3f(-4.6863389f, 5.9355154f, 13.146358f);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, keypoints13D.cols, CV_32FC2);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<Vec2f>(0,i) = Vec2f(imagesPoints[i].x, imagesPoints[i].y);
}
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
{
//Nx1 3-channel
Mat keypoints13D(6, 1, CV_32FC3);
keypoints13D.at<Vec3f>(0,0) = Vec3f(12.00604f, -2.8654366f, 18.472504f);
keypoints13D.at<Vec3f>(1,0) = Vec3f(7.6863389f, 4.9355154f, 11.146358f);
keypoints13D.at<Vec3f>(2,0) = Vec3f(14.260933f, 2.8320458f, 12.582781f);
keypoints13D.at<Vec3f>(3,0) = Vec3f(3.4562225f, 8.2668982f, 11.300434f);
keypoints13D.at<Vec3f>(4,0) = Vec3f(10.00604f, 2.8654366f, 15.472504f);
keypoints13D.at<Vec3f>(5,0) = Vec3f(-4.6863389f, 5.9355154f, 13.146358f);
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, keypoints13D.cols, CV_32FC2);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<Vec2f>(i,0) = Vec2f(imagesPoints[i].x, imagesPoints[i].y);
}
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
{
//vector<Point3f>
vector<Point3f> keypoints13D;
keypoints13D.push_back(Point3f(12.00604f, -2.8654366f, 18.472504f));
keypoints13D.push_back(Point3f(7.6863389f, 4.9355154f, 11.146358f));
keypoints13D.push_back(Point3f(14.260933f, 2.8320458f, 12.582781f));
keypoints13D.push_back(Point3f(3.4562225f, 8.2668982f, 11.300434f));
keypoints13D.push_back(Point3f(10.00604f, 2.8654366f, 15.472504f));
keypoints13D.push_back(Point3f(-4.6863389f, 5.9355154f, 13.146358f));
vector<Point2f> keypoints22D;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, keypoints22D);
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
{
//vector<Point3d>
vector<Point3d> keypoints13D;
keypoints13D.push_back(Point3d(12.00604f, -2.8654366f, 18.472504f));
keypoints13D.push_back(Point3d(7.6863389f, 4.9355154f, 11.146358f));
keypoints13D.push_back(Point3d(14.260933f, 2.8320458f, 12.582781f));
keypoints13D.push_back(Point3d(3.4562225f, 8.2668982f, 11.300434f));
keypoints13D.push_back(Point3d(10.00604f, 2.8654366f, 15.472504f));
keypoints13D.push_back(Point3d(-4.6863389f, 5.9355154f, 13.146358f));
vector<Point2d> keypoints22D;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, keypoints22D);
Mat rvec, Tvec;
solvePnPRansac(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-6);
}
}
TEST(Calib3d_SolvePnP, inputShape)
{
//https://github.com/opencv/opencv/issues/14423
Mat matK = Mat::eye(3,3,CV_64FC1);
Mat distCoeff = Mat::zeros(1,5,CV_64FC1);
Matx31d true_rvec(0.407, 0.092, 0.88);
Matx31d true_tvec(0.576, -0.43, 1.3798);
vector<Point3d> objectPoints;
const double L = 0.5;
objectPoints.push_back(Point3d(-L, -L, L));
objectPoints.push_back(Point3d( L, -L, L));
objectPoints.push_back(Point3d( L, L, L));
objectPoints.push_back(Point3d(-L, L, L));
objectPoints.push_back(Point3d(-L, -L, -L));
objectPoints.push_back(Point3d( L, -L, -L));
const int methodsCount = 6;
int methods[] = {SOLVEPNP_ITERATIVE, SOLVEPNP_EPNP, SOLVEPNP_P3P, SOLVEPNP_AP3P, SOLVEPNP_IPPE, SOLVEPNP_IPPE_SQUARE};
for (int method = 0; method < methodsCount; method++)
{
if (methods[method] == SOLVEPNP_IPPE_SQUARE)
{
objectPoints[0] = Point3d(-L, L, 0);
objectPoints[1] = Point3d( L, L, 0);
objectPoints[2] = Point3d( L, -L, 0);
objectPoints[3] = Point3d(-L, -L, 0);
}
{
//Nx3 1-channel
Mat keypoints13D;
if (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE)
{
keypoints13D = Mat(4, 3, CV_32FC1);
}
else
{
keypoints13D = Mat(6, 3, CV_32FC1);
}
for (int i = 0; i < keypoints13D.rows; i++)
{
keypoints13D.at<float>(i,0) = static_cast<float>(objectPoints[i].x);
keypoints13D.at<float>(i,1) = static_cast<float>(objectPoints[i].y);
keypoints13D.at<float>(i,2) = static_cast<float>(objectPoints[i].z);
}
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, 2, CV_32FC1);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<float>(i,0) = imagesPoints[i].x;
keypoints22D.at<float>(i,1) = imagesPoints[i].y;
}
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
{
//1xN 3-channel
Mat keypoints13D;
if (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE)
{
keypoints13D = Mat(1, 4, CV_32FC3);
}
else
{
keypoints13D = Mat(1, 6, CV_32FC3);
}
for (int i = 0; i < keypoints13D.cols; i++)
{
keypoints13D.at<Vec3f>(0,i) = Vec3f(static_cast<float>(objectPoints[i].x),
static_cast<float>(objectPoints[i].y),
static_cast<float>(objectPoints[i].z));
}
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, keypoints13D.cols, CV_32FC2);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<Vec2f>(0,i) = Vec2f(imagesPoints[i].x, imagesPoints[i].y);
}
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
{
//Nx1 3-channel
Mat keypoints13D;
if (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE)
{
keypoints13D = Mat(4, 1, CV_32FC3);
}
else
{
keypoints13D = Mat(6, 1, CV_32FC3);
}
for (int i = 0; i < keypoints13D.rows; i++)
{
keypoints13D.at<Vec3f>(i,0) = Vec3f(static_cast<float>(objectPoints[i].x),
static_cast<float>(objectPoints[i].y),
static_cast<float>(objectPoints[i].z));
}
vector<Point2f> imagesPoints;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, imagesPoints);
Mat keypoints22D(keypoints13D.rows, keypoints13D.cols, CV_32FC2);
for (int i = 0; i < static_cast<int>(imagesPoints.size()); i++)
{
keypoints22D.at<Vec2f>(i,0) = Vec2f(imagesPoints[i].x, imagesPoints[i].y);
}
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
{
//vector<Point3f>
vector<Point3f> keypoints13D;
const int nbPts = (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE) ? 4 : 6;
for (int i = 0; i < nbPts; i++)
{
keypoints13D.push_back(Point3f(static_cast<float>(objectPoints[i].x),
static_cast<float>(objectPoints[i].y),
static_cast<float>(objectPoints[i].z)));
}
vector<Point2f> keypoints22D;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, keypoints22D);
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
{
//vector<Point3d>
vector<Point3d> keypoints13D;
const int nbPts = (methods[method] == SOLVEPNP_P3P || methods[method] == SOLVEPNP_AP3P ||
methods[method] == SOLVEPNP_IPPE || methods[method] == SOLVEPNP_IPPE_SQUARE) ? 4 : 6;
for (int i = 0; i < nbPts; i++)
{
keypoints13D.push_back(objectPoints[i]);
}
vector<Point2d> keypoints22D;
projectPoints(keypoints13D, true_rvec, true_tvec, matK, distCoeff, keypoints22D);
Mat rvec, Tvec;
solvePnP(keypoints13D, keypoints22D, matK, distCoeff, rvec, Tvec, false, methods[method]);
EXPECT_LE(cvtest::norm(true_rvec, rvec, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(true_tvec, Tvec, NORM_INF), 1e-3);
}
}
}
}} // namespace

@@ -938,4 +938,180 @@ TEST(Calib3d_DefaultNewCameraMatrix, accuracy) { CV_DefaultNewCameraMatrixTest test; test.safe_run(); }
TEST(Calib3d_UndistortPoints, accuracy) { CV_UndistortPointsTest test; test.safe_run(); }
TEST(Calib3d_InitUndistortRectifyMap, accuracy) { CV_InitUndistortRectifyMapTest test; test.safe_run(); }
TEST(Calib3d_UndistortPoints, inputShape)
{
//https://github.com/opencv/opencv/issues/14423
Matx33d cameraMatrix = Matx33d::eye();
{
//2xN 1-channel
Mat imagePoints(2, 3, CV_32FC1);
imagePoints.at<float>(0,0) = 320; imagePoints.at<float>(1,0) = 240;
imagePoints.at<float>(0,1) = 0; imagePoints.at<float>(1,1) = 240;
imagePoints.at<float>(0,2) = 320; imagePoints.at<float>(1,2) = 0;
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.cols);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<float>(0,i), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<float>(1,i), std::numeric_limits<float>::epsilon());
}
}
{
//Nx2 1-channel
Mat imagePoints(3, 2, CV_32FC1);
imagePoints.at<float>(0,0) = 320; imagePoints.at<float>(0,1) = 240;
imagePoints.at<float>(1,0) = 0; imagePoints.at<float>(1,1) = 240;
imagePoints.at<float>(2,0) = 320; imagePoints.at<float>(2,1) = 0;
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.rows);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<float>(i,0), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<float>(i,1), std::numeric_limits<float>::epsilon());
}
}
{
//1xN 2-channel
Mat imagePoints(1, 3, CV_32FC2);
imagePoints.at<Vec2f>(0,0) = Vec2f(320, 240);
imagePoints.at<Vec2f>(0,1) = Vec2f(0, 240);
imagePoints.at<Vec2f>(0,2) = Vec2f(320, 0);
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.cols);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<Vec2f>(0,i)(0), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<Vec2f>(0,i)(1), std::numeric_limits<float>::epsilon());
}
}
{
//Nx1 2-channel
Mat imagePoints(3, 1, CV_32FC2);
imagePoints.at<Vec2f>(0,0) = Vec2f(320, 240);
imagePoints.at<Vec2f>(1,0) = Vec2f(0, 240);
imagePoints.at<Vec2f>(2,0) = Vec2f(320, 0);
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.rows);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<Vec2f>(i,0)(0), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<Vec2f>(i,0)(1), std::numeric_limits<float>::epsilon());
}
}
{
//vector<Point2f>
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(normalized.size(), imagePoints.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
//vector<Point2d>
vector<Point2d> imagePoints;
imagePoints.push_back(Point2d(320, 240));
imagePoints.push_back(Point2d(0, 240));
imagePoints.push_back(Point2d(320, 0));
vector<Point2d> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(normalized.size(), imagePoints.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<double>::epsilon());
}
}
}
TEST(Calib3d_UndistortPoints, outputShape)
{
Matx33d cameraMatrix = Matx33d::eye();
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//Mat --> will be Nx1 2-channel
Mat normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(imagePoints.size()), normalized.rows);
for (int i = 0; i < normalized.rows; i++) {
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(0), imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(1), imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//Nx1 2-channel
Mat normalized(static_cast<int>(imagePoints.size()), 1, CV_32FC2);
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(imagePoints.size()), normalized.rows);
for (int i = 0; i < normalized.rows; i++) {
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(0), imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(1), imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//1xN 2-channel
Mat normalized(1, static_cast<int>(imagePoints.size()), CV_32FC2);
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(imagePoints.size()), normalized.cols);
for (int i = 0; i < normalized.cols; i++) {
EXPECT_NEAR(normalized.at<Vec2f>(0,i)(0), imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized.at<Vec2f>(0,i)(1), imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//vector<Point2f>
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(imagePoints.size(), normalized.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2d> imagePoints;
imagePoints.push_back(Point2d(320, 240));
imagePoints.push_back(Point2d(0, 240));
imagePoints.push_back(Point2d(320, 0));
//vector<Point2d>
vector<Point2d> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(imagePoints.size(), normalized.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<double>::epsilon());
}
}
}
}} // namespace

@@ -3116,9 +3116,9 @@ point coordinates out of the normalized distorted point coordinates ("normalized
coordinates do not depend on the camera matrix).
The function can be used for both a stereo camera head or a monocular camera (when R is empty).
-@param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2).
-@param dst Output ideal point coordinates after undistortion and reverse perspective
+@param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
+vector\<Point2f\> ).
+@param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
@param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
@param distCoeffs Input vector of distortion coefficients
@@ -3131,14 +3131,14 @@ of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion
*/
CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst,
InputArray cameraMatrix, InputArray distCoeffs,
-InputArray R = noArray(), InputArray P = noArray());
+InputArray R = noArray(), InputArray P = noArray() );
/** @overload
@note Default version of #undistortPoints does 5 iterations to compute undistorted points.
*/
CV_EXPORTS_AS(undistortPointsIter) void undistortPoints( InputArray src, OutputArray dst,
-InputArray cameraMatrix, InputArray distCoeffs,
-InputArray R, InputArray P, TermCriteria criteria);
+InputArray cameraMatrix, InputArray distCoeffs,
+InputArray R, InputArray P, TermCriteria criteria );
//! @} imgproc_transform
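A minimal monocular usage sketch for the declarations above (with an identity camera matrix and empty distortion the points pass through unchanged, which is exactly what the new tests verify):

    // assumes #include <opencv2/imgproc.hpp>
    std::vector<cv::Point2f> pixels = { {320.f, 240.f}, {0.f, 240.f}, {320.f, 0.f} };
    std::vector<cv::Point2f> normalized;
    cv::undistortPoints(pixels, normalized, cv::Matx33d::eye(), cv::noArray());
    // normalized[i] == pixels[i] here, since K = I and distCoeffs is empty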

@@ -561,10 +561,16 @@ void cv::undistortPoints( InputArray _src, OutputArray _dst,
Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat();
-CV_Assert( src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) &&
-           ((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2));
+int npoints = src.checkVector(2), depth = src.depth();
+if (npoints < 0)
+    src = src.t();
+npoints = src.checkVector(2);
+CV_Assert(npoints >= 0 && src.isContinuous() && (depth == CV_32F || depth == CV_64F));
-_dst.create(src.size(), src.type(), -1, true);
+if (src.cols == 2)
+    src = src.reshape(2);
+_dst.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
Mat dst = _dst.getMat();
CvMat _csrc = cvMat(src), _cdst = cvMat(dst), _ccameraMatrix = cvMat(cameraMatrix);
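A sketch (not part of the patch) of the output contract the new _dst.create(npoints, 1, ...) call establishes: an empty Mat destination comes back Nx1 2-channel whatever the input layout, while the trailing true argument (allowTransposed) still accepts a preallocated 1xN buffer:

    // assumes #include <opencv2/imgproc.hpp>
    cv::Mat src(2, 5, CV_32FC1);                  // 2xN 1-channel input, accepted after this change
    cv::randu(src, cv::Scalar::all(0), cv::Scalar::all(100));
    cv::Mat dst;
    cv::undistortPoints(src, dst, cv::Matx33d::eye(), cv::noArray());
    CV_Assert(dst.rows == 5 && dst.cols == 1 && dst.type() == CV_32FC2);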
