added 3-camera rectification and 8-coeff distortion model

pull/13383/head
Vadim Pisarevsky 14 years ago
parent 6960e1544d
commit 31dbefc865
  1. modules/calib3d/include/opencv2/calib3d/calib3d.hpp (17 changed lines)
  2. modules/calib3d/src/calibration.cpp (232 changed lines)
  3. modules/imgproc/src/undistort.cpp (45 changed lines)
  4. samples/cpp/3calibration.cpp (343 changed lines)
  5. samples/cpp/calibration.cpp (32 changed lines)

@@ -220,6 +220,9 @@ CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_K4 2048
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
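(Note: the new CV_CALIB_FIX_K4/K5/K6 values continue the calibration bit-flag sequence but jump from 128 straight to 2048, because 256, 512 and 1024 are already taken by the stereo-specific flags CV_CALIB_FIX_INTRINSIC, CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_DISPARITY, as the C++ enum below shows for the first two.)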
@@ -544,6 +547,9 @@ enum
CALIB_FIX_K1 = 32,
CALIB_FIX_K2 = 64,
CALIB_FIX_K3 = 128,
CALIB_FIX_K4 = 2048,
CALIB_FIX_K5 = 4096,
CALIB_FIX_K6 = 8192,
// only for stereo
CALIB_FIX_INTRINSIC = 256,
CALIB_SAME_FOCAL_LENGTH = 512,
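A minimal sketch of how the extended model is selected through the C++ API, assuming using namespace cv as in the samples below; the point containers and image size are illustrative placeholders to be filled by the caller, and only the distortion-matrix size and the new flags matter here:

// illustrative only: fill objectPoints/imagePoints/imageSize before calling
vector<vector<Point3f> > objectPoints;   // per-view 3D pattern points
vector<vector<Point2f> > imagePoints;    // per-view detected 2D points
Size imageSize(640, 480);
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
Mat distCoeffs = Mat::zeros(8, 1, CV_64F);   // [k1, k2, p1, p2, k3, k4, k5, k6]
vector<Mat> rvecs, tvecs;
// an 8-element vector enables the rational model; adding
// CALIB_FIX_K4 | CALIB_FIX_K5 | CALIB_FIX_K6 keeps the classic 5-coefficient model
calibrateCamera(objectPoints, imagePoints, imageSize,
                cameraMatrix, distCoeffs, rvecs, tvecs, 0);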
@@ -605,6 +611,17 @@ CV_EXPORTS bool stereoRectifyUncalibrated( const Mat& points1,
Mat& H1, Mat& H2,
double threshold=5 );
//! computes the rectification transformations for 3-head camera, where the heads are on the same line.
CV_EXPORTS float rectify3( const Mat& cameraMatrix1, const Mat& distCoeffs1,
const Mat& cameraMatrix2, const Mat& distCoeffs2,
const Mat& cameraMatrix3, const Mat& distCoeffs3,
const vector<vector<Point2f> >& imgpt1,
const vector<vector<Point2f> >& imgpt3,
Size imageSize, const Mat& R12, const Mat& T12, const Mat& R13, const Mat& T13,
Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q,
double alpha, Size newImgSize,
Rect* roi1, Rect* roi2, int flags );
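A condensed, hypothetical call sequence for the declaration above (the complete working version is the 3calibration.cpp sample added by this commit; the camera matrices, distortion vectors, per-view image points and the R12/T12, R13/T13 extrinsics are all assumed to come from a prior three-camera calibration):

Mat R1, R2, R3, P1, P2, P3, Q;
Rect roi1, roi2;
// the three heads lie on one line; imgpt1/imgpt3 are the per-view chessboard
// detections used to fine-tune the third projection matrix
float ratio = rectify3(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                       cameraMatrix3, distCoeffs3, imgpt1, imgpt3,
                       imageSize, R12, T12, R13, T13,
                       R1, R2, R3, P1, P2, P3, Q,
                       -1, imageSize, &roi1, &roi2, CV_CALIB_ZERO_DISPARITY);
// R1..R3 and P1..P3 then feed initUndistortRectifyMap()/remap() for each head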
//! returns the optimal new camera matrix
CV_EXPORTS Mat getOptimalNewCameraMatrix( const Mat& cameraMatrix, const Mat& distCoeffs,
Size imageSize, double alpha, Size newImgSize=Size(),

@@ -757,6 +757,8 @@ CV_IMPL int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )
}
static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1 floating-point vector";
CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
const CvMat* r_vec,
const CvMat* t_vec,
@@ -774,7 +776,7 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
int calc_derivatives;
const CvPoint3D64f* M;
CvPoint2D64f* m;
double r[3], R[9], dRdr[27], t[3], a[9], k[5] = {0,0,0,0,0}, fx, fy, cx, cy;
double r[3], R[9], dRdr[27], t[3], a[9], k[8] = {0,0,0,0,0,0,0,0}, fx, fy, cx, cy;
CvMat _r, _t, _a = cvMat( 3, 3, CV_64F, a ), _k;
CvMat matR = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr );
double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0;
@@ -860,9 +862,9 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
CV_MAT_DEPTH(distCoeffs->type) != CV_32F) ||
(distCoeffs->rows != 1 && distCoeffs->cols != 1) ||
(distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 4 &&
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 5) )
CV_Error( CV_StsBadArg,
"Distortion coefficients must be 1x4, 4x1, 1x5 or 5x1 floating-point vector" );
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 5 &&
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 8) )
CV_Error( CV_StsBadArg, cvDistCoeffErr );
_k = cvMat( distCoeffs->rows, distCoeffs->cols,
CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k );
@@ -943,8 +945,8 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
{
if( !CV_IS_MAT(dpdk) ||
(CV_MAT_TYPE(dpdk->type) != CV_32FC1 && CV_MAT_TYPE(dpdk->type) != CV_64FC1) ||
dpdk->rows != count*2 || (dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) )
CV_Error( CV_StsBadArg, "dp/df must be 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" );
dpdk->rows != count*2 || (dpdk->cols != 8 && dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) )
CV_Error( CV_StsBadArg, "dp/df must be 2Nx8, 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" );
if( !distCoeffs )
CV_Error( CV_StsNullPtr, "distCoeffs is NULL while dpdk is not" );
@@ -967,7 +969,7 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];
double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];
double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];
double r2, r4, r6, a1, a2, a3, cdist;
double r2, r4, r6, a1, a2, a3, cdist, icdist2;
double xd, yd;
z = z ? 1./z : 1;
@@ -980,8 +982,9 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
a2 = r2 + 2*x*x;
a3 = r2 + 2*y*y;
cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
xd = x*cdist + k[2]*a1 + k[3]*a2;
yd = y*cdist + k[2]*a3 + k[3]*a1;
icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
xd = x*cdist*icdist2 + k[2]*a1 + k[3]*a2;
yd = y*cdist*icdist2 + k[2]*a3 + k[3]*a1;
m[i].x = xd*fx + cx;
m[i].y = yd*fy + cy;
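Written out, with the coefficient array laid out as k[0..7] = {k1, k2, p1, p2, k3, k4, k5, k6}, the projection above now applies the rational distortion model

x' = x*(1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6) + 2*p1*x*y + p2*(r^2 + 2*x^2)
y' = y*(1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6) + p1*(r^2 + 2*y^2) + 2*p2*x*y

so a 4- or 5-element coefficient vector (k4 = k5 = k6 = 0) reproduces the old purely polynomial model exactly.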
@@ -1015,10 +1018,10 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
if( dpdk_p )
{
dpdk_p[0] = fx*x*r2;
dpdk_p[1] = fx*x*r4;
dpdk_p[dpdk_step] = fy*y*r2;
dpdk_p[dpdk_step+1] = fy*y*r4;
dpdk_p[0] = fx*x*icdist2*r2;
dpdk_p[1] = fx*x*icdist2*r4;
dpdk_p[dpdk_step] = fy*y*icdist2*r2;
dpdk_p[dpdk_step+1] = fy*y*icdist2*r4;
if( _dpdk->cols > 2 )
{
dpdk_p[2] = fx*a1;
@@ -1027,8 +1030,18 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
dpdk_p[dpdk_step+3] = fy*a1;
if( _dpdk->cols > 4 )
{
dpdk_p[4] = fx*x*r6;
dpdk_p[dpdk_step+4] = fy*y*r6;
dpdk_p[4] = fx*x*icdist2*r6;
dpdk_p[dpdk_step+4] = fy*y*icdist2*r6;
if( _dpdk->cols > 5 )
{
dpdk_p[5] = fx*x*cdist*(-icdist2)*icdist2*r2;
dpdk_p[dpdk_step+5] = fy*y*cdist*(-icdist2)*icdist2*r2;
dpdk_p[6] = fx*x*icdist2*cdist*(-icdist2)*icdist2*r4;
dpdk_p[dpdk_step+6] = fy*y*cdist*(-icdist2)*icdist2*r4;
dpdk_p[7] = fx*x*icdist2*cdist*(-icdist2)*icdist2*r6;
dpdk_p[dpdk_step+7] = fy*y*cdist*(-icdist2)*icdist2*r6;
}
}
}
dpdk_p += dpdk_step*2;
@@ -1041,11 +1054,12 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
{
double dr2dt = 2*x*dxdt[j] + 2*y*dydt[j];
double dcdist_dt = k[0]*dr2dt + 2*k[1]*r2*dr2dt + 3*k[4]*r4*dr2dt;
double dicdist2_dt = -icdist2*icdist2*(k[5]*dr2dt + 2*k[6]*r2*dr2dt + 3*k[7]*r4*dr2dt);
double da1dt = 2*(x*dydt[j] + y*dxdt[j]);
double dmxdt = fx*(dxdt[j]*cdist + x*dcdist_dt +
k[2]*da1dt + k[3]*(dr2dt + 2*x*dxdt[j]));
double dmydt = fy*(dydt[j]*cdist + y*dcdist_dt +
k[2]*(dr2dt + 2*y*dydt[j]) + k[3]*da1dt);
double dmxdt = fx*(dxdt[j]*cdist*icdist2 + x*dcdist_dt*icdist2 + x*cdist*dicdist2_dt +
k[2]*da1dt + k[3]*(dr2dt + 2*x*dxdt[j]));
double dmydt = fy*(dydt[j]*cdist*icdist2 + y*dcdist_dt*icdist2 + y*cdist*dicdist2_dt +
k[2]*(dr2dt + 2*y*dydt[j]) + k[3]*da1dt);
dpdt_p[j] = dmxdt;
dpdt_p[dpdt_step+j] = dmydt;
}
@@ -1078,11 +1092,12 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
double dydr = z*(dy0dr[j] - y*dz0dr[j]);
double dr2dr = 2*x*dxdr + 2*y*dydr;
double dcdist_dr = k[0]*dr2dr + 2*k[1]*r2*dr2dr + 3*k[4]*r4*dr2dr;
double dicdist2_dr = -icdist2*icdist2*(k[5]*dr2dr + 2*k[6]*r2*dr2dr + 3*k[7]*r4*dr2dr);
double da1dr = 2*(x*dydr + y*dxdr);
double dmxdr = fx*(dxdr*cdist + x*dcdist_dr +
k[2]*da1dr + k[3]*(dr2dr + 2*x*dxdr));
double dmydr = fy*(dydr*cdist + y*dcdist_dr +
k[2]*(dr2dr + 2*y*dydr) + k[3]*da1dr);
double dmxdr = fx*(dxdr*cdist*icdist2 + x*dcdist_dr*icdist2 + x*cdist*dicdist2_dr +
k[2]*da1dr + k[3]*(dr2dr + 2*x*dxdr));
double dmydr = fy*(dydr*cdist*icdist2 + y*dcdist_dr*icdist2 + y*cdist*dicdist2_dr +
k[2]*(dr2dr + 2*y*dydr) + k[3]*da1dr);
dpdr_p[j] = dmxdr;
dpdr_p[dpdr_step+j] = dmydr;
}
@@ -1414,12 +1429,12 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs,
CvMat* rvecs, CvMat* tvecs, int flags )
{
const int NINTRINSIC = 9;
const int NINTRINSIC = 12;
Ptr<CvMat> matM, _m, _Ji, _Je, _err;
CvLevMarq solver;
double reprojErr = 0;
double A[9], k[5] = {0,0,0,0,0};
double A[9], k[8] = {0,0,0,0,0,0,0,0};
CvMat matA = cvMat(3, 3, CV_64F, A), _k;
int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep, cn;
double aspectRatio = 0.;
@@ -1472,9 +1487,9 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
CV_MAT_TYPE(distCoeffs->type) != CV_64FC1) ||
(distCoeffs->cols != 1 && distCoeffs->rows != 1) ||
(distCoeffs->cols*distCoeffs->rows != 4 &&
distCoeffs->cols*distCoeffs->rows != 5) )
CV_Error( CV_StsBadArg,
"Distortion coefficients must be 4x1, 1x4, 5x1 or 1x5 floating-point matrix" );
distCoeffs->cols*distCoeffs->rows != 5 &&
distCoeffs->cols*distCoeffs->rows != 8) )
CV_Error( CV_StsBadArg, cvDistCoeffErr );
for( i = 0; i < nimages; i++ )
{
@@ -1502,8 +1517,12 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
cvZero( _Ji );
_k = cvMat( distCoeffs->rows, distCoeffs->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k);
if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) == 4 )
flags |= CV_CALIB_FIX_K3;
if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 8 )
{
if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 5 )
flags |= CV_CALIB_FIX_K3;
flags |= CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5 | CV_CALIB_FIX_K6;
}
// 1. initialize intrinsic parameters & LM solver
if( flags & CV_CALIB_USE_INTRINSIC_GUESS )
@@ -1556,7 +1575,7 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
param[0] = A[0]; param[1] = A[4]; param[2] = A[2]; param[3] = A[5];
param[4] = k[0]; param[5] = k[1]; param[6] = k[2]; param[7] = k[3];
param[8] = k[4];
param[8] = k[4]; param[9] = k[5]; param[10] = k[6]; param[11] = k[7];
if( flags & CV_CALIB_FIX_FOCAL_LENGTH )
mask[0] = mask[1] = 0;
@@ -1573,6 +1592,12 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
mask[5] = 0;
if( flags & CV_CALIB_FIX_K3 )
mask[8] = 0;
if( flags & CV_CALIB_FIX_K4 )
mask[9] = 0;
if( flags & CV_CALIB_FIX_K5 )
mask[10] = 0;
if( flags & CV_CALIB_FIX_K6 )
mask[11] = 0;
}
// 2. initialize extrinsic parameters
@@ -1605,11 +1630,9 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
pparam[0] = pparam[1]*aspectRatio;
}
A[0] = param[0]; A[4] = param[1];
A[2] = param[2]; A[5] = param[3];
k[0] = param[4]; k[1] = param[5]; k[2] = param[6];
k[3] = param[7];
k[4] = param[8];
A[0] = param[0]; A[4] = param[1]; A[2] = param[2]; A[5] = param[3];
k[0] = param[4]; k[1] = param[5]; k[2] = param[6]; k[3] = param[7];
k[4] = param[8]; k[5] = param[9]; k[6] = param[10]; k[7] = param[11];
if( !proceed )
break;
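For reference, the per-camera intrinsic block optimized by the LM solver is now 12 elements: param[0..11] = {fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6}. Each CV_CALIB_FIX_K* flag simply zeroes the corresponding mask entry, so the solver keeps that coefficient at its initial (usually zero) value.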
@@ -1787,12 +1810,12 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
CvTermCriteria termCrit,
int flags )
{
const int NINTRINSIC = 9;
const int NINTRINSIC = 12;
Ptr<CvMat> npoints, err, J_LR, Je, Ji, imagePoints[2], objectPoints, RT0;
CvLevMarq solver;
double reprojErr = 0;
double A[2][9], dk[2][5]={{0,0,0,0,0},{0,0,0,0,0}}, rlr[9];
double A[2][9], dk[2][8]={{0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0}}, rlr[9];
CvMat K[2], Dist[2], om_LR, T_LR;
CvMat R_LR = cvMat(3, 3, CV_64F, rlr);
int i, k, p, ni = 0, ofs, nimages, pointsTotal, maxPoints = 0;
@@ -1838,7 +1861,7 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
(_imagePoints1->rows == 1 && _imagePoints1->cols == pointsTotal && cn == 2)) );
K[k] = cvMat(3,3,CV_64F,A[k]);
Dist[k] = cvMat(1,5,CV_64F,dk[k]);
Dist[k] = cvMat(1,8,CV_64F,dk[k]);
imagePoints[k] = cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type)));
cvConvert( points, imagePoints[k] );
@@ -1849,7 +1872,7 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
cvConvert( cameraMatrix, &K[k] );
if( flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS|
CV_CALIB_FIX_K1|CV_CALIB_FIX_K2|CV_CALIB_FIX_K3) )
CV_CALIB_FIX_K1|CV_CALIB_FIX_K2|CV_CALIB_FIX_K3|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5|CV_CALIB_FIX_K6) )
{
CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
@@ -1909,6 +1932,12 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
imask[5] = imask[NINTRINSIC+5] = 0;
if( flags & CV_CALIB_FIX_K3 )
imask[8] = imask[NINTRINSIC+8] = 0;
if( flags & CV_CALIB_FIX_K4 )
imask[9] = imask[NINTRINSIC+9] = 0;
if( flags & CV_CALIB_FIX_K5 )
imask[10] = imask[NINTRINSIC+10] = 0;
if( flags & CV_CALIB_FIX_K6 )
imask[11] = imask[NINTRINSIC+11] = 0;
}
/*
@@ -1981,7 +2010,8 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
dk[k][2] = dk[k][3] = 0;
iparam[0] = A[k][0]; iparam[1] = A[k][4]; iparam[2] = A[k][2]; iparam[3] = A[k][5];
iparam[4] = dk[k][0]; iparam[5] = dk[k][1]; iparam[6] = dk[k][2];
iparam[7] = dk[k][3]; iparam[8] = dk[k][4];
iparam[7] = dk[k][3]; iparam[8] = dk[k][4]; iparam[9] = dk[k][5];
iparam[10] = dk[k][6]; iparam[11] = dk[k][7];
}
om_LR = cvMat(3, 1, CV_64F, solver.param->data.db);
@@ -2045,6 +2075,9 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
dk[k][2] = iparam[k*NINTRINSIC+6];
dk[k][3] = iparam[k*NINTRINSIC+7];
dk[k][4] = iparam[k*NINTRINSIC+8];
dk[k][5] = iparam[k*NINTRINSIC+9];
dk[k][6] = iparam[k*NINTRINSIC+10];
dk[k][7] = iparam[k*NINTRINSIC+11];
}
}
@@ -2301,7 +2334,7 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
cvConvert( &Ri, _R1 );
cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, 0);
cvConvert( &Ri, _R2 );
cvMatMul(&r_r, matT, &t);
cvMatMul(&Ri, matT, &t);
// calculate projection/camera matrices
// these contain the relevant rectified image internal params (fx, fy=fx, cx, cy)
@@ -3074,11 +3107,13 @@ static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
{
Mat distCoeffs = Mat::zeros(distCoeffs0.cols == 1 ? Size(1, 5) : Size(5, 1), rtype);
Mat distCoeffs = Mat::zeros(distCoeffs0.cols == 1 ? Size(1, 8) : Size(8, 1), rtype);
if( distCoeffs0.size() == Size(1, 4) ||
distCoeffs0.size() == Size(1, 5) ||
distCoeffs0.size() == Size(1, 8) ||
distCoeffs0.size() == Size(4, 1) ||
distCoeffs0.size() == Size(5, 1) )
distCoeffs0.size() == Size(5, 1) ||
distCoeffs0.size() == Size(8, 1) )
{
Mat dstCoeffs(distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows));
distCoeffs0.convertTo(dstCoeffs, rtype);
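In effect the C++ wrappers now always allocate an 8-element distortion vector and zero-pad whatever 4-, 5- or 8-element vector the caller supplied, so downstream code can read all eight coefficients unconditionally.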
@@ -3451,4 +3486,113 @@ void cv::decomposeProjectionMatrix( const Mat& projMatrix, Mat& cameraMatrix,
}
namespace cv
{
static void adjust3rdMatrix(const vector<vector<Point2f> >& imgpt1_0,
const vector<vector<Point2f> >& imgpt3_0,
const Mat& cameraMatrix1, const Mat& distCoeffs1,
const Mat& cameraMatrix3, const Mat& distCoeffs3,
const Mat& R1, const Mat& R3, const Mat& P1, Mat& P3 )
{
vector<Point2f> imgpt1, imgpt3;
for( int i = 0; i < (int)std::min(imgpt1_0.size(), imgpt3_0.size()); i++ )
{
if( !imgpt1_0[i].empty() && !imgpt3_0[i].empty() )
{
std::copy(imgpt1_0[i].begin(), imgpt1_0[i].end(), std::back_inserter(imgpt1));
std::copy(imgpt3_0[i].begin(), imgpt3_0[i].end(), std::back_inserter(imgpt3));
}
}
undistortPoints(Mat(imgpt1), imgpt1, cameraMatrix1, distCoeffs1, R1, P1);
undistortPoints(Mat(imgpt3), imgpt3, cameraMatrix3, distCoeffs3, R3, P3);
double y1_ = 0, y2_ = 0, y1y1_ = 0, y1y2_ = 0;
int n = imgpt1.size();
for( int i = 0; i < n; i++ )
{
double y1 = imgpt3[i].y, y2 = imgpt1[i].y;
y1_ += y1; y2_ += y2;
y1y1_ += y1*y1; y1y2_ += y1*y2;
}
y1_ /= n;
y2_ /= n;
y1y1_ /= n;
y1y2_ /= n;
double a = (y1y2_ - y1_*y2_)/(y1y1_ - y1_*y1_);
double b = y2_ - a*y1_;
P3.at<double>(0,0) *= a;
P3.at<double>(1,1) *= a;
P3.at<double>(0,2) = P3.at<double>(0,2)*a;
P3.at<double>(1,2) = P3.at<double>(1,2)*a + b;
}
}
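adjust3rdMatrix above is a small least-squares touch-up: after rectification it fits y2 ≈ a*y1 + b between the y-coordinates of the shared chessboard corners seen by camera 3 (y1) and camera 1 (y2), with a = (mean(y1*y2) - mean(y1)*mean(y2)) / (mean(y1^2) - mean(y1)^2) and b = mean(y2) - a*mean(y1), then folds the scale a and offset b into P3 so that rows of the rectified third view line up with rows of the first.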
float cv::rectify3( const Mat& cameraMatrix1, const Mat& distCoeffs1,
const Mat& cameraMatrix2, const Mat& distCoeffs2,
const Mat& cameraMatrix3, const Mat& distCoeffs3,
const vector<vector<Point2f> >& imgpt1,
const vector<vector<Point2f> >& imgpt3,
Size imageSize, const Mat& R12, const Mat& T12, const Mat& R13, const Mat& T13,
Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q,
double alpha, Size newImgSize,
Rect* roi1, Rect* roi2, int flags )
{
// first, rectify the 1-2 stereo pair
stereoRectify( cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
imageSize, R12, T12, R1, R2, P1, P2, Q,
alpha, imageSize, roi1, roi2, flags );
// recompute rectification transforms for cameras 1 & 2.
Mat om, r_r, r_r13;
if( R13.size() != Size(3,3) )
Rodrigues(R13, r_r13);
else
R13.copyTo(r_r13);
if( R12.size() == Size(3,3) )
Rodrigues(R12, om);
else
R12.copyTo(om);
om *= -0.5;
Rodrigues(om, r_r); // rotate cameras to same orientation by averaging
Mat_<double> t12 = r_r * T12;
int idx = fabs(t12(0,0)) > fabs(t12(1,0)) ? 0 : 1;
double c = t12(idx,0), nt = norm(t12, CV_L2);
Mat_<double> uu = Mat_<double>::zeros(3,1);
uu(idx, 0) = c > 0 ? 1 : -1;
// calculate global Z rotation
Mat_<double> ww = t12.cross(uu), wR;
double nw = norm(ww, CV_L2);
ww *= acos(fabs(c)/nt)/nw;
Rodrigues(ww, wR);
// now rotate camera 3 to make its optical axis parallel to cameras 1 and 2.
R3 = wR*r_r.t()*r_r13.t();
Mat_<double> t13 = R3 * T13;
P2.copyTo(P3);
Mat t = P3.col(3);
t13.copyTo(t);
if( !imgpt1.empty() && !imgpt3.empty() )
adjust3rdMatrix(imgpt1, imgpt3, cameraMatrix1, distCoeffs1, cameraMatrix3, distCoeffs3, R1, R3, P1, P3);
return (float)((P3.at<double>(idx,3)/P3.at<double>(idx,idx))/
(P2.at<double>(idx,3)/P2.at<double>(idx,idx)));
}
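The returned float, reading the final expression, is the ratio of the rectified 1-3 baseline to the rectified 1-2 baseline (the translation entry of P3 over that of P2 along the dominant axis); a caller can use it to relate disparities computed on the (1,3) pair to those on the (1,2) pair.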
/* End of file. */

@@ -89,7 +89,7 @@ void initUndistortRectifyMap( const Mat& _cameraMatrix, const Mat& _distCoeffs,
distCoeffs = Mat_<double>(_distCoeffs);
else
{
distCoeffs.create(5, 1);
distCoeffs.create(8, 1);
distCoeffs = 0.;
}
@@ -101,8 +101,9 @@ void initUndistortRectifyMap( const Mat& _cameraMatrix, const Mat& _distCoeffs,
double u0 = A(0, 2), v0 = A(1, 2);
double fx = A(0, 0), fy = A(1, 1);
CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(1, 5) ||
distCoeffs.size() == Size(4, 1) || distCoeffs.size() == Size(5, 1));
CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) ||
distCoeffs.size() == Size(1, 5) || distCoeffs.size() == Size(5, 1) ||
distCoeffs.size() == Size(1, 8) || distCoeffs.size() == Size(8, 1));
if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() )
distCoeffs = distCoeffs.t();
@@ -111,7 +112,10 @@ void initUndistortRectifyMap( const Mat& _cameraMatrix, const Mat& _distCoeffs,
double k2 = ((double*)distCoeffs.data)[1];
double p1 = ((double*)distCoeffs.data)[2];
double p2 = ((double*)distCoeffs.data)[3];
double k3 = distCoeffs.cols + distCoeffs.rows - 1 == 5 ? ((double*)distCoeffs.data)[4] : 0.;
double k3 = distCoeffs.cols + distCoeffs.rows - 1 >= 5 ? ((double*)distCoeffs.data)[4] : 0.;
double k4 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[5] : 0.;
double k5 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[6] : 0.;
double k6 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[7] : 0.;
for( int i = 0; i < size.height; i++ )
{
@@ -126,7 +130,7 @@ void initUndistortRectifyMap( const Mat& _cameraMatrix, const Mat& _distCoeffs,
double w = 1./_w, x = _x*w, y = _y*w;
double x2 = x*x, y2 = y*y;
double r2 = x2 + y2, _2xy = 2*x*y;
double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2;
double kr = (1 + ((k3*r2 + k2)*r2 + k1)*r2)/(1 + ((k6*r2 + k5)*r2 + k4)*r2);
double u = fx*(x*kr + p1*_2xy + p2*(r2 + 2*x2)) + u0;
double v = fy*(y*kr + p1*(r2 + 2*y2) + p2*_2xy) + v0;
if( m1type == CV_16SC2 )
@@ -248,7 +252,7 @@ void cvUndistortPoints( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatr
const CvMat* _distCoeffs,
const CvMat* matR, const CvMat* matP )
{
double A[3][3], RR[3][3], k[5]={0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
double A[3][3], RR[3][3], k[8]={0,0,0,0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
CvMat matA=cvMat(3, 3, CV_64F, A), _Dk;
CvMat _RR=cvMat(3, 3, CV_64F, RR);
const CvPoint2D32f* srcf;
@@ -276,7 +280,8 @@ void cvUndistortPoints( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatr
CV_Assert( CV_IS_MAT(_distCoeffs) &&
(_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
(_distCoeffs->rows*_distCoeffs->cols == 4 ||
_distCoeffs->rows*_distCoeffs->cols == 5) );
_distCoeffs->rows*_distCoeffs->cols == 5 ||
_distCoeffs->rows*_distCoeffs->cols == 8));
_Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,
CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);
@@ -341,7 +346,7 @@ void cvUndistortPoints( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatr
for( j = 0; j < iters; j++ )
{
double r2 = x*x + y*y;
double icdist = 1./(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
x = (x0 - deltaX)*icdist;
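The compensating loop above is a fixed-point iteration of the inverse model: icdist is the reciprocal of the forward radial factor (1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6) evaluated at the current estimate (recall that k[4] holds k3 and k[5..7] hold k4..k6), so each pass subtracts the tangential terms and divides the radial factor back out.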
@@ -488,10 +493,13 @@ static Point2f invMapPointSpherical(Point2f _p, float alpha, int projType)
}
float initWideAngleProjMap( const Mat& cameraMatrix, const Mat& distCoeffs,
float initWideAngleProjMap( const Mat& cameraMatrix0, const Mat& distCoeffs0,
Size imageSize, int destImageWidth, int m1type,
Mat& map1, Mat& map2, int projType, double _alpha )
{
double k[8] = {0,0,0,0,0,0,0,0}, M[9]={0,0,0,0,0,0,0,0,0};
Mat distCoeffs(distCoeffs0.rows, distCoeffs0.cols, CV_MAKETYPE(CV_64F,distCoeffs0.channels()), k);
Mat cameraMatrix(3,3,CV_64F,M);
Point2f scenter((float)cameraMatrix.at<double>(0,2), (float)cameraMatrix.at<double>(1,2));
Point2f dcenter((destImageWidth-1)*0.5f, 0.f);
float xmin = FLT_MAX, xmax = -FLT_MAX, ymin = FLT_MAX, ymax = -FLT_MAX;
@@ -500,6 +508,13 @@ float initWideAngleProjMap( const Mat& cameraMatrix, const Mat& distCoeffs,
Mat _u(u), I = Mat::eye(3,3,CV_64F);
float alpha = (float)_alpha;
int ndcoeffs = distCoeffs0.cols*distCoeffs0.rows*distCoeffs0.channels();
CV_Assert((distCoeffs0.cols == 1 || distCoeffs0.rows == 1) &&
(ndcoeffs == 4 || ndcoeffs == 5 || ndcoeffs == 8));
CV_Assert(cameraMatrix0.size() == Size(3,3));
distCoeffs0.convertTo(distCoeffs,CV_64F);
cameraMatrix0.convertTo(cameraMatrix,CV_64F);
alpha = std::min(alpha, 0.999f);
for( int i = 0; i < N; i++ )
@@ -520,14 +535,8 @@ float initWideAngleProjMap( const Mat& cameraMatrix, const Mat& distCoeffs,
dcenter.y = (dsize.height - 1)*0.5f;
Mat mapxy(dsize, CV_32FC2);
double k1 = distCoeffs.at<double>(0,0),
k2 = distCoeffs.at<double>(1,0),
k3 = distCoeffs.at<double>(4,0),
p1 = distCoeffs.at<double>(2,0),
p2 = distCoeffs.at<double>(3,0);
double fx = cameraMatrix.at<double>(0,0),
fy = cameraMatrix.at<double>(1,1),
cx = scenter.x, cy = scenter.y;
double k1 = k[0], k2 = k[1], k3 = k[2], p1 = k[3], p2 = k[4], k4 = k[5], k5 = k[6], k6 = k[7];
double fx = cameraMatrix.at<double>(0,0), fy = cameraMatrix.at<double>(1,1), cx = scenter.x, cy = scenter.y;
for( int y = 0; y < dsize.height; y++ )
{
@@ -543,7 +552,7 @@ float initWideAngleProjMap( const Mat& cameraMatrix, const Mat& distCoeffs,
}
double x2 = q.x*q.x, y2 = q.y*q.y;
double r2 = x2 + y2, _2xy = 2*q.x*q.y;
double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2;
double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2/(1 + ((k6*r2 + k5)*r2 + k4)*r2);
double u = fx*(q.x*kr + p1*_2xy + p2*(r2 + 2*x2)) + cx;
double v = fy*(q.y*kr + p1*(r2 + 2*y2) + p2*_2xy) + cy;

@@ -0,0 +1,343 @@
#include "opencv2/opencv.hpp"
#include <stdio.h>
#include <string.h>
#include <time.h>
using namespace cv;
using namespace std;
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
corners.resize(0);
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float(j*squareSize),
float(i*squareSize), 0));
}
static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
vector<vector<Point2f> > imagePoints2,
vector<vector<Point2f> > imagePoints3,
Size imageSize, Size boardSize,
float squareSize, float aspectRatio,
int flags,
Mat& cameraMatrix1, Mat& distCoeffs1,
Mat& cameraMatrix2, Mat& distCoeffs2,
Mat& cameraMatrix3, Mat& distCoeffs3,
Mat& R12, Mat& T12, Mat& R13, Mat& T13)
{
int c, i;
// step 1: calibrate each camera individually
vector<vector<Point3f> > objpt(1);
vector<vector<Point2f> > imgpt;
calcChessboardCorners(boardSize, squareSize, objpt[0]);
vector<Mat> rvecs, tvecs;
for( c = 1; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 1 ? imagePoints1 : c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
for( i = 0; i < (int)imgpt0.size(); i++ )
if( !imgpt0[i].empty() )
imgpt.push_back(imgpt0[i]);
if( imgpt.size() < 3 )
{
printf("Error: not enough views for camera %d\n", c);
return false;
}
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
if( c == 3 )
{
calibrateCamera(objpt, imgpt, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, flags|CV_CALIB_FIX_K3);
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
if(!ok)
{
printf("Error: camera %d was not calibrated\n", c);
return false;
}
}
if( c == 1 )
cameraMatrix1 = cameraMatrix, distCoeffs1 = distCoeffs;
else if( c == 2 )
cameraMatrix2 = cameraMatrix, distCoeffs2 = distCoeffs;
else
cameraMatrix3 = cameraMatrix, distCoeffs3 = distCoeffs;
}
vector<vector<Point2f> > imgpt_right;
// step 2: calibrate (1,2) and (3,2) pairs
for( c = 2; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
imgpt_right.clear();
for( i = 0; i < (int)std::min(imagePoints1.size(), imgpt0.size()); i++ )
if( !imagePoints1[i].empty() && !imgpt0[i].empty() )
{
imgpt.push_back(imagePoints1[i]);
imgpt_right.push_back(imgpt0[i]);
}
if( imgpt.size() < 3 )
{
printf("Error: not enough shared views for cameras 1 and %d\n", c);
return false;
}
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = c == 2 ? cameraMatrix2 : cameraMatrix3;
Mat distCoeffs = c == 2 ? distCoeffs2 : distCoeffs3;
Mat R, T, E, F;
stereoCalibrate(objpt, imgpt, imgpt_right, cameraMatrix1, distCoeffs1, cameraMatrix, distCoeffs,
imageSize, R, T, E, F,
TermCriteria(TermCriteria::COUNT, 30, 0),
(c == 3 ? CV_CALIB_FIX_INTRINSIC : 0) | CV_CALIB_FIX_K3);
if( c == 2 )
{
cameraMatrix2 = cameraMatrix;
distCoeffs2 = distCoeffs;
R12 = R; T12 = T;
}
else
{
R13 = R; T13 = T;
}
}
return true;
}
static bool readStringList( const string& filename, vector<string>& l )
{
l.resize(0);
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
return true;
}
int main( int argc, char** argv )
{
int i, k;
int flags = 0;
Size boardSize, imageSize;
float squareSize = 1.f, aspectRatio = 1.f;
const char* outputFilename = "out_camera_data.yml";
const char* inputFilename = 0;
vector<vector<Point2f> > imgpt[3];
vector<string> imageList;
if( argc < 2 )
{
printf( "This is a camera calibration sample.\n"
"Usage: calibration\n"
" -w <board_width> # the number of inner corners per one of board dimension\n"
" -h <board_height> # the number of inner corners per another board dimension\n"
" [-s <squareSize>] # square size in some user-defined units (1 by default)\n"
" [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
" [-zt] # assume zero tangential distortion\n"
" [-a <aspectRatio>] # fix aspect ratio (fx/fy)\n"
" [-p] # fix the principal point at the center\n"
" [input_data] # input data - text file with a list of the images of the board\n"
"\n" );
return 0;
}
for( i = 1; i < argc; i++ )
{
const char* s = argv[i];
if( strcmp( s, "-w" ) == 0 )
{
if( sscanf( argv[++i], "%u", &boardSize.width ) != 1 || boardSize.width <= 0 )
return fprintf( stderr, "Invalid board width\n" ), -1;
}
else if( strcmp( s, "-h" ) == 0 )
{
if( sscanf( argv[++i], "%u", &boardSize.height ) != 1 || boardSize.height <= 0 )
return fprintf( stderr, "Invalid board height\n" ), -1;
}
else if( strcmp( s, "-s" ) == 0 )
{
if( sscanf( argv[++i], "%f", &squareSize ) != 1 || squareSize <= 0 )
return fprintf( stderr, "Invalid board square width\n" ), -1;
}
else if( strcmp( s, "-a" ) == 0 )
{
if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
return printf("Invalid aspect ratio\n" ), -1;
flags |= CV_CALIB_FIX_ASPECT_RATIO;
}
else if( strcmp( s, "-zt" ) == 0 )
{
flags |= CV_CALIB_ZERO_TANGENT_DIST;
}
else if( strcmp( s, "-p" ) == 0 )
{
flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
}
else if( strcmp( s, "-o" ) == 0 )
{
outputFilename = argv[++i];
}
else if( s[0] != '-' )
{
inputFilename = s;
}
else
return fprintf( stderr, "Unknown option %s", s ), -1;
}
if( !inputFilename ||
!readStringList(inputFilename, imageList) ||
imageList.size() == 0 || imageList.size() % 3 != 0 )
{
printf("Error: the input image list is not specified, or can not be read, or the number of files is not divisible by 3\n");
return -1;
}
Mat view, viewGray;
Mat cameraMatrix[3], distCoeffs[3], R[3], P[3], R12, T12;
for( k = 0; k < 3; k++ )
{
cameraMatrix[k] = Mat_<double>::eye(3,3);
cameraMatrix[k].at<double>(0,0) = aspectRatio;
cameraMatrix[k].at<double>(1,1) = 1;
distCoeffs[k] = Mat_<double>::zeros(5,1);
}
Mat R13=Mat_<double>::eye(3,3), T13=Mat_<double>::zeros(3,1);
FileStorage fs;
namedWindow( "Image View", 0 );
for( k = 0; k < 3; k++ )
imgpt[k].resize(imageList.size()/3);
for( i = 0; i < (int)(imageList.size()/3); i++ )
{
for( k = 0; k < 3; k++ )
{
int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
printf("%s\n", imageList[i*3+k].c_str());
view = imread(imageList[i*3+k], 1);
if(view.data)
{
vector<Point2f> ptvec;
imageSize = view.size();
cvtColor(view, viewGray, CV_BGR2GRAY);
bool found = findChessboardCorners( view, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH );
drawChessboardCorners( view, boardSize, Mat(ptvec), found );
if( found )
{
imgpt[k1][i].resize(ptvec.size());
std::copy(ptvec.begin(), ptvec.end(), imgpt[k1][i].begin());
}
//imshow("view", view);
//int c = waitKey(0) & 255;
//if( c == 27 || c == 'q' || c == 'Q' )
// return -1;
}
}
}
printf("Running calibration ...\n");
run3Calibration(imgpt[0], imgpt[1], imgpt[2], imageSize,
boardSize, squareSize, aspectRatio, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5,
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
cameraMatrix[2], distCoeffs[2],
R12, T12, R13, T13);
fs.open(outputFilename, CV_STORAGE_WRITE);
fs << "cameraMatrix1" << cameraMatrix[0];
fs << "cameraMatrix2" << cameraMatrix[1];
fs << "cameraMatrix3" << cameraMatrix[2];
fs << "distCoeffs1" << distCoeffs[0];
fs << "distCoeffs2" << distCoeffs[1];
fs << "distCoeffs3" << distCoeffs[2];
fs << "R12" << R12;
fs << "T12" << T12;
fs << "R13" << R13;
fs << "T13" << T13;
fs << "imageWidth" << imageSize.width;
fs << "imageHeight" << imageSize.height;
fs.release();
Mat Q;
// step 3: find rectification transforms
rectify3(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
distCoeffs[1], cameraMatrix[2], distCoeffs[2],
imgpt[0], imgpt[2],
imageSize, R12, T12, R13, T13,
R[0], R[1], R[2], P[0], P[1], P[2], Q, -1.,
imageSize, 0, 0, CV_CALIB_ZERO_DISPARITY);
Mat map1[3], map2[3];
for( k = 0; k < 3; k++ )
initUndistortRectifyMap(cameraMatrix[k], distCoeffs[k], R[k], P[k], imageSize, CV_16SC2, map1[k], map2[k]);
Mat canvas(imageSize.height, imageSize.width*3, CV_8UC3), small_canvas;
destroyWindow("view");
canvas = Scalar::all(0);
for( i = 0; i < (int)(imageList.size()/3); i++ )
{
canvas = Scalar::all(0);
for( k = 0; k < 3; k++ )
{
int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
int k2 = k == 0 ? 1 : k == 1 ? 0 : 2;
view = imread(imageList[i*3+k], 1);
if(!view.data)
continue;
Mat rview = canvas.colRange(k2*imageSize.width, (k2+1)*imageSize.width);
remap(view, rview, map1[k1], map2[k1], CV_INTER_LINEAR);
}
printf("%s %s %s\n", imageList[i*3].c_str(), imageList[i*3+1].c_str(), imageList[i*3+2].c_str());
resize( canvas, small_canvas, Size(1500, 1500/3) );
for( k = 0; k < small_canvas.rows; k += 16 )
line(small_canvas, Point(0, k), Point(small_canvas.cols, k), Scalar(0,255,0), 1);
imshow("rectified", small_canvas);
int c = waitKey(0);
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
return 0;
}

@@ -82,7 +82,7 @@ static bool runCalibration( vector<vector<Point2f> > imagePoints,
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
distCoeffs = Mat::zeros(5, 1, CV_64F);
distCoeffs = Mat::zeros(8, 1, CV_64F);
vector<vector<Point3f> > objectPoints(1);
calcChessboardCorners(boardSize, squareSize, objectPoints[0]);
@@ -90,7 +90,7 @@ static bool runCalibration( vector<vector<Point2f> > imagePoints,
objectPoints.resize(imagePoints.size(),objectPoints[0]);
calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, flags);
distCoeffs, rvecs, tvecs, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);///*|CV_CALIB_FIX_K3*/|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
@@ -237,6 +237,7 @@ int main( int argc, char** argv )
int flags = 0;
VideoCapture capture;
bool flipVertical = false;
bool showUndistorted = false;
int delay = 1000;
clock_t prevTimestamp = 0;
int mode = DETECTION;
@@ -269,6 +270,7 @@ int main( int argc, char** argv )
" [-a <aspectRatio>] # fix aspect ratio (fx/fy)\n"
" [-p] # fix the principal point at the center\n"
" [-v] # flip the captured images around the horizontal axis\n"
" [-su] # show undistorted images after calibration\n"
" [input_data] # input data, one of the following:\n"
" # - text file with a list of the images of the board\n"
" # - name of video file with a video of the board\n"
@@ -336,6 +338,10 @@ int main( int argc, char** argv )
{
outputFilename = argv[++i];
}
else if( strcmp( s, "-su" ) == 0 )
{
showUndistorted = true;
}
else if( s[0] != '-' )
{
if( isdigit(s[0]) )
@@ -469,5 +475,27 @@ int main( int argc, char** argv )
break;
}
}
if( !capture.isOpened() && showUndistorted )
{
Mat view, rview, map1, map2;
initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
imageSize, CV_16SC2, map1, map2);
for( i = 0; i < (int)imageList.size(); i++ )
{
view = imread(imageList[i], 1);
if(!view.data)
continue;
//undistort( view, rview, cameraMatrix, distCoeffs, cameraMatrix );
remap(view, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
int c = waitKey();
if( (c & 255) == 27 || c == 'q' || c == 'Q' )
break;
}
}
return 0;
}
