Remove unused internal C functions

pull/26435/head
Vincent Rabaud 6 days ago
parent 6873bdee70
commit 3d89824423
  1. modules/calib3d/src/calib3d_c_api.h (−205)
  2. modules/calib3d/src/calibration.cpp (−91)
  3. modules/calib3d/src/checkchessboard.cpp (−13)
  4. modules/calib3d/src/compat_ptsetreg.cpp (−35)
  5. modules/calib3d/src/posit.cpp (−360)
  6. modules/calib3d/src/undistort.dispatch.cpp (−48)

modules/calib3d/src/calib3d_c_api.h
@@ -55,45 +55,8 @@ extern "C" {
* Camera Calibration, Pose Estimation and Stereo *
\****************************************************************************************/
typedef struct CvPOSITObject CvPOSITObject;
/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
CvPOSITObject* cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
an object given its model and projection in a weak-perspective case */
void cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
double focal_length, CvTermCriteria criteria,
float* rotation_matrix, float* translation_vector);
/* Releases CvPOSITObject structure */
void cvReleasePOSITObject( CvPOSITObject** posit_object );
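/* Hedged sketch of the C++ replacement for the POSIT entry points above: pose from 3D-2D
   correspondences via cv::solvePnP. The camera matrix and point lists are assumed inputs,
   not something defined in this header. */
#include <opencv2/calib3d.hpp>
#include <vector>

static cv::Mat poseSketch(const std::vector<cv::Point3f>& objectPoints,
                          const std::vector<cv::Point2f>& imagePoints,
                          const cv::Mat& cameraMatrix)
{
    cv::Mat rvec, tvec;                           // rotation (Rodrigues vector) and translation
    cv::solvePnP(objectPoints, imagePoints, cameraMatrix,
                 cv::noArray(),                   // no distortion, as in the POSIT model
                 rvec, tvec);
    cv::Mat R;
    cv::Rodrigues(rvec, R);                       // 3x3 matrix, analogous to rotation_matrix
    return R;
}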
/* updates the number of RANSAC iterations */
int cvRANSACUpdateNumIters( double p, double err_prob,
int model_points, int max_iters );
void cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
/* Calculates fundamental matrix given a set of corresponding points */
/*#define CV_FM_7POINT 1
#define CV_FM_8POINT 2
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC*/
int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fundamental_matrix,
int method CV_DEFAULT(CV_FM_RANSAC),
double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
CvMat* status CV_DEFAULT(NULL) );
/* For each input point on one of images
computes parameters of the corresponding
epipolar line on the other image */
@@ -102,15 +65,6 @@ void cvComputeCorrespondEpilines( const CvMat* points,
const CvMat* fundamental_matrix,
CvMat* correspondent_lines );
/* Triangulation functions */
void cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
CvMat* projPoints1, CvMat* projPoints2,
CvMat* points4D);
void cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
CvMat* new_points1, CvMat* new_points2);
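/* Hedged sketch of the C++ calls these declarations map to: cv::triangulatePoints and
   cv::correctMatches. The shapes noted in the comments are assumptions about the inputs. */
#include <opencv2/calib3d.hpp>

static void triangulateSketch(const cv::Mat& P1, const cv::Mat& P2,      // 3x4 projection matrices
                              const cv::Mat& pts1, const cv::Mat& pts2,  // 1xN CV_64FC2 point arrays
                              const cv::Mat& F)                          // 3x3 fundamental matrix
{
    cv::Mat points4D;                                // 4xN homogeneous reconstruction
    cv::triangulatePoints(P1, P2, pts1, pts2, points4D);

    cv::Mat newPts1, newPts2;                        // matches corrected to satisfy the epipolar constraint
    cv::correctMatches(F, pts1, pts2, newPts1, newPts2);
}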
/* Finds perspective transformation between the object plane and image (view) plane */
int cvFindHomography( const CvMat* src_points,
const CvMat* dst_points,
@@ -129,45 +83,6 @@ void cvInitIntrinsicParams2D( const CvMat* object_points,
CvMat* camera_matrix,
double aspect_ratio CV_DEFAULT(1.) );
// Performs a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
int cvCheckChessboard(IplImage* src, CvSize size);
/* Detects corners on a chessboard calibration pattern */
/*int cvFindChessboardCorners( const void* image, CvSize pattern_size,
CvPoint2D32f* corners,
int* corner_count CV_DEFAULT(NULL),
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );*/
/* Draws individual chessboard corners or the whole chessboard detected */
/*void cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
CvPoint2D32f* corners,
int count, int pattern_was_found );*/
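/* The commented-out declarations correspond to cv::findChessboardCorners and
   cv::drawChessboardCorners; a minimal sketch of that C++ path (the flag choice is illustrative). */
#include <opencv2/calib3d.hpp>
#include <vector>

static bool detectBoardSketch(cv::Mat& image, cv::Size patternSize)
{
    std::vector<cv::Point2f> corners;
    bool found = cv::findChessboardCorners(image, patternSize, corners,
                                           cv::CALIB_CB_ADAPTIVE_THRESH |
                                           cv::CALIB_CB_NORMALIZE_IMAGE);
    cv::drawChessboardCorners(image, patternSize, corners, found);
    return found;
}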
/*#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_K4 2048
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
#define CV_CALIB_RATIONAL_MODEL 16384
#define CV_CALIB_THIN_PRISM_MODEL 32768
#define CV_CALIB_FIX_S1_S2_S3_S4 65536
#define CV_CALIB_TILTED_MODEL 262144
#define CV_CALIB_FIX_TAUX_TAUY 524288
#define CV_CALIB_FIX_TANGENT_DIST 2097152
#define CV_CALIB_NINTRINSIC 18*/
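/* The CV_CALIB_* values above mirror the cv::CALIB_* constants that remain in the C++ API.
   A hedged sketch of how they are combined; the particular flags picked here are illustrative. */
#include <opencv2/calib3d.hpp>
#include <vector>

static double calibrateSketch(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                              const std::vector<std::vector<cv::Point2f> >& imagePoints,
                              cv::Size imageSize, cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
    cameraMatrix = cv::Mat::eye(3, 3, CV_64F);   // fx/fy ratio consumed by CALIB_FIX_ASPECT_RATIO
    int flags = cv::CALIB_FIX_ASPECT_RATIO       // was CV_CALIB_FIX_ASPECT_RATIO (2)
              | cv::CALIB_ZERO_TANGENT_DIST      // was CV_CALIB_ZERO_TANGENT_DIST (8)
              | cv::CALIB_RATIONAL_MODEL;        // was CV_CALIB_RATIONAL_MODEL (16384)
    std::vector<cv::Mat> rvecs, tvecs;
    return cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                               cameraMatrix, distCoeffs, rvecs, tvecs, flags);
}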
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
double cvCalibrateCamera2( const CvMat* object_points,
@@ -182,37 +97,6 @@ double cvCalibrateCamera2( const CvMat* object_points,
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
double cvCalibrateCamera4( const CvMat* object_points,
const CvMat* image_points,
const CvMat* point_counts,
CvSize image_size,
int iFixedPoint,
CvMat* camera_matrix,
CvMat* distortion_coeffs,
CvMat* rotation_vectors CV_DEFAULT(NULL),
CvMat* translation_vectors CV_DEFAULT(NULL),
CvMat* newObjPoints CV_DEFAULT(NULL),
int flags CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
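/* cvCalibrateCamera4 wrapped the object-releasing calibration; the C++ entry point
   cv::calibrateCameraRO stays. A hedged sketch; fixing the top-right corner of the board
   (iFixedPoint = boardSize.width - 1) is an assumption about the pattern, not part of this header. */
#include <opencv2/calib3d.hpp>
#include <vector>

static double calibrateReleasingSketch(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                                       const std::vector<std::vector<cv::Point2f> >& imagePoints,
                                       cv::Size imageSize, cv::Size boardSize,
                                       cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
    int iFixedPoint = boardSize.width - 1;       // > 0 switches the object-releasing method on
    std::vector<cv::Mat> rvecs, tvecs;
    cv::Mat newObjPoints;                        // refined 3D coordinates of the pattern
    return cv::calibrateCameraRO(objectPoints, imagePoints, imageSize, iFixedPoint,
                                 cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints);
}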
/* Computes various useful characteristics of the camera from the data computed by
cvCalibrateCamera2 */
void cvCalibrationMatrixValues( const CvMat *camera_matrix,
CvSize image_size,
double aperture_width CV_DEFAULT(0),
double aperture_height CV_DEFAULT(0),
double *fovx CV_DEFAULT(NULL),
double *fovy CV_DEFAULT(NULL),
double *focal_length CV_DEFAULT(NULL),
CvPoint2D64f *principal_point CV_DEFAULT(NULL),
double *pixel_aspect_ratio CV_DEFAULT(NULL));
/*#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512*/
/* Computes the transformation from one camera coordinate system to another one
from a few correspondent views of the same calibration target. Optionally, calibrates
both cameras */
@@ -248,95 +132,6 @@ int cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
CvMat* H1, CvMat* H2,
double threshold CV_DEFAULT(5));
/* stereo correspondence parameters and functions */
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_XSOBEL 1
/* Block matching algorithm structure */
typedef struct CvStereoBMState
{
// pre-filtering (normalization of input images)
int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
int preFilterSize; // averaging window size: ~5x5..21x21
int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
// correspondence using Sum of Absolute Difference (SAD)
int SADWindowSize; // ~5x5..21x21
int minDisparity; // minimum disparity (can be negative)
int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
// post-filtering
int textureThreshold; // the disparity is only computed for pixels
// with textured enough neighborhood
int uniquenessRatio; // accept the computed disparity d* only if
// SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
// for any d != d*+/-1 within the search range.
int speckleWindowSize; // disparity variation window
int speckleRange; // acceptable range of variation in window
int trySmallerWindows; // if 1, the results may be more accurate,
// at the expense of slower processing
CvRect roi1, roi2;
int disp12MaxDiff;
// temporary buffers
CvMat* preFilteredImg0;
CvMat* preFilteredImg1;
CvMat* slidingSumBuf;
CvMat* cost;
CvMat* disp;
} CvStereoBMState;
#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2
CvStereoBMState* cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
int numberOfDisparities CV_DEFAULT(0));
void cvReleaseStereoBMState( CvStereoBMState** state );
void cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
CvArr* disparity, CvStereoBMState* state );
CvRect cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize );
void cvValidateDisparity( CvArr* disparity, const CvArr* cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDiff CV_DEFAULT(1) );
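/* CvStereoBMState and its helpers are superseded by the cv::StereoBM class. A hedged sketch of
   the mapping; the parameter values are illustrative, and left/right are assumed to be 8-bit
   single-channel rectified images. */
#include <opencv2/calib3d.hpp>

static cv::Mat disparitySketch(const cv::Mat& left, const cv::Mat& right)
{
    cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create(/*numDisparities=*/64,
                                                    /*blockSize=*/15);  // SADWindowSize
    bm->setPreFilterCap(31);        // preFilterCap
    bm->setTextureThreshold(10);    // textureThreshold
    bm->setUniquenessRatio(15);     // uniquenessRatio
    bm->setSpeckleWindowSize(100);  // speckleWindowSize
    bm->setSpeckleRange(32);        // speckleRange
    bm->setDisp12MaxDiff(1);        // disp12MaxDiff

    cv::Mat disparity;              // CV_16S, fixed-point disparities scaled by 16
    bm->compute(left, right, disparity);
    return disparity;
}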
/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
void cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* Q,
int handleMissingValues CV_DEFAULT(0) );
/** @brief Transforms the input image to compensate lens distortion
@see cv::undistort
*/
void cvUndistort2( const CvArr* src, CvArr* dst,
const CvMat* camera_matrix,
const CvMat* distortion_coeffs,
const CvMat* new_camera_matrix CV_DEFAULT(0) );
/** @brief Computes transformation map from intrinsic camera parameters
that can be used by cvRemap
*/
void cvInitUndistortMap( const CvMat* camera_matrix,
const CvMat* distortion_coeffs,
CvArr* mapx, CvArr* mapy );
/** @brief Computes undistortion+rectification map for a head of stereo camera
@see cv::initUndistortRectifyMap
*/
void cvInitUndistortRectifyMap( const CvMat* camera_matrix,
const CvMat* dist_coeffs,
const CvMat *R, const CvMat* new_camera_matrix,
CvArr* mapx, CvArr* mapy );
/** @brief Computes the original (undistorted) feature coordinates
from the observed (distorted) coordinates
@see cv::undistortPoints

modules/calib3d/src/calibration.cpp
@@ -679,82 +679,6 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
distCoeffs, rvecs, tvecs, NULL, NULL, NULL, flags, termCrit);
}
CV_IMPL double cvCalibrateCamera4( const CvMat* objectPoints,
const CvMat* imagePoints, const CvMat* npoints,
CvSize imageSize, int iFixedPoint, CvMat* cameraMatrix, CvMat* distCoeffs,
CvMat* rvecs, CvMat* tvecs, CvMat* newObjPoints, int flags, CvTermCriteria termCrit )
{
if( !CV_IS_MAT(npoints) )
CV_Error( cv::Error::StsBadArg, "npoints is not a valid matrix" );
if( CV_MAT_TYPE(npoints->type) != CV_32SC1 ||
(npoints->rows != 1 && npoints->cols != 1) )
CV_Error( cv::Error::StsUnsupportedFormat,
"the array of point counters must be 1-dimensional integer vector" );
bool releaseObject = iFixedPoint > 0 && iFixedPoint < npoints->data.i[0] - 1;
int nimages = npoints->rows * npoints->cols;
int npstep = npoints->rows == 1 ? 1 : npoints->step / CV_ELEM_SIZE(npoints->type);
int i, ni;
// check object points. If not qualified, report errors.
if( releaseObject )
{
if( !CV_IS_MAT(objectPoints) )
CV_Error( cv::Error::StsBadArg, "objectPoints is not a valid matrix" );
Mat matM;
if(CV_MAT_CN(objectPoints->type) == 3) {
matM = cvarrToMat(objectPoints);
} else {
convertPointsHomogeneous(cvarrToMat(objectPoints), matM);
}
matM = matM.reshape(3, 1);
ni = npoints->data.i[0];
for( i = 1; i < nimages; i++ )
{
if( npoints->data.i[i * npstep] != ni )
{
CV_Error( cv::Error::StsBadArg, "All objectPoints[i].size() should be equal when "
"object-releasing method is requested." );
}
Mat ocmp = matM.colRange(ni * i, ni * i + ni) != matM.colRange(0, ni);
ocmp = ocmp.reshape(1);
if( countNonZero(ocmp) )
{
CV_Error( cv::Error::StsBadArg, "All objectPoints[i] should be identical when object-releasing"
" method is requested." );
}
}
}
return cvCalibrateCamera2Internal(objectPoints, imagePoints, npoints, imageSize, iFixedPoint,
cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints, NULL,
NULL, flags, termCrit);
}
void cvCalibrationMatrixValues( const CvMat *calibMatr, CvSize imgSize,
double apertureWidth, double apertureHeight, double *fovx, double *fovy,
double *focalLength, CvPoint2D64f *principalPoint, double *pasp )
{
/* Validate parameters. */
if(calibMatr == 0)
CV_Error(cv::Error::StsNullPtr, "Some of parameters is a NULL pointer!");
if(!CV_IS_MAT(calibMatr))
CV_Error(cv::Error::StsUnsupportedFormat, "Input parameters must be matrices!");
double dummy = .0;
Point2d pp;
cv::calibrationMatrixValues(cvarrToMat(calibMatr), imgSize, apertureWidth, apertureHeight,
fovx ? *fovx : dummy,
fovy ? *fovy : dummy,
focalLength ? *focalLength : dummy,
pp,
pasp ? *pasp : dummy);
if(principalPoint)
*principalPoint = cvPoint2D64f(pp.x, pp.y);
}
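// The removed wrapper simply forwarded to cv::calibrationMatrixValues; a hedged usage sketch
// (the sensor aperture dimensions are placeholder values, not taken from this patch).
#include <opencv2/calib3d.hpp>

static void matrixValuesSketch(const cv::Mat& cameraMatrix, cv::Size imageSize)
{
    double apertureWidth = 6.4, apertureHeight = 4.8;   // physical sensor size in mm (placeholders)
    double fovx, fovy, focalLength, aspectRatio;
    cv::Point2d principalPoint;
    cv::calibrationMatrixValues(cameraMatrix, imageSize, apertureWidth, apertureHeight,
                                fovx, fovy, focalLength, principalPoint, aspectRatio);
}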
//////////////////////////////// Stereo Calibration ///////////////////////////////////
@@ -1870,21 +1794,6 @@ void cv::reprojectImageTo3D( InputArray _disparity,
}
void cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* matQ,
int handleMissingValues )
{
cv::Mat disp = cv::cvarrToMat(disparityImage);
cv::Mat _3dimg = cv::cvarrToMat(_3dImage);
cv::Mat mq = cv::cvarrToMat(matQ);
CV_Assert( disp.size() == _3dimg.size() );
int dtype = _3dimg.type();
CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
cv::reprojectImageTo3D(disp, _3dimg, mq, handleMissingValues != 0, dtype );
}
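// The wrapper above forwarded to cv::reprojectImageTo3D; a hedged sketch of the C++ call.
// StereoBM's fixed-point CV_16S output should be scaled by 1/16 before (or while) converting.
#include <opencv2/calib3d.hpp>

static cv::Mat pointCloudSketch(const cv::Mat& disparity, const cv::Mat& Q)  // disparity in pixels
{
    cv::Mat xyz;                                             // CV_32FC3 output
    cv::reprojectImageTo3D(disparity, xyz, Q,
                           /*handleMissingValues=*/true,     // large Z for invalid disparities
                           CV_32F);
    return xyz;
}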
namespace cv
{

modules/calib3d/src/checkchessboard.cpp
@@ -40,7 +40,6 @@
//M*/
#include "precomp.hpp"
#include "calib3d_c_api.h"
#include <vector>
#include <algorithm>
@@ -150,18 +149,6 @@ static bool checkQuads(vector<pair<float, int> > & quads, const cv::Size & size)
return false;
}
// does a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
int cvCheckChessboard(IplImage* src, CvSize size)
{
cv::Mat img = cv::cvarrToMat(src);
return (int)cv::checkChessboard(img, size);
}
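// The same fast plausibility check stays reachable in the C++ API, either through
// cv::checkChessboard (kept below) or by letting cv::findChessboardCorners perform it via
// CALIB_CB_FAST_CHECK. A hedged sketch of the latter:
#include <opencv2/calib3d.hpp>
#include <vector>

static bool findBoardFastSketch(const cv::Mat& image, cv::Size patternSize,
                                std::vector<cv::Point2f>& corners)
{
    return cv::findChessboardCorners(image, patternSize, corners,
                                     cv::CALIB_CB_ADAPTIVE_THRESH |
                                     cv::CALIB_CB_NORMALIZE_IMAGE |
                                     cv::CALIB_CB_FAST_CHECK);
}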
bool cv::checkChessboard(InputArray _img, Size size)
{
Mat img = _img.getMat();

modules/calib3d/src/compat_ptsetreg.cpp
@@ -322,12 +322,6 @@ void CvLevMarq::step()
param->data.db[i] = prevParam->data.db[i] - (mask->data.ptr[i] ? nonzero_param(j++) : 0);
}
CV_IMPL int cvRANSACUpdateNumIters( double p, double ep, int modelPoints, int maxIters )
{
return cv::RANSACUpdateNumIters(p, ep, modelPoints, maxIters);
}
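// cv::RANSACUpdateNumIters, which the removed wrapper called, evaluates the standard RANSAC
// iteration bound N = log(1 - p) / log(1 - (1 - ep)^modelPoints). A hedged sketch of that
// computation (the exact clamping in OpenCV differs slightly):
#include <cmath>
#include <cfloat>
#include <algorithm>

static int ransacItersSketch(double p, double ep, int modelPoints, int maxIters)
{
    double num   = std::log(std::max(1.0 - p, DBL_MIN));                              // <= 0
    double denom = std::log(std::max(1.0 - std::pow(1.0 - ep, modelPoints), DBL_MIN)); // < 0 normally
    if (denom >= 0)
        return maxIters;                    // degenerate input: keep the caller's cap
    return std::min(maxIters, (int)std::ceil(num / denom));
}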
CV_IMPL int cvFindHomography( const CvMat* _src, const CvMat* _dst, CvMat* __H, int method,
double ransacReprojThreshold, CvMat* _mask, int maxIters,
double confidence)
@@ -365,35 +359,6 @@ CV_IMPL int cvFindHomography( const CvMat* _src, const CvMat* _dst, CvMat* __H,
}
CV_IMPL int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fmatrix, int method,
double param1, double param2, CvMat* _mask )
{
cv::Mat m1 = cv::cvarrToMat(points1), m2 = cv::cvarrToMat(points2);
if( m1.channels() == 1 && (m1.rows == 2 || m1.rows == 3) && m1.cols > 3 )
cv::transpose(m1, m1);
if( m2.channels() == 1 && (m2.rows == 2 || m2.rows == 3) && m2.cols > 3 )
cv::transpose(m2, m2);
const cv::Mat FM = cv::cvarrToMat(fmatrix), mask = cv::cvarrToMat(_mask);
cv::Mat FM0 = cv::findFundamentalMat(m1, m2, method, param1, param2,
_mask ? cv::_OutputArray(mask) : cv::_OutputArray());
if( FM0.empty() )
{
cv::Mat FM0z = cv::cvarrToMat(fmatrix);
FM0z.setTo(cv::Scalar::all(0));
return 0;
}
CV_Assert( FM0.cols == 3 && FM0.rows % 3 == 0 && FM.cols == 3 && FM.rows % 3 == 0 && FM.channels() == 1 );
cv::Mat FM1 = FM.rowRange(0, MIN(FM0.rows, FM.rows));
FM0.rowRange(0, FM1.rows).convertTo(FM1, FM1.type());
return FM1.rows / 3;
}
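// The removed wrapper forwarded to cv::findFundamentalMat; a hedged sketch pairing it with
// cv::computeCorrespondEpilines, which the next function wraps.
#include <opencv2/calib3d.hpp>
#include <vector>

static cv::Mat epipolarSketch(const std::vector<cv::Point2f>& pts1,
                              const std::vector<cv::Point2f>& pts2)
{
    std::vector<uchar> inlierMask;
    cv::Mat F = cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC,
                                       /*ransacReprojThreshold=*/3., /*confidence=*/0.99,
                                       inlierMask);
    std::vector<cv::Vec3f> lines;                 // one (a, b, c) line per point: a*x + b*y + c = 0
    if (!F.empty())
        cv::computeCorrespondEpilines(pts1, /*whichImage=*/1, F, lines);
    return F;
}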
CV_IMPL void cvComputeCorrespondEpilines( const CvMat* points, int pointImageID,
const CvMat* fmatrix, CvMat* _lines )
{

modules/calib3d/src/posit.cpp
@@ -1,360 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "calib3d_c_api.h"
/* POSIT structure */
struct CvPOSITObject
{
int N;
float* inv_matr;
float* obj_vecs;
float* img_vecs;
};
static void icvPseudoInverse3D( float *a, float *b, int n, int method );
static CvStatus icvCreatePOSITObject( CvPoint3D32f *points,
int numPoints,
CvPOSITObject **ppObject )
{
int i;
/* Compute size of required memory */
/* buffer for inverse matrix = N*3*float */
/* buffer for storing weakImagePoints = numPoints * 2 * float */
/* buffer for storing object vectors = N*3*float */
/* buffer for storing image vectors = N*2*float */
int N = numPoints - 1;
int inv_matr_size = N * 3 * sizeof( float );
int obj_vec_size = inv_matr_size;
int img_vec_size = N * 2 * sizeof( float );
CvPOSITObject *pObject;
/* check bad arguments */
if( points == NULL )
return CV_NULLPTR_ERR;
if( numPoints < 4 )
return CV_BADSIZE_ERR;
if( ppObject == NULL )
return CV_NULLPTR_ERR;
/* memory allocation */
pObject = (CvPOSITObject *) cvAlloc( sizeof( CvPOSITObject ) +
inv_matr_size + obj_vec_size + img_vec_size );
if( !pObject )
return CV_OUTOFMEM_ERR;
/* part the memory between all structures */
pObject->N = N;
pObject->inv_matr = (float *) ((char *) pObject + sizeof( CvPOSITObject ));
pObject->obj_vecs = (float *) ((char *) (pObject->inv_matr) + inv_matr_size);
pObject->img_vecs = (float *) ((char *) (pObject->obj_vecs) + obj_vec_size);
/****************************************************************************************\
* Construct object vectors from object points *
\****************************************************************************************/
for( i = 0; i < numPoints - 1; i++ )
{
pObject->obj_vecs[i] = points[i + 1].x - points[0].x;
pObject->obj_vecs[N + i] = points[i + 1].y - points[0].y;
pObject->obj_vecs[2 * N + i] = points[i + 1].z - points[0].z;
}
/****************************************************************************************\
* Compute pseudoinverse matrix *
\****************************************************************************************/
icvPseudoInverse3D( pObject->obj_vecs, pObject->inv_matr, N, 0 );
*ppObject = pObject;
return CV_NO_ERR;
}
static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
float focalLength, CvTermCriteria criteria,
float* rotation, float* translation )
{
int i, j, k;
int count = 0;
bool converged = false;
float scale = 0, inv_Z = 0;
float diff = (float)criteria.epsilon;
/* Check bad arguments */
if( imagePoints == NULL )
return CV_NULLPTR_ERR;
if( pObject == NULL )
return CV_NULLPTR_ERR;
if( focalLength <= 0 )
return CV_BADFACTOR_ERR;
if( !rotation )
return CV_NULLPTR_ERR;
if( !translation )
return CV_NULLPTR_ERR;
if( (criteria.type == 0) || (criteria.type > (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS)))
return CV_BADFLAG_ERR;
if( (criteria.type & CV_TERMCRIT_EPS) && criteria.epsilon < 0 )
return CV_BADFACTOR_ERR;
if( (criteria.type & CV_TERMCRIT_ITER) && criteria.max_iter <= 0 )
return CV_BADFACTOR_ERR;
/* init variables */
float inv_focalLength = 1 / focalLength;
int N = pObject->N;
float *objectVectors = pObject->obj_vecs;
float *invMatrix = pObject->inv_matr;
float *imgVectors = pObject->img_vecs;
while( !converged )
{
if( count == 0 )
{
/* subtract out origin to get image vectors */
for( i = 0; i < N; i++ )
{
imgVectors[i] = imagePoints[i + 1].x - imagePoints[0].x;
imgVectors[N + i] = imagePoints[i + 1].y - imagePoints[0].y;
}
}
else
{
diff = 0;
/* Compute new SOP (scaled orthographic projection) image from pose */
for( i = 0; i < N; i++ )
{
/* objectVector * k */
float old;
float tmp = objectVectors[i] * rotation[6] /*[2][0]*/ +
objectVectors[N + i] * rotation[7] /*[2][1]*/ +
objectVectors[2 * N + i] * rotation[8] /*[2][2]*/;
tmp *= inv_Z;
tmp += 1;
old = imgVectors[i];
imgVectors[i] = imagePoints[i + 1].x * tmp - imagePoints[0].x;
diff = MAX( diff, (float) fabs( imgVectors[i] - old ));
old = imgVectors[N + i];
imgVectors[N + i] = imagePoints[i + 1].y * tmp - imagePoints[0].y;
diff = MAX( diff, (float) fabs( imgVectors[N + i] - old ));
}
}
/* calculate I and J vectors */
for( i = 0; i < 2; i++ )
{
for( j = 0; j < 3; j++ )
{
rotation[3*i+j] /*[i][j]*/ = 0;
for( k = 0; k < N; k++ )
{
rotation[3*i+j] /*[i][j]*/ += invMatrix[j * N + k] * imgVectors[i * N + k];
}
}
}
float inorm =
rotation[0] /*[0][0]*/ * rotation[0] /*[0][0]*/ +
rotation[1] /*[0][1]*/ * rotation[1] /*[0][1]*/ +
rotation[2] /*[0][2]*/ * rotation[2] /*[0][2]*/;
float jnorm =
rotation[3] /*[1][0]*/ * rotation[3] /*[1][0]*/ +
rotation[4] /*[1][1]*/ * rotation[4] /*[1][1]*/ +
rotation[5] /*[1][2]*/ * rotation[5] /*[1][2]*/;
const float invInorm = cvInvSqrt( inorm );
const float invJnorm = cvInvSqrt( jnorm );
inorm *= invInorm;
jnorm *= invJnorm;
rotation[0] /*[0][0]*/ *= invInorm;
rotation[1] /*[0][1]*/ *= invInorm;
rotation[2] /*[0][2]*/ *= invInorm;
rotation[3] /*[1][0]*/ *= invJnorm;
rotation[4] /*[1][1]*/ *= invJnorm;
rotation[5] /*[1][2]*/ *= invJnorm;
/* row2 = row0 x row1 (cross product) */
rotation[6] /*->m[2][0]*/ = rotation[1] /*->m[0][1]*/ * rotation[5] /*->m[1][2]*/ -
rotation[2] /*->m[0][2]*/ * rotation[4] /*->m[1][1]*/;
rotation[7] /*->m[2][1]*/ = rotation[2] /*->m[0][2]*/ * rotation[3] /*->m[1][0]*/ -
rotation[0] /*->m[0][0]*/ * rotation[5] /*->m[1][2]*/;
rotation[8] /*->m[2][2]*/ = rotation[0] /*->m[0][0]*/ * rotation[4] /*->m[1][1]*/ -
rotation[1] /*->m[0][1]*/ * rotation[3] /*->m[1][0]*/;
scale = (inorm + jnorm) / 2.0f;
inv_Z = scale * inv_focalLength;
count++;
converged = ((criteria.type & CV_TERMCRIT_EPS) && (diff < criteria.epsilon))
|| ((criteria.type & CV_TERMCRIT_ITER) && (count == criteria.max_iter));
}
const float invScale = 1 / scale;
translation[0] = imagePoints[0].x * invScale;
translation[1] = imagePoints[0].y * invScale;
translation[2] = 1 / inv_Z;
return CV_NO_ERR;
}
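/* In short: with I and J the first two rows of the rotation estimate before normalization,
   the weak-perspective scale is s = (|I| + |J|) / 2, inv_Z = s / focalLength, and the translation
   is T = (x0 / s, y0 / s, focalLength / s), where (x0, y0) = imagePoints[0]. */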
static CvStatus icvReleasePOSITObject( CvPOSITObject ** ppObject )
{
cvFree( ppObject );
return CV_NO_ERR;
}
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: icvPseudoInverse3D
// Purpose: Pseudoinverse N x 3 matrix N >= 3
// Context:
// Parameters:
// a - input matrix
// b - pseudoinversed a
// n - number of rows in a
// method - if 0, then b = inv(transpose(a)*a) * transpose(a)
// if 1, then SVD used.
// Returns:
// Notes: Both matrices are stored as n-dimensional vectors.
// Now only method == 0 supported.
//F*/
void
icvPseudoInverse3D( float *a, float *b, int n, int method )
{
if( method == 0 )
{
float ata00 = 0;
float ata11 = 0;
float ata22 = 0;
float ata01 = 0;
float ata02 = 0;
float ata12 = 0;
int k;
/* compute matrix ata = transpose(a) * a */
for( k = 0; k < n; k++ )
{
float a0 = a[k];
float a1 = a[n + k];
float a2 = a[2 * n + k];
ata00 += a0 * a0;
ata11 += a1 * a1;
ata22 += a2 * a2;
ata01 += a0 * a1;
ata02 += a0 * a2;
ata12 += a1 * a2;
}
/* inverse matrix ata */
{
float p00 = ata11 * ata22 - ata12 * ata12;
float p01 = -(ata01 * ata22 - ata12 * ata02);
float p02 = ata12 * ata01 - ata11 * ata02;
float p11 = ata00 * ata22 - ata02 * ata02;
float p12 = -(ata00 * ata12 - ata01 * ata02);
float p22 = ata00 * ata11 - ata01 * ata01;
float det = 0;
det += ata00 * p00;
det += ata01 * p01;
det += ata02 * p02;
const float inv_det = 1 / det;
/* compute resultant matrix */
for( k = 0; k < n; k++ )
{
float a0 = a[k];
float a1 = a[n + k];
float a2 = a[2 * n + k];
b[k] = (p00 * a0 + p01 * a1 + p02 * a2) * inv_det;
b[n + k] = (p01 * a0 + p11 * a1 + p12 * a2) * inv_det;
b[2 * n + k] = (p02 * a0 + p12 * a1 + p22 * a2) * inv_det;
}
}
}
/*if ( method == 1 )
{
}
*/
return;
}
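/* For comparison only, a hedged sketch of the same pseudoinverse written with the cv::Mat API:
   for an n x 3 matrix A, B = (A^T A)^-1 A^T, matching method == 0 above. */
#include <opencv2/core.hpp>

static cv::Mat pseudoInverse3Sketch(const cv::Mat& A)   // n x 3, CV_32F or CV_64F
{
    CV_Assert(A.cols == 3 && A.rows >= 3);
    cv::Mat AtA = A.t() * A;                  // 3x3 normal-equation matrix
    return AtA.inv(cv::DECOMP_SVD) * A.t();   // 3 x n pseudoinverse
}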
CV_IMPL CvPOSITObject *
cvCreatePOSITObject( CvPoint3D32f * points, int numPoints )
{
CvPOSITObject *pObject = 0;
IPPI_CALL( icvCreatePOSITObject( points, numPoints, &pObject ));
return pObject;
}
CV_IMPL void
cvPOSIT( CvPOSITObject * pObject, CvPoint2D32f * imagePoints,
double focalLength, CvTermCriteria criteria,
float* rotation, float* translation )
{
IPPI_CALL( icvPOSIT( pObject, imagePoints,(float) focalLength, criteria,
rotation, translation ));
}
CV_IMPL void
cvReleasePOSITObject( CvPOSITObject ** ppObject )
{
IPPI_CALL( icvReleasePOSITObject( ppObject ));
}
/* End of file. */

modules/calib3d/src/undistort.dispatch.cpp
@@ -334,54 +334,6 @@ void undistort( InputArray _src, OutputArray _dst, InputArray _cameraMatrix,
}
CV_IMPL void
cvUndistort2( const CvArr* srcarr, CvArr* dstarr, const CvMat* Aarr, const CvMat* dist_coeffs, const CvMat* newAarr )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;
cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs), newA;
if( newAarr )
newA = cv::cvarrToMat(newAarr);
CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
cv::undistort( src, dst, A, distCoeffs, newA );
}
CV_IMPL void cvInitUndistortMap( const CvMat* Aarr, const CvMat* dist_coeffs,
CvArr* mapxarr, CvArr* mapyarr )
{
cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs);
cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;
if( mapyarr )
mapy0 = mapy = cv::cvarrToMat(mapyarr);
cv::initUndistortRectifyMap( A, distCoeffs, cv::Mat(), A,
mapx.size(), mapx.type(), mapx, mapy );
CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );
}
void
cvInitUndistortRectifyMap( const CvMat* Aarr, const CvMat* dist_coeffs,
const CvMat *Rarr, const CvMat* ArArr, CvArr* mapxarr, CvArr* mapyarr )
{
cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs, R, Ar;
cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;
if( mapyarr )
mapy0 = mapy = cv::cvarrToMat(mapyarr);
if( dist_coeffs )
distCoeffs = cv::cvarrToMat(dist_coeffs);
if( Rarr )
R = cv::cvarrToMat(Rarr);
if( ArArr )
Ar = cv::cvarrToMat(ArArr);
cv::initUndistortRectifyMap( A, distCoeffs, R, Ar, mapx.size(), mapx.type(), mapx, mapy );
CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );
}
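// The wrappers removed above forwarded to the map-based C++ path; a hedged sketch of that path.
// Passing cv::Mat() for R and the original camera matrix for the new projection reproduces the
// behaviour of the old cvInitUndistortMap.
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>

static cv::Mat undistortWithMapsSketch(const cv::Mat& src, const cv::Mat& cameraMatrix,
                                       const cv::Mat& distCoeffs)
{
    cv::Mat map1, map2;
    cv::initUndistortRectifyMap(cameraMatrix, distCoeffs,
                                cv::Mat(),            // no rectification
                                cameraMatrix,         // keep the same projection
                                src.size(), CV_32FC1, map1, map2);
    cv::Mat dst;
    cv::remap(src, dst, map1, map2, cv::INTER_LINEAR);
    return dst;
}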
static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
const CvMat* _distCoeffs,
const CvMat* matR, const CvMat* matP, cv::TermCriteria criteria)
