Merge pull request #12248 from hrnr:video_remove_ransac

* video: remove duplicate RANSAC code

* remove RANSAC code from the video module. The module now uses RANSAC estimators from calib3d.
* deprecate estimateRigidTransform

* replace internal usage of deprecated estimateRigidTransform

* remove from wrappers
* replace usage in the shape module. The shape module now links to calib3d instead of the video module.
* deprecate the C API version as well

* remove cvEstimateRigidTransform

* suppress deprecation warnings in the estimateRigidTransform test

* the function is now deprecated
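For downstream users, the migration for point-set inputs is mechanical. A minimal sketch (illustrative only, not part of this patch; the helper name is made up):

    #include <opencv2/calib3d.hpp>  // estimateAffine2D, estimateAffinePartial2D
    #include <vector>

    // Hypothetical drop-in helper mirroring the old estimateRigidTransform(src, dst, fullAffine)
    cv::Mat estimateRigidLike(const std::vector<cv::Point2f>& src,
                              const std::vector<cv::Point2f>& dst,
                              bool fullAffine)
    {
        // The calib3d estimators return a 2x3 CV_64F matrix, or an empty Mat
        // when RANSAC finds no consensus (the same failure mode as the old function).
        return fullAffine ? cv::estimateAffine2D(src, dst)
                          : cv::estimateAffinePartial2D(src, dst);
    }

Callers should keep checking the result with Mat::empty(), exactly as the shape module does in the diff below.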
Authored by Jiri Horner, committed by Alexander Alekhin
parent 7d4bb9428b
commit 49283ec035
Changed files:
  1. modules/shape/CMakeLists.txt (2 lines changed)
  2. modules/shape/src/aff_trans.cpp (12 lines changed)
  3. modules/shape/src/precomp.hpp (2 lines changed)
  4. modules/video/CMakeLists.txt (2 lines changed)
  5. modules/video/include/opencv2/video/tracking.hpp (8 lines changed)
  6. modules/video/include/opencv2/video/tracking_c.h (4 lines changed)
  7. modules/video/src/compat_video.cpp (18 lines changed)
  8. modules/video/src/lkpyramid.cpp (207 lines changed)
  9. modules/video/test/test_estimaterigid.cpp (9 lines changed)

modules/shape/CMakeLists.txt
@@ -1,2 +1,2 @@
 set(the_description "Shape descriptors and matchers")
-ocv_define_module(shape opencv_core opencv_imgproc opencv_video WRAP python)
+ocv_define_module(shape opencv_core opencv_imgproc opencv_calib3d WRAP python)

modules/shape/src/aff_trans.cpp
@@ -222,12 +222,18 @@ void AffineTransformerImpl::estimateTransformation(InputArray _pts1, InputArray
         shape2.push_back(pt2);
     }
 
-    // estimateRigidTransform //
     Mat affine;
-    estimateRigidTransform(shape1, shape2, fullAffine).convertTo(affine, CV_32F);
+    if (fullAffine)
+    {
+        estimateAffine2D(shape1, shape2).convertTo(affine, CV_32F);
+    } else
+    {
+        estimateAffinePartial2D(shape1, shape2).convertTo(affine, CV_32F);
+    }
 
     if (affine.empty())
-        affine=_localAffineEstimate(shape1, shape2, fullAffine); //In case there is not good solution, just give a LLS based one
+        //In case there is not good solution, just give a LLS based one
+        affine = _localAffineEstimate(shape1, shape2, fullAffine);
 
     affineMat = affine;
 }

modules/shape/src/precomp.hpp
@@ -47,7 +47,7 @@
 #include <cmath>
 #include <iostream>
-#include "opencv2/video/tracking.hpp"
+#include "opencv2/calib3d.hpp"
 #include "opencv2/imgproc.hpp"
 #include "opencv2/shape.hpp"

modules/video/CMakeLists.txt
@@ -1,2 +1,2 @@
 set(the_description "Video Analysis")
-ocv_define_module(video opencv_imgproc WRAP java python js)
+ocv_define_module(video opencv_imgproc opencv_calib3d WRAP java python js)

modules/video/include/opencv2/video/tracking.hpp
@@ -249,13 +249,13 @@ where src[i] and dst[i] are the i-th points in src and dst, respectively
 \f[\begin{bmatrix} a_{11} & a_{12} & b_1 \\ -a_{12} & a_{11} & b_2 \end{bmatrix}\f]
 when fullAffine=false.
 
+@deprecated Use cv::estimateAffine2D, cv::estimateAffinePartial2D instead. If you are using this function
+with images, extract points using cv::calcOpticalFlowPyrLK and then use the estimation functions.
+
 @sa
 estimateAffine2D, estimateAffinePartial2D, getAffineTransform, getPerspectiveTransform, findHomography
  */
-CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine);
-CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine, int ransacMaxIters, double ransacGoodRatio,
-                                         int ransacSize0);
+CV_DEPRECATED CV_EXPORTS Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine );
 
 enum
 {
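As a rough sketch of the image-based migration suggested in the new documentation above (the feature count, quality level, and minimum distance are arbitrary assumptions, not values from this patch):

    #include <opencv2/imgproc.hpp>   // goodFeaturesToTrack
    #include <opencv2/video.hpp>     // calcOpticalFlowPyrLK
    #include <opencv2/calib3d.hpp>   // estimateAffinePartial2D
    #include <vector>

    // Hypothetical replacement for estimateRigidTransform(prevGray, nextGray, false)
    cv::Mat rigidFromImages(const cv::Mat& prevGray, const cv::Mat& nextGray)
    {
        std::vector<cv::Point2f> prevPts, nextPts;
        cv::goodFeaturesToTrack(prevGray, prevPts, 200, 0.01, 5);  // parameters are arbitrary
        if (prevPts.empty())
            return cv::Mat();

        std::vector<uchar> status;
        std::vector<float> err;
        cv::calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err);

        // keep only the points that were tracked successfully
        std::vector<cv::Point2f> src, dst;
        for (size_t i = 0; i < status.size(); i++)
        {
            if (status[i])
            {
                src.push_back(prevPts[i]);
                dst.push_back(nextPts[i]);
            }
        }
        return cv::estimateAffinePartial2D(src, dst);  // empty Mat if estimation fails
    }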

modules/video/include/opencv2/video/tracking_c.h
@@ -94,10 +94,6 @@ CVAPI(void) cvCalcAffineFlowPyrLK( const CvArr* prev, const CvArr* curr,
                                    char* status, float* track_error,
                                    CvTermCriteria criteria, int flags );
 
-/* Estimate rigid transformation between 2 images or 2 point sets */
-CVAPI(int) cvEstimateRigidTransform( const CvArr* A, const CvArr* B,
-                                     CvMat* M, int full_affine );
-
 /* Estimate optical flow for each pixel using the two-frame G. Farneback algorithm */
 CVAPI(void) cvCalcOpticalFlowFarneback( const CvArr* prev, const CvArr* next,
                                         CvArr* flow, double pyr_scale, int levels,

modules/video/src/compat_video.cpp
@@ -299,21 +299,3 @@ CV_IMPL void cvCalcOpticalFlowFarneback(
     cv::calcOpticalFlowFarneback( prev, next, flow, pyr_scale, levels,
                                   winsize, iterations, poly_n, poly_sigma, flags );
 }
-
-CV_IMPL int
-cvEstimateRigidTransform( const CvArr* arrA, const CvArr* arrB, CvMat* arrM, int full_affine )
-{
-    cv::Mat matA = cv::cvarrToMat(arrA), matB = cv::cvarrToMat(arrB);
-    const cv::Mat matM0 = cv::cvarrToMat(arrM);
-
-    cv::Mat matM = cv::estimateRigidTransform(matA, matB, full_affine != 0);
-    if( matM.empty() )
-    {
-        matM = cv::cvarrToMat(arrM);
-        matM.setTo(cv::Scalar::all(0));
-        return 0;
-    }
-    matM.convertTo(matM0, matM0.type());
-    return 1;
-}
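Code that still calls the removed cvEstimateRigidTransform has to move to the C++ API. A sketch of an equivalent wrapper for point-set inputs (hypothetical, mirroring the deleted compatibility function above; image inputs would instead go through the optical-flow pipeline shown earlier):

    #include <opencv2/core/core_c.h>  // CvArr, CvMat, cvarrToMat
    #include <opencv2/calib3d.hpp>    // estimateAffine2D, estimateAffinePartial2D

    // Hypothetical stand-in for the removed cvEstimateRigidTransform (point sets only)
    int estimateRigidTransformC(const CvArr* arrA, const CvArr* arrB, CvMat* arrM, int full_affine)
    {
        cv::Mat matA = cv::cvarrToMat(arrA), matB = cv::cvarrToMat(arrB);
        cv::Mat matM0 = cv::cvarrToMat(arrM);

        cv::Mat matM = full_affine ? cv::estimateAffine2D(matA, matB)
                                   : cv::estimateAffinePartial2D(matA, matB);
        if (matM.empty())
        {
            matM0.setTo(cv::Scalar::all(0));  // keep the old "failure returns 0, output zeroed" contract
            return 0;
        }
        matM.convertTo(matM0, matM0.type());
        return 1;
    }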

@ -45,6 +45,7 @@
#include "lkpyramid.hpp"
#include "opencl_kernels_video.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
@@ -1398,115 +1399,20 @@ void cv::calcOpticalFlowPyrLK( InputArray _prevImg, InputArray _nextImg,
     optflow->calc(_prevImg,_nextImg,_prevPts,_nextPts,_status,_err);
 }
 
-namespace cv
-{
-
-static void
-getRTMatrix( const std::vector<Point2f> a, const std::vector<Point2f> b,
-             int count, Mat& M, bool fullAffine )
-{
-    CV_Assert( M.isContinuous() );
-
-    if( fullAffine )
-    {
-        double sa[6][6]={{0.}}, sb[6]={0.};
-        Mat A( 6, 6, CV_64F, &sa[0][0] ), B( 6, 1, CV_64F, sb );
-        Mat MM = M.reshape(1, 6);
-
-        for( int i = 0; i < count; i++ )
-        {
-            sa[0][0] += a[i].x*a[i].x;
-            sa[0][1] += a[i].y*a[i].x;
-            sa[0][2] += a[i].x;
-
-            sa[1][1] += a[i].y*a[i].y;
-            sa[1][2] += a[i].y;
-
-            sb[0] += a[i].x*b[i].x;
-            sb[1] += a[i].y*b[i].x;
-            sb[2] += b[i].x;
-            sb[3] += a[i].x*b[i].y;
-            sb[4] += a[i].y*b[i].y;
-            sb[5] += b[i].y;
-        }
-
-        sa[3][4] = sa[4][3] = sa[1][0] = sa[0][1];
-        sa[3][5] = sa[5][3] = sa[2][0] = sa[0][2];
-        sa[4][5] = sa[5][4] = sa[2][1] = sa[1][2];
-
-        sa[3][3] = sa[0][0];
-        sa[4][4] = sa[1][1];
-        sa[5][5] = sa[2][2] = count;
-
-        solve( A, B, MM, DECOMP_EIG );
-    }
-    else
-    {
-        double sa[4][4]={{0.}}, sb[4]={0.}, m[4] = {0};
-        Mat A( 4, 4, CV_64F, sa ), B( 4, 1, CV_64F, sb );
-        Mat MM( 4, 1, CV_64F, m );
-
-        for( int i = 0; i < count; i++ )
-        {
-            sa[0][0] += a[i].x*a[i].x + a[i].y*a[i].y;
-            sa[0][2] += a[i].x;
-            sa[0][3] += a[i].y;
-
-            sb[0] += a[i].x*b[i].x + a[i].y*b[i].y;
-            sb[1] += a[i].x*b[i].y - a[i].y*b[i].x;
-            sb[2] += b[i].x;
-            sb[3] += b[i].y;
-        }
-
-        sa[1][1] = sa[0][0];
-        sa[2][1] = sa[1][2] = -sa[0][3];
-        sa[3][1] = sa[1][3] = sa[2][0] = sa[0][2];
-        sa[2][2] = sa[3][3] = count;
-        sa[3][0] = sa[0][3];
-
-        solve( A, B, MM, DECOMP_EIG );
-
-        double* om = M.ptr<double>();
-        om[0] = om[4] = m[0];
-        om[1] = -m[1];
-        om[3] = m[1];
-        om[2] = m[2];
-        om[5] = m[3];
-    }
-}
-
-}
-
 cv::Mat cv::estimateRigidTransform( InputArray src1, InputArray src2, bool fullAffine )
 {
-    return estimateRigidTransform(src1, src2, fullAffine, 500, 0.5, 3);
-}
-
-cv::Mat cv::estimateRigidTransform( InputArray src1, InputArray src2, bool fullAffine, int ransacMaxIters, double ransacGoodRatio,
-                                    const int ransacSize0)
-{
     CV_INSTRUMENT_REGION()
 
-    Mat M(2, 3, CV_64F), A = src1.getMat(), B = src2.getMat();
+    Mat A = src1.getMat(), B = src2.getMat();
 
     const int COUNT = 15;
     const int WIDTH = 160, HEIGHT = 120;
 
     std::vector<Point2f> pA, pB;
-    std::vector<int> good_idx;
     std::vector<uchar> status;
 
     double scale = 1.;
-    int i, j, k, k1;
-    RNG rng((uint64)-1);
-    int good_count = 0;
-
-    if( ransacSize0 < 3 )
-        CV_Error( Error::StsBadArg, "ransacSize0 should have value bigger than 2.");
-
-    if( ransacGoodRatio > 1 || ransacGoodRatio < 0)
-        CV_Error( Error::StsBadArg, "ransacGoodRatio should have value between 0 and 1");
+    int i, j, k;
 
     if( A.size() != B.size() )
         CV_Error( Error::StsUnmatchedSizes, "Both input images must have the same size" );
@@ -1518,11 +1424,13 @@ cv::Mat cv::estimateRigidTransform( InputArray src1, InputArray src2, bool fullA
     if( count > 0 )
     {
+        // inputs are points
         A.reshape(2, count).convertTo(pA, CV_32F);
         B.reshape(2, count).convertTo(pB, CV_32F);
     }
     else if( A.depth() == CV_8U )
    {
+        // inputs are images
         int cn = A.channels();
         CV_Assert( cn == 1 || cn == 3 || cn == 4 );
 
         Size sz0 = A.size();
@@ -1594,108 +1502,11 @@ cv::Mat cv::estimateRigidTransform( InputArray src1, InputArray src2, bool fullA
     else
         CV_Error( Error::StsUnsupportedFormat, "Both input images must have either 8uC1 or 8uC3 type" );
 
-    good_idx.resize(count);
-
-    if( count < ransacSize0 )
-        return Mat();
-
-    Rect brect = boundingRect(pB);
-
-    std::vector<Point2f> a(ransacSize0);
-    std::vector<Point2f> b(ransacSize0);
-
-    // RANSAC stuff:
-    // 1. find the consensus
-    for( k = 0; k < ransacMaxIters; k++ )
+    if (fullAffine)
     {
-        std::vector<int> idx(ransacSize0);
-        // choose random 3 non-complanar points from A & B
-        for( i = 0; i < ransacSize0; i++ )
-        {
-            for( k1 = 0; k1 < ransacMaxIters; k1++ )
-            {
-                idx[i] = rng.uniform(0, count);
-
-                for( j = 0; j < i; j++ )
-                {
-                    if( idx[j] == idx[i] )
-                        break;
-                    // check that the points are not very close one each other
-                    if( fabs(pA[idx[i]].x - pA[idx[j]].x) +
-                        fabs(pA[idx[i]].y - pA[idx[j]].y) < FLT_EPSILON )
-                        break;
-                    if( fabs(pB[idx[i]].x - pB[idx[j]].x) +
-                        fabs(pB[idx[i]].y - pB[idx[j]].y) < FLT_EPSILON )
-                        break;
-                }
-
-                if( j < i )
-                    continue;
-
-                if( i+1 == ransacSize0 )
-                {
-                    // additional check for non-complanar vectors
-                    a[0] = pA[idx[0]];
-                    a[1] = pA[idx[1]];
-                    a[2] = pA[idx[2]];
-
-                    b[0] = pB[idx[0]];
-                    b[1] = pB[idx[1]];
-                    b[2] = pB[idx[2]];
-
-                    double dax1 = a[1].x - a[0].x, day1 = a[1].y - a[0].y;
-                    double dax2 = a[2].x - a[0].x, day2 = a[2].y - a[0].y;
-                    double dbx1 = b[1].x - b[0].x, dby1 = b[1].y - b[0].y;
-                    double dbx2 = b[2].x - b[0].x, dby2 = b[2].y - b[0].y;
-                    const double eps = 0.01;
-
-                    if( fabs(dax1*day2 - day1*dax2) < eps*std::sqrt(dax1*dax1+day1*day1)*std::sqrt(dax2*dax2+day2*day2) ||
-                        fabs(dbx1*dby2 - dby1*dbx2) < eps*std::sqrt(dbx1*dbx1+dby1*dby1)*std::sqrt(dbx2*dbx2+dby2*dby2) )
-                        continue;
-                }
-                break;
-            }
-
-            if( k1 >= ransacMaxIters )
-                break;
-        }
-
-        if( i < ransacSize0 )
-            continue;
-
-        // estimate the transformation using 3 points
-        getRTMatrix( a, b, 3, M, fullAffine );
-
-        const double* m = M.ptr<double>();
-        for( i = 0, good_count = 0; i < count; i++ )
-        {
-            if( std::abs( m[0]*pA[i].x + m[1]*pA[i].y + m[2] - pB[i].x ) +
-                std::abs( m[3]*pA[i].x + m[4]*pA[i].y + m[5] - pB[i].y ) < std::max(brect.width,brect.height)*0.05 )
-                good_idx[good_count++] = i;
-        }
-
-        if( good_count >= count*ransacGoodRatio )
-            break;
-    }
-
-    if( k >= ransacMaxIters )
-        return Mat();
-
-    if( good_count < count )
+        return estimateAffine2D(pA, pB);
+    } else
     {
-        for( i = 0; i < good_count; i++ )
-        {
-            j = good_idx[i];
-            pA[i] = pA[j];
-            pB[i] = pB[j];
-        }
+        return estimateAffinePartial2D(pA, pB);
     }
-
-    getRTMatrix( pA, pB, good_count, M, fullAffine );
-    M.at<double>(0, 2) /= scale;
-    M.at<double>(1, 2) /= scale;
-
-    return M;
 }
 
 /* End of file. */

modules/video/test/test_estimaterigid.cpp
@@ -42,6 +42,15 @@
 #include "test_precomp.hpp"
 
+// this is a test for a deprecated function. let's ignore deprecated warnings in this file
+#if defined(__clang__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#elif defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#elif defined(_MSC_VER)
+#pragma warning( disable : 4996)
+#endif
+
 namespace opencv_test { namespace {
 
 class CV_RigidTransform_Test : public cvtest::BaseTest
