Merge pull request #24005 from ivashmak:merge_usac_5.x

Merge usac to 5.x #24005

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake


The base branch is PR #23979. This merges PRs #23078, #23900 and #23806 into 5.x.
Maksym Ivashechkin, committed via GitHub
parent 4d695cd2f4
commit 0e8748746f
Changed files (lines changed):

1. modules/3d/include/opencv2/3d.hpp (15)
2. modules/3d/src/five-point.cpp (4)
3. modules/3d/src/fundam.cpp (8)
4. modules/3d/src/ptcloud/sac_segmentation.cpp (15)
5. modules/3d/src/ptsetreg.cpp (4)
6. modules/3d/src/solvepnp.cpp (17)
7. modules/3d/src/usac.hpp (661)
8. modules/3d/src/usac/bundle.cpp (308)
9. modules/3d/src/usac/degeneracy.cpp (705)
10. modules/3d/src/usac/dls_solver.cpp (40)
11. modules/3d/src/usac/essential_solver.cpp (453)
12. modules/3d/src/usac/estimator.cpp (183)
13. modules/3d/src/usac/fundamental_solver.cpp (505)
14. modules/3d/src/usac/gamma_values.cpp (222)
15. modules/3d/src/usac/homography_solver.cpp (578)
16. modules/3d/src/usac/local_optimization.cpp (559)
17. modules/3d/src/usac/plane_solver.cpp (15)
18. modules/3d/src/usac/pnp_solver.cpp (86)
19. modules/3d/src/usac/quality.cpp (568)
20. modules/3d/src/usac/ransac_solvers.cpp (1716)
21. modules/3d/src/usac/sampler.cpp (30)
22. modules/3d/src/usac/sphere_solver.cpp (15)
23. modules/3d/src/usac/termination.cpp (148)
24. modules/3d/src/usac/utils.cpp (428)
25. modules/3d/test/test_sac_segmentation.cpp (4)
26. modules/3d/test/test_usac.cpp (15)

@@ -419,12 +419,13 @@ enum { FM_7POINT = 1, //!< 7-point algorithm
        FM_RANSAC = 8 //!< RANSAC algorithm. It needs at least 15 points. 7-point algorithm is used.
      };
-enum SamplingMethod { SAMPLING_UNIFORM, SAMPLING_PROGRESSIVE_NAPSAC, SAMPLING_NAPSAC,
-                      SAMPLING_PROSAC };
-enum LocalOptimMethod {LOCAL_OPTIM_NULL, LOCAL_OPTIM_INNER_LO, LOCAL_OPTIM_INNER_AND_ITER_LO,
-                       LOCAL_OPTIM_GC, LOCAL_OPTIM_SIGMA};
-enum ScoreMethod {SCORE_METHOD_RANSAC, SCORE_METHOD_MSAC, SCORE_METHOD_MAGSAC, SCORE_METHOD_LMEDS};
-enum NeighborSearchMethod { NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS };
+enum SamplingMethod { SAMPLING_UNIFORM=0, SAMPLING_PROGRESSIVE_NAPSAC=1, SAMPLING_NAPSAC=2,
+                      SAMPLING_PROSAC=3 };
+enum LocalOptimMethod {LOCAL_OPTIM_NULL=0, LOCAL_OPTIM_INNER_LO=1, LOCAL_OPTIM_INNER_AND_ITER_LO=2,
+                       LOCAL_OPTIM_GC=3, LOCAL_OPTIM_SIGMA=4};
+enum ScoreMethod {SCORE_METHOD_RANSAC=0, SCORE_METHOD_MSAC=1, SCORE_METHOD_MAGSAC=2, SCORE_METHOD_LMEDS=3};
+enum NeighborSearchMethod { NEIGH_FLANN_KNN=0, NEIGH_GRID=1, NEIGH_FLANN_RADIUS=2 };
+enum PolishingMethod { NONE_POLISHER=0, LSQ_POLISHER=1, MAGSAC=2, COV_POLISHER=3 };

 struct CV_EXPORTS_W_SIMPLE UsacParams
 { // in alphabetical order
@@ -440,6 +441,8 @@ struct CV_EXPORTS_W_SIMPLE UsacParams
     CV_PROP_RW SamplingMethod sampler;
     CV_PROP_RW ScoreMethod score;
     CV_PROP_RW double threshold;
+    CV_PROP_RW PolishingMethod final_polisher;
+    CV_PROP_RW int final_polisher_iterations;
 };

 /** @brief Converts a rotation matrix to a rotation vector or vice versa.
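For reference, here is a minimal usage sketch (not part of this diff) showing the extended `UsacParams`, including the two fields added above, passed to the USAC-based `findHomography` overload that this PR also touches. The include path, variable names, and parameter values are illustrative assumptions.

```cpp
#include <opencv2/3d.hpp>   // 5.x module layout assumed
#include <vector>

// matched keypoints, assumed to be filled by the caller
std::vector<cv::Point2f> srcPoints, dstPoints;

cv::UsacParams params;
params.sampler       = cv::SAMPLING_PROSAC;
params.score         = cv::SCORE_METHOD_MAGSAC;
params.loMethod      = cv::LOCAL_OPTIM_SIGMA;
params.threshold     = 1.5;
params.confidence    = 0.99;
params.maxIterations = 5000;
params.final_polisher            = cv::COV_POLISHER;   // new field in this PR
params.final_polisher_iterations = 3;                  // new field in this PR

cv::Mat mask;
cv::Mat H = cv::findHomography(srcPoints, dstPoints, mask, params);
```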

@@ -504,9 +504,9 @@ Mat findEssentialMat( InputArray points1, InputArray points2,
                       InputArray cameraMatrix1, InputArray cameraMatrix2,
                       InputArray dist_coeff1, InputArray dist_coeff2, OutputArray mask, const UsacParams &params) {
     Ptr<usac::Model> model;
-    usac::setParameters(model, usac::EstimationMethod::Essential, params, mask.needed());
+    usac::setParameters(model, usac::EstimationMethod::ESSENTIAL, params, mask.needed());
     Ptr<usac::RansacOutput> ransac_output;
-    if (usac::run(model, points1, points2, model->getRandomGeneratorState(),
+    if (usac::run(model, points1, points2,
             ransac_output, cameraMatrix1, cameraMatrix2, dist_coeff1, dist_coeff2)) {
         usac::saveMask(mask, ransac_output->getInliersMask());
         return ransac_output->getModel();

@@ -438,9 +438,9 @@ Mat findHomography( InputArray _points1, InputArray _points2,
 Mat findHomography(InputArray srcPoints, InputArray dstPoints, OutputArray mask,
                    const UsacParams &params) {
     Ptr<usac::Model> model;
-    usac::setParameters(model, usac::EstimationMethod::Homography, params, mask.needed());
+    usac::setParameters(model, usac::EstimationMethod::HOMOGRAPHY, params, mask.needed());
     Ptr<usac::RansacOutput> ransac_output;
-    if (usac::run(model, srcPoints, dstPoints, model->getRandomGeneratorState(),
+    if (usac::run(model, srcPoints, dstPoints,
             ransac_output, noArray(), noArray(), noArray(), noArray())) {
         usac::saveMask(mask, ransac_output->getInliersMask());
         return ransac_output->getModel() / ransac_output->getModel().at<double>(2,2);
@@ -895,10 +895,10 @@ Mat findFundamentalMat( InputArray points1, InputArray points2, OutputArray mask
 Mat findFundamentalMat( InputArray points1, InputArray points2,
                         OutputArray mask, const UsacParams &params) {
     Ptr<usac::Model> model;
-    setParameters(model, usac::EstimationMethod::Fundamental, params, mask.needed());
+    setParameters(model, usac::EstimationMethod::FUNDAMENTAL, params, mask.needed());
     CV_Assert(model);
     Ptr<usac::RansacOutput> ransac_output;
-    if (usac::run(model, points1, points2, model->getRandomGeneratorState(),
+    if (usac::run(model, points1, points2,
             ransac_output, noArray(), noArray(), noArray(), noArray())) {
         usac::saveMask(mask, ransac_output->getInliersMask());
         return ransac_output->getModel();

@@ -55,7 +55,6 @@ SACSegmentationImpl::segmentSingle(Mat &points, std::vector<bool> &label, Mat &m
     // RANSAC
     using namespace usac;
-    int _max_iterations_before_lo = 100, _max_num_hypothesis_to_test_before_rejection = 15;
     SamplingMethod _sampling_method = SamplingMethod::SAMPLING_UNIFORM;
     LocalOptimMethod _lo_method = LocalOptimMethod::LOCAL_OPTIM_INNER_LO;
     ScoreMethod _score_method = ScoreMethod::SCORE_METHOD_RANSAC;
@@ -70,16 +69,17 @@ SACSegmentationImpl::segmentSingle(Mat &points, std::vector<bool> &label, Mat &m
     Ptr <ModelVerifier> verifier;
     Ptr <LocalOptimization> lo;
     Ptr <Degeneracy> degeneracy;
-    Ptr <TerminationCriteria> termination;
+    Ptr <Termination> termination;
     Ptr <FinalModelPolisher> polisher;
     Ptr <MinimalSolver> min_solver;
     Ptr <NonMinimalSolver> non_min_solver;
     Ptr <Estimator> estimator;
     Ptr <usac::Error> error;
+    EstimationMethod est_method;
     switch (sac_model_type)
     {
     case SAC_MODEL_PLANE:
+        est_method = EstimationMethod::PLANE;
         min_solver = PlaneModelMinimalSolver::create(points);
         non_min_solver = PlaneModelNonMinimalSolver::create(points);
         error = PlaneModelError::create(points);
@@ -90,6 +90,7 @@ SACSegmentationImpl::segmentSingle(Mat &points, std::vector<bool> &label, Mat &m
         //     error = CylinderModelError::create(points);
         //     break;
     case SAC_MODEL_SPHERE:
+        est_method = EstimationMethod::SPHERE;
         min_solver = SphereModelMinimalSolver::create(points);
         non_min_solver = SphereModelNonMinimalSolver::create(points);
         error = SphereModelError::create(points);
@@ -114,7 +115,7 @@ SACSegmentationImpl::segmentSingle(Mat &points, std::vector<bool> &label, Mat &m
     estimator = PointCloudModelEstimator::create(min_solver, non_min_solver, constraint_func);
     sampler = UniformSampler::create(state++, min_sample_size, points_size);
     quality = RansacQuality::create(points_size, _threshold, error);
-    verifier = ModelVerifier::create();
+    verifier = ModelVerifier::create(quality);
     Ptr <RandomGenerator> lo_sampler = UniformRandomGenerator::create(state++, points_size,
@@ -128,11 +129,8 @@ SACSegmentationImpl::segmentSingle(Mat &points, std::vector<bool> &label, Mat &m
     termination = StandardTerminationCriteria::create
             (confidence, points_size, min_sample_size, max_iterations);
-    Ptr <SimpleUsacConfig> usacConfig = SimpleUsacConfig::create();
+    Ptr <SimpleUsacConfig> usacConfig = SimpleUsacConfig::create(est_method);
     usacConfig->setMaxIterations(max_iterations);
-    usacConfig->setMaxIterationsBeforeLo(_max_iterations_before_lo);
-    usacConfig->setMaxNumHypothesisToTestBeforeRejection(
-            _max_num_hypothesis_to_test_before_rejection);
     usacConfig->setRandomGeneratorState(state);
     usacConfig->setParallel(is_parallel);
     usacConfig->setNeighborsSearchMethod(_neighbors_search_method);
@@ -146,7 +144,6 @@ SACSegmentationImpl::segmentSingle(Mat &points, std::vector<bool> &label, Mat &m
     UniversalRANSAC ransac(usacConfig, points_size, estimator, quality, sampler,
            termination, verifier, degeneracy, lo, polisher);
     Ptr <usac::RansacOutput> ransac_output;
-
     if (!ransac.run(ransac_output))
     {
         return 0;
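For orientation, a hedged sketch of the public point-cloud segmentation API that ends up in the `UniversalRANSAC` pipeline configured above. The accessor names (`setSacModelType`, `setDistanceThreshold`, `segment`, ...) are assumptions based on the 3d module's `SACSegmentation` interface and are not part of this diff.

```cpp
#include <opencv2/3d/ptcloud.hpp>   // assumed header for SACSegmentation

cv::Mat cloud;   // N x 3 CV_32F point cloud, assumed to be filled by the caller

cv::Ptr<cv::SACSegmentation> seg = cv::SACSegmentation::create();
seg->setSacModelType(cv::SAC_MODEL_PLANE);     // plane model, as in the switch above
seg->setDistanceThreshold(0.05);               // inlier threshold in cloud units
seg->setMaxIterations(1000);
seg->setConfidence(0.99);

cv::Mat labels, coefficients;                  // per-point labels and model coefficients
int n_models = seg->segment(cloud, labels, coefficients);
```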

@@ -1022,9 +1022,9 @@ Mat estimateAffine2D(InputArray _from, InputArray _to, OutputArray _inliers,
 Mat estimateAffine2D(InputArray _from, InputArray _to, OutputArray inliers,
                      const UsacParams &params) {
     Ptr<usac::Model> model;
-    usac::setParameters(model, usac::EstimationMethod::Affine, params, inliers.needed());
+    usac::setParameters(model, usac::EstimationMethod::AFFINE, params, inliers.needed());
     Ptr<usac::RansacOutput> ransac_output;
-    if (usac::run(model, _from, _to, model->getRandomGeneratorState(),
+    if (usac::run(model, _from, _to,
             ransac_output, noArray(), noArray(), noArray(), noArray())) {
         usac::saveMask(inliers, ransac_output->getInliersMask());
         return ransac_output->getModel().rowRange(0,2);

@@ -199,21 +199,6 @@ public:
     Mat tvec;
 };
-UsacParams::UsacParams()
-{
-    confidence = 0.99;
-    isParallel = false;
-    loIterations = 5;
-    loMethod = LocalOptimMethod::LOCAL_OPTIM_INNER_LO;
-    loSampleSize = 14;
-    maxIterations = 5000;
-    neighborsSearch = NeighborSearchMethod::NEIGH_GRID;
-    randomGeneratorState = 0;
-    sampler = SamplingMethod::SAMPLING_UNIFORM;
-    score = ScoreMethod::SCORE_METHOD_MSAC;
-    threshold = 1.5;
-}
 bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                     InputArray _cameraMatrix, InputArray _distCoeffs,
                     OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
@@ -407,7 +392,7 @@ bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
     usac::setParameters(model_params, cameraMatrix.empty() ? usac::EstimationMethod::P6P :
             usac::EstimationMethod::P3P, params, inliers.needed());
     Ptr<usac::RansacOutput> ransac_output;
-    if (usac::run(model_params, imagePoints, objectPoints, model_params->getRandomGeneratorState(),
+    if (usac::run(model_params, imagePoints, objectPoints,
             ransac_output, cameraMatrix, noArray(), distCoeffs, noArray())) {
         if (inliers.needed()) {
             const auto &inliers_mask = ransac_output->getInliersMask();
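Below is a hedged sketch (not part of the patch) of the `UsacParams`-based `solvePnPRansac` overload whose internals change above; when `cameraMatrix` is empty the P6P path is taken, otherwise P3P. Names and values are illustrative.

```cpp
#include <opencv2/3d.hpp>   // 5.x module layout assumed
#include <vector>

std::vector<cv::Point3f> objectPoints;  // 3D model points, assumed filled
std::vector<cv::Point2f> imagePoints;   // corresponding 2D detections

cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320,
                                         0, 800, 240,
                                         0,   0,   1);
cv::Mat distCoeffs;                     // empty: no lens distortion

cv::UsacParams params;
params.threshold  = 2.0;                // reprojection error threshold in pixels
params.confidence = 0.999;
params.score      = cv::SCORE_METHOD_MSAC;

cv::Mat rvec, tvec, inliers;
bool ok = cv::solvePnPRansac(objectPoints, imagePoints, K, distCoeffs,
                             rvec, tvec, inliers, params);
```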

Diff for modules/3d/src/usac.hpp suppressed because it is too large.

@@ -0,0 +1,308 @@ (new file: modules/3d/src/usac/bundle.cpp)
// Copyright (c) 2020, Viktor Larsson
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../precomp.hpp"
#include "../usac.hpp"
namespace cv { namespace usac {
class MlesacLoss {
public:
MlesacLoss(double threshold) : squared_thr(threshold * threshold), norm_thr(squared_thr*3), one_over_thr(1/norm_thr), inv_sq_thr(1/squared_thr) {}
double loss(double r2) const {
return r2 < norm_thr ? r2 * one_over_thr - 1 : 0;
}
double weight(double r2) const {
// use Cauchy weight
return 1.0 / (1.0 + r2 * inv_sq_thr);
}
const double squared_thr;
private:
const double norm_thr, one_over_thr, inv_sq_thr;
};
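// Note (editorial, not part of the original patch): with thr = loss_scale, the class above
// implements a truncated MLESAC-style cost and a Cauchy IRLS weight:
//     loss(r^2)   = r^2 / (3 * thr^2) - 1   if r^2 < 3 * thr^2,   and 0 otherwise
//     weight(r^2) = 1 / (1 + r^2 / thr^2)
// so every inlier contributes a negative cost (down to -1) and outliers contribute nothing.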
class RelativePoseJacobianAccumulator {
private:
const Mat* correspondences;
const std::vector<int> &sample;
const int sample_size;
const MlesacLoss &loss_fn;
const double *weights;
public:
RelativePoseJacobianAccumulator(
const Mat& correspondences_,
const std::vector<int> &sample_,
const int sample_size_,
const MlesacLoss &l,
const double *w = nullptr) :
correspondences(&correspondences_),
sample(sample_),
sample_size(sample_size_),
loss_fn(l),
weights(w) {}
Matx33d essential_from_motion(const CameraPose &pose) const {
return Matx33d(0.0, -pose.t(2), pose.t(1),
pose.t(2), 0.0, -pose.t(0),
-pose.t(1), pose.t(0), 0.0) * pose.R;
}
double residual(const CameraPose &pose) const {
const Matx33d E = essential_from_motion(pose);
const float m11=static_cast<float>(E(0,0)), m12=static_cast<float>(E(0,1)), m13=static_cast<float>(E(0,2));
const float m21=static_cast<float>(E(1,0)), m22=static_cast<float>(E(1,1)), m23=static_cast<float>(E(1,2));
const float m31=static_cast<float>(E(2,0)), m32=static_cast<float>(E(2,1)), m33=static_cast<float>(E(2,2));
const auto * const pts = (float *) correspondences->data;
double cost = 0.0;
for (int k = 0; k < sample_size; ++k) {
const int idx = 4*sample[k];
const float x1=pts[idx], y1=pts[idx+1], x2=pts[idx+2], y2=pts[idx+3];
const float F_pt1_x = m11 * x1 + m12 * y1 + m13,
F_pt1_y = m21 * x1 + m22 * y1 + m23;
const float pt2_F_x = x2 * m11 + y2 * m21 + m31,
pt2_F_y = x2 * m12 + y2 * m22 + m32;
const float pt2_F_pt1 = x2 * F_pt1_x + y2 * F_pt1_y + m31 * x1 + m32 * y1 + m33;
const float r2 = pt2_F_pt1 * pt2_F_pt1 / (F_pt1_x * F_pt1_x + F_pt1_y * F_pt1_y +
pt2_F_x * pt2_F_x + pt2_F_y * pt2_F_y);
if (weights == nullptr)
cost += loss_fn.loss(r2);
else cost += weights[k] * loss_fn.loss(r2);
}
return cost;
}
void accumulate(const CameraPose &pose, Matx<double, 5, 5> &JtJ, Matx<double, 5, 1> &Jtr, Matx<double, 3, 2> &tangent_basis) const {
const auto * const pts = (float *) correspondences->data;
// We start by setting up a basis for the updates in the translation (orthogonal to t)
// We find the minimum element of t and cross product with the corresponding basis vector.
// (this ensures that the first cross product is not close to the zero vector)
Vec3d tangent_basis_col0;
if (std::abs(pose.t(0)) < std::abs(pose.t(1))) {
// x < y
if (std::abs(pose.t(0)) < std::abs(pose.t(2))) {
tangent_basis_col0 = pose.t.cross(Vec3d(1,0,0));
} else {
tangent_basis_col0 = pose.t.cross(Vec3d(0,0,1));
}
} else {
// x > y
if (std::abs(pose.t(1)) < std::abs(pose.t(2))) {
tangent_basis_col0 = pose.t.cross(Vec3d(0,1,0));
} else {
tangent_basis_col0 = pose.t.cross(Vec3d(0,0,1));
}
}
tangent_basis_col0 /= norm(tangent_basis_col0);
Vec3d tangent_basis_col1 = pose.t.cross(tangent_basis_col0);
tangent_basis_col1 /= norm(tangent_basis_col1);
for (int i = 0; i < 3; i++) {
tangent_basis(i,0) = tangent_basis_col0(i);
tangent_basis(i,1) = tangent_basis_col1(i);
}
const Matx33d E = essential_from_motion(pose);
// Matrices contain the jacobians of E w.r.t. the rotation and translation parameters
// Each column is vec(E*skew(e_k)) where e_k is k:th basis vector
const Matx<double, 9, 3> dR = {0., -E(0,2), E(0,1),
0., -E(1,2), E(1,1),
0., -E(2,2), E(2,1),
E(0,2), 0., -E(0,0),
E(1,2), 0., -E(1,0),
E(2,2), 0., -E(2,0),
-E(0,1), E(0,0), 0.,
-E(1,1), E(1,0), 0.,
-E(2,1), E(2,0), 0.};
Matx<double, 9, 2> dt;
// Each column is vec(skew(tangent_basis[k])*R)
for (int i = 0; i <= 2; i+=1) {
const Vec3d r_i(pose.R(0,i), pose.R(1,i), pose.R(2,i));
for (int j = 0; j <= 1; j+= 1) {
const Vec3d v = (j == 0 ? tangent_basis_col0 : tangent_basis_col1).cross(r_i);
for (int k = 0; k < 3; k++) {
dt(3*i+k,j) = v[k];
}
}
}
for (int k = 0; k < sample_size; ++k) {
const auto point_idx = 4*sample[k];
const Vec3d pt1 (pts[point_idx], pts[point_idx+1], 1), pt2 (pts[point_idx+2], pts[point_idx+3], 1);
const double C = pt2.dot(E * pt1);
// J_C is the Jacobian of the epipolar constraint w.r.t. the image points
const Vec4d J_C ((E.col(0).t() * pt2)[0], (E.col(1).t() * pt2)[0], (E.row(0) * pt1)[0], (E.row(1) * pt1)[0]);
const double nJ_C = norm(J_C);
const double inv_nJ_C = 1.0 / nJ_C;
const double r = C * inv_nJ_C;
if (r*r > loss_fn.squared_thr) continue;
// Compute weight from robust loss function (used in the IRLS)
double weight = loss_fn.weight(r * r) / sample_size;
if (weights != nullptr)
weight = weights[k] * weight;
if(weight < DBL_EPSILON)
continue;
// Compute Jacobian of Sampson error w.r.t the fundamental/essential matrix (3x3)
Matx<double, 1, 9> dF (pt1(0) * pt2(0), pt1(0) * pt2(1), pt1(0), pt1(1) * pt2(0), pt1(1) * pt2(1), pt1(1), pt2(0), pt2(1), 1.0);
const double s = C * inv_nJ_C * inv_nJ_C;
dF(0) -= s * (J_C(2) * pt1(0) + J_C(0) * pt2(0));
dF(1) -= s * (J_C(3) * pt1(0) + J_C(0) * pt2(1));
dF(2) -= s * (J_C(0));
dF(3) -= s * (J_C(2) * pt1(1) + J_C(1) * pt2(0));
dF(4) -= s * (J_C(3) * pt1(1) + J_C(1) * pt2(1));
dF(5) -= s * (J_C(1));
dF(6) -= s * (J_C(2));
dF(7) -= s * (J_C(3));
dF *= inv_nJ_C;
// and then w.r.t. the pose parameters (rotation + tangent basis for translation)
const Matx13d dFdR = dF * dR;
const Matx12d dFdt = dF * dt;
const Matx<double, 1, 5> J (dFdR(0), dFdR(1), dFdR(2), dFdt(0), dFdt(1));
// Accumulate into JtJ and Jtr
Jtr += weight * C * inv_nJ_C * J.t();
JtJ(0, 0) += weight * (J(0) * J(0));
JtJ(1, 0) += weight * (J(1) * J(0));
JtJ(1, 1) += weight * (J(1) * J(1));
JtJ(2, 0) += weight * (J(2) * J(0));
JtJ(2, 1) += weight * (J(2) * J(1));
JtJ(2, 2) += weight * (J(2) * J(2));
JtJ(3, 0) += weight * (J(3) * J(0));
JtJ(3, 1) += weight * (J(3) * J(1));
JtJ(3, 2) += weight * (J(3) * J(2));
JtJ(3, 3) += weight * (J(3) * J(3));
JtJ(4, 0) += weight * (J(4) * J(0));
JtJ(4, 1) += weight * (J(4) * J(1));
JtJ(4, 2) += weight * (J(4) * J(2));
JtJ(4, 3) += weight * (J(4) * J(3));
JtJ(4, 4) += weight * (J(4) * J(4));
}
}
};
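// Note (editorial, not part of the original patch): residual() and accumulate() above use the
// Sampson approximation of the epipolar error. With C = x2^T E x1 and
// J_C = dC/d(x1, y1, x2, y2) = ((E^T x2)_1, (E^T x2)_2, (E x1)_1, (E x1)_2),
// the first-order (Sampson) distance is r = C / ||J_C||, and r^2 is what is fed to the
// MlesacLoss weight when accumulating JtJ and Jtr.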
bool satisfyCheirality (const Matx33d& R, const Vec3d &t, const Vec3d &x1, const Vec3d &x2) {
// This code assumes that x1 and x2 are unit vectors
const auto Rx1 = R * x1;
// lambda_2 * x2 = R * ( lambda_1 * x1 ) + t
// [1 a; a 1] * [lambda1; lambda2] = [b1; b2]
// [lambda1; lambda2] = [1 -a; -a 1] * [b1; b2] / (1 - a*a)
const double a = -Rx1.dot(x2), b1 = -Rx1.dot(t), b2 = x2.dot(t);
// Note that we drop the factor 1.0/(1-a*a) since it is always positive.
return (b1 - a * b2 > 0) && (-a * b1 + b2 > 0);
}
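// Note (editorial, not part of the original patch): derivation of the 2x2 system above.
// With unit bearing vectors x1, x2 and depths lambda1, lambda2:
//     lambda2 * x2 = lambda1 * R * x1 + t
// Taking dot products with R*x1 and with x2, and writing a = -(R*x1).x2, b1 = -(R*x1).t, b2 = x2.t:
//     [1 a; a 1] * [lambda1; lambda2] = [b1; b2]
//     [lambda1; lambda2] = [1 -a; -a 1] * [b1; b2] / (1 - a*a)
// Since 1 - a*a > 0 for non-parallel rays, both depths are positive exactly when
// b1 - a*b2 > 0 and -a*b1 + b2 > 0, which is the test returned above.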
int refine_relpose(const Mat &correspondences_,
const std::vector<int> &sample_,
const int sample_size_,
CameraPose *pose,
const BundleOptions &opt,
const double* weights) {
MlesacLoss loss_fn(opt.loss_scale);
RelativePoseJacobianAccumulator accum(correspondences_, sample_, sample_size_, loss_fn, weights);
// return lm_5dof_impl(accum, pose, opt);
Matx<double, 5, 5> JtJ;
Matx<double, 5, 1> Jtr;
Matx<double, 3, 2> tangent_basis;
Matx33d sw = Matx33d::zeros();
double lambda = opt.initial_lambda;
// Compute initial cost
double cost = accum.residual(*pose);
bool recompute_jac = true;
int iter;
for (iter = 0; iter < opt.max_iterations; ++iter) {
// We only recompute jacobian and residual vector if last step was successful
if (recompute_jac) {
std::fill(JtJ.val, JtJ.val+25, 0);
std::fill(Jtr.val, Jtr.val +5, 0);
accum.accumulate(*pose, JtJ, Jtr, tangent_basis);
if (norm(Jtr) < opt.gradient_tol)
break;
}
// Add dampening
JtJ(0, 0) += lambda;
JtJ(1, 1) += lambda;
JtJ(2, 2) += lambda;
JtJ(3, 3) += lambda;
JtJ(4, 4) += lambda;
Matx<double, 5, 1> sol;
Matx<double, 5, 5> JtJ_symm = JtJ;
for (int i = 0; i < 5; i++)
for (int j = i+1; j < 5; j++)
JtJ_symm(i,j) = JtJ(j,i);
const bool success = solve(-JtJ_symm, Jtr, sol);
if (!success || norm(sol) < opt.step_tol)
break;
Vec3d w (sol(0,0), sol(1,0), sol(2,0));
const double theta = norm(w);
w /= theta;
const double a = std::sin(theta);
const double b = std::cos(theta);
sw(0, 1) = -w(2);
sw(0, 2) = w(1);
sw(1, 2) = -w(0);
sw(1, 0) = w(2);
sw(2, 0) = -w(1);
sw(2, 1) = w(0);
CameraPose pose_new;
pose_new.R = pose->R + pose->R * (a * sw + (1 - b) * sw * sw);
// In contrast to the 6dof case, we don't apply R here
// (since this can already be added into tangent_basis)
pose_new.t = pose->t + Vec3d(Mat(tangent_basis * Matx21d(sol(3,0), sol(4,0))));
double cost_new = accum.residual(pose_new);
if (cost_new < cost) {
*pose = pose_new;
lambda /= 10;
cost = cost_new;
recompute_jac = true;
} else {
JtJ(0, 0) -= lambda;
JtJ(1, 1) -= lambda;
JtJ(2, 2) -= lambda;
JtJ(3, 3) -= lambda;
JtJ(4, 4) -= lambda;
lambda *= 10;
recompute_jac = false;
}
}
return iter;
}
}}
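An editorial summary of the loop above (my notation, not from the patch): `refine_relpose` runs Levenberg-Marquardt over a 5-DoF relative-pose parameterization, three rotation parameters $w$ and two translation parameters $\delta_t$ expressed in the tangent basis $B$ orthogonal to $t$, with MLESAC/Cauchy IRLS weights. Each iteration solves the damped normal equations and applies a Rodrigues-style update:

$$
(J^\top J + \lambda I)\,\delta = -J^\top r, \qquad
R \leftarrow R\left(I + \sin\theta\,[\hat{w}]_\times + (1-\cos\theta)\,[\hat{w}]_\times^2\right), \qquad
t \leftarrow t + B\,\delta_t,
$$

where $\theta = \lVert w \rVert$ and $\hat{w} = w/\theta$; $\lambda$ is divided by 10 after a successful step and multiplied by 10 (with the step rolled back) otherwise.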

@@ -8,12 +8,14 @@
 namespace cv { namespace usac {
 class EpipolarGeometryDegeneracyImpl : public EpipolarGeometryDegeneracy {
 private:
-    const Mat * points_mat;
-    const float * const points; // i-th row xi1 yi1 xi2 yi2
+    Mat points_mat;
     const int min_sample_size;
 public:
     explicit EpipolarGeometryDegeneracyImpl (const Mat &points_, int sample_size_) :
-        points_mat(&points_), points ((float*) points_.data), min_sample_size (sample_size_) {}
+        points_mat(points_), min_sample_size (sample_size_)
+    {
+        CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
+    }
     /*
     * Do oriented constraint to verify if epipolar geometry is in front or behind the camera.
     * Return: true if all points are in front of the cameras w.r.t. tested epipolar geometry - satisfies constraint.
@@ -23,39 +25,28 @@ public:
     * e × x ~+ x'^T F
     */
     inline bool isModelValid(const Mat &F_, const std::vector<int> &sample) const override {
-        // F is of rank 2, taking cross product of two rows we obtain null vector of F
-        Vec3d ec_mat = F_.row(0).cross(F_.row(2));
-        auto * ec = ec_mat.val; // of size 3x1
-        // e is zero vector, recompute e
-        if (ec[0] <= 1.9984e-15 && ec[0] >= -1.9984e-15 &&
-            ec[1] <= 1.9984e-15 && ec[1] >= -1.9984e-15 &&
-            ec[2] <= 1.9984e-15 && ec[2] >= -1.9984e-15) {
-            ec_mat = F_.row(1).cross(F_.row(2));
-            ec = ec_mat.val;
-        }
+        const Vec3d ep = Utils::getRightEpipole(F_);
+        const auto * const e = ep.val; // of size 3x1
         const auto * const F = (double *) F_.data;
+        const float * points = points_mat.ptr<float>();
         // without loss of generality, let the first point in sample be in front of the camera.
         int pt = 4*sample[0];
-        // s1 = F11 * x2 + F21 * y2 + F31 * 1
-        // s2 = e'_2 * 1 - e'_3 * y1
+        // check only two first elements of vectors (e × x) and (x'^T F)
+        // s1 = (x'^T F)[0] = x2 * F11 + y2 * F21 + 1 * F31
+        // s2 = (e × x)[0] = e'_2 * 1 - e'_3 * y1
         // sign1 = s1 * s2
-        const double sign1 = (F[0]*points[pt+2]+F[3]*points[pt+3]+F[6])*(ec[1]-ec[2]*points[pt+1]);
+        const double sign1 = (F[0]*points[pt+2]+F[3]*points[pt+3]+F[6])*(e[1]-e[2]*points[pt+1]);
         for (int i = 1; i < min_sample_size; i++) {
             pt = 4 * sample[i];
             // if signum of the first point and tested point differs
             // then two points are on different sides of the camera.
-            if (sign1*(F[0]*points[pt+2]+F[3]*points[pt+3]+F[6])*(ec[1]-ec[2]*points[pt+1])<0)
+            if (sign1*(F[0]*points[pt+2]+F[3]*points[pt+3]+F[6])*(e[1]-e[2]*points[pt+1])<0)
                 return false;
         }
         return true;
     }
-    Ptr<Degeneracy> clone(int /*state*/) const override {
-        return makePtr<EpipolarGeometryDegeneracyImpl>(*points_mat, min_sample_size);
-    }
 };
 void EpipolarGeometryDegeneracy::recoverRank (Mat &model, bool is_fundamental_mat) {
     /*
@@ -79,15 +70,19 @@ Ptr<EpipolarGeometryDegeneracy> EpipolarGeometryDegeneracy::create (const Mat &p
 class HomographyDegeneracyImpl : public HomographyDegeneracy {
 private:
-    const Mat * points_mat;
-    const float * const points;
+    Mat points_mat;
+    const float TOLERANCE = 2 * FLT_EPSILON; // 2 from area of triangle
 public:
     explicit HomographyDegeneracyImpl (const Mat &points_) :
-        points_mat(&points_), points ((float *)points_.data) {}
+        points_mat(points_)
+    {
+        CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
+    }
     inline bool isSampleGood (const std::vector<int> &sample) const override {
         const int smpl1 = 4*sample[0], smpl2 = 4*sample[1], smpl3 = 4*sample[2], smpl4 = 4*sample[3];
         // planar correspondences must lie on the same side of any line from two points in sample
+        const float * points = points_mat.ptr<float>();
         const float x1 = points[smpl1], y1 = points[smpl1+1], X1 = points[smpl1+2], Y1 = points[smpl1+3];
         const float x2 = points[smpl2], y2 = points[smpl2+1], X2 = points[smpl2+2], Y2 = points[smpl2+3];
         const float x3 = points[smpl3], y3 = points[smpl3+1], X3 = points[smpl3+2], Y3 = points[smpl3+3];
@@ -117,222 +112,582 @@ public:
             return false;
         // Checks if points are not collinear
-        // If area of triangle constructed with 3 points is less then threshold then points are collinear:
+        // If area of triangle constructed with 3 points is less than threshold then points are collinear:
         //           |x1 y1 1|             |x1 y1 1|
-        // (1/2) det |x2 y2 1| = (1/2) det |x2-x1 y2-y1 0| = (1/2) det |x2-x1 y2-y1| < threshold
+        // (1/2) det |x2 y2 1| = (1/2) det |x2-x1 y2-y1 0| =       det |x2-x1 y2-y1| < 2 * threshold
         //           |x3 y3 1|             |x3-x1 y3-y1 0|             |x3-x1 y3-y1|
         // for points on the first image
-        if (fabsf((x2-x1) * (y3-y1) - (y2-y1) * (x3-x1)) * 0.5 < FLT_EPSILON) return false; //1,2,3
-        if (fabsf((x2-x1) * (y4-y1) - (y2-y1) * (x4-x1)) * 0.5 < FLT_EPSILON) return false; //1,2,4
-        if (fabsf((x3-x1) * (y4-y1) - (y3-y1) * (x4-x1)) * 0.5 < FLT_EPSILON) return false; //1,3,4
-        if (fabsf((x3-x2) * (y4-y2) - (y3-y2) * (x4-x2)) * 0.5 < FLT_EPSILON) return false; //2,3,4
+        if (fabsf((x2-x1) * (y3-y1) - (y2-y1) * (x3-x1)) < TOLERANCE) return false; //1,2,3
+        if (fabsf((x2-x1) * (y4-y1) - (y2-y1) * (x4-x1)) < TOLERANCE) return false; //1,2,4
+        if (fabsf((x3-x1) * (y4-y1) - (y3-y1) * (x4-x1)) < TOLERANCE) return false; //1,3,4
+        if (fabsf((x3-x2) * (y4-y2) - (y3-y2) * (x4-x2)) < TOLERANCE) return false; //2,3,4
         // for points on the second image
-        if (fabsf((X2-X1) * (Y3-Y1) - (Y2-Y1) * (X3-X1)) * 0.5 < FLT_EPSILON) return false; //1,2,3
-        if (fabsf((X2-X1) * (Y4-Y1) - (Y2-Y1) * (X4-X1)) * 0.5 < FLT_EPSILON) return false; //1,2,4
-        if (fabsf((X3-X1) * (Y4-Y1) - (Y3-Y1) * (X4-X1)) * 0.5 < FLT_EPSILON) return false; //1,3,4
-        if (fabsf((X3-X2) * (Y4-Y2) - (Y3-Y2) * (X4-X2)) * 0.5 < FLT_EPSILON) return false; //2,3,4
+        if (fabsf((X2-X1) * (Y3-Y1) - (Y2-Y1) * (X3-X1)) < TOLERANCE) return false; //1,2,3
+        if (fabsf((X2-X1) * (Y4-Y1) - (Y2-Y1) * (X4-X1)) < TOLERANCE) return false; //1,2,4
+        if (fabsf((X3-X1) * (Y4-Y1) - (Y3-Y1) * (X4-X1)) < TOLERANCE) return false; //1,3,4
+        if (fabsf((X3-X2) * (Y4-Y2) - (Y3-Y2) * (X4-X2)) < TOLERANCE) return false; //2,3,4
         return true;
     }
-    Ptr<Degeneracy> clone(int /*state*/) const override {
-        return makePtr<HomographyDegeneracyImpl>(*points_mat);
-    }
 };
 Ptr<HomographyDegeneracy> HomographyDegeneracy::create (const Mat &points_) {
     return makePtr<HomographyDegeneracyImpl>(points_);
 }
class FundamentalDegeneracyViaEImpl : public FundamentalDegeneracyViaE {
private:
bool is_F_objective;
std::vector<std::vector<int>> instances = {{0,1,2,3,4}, {2,3,4,5,6}, {0,1,4,5,6}};
std::vector<int> e_sample;
const Ptr<Quality> quality;
Ptr<EpipolarGeometryDegeneracy> e_degen, f_degen;
Ptr<EssentialMinimalSolver5pts> e_solver;
std::vector<Mat> e_models;
const int E_SAMPLE_SIZE = 5;
Matx33d K2_inv_t, K1_inv;
public:
FundamentalDegeneracyViaEImpl (const Ptr<Quality> &quality_, const Mat &pts, const Mat &calib_pts, const Matx33d &K1, const Matx33d &K2, bool is_f_objective)
: quality(quality_) {
is_F_objective = is_f_objective;
e_solver = EssentialMinimalSolver5pts::create(calib_pts, false/*svd*/, true/*nister*/);
f_degen = is_F_objective ? EpipolarGeometryDegeneracy::create(pts, 7) : EpipolarGeometryDegeneracy::create(calib_pts, 7);
e_degen = EpipolarGeometryDegeneracy::create(calib_pts, E_SAMPLE_SIZE);
e_sample = std::vector<int>(E_SAMPLE_SIZE);
if (is_f_objective) {
K2_inv_t = K2.inv().t();
K1_inv = K1.inv();
}
}
bool isModelValid (const Mat &F, const std::vector<int> &sample) const override {
return f_degen->isModelValid(F, sample);
}
bool recoverIfDegenerate (const std::vector<int> &sample_7pt, const Mat &/*best*/, const Score &best_score,
Mat &out_model, Score &out_score) override {
out_score = Score();
for (const auto &instance : instances) {
for (int i = 0; i < E_SAMPLE_SIZE; i++)
e_sample[i] = sample_7pt[instance[i]];
const int num_models = e_solver->estimate(e_sample, e_models);
for (int i = 0; i < num_models; i++) {
if (e_degen->isModelValid(e_models[i], e_sample)) {
const Mat model = is_F_objective ? Mat(K2_inv_t * Matx33d(e_models[i]) * K1_inv) : e_models[i];
const auto sc = quality->getScore(model);
if (sc.isBetter(out_score)) {
out_score = sc;
model.copyTo(out_model);
}
}
}
if (out_score.isBetter(best_score)) break;
}
return true;
}
};
Ptr<FundamentalDegeneracyViaE> FundamentalDegeneracyViaE::create (const Ptr<Quality> &quality, const Mat &pts,
const Mat &calib_pts, const Matx33d &K1, const Matx33d &K2, bool is_f_objective) {
return makePtr<FundamentalDegeneracyViaEImpl>(quality, pts, calib_pts, K1, K2, is_f_objective);
}
 ///////////////////////////////// Fundamental Matrix Degeneracy ///////////////////////////////////
 class FundamentalDegeneracyImpl : public FundamentalDegeneracy {
 private:
     RNG rng;
     const Ptr<Quality> quality;
-    const float * const points;
-    const Mat * points_mat;
+    const Ptr<Error> f_error;
+    Ptr<Quality> h_repr_quality;
+    Mat points_mat;
     const Ptr<ReprojectionErrorForward> h_reproj_error;
+    Ptr<EpipolarNonMinimalSolver> f_non_min_solver;
     Ptr<HomographyNonMinimalSolver> h_non_min_solver;
+    Ptr<UniformRandomGenerator> random_gen_H;
     const EpipolarGeometryDegeneracyImpl ep_deg;
     // threshold to find inliers for homography model
-    const double homography_threshold, log_conf = log(0.05);
+    const double homography_threshold, log_conf = log(0.05), H_SAMPLE_THR_SQR = 49/*7^2*/, MAX_H_THR = 225/*15^2*/;
+    double f_threshold_sqr, best_focal = -1;
     // points (1-7) to verify in sample
     std::vector<std::vector<int>> h_sample {{0,1,2},{3,4,5},{0,1,6},{3,4,6},{2,5,6}};
-    std::vector<int> h_inliers;
+    std::vector<std::vector<int>> h_sample_ver {{3,4,5,6},{0,1,2,6},{2,3,4,5},{0,1,2,5},{0,1,3,4}};
+    std::vector<int> non_planar_supports, h_inliers, h_outliers, h_outliers_eval, f_inliers;
     std::vector<double> weights;
     std::vector<Mat> h_models;
-    const int points_size, sample_size;
+    const int points_size, max_iters_plane_and_parallax, MAX_H_SUBSET = 50, MAX_ITERS_H = 6;
+    int num_h_outliers, num_models_used_so_far = 0, estimated_min_non_planar_support,
+        num_h_outliers_eval, TENT_MIN_NON_PLANAR_SUPP;
+    const int MAX_MODELS_TO_TEST = 21, H_INLS_DEGEN_SAMPLE = 5; // 5 by DEGENSAC, Chum et al.
+    Matx33d K, K2, K_inv, K2_inv, K2_inv_t, true_K2_inv, true_K2_inv_t, true_K1_inv, true_K1, true_K2_t;
+    Score best_focal_score;
+    bool true_K_given, is_principal_pt_set = false;
 public:
     FundamentalDegeneracyImpl (int state, const Ptr<Quality> &quality_, const Mat &points_,
-            int sample_size_, double homography_threshold_) :
-            rng (state), quality(quality_), points((float *) points_.data), points_mat(&points_),
+            int sample_size_, int plane_and_parallax_iters, double homography_threshold_,
+            double f_inlier_thr_sqr, const Mat true_K1_, const Mat true_K2_) :
+            rng (state), quality(quality_), f_error(quality_->getErrorFnc()), points_mat(points_),
             h_reproj_error(ReprojectionErrorForward::create(points_)),
             ep_deg (points_, sample_size_), homography_threshold (homography_threshold_),
-            points_size (quality_->getPointsSize()), sample_size (sample_size_) {
+            points_size (quality_->getPointsSize()),
+            max_iters_plane_and_parallax(plane_and_parallax_iters) {
         if (sample_size_ == 8) {
             // add more homography samples to test for 8-points F
-            h_sample.emplace_back(std::vector<int>{0, 1, 7});
-            h_sample.emplace_back(std::vector<int>{0, 2, 7});
-            h_sample.emplace_back(std::vector<int>{3, 5, 7});
-            h_sample.emplace_back(std::vector<int>{3, 6, 7});
-            h_sample.emplace_back(std::vector<int>{2, 4, 7});
+            h_sample.emplace_back(std::vector<int>{0, 1, 7}); h_sample_ver.emplace_back(std::vector<int>{2,3,4,5,6});
+            h_sample.emplace_back(std::vector<int>{0, 2, 7}); h_sample_ver.emplace_back(std::vector<int>{1,3,4,5,6});
+            h_sample.emplace_back(std::vector<int>{3, 5, 7}); h_sample_ver.emplace_back(std::vector<int>{0,1,2,4,6});
+            h_sample.emplace_back(std::vector<int>{3, 6, 7}); h_sample_ver.emplace_back(std::vector<int>{0,1,2,4,5});
+            h_sample.emplace_back(std::vector<int>{2, 4, 7}); h_sample_ver.emplace_back(std::vector<int>{0,1,3,5,6});
         }
+        non_planar_supports = std::vector<int>(MAX_MODELS_TO_TEST);
         h_inliers = std::vector<int>(points_size);
+        h_outliers = std::vector<int>(points_size);
+        h_outliers_eval = std::vector<int>(points_size);
+        f_inliers = std::vector<int>(points_size);
         h_non_min_solver = HomographyNonMinimalSolver::create(points_);
+        f_non_min_solver = EpipolarNonMinimalSolver::create(points_, true /*F*/);
+        num_h_outliers_eval = num_h_outliers = points_size;
+        f_threshold_sqr = f_inlier_thr_sqr;
+        h_repr_quality = MsacQuality::create(points_.rows, homography_threshold_, h_reproj_error);
+        true_K_given = ! true_K1_.empty() && ! true_K2_.empty();
+        if (true_K_given) {
+            true_K1 = Matx33d((double *)true_K1_.data);
+            true_K2_inv = Matx33d(Mat(true_K2_.inv()));
+            true_K2_t = Matx33d(true_K2_).t();
+            true_K1_inv = true_K1.inv();
+            true_K2_inv_t = true_K2_inv.t();
+        }
+        random_gen_H = UniformRandomGenerator::create(rng.uniform(0, INT_MAX), points_size, MAX_H_SUBSET);
+        estimated_min_non_planar_support = TENT_MIN_NON_PLANAR_SUPP = std::min(5, (int)(0.05*points_size));
     }
inline bool isModelValid(const Mat &F, const std::vector<int> &sample) const override { bool estimateHfrom3Points (const Mat &F_best, const std::vector<int> &sample, Mat &H_best) {
return ep_deg.isModelValid(F, sample); Score H_best_score;
} // find e', null vector of F^T
bool recoverIfDegenerate (const std::vector<int> &sample, const Mat &F_best, const Vec3d e_prime = Utils::getLeftEpipole(F_best);
Mat &non_degenerate_model, Score &non_degenerate_model_score) override {
non_degenerate_model_score = Score(); // set worst case
// According to Two-view Geometry Estimation Unaffected by a Dominant Plane
// (http://cmp.felk.cvut.cz/~matas/papers/chum-degen-cvpr05.pdf)
// only 5 homographies enough to test
// triplets {1,2,3}, {4,5,6}, {1,2,7}, {4,5,7} and {3,6,7}
// H = A - e' (M^-1 b)^T
// A = [e']_x F
// b_i = (x′i × (A xi))^T (x′i × e′)‖x′i×e′‖^−2,
// M is a 3×3 matrix with rows x^T_i
// epipole e' is left nullspace of F s.t. e′^T F=0,
// find e', null space of F^T
Vec3d e_prime = F_best.col(0).cross(F_best.col(2));
if (fabs(e_prime(0)) < 1e-10 && fabs(e_prime(1)) < 1e-10 &&
fabs(e_prime(2)) < 1e-10) // if e' is zero
e_prime = F_best.col(1).cross(F_best.col(2));
const Matx33d A = Math::getSkewSymmetric(e_prime) * Matx33d(F_best); const Matx33d A = Math::getSkewSymmetric(e_prime) * Matx33d(F_best);
bool is_degenerate = false;
Vec3d xi_prime(0,0,1), xi(0,0,1), b; int idx = -1;
Matx33d M(0,0,1,0,0,1,0,0,1); // last column of M is 1
bool is_model_degenerate = false;
for (const auto &h_i : h_sample) { // only 5 samples for (const auto &h_i : h_sample) { // only 5 samples
for (int pt_i = 0; pt_i < 3; pt_i++) { idx++;
// find b and M Matx33d H;
const int smpl = 4*sample[h_i[pt_i]]; if (!getH(A, e_prime, 4 * sample[h_i[0]], 4 * sample[h_i[1]], 4 * sample[h_i[2]], H))
xi[0] = points[smpl]; continue;
xi[1] = points[smpl+1];
xi_prime[0] = points[smpl+2];
xi_prime[1] = points[smpl+3];
// (x′i × e')
const Vec3d xprime_X_eprime = xi_prime.cross(e_prime);
// (x′i × (A xi))
const Vec3d xprime_X_Ax = xi_prime.cross(A * xi);
// x′i × (A xi))^T (x′i × e′) / ‖x′i×e′‖^2,
b[pt_i] = xprime_X_Ax.dot(xprime_X_eprime) /
std::pow(norm(xprime_X_eprime), 2);
// M from x^T
M(pt_i, 0) = xi[0];
M(pt_i, 1) = xi[1];
}
// compute H
Matx33d H = A - e_prime * (M.inv() * b).t();
int inliers_out_plane = 0;
h_reproj_error->setModelParameters(Mat(H)); h_reproj_error->setModelParameters(Mat(H));
const auto &ver_pts = h_sample_ver[idx];
// find inliers from sample, points related to H, x' ~ Hx int inliers_in_plane = 3; // 3 points are always inliers
for (int s = 0; s < sample_size; s++) for (int s : ver_pts)
if (h_reproj_error->getError(sample[s]) > homography_threshold) if (h_reproj_error->getError(sample[s]) < homography_threshold) {
if (++inliers_out_plane > 2) if (++inliers_in_plane >= H_INLS_DEGEN_SAMPLE)
break; break;
}
// if there are at least 5 points lying on plane then F is degenerate if (inliers_in_plane >= H_INLS_DEGEN_SAMPLE) {
if (inliers_out_plane <= 2) { is_degenerate = true;
is_model_degenerate = true; const auto h_score = h_repr_quality->getScore(Mat(H));
if (h_score.isBetter(H_best_score)) {
// update homography by polishing on all inliers H_best_score = h_score;
int h_inls_cnt = 0; H_best = Mat(H);
const auto &h_errors = h_reproj_error->getErrors(Mat(H));
for (int pt = 0; pt < points_size; pt++)
if (h_errors[pt] < homography_threshold)
h_inliers[h_inls_cnt++] = pt;
if (h_non_min_solver->estimate(h_inliers, h_inls_cnt, h_models, weights) != 0)
H = Matx33d(h_models[0]);
Mat newF;
const Score newF_score = planeAndParallaxRANSAC(H, newF, h_errors);
if (newF_score.isBetter(non_degenerate_model_score)) {
// store non degenerate model
non_degenerate_model_score = newF_score;
newF.copyTo(non_degenerate_model);
} }
} }
} }
return is_model_degenerate; if (!is_degenerate)
return false;
int h_inls_cnt = optimizeH(H_best, H_best_score);
for (int iter = 0; iter < 2; iter++) {
if (h_non_min_solver->estimate(h_inliers, h_inls_cnt, h_models, weights) == 0)
break;
const auto h_score = h_repr_quality->getScore(h_models[0]);
if (h_score.isBetter(H_best_score)) {
H_best_score = h_score;
h_models[0].copyTo(H_best);
h_inls_cnt = h_repr_quality->getInliers(H_best, h_inliers);
} else break;
}
getOutliersH(H_best);
return true;
} }
Ptr<Degeneracy> clone(int state) const override { bool optimizeF (const Mat &F, const Score &score, Mat &F_new, Score &new_score) {
return makePtr<FundamentalDegeneracyImpl>(state, quality->clone(), *points_mat, std::vector<Mat> Fs;
sample_size, homography_threshold); if (f_non_min_solver->estimate(f_inliers, quality->getInliers(F, f_inliers), Fs, weights)) {
const auto F_polished_score = quality->getScore(f_error->getErrors(Fs[0]));
if (F_polished_score.isBetter(score)) {
Fs[0].copyTo(F_new); new_score = F_polished_score;
return true;
}
}
return false;
} }
private: int optimizeH (Mat &H_best, Score &H_best_score) {
int h_inls_cnt = h_repr_quality->getInliers(H_best, h_inliers);
random_gen_H->setSubsetSize(h_inls_cnt <= MAX_H_SUBSET ? (int)(0.8*h_inls_cnt) : MAX_H_SUBSET);
if (random_gen_H->getSubsetSize() >= 4/*min H sample size*/) {
for (int iter = 0; iter < MAX_ITERS_H; iter++) {
if (h_non_min_solver->estimate(random_gen_H->generateUniqueRandomSubset(h_inliers, h_inls_cnt), random_gen_H->getSubsetSize(), h_models, weights) == 0)
continue;
const auto h_score = h_repr_quality->getScore(h_models[0]);
if (h_score.isBetter(H_best_score)) {
h_models[0].copyTo(H_best);
// if more inliers than previous best
if (h_score.inlier_number > H_best_score.inlier_number || h_score.inlier_number >= MAX_H_SUBSET) {
h_inls_cnt = h_repr_quality->getInliers(H_best, h_inliers);
random_gen_H->setSubsetSize(h_inls_cnt <= MAX_H_SUBSET ? (int)(0.8*h_inls_cnt) : MAX_H_SUBSET);
}
H_best_score = h_score;
}
}
}
return h_inls_cnt;
}
bool recoverIfDegenerate (const std::vector<int> &sample, const Mat &F_best, const Score &F_best_score,
Mat &non_degenerate_model, Score &non_degenerate_model_score) override {
const auto swapF = [&] (const Mat &_F, const Score &_score) {
const auto non_min_solver = EpipolarNonMinimalSolver::create(points_mat, true);
if (! optimizeF(_F, _score, non_degenerate_model, non_degenerate_model_score)) {
_F.copyTo(non_degenerate_model);
non_degenerate_model_score = _score;
}
};
Mat F_from_H, F_from_E, H_best; Score F_from_H_score, F_from_E_score;
if (! estimateHfrom3Points(F_best, sample, H_best)) {
return false; // non degenerate
}
if (true_K_given) {
if (getFfromTrueK(H_best, F_from_H, F_from_H_score)) {
if (F_from_H_score.isBetter(F_from_E_score))
swapF(F_from_H, F_from_H_score);
else swapF(F_from_E, F_from_E_score);
return true;
} else {
non_degenerate_model_score = Score();
return true; // no translation
}
}
const int non_planar_support_degen_F = getNonPlanarSupport(F_best);
Score F_pl_par_score, F_calib_score; Mat F_pl_par, F_calib;
if (calibDegensac(H_best, F_calib, F_calib_score, non_planar_support_degen_F, F_best_score)) {
if (planeAndParallaxRANSAC(H_best, h_outliers, num_h_outliers, max_iters_plane_and_parallax, true,
F_best_score, non_planar_support_degen_F, F_pl_par, F_pl_par_score) && F_pl_par_score.isBetter(F_calib_score)
&& getNonPlanarSupport(F_pl_par) > getNonPlanarSupport(F_calib)) {
swapF(F_pl_par, F_pl_par_score);
return true;
}
swapF(F_calib, F_calib_score);
return true;
} else {
if (planeAndParallaxRANSAC(H_best, h_outliers, num_h_outliers, max_iters_plane_and_parallax, true,
F_best_score, non_planar_support_degen_F, F_pl_par, F_pl_par_score)) {
swapF(F_pl_par, F_pl_par_score);
return true;
}
}
if (! isFDegenerate(non_planar_support_degen_F)) {
return false;
}
non_degenerate_model_score = Score();
return true;
}
// RANSAC with plane-and-parallax to find new Fundamental matrix // RANSAC with plane-and-parallax to find new Fundamental matrix
Score planeAndParallaxRANSAC (const Matx33d &H, Mat &best_F, const std::vector<float> &h_errors) { bool getFfromTrueK (const Matx33d &H, Mat &F_from_K, Score &F_from_K_score) {
int max_iters = 100; // with 95% confidence assume at least 17% of inliers std::vector<Matx33d> R; std::vector<Vec3d> t;
Score best_score; if (decomposeHomographyMat(true_K2_inv * H * true_K1, Matx33d::eye(), R, t, noArray()) == 1) {
for (int iters = 0; iters < max_iters; iters++) { // std::cout << "Warning: translation is zero!\n";
return false; // is degenerate
}
// sign of translation does not make difference
const Mat F1 = Mat(true_K2_inv_t * Math::getSkewSymmetric(t[0]) * R[0] * true_K1_inv);
const Mat F2 = Mat(true_K2_inv_t * Math::getSkewSymmetric(t[1]) * R[1] * true_K1_inv);
const auto score_f1 = quality->getScore(f_error->getErrors(F1)), score_f2 = quality->getScore(f_error->getErrors(F2));
if (score_f1.isBetter(score_f2)) {
F_from_K = F1; F_from_K_score = score_f1;
} else {
F_from_K = F2; F_from_K_score = score_f2;
}
return true;
}
bool planeAndParallaxRANSAC (const Matx33d &H, std::vector<int> &non_planar_pts, int num_non_planar_pts,
int max_iters_pl_par, bool use_preemptive, const Score &score_degen_F, int non_planar_support_degen_F,
Mat &F_new, Score &F_new_score) {
if (num_non_planar_pts < 2)
return false;
num_models_used_so_far = 0; // reset estimation of lambda for plane-and-parallax
int max_iters = max_iters_pl_par, non_planar_support = 0, iters = 0;
const float * points = points_mat.ptr<float>();
std::vector<Matx33d> F_good;
const double CLOSE_THR = 1.0;
for (; iters < max_iters; iters++) {
// draw two random points // draw two random points
int h_outlier1 = rng.uniform(0, points_size); int h_outlier1 = 4 * non_planar_pts[rng.uniform(0, num_non_planar_pts)];
int h_outlier2 = rng.uniform(0, points_size); int h_outlier2 = 4 * non_planar_pts[rng.uniform(0, num_non_planar_pts)];
while (h_outlier1 == h_outlier2) while (h_outlier1 == h_outlier2)
h_outlier2 = rng.uniform(0, points_size); h_outlier2 = 4 * non_planar_pts[rng.uniform(0, num_non_planar_pts)];
const auto x1 = points[h_outlier1], y1 = points[h_outlier1+1], X1 = points[h_outlier1+2], Y1 = points[h_outlier1+3];
const auto x2 = points[h_outlier2], y2 = points[h_outlier2+1], X2 = points[h_outlier2+2], Y2 = points[h_outlier2+3];
if ((fabsf(X1 - X2) < CLOSE_THR && fabsf(Y1 - Y2) < CLOSE_THR) ||
(fabsf(x1 - x2) < CLOSE_THR && fabsf(y1 - y2) < CLOSE_THR))
continue;
// do plane and parallax with outliers of H
// F = [(p1' x Hp1) x (p2' x Hp2)]_x H = [e']_x H
Vec3d ep = (Vec3d(X1, Y1, 1).cross(H * Vec3d(x1, y1, 1))) // l1 = p1' x Hp1
.cross((Vec3d(X2, Y2, 1).cross(H * Vec3d(x2, y2, 1))));// l2 = p2' x Hp2
const Matx33d F = Math::getSkewSymmetric(ep) * H;
const auto * const f = F.val;
// check orientation constraint of epipolar lines
const bool valid = (f[0]*x1+f[1]*y1+f[2])*(ep[1]-ep[2]*Y1) * (f[0]*x2+f[1]*y2+f[2])*(ep[1]-ep[2]*Y2) > 0;
if (!valid) continue;
const int num_f_inliers_of_h_outliers = getNonPlanarSupport(Mat(F), num_models_used_so_far >= MAX_MODELS_TO_TEST, non_planar_support);
if (non_planar_support < num_f_inliers_of_h_outliers) {
non_planar_support = num_f_inliers_of_h_outliers;
const double predicted_iters = log_conf / log(1 - pow(static_cast<double>
(getNonPlanarSupport(Mat(F), non_planar_pts, num_non_planar_pts)) / num_non_planar_pts, 2));
if (use_preemptive && ! std::isinf(predicted_iters) && predicted_iters < max_iters)
max_iters = static_cast<int>(predicted_iters);
F_good = { F };
} else if (non_planar_support == num_f_inliers_of_h_outliers) {
F_good.emplace_back(F);
}
}
// find outliers of homography H F_new_score = Score();
if (h_errors[h_outlier1] > homography_threshold && for (const auto &F_ : F_good) {
h_errors[h_outlier2] > homography_threshold) { const auto sc = quality->getScore(f_error->getErrors(Mat(F_)));
if (sc.isBetter(F_new_score)) {
F_new_score = sc;
F_new = Mat(F_);
}
}
if (F_new_score.isBetter(score_degen_F) && non_planar_support > non_planar_support_degen_F)
return true;
if (isFDegenerate(non_planar_support))
return false;
return true;
}
bool calibDegensac (const Matx33d &H, Mat &F_new, Score &F_new_score, int non_planar_support_degen_F, const Score &F_degen_score) {
const float * points = points_mat.ptr<float>();
if (! is_principal_pt_set) {
// estimate principal points from coordinates
float px1 = 0, py1 = 0, px2 = 0, py2 = 0;
for (int i = 0; i < points_size; i++) {
const int idx = 4*i;
if (px1 < points[idx ]) px1 = points[idx ];
if (py1 < points[idx+1]) py1 = points[idx+1];
if (px2 < points[idx+2]) px2 = points[idx+2];
if (py2 < points[idx+3]) py2 = points[idx+3];
}
setPrincipalPoint((int)(px1/2)+1, (int)(py1/2)+1, (int)(px2/2)+1, (int)(py2/2)+1);
}
std::vector<Mat> R; std::vector<Mat> t; std::vector<Mat> F_good;
std::vector<double> best_f;
int non_planar_support_out = 0;
for (double f = 300; f <= 3000; f += 150.) {
K(0,0) = K(1,1) = K2(0,0) = K2(1,1) = f;
const double one_over_f = 1/f;
K_inv(0,0) = K_inv(1,1) = K2_inv(0,0) = K2_inv(1,1) = K2_inv_t(0,0) = K2_inv_t(1,1) = one_over_f;
K_inv(0,2) = -K(0,2)*one_over_f; K_inv(1,2) = -K(1,2)*one_over_f;
K2_inv_t(2,0) = K2_inv(0,2) = -K2(0,2)*one_over_f; K2_inv_t(2,1) = K2_inv(1,2) = -K2(1,2)*one_over_f;
if (decomposeHomographyMat(K2_inv * H * K, Matx33d::eye(), R, t, noArray()) == 1) continue;
Mat F1 = Mat(K2_inv_t * Math::getSkewSymmetric(Vec3d(t[0])) * Matx33d(R[0]) * K_inv);
Mat F2 = Mat(K2_inv_t * Math::getSkewSymmetric(Vec3d(t[2])) * Matx33d(R[2]) * K_inv);
int non_planar_f1 = getNonPlanarSupport(F1, true, non_planar_support_out),
non_planar_f2 = getNonPlanarSupport(F2, true, non_planar_support_out);
if (non_planar_f1 < non_planar_f2) {
non_planar_f1 = non_planar_f2; F1 = F2;
}
if (non_planar_support_out < non_planar_f1) {
non_planar_support_out = non_planar_f1;
F_good = {F1};
best_f = { f };
} else if (non_planar_support_out == non_planar_f1) {
F_good.emplace_back(F1);
best_f.emplace_back(f);
}
}
F_new_score = Score();
for (int i = 0; i < (int) F_good.size(); i++) {
const auto sc = quality->getScore(f_error->getErrors(F_good[i]));
if (sc.isBetter(F_new_score)) {
F_new_score = sc;
F_good[i].copyTo(F_new);
if (sc.isBetter(best_focal_score)) {
best_focal = best_f[i]; // save best focal length
best_focal_score = sc;
}
}
}
if (F_degen_score.isBetter(F_new_score) && non_planar_support_out <= non_planar_support_degen_F)
return false;
// do plane and parallax with outliers of H /*
// F = [(p1' x Hp1) x (p2' x Hp2)]_x H // logarithmic search -> faster but less accurate
const Matx33d F = Math::getSkewSymmetric( double f_min = 300, f_max = 3500;
(Vec3d(points[4*h_outlier1+2], points[4*h_outlier1+3], 1).cross // p1' while (f_max - f_min > 100) {
(H * Vec3d(points[4*h_outlier1 ], points[4*h_outlier1+1], 1))).cross // Hp1 const double f_half = (f_max + f_min) * 0.5f, left_half = (f_min + f_half) * 0.5f, right_half = (f_half + f_max) * 0.5f;
(Vec3d(points[4*h_outlier2+2], points[4*h_outlier2+3], 1).cross // p2' const double inl_in_left = eval_f(left_half), inl_in_right = eval_f(right_half);
(H * Vec3d(points[4*h_outlier2 ], points[4*h_outlier2+1], 1))) // Hp2 if (inl_in_left > inl_in_right)
) * H; f_max = f_half;
else f_min = f_half;
}
*/
return true;
}
void getOutliersH (const Mat &H_best) {
// get H outliers
num_h_outliers_eval = num_h_outliers = 0;
const auto &h_errors = h_reproj_error->getErrors(H_best);
for (int pt = 0; pt < points_size; pt++)
if (h_errors[pt] > H_SAMPLE_THR_SQR) {
h_outliers[num_h_outliers++] = pt;
if (h_errors[pt] > MAX_H_THR)
h_outliers_eval[num_h_outliers_eval++] = pt;
}
}
bool verifyFundamental (const Mat &F_best, const Score &F_score, const std::vector<bool> &inliers_mask, Mat &F_new, Score &new_score) override {
const int f_sample_size = 3, max_H_iters = 5; // 3.52 = log(0.01) / log(1 - std::pow(0.9, 3));
int num_f_inliers = 0;
std::vector<int> inliers(points_size), f_sample(f_sample_size);
for (int i = 0; i < points_size; i++) if (inliers_mask[i]) inliers[num_f_inliers++] = i;
const auto sampler = UniformSampler::create(0, f_sample_size, num_f_inliers);
// find e', null space of F^T
const Vec3d e_prime = Utils::getLeftEpipole(F_best);
const Matx33d A = Math::getSkewSymmetric(e_prime) * Matx33d(F_best);
Score H_best_score; Mat H_best;
for (int iter = 0; iter < max_H_iters; iter++) {
sampler->generateSample(f_sample);
Matx33d H;
if (!getH(A, e_prime, 4*inliers[f_sample[0]], 4*inliers[f_sample[1]], 4*inliers[f_sample[2]], H))
continue;
const auto h_score = h_repr_quality->getScore(Mat(H));
if (h_score.isBetter(H_best_score)) {
H_best_score = h_score; H_best = Mat(H);
}
}
if (H_best.empty()) return false; // non-degenerate
optimizeH(H_best, H_best_score);
getOutliersH(H_best);
const int non_planar_support_best_F = getNonPlanarSupport(F_best);
const bool is_F_degen = isFDegenerate(non_planar_support_best_F);
Mat F_from_K; Score F_from_K_score;
bool success = false;
// try to generate a non-degenerate F even if the so-far-the-best model turns out not to be degenerate
if (true_K_given) {
// use GT calibration
if (getFfromTrueK(H_best, F_from_K, F_from_K_score)) {
new_score = F_from_K_score;
F_from_K.copyTo(F_new);
success = true;
}
} else {
// use calibrated DEGENSAC
if (calibDegensac(H_best, F_from_K, F_from_K_score, non_planar_support_best_F, F_score)) {
new_score = F_from_K_score;
F_from_K.copyTo(F_new);
success = true;
}
}
if (!is_F_degen) {
return false;
} else if (success) // F is degenerate
return true; // but successfully recovered
// recover degenerate F using plane-and-parallax
Score plane_parallax_score; Mat F_plane_parallax;
if (planeAndParallaxRANSAC(H_best, h_outliers, num_h_outliers, 20, true,
F_score, non_planar_support_best_F, F_plane_parallax, plane_parallax_score)) {
new_score = plane_parallax_score;
F_plane_parallax.copyTo(F_new);
return true;
}
// plane-and-parallax failed. A previous non-degenerate so-far-the-best model will be used instead
new_score = Score();
return true;
}
void setPrincipalPoint (double px_, double py_) override {
setPrincipalPoint(px_, py_, 0, 0);
}
void setPrincipalPoint (double px_, double py_, double px2_, double py2_) override {
if (px_ > DBL_EPSILON && py_ > DBL_EPSILON) {
is_principal_pt_set = true;
K = {1, 0, px_, 0, 1, py_, 0, 0, 1};
if (px2_ > DBL_EPSILON && py2_ > DBL_EPSILON) K2 = {1, 0, px2_, 0, 1, py2_, 0, 0, 1};
else K2 = K;
K_inv = K2_inv = K2_inv_t = Matx33d::eye();
}
}
private:
bool getH (const Matx33d &A, const Vec3d &e_prime, int smpl1, int smpl2, int smpl3, Matx33d &H) {
const float * points = points_mat.ptr<float>();
Vec3d p1(points[smpl1 ], points[smpl1+1], 1), p2(points[smpl2 ], points[smpl2+1], 1), p3(points[smpl3 ], points[smpl3+1], 1);
Vec3d P1(points[smpl1+2], points[smpl1+3], 1), P2(points[smpl2+2], points[smpl2+3], 1), P3(points[smpl3+2], points[smpl3+3], 1);
const Matx33d M (p1[0], p1[1], 1, p2[0], p2[1], 1, p3[0], p3[1], 1);
if (p1.cross(p2).dot(p3) * P1.cross(P2).dot(P3) < 0) return false;
// (x′i × e')
const Vec3d P1e = P1.cross(e_prime), P2e = P2.cross(e_prime), P3e = P3.cross(e_prime);
// b_i = (x′i × (A xi))^T (x′i × e′) / ‖x′i × e′‖^2
const Vec3d b (P1.cross(A * p1).dot(P1e) / (P1e[0]*P1e[0]+P1e[1]*P1e[1]+P1e[2]*P1e[2]),
P2.cross(A * p2).dot(P2e) / (P2e[0]*P2e[0]+P2e[1]*P2e[1]+P2e[2]*P2e[2]),
P3.cross(A * p3).dot(P3e) / (P3e[0]*P3e[0]+P3e[1]*P3e[1]+P3e[2]*P3e[2]));
H = A - e_prime * (M.inv() * b).t();
return true;
}
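// The construction above follows the plane-induced homography from F and three correspondences:
// with e' the left epipole of F and A = [e']_x F, H = A - e' (M^{-1} b)^T, where the rows of M are
// the points x_i and b_i = (x'_i x (A x_i))^T (x'_i x e') / ||x'_i x e'||^2 (cf. Hartley & Zisserman,
// Multiple View Geometry). The sign test on the two triple products rejects samples whose
// orientation is inconsistent between the two images.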
int getNonPlanarSupport (const Mat &F, const std::vector<int> &pts, int num_pts) {
int non_rand_support = 0;
f_error->setModelParameters(F);
for (int pt = 0; pt < num_pts; pt++)
if (f_error->getError(pts[pt]) < f_threshold_sqr)
non_rand_support++;
return non_rand_support;
}
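// "Non-planar support" counts, among the given points (typically the H-outliers), those that are
// still consistent with F; a homography-degenerate F explains little beyond the dominant plane,
// so a low count is used below as the degeneracy indicator.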
int getNonPlanarSupport (const Mat &F, bool preemptive=false, int max_so_far=0) {
int non_rand_support = 0;
f_error->setModelParameters(F);
if (preemptive) {
const auto preemptive_thr = -num_h_outliers_eval + max_so_far;
for (int pt = 0; pt < num_h_outliers_eval; pt++)
if (f_error->getError(h_outliers_eval[pt]) < f_threshold_sqr)
non_rand_support++;
else if (non_rand_support - pt < preemptive_thr)
break;
} else {
for (int pt = 0; pt < num_h_outliers_eval; pt++)
if (f_error->getError(h_outliers_eval[pt]) < f_threshold_sqr)
non_rand_support++;
if (num_models_used_so_far < MAX_MODELS_TO_TEST && !true_K_given/*for K we know that recovered F cannot be degenerate*/) {
non_planar_supports[num_models_used_so_far++] = non_rand_support;
if (num_models_used_so_far == MAX_MODELS_TO_TEST) {
getLambda(non_planar_supports, 2.32, num_h_outliers_eval, 0, false, estimated_min_non_planar_support);
if (estimated_min_non_planar_support < 3) estimated_min_non_planar_support = 3;
}
}
}
return non_rand_support;
}
inline bool isModelValid(const Mat &F, const std::vector<int> &sample) const override {
return ep_deg.isModelValid(F, sample);
}
bool isFDegenerate (int num_f_inliers_h_outliers) const {
if (num_models_used_so_far < MAX_MODELS_TO_TEST)
// the minimum number of non-planar supports has not been estimated yet -> use a tentative threshold
return num_f_inliers_h_outliers < std::min(TENT_MIN_NON_PLANAR_SUPP, (int)(0.1 * num_h_outliers_eval));
return num_f_inliers_h_outliers < estimated_min_non_planar_support;
}
};
Ptr<FundamentalDegeneracy> FundamentalDegeneracy::create (int state, const Ptr<Quality> &quality_,
const Mat &points_, int sample_size_, int max_iters_plane_and_parallax, double homography_threshold_,
double f_inlier_thr_sqr, const Mat true_K1, const Mat true_K2) {
return makePtr<FundamentalDegeneracyImpl>(state, quality_, points_, sample_size_,
max_iters_plane_and_parallax, homography_threshold_, f_inlier_thr_sqr, true_K1, true_K2);
}
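// A minimal usage sketch (illustrative only, not part of the patch; `quality`, `points`, `F_best`,
// `F_score` and `inliers_mask` are assumed to exist, and the numeric thresholds are placeholders):
//     Ptr<FundamentalDegeneracy> f_degen = FundamentalDegeneracy::create(0 /*RNG state*/, quality,
//             points, 7 /*minimal sample size*/, 20 /*plane-and-parallax iterations*/,
//             8.0 /*homography threshold*/, 1.0 /*squared F inlier threshold*/,
//             Mat() /*K1 unknown*/, Mat() /*K2 unknown*/);
//     Mat F_new; Score new_score;
//     if (f_degen->verifyFundamental(F_best, F_score, inliers_mask, F_new, new_score)) {
//         // F_best is H-degenerate; F_new / new_score hold the recovered model, if any
//     }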
class EssentialDegeneracyImpl : public EssentialDegeneracy {
private:
const EpipolarGeometryDegeneracyImpl ep_deg;
public:
explicit EssentialDegeneracyImpl (const Mat &points, int sample_size_) :
ep_deg (points, sample_size_) {}
inline bool isModelValid(const Mat &E, const std::vector<int> &sample) const override {
return ep_deg.isModelValid(E, sample);
}
};
Ptr<EssentialDegeneracy> EssentialDegeneracy::create (const Mat &points_, int sample_size_) {
return makePtr<EssentialDegeneracyImpl>(points_, sample_size_);

@ -46,29 +46,27 @@
#endif
namespace cv { namespace usac {
class DLSPnPImpl : public DLSPnP {
#if defined(HAVE_LAPACK) || defined(HAVE_EIGEN)
private:
Mat points_mat, calib_norm_points_mat;
const Matx33d K;
public:
explicit DLSPnPImpl (const Mat &points_, const Mat &calib_norm_points_, const Mat &K_)
: points_mat(points_), calib_norm_points_mat(calib_norm_points_), K(K_)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
CV_DbgAssert(!calib_norm_points_mat.empty() && calib_norm_points_mat.isContinuous());
}
#else
public:
explicit DLSPnPImpl (const Mat &, const Mat &, const Mat &) {}
#endif
// return minimal sample size required for non-minimal estimation.
int getMinimumRequiredSampleSize() const override { return 3; }
// return maximum number of possible solutions.
int getMaxNumberOfSolutions () const override { return 27; }
Ptr<NonMinimalSolver> clone () const override {
return makePtr<DLSPnPImpl>(*points_mat, *calib_norm_points_mat, *K_mat);
}
#if defined(HAVE_LAPACK) || defined(HAVE_EIGEN)
int estimate(const std::vector<int> &sample, int sample_number,
std::vector<Mat> &models_, const std::vector<double> &/*weights_*/) const override {
@ -92,7 +90,8 @@ public:
// Compute V*W*b with the rotation parameters factored out. This is the
// translation parameterized by the 9 entries of the rotation matrix.
Matx<double, 3, 9> translation_factor = Matx<double, 3, 9>::zeros();
const float * points = points_mat.ptr<float>();
const float * calib_norm_points = calib_norm_points_mat.ptr<float>();
for (int i = 0; i < sample_number; i++) {
const int idx_world = 5 * sample[i], idx_calib = 3 * sample[i];
Vec3d normalized_feature_pos(calib_norm_points[idx_calib],
@ -170,7 +169,6 @@ public:
for (int i = 0; i < sample_number; i++)
pts_random_shuffle[i] = i;
randShuffle(pts_random_shuffle);
for (int i = 0; i < 27; i++) {
// If the rotation solutions are real, treat this as a valid candidate
// rotation.
@ -227,6 +225,12 @@ public:
#endif
}
int estimate (const std::vector<bool> &/*mask*/, std::vector<Mat> &/*models*/,
const std::vector<double> &/*weights*/) override {
return 0;
}
void enforceRankConstraint (bool /*enforce*/) override {}
protected:
#if defined(HAVE_LAPACK) || defined(HAVE_EIGEN)
const int indices[1968] = {
@ -871,7 +875,7 @@ protected:
2 * D[74] - 2 * D[78]); // s1^3
}
};
Ptr<DLSPnP> DLSPnP::create(const Mat &points_, const Mat &calib_norm_pts, const Mat &K) {
return makePtr<DLSPnPImpl>(points_, calib_norm_pts, K);
}
}}
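// A minimal usage sketch for an Eigen/LAPACK build (illustrative only; the layout of `points`
// (5 floats per correspondence) and `calib_norm_pts` (3 floats per point) follows whatever the
// surrounding USAC utilities produce and is not spelled out by this patch):
//     Ptr<DLSPnP> pnp = DLSPnP::create(points, calib_norm_pts, K);
//     std::vector<int> sample = {0, 1, 2};      // getMinimumRequiredSampleSize() == 3
//     std::vector<Mat> poses;                   // up to getMaxNumberOfSolutions() == 27 candidates
//     const int num = pnp->estimate(sample, (int)sample.size(), poses, std::vector<double>());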

@ -11,29 +11,29 @@
#endif
namespace cv { namespace usac {
// Essential matrix solver:
/*
* H. Stewenius, C. Engels, and D. Nister. Recent developments on direct relative orientation.
* ISPRS J. of Photogrammetry and Remote Sensing, 60:284,294, 2006
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.61.9329&rep=rep1&type=pdf
*
* D. Nister. An efficient solution to the five-point relative pose problem
* IEEE Transactions on Pattern Analysis and Machine Intelligence
* https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.86.8769&rep=rep1&type=pdf
*/
class EssentialMinimalSolver5ptsImpl : public EssentialMinimalSolver5pts {
private:
// Points must be calibrated K^-1 x
const Mat points_mat;
const bool use_svd, is_nister;
public:
explicit EssentialMinimalSolver5ptsImpl (const Mat &points_, bool use_svd_=false, bool is_nister_=false) :
points_mat(points_), use_svd(use_svd_), is_nister(is_nister_)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override {
const float * pts = points_mat.ptr<float>();
// (1) Extract 4 null vectors from linear equations of epipolar constraint
std::vector<double> coefficients(45); // 5 pts=rows, 9 columns
auto *coefficients_ = &coefficients[0];
@ -53,25 +53,35 @@ public:
const int num_cols = 9, num_e_mat = 4;
double ee[36]; // 9*4
if (use_svd) {
Matx<double, 5, 9> coeffs (&coefficients[0]);
Mat D, U, Vt;
SVDecomp(coeffs, D, U, Vt, SVD::FULL_UV + SVD::MODIFY_A);
const auto * const vt = (double *) Vt.data;
for (int i = 0; i < num_e_mat; i++)
for (int j = 0; j < num_cols; j++)
ee[i * num_cols + j] = vt[(8-i)*num_cols+j];
} else {
// eliminate linear equations
if (!Math::eliminateUpperTriangular(coefficients, 5, num_cols))
return 0;
for (int i = 0; i < num_e_mat; i++)
for (int j = 5; j < num_cols; j++)
ee[num_cols * i + j] = (i + 5 == j) ? 1 : 0;
// use back-substitution
for (int e = 0; e < num_e_mat; e++) {
const int curr_e = num_cols * e;
// start from the last row
for (int i = 4; i >= 0; i--) {
const int row_i = i * num_cols;
double acc = 0;
for (int j = i + 1; j < num_cols; j++)
acc -= coefficients[row_i + j] * ee[curr_e + j];
ee[curr_e + i] = acc / coefficients[row_i + i];
// due to numerical errors return 0 solutions
if (std::isnan(ee[curr_e + i]))
return 0;
}
}
}
@ -80,130 +90,215 @@ public:
{null_space.col(0), null_space.col(3), null_space.col(6)},
{null_space.col(1), null_space.col(4), null_space.col(7)},
{null_space.col(2), null_space.col(5), null_space.col(8)}};
Mat_<double> constraint_mat(10, 20);
Matx<double, 1, 10> eet[3][3];
// (2) Use the rank constraint and the trace constraint to build ten third-order polynomial
// equations in the three unknowns. The monomials are ordered in GrLex order and
// represented in a 10×20 matrix, where each row corresponds to an equation and each column
// corresponds to a monomial
if (is_nister) {
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++)
// compute EE Transpose
// Shorthand for multiplying the Essential matrix with its transpose.
eet[i][j] = multPolysDegOne(null_space_mat[i][0].val, null_space_mat[j][0].val) +
multPolysDegOne(null_space_mat[i][1].val, null_space_mat[j][1].val) +
multPolysDegOne(null_space_mat[i][2].val, null_space_mat[j][2].val);
const Matx<double, 1, 10> trace = 0.5*(eet[0][0] + eet[1][1] + eet[2][2]);
// Trace constraint
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++)
Mat(multPolysDegOneAndTwoNister(i == 0 ? (eet[i][0] - trace).val : eet[i][0].val, null_space_mat[0][j].val) +
multPolysDegOneAndTwoNister(i == 1 ? (eet[i][1] - trace).val : eet[i][1].val, null_space_mat[1][j].val) +
multPolysDegOneAndTwoNister(i == 2 ? (eet[i][2] - trace).val : eet[i][2].val, null_space_mat[2][j].val)).copyTo(constraint_mat.row(1+3 * j + i));
// Rank = zero determinant constraint
Mat(multPolysDegOneAndTwoNister(
(multPolysDegOne(null_space_mat[0][1].val, null_space_mat[1][2].val) -
multPolysDegOne(null_space_mat[0][2].val, null_space_mat[1][1].val)).val,
null_space_mat[2][0].val) +
multPolysDegOneAndTwoNister(
(multPolysDegOne(null_space_mat[0][2].val, null_space_mat[1][0].val) -
multPolysDegOne(null_space_mat[0][0].val, null_space_mat[1][2].val)).val,
null_space_mat[2][1].val) +
multPolysDegOneAndTwoNister(
(multPolysDegOne(null_space_mat[0][0].val, null_space_mat[1][1].val) -
multPolysDegOne(null_space_mat[0][1].val, null_space_mat[1][0].val)).val,
null_space_mat[2][2].val)).copyTo(constraint_mat.row(0));
Matx<double, 10, 10> Acoef = constraint_mat.colRange(0, 10),
Bcoef = constraint_mat.colRange(10, 20), A_;
if (!solve(Acoef, Bcoef, A_, DECOMP_LU)) return 0;
double b[3 * 13];
const auto * const a = A_.val;
for (int i = 0; i < 3; i++) {
const int r1_idx = i * 2 + 4, r2_idx = i * 2 + 5; // process from 4th row
for (int j = 0, r1_j = 0, r2_j = 0; j < 13; j++)
b[i*13+j] = ((j == 0 || j == 4 || j == 8) ? 0 : a[r1_idx*A_.cols+r1_j++]) - ((j == 3 || j == 7 || j == 12) ? 0 : a[r2_idx*A_.cols+r2_j++]);
}
std::vector<double> c(11), rs;
// filling coefficients of the 10-degree polynomial satisfying the zero-determinant constraint of the essential matrix, i.e., det(E) = 0
// based on "An Efficient Solution to the Five-Point Relative Pose Problem" (David Nister)
// same as in five-point.cpp
c[10] = (b[0]*b[17]*b[34]+b[26]*b[4]*b[21]-b[26]*b[17]*b[8]-b[13]*b[4]*b[34]-b[0]*b[21]*b[30]+b[13]*b[30]*b[8]);
c[9] = (b[26]*b[4]*b[22]+b[14]*b[30]*b[8]+b[13]*b[31]*b[8]+b[1]*b[17]*b[34]-b[13]*b[5]*b[34]+b[26]*b[5]*b[21]-b[0]*b[21]*b[31]-b[26]*b[17]*b[9]-b[1]*b[21]*b[30]+b[27]*b[4]*b[21]+b[0]*b[17]*b[35]-b[0]*b[22]*b[30]+b[13]*b[30]*b[9]+b[0]*b[18]*b[34]-b[27]*b[17]*b[8]-b[14]*b[4]*b[34]-b[13]*b[4]*b[35]-b[26]*b[18]*b[8]);
c[8] = (b[14]*b[30]*b[9]+b[14]*b[31]*b[8]+b[13]*b[31]*b[9]-b[13]*b[4]*b[36]-b[13]*b[5]*b[35]+b[15]*b[30]*b[8]-b[13]*b[6]*b[34]+b[13]*b[30]*b[10]+b[13]*b[32]*b[8]-b[14]*b[4]*b[35]-b[14]*b[5]*b[34]+b[26]*b[4]*b[23]+b[26]*b[5]*b[22]+b[26]*b[6]*b[21]-b[26]*b[17]*b[10]-b[15]*b[4]*b[34]-b[26]*b[18]*b[9]-b[26]*b[19]*b[8]+b[27]*b[4]*b[22]+b[27]*b[5]*b[21]-b[27]*b[17]*b[9]-b[27]*b[18]*b[8]-b[1]*b[21]*b[31]-b[0]*b[23]*b[30]-b[0]*b[21]*b[32]+b[28]*b[4]*b[21]-b[28]*b[17]*b[8]+b[2]*b[17]*b[34]+b[0]*b[18]*b[35]-b[0]*b[22]*b[31]+b[0]*b[17]*b[36]+b[0]*b[19]*b[34]-b[1]*b[22]*b[30]+b[1]*b[18]*b[34]+b[1]*b[17]*b[35]-b[2]*b[21]*b[30]);
c[7] = (b[14]*b[30]*b[10]+b[14]*b[32]*b[8]-b[3]*b[21]*b[30]+b[3]*b[17]*b[34]+b[13]*b[32]*b[9]+b[13]*b[33]*b[8]-b[13]*b[4]*b[37]-b[13]*b[5]*b[36]+b[15]*b[30]*b[9]+b[15]*b[31]*b[8]-b[16]*b[4]*b[34]-b[13]*b[6]*b[35]-b[13]*b[7]*b[34]+b[13]*b[30]*b[11]+b[13]*b[31]*b[10]+b[14]*b[31]*b[9]-b[14]*b[4]*b[36]-b[14]*b[5]*b[35]-b[14]*b[6]*b[34]+b[16]*b[30]*b[8]-b[26]*b[20]*b[8]+b[26]*b[4]*b[24]+b[26]*b[5]*b[23]+b[26]*b[6]*b[22]+b[26]*b[7]*b[21]-b[26]*b[17]*b[11]-b[15]*b[4]*b[35]-b[15]*b[5]*b[34]-b[26]*b[18]*b[10]-b[26]*b[19]*b[9]+b[27]*b[4]*b[23]+b[27]*b[5]*b[22]+b[27]*b[6]*b[21]-b[27]*b[17]*b[10]-b[27]*b[18]*b[9]-b[27]*b[19]*b[8]+b[0]*b[17]*b[37]-b[0]*b[23]*b[31]-b[0]*b[24]*b[30]-b[0]*b[21]*b[33]-b[29]*b[17]*b[8]+b[28]*b[4]*b[22]+b[28]*b[5]*b[21]-b[28]*b[17]*b[9]-b[28]*b[18]*b[8]+b[29]*b[4]*b[21]+b[1]*b[19]*b[34]-b[2]*b[21]*b[31]+b[0]*b[20]*b[34]+b[0]*b[19]*b[35]+b[0]*b[18]*b[36]-b[0]*b[22]*b[32]-b[1]*b[23]*b[30]-b[1]*b[21]*b[32]+b[1]*b[18]*b[35]-b[1]*b[22]*b[31]-b[2]*b[22]*b[30]+b[2]*b[17]*b[35]+b[1]*b[17]*b[36]+b[2]*b[18]*b[34]);
c[6] = (-b[14]*b[6]*b[35]-b[14]*b[7]*b[34]-b[3]*b[22]*b[30]-b[3]*b[21]*b[31]+b[3]*b[17]*b[35]+b[3]*b[18]*b[34]+b[13]*b[32]*b[10]+b[13]*b[33]*b[9]-b[13]*b[4]*b[38]-b[13]*b[5]*b[37]-b[15]*b[6]*b[34]+b[15]*b[30]*b[10]+b[15]*b[32]*b[8]-b[16]*b[4]*b[35]-b[13]*b[6]*b[36]-b[13]*b[7]*b[35]+b[13]*b[31]*b[11]+b[13]*b[30]*b[12]+b[14]*b[32]*b[9]+b[14]*b[33]*b[8]-b[14]*b[4]*b[37]-b[14]*b[5]*b[36]+b[16]*b[30]*b[9]+b[16]*b[31]*b[8]-b[26]*b[20]*b[9]+b[26]*b[4]*b[25]+b[26]*b[5]*b[24]+b[26]*b[6]*b[23]+b[26]*b[7]*b[22]-b[26]*b[17]*b[12]+b[14]*b[30]*b[11]+b[14]*b[31]*b[10]+b[15]*b[31]*b[9]-b[15]*b[4]*b[36]-b[15]*b[5]*b[35]-b[26]*b[18]*b[11]-b[26]*b[19]*b[10]-b[27]*b[20]*b[8]+b[27]*b[4]*b[24]+b[27]*b[5]*b[23]+b[27]*b[6]*b[22]+b[27]*b[7]*b[21]-b[27]*b[17]*b[11]-b[27]*b[18]*b[10]-b[27]*b[19]*b[9]-b[16]*b[5]*b[34]-b[29]*b[17]*b[9]-b[29]*b[18]*b[8]+b[28]*b[4]*b[23]+b[28]*b[5]*b[22]+b[28]*b[6]*b[21]-b[28]*b[17]*b[10]-b[28]*b[18]*b[9]-b[28]*b[19]*b[8]+b[29]*b[4]*b[22]+b[29]*b[5]*b[21]-b[2]*b[23]*b[30]+b[2]*b[18]*b[35]-b[1]*b[22]*b[32]-b[2]*b[21]*b[32]+b[2]*b[19]*b[34]+b[0]*b[19]*b[36]-b[0]*b[22]*b[33]+b[0]*b[20]*b[35]-b[0]*b[23]*b[32]-b[0]*b[25]*b[30]+b[0]*b[17]*b[38]+b[0]*b[18]*b[37]-b[0]*b[24]*b[31]+b[1]*b[17]*b[37]-b[1]*b[23]*b[31]-b[1]*b[24]*b[30]-b[1]*b[21]*b[33]+b[1]*b[20]*b[34]+b[1]*b[19]*b[35]+b[1]*b[18]*b[36]+b[2]*b[17]*b[36]-b[2]*b[22]*b[31]);
c[5] = (-b[14]*b[6]*b[36]-b[14]*b[7]*b[35]+b[14]*b[31]*b[11]-b[3]*b[23]*b[30]-b[3]*b[21]*b[32]+b[3]*b[18]*b[35]-b[3]*b[22]*b[31]+b[3]*b[17]*b[36]+b[3]*b[19]*b[34]+b[13]*b[32]*b[11]+b[13]*b[33]*b[10]-b[13]*b[5]*b[38]-b[15]*b[6]*b[35]-b[15]*b[7]*b[34]+b[15]*b[30]*b[11]+b[15]*b[31]*b[10]+b[16]*b[31]*b[9]-b[13]*b[6]*b[37]-b[13]*b[7]*b[36]+b[13]*b[31]*b[12]+b[14]*b[32]*b[10]+b[14]*b[33]*b[9]-b[14]*b[4]*b[38]-b[14]*b[5]*b[37]-b[16]*b[6]*b[34]+b[16]*b[30]*b[10]+b[16]*b[32]*b[8]-b[26]*b[20]*b[10]+b[26]*b[5]*b[25]+b[26]*b[6]*b[24]+b[26]*b[7]*b[23]+b[14]*b[30]*b[12]+b[15]*b[32]*b[9]+b[15]*b[33]*b[8]-b[15]*b[4]*b[37]-b[15]*b[5]*b[36]+b[29]*b[5]*b[22]+b[29]*b[6]*b[21]-b[26]*b[18]*b[12]-b[26]*b[19]*b[11]-b[27]*b[20]*b[9]+b[27]*b[4]*b[25]+b[27]*b[5]*b[24]+b[27]*b[6]*b[23]+b[27]*b[7]*b[22]-b[27]*b[17]*b[12]-b[27]*b[18]*b[11]-b[27]*b[19]*b[10]-b[28]*b[20]*b[8]-b[16]*b[4]*b[36]-b[16]*b[5]*b[35]-b[29]*b[17]*b[10]-b[29]*b[18]*b[9]-b[29]*b[19]*b[8]+b[28]*b[4]*b[24]+b[28]*b[5]*b[23]+b[28]*b[6]*b[22]+b[28]*b[7]*b[21]-b[28]*b[17]*b[11]-b[28]*b[18]*b[10]-b[28]*b[19]*b[9]+b[29]*b[4]*b[23]-b[2]*b[22]*b[32]-b[2]*b[21]*b[33]-b[1]*b[24]*b[31]+b[0]*b[18]*b[38]-b[0]*b[24]*b[32]+b[0]*b[19]*b[37]+b[0]*b[20]*b[36]-b[0]*b[25]*b[31]-b[0]*b[23]*b[33]+b[1]*b[19]*b[36]-b[1]*b[22]*b[33]+b[1]*b[20]*b[35]+b[2]*b[19]*b[35]-b[2]*b[24]*b[30]-b[2]*b[23]*b[31]+b[2]*b[20]*b[34]+b[2]*b[17]*b[37]-b[1]*b[25]*b[30]+b[1]*b[18]*b[37]+b[1]*b[17]*b[38]-b[1]*b[23]*b[32]+b[2]*b[18]*b[36]);
c[4] = (-b[14]*b[6]*b[37]-b[14]*b[7]*b[36]+b[14]*b[31]*b[12]+b[3]*b[17]*b[37]-b[3]*b[23]*b[31]-b[3]*b[24]*b[30]-b[3]*b[21]*b[33]+b[3]*b[20]*b[34]+b[3]*b[19]*b[35]+b[3]*b[18]*b[36]-b[3]*b[22]*b[32]+b[13]*b[32]*b[12]+b[13]*b[33]*b[11]-b[15]*b[6]*b[36]-b[15]*b[7]*b[35]+b[15]*b[31]*b[11]+b[15]*b[30]*b[12]+b[16]*b[32]*b[9]+b[16]*b[33]*b[8]-b[13]*b[6]*b[38]-b[13]*b[7]*b[37]+b[14]*b[32]*b[11]+b[14]*b[33]*b[10]-b[14]*b[5]*b[38]-b[16]*b[6]*b[35]-b[16]*b[7]*b[34]+b[16]*b[30]*b[11]+b[16]*b[31]*b[10]-b[26]*b[19]*b[12]-b[26]*b[20]*b[11]+b[26]*b[6]*b[25]+b[26]*b[7]*b[24]+b[15]*b[32]*b[10]+b[15]*b[33]*b[9]-b[15]*b[4]*b[38]-b[15]*b[5]*b[37]+b[29]*b[5]*b[23]+b[29]*b[6]*b[22]+b[29]*b[7]*b[21]-b[27]*b[20]*b[10]+b[27]*b[5]*b[25]+b[27]*b[6]*b[24]+b[27]*b[7]*b[23]-b[27]*b[18]*b[12]-b[27]*b[19]*b[11]-b[28]*b[20]*b[9]-b[16]*b[4]*b[37]-b[16]*b[5]*b[36]+b[0]*b[19]*b[38]-b[0]*b[24]*b[33]+b[0]*b[20]*b[37]-b[29]*b[17]*b[11]-b[29]*b[18]*b[10]-b[29]*b[19]*b[9]+b[28]*b[4]*b[25]+b[28]*b[5]*b[24]+b[28]*b[6]*b[23]+b[28]*b[7]*b[22]-b[28]*b[17]*b[12]-b[28]*b[18]*b[11]-b[28]*b[19]*b[10]-b[29]*b[20]*b[8]+b[29]*b[4]*b[24]+b[2]*b[18]*b[37]-b[0]*b[25]*b[32]+b[1]*b[18]*b[38]-b[1]*b[24]*b[32]+b[1]*b[19]*b[37]+b[1]*b[20]*b[36]-b[1]*b[25]*b[31]+b[2]*b[17]*b[38]+b[2]*b[19]*b[36]-b[2]*b[24]*b[31]-b[2]*b[22]*b[33]-b[2]*b[23]*b[32]+b[2]*b[20]*b[35]-b[1]*b[23]*b[33]-b[2]*b[25]*b[30]);
c[3] = (-b[14]*b[6]*b[38]-b[14]*b[7]*b[37]+b[3]*b[19]*b[36]-b[3]*b[22]*b[33]+b[3]*b[20]*b[35]-b[3]*b[23]*b[32]-b[3]*b[25]*b[30]+b[3]*b[17]*b[38]+b[3]*b[18]*b[37]-b[3]*b[24]*b[31]-b[15]*b[6]*b[37]-b[15]*b[7]*b[36]+b[15]*b[31]*b[12]+b[16]*b[32]*b[10]+b[16]*b[33]*b[9]+b[13]*b[33]*b[12]-b[13]*b[7]*b[38]+b[14]*b[32]*b[12]+b[14]*b[33]*b[11]-b[16]*b[6]*b[36]-b[16]*b[7]*b[35]+b[16]*b[31]*b[11]+b[16]*b[30]*b[12]+b[15]*b[32]*b[11]+b[15]*b[33]*b[10]-b[15]*b[5]*b[38]+b[29]*b[5]*b[24]+b[29]*b[6]*b[23]-b[26]*b[20]*b[12]+b[26]*b[7]*b[25]-b[27]*b[19]*b[12]-b[27]*b[20]*b[11]+b[27]*b[6]*b[25]+b[27]*b[7]*b[24]-b[28]*b[20]*b[10]-b[16]*b[4]*b[38]-b[16]*b[5]*b[37]+b[29]*b[7]*b[22]-b[29]*b[17]*b[12]-b[29]*b[18]*b[11]-b[29]*b[19]*b[10]+b[28]*b[5]*b[25]+b[28]*b[6]*b[24]+b[28]*b[7]*b[23]-b[28]*b[18]*b[12]-b[28]*b[19]*b[11]-b[29]*b[20]*b[9]+b[29]*b[4]*b[25]-b[2]*b[24]*b[32]+b[0]*b[20]*b[38]-b[0]*b[25]*b[33]+b[1]*b[19]*b[38]-b[1]*b[24]*b[33]+b[1]*b[20]*b[37]-b[2]*b[25]*b[31]+b[2]*b[20]*b[36]-b[1]*b[25]*b[32]+b[2]*b[19]*b[37]+b[2]*b[18]*b[38]-b[2]*b[23]*b[33]);
c[2] = (b[3]*b[18]*b[38]-b[3]*b[24]*b[32]+b[3]*b[19]*b[37]+b[3]*b[20]*b[36]-b[3]*b[25]*b[31]-b[3]*b[23]*b[33]-b[15]*b[6]*b[38]-b[15]*b[7]*b[37]+b[16]*b[32]*b[11]+b[16]*b[33]*b[10]-b[16]*b[5]*b[38]-b[16]*b[6]*b[37]-b[16]*b[7]*b[36]+b[16]*b[31]*b[12]+b[14]*b[33]*b[12]-b[14]*b[7]*b[38]+b[15]*b[32]*b[12]+b[15]*b[33]*b[11]+b[29]*b[5]*b[25]+b[29]*b[6]*b[24]-b[27]*b[20]*b[12]+b[27]*b[7]*b[25]-b[28]*b[19]*b[12]-b[28]*b[20]*b[11]+b[29]*b[7]*b[23]-b[29]*b[18]*b[12]-b[29]*b[19]*b[11]+b[28]*b[6]*b[25]+b[28]*b[7]*b[24]-b[29]*b[20]*b[10]+b[2]*b[19]*b[38]-b[1]*b[25]*b[33]+b[2]*b[20]*b[37]-b[2]*b[24]*b[33]-b[2]*b[25]*b[32]+b[1]*b[20]*b[38]);
c[1] = (b[29]*b[7]*b[24]-b[29]*b[20]*b[11]+b[2]*b[20]*b[38]-b[2]*b[25]*b[33]-b[28]*b[20]*b[12]+b[28]*b[7]*b[25]-b[29]*b[19]*b[12]-b[3]*b[24]*b[33]+b[15]*b[33]*b[12]+b[3]*b[19]*b[38]-b[16]*b[6]*b[38]+b[3]*b[20]*b[37]+b[16]*b[32]*b[12]+b[29]*b[6]*b[25]-b[16]*b[7]*b[37]-b[3]*b[25]*b[32]-b[15]*b[7]*b[38]+b[16]*b[33]*b[11]);
c[0] = -b[29]*b[20]*b[12]+b[29]*b[7]*b[25]+b[16]*b[33]*b[12]-b[16]*b[7]*b[38]+b[3]*b[20]*b[38]-b[3]*b[25]*b[33];
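// Each real root z of this degree-10 polynomial fixes the epipole (x, y) via the rank-2 matrix B(z)
// built below; the essential matrix then follows by back-substitution into the 4-dimensional null
// space: E = x*E1 + y*E2 + z*E3 + E4 (Nister's formulation).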
const auto poly_solver = SolverPoly::create();
const int num_roots = poly_solver->getRealRoots(c, rs);
models = std::vector<Mat>(); models.reserve(num_roots);
for (int i = 0; i < num_roots; i++) {
const double z1 = rs[i], z2 = z1*z1, z3 = z2*z1, z4 = z3*z1;
double bz[9], norm_bz = 0;
for (int j = 0; j < 3; j++) {
double * const br = b + j * 13, * Bz = bz + 3*j;
Bz[0] = br[0] * z3 + br[1] * z2 + br[2] * z1 + br[3];
Bz[1] = br[4] * z3 + br[5] * z2 + br[6] * z1 + br[7];
Bz[2] = br[8] * z4 + br[9] * z3 + br[10] * z2 + br[11] * z1 + br[12];
norm_bz += Bz[0]*Bz[0] + Bz[1]*Bz[1] + Bz[2]*Bz[2];
}
Matx33d Bz(bz);
// Bz is a rank-2 matrix, so the epipole is its null vector
Vec3d xy1 = Utils::getRightEpipole(Mat(Bz * (1/sqrt(norm_bz))));
if (fabs(xy1(2)) < 1e-10) continue;
Mat_<double> E(3,3);
double * e_arr = (double *)E.data, x = xy1(0) / xy1(2), y = xy1(1) / xy1(2);
for (int e_i = 0; e_i < 9; e_i++)
e_arr[e_i] = ee[e_i] * x + ee[9+e_i] * y + ee[18+e_i]*z1 + ee[27+e_i];
models.emplace_back(E);
}
} else {
#if defined(HAVE_EIGEN) || defined(HAVE_LAPACK)
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++)
// compute EE Transpose
// Shorthand for multiplying the Essential matrix with its transpose.
eet[i][j] = 2 * (multPolysDegOne(null_space_mat[i][0].val, null_space_mat[j][0].val) +
multPolysDegOne(null_space_mat[i][1].val, null_space_mat[j][1].val) +
multPolysDegOne(null_space_mat[i][2].val, null_space_mat[j][2].val));
const Matx<double, 1, 10> trace = eet[0][0] + eet[1][1] + eet[2][2];
// Trace constraint
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++)
Mat(multPolysDegOneAndTwo(eet[i][0].val, null_space_mat[0][j].val) +
multPolysDegOneAndTwo(eet[i][1].val, null_space_mat[1][j].val) +
multPolysDegOneAndTwo(eet[i][2].val, null_space_mat[2][j].val) -
0.5 * multPolysDegOneAndTwo(trace.val, null_space_mat[i][j].val))
.copyTo(constraint_mat.row(3 * i + j));
// Rank = zero determinant constraint
Mat(multPolysDegOneAndTwo(
(multPolysDegOne(null_space_mat[0][1].val, null_space_mat[1][2].val) -
multPolysDegOne(null_space_mat[0][2].val, null_space_mat[1][1].val)).val,
null_space_mat[2][0].val) +
multPolysDegOneAndTwo(
(multPolysDegOne(null_space_mat[0][2].val, null_space_mat[1][0].val) -
multPolysDegOne(null_space_mat[0][0].val, null_space_mat[1][2].val)).val,
null_space_mat[2][1].val) +
multPolysDegOneAndTwo(
(multPolysDegOne(null_space_mat[0][0].val, null_space_mat[1][1].val) -
multPolysDegOne(null_space_mat[0][1].val, null_space_mat[1][0].val)).val,
null_space_mat[2][2].val)).copyTo(constraint_mat.row(9));
#ifdef HAVE_EIGEN
const Eigen::Matrix<double, 10, 20, Eigen::RowMajor> constraint_mat_eig((double *) constraint_mat.data);
// (3) Compute the Gröbner basis. This turns out to be as simple as performing a
// Gauss-Jordan elimination on the 10×20 matrix
const Eigen::Matrix<double, 10, 10> eliminated_mat_eig = constraint_mat_eig.block<10, 10>(0, 0)
.fullPivLu().solve(constraint_mat_eig.block<10, 10>(0, 10));
// (4) Compute the 10×10 action matrix for multiplication by one of the unknowns.
// This is a simple matter of extracting the correct elements from the eliminated
// 10×20 matrix and organising them to form the action matrix.
Eigen::Matrix<double, 10, 10> action_mat_eig = Eigen::Matrix<double, 10, 10>::Zero();
action_mat_eig.block<3, 10>(0, 0) = eliminated_mat_eig.block<3, 10>(0, 0);
action_mat_eig.block<2, 10>(3, 0) = eliminated_mat_eig.block<2, 10>(4, 0);
action_mat_eig.row(5) = eliminated_mat_eig.row(7);
action_mat_eig(6, 0) = -1.0;
action_mat_eig(7, 1) = -1.0;
action_mat_eig(8, 3) = -1.0;
action_mat_eig(9, 6) = -1.0;
// (5) Compute the left eigenvectors of the action matrix
Eigen::EigenSolver<Eigen::Matrix<double, 10, 10>> eigensolver(action_mat_eig);
const Eigen::VectorXcd &eigenvalues = eigensolver.eigenvalues();
const auto * const eig_vecs_ = (double *) eigensolver.eigenvectors().real().data();
#else
Matx<double, 10, 10> A = constraint_mat.colRange(0, 10),
B = constraint_mat.colRange(10, 20), eliminated_mat;
if (!solve(A, B, eliminated_mat, DECOMP_LU)) return 0;
Mat eliminated_mat_dyn = Mat(eliminated_mat);
Mat action_mat = Mat_<double>::zeros(10, 10);
eliminated_mat_dyn.rowRange(0,3).copyTo(action_mat.rowRange(0,3));
eliminated_mat_dyn.rowRange(4,6).copyTo(action_mat.rowRange(3,5));
eliminated_mat_dyn.row(7).copyTo(action_mat.row(5));
auto * action_mat_data = (double *) action_mat.data;
action_mat_data[60] = -1.0; // 6 row, 0 col
action_mat_data[71] = -1.0; // 7 row, 1 col
action_mat_data[83] = -1.0; // 8 row, 3 col
action_mat_data[96] = -1.0; // 9 row, 6 col
int mat_order = 10, info, lda = 10, ldvl = 10, ldvr = 1, lwork = 100;
double wr[10], wi[10] = {0}, eig_vecs[100], work[100]; // 10 = mat_order, 100 = lwork
char jobvl = 'V', jobvr = 'N'; // only left eigen vectors are computed
OCV_LAPACK_FUNC(dgeev)(&jobvl, &jobvr, &mat_order, action_mat_data, &lda, wr, wi, eig_vecs, &ldvl,
nullptr, &ldvr, work, &lwork, &info);
if (info != 0) return 0;
#endif
models = std::vector<Mat>(); models.reserve(10);
// Read off the values for the three unknowns at all the solution points and
// back-substitute to obtain the solutions for the essential matrix.
for (int i = 0; i < 10; i++)
// process only real solutions
#ifdef HAVE_EIGEN
if (eigenvalues(i).imag() == 0) {
Mat_<double> model(3, 3);
auto * model_data = (double *) model.data;
const int eig_i = 20 * i + 12; // eigen stores imaginary values too
for (int j = 0; j < 9; j++)
model_data[j] = ee[j ] * eig_vecs_[eig_i ] + ee[j+9 ] * eig_vecs_[eig_i+2] +
ee[j+18] * eig_vecs_[eig_i+4] + ee[j+27] * eig_vecs_[eig_i+6];
#else
if (wi[i] == 0) {
Mat_<double> model (3,3);
auto * model_data = (double *) model.data;
const int eig_i = 10 * i + 6;
for (int j = 0; j < 9; j++)
model_data[j] = ee[j ]*eig_vecs[eig_i ] + ee[j+9 ]*eig_vecs[eig_i+1] +
ee[j+18]*eig_vecs[eig_i+2] + ee[j+27]*eig_vecs[eig_i+3];
#endif
models.emplace_back(model);
}
#else
CV_Error(cv::Error::StsNotImplemented, "To run essential matrix estimation of Stewenius method you need to have either Eigen or LAPACK installed! Or switch to Nister algorithm");
return 0;
#endif
}
return static_cast<int>(models.size());
}
// number of possible solutions is 0,2,4,6,8,10
int getMaxNumberOfSolutions () const override { return 10; }
int getSampleSize() const override { return 5; }
Ptr<MinimalSolver> clone () const override {
return makePtr<EssentialMinimalSolverStewenius5ptsImpl>(*points_mat);
}
private:
/*
* Multiply two polynomials of degree one with unknowns x y z
@ -236,105 +331,19 @@ private:
p[5]*q[3]+p[8]*q[2], p[6]*q[3]+p[9]*q[0], p[7]*q[3]+p[9]*q[1], p[8]*q[3]+p[9]*q[2],
p[9]*q[3]});
}
static inline Matx<double, 1, 20> multPolysDegOneAndTwoNister(const double * const p,
const double * const q) {
// permutation {0, 3, 1, 2, 4, 10, 6, 12, 5, 11, 7, 13, 16, 8, 14, 17, 9, 15, 18, 19};
return Matx<double, 1, 20>
({p[0]*q[0], p[2]*q[1], p[0]*q[1]+p[1]*q[0], p[1]*q[1]+p[2]*q[0], p[0]*q[2]+p[3]*q[0],
p[0]*q[3]+p[6]*q[0], p[2]*q[2]+p[4]*q[1], p[2]*q[3]+p[7]*q[1], p[1]*q[2]+p[3]*q[1]+p[4]*q[0],
p[1]*q[3]+p[6]*q[1]+p[7]*q[0], p[3]*q[2]+p[5]*q[0], p[3]*q[3]+p[6]*q[2]+p[8]*q[0],
p[6]*q[3]+p[9]*q[0], p[4]*q[2]+p[5]*q[1], p[4]*q[3]+p[7]*q[2]+p[8]*q[1], p[7]*q[3]+p[9]*q[1],
p[5]*q[2], p[5]*q[3]+p[8]*q[2], p[8]*q[3]+p[9]*q[2], p[9]*q[3]});
}
};

class EssentialNonMinimalSolverImpl : public EssentialNonMinimalSolver {
private:
const Mat * points_mat;
const float * const points;
public:
/*
* Input calibrated points K^-1 x.
* Linear 8 points algorithm is used for estimation.
*/
explicit EssentialNonMinimalSolverImpl (const Mat &points_) :
points_mat(&points_), points ((float *) points_.data) {}
int estimate (const std::vector<int> &sample, int sample_size, std::vector<Mat>
&models, const std::vector<double> &weights) const override {
if (sample_size < getMinimumRequiredSampleSize())
return 0;
// ------- 8 points algorithm with Eigen and covariance matrix --------------
double a[9] = {0, 0, 0, 0, 0, 0, 0, 0, 1};
double AtA[81] = {0}; // 9x9
if (weights.empty()) {
for (int i = 0; i < sample_size; i++) {
const int pidx = 4*sample[i];
const double x1 = points[pidx ], y1 = points[pidx+1],
x2 = points[pidx+2], y2 = points[pidx+3];
a[0] = x2*x1;
a[1] = x2*y1;
a[2] = x2;
a[3] = y2*x1;
a[4] = y2*y1;
a[5] = y2;
a[6] = x1;
a[7] = y1;
// calculate covariance for eigen
for (int row = 0; row < 9; row++)
for (int col = row; col < 9; col++)
AtA[row*9+col] += a[row]*a[col];
}
} else {
for (int i = 0; i < sample_size; i++) {
const int smpl = 4*sample[i];
const double weight = weights[i];
const double x1 = points[smpl ], y1 = points[smpl+1],
x2 = points[smpl+2], y2 = points[smpl+3];
const double weight_times_x2 = weight * x2,
weight_times_y2 = weight * y2;
a[0] = weight_times_x2 * x1;
a[1] = weight_times_x2 * y1;
a[2] = weight_times_x2;
a[3] = weight_times_y2 * x1;
a[4] = weight_times_y2 * y1;
a[5] = weight_times_y2;
a[6] = weight * x1;
a[7] = weight * y1;
a[8] = weight;
// calculate covariance for eigen
for (int row = 0; row < 9; row++)
for (int col = row; col < 9; col++)
AtA[row*9+col] += a[row]*a[col];
}
}
// copy symmetric part of covariance matrix
for (int j = 1; j < 9; j++)
for (int z = 0; z < j; z++)
AtA[j*9+z] = AtA[z*9+j];
#ifdef HAVE_EIGEN
models = std::vector<Mat>{ Mat_<double>(3,3) };
const Eigen::JacobiSVD<Eigen::Matrix<double, 9, 9>> svd((Eigen::Matrix<double, 9, 9>(AtA)),
Eigen::ComputeFullV);
// extract the last nullspace
Eigen::Map<Eigen::Matrix<double, 9, 1>>((double *)models[0].data) = svd.matrixV().col(8);
#else
Matx<double, 9, 9> AtA_(AtA), U, Vt;
Vec<double, 9> W;
SVD::compute(AtA_, W, U, Vt, SVD::FULL_UV + SVD::MODIFY_A);
models = std::vector<Mat> { Mat_<double>(3, 3, Vt.val + 72 /*=8*9*/) };
#endif
FundamentalDegeneracy::recoverRank(models[0], false /*E*/);
return 1;
}
int getMinimumRequiredSampleSize() const override { return 8; }
int getMaxNumberOfSolutions () const override { return 1; }
Ptr<NonMinimalSolver> clone () const override {
return makePtr<EssentialNonMinimalSolverImpl>(*points_mat);
} }
};
Ptr<EssentialNonMinimalSolver> EssentialNonMinimalSolver::create (const Mat &points_) {
return makePtr<EssentialNonMinimalSolverImpl>(points_);
}
Ptr<EssentialMinimalSolver5pts> EssentialMinimalSolver5pts::create
(const Mat &points_, bool use_svd, bool is_nister) {
return makePtr<EssentialMinimalSolver5ptsImpl>(points_, use_svd, is_nister);
}
}}
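// A minimal usage sketch (illustrative only; `calib_pts` are correspondences already premultiplied
// by K^-1, stored as 4 floats (x1, y1, x2, y2) per row as elsewhere in this module):
//     Ptr<EssentialMinimalSolver5pts> solver =
//             EssentialMinimalSolver5pts::create(calib_pts, false /*use_svd*/, true /*is_nister*/);
//     std::vector<int> sample = {0, 1, 2, 3, 4};   // getSampleSize() == 5
//     std::vector<Mat> models;                     // up to 10 essential matrices
//     const int num_sols = solver->estimate(sample, models);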

@ -37,10 +37,7 @@ public:
int getNonMinimalSampleSize () const override { int getNonMinimalSampleSize () const override {
return non_min_solver->getMinimumRequiredSampleSize(); return non_min_solver->getMinimumRequiredSampleSize();
} }
void enforceRankConstraint (bool /*enforce*/) override {}
Ptr<Estimator> clone() const override {
return makePtr<HomographyEstimatorImpl>(min_solver->clone(), non_min_solver->clone(),
degeneracy->clone(0 /*we don't need state here*/));
}
}; };
Ptr<HomographyEstimator> HomographyEstimator::create (const Ptr<MinimalSolver> &min_solver_, Ptr<HomographyEstimator> HomographyEstimator::create (const Ptr<MinimalSolver> &min_solver_,
const Ptr<NonMinimalSolver> &non_min_solver_, const Ptr<Degeneracy> &degeneracy_) { const Ptr<NonMinimalSolver> &non_min_solver_, const Ptr<Degeneracy> &degeneracy_) {
@ -81,13 +78,12 @@ public:
int getNonMinimalSampleSize () const override { int getNonMinimalSampleSize () const override {
return non_min_solver->getMinimumRequiredSampleSize(); return non_min_solver->getMinimumRequiredSampleSize();
} }
void enforceRankConstraint (bool enforce) override {
non_min_solver->enforceRankConstraint(enforce);
}
int getMaxNumSolutionsNonMinimal () const override { int getMaxNumSolutionsNonMinimal () const override {
return non_min_solver->getMaxNumberOfSolutions(); return non_min_solver->getMaxNumberOfSolutions();
} }
Ptr<Estimator> clone() const override {
return makePtr<FundamentalEstimatorImpl>(min_solver->clone(), non_min_solver->clone(),
degeneracy->clone(0));
}
}; };
Ptr<FundamentalEstimator> FundamentalEstimator::create (const Ptr<MinimalSolver> &min_solver_, Ptr<FundamentalEstimator> FundamentalEstimator::create (const Ptr<MinimalSolver> &min_solver_,
const Ptr<NonMinimalSolver> &non_min_solver_, const Ptr<Degeneracy> &degeneracy_) { const Ptr<NonMinimalSolver> &non_min_solver_, const Ptr<Degeneracy> &degeneracy_) {
@ -107,7 +103,7 @@ public:
inline int inline int
estimateModels(const std::vector<int> &sample, std::vector<Mat> &models) const override { estimateModels(const std::vector<int> &sample, std::vector<Mat> &models) const override {
std::vector<Mat> E; std::vector<Mat> E;
const int models_count = min_solver->estimate(sample, E);
int valid_models_count = 0; int valid_models_count = 0;
for (int i = 0; i < models_count; i++) for (int i = 0; i < models_count; i++)
@ -116,6 +112,10 @@ public:
return valid_models_count; return valid_models_count;
} }
int estimateModelNonMinimalSample (const Mat &model, const std::vector<int> &sample, int sample_size, std::vector<Mat>
&models, const std::vector<double> &weights) const override {
return non_min_solver->estimate(model, sample, sample_size, models, weights);
}
int estimateModelNonMinimalSample(const std::vector<int> &sample, int sample_size, int estimateModelNonMinimalSample(const std::vector<int> &sample, int sample_size,
std::vector<Mat> &models, const std::vector<double> &weights) const override { std::vector<Mat> &models, const std::vector<double> &weights) const override {
return non_min_solver->estimate(sample, sample_size, models, weights); return non_min_solver->estimate(sample, sample_size, models, weights);
@ -129,13 +129,12 @@ public:
int getNonMinimalSampleSize () const override { int getNonMinimalSampleSize () const override {
return non_min_solver->getMinimumRequiredSampleSize(); return non_min_solver->getMinimumRequiredSampleSize();
} }
void enforceRankConstraint (bool enforce) override {
non_min_solver->enforceRankConstraint(enforce);
}
int getMaxNumSolutionsNonMinimal () const override { int getMaxNumSolutionsNonMinimal () const override {
return non_min_solver->getMaxNumberOfSolutions(); return non_min_solver->getMaxNumberOfSolutions();
} }
Ptr<Estimator> clone() const override {
return makePtr<EssentialEstimatorImpl>(min_solver->clone(), non_min_solver->clone(),
degeneracy->clone(0));
}
}; };
Ptr<EssentialEstimator> EssentialEstimator::create (const Ptr<MinimalSolver> &min_solver_, Ptr<EssentialEstimator> EssentialEstimator::create (const Ptr<MinimalSolver> &min_solver_,
const Ptr<NonMinimalSolver> &non_min_solver_, const Ptr<Degeneracy> &degeneracy_) { const Ptr<NonMinimalSolver> &non_min_solver_, const Ptr<Degeneracy> &degeneracy_) {
@ -171,9 +170,7 @@ public:
int getMaxNumSolutionsNonMinimal () const override { int getMaxNumSolutionsNonMinimal () const override {
return non_min_solver->getMaxNumberOfSolutions(); return non_min_solver->getMaxNumberOfSolutions();
} }
void enforceRankConstraint (bool /*enforce*/) override {}
Ptr<Estimator> clone() const override {
return makePtr<AffineEstimatorImpl>(min_solver->clone(), non_min_solver->clone());
}
}; };
Ptr<AffineEstimator> AffineEstimator::create (const Ptr<MinimalSolver> &min_solver_, Ptr<AffineEstimator> AffineEstimator::create (const Ptr<MinimalSolver> &min_solver_,
const Ptr<NonMinimalSolver> &non_min_solver_) { const Ptr<NonMinimalSolver> &non_min_solver_) {
@ -209,9 +206,7 @@ public:
int getMaxNumSolutionsNonMinimal () const override { int getMaxNumSolutionsNonMinimal () const override {
return non_min_solver->getMaxNumberOfSolutions(); return non_min_solver->getMaxNumberOfSolutions();
} }
void enforceRankConstraint (bool /*enforce*/) override {}
Ptr<Estimator> clone() const override {
return makePtr<PnPEstimatorImpl>(min_solver->clone(), non_min_solver->clone());
}
}; };
Ptr<PnPEstimator> PnPEstimator::create (const Ptr<MinimalSolver> &min_solver_, Ptr<PnPEstimator> PnPEstimator::create (const Ptr<MinimalSolver> &min_solver_,
const Ptr<NonMinimalSolver> &non_min_solver_) { const Ptr<NonMinimalSolver> &non_min_solver_) {
@ -274,10 +269,7 @@ public:
int getMaxNumSolutionsNonMinimal () const override { int getMaxNumSolutionsNonMinimal () const override {
return non_min_solver->getMaxNumberOfSolutions(); return non_min_solver->getMaxNumberOfSolutions();
} }
void enforceRankConstraint (bool /*enforce*/) override {}
Ptr<Estimator> clone() const override {
return makePtr<PointCloudModelEstimatorImpl>(min_solver->clone(), non_min_solver->clone(),
custom_model_constraints);
}
}; };
Ptr<PointCloudModelEstimator> PointCloudModelEstimator::create (const Ptr<MinimalSolver> &min_solver_, Ptr<PointCloudModelEstimator> PointCloudModelEstimator::create (const Ptr<MinimalSolver> &min_solver_,
const Ptr<NonMinimalSolver> &non_min_solver_, const Ptr<NonMinimalSolver> &non_min_solver_,
@ -289,19 +281,18 @@ Ptr<PointCloudModelEstimator> PointCloudModelEstimator::create (const Ptr<Minima
// Symmetric Reprojection Error
class ReprojectionErrorSymmetricImpl : public ReprojectionErrorSymmetric {
private:
Mat points_mat;
float m11, m12, m13, m21, m22, m23, m31, m32, m33;
float minv11, minv12, minv13, minv21, minv22, minv23, minv31, minv32, minv33;
std::vector<float> errors;
public:
explicit ReprojectionErrorSymmetricImpl (const Mat &points_)
: points_mat(points_)
, m11(0), m12(0), m13(0), m21(0), m22(0), m23(0), m31(0), m32(0), m33(0)
, minv11(0), minv12(0), minv13(0), minv21(0), minv22(0), minv23(0), minv31(0), minv32(0), minv33(0)
, errors(points_.rows)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
inline void setModelParameters(const Mat& model) override inline void setModelParameters(const Mat& model) override
@ -321,9 +312,10 @@ public:
minv21=(float)minv[3]; minv22=(float)minv[4]; minv23=(float)minv[5]; minv21=(float)minv[3]; minv22=(float)minv[4]; minv23=(float)minv[5];
minv31=(float)minv[6]; minv32=(float)minv[7]; minv33=(float)minv[8]; minv31=(float)minv[6]; minv32=(float)minv[7]; minv33=(float)minv[8];
} }
inline float getError (int idx) const override {
idx *= 4;
const float * points = points_mat.ptr<float>();
const float x1=points[idx], y1=points[idx+1], x2=points[idx+2], y2=points[idx+3];
const float est_z2 = 1 / (m31 * x1 + m32 * y1 + m33),
dx2 = x2 - (m11 * x1 + m12 * y1 + m13) * est_z2,
dy2 = y2 - (m21 * x1 + m22 * y1 + m23) * est_z2;
@ -334,7 +326,8 @@ public:
}
const std::vector<float> &getErrors (const Mat &model) override {
setModelParameters(model);
const float * points = points_mat.ptr<float>();
for (int point_idx = 0; point_idx < points_mat.rows; point_idx++) {
const int smpl = 4*point_idx;
const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3];
const float est_z2 = 1 / (m31 * x1 + m32 * y1 + m33),
@ -347,9 +340,6 @@ public:
}
return errors;
}
Ptr<Error> clone () const override {
return makePtr<ReprojectionErrorSymmetricImpl>(*points_mat);
}
};
Ptr<ReprojectionErrorSymmetric>
ReprojectionErrorSymmetric::create(const Mat &points) {
@ -359,17 +349,16 @@ ReprojectionErrorSymmetric::create(const Mat &points) {
// Forward Reprojection Error
class ReprojectionErrorForwardImpl : public ReprojectionErrorForward {
private:
Mat points_mat;
float m11, m12, m13, m21, m22, m23, m31, m32, m33;
std::vector<float> errors;
public:
explicit ReprojectionErrorForwardImpl (const Mat &points_)
: points_mat(points_)
, m11(0), m12(0), m13(0), m21(0), m22(0), m23(0), m31(0), m32(0), m33(0)
, errors(points_.rows)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
inline void setModelParameters(const Mat& model) override inline void setModelParameters(const Mat& model) override
@ -382,9 +371,10 @@ public:
m21=static_cast<float>(m[3]); m22=static_cast<float>(m[4]); m23=static_cast<float>(m[5]); m21=static_cast<float>(m[3]); m22=static_cast<float>(m[4]); m23=static_cast<float>(m[5]);
m31=static_cast<float>(m[6]); m32=static_cast<float>(m[7]); m33=static_cast<float>(m[8]); m31=static_cast<float>(m[6]); m32=static_cast<float>(m[7]); m33=static_cast<float>(m[8]);
} }
inline float getError (int idx) const override {
idx *= 4;
const float * points = points_mat.ptr<float>();
const float x1 = points[idx], y1 = points[idx+1], x2 = points[idx+2], y2 = points[idx+3];
const float est_z2 = 1 / (m31 * x1 + m32 * y1 + m33),
dx2 = x2 - (m11 * x1 + m12 * y1 + m13) * est_z2,
dy2 = y2 - (m21 * x1 + m22 * y1 + m23) * est_z2;
@ -392,7 +382,8 @@ public:
}
const std::vector<float> &getErrors (const Mat &model) override {
setModelParameters(model);
const float * points = points_mat.ptr<float>();
for (int point_idx = 0; point_idx < points_mat.rows; point_idx++) {
const int smpl = 4*point_idx;
const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3];
const float est_z2 = 1 / (m31 * x1 + m32 * y1 + m33),
@ -402,9 +393,6 @@ public:
}
return errors;
}
Ptr<Error> clone () const override {
return makePtr<ReprojectionErrorForwardImpl>(*points_mat);
}
};
Ptr<ReprojectionErrorForward>
ReprojectionErrorForward::create(const Mat &points) {
@ -413,17 +401,16 @@ ReprojectionErrorForward::create(const Mat &points) {
class SampsonErrorImpl : public SampsonError { class SampsonErrorImpl : public SampsonError {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points;
float m11, m12, m13, m21, m22, m23, m31, m32, m33; float m11, m12, m13, m21, m22, m23, m31, m32, m33;
std::vector<float> errors; std::vector<float> errors;
public: public:
explicit SampsonErrorImpl (const Mat &points_) explicit SampsonErrorImpl (const Mat &points_)
: points_mat(&points_), points ((float *) points_.data) : points_mat(points_)
, m11(0), m12(0), m13(0), m21(0), m22(0), m23(0), m31(0), m32(0), m33(0) , m11(0), m12(0), m13(0), m21(0), m22(0), m23(0), m31(0), m32(0), m33(0)
, errors(points_.rows) , errors(points_.rows)
{ {
CV_DbgAssert(points); CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
} }
inline void setModelParameters(const Mat& model) override inline void setModelParameters(const Mat& model) override
@ -447,9 +434,10 @@ public:
* [ F(3,1) F(3,2) F(3,3) ] [ 1 ] * [ F(3,1) F(3,2) F(3,3) ] [ 1 ]
* *
*/ */
inline float getError (int point_idx) const override { inline float getError (int idx) const override {
const int smpl = 4*point_idx; idx *= 4;
const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3]; const float * points = points_mat.ptr<float>();
const float x1=points[idx], y1=points[idx+1], x2=points[idx+2], y2=points[idx+3];
const float F_pt1_x = m11 * x1 + m12 * y1 + m13, const float F_pt1_x = m11 * x1 + m12 * y1 + m13,
F_pt1_y = m21 * x1 + m22 * y1 + m23; F_pt1_y = m21 * x1 + m22 * y1 + m23;
const float pt2_F_x = x2 * m11 + y2 * m21 + m31, const float pt2_F_x = x2 * m11 + y2 * m21 + m31,
@ -460,7 +448,8 @@ public:
} }
const std::vector<float> &getErrors (const Mat &model) override { const std::vector<float> &getErrors (const Mat &model) override {
setModelParameters(model); setModelParameters(model);
for (int point_idx = 0; point_idx < points_mat->rows; point_idx++) { const float * points = points_mat.ptr<float>();
for (int point_idx = 0; point_idx < points_mat.rows; point_idx++) {
const int smpl = 4*point_idx; const int smpl = 4*point_idx;
const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3]; const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3];
const float F_pt1_x = m11 * x1 + m12 * y1 + m13, const float F_pt1_x = m11 * x1 + m12 * y1 + m13,
@ -473,9 +462,6 @@ public:
} }
return errors; return errors;
} }
Ptr<Error> clone () const override {
return makePtr<SampsonErrorImpl>(*points_mat);
}
}; };
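SampsonErrorImpl implements the first-order (Sampson) approximation of the geometric error for the epipolar constraint x2^T F x1 = 0:

\[ e_{S} = \frac{(x_2^\top F x_1)^2}{(F x_1)_1^2 + (F x_1)_2^2 + (F^\top x_2)_1^2 + (F^\top x_2)_2^2}, \]

where (F x1)_1, (F x1)_2 correspond to F_pt1_x, F_pt1_y and (F^T x2)_1, (F^T x2)_2 to pt2_F_x, pt2_F_y above.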
Ptr<SampsonError> Ptr<SampsonError>
SampsonError::create(const Mat &points) { SampsonError::create(const Mat &points) {
@ -484,17 +470,16 @@ SampsonError::create(const Mat &points) {
class SymmetricGeometricDistanceImpl : public SymmetricGeometricDistance { class SymmetricGeometricDistanceImpl : public SymmetricGeometricDistance {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points;
float m11, m12, m13, m21, m22, m23, m31, m32, m33; float m11, m12, m13, m21, m22, m23, m31, m32, m33;
std::vector<float> errors; std::vector<float> errors;
public: public:
explicit SymmetricGeometricDistanceImpl (const Mat &points_) explicit SymmetricGeometricDistanceImpl (const Mat &points_)
: points_mat(&points_), points ((float *) points_.data) : points_mat(points_)
, m11(0), m12(0), m13(0), m21(0), m22(0), m23(0), m31(0), m32(0), m33(0) , m11(0), m12(0), m13(0), m21(0), m22(0), m23(0), m31(0), m32(0), m33(0)
, errors(points_.rows) , errors(points_.rows)
{ {
CV_DbgAssert(points); CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
} }
inline void setModelParameters(const Mat& model) override inline void setModelParameters(const Mat& model) override
@ -508,9 +493,10 @@ public:
m31=static_cast<float>(m[6]); m32=static_cast<float>(m[7]); m33=static_cast<float>(m[8]); m31=static_cast<float>(m[6]); m32=static_cast<float>(m[7]); m33=static_cast<float>(m[8]);
} }
inline float getError (int point_idx) const override { inline float getError (int idx) const override {
const int smpl = 4*point_idx; idx *= 4;
const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3]; const float * points = points_mat.ptr<float>();
const float x1=points[idx], y1=points[idx+1], x2=points[idx+2], y2=points[idx+3];
// pt2^T * E, line 1 = [l1 l2] // pt2^T * E, line 1 = [l1 l2]
const float l1 = x2 * m11 + y2 * m21 + m31, const float l1 = x2 * m11 + y2 * m21 + m31,
l2 = x2 * m12 + y2 * m22 + m32; l2 = x2 * m12 + y2 * m22 + m32;
@ -525,7 +511,8 @@ public:
} }
const std::vector<float> &getErrors (const Mat &model) override { const std::vector<float> &getErrors (const Mat &model) override {
setModelParameters(model); setModelParameters(model);
for (int point_idx = 0; point_idx < points_mat->rows; point_idx++) { const float * points = points_mat.ptr<float>();
for (int point_idx = 0; point_idx < points_mat.rows; point_idx++) {
const int smpl = 4*point_idx; const int smpl = 4*point_idx;
const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3]; const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3];
const float l1 = x2 * m11 + y2 * m21 + m31, t1 = m11 * x1 + m12 * y1 + m13, const float l1 = x2 * m11 + y2 * m21 + m31, t1 = m11 * x1 + m12 * y1 + m13,
@ -536,9 +523,6 @@ public:
} }
return errors; return errors;
} }
Ptr<Error> clone () const override {
return makePtr<SymmetricGeometricDistanceImpl>(*points_mat);
}
}; };
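SymmetricGeometricDistanceImpl sums the squared point-to-epipolar-line distances in both images:

\[ e = d^2(x_2, F x_1) + d^2(x_1, F^\top x_2), \qquad d^2(x, l) = \frac{(l^\top x)^2}{l_1^2 + l_2^2}; \]

in the code, l1 and l2 are the first two components of F^T x2 (the line in image 1) and t1, t2 the first two components of F x1 (the line in image 2).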
Ptr<SymmetricGeometricDistance> Ptr<SymmetricGeometricDistance>
SymmetricGeometricDistance::create(const Mat &points) { SymmetricGeometricDistance::create(const Mat &points) {
@ -547,17 +531,16 @@ SymmetricGeometricDistance::create(const Mat &points) {
class ReprojectionErrorPmatrixImpl : public ReprojectionErrorPmatrix { class ReprojectionErrorPmatrixImpl : public ReprojectionErrorPmatrix {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points;
float p11, p12, p13, p14, p21, p22, p23, p24, p31, p32, p33, p34; float p11, p12, p13, p14, p21, p22, p23, p24, p31, p32, p33, p34;
std::vector<float> errors; std::vector<float> errors;
public: public:
explicit ReprojectionErrorPmatrixImpl (const Mat &points_) explicit ReprojectionErrorPmatrixImpl (const Mat &points_)
: points_mat(&points_), points ((float *) points_.data) : points_mat(points_)
, p11(0), p12(0), p13(0), p14(0), p21(0), p22(0), p23(0), p24(0), p31(0), p32(0), p33(0), p34(0) , p11(0), p12(0), p13(0), p14(0), p21(0), p22(0), p23(0), p24(0), p31(0), p32(0), p33(0), p34(0)
, errors(points_.rows) , errors(points_.rows)
{ {
CV_DbgAssert(points); CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
} }
@ -572,10 +555,11 @@ public:
p31 = (float)p[8]; p32 = (float)p[9]; p33 = (float)p[10]; p34 = (float)p[11]; p31 = (float)p[8]; p32 = (float)p[9]; p33 = (float)p[10]; p34 = (float)p[11];
} }
inline float getError (int point_idx) const override { inline float getError (int idx) const override {
const int smpl = 5*point_idx; idx *= 5;
const float u = points[smpl ], v = points[smpl+1], const float * points = points_mat.ptr<float>();
x = points[smpl+2], y = points[smpl+3], z = points[smpl+4]; const float u = points[idx ], v = points[idx+1],
x = points[idx+2], y = points[idx+3], z = points[idx+4];
const float depth = 1 / (p31 * x + p32 * y + p33 * z + p34); const float depth = 1 / (p31 * x + p32 * y + p33 * z + p34);
const float du = u - (p11 * x + p12 * y + p13 * z + p14) * depth; const float du = u - (p11 * x + p12 * y + p13 * z + p14) * depth;
const float dv = v - (p21 * x + p22 * y + p23 * z + p24) * depth; const float dv = v - (p21 * x + p22 * y + p23 * z + p24) * depth;
@ -583,7 +567,8 @@ public:
} }
const std::vector<float> &getErrors (const Mat &model) override { const std::vector<float> &getErrors (const Mat &model) override {
setModelParameters(model); setModelParameters(model);
for (int point_idx = 0; point_idx < points_mat->rows; point_idx++) { const float * points = points_mat.ptr<float>();
for (int point_idx = 0; point_idx < points_mat.rows; point_idx++) {
const int smpl = 5*point_idx; const int smpl = 5*point_idx;
const float u = points[smpl ], v = points[smpl+1], const float u = points[smpl ], v = points[smpl+1],
x = points[smpl+2], y = points[smpl+3], z = points[smpl+4]; x = points[smpl+2], y = points[smpl+3], z = points[smpl+4];
@ -594,9 +579,6 @@ public:
} }
return errors; return errors;
} }
Ptr<Error> clone () const override {
return makePtr<ReprojectionErrorPmatrixImpl>(*points_mat);
}
}; };
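ReprojectionErrorPmatrixImpl is the analogous reprojection residual for a 3x4 projection matrix P and a 2D-3D correspondence (u, v) <-> (x, y, z):

\[ e = \left\| (u, v)^\top - \pi\big(P\,(x, y, z, 1)^\top\big) \right\|^2, \]

with depth = 1 / (p31 x + p32 y + p33 z + p34) playing the role of the projective scale.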
Ptr<ReprojectionErrorPmatrix> ReprojectionErrorPmatrix::create(const Mat &points) { Ptr<ReprojectionErrorPmatrix> ReprojectionErrorPmatrix::create(const Mat &points) {
return makePtr<ReprojectionErrorPmatrixImpl>(points); return makePtr<ReprojectionErrorPmatrixImpl>(points);
@ -677,11 +659,6 @@ public:
cache_valid = true; cache_valid = true;
return errors_cache; return errors_cache;
} }
Ptr<Error> clone () const override {
return makePtr<PlaneModelErrorImpl>(*points_mat);
}
}; };
Ptr<PlaneModelError> PlaneModelError::create(const Mat &points) { Ptr<PlaneModelError> PlaneModelError::create(const Mat &points) {
return makePtr<PlaneModelErrorImpl>(points); return makePtr<PlaneModelErrorImpl>(points);
@ -773,11 +750,6 @@ public:
return errors_cache; return errors_cache;
} }
Ptr<Error> clone () const override {
return makePtr<SphereModelErrorImpl>(*points_mat);
}
}; };
Ptr<SphereModelError> SphereModelError::create(const Mat &points) { Ptr<SphereModelError> SphereModelError::create(const Mat &points) {
return makePtr<SphereModelErrorImpl>(points); return makePtr<SphereModelErrorImpl>(points);
@ -792,17 +764,16 @@ private:
* m21 m22 m23 * m21 m22 m23
* 0 0 1 * 0 0 1
*/ */
const Mat * points_mat; Mat points_mat;
const float * const points;
float m11, m12, m13, m21, m22, m23; float m11, m12, m13, m21, m22, m23;
std::vector<float> errors; std::vector<float> errors;
public: public:
explicit ReprojectionDistanceAffineImpl (const Mat &points_) explicit ReprojectionDistanceAffineImpl (const Mat &points_)
: points_mat(&points_), points ((float *) points_.data) : points_mat(points_)
, m11(0), m12(0), m13(0), m21(0), m22(0), m23(0) , m11(0), m12(0), m13(0), m21(0), m22(0), m23(0)
, errors(points_.rows) , errors(points_.rows)
{ {
CV_DbgAssert(points); CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
} }
inline void setModelParameters(const Mat& model) override inline void setModelParameters(const Mat& model) override
@ -814,15 +785,17 @@ public:
m11 = (float)m[0]; m12 = (float)m[1]; m13 = (float)m[2]; m11 = (float)m[0]; m12 = (float)m[1]; m13 = (float)m[2];
m21 = (float)m[3]; m22 = (float)m[4]; m23 = (float)m[5]; m21 = (float)m[3]; m22 = (float)m[4]; m23 = (float)m[5];
} }
inline float getError (int point_idx) const override { inline float getError (int idx) const override {
const int smpl = 4*point_idx; idx *= 4;
const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3]; const float * points = points_mat.ptr<float>();
const float x1=points[idx], y1=points[idx+1], x2=points[idx+2], y2=points[idx+3];
const float dx2 = x2 - (m11 * x1 + m12 * y1 + m13), dy2 = y2 - (m21 * x1 + m22 * y1 + m23); const float dx2 = x2 - (m11 * x1 + m12 * y1 + m13), dy2 = y2 - (m21 * x1 + m22 * y1 + m23);
return dx2 * dx2 + dy2 * dy2; return dx2 * dx2 + dy2 * dy2;
} }
const std::vector<float> &getErrors (const Mat &model) override { const std::vector<float> &getErrors (const Mat &model) override {
setModelParameters(model); setModelParameters(model);
for (int point_idx = 0; point_idx < points_mat->rows; point_idx++) { const float * points = points_mat.ptr<float>();
for (int point_idx = 0; point_idx < points_mat.rows; point_idx++) {
const int smpl = 4*point_idx; const int smpl = 4*point_idx;
const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3]; const float x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3];
const float dx2 = x2 - (m11 * x1 + m12 * y1 + m13), dy2 = y2 - (m21 * x1 + m22 * y1 + m23); const float dx2 = x2 - (m11 * x1 + m12 * y1 + m13), dy2 = y2 - (m21 * x1 + m22 * y1 + m23);
@ -830,9 +803,6 @@ public:
} }
return errors; return errors;
} }
Ptr<Error> clone () const override {
return makePtr<ReprojectionDistanceAffineImpl>(*points_mat);
}
}; };
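For the affine model the last row of the 3x3 matrix is (0, 0, 1), so ReprojectionDistanceAffineImpl needs no division by a projective depth: e = || (x2, y2)^T - A (x1, y1, 1)^T ||^2, with A the upper 2x3 block (m11 ... m23).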
Ptr<ReprojectionErrorAffine> Ptr<ReprojectionErrorAffine>
ReprojectionErrorAffine::create(const Mat &points) { ReprojectionErrorAffine::create(const Mat &points) {
@ -842,26 +812,25 @@ ReprojectionErrorAffine::create(const Mat &points) {
////////////////////////////////////// NORMALIZING TRANSFORMATION ///////////////////////// ////////////////////////////////////// NORMALIZING TRANSFORMATION /////////////////////////
class NormTransformImpl : public NormTransform { class NormTransformImpl : public NormTransform {
private: private:
const float * const points; Mat points_mat;
public: public:
explicit NormTransformImpl (const Mat &points_) : points((float*)points_.data) {} explicit NormTransformImpl (const Mat &points_) : points_mat(points_) {}
// Compute normalized points and transformation matrices. // Compute normalized points and transformation matrices.
void getNormTransformation (Mat& norm_points, const std::vector<int> &sample, void getNormTransformation (Mat& norm_points, const std::vector<int> &sample,
int sample_size, Matx33d &T1, Matx33d &T2) const override { int sample_size, Matx33d &T1, Matx33d &T2) const override {
const float * points = points_mat.ptr<float>();
double mean_pts1_x = 0, mean_pts1_y = 0, mean_pts2_x = 0, mean_pts2_y = 0; double mean_pts1_x = 0, mean_pts1_y = 0, mean_pts2_x = 0, mean_pts2_y = 0;
// find average of each coordinate of points. // find average of each coordinate of points.
int smpl; int smpl;
for (int i = 0; i < sample_size; i++) { for (int i = 0; i < sample_size; i++) {
smpl = 4 * sample[i]; smpl = 4 * sample[i];
mean_pts1_x += points[smpl ]; mean_pts1_x += points[smpl ];
mean_pts1_y += points[smpl + 1]; mean_pts1_y += points[smpl + 1];
mean_pts2_x += points[smpl + 2]; mean_pts2_x += points[smpl + 2];
mean_pts2_y += points[smpl + 3]; mean_pts2_y += points[smpl + 3];
} }
mean_pts1_x /= sample_size; mean_pts1_y /= sample_size; mean_pts1_x /= sample_size; mean_pts1_y /= sample_size;
mean_pts2_x /= sample_size; mean_pts2_y /= sample_size; mean_pts2_x /= sample_size; mean_pts2_y /= sample_size;
@ -901,9 +870,9 @@ public:
auto * norm_points_ptr = (float *) norm_points.data; auto * norm_points_ptr = (float *) norm_points.data;
// Normalize points: Npts = T*pts 3x3 * 3xN // Normalize points: Npts = T*pts 3x3 * 3xN
const float avg_dist1f = (float)avg_dist1, avg_dist2f = (float)avg_dist2; const auto avg_dist1f = (float)avg_dist1, avg_dist2f = (float)avg_dist2;
const float transl_x1f = (float)transl_x1, transl_y1f = (float)transl_y1; const auto transl_x1f = (float)transl_x1, transl_y1f = (float)transl_y1;
const float transl_x2f = (float)transl_x2, transl_y2f = (float)transl_y2; const auto transl_x2f = (float)transl_x2, transl_y2f = (float)transl_y2;
for (int i = 0; i < sample_size; i++) { for (int i = 0; i < sample_size; i++) {
smpl = 4 * sample[i]; smpl = 4 * sample[i];
(*norm_points_ptr++) = avg_dist1f * points[smpl ] + transl_x1f; (*norm_points_ptr++) = avg_dist1f * points[smpl ] + transl_x1f;

@ -4,24 +4,28 @@
#include "../precomp.hpp" #include "../precomp.hpp"
#include "../usac.hpp" #include "../usac.hpp"
#include "../polynom_solver.h"
#ifdef HAVE_EIGEN #ifdef HAVE_EIGEN
#include <Eigen/Eigen> #include <Eigen/Eigen>
#endif #endif
namespace cv { namespace usac { namespace cv { namespace usac {
// Fundamental Matrix Solver:
class FundamentalMinimalSolver7ptsImpl: public FundamentalMinimalSolver7pts { class FundamentalMinimalSolver7ptsImpl: public FundamentalMinimalSolver7pts {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points; const bool use_ge;
public: public:
explicit FundamentalMinimalSolver7ptsImpl (const Mat &points_) : explicit FundamentalMinimalSolver7ptsImpl (const Mat &points_, bool use_ge_) :
points_mat (&points_), points ((float *) points_.data) {} points_mat (points_), use_ge(use_ge_)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override { int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override {
const int m = 7, n = 9; // rows, cols const int m = 7, n = 9; // rows, cols
std::vector<double> a(63); // m*n std::vector<double> a(63); // m*n
auto * a_ = &a[0]; auto * a_ = &a[0];
const float * points = points_mat.ptr<float>();
for (int i = 0; i < m; i++ ) { for (int i = 0; i < m; i++ ) {
const int smpl = 4*sample[i]; const int smpl = 4*sample[i];
@ -38,52 +42,57 @@ public:
(*a_++) = y1; (*a_++) = y1;
(*a_++) = 1; (*a_++) = 1;
} }
if (!Math::eliminateUpperTriangular(a, m, n))
return 0;
/*
[a11 a12 a13 a14 a15 a16 a17 a18 a19]
[ 0 a22 a23 a24 a25 a26 a27 a28 a29]
[ 0 0 a33 a34 a35 a36 a37 a38 a39]
[ 0 0 0 a44 a45 a46 a47 a48 a49]
[ 0 0 0 0 a55 a56 a57 a58 a59]
[ 0 0 0 0 0 a66 a67 a68 a69]
[ 0 0 0 0 0 0 a77 a78 a79]
f9 = 1
*/
double f1[9], f2[9]; double f1[9], f2[9];
if (use_ge) {
f1[8] = 1.; if (!Math::eliminateUpperTriangular(a, m, n))
f1[7] = 0.;
f1[6] = -a[6*n+8] / a[6*n+6];
f2[8] = 1.;
f2[7] = -a[6*n+8] / a[6*n+7];
f2[6] = 0.;
// start from the last row
for (int i = m-2; i >= 0; i--) {
const int row_i = i*n;
double acc1 = 0, acc2 = 0;
for (int j = i+1; j < n; j++) {
acc1 -= a[row_i + j] * f1[j];
acc2 -= a[row_i + j] * f2[j];
}
f1[i] = acc1 / a[row_i + i];
f2[i] = acc2 / a[row_i + i];
// due to numerical errors return 0 solutions
if (std::isnan(f1[i]) || std::isnan(f2[i]))
return 0; return 0;
/*
[a11 a12 a13 a14 a15 a16 a17 a18 a19]
[ 0 a22 a23 a24 a25 a26 a27 a28 a29]
[ 0 0 a33 a34 a35 a36 a37 a38 a39]
[ 0 0 0 a44 a45 a46 a47 a48 a49]
[ 0 0 0 0 a55 a56 a57 a58 a59]
[ 0 0 0 0 0 a66 a67 a68 a69]
[ 0 0 0 0 0 0 a77 a78 a79]
*/
f1[8] = 1.;
f1[7] = 0.;
f1[6] = -a[6*n+8] / a[6*n+6];
f2[8] = 0.;
f2[7] = -a[6*n+6] / a[6*n+7];
f2[6] = 1;
// start from the last row
for (int i = m-2; i >= 0; i--) {
const int row_i = i*n;
double acc1 = 0, acc2 = 0;
for (int j = i+1; j < n; j++) {
acc1 -= a[row_i + j] * f1[j];
acc2 -= a[row_i + j] * f2[j];
}
f1[i] = acc1 / a[row_i + i];
f2[i] = acc2 / a[row_i + i];
if (std::isnan(f1[i]) || std::isnan(f2[i]))
return 0; // due to numerical errors return 0 solutions
}
} else {
Mat U, Vt, D;
cv::Matx<double, 7, 9> A(&a[0]);
SVD::compute(A, D, U, Vt, SVD::FULL_UV+SVD::MODIFY_A);
const auto * const vt = (double *) Vt.data;
int i1 = 8*9, i2 = 7*9;
for (int i = 0; i < 9; i++) {
f1[i] = vt[i1+i];
f2[i] = vt[i2+i];
}
} }
// OpenCV: // OpenCV:
double c[4] = { 0 }, r[3] = { 0 }; double c[4] = { 0 }, r[3] = { 0 };
double t0 = 0, t1 = 0, t2 = 0; double t0 = 0, t1 = 0, t2 = 0;
Mat_<double> coeffs (1, 4, c);
Mat_<double> roots (1, 3, r);
for (int i = 0; i < 9; i++) for (int i = 0; i < 9; i++)
f1[i] -= f2[i]; f1[i] -= f2[i];
@ -117,7 +126,7 @@ public:
c[0] = f1[0]*t0 - f1[1]*t1 + f1[2]*t2; c[0] = f1[0]*t0 - f1[1]*t1 + f1[2]*t2;
// solve the cubic equation; there can be 1 to 3 roots ... // solve the cubic equation; there can be 1 to 3 roots ...
int nroots = solveCubic (coeffs, roots); const int nroots = solve_deg3(c[0], c[1], c[2], c[3], r[0], r[1], r[2]);
if (nroots < 1) return 0; if (nroots < 1) return 0;
models = std::vector<Mat>(nroots); models = std::vector<Mat>(nroots);
@ -145,29 +154,26 @@ public:
int getMaxNumberOfSolutions () const override { return 3; } int getMaxNumberOfSolutions () const override { return 3; }
int getSampleSize() const override { return 7; } int getSampleSize() const override { return 7; }
Ptr<MinimalSolver> clone () const override {
return makePtr<FundamentalMinimalSolver7ptsImpl>(*points_mat);
}
}; };
Ptr<FundamentalMinimalSolver7pts> FundamentalMinimalSolver7pts::create(const Mat &points_) { Ptr<FundamentalMinimalSolver7pts> FundamentalMinimalSolver7pts::create(const Mat &points, bool use_ge) {
return makePtr<FundamentalMinimalSolver7ptsImpl>(points_); return makePtr<FundamentalMinimalSolver7ptsImpl>(points, use_ge);
} }
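Both branches of the 7-point solver (Gaussian elimination when use_ge is set, SVD otherwise) extract a two-dimensional null space f1, f2 of the 7x9 constraint matrix. The fundamental matrix is then sought as a combination of the two null vectors, and the rank-2 constraint

\[ \det\big(\alpha F_1 + (1 - \alpha) F_2\big) = 0 \]

expands into the cubic with coefficients c[0..3] solved by solve_deg3, so between one and three real candidate models are returned.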
class FundamentalMinimalSolver8ptsImpl : public FundamentalMinimalSolver8pts { class FundamentalMinimalSolver8ptsImpl : public FundamentalMinimalSolver8pts {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points;
public: public:
explicit FundamentalMinimalSolver8ptsImpl (const Mat &points_) : explicit FundamentalMinimalSolver8ptsImpl (const Mat &points_) :
points_mat (&points_), points ((float*) points_.data) points_mat (points_)
{ {
CV_DbgAssert(points); CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
} }
int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override { int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override {
const int m = 8, n = 9; // rows, cols const int m = 8, n = 9; // rows, cols
std::vector<double> a(72); // m*n std::vector<double> a(72); // m*n
auto * a_ = &a[0]; auto * a_ = &a[0];
const float * points = points_mat.ptr<float>();
for (int i = 0; i < m; i++ ) { for (int i = 0; i < m; i++ ) {
const int smpl = 4*sample[i]; const int smpl = 4*sample[i];
@ -225,22 +231,31 @@ public:
int getMaxNumberOfSolutions () const override { return 1; } int getMaxNumberOfSolutions () const override { return 1; }
int getSampleSize() const override { return 8; } int getSampleSize() const override { return 8; }
Ptr<MinimalSolver> clone () const override {
return makePtr<FundamentalMinimalSolver8ptsImpl>(*points_mat);
}
}; };
Ptr<FundamentalMinimalSolver8pts> FundamentalMinimalSolver8pts::create(const Mat &points_) { Ptr<FundamentalMinimalSolver8pts> FundamentalMinimalSolver8pts::create(const Mat &points_) {
return makePtr<FundamentalMinimalSolver8ptsImpl>(points_); return makePtr<FundamentalMinimalSolver8ptsImpl>(points_);
} }
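Each correspondence contributes one row of these linear systems, obtained by rewriting the epipolar constraint x2^T F x1 = 0 in terms of the stacked vector f = (f11, ..., f33)^T:

\[ \big(x_2 x_1,\; x_2 y_1,\; x_2,\; y_2 x_1,\; y_2 y_1,\; y_2,\; x_1,\; y_1,\; 1\big)\,\mathbf{f} = 0; \]

the non-minimal epipolar solvers below accumulate exactly these rows (optionally weighted) into normal equations before extracting a null vector.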
class FundamentalNonMinimalSolverImpl : public FundamentalNonMinimalSolver { class EpipolarNonMinimalSolverImpl : public EpipolarNonMinimalSolver {
private: private:
const Mat * points_mat; Mat points_mat;
const Ptr<NormTransform> normTr; const bool do_norm;
Matx33d _T1, _T2;
Ptr<NormTransform> normTr = nullptr;
bool enforce_rank = true, is_fundamental, use_ge;
public: public:
explicit FundamentalNonMinimalSolverImpl (const Mat &points_) : explicit EpipolarNonMinimalSolverImpl (const Mat &points_, const Matx33d &T1, const Matx33d &T2, bool use_ge_)
points_mat(&points_), normTr (NormTransform::create(points_)) {} : points_mat(points_), do_norm(false), _T1(T1), _T2(T2), is_fundamental(true), use_ge(use_ge_) {
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
explicit EpipolarNonMinimalSolverImpl (const Mat &points_, bool is_fundamental_) :
points_mat(points_), do_norm(is_fundamental_), use_ge(false) {
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
is_fundamental = is_fundamental_;
if (is_fundamental)
normTr = NormTransform::create(points_);
}
void enforceRankConstraint (bool enforce) override { enforce_rank = enforce; }
int estimate (const std::vector<int> &sample, int sample_size, std::vector<Mat> int estimate (const std::vector<int> &sample, int sample_size, std::vector<Mat>
&models, const std::vector<double> &weights) const override { &models, const std::vector<double> &weights) const override {
if (sample_size < getMinimumRequiredSampleSize()) if (sample_size < getMinimumRequiredSampleSize())
@ -248,18 +263,210 @@ public:
Matx33d T1, T2; Matx33d T1, T2;
Mat norm_points; Mat norm_points;
normTr->getNormTransformation(norm_points, sample, sample_size, T1, T2); if (do_norm)
const auto * const norm_pts = (float *) norm_points.data; normTr->getNormTransformation(norm_points, sample, sample_size, T1, T2);
const float * const norm_pts = do_norm ? norm_points.ptr<const float>() : points_mat.ptr<float>();
if (use_ge) {
double a[8];
std::vector<double> AtAb(72, 0); // 8x9
if (weights.empty()) {
for (int i = 0; i < sample_size; i++) {
const int idx = do_norm ? 4*i : 4*sample[i];
const double x1 = norm_pts[idx], y1 = norm_pts[idx+1], x2 = norm_pts[idx+2], y2 = norm_pts[idx+3];
a[0] = x2*x1;
a[1] = x2*y1;
a[2] = x2;
a[3] = y2*x1;
a[4] = y2*y1;
a[5] = y2;
a[6] = x1;
a[7] = y1;
// calculate covariance for eigen
for (int row = 0; row < 8; row++) {
for (int col = row; col < 8; col++)
AtAb[row * 9 + col] += a[row] * a[col];
AtAb[row * 9 + 8] += a[row];
}
}
} else { // use weights
for (int i = 0; i < sample_size; i++) {
const auto weight = weights[i];
if (weight < FLT_EPSILON) continue;
const int idx = do_norm ? 4*i : 4*sample[i];
const double x1 = norm_pts[idx], y1 = norm_pts[idx+1], x2 = norm_pts[idx+2], y2 = norm_pts[idx+3];
const double weight_times_x2 = weight * x2, weight_times_y2 = weight * y2;
a[0] = weight_times_x2 * x1;
a[1] = weight_times_x2 * y1;
a[2] = weight_times_x2;
a[3] = weight_times_y2 * x1;
a[4] = weight_times_y2 * y1;
a[5] = weight_times_y2;
a[6] = weight * x1;
a[7] = weight * y1;
// calculate covariance for eigen
for (int row = 0; row < 8; row++) {
for (int col = row; col < 8; col++)
AtAb[row * 9 + col] += a[row] * a[col];
AtAb[row * 9 + 8] += a[row];
}
}
}
// copy symmetric part of covariance matrix
for (int j = 1; j < 8; j++)
for (int z = 0; z < j; z++)
AtAb[j*9+z] = AtAb[z*9+j];
Math::eliminateUpperTriangular(AtAb, 8, 9);
models = std::vector<Mat>{ Mat_<double>(3,3) };
auto * f = (double *) models[0].data;
f[8] = 1.;
const int m = 8, n = 9;
// start from the last row
for (int i = m-1; i >= 0; i--) {
double acc = 0;
for (int j = i+1; j < n; j++)
acc -= AtAb[i*n+j]*f[j];
f[i] = acc / AtAb[i*n+i];
// due to numerical errors return 0 solutions
if (std::isnan(f[i]))
return 0;
}
} else {
// ------- 8 points algorithm with Eigen and covariance matrix --------------
double a[9] = {0, 0, 0, 0, 0, 0, 0, 0, 1}, AtA[81] = {0}; // 9x9
if (weights.empty()) {
for (int i = 0; i < sample_size; i++) {
const int idx = do_norm ? 4*i : 4*sample[i];
const auto x1 = norm_pts[idx], y1 = norm_pts[idx+1], x2 = norm_pts[idx+2], y2 = norm_pts[idx+3];
a[0] = x2*x1;
a[1] = x2*y1;
a[2] = x2;
a[3] = y2*x1;
a[4] = y2*y1;
a[5] = y2;
a[6] = x1;
a[7] = y1;
// calculate covariance matrix
for (int row = 0; row < 9; row++)
for (int col = row; col < 9; col++)
AtA[row*9+col] += a[row]*a[col];
}
} else { // use weights
for (int i = 0; i < sample_size; i++) {
const auto weight = weights[i];
if (weight < FLT_EPSILON) continue;
const int smpl = do_norm ? 4*i : 4*sample[i];
const auto x1 = norm_pts[smpl], y1 = norm_pts[smpl+1], x2 = norm_pts[smpl+2], y2 = norm_pts[smpl+3];
const double weight_times_x2 = weight * x2, weight_times_y2 = weight * y2;
a[0] = weight_times_x2 * x1;
a[1] = weight_times_x2 * y1;
a[2] = weight_times_x2;
a[3] = weight_times_y2 * x1;
a[4] = weight_times_y2 * y1;
a[5] = weight_times_y2;
a[6] = weight * x1;
a[7] = weight * y1;
a[8] = weight;
for (int row = 0; row < 9; row++)
for (int col = row; col < 9; col++)
AtA[row*9+col] += a[row]*a[col];
}
}
for (int j = 1; j < 9; j++)
for (int z = 0; z < j; z++)
AtA[j*9+z] = AtA[z*9+j];
#ifdef HAVE_EIGEN
models = std::vector<Mat>{ Mat_<double>(3,3) };
// extract the last null-vector
Eigen::Map<Eigen::Matrix<double, 9, 1>>((double *)models[0].data) = Eigen::JacobiSVD
<Eigen::Matrix<double, 9, 9>> ((Eigen::Matrix<double, 9, 9>(AtA)),
Eigen::ComputeFullV).matrixV().col(8);
#else
Matx<double, 9, 9> AtA_(AtA), U, Vt;
Vec<double, 9> W;
SVD::compute(AtA_, W, U, Vt, SVD::FULL_UV + SVD::MODIFY_A);
models = std::vector<Mat> { Mat_<double>(3, 3, Vt.val + 72 /*=8*9*/) };
#endif
}
if (enforce_rank)
FundamentalDegeneracy::recoverRank(models[0], is_fundamental);
if (is_fundamental) {
const auto * const f = (double *) models[0].data;
const auto * const t1 = do_norm ? T1.val : _T1.val, * t2 = do_norm ? T2.val : _T2.val;
// F = T2^T F T1
models[0] = Mat(Matx33d(t1[0]*t2[0]*f[0],t1[0]*t2[0]*f[1], t2[0]*f[2] + t2[0]*f[0]*t1[2] +
t2[0]*f[1]*t1[5], t1[0]*t2[0]*f[3],t1[0]*t2[0]*f[4], t2[0]*f[5] + t2[0]*f[3]*t1[2] +
t2[0]*f[4]*t1[5], t1[0]*(f[6] + f[0]*t2[2] + f[3]*t2[5]), t1[0]*(f[7] + f[1]*t2[2] +
f[4]*t2[5]), f[8] + t1[2]*(f[6] + f[0]*t2[2] + f[3]*t2[5]) + t1[5]*(f[7] + f[1]*t2[2] +
f[4]*t2[5]) + f[2]*t2[2] + f[5]*t2[5]));
}
return 1;
}
int estimate (const std::vector<bool> &/*mask*/, std::vector<Mat> &/*models*/,
const std::vector<double> &/*weights*/) override {
return 0;
}
int getMinimumRequiredSampleSize() const override { return 8; }
int getMaxNumberOfSolutions () const override { return 1; }
};
Ptr<EpipolarNonMinimalSolver> EpipolarNonMinimalSolver::create(const Mat &points_, bool is_fundamental) {
return makePtr<EpipolarNonMinimalSolverImpl>(points_, is_fundamental);
}
Ptr<EpipolarNonMinimalSolver> EpipolarNonMinimalSolver::create(const Mat &points_, const Matx33d &T1, const Matx33d &T2, bool use_ge) {
return makePtr<EpipolarNonMinimalSolverImpl>(points_, T1, T2, use_ge);
}
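When is_fundamental is set, the correspondences are Hartley-normalized (x_hat = T1 x, x_hat' = T2 x'), F_hat is estimated on the normalized coordinates and mapped back. Since x_hat'^T F_hat x_hat = x'^T (T2^T F_hat T1) x, the denormalization is

\[ F = T_2^\top \hat{F}\, T_1, \]

which the closed-form Matx33d expression above writes out explicitly, using that T1 and T2 contain only an isotropic scale and a translation.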
// ------- 8 points algorithm with Eigen and covariance matrix -------------- class CovarianceEpipolarSolverImpl : public CovarianceEpipolarSolver {
private:
Mat norm_pts;
Matx33d T1, T2;
float * norm_points;
std::vector<bool> mask;
int points_size;
double covariance[81] = {0}, * t1, * t2;
bool is_fundamental, enforce_rank = true;
public:
explicit CovarianceEpipolarSolverImpl (const Mat &norm_points_, const Matx33d &T1_, const Matx33d &T2_)
: norm_pts(norm_points_), T1(T1_), T2(T2_) {
points_size = norm_points_.rows;
norm_points = (float *) norm_pts.data;
t1 = T1.val; t2 = T2.val;
mask = std::vector<bool>(points_size, false);
is_fundamental = true;
}
explicit CovarianceEpipolarSolverImpl (const Mat &points_, bool is_fundamental_) {
points_size = points_.rows;
is_fundamental = is_fundamental_;
if (is_fundamental) { // normalize image points only for fundamental matrix
std::vector<int> sample(points_size);
for (int i = 0; i < points_size; i++) sample[i] = i;
const Ptr<NormTransform> normTr = NormTransform::create(points_);
normTr->getNormTransformation(norm_pts, sample, points_size, T1, T2);
t1 = T1.val; t2 = T2.val;
} else norm_pts = points_; // otherwise points are normalized by intrinsics
norm_points = (float *)norm_pts.data;
mask = std::vector<bool>(points_size, false);
}
void enforceRankConstraint (bool enforce_) override { enforce_rank = enforce_; }
void reset () override {
std::fill(covariance, covariance+81, 0);
std::fill(mask.begin(), mask.end(), false);
}
/*
* Find fundamental matrix using 8-point algorithm with covariance matrix and PCA
*/
int estimate (const std::vector<bool> &new_mask, std::vector<Mat> &models,
const std::vector<double> &/*weights*/) override {
double a[9] = {0, 0, 0, 0, 0, 0, 0, 0, 1}; double a[9] = {0, 0, 0, 0, 0, 0, 0, 0, 1};
double AtA[81] = {0}; // 9x9
if (weights.empty()) { for (int i = 0; i < points_size; i++) {
for (int i = 0; i < sample_size; i++) { if (mask[i] != new_mask[i]) {
const int norm_points_idx = 4*i; const int smpl = 4*i;
const double x1 = norm_pts[norm_points_idx ], y1 = norm_pts[norm_points_idx+1], const double x1 = norm_points[smpl ], y1 = norm_points[smpl+1],
x2 = norm_pts[norm_points_idx+2], y2 = norm_pts[norm_points_idx+3]; x2 = norm_points[smpl+2], y2 = norm_points[smpl+3];
a[0] = x2*x1; a[0] = x2*x1;
a[1] = x2*y1; a[1] = x2*y1;
a[2] = x2; a[2] = x2;
@ -269,71 +476,129 @@ public:
a[6] = x1; a[6] = x1;
a[7] = y1; a[7] = y1;
// calculate covariance for eigen if (mask[i]) // if mask[i] is true then new_mask[i] must be false
for (int row = 0; row < 9; row++) for (int j = 0; j < 9; j++)
for (int col = row; col < 9; col++) for (int z = j; z < 9; z++)
AtA[row*9+col] += a[row]*a[col]; covariance[j*9+z] -= a[j]*a[z];
} else
} else { for (int j = 0; j < 9; j++)
for (int i = 0; i < sample_size; i++) { for (int z = j; z < 9; z++)
const int smpl = 4*i; covariance[j*9+z] += a[j]*a[z];
const double weight = weights[i];
const double x1 = norm_pts[smpl ], y1 = norm_pts[smpl+1],
x2 = norm_pts[smpl+2], y2 = norm_pts[smpl+3];
const double weight_times_x2 = weight * x2,
weight_times_y2 = weight * y2;
a[0] = weight_times_x2 * x1;
a[1] = weight_times_x2 * y1;
a[2] = weight_times_x2;
a[3] = weight_times_y2 * x1;
a[4] = weight_times_y2 * y1;
a[5] = weight_times_y2;
a[6] = weight * x1;
a[7] = weight * y1;
a[8] = weight;
// calculate covariance for eigen
for (int row = 0; row < 9; row++)
for (int col = row; col < 9; col++)
AtA[row*9+col] += a[row]*a[col];
} }
} }
mask = new_mask;
// copy symmetric part of covariance matrix // copy symmetric part of covariance matrix
for (int j = 1; j < 9; j++) for (int j = 1; j < 9; j++)
for (int z = 0; z < j; z++) for (int z = 0; z < j; z++)
AtA[j*9+z] = AtA[z*9+j]; covariance[j*9+z] = covariance[z*9+j];
#ifdef HAVE_EIGEN #ifdef HAVE_EIGEN
models = std::vector<Mat>{ Mat_<double>(3,3) }; models = std::vector<Mat>{ Mat_<double>(3,3) };
const Eigen::JacobiSVD<Eigen::Matrix<double, 9, 9>> svd((Eigen::Matrix<double, 9, 9>(AtA)), // extract the last null-vector
Eigen::ComputeFullV); Eigen::Map<Eigen::Matrix<double, 9, 1>>((double *)models[0].data) = Eigen::JacobiSVD
// extract the last nullspace <Eigen::Matrix<double, 9, 9>> ((Eigen::Matrix<double, 9, 9>(covariance)),
Eigen::Map<Eigen::Matrix<double, 9, 1>>((double *)models[0].data) = svd.matrixV().col(8); Eigen::ComputeFullV).matrixV().col(8);
#else #else
Matx<double, 9, 9> AtA_(AtA), U, Vt; Matx<double, 9, 9> AtA_(covariance), U, Vt;
Vec<double, 9> W; Vec<double, 9> W;
SVD::compute(AtA_, W, U, Vt, SVD::FULL_UV + SVD::MODIFY_A); SVD::compute(AtA_, W, U, Vt, SVD::FULL_UV + SVD::MODIFY_A);
models = std::vector<Mat> { Mat_<double>(3, 3, Vt.val + 72 /*=8*9*/) }; models = std::vector<Mat> { Mat_<double>(3, 3, Vt.val + 72 /*=8*9*/) };
#endif #endif
FundamentalDegeneracy::recoverRank(models[0], true/*F*/); if (enforce_rank)
FundamentalDegeneracy::recoverRank(models[0], is_fundamental);
if (is_fundamental) {
const auto * const f = (double *) models[0].data;
// F = T2^T F T1
models[0] = Mat(Matx33d(t1[0]*t2[0]*f[0],t1[0]*t2[0]*f[1], t2[0]*f[2] + t2[0]*f[0]*t1[2] +
t2[0]*f[1]*t1[5], t1[0]*t2[0]*f[3],t1[0]*t2[0]*f[4], t2[0]*f[5] + t2[0]*f[3]*t1[2] +
t2[0]*f[4]*t1[5], t1[0]*(f[6] + f[0]*t2[2] + f[3]*t2[5]), t1[0]*(f[7] + f[1]*t2[2] +
f[4]*t2[5]), f[8] + t1[2]*(f[6] + f[0]*t2[2] + f[3]*t2[5]) + t1[5]*(f[7] + f[1]*t2[2] +
f[4]*t2[5]) + f[2]*t2[2] + f[5]*t2[5]));
}
return 1;
}
int getMinimumRequiredSampleSize() const override { return 8; }
int getMaxNumberOfSolutions () const override { return 1; }
};
Ptr<CovarianceEpipolarSolver> CovarianceEpipolarSolver::create (const Mat &points, bool is_fundamental) {
return makePtr<CovarianceEpipolarSolverImpl>(points, is_fundamental);
}
Ptr<CovarianceEpipolarSolver> CovarianceEpipolarSolver::create (const Mat &points, const Matx33d &T1, const Matx33d &T2) {
return makePtr<CovarianceEpipolarSolverImpl>(points, T1, T2);
}
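CovarianceEpipolarSolverImpl maintains the 9x9 normal matrix A^T A = sum_i a_i a_i^T over the current inlier mask and updates it incrementally: only the points whose mask flag flipped are added to or subtracted from the accumulator, after which the model is again the null vector (smallest singular vector) of the updated matrix. A minimal sketch of that rank-1 update (function and parameter names are hypothetical, not from the patch):

    #include <array>
    #include <vector>

    // Add or remove one correspondence's contribution a*a^T to a 9x9
    // row-major accumulator; only the upper triangle is touched, the
    // lower triangle is mirrored once before the eigen/SVD step.
    void updateCovariance(std::vector<double>& AtA, const std::array<double, 9>& a, bool add)
    {
        const double s = add ? 1.0 : -1.0;
        for (int r = 0; r < 9; r++)
            for (int c = r; c < 9; c++)
                AtA[r * 9 + c] += s * a[r] * a[c];
    }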
class LarssonOptimizerImpl : public LarssonOptimizer {
private:
const Mat &calib_points;
Matx33d K1, K2, K2_t, K1_inv, K2_inv_t;
bool is_fundamental;
BundleOptions opt;
public:
LarssonOptimizerImpl (const Mat &calib_points_, const Matx33d &K1_, const Matx33d &K2_, int max_iters_, bool is_fundamental_) :
calib_points(calib_points_), K1(K1_), K2(K2_){
is_fundamental = is_fundamental_;
opt.max_iterations = max_iters_;
opt.loss_scale = Utils::getCalibratedThreshold(std::max(1.5, opt.loss_scale), Mat(K1), Mat(K2));
if (is_fundamental) {
K1_inv = K1.inv();
K2_t = K2.t();
K2_inv_t = K2_t.inv();
}
}
// Transpose T2 (in T2 the lower diagonal is zero) int estimate (const Mat &model, const std::vector<int> &sample, int sample_size, std::vector<Mat>
T2(2, 0) = T2(0, 2); T2(2, 1) = T2(1, 2); &models, const std::vector<double> &weights) const override {
T2(0, 2) = 0; T2(1, 2) = 0; if (sample_size < 5) return 0;
const Matx33d E = is_fundamental ? K2_t * Matx33d(model) * K1 : model;
RNG rng (sample_size);
cv::Matx33d R1, R2; cv::Vec3d t;
cv::decomposeEssentialMat(E, R1, R2, t);
int positive_depth[4] = {0};
const auto * const pts_ = (float *) calib_points.data;
// a few points are enough to test
// since the Sampson error is minimized, the initial R,t do not matter much;
// with the correct pair the convergence is only slightly faster
for (int i = 0; i < 3; i++) { // could be 1 point
const int rand_inl = 4 * sample[rng.uniform(0, sample_size)];
Vec3d p1 (pts_[rand_inl], pts_[rand_inl+1], 1), p2(pts_[rand_inl+2], pts_[rand_inl+3], 1);
p1 /= norm(p1); p2 /= norm(p2);
if (satisfyCheirality(R1, t, p1, p2)) positive_depth[0]++;
if (satisfyCheirality(R1, -t, p1, p2)) positive_depth[1]++;
if (satisfyCheirality(R2, t, p1, p2)) positive_depth[2]++;
if (satisfyCheirality(R2, -t, p1, p2)) positive_depth[3]++;
}
int corr_idx = 0, max_good_pts = positive_depth[0];
for (int i = 1; i < 4; i++) {
if (max_good_pts < positive_depth[i]) {
max_good_pts = positive_depth[i];
corr_idx = i;
}
}
models[0] = T2 * models[0] * T1; CameraPose pose;
pose.R = corr_idx < 2 ? R1 : R2;
pose.t = corr_idx % 2 == 1 ? -t : t;
refine_relpose(calib_points, sample, sample_size, &pose, opt, &weights[0]);
Matx33d model_new = Math::getSkewSymmetric(pose.t) * pose.R;
if (is_fundamental)
model_new = K2_inv_t * model_new * K1_inv;
models = std::vector<Mat> { Mat(model_new) };
return 1; return 1;
} }
int getMinimumRequiredSampleSize() const override { return 8; } int estimate (const std::vector<int>&, int, std::vector<Mat>&, const std::vector<double>&) const override {
int getMaxNumberOfSolutions () const override { return 1; } return 0;
Ptr<NonMinimalSolver> clone () const override {
return makePtr<FundamentalNonMinimalSolverImpl>(*points_mat);
} }
int estimate (const std::vector<bool> &/*mask*/, std::vector<Mat> &/*models*/,
const std::vector<double> &/*weights*/) override {
return 0;
}
void enforceRankConstraint (bool /*enforce*/) override {}
int getMinimumRequiredSampleSize() const override { return 5; }
int getMaxNumberOfSolutions () const override { return 1; }
}; };
Ptr<FundamentalNonMinimalSolver> FundamentalNonMinimalSolver::create(const Mat &points_) { Ptr<LarssonOptimizer> LarssonOptimizer::create(const Mat &calib_points_, const Matx33d &K1, const Matx33d &K2, int max_iters_, bool is_fundamental) {
return makePtr<FundamentalNonMinimalSolverImpl>(points_); return makePtr<LarssonOptimizerImpl>(calib_points_, K1, K2, max_iters_, is_fundamental);
} }
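LarssonOptimizerImpl first decomposes the essential matrix into its four candidate poses (R1, +/-t), (R2, +/-t) and keeps the one for which most of the sampled correspondences pass the cheirality (positive depth) test. refine_relpose then performs the bundle refinement of (R, t) on the sample, and the refined model is rebuilt as E = [t]_x R via getSkewSymmetric, or as F = K2^{-T} E K1^{-1} when a fundamental matrix is requested.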
}} }}

@ -7,101 +7,145 @@
namespace cv { namespace usac { namespace cv { namespace usac {
GammaValues::GammaValues() class GammaValuesImpl : public GammaValues {
: max_range_complete(4.62) std::vector<double> gamma_complete, gamma_incomplete, gamma;
, max_range_gamma(1.52) double scale_complete_values, scale_gamma_values;
, max_size_table(3000) int max_size_table, DoF;
{ public:
/* GammaValuesImpl (int DoF_, int max_size_table_) {
* Gamma values for degrees of freedom n = 2 and sigma quantile 99% of chi distribution max_size_table = max_size_table_;
* (squared root of chi-squared distribution), in the range <0; 4.62> for complete values DoF = DoF_;
* and <0, 1.52> for gamma values. /*
* Number of anchor points is 50. Other values are approximated using linear interpolation * Gamma values for degrees of freedom n = 2 and sigma quantile 99% of chi distribution
*/ * (squared root of chi-squared distribution), in the range <0; 4.62> for complete values
const int number_of_anchor_points = 50; * and <0, 1.52> for gamma values.
std::vector<double> gamma_complete_anchor = std::vector<double> * Number of anchor points is 50. Other values are approximated using linear interpolation
{1.7724538509055159, 1.182606138403832, 0.962685372890749, 0.8090013493715409, */
0.6909325812483967, 0.5961199186942078, 0.5179833984918483, 0.45248091153099873, const int number_of_anchor_points = 50;
0.39690029823142897, 0.34930995878395804, 0.3082742109224103, 0.2726914551904204, std::vector<double> gamma_complete_anchor, gamma_incomplete_anchor, gamma_anchor;
0.2416954924567404, 0.21459196516027726, 0.190815580770884, 0.16990026519723456, if (DoF == 2) {
0.15145770273372564, 0.13516150988807635, 0.12073530906427948, 0.10794357255251595, const double max_thr = 7.5, gamma_quantile = 3.04;
0.0965844793065712, 0.08648426334883624, 0.07749268706639856, 0.06947937608738222, scale_complete_values = max_size_table_ / max_thr;
0.062330823249820304, 0.05594791865006951, 0.05024389794830681, 0.045142626552664405, scale_gamma_values = gamma_quantile * max_size_table_ / max_thr ;
0.040577155977706246, 0.03648850256745103, 0.03282460924226794, 0.029539458909083157,
0.02659231432268328, 0.023947063970062663, 0.021571657306774475, 0.01943761564987864,
0.017519607407598645, 0.015795078236273064, 0.014243928262247118, 0.012848229767187478,
0.011591979769030827, 0.010460882783057988, 0.009442159753944173, 0.008524379737926344,
0.007697311406424555, 0.006951791856026042, 0.006279610558635573, 0.005673406581042374,
0.005126577454218803, 0.004633198286725555};
std::vector<double> gamma_incomplete_anchor = std::vector<double> gamma_complete_anchor = std::vector<double>
{0.0, 0.01773096912803939, 0.047486924846289004, 0.08265437835139826, 0.120639343491371, {1.77245385e+00, 1.02824699e+00, 7.69267629e-01, 5.99047749e-01,
0.15993024714868515, 0.19954558593754865, 0.23881753504915218, 0.2772830648361923, 4.75998050e-01, 3.83008633e-01, 3.10886473e-01, 2.53983661e-01,
0.3146208784488923, 0.3506114446939783, 0.385110056889967, 0.41802785670077697, 2.08540472e-01, 1.71918718e-01, 1.42197872e-01, 1.17941854e-01,
0.44931803198258047, 0.47896553567848993, 0.5069792897777948, 0.5333861945970247, 9.80549104e-02, 8.16877552e-02, 6.81739145e-02, 5.69851046e-02,
0.5582264802664578, 0.581550074874317, 0.6034137543595729, 0.6238789008764282, 4.76991202e-02, 3.99417329e-02, 3.35126632e-02, 2.81470710e-02,
0.6430097394182639, 0.6608719532994989, 0.6775316015953519, 0.6930542783709592, 2.36624697e-02, 1.99092598e-02, 1.67644090e-02, 1.41264487e-02,
0.7075044661695132, 0.7209450459078338, 0.733436932830201, 0.7450388140484766, 1.19114860e-02, 1.00500046e-02, 8.48428689e-03, 7.16632498e-03,
0.7558069678435577, 0.7657951486073097, 0.7750545242776943, 0.7836336555215403, 6.05612291e-03, 5.12031042e-03, 4.33100803e-03, 3.66489504e-03,
0.7915785078697124, 0.798932489600361, 0.8057365094688473, 0.8120290494534339, 3.10244213e-03, 2.62514027e-03, 2.22385863e-03, 1.88454040e-03,
0.8178462485678104, 0.8232219945197348, 0.8281880205973585, 0.8327740056635289, 1.59749690e-03, 1.35457835e-03, 1.14892453e-03, 9.74756909e-04,
0.8370076755516281, 0.8409149044990385, 0.8445198155381767, 0.8478448790000731, 8.27205063e-04, 7.02161552e-04, 5.96160506e-04, 5.06275903e-04,
0.8509110084798414, 0.8537376537738418, 0.8563428904304485, 0.8587435056647642, 4.30036278e-04, 3.65353149e-04, 3.10460901e-04, 2.63866261e-04,
0.8609550804762539}; 2.24305797e-04, 1.90558599e-04};
std::vector<double> gamma_anchor = std::vector<double> gamma_incomplete_anchor = std::vector<double>
{1.7724538509055159, 1.427187162582056, 1.2890382454046982, 1.186244737282388, {0. , 0.0364325 , 0.09423626, 0.15858163, 0.22401622, 0.28773243,
1.1021938955410173, 1.0303674512016956, 0.9673796229113404, 0.9111932804012203, 0.34820493, 0.40463362, 0.45665762, 0.50419009, 0.54731575, 0.58622491,
0.8604640514722175, 0.814246149432561, 0.7718421763436497, 0.7327190195355812, 0.62116968, 0.65243473, 0.68031763, 0.70511575, 0.72711773, 0.74668782,
0.6964573670982434, 0.6627197089339725, 0.6312291454822467, 0.6017548373556638, 0.76389332, 0.77907386, 0.79244816, 0.80421561, 0.81455692, 0.8236351 ,
0.5741017071093776, 0.5481029597580317, 0.523614528104858, 0.5005108666212138, 0.83159653, 0.83857228, 0.84467927, 0.85002158, 0.85469163, 0.85877132,
0.478681711577816, 0.4580295473431646, 0.43846759792922513, 0.41991821541471996, 0.86233307, 0.86544086, 0.86815108, 0.87052421, 0.87258093, 0.87437198,
0.40231157253054745, 0.38558459136185, 0.3696800574963841, 0.3545458813847714, 0.87593103, 0.87728759, 0.87846752, 0.87949345, 0.88038518, 0.88116002,
0.340134477710645, 0.32640224021796493, 0.3133090943985706, 0.3008181141790485, 0.88183307, 0.88241755, 0.88292497, 0.88336537, 0.88374751, 0.88407901,
0.28889519159238314, 0.2775087506098113, 0.2666294980086962, 0.2562302054837794, 0.88436652, 0.88461696};
0.24628551826026082, 0.2367717863030556, 0.22766691488600885, 0.21895023182476064,
0.2106023691144937, 0.2026051570714723, 0.19494152937027823, 0.18759543761063277,
0.1805517742482484, 0.17379630289125447, 0.16731559510356395, 0.1610969729740903,
0.1551284568099053, 0.14939871739550692};
// allocate tables gamma_anchor = std::vector<double>
gamma_complete = std::vector<double>(max_size_table); {1.77245385e+00, 5.93375722e-01, 3.05833272e-01, 1.68019955e-01,
gamma_incomplete = std::vector<double>(max_size_table); 9.52188705e-02, 5.49876141e-02, 3.21629603e-02, 1.89881161e-02,
gamma = std::vector<double>(max_size_table); 1.12897384e-02, 6.75016002e-03, 4.05426969e-03, 2.44422283e-03,
1.47822780e-03, 8.96425642e-04, 5.44879754e-04, 3.31873268e-04,
2.02499478e-04, 1.23458651e-04, 7.55593392e-05, 4.63032752e-05,
2.84078946e-05, 1.74471428e-05, 1.07257506e-05, 6.59955061e-06,
4.06400013e-06, 2.50448635e-06, 1.54449028e-06, 9.53085308e-07,
5.88490160e-07, 3.63571768e-07, 2.24734099e-07, 1.38982938e-07,
8.59913580e-08, 5.31026827e-08, 3.28834964e-08, 2.03707922e-08,
1.26240063e-08, 7.82595771e-09, 4.85312084e-09, 3.01051575e-09,
1.86805770e-09, 1.15947962e-09, 7.19869372e-10, 4.47050615e-10,
2.77694421e-10, 1.72536278e-10, 1.07224039e-10, 6.66497131e-11,
4.14376355e-11, 2.57079508e-11};
} else if (DoF == 4) {
const double max_thr = 2.5, gamma_quantile = 3.64;
scale_complete_values = max_size_table_ / max_thr;
scale_gamma_values = gamma_quantile * max_size_table_ / max_thr ;
gamma_complete_anchor = std::vector<double>
{0.88622693, 0.87877828, 0.86578847, 0.84979442, 0.83179176, 0.81238452,
0.79199067, 0.77091934, 0.74940836, 0.72764529, 0.70578051, 0.68393585,
0.66221071, 0.64068639, 0.61942952, 0.59849449, 0.57792561, 0.55766078,
0.53792634, 0.51864482, 0.49983336, 0.48150466, 0.46366759, 0.44632776,
0.42948797, 0.41314862, 0.39730804, 0.38196282, 0.36710806, 0.35273761,
0.33884422, 0.32541979, 0.31245545, 0.29988151, 0.28781065, 0.2761701 ,
0.26494924, 0.25413723, 0.24372308, 0.23369573, 0.22404405, 0.21475696,
0.2058234 , 0.19723241, 0.18897314, 0.18103488, 0.17340708, 0.16607937,
0.15904157, 0.15225125};
gamma_incomplete_anchor = std::vector<double>
{0.00000000e+00, 2.26619558e-04, 1.23631005e-03, 3.28596265e-03,
6.50682297e-03, 1.09662062e-02, 1.66907233e-02, 2.36788942e-02,
3.19091043e-02, 4.13450655e-02, 5.19397673e-02, 6.36384378e-02,
7.63808171e-02, 9.01029320e-02, 1.04738496e-01, 1.20220023e-01,
1.36479717e-01, 1.53535010e-01, 1.71152805e-01, 1.89349599e-01,
2.08062142e-01, 2.27229225e-01, 2.46791879e-01, 2.66693534e-01,
2.86880123e-01, 3.07300152e-01, 3.27904735e-01, 3.48647611e-01,
3.69485130e-01, 3.90376227e-01, 4.11282379e-01, 4.32167556e-01,
4.52998149e-01, 4.73844336e-01, 4.94473655e-01, 5.14961263e-01,
5.35282509e-01, 5.55414767e-01, 5.75337352e-01, 5.95031429e-01,
6.14479929e-01, 6.33667460e-01, 6.52580220e-01, 6.71205917e-01,
6.89533681e-01, 7.07553988e-01, 7.25258581e-01, 7.42640393e-01,
7.59693477e-01, 7.76494059e-01};
gamma_anchor = std::vector<double>
{8.86226925e-01, 8.38460922e-01, 7.64931722e-01, 6.85680218e-01,
6.07663201e-01, 5.34128389e-01, 4.66574835e-01, 4.05560768e-01,
3.51114357e-01, 3.02965249e-01, 2.60682396e-01, 2.23758335e-01,
1.91661077e-01, 1.63865725e-01, 1.39873108e-01, 1.19220033e-01,
1.01484113e-01, 8.62162923e-02, 7.32253576e-02, 6.21314285e-02,
5.26713657e-02, 4.46151697e-02, 3.77626859e-02, 3.19403783e-02,
2.69982683e-02, 2.28070945e-02, 1.92557199e-02, 1.62487939e-02,
1.37046640e-02, 1.15535264e-02, 9.73579631e-03, 8.20068208e-03,
6.90494092e-03, 5.80688564e-03, 4.88587254e-03, 4.10958296e-03,
3.45555079e-03, 2.90474053e-03, 2.44103551e-03, 2.05079975e-03,
1.72250366e-03, 1.44640449e-03, 1.21427410e-03, 1.01916714e-03,
8.55224023e-04, 7.17503448e-04, 6.01840372e-04, 5.04725511e-04,
4.23203257e-04, 3.54478559e-04};
} else CV_Error(cv::Error::StsNotImplemented, "Not implemented for specific DoF!");
// allocate tables
gamma_complete = std::vector<double>(max_size_table);
gamma_incomplete = std::vector<double>(max_size_table);
gamma = std::vector<double>(max_size_table);
const int step = (int)((double)max_size_table / (number_of_anchor_points-1)); // do linear interpolation of gamma values
int arr_cnt = 0; const int step = (int)((double)max_size_table / (number_of_anchor_points-1));
for (int i = 0; i < number_of_anchor_points-1; i++) { int arr_cnt = 0;
const double complete_x0 = gamma_complete_anchor[i], step_complete = (gamma_complete_anchor[i+1] - complete_x0) / step; for (int i = 0; i < number_of_anchor_points-1; i++) {
const double incomplete_x0 = gamma_incomplete_anchor[i], step_incomplete = (gamma_incomplete_anchor[i+1] - incomplete_x0) / step; const double complete_x0 = gamma_complete_anchor[i], step_complete = (gamma_complete_anchor[i+1] - complete_x0) / step;
const double gamma_x0 = gamma_anchor[i], step_gamma = (gamma_anchor[i+1] - gamma_x0) / step; const double incomplete_x0 = gamma_incomplete_anchor[i], step_incomplete = (gamma_incomplete_anchor[i+1] - incomplete_x0) / step;
const double gamma_x0 = gamma_anchor[i], step_gamma = (gamma_anchor[i+1] - gamma_x0) / step;
for (int j = 0; j < step; j++) { for (int j = 0; j < step; j++) {
gamma_complete[arr_cnt] = complete_x0 + j * step_complete; gamma_complete[arr_cnt] = complete_x0 + j * step_complete;
gamma_incomplete[arr_cnt] = incomplete_x0 + j * step_incomplete; gamma_incomplete[arr_cnt] = incomplete_x0 + j * step_incomplete;
gamma[arr_cnt++] = gamma_x0 + j * step_gamma; gamma[arr_cnt++] = gamma_x0 + j * step_gamma;
} }
}
if (arr_cnt < max_size_table) {
// if array was not totally filled (in some cases can happen) then copy last values
std::fill(gamma_complete.begin()+arr_cnt, gamma_complete.end(), gamma_complete[arr_cnt-1]);
std::fill(gamma_incomplete.begin()+arr_cnt, gamma_incomplete.end(), gamma_incomplete[arr_cnt-1]);
std::fill(gamma.begin()+arr_cnt, gamma.end(), gamma[arr_cnt-1]);
}
} }
if (arr_cnt < max_size_table) {
// if array was not totally filled (in some cases can happen) then copy last values
std::fill(gamma_complete.begin()+arr_cnt, gamma_complete.end(), gamma_complete[arr_cnt-1]);
std::fill(gamma_incomplete.begin()+arr_cnt, gamma_incomplete.end(), gamma_incomplete[arr_cnt-1]);
std::fill(gamma.begin()+arr_cnt, gamma.end(), gamma[arr_cnt-1]);
}
}
const std::vector<double>& GammaValues::getCompleteGammaValues() const { return gamma_complete; } const std::vector<double> &getCompleteGammaValues() const override { return gamma_complete; }
const std::vector<double>& GammaValues::getIncompleteGammaValues() const { return gamma_incomplete; } const std::vector<double> &getIncompleteGammaValues() const override { return gamma_incomplete; }
const std::vector<double>& GammaValues::getGammaValues() const { return gamma; } const std::vector<double> &getGammaValues() const override { return gamma; }
double GammaValues::getScaleOfGammaCompleteValues () const { return gamma_complete.size() / max_range_complete; } double getScaleOfGammaCompleteValues () const override { return scale_complete_values; }
double GammaValues::getScaleOfGammaValues () const { return gamma.size() / max_range_gamma; } double getScaleOfGammaValues () const override { return scale_gamma_values; }
int GammaValues::getTableSize () const { return max_size_table; } int getTableSize () const override { return max_size_table; }
};
/* static */ Ptr<GammaValues> GammaValues::create(int DoF, int max_size_table) {
const GammaValues& GammaValues::getSingleton() return makePtr<GammaValuesImpl>(DoF, max_size_table);
{
static GammaValues g_gammaValues;
return g_gammaValues;
} }
}} // namespace }} // namespace
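The GammaValues table caches the (in)complete gamma function values used by the MAGSAC-style scoring for DoF 2 and 4: only 50 anchor values per curve are stored, and the remaining max_size_table entries are filled by linear interpolation, so a query reduces to scaling the residual and indexing the table. A minimal sketch of such a lookup (names are hypothetical, not from the patch):

    #include <algorithm>
    #include <vector>

    // Clamp the scaled residual to the table range and return the cached value.
    double lookupGamma(const std::vector<double>& table, double scale, double value)
    {
        int idx = static_cast<int>(scale * value);
        idx = std::min(std::max(idx, 0), static_cast<int>(table.size()) - 1);
        return table[idx];
    }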

@ -9,21 +9,25 @@
#endif #endif
namespace cv { namespace usac { namespace cv { namespace usac {
class HomographyMinimalSolver4ptsGEMImpl : public HomographyMinimalSolver4ptsGEM { class HomographyMinimalSolver4ptsImpl : public HomographyMinimalSolver4pts {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points; const bool use_ge;
public: public:
explicit HomographyMinimalSolver4ptsGEMImpl (const Mat &points_) : explicit HomographyMinimalSolver4ptsImpl (const Mat &points_, bool use_ge_) :
points_mat(&points_), points ((float*) points_.data) {} points_mat(points_), use_ge(use_ge_)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
int estimate (const std::vector<int>& sample, std::vector<Mat> &models) const override { int estimate (const std::vector<int>& sample, std::vector<Mat> &models) const override {
const float * points = points_mat.ptr<float>();
int m = 8, n = 9; int m = 8, n = 9;
std::vector<double> A(72, 0); std::vector<double> A(72, 0);
int cnt = 0; int cnt = 0;
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
const int smpl = 4*sample[i]; const int smpl = 4*sample[i];
const double x1 = points[smpl], y1 = points[smpl+1], x2 = points[smpl+2], y2 = points[smpl+3]; const auto x1 = points[smpl], y1 = points[smpl+1], x2 = points[smpl+2], y2 = points[smpl+3];
A[cnt++] = -x1; A[cnt++] = -x1;
A[cnt++] = -y1; A[cnt++] = -y1;
@ -42,49 +46,59 @@ public:
A[cnt++] = y2; A[cnt++] = y2;
} }
if (!Math::eliminateUpperTriangular(A, m, n)) if (use_ge) {
return 0; if (!Math::eliminateUpperTriangular(A, m, n))
return 0;
models = std::vector<Mat>{ Mat_<double>(3,3) }; models = std::vector<Mat>{ Mat_<double>(3,3) };
auto * h = (double *) models[0].data; auto * h = (double *) models[0].data;
h[8] = 1.; h[8] = 1.;
// start from the last row // start from the last row
for (int i = m-1; i >= 0; i--) { for (int i = m-1; i >= 0; i--) {
double acc = 0; double acc = 0;
for (int j = i+1; j < n; j++) for (int j = i+1; j < n; j++)
acc -= A[i*n+j]*h[j]; acc -= A[i*n+j]*h[j];
h[i] = acc / A[i*n+i]; h[i] = acc / A[i*n+i];
// due to numerical errors return 0 solutions // due to numerical errors return 0 solutions
if (std::isnan(h[i])) if (std::isnan(h[i]))
return 0; return 0;
}
} else {
Mat U, Vt, D;
cv::Matx<double, 8, 9> A_svd(&A[0]);
SVD::compute(A_svd, D, U, Vt, SVD::FULL_UV+SVD::MODIFY_A);
models = std::vector<Mat> { Vt.row(Vt.rows-1).reshape(0, 3) };
} }
return 1; return 1;
} }
int getMaxNumberOfSolutions () const override { return 1; } int getMaxNumberOfSolutions () const override { return 1; }
int getSampleSize() const override { return 4; } int getSampleSize() const override { return 4; }
Ptr<MinimalSolver> clone () const override {
return makePtr<HomographyMinimalSolver4ptsGEMImpl>(*points_mat);
}
}; };
Ptr<HomographyMinimalSolver4ptsGEM> HomographyMinimalSolver4ptsGEM::create(const Mat &points_) { Ptr<HomographyMinimalSolver4pts> HomographyMinimalSolver4pts::create(const Mat &points, bool use_ge) {
return makePtr<HomographyMinimalSolver4ptsGEMImpl>(points_); return makePtr<HomographyMinimalSolver4ptsImpl>(points, use_ge);
} }
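For the homography each correspondence yields two rows of the DLT system (h is H stacked row-wise):

\[ (-x_1,\; -y_1,\; -1,\; 0,\; 0,\; 0,\; x_2 x_1,\; x_2 y_1,\; x_2)\,\mathbf{h} = 0, \qquad (0,\; 0,\; 0,\; -x_1,\; -y_1,\; -1,\; y_2 x_1,\; y_2 y_1,\; y_2)\,\mathbf{h} = 0, \]

which is exactly how the a1/a2 rows are filled both here and in the non-minimal solver below; the GE variant fixes h9 = 1 and solves the resulting 8x9 system, while the SVD/QR variant keeps the full 9-vector.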
class HomographyNonMinimalSolverImpl : public HomographyNonMinimalSolver { class HomographyNonMinimalSolverImpl : public HomographyNonMinimalSolver {
private: private:
const Mat * points_mat; Mat points_mat;
const Ptr<NormTransform> normTr; const bool do_norm, use_ge;
Ptr<NormTransform> normTr;
Matx33d _T1, _T2;
public: public:
explicit HomographyNonMinimalSolverImpl (const Mat &points_) : explicit HomographyNonMinimalSolverImpl (const Mat &norm_points_, const Matx33d &T1, const Matx33d &T2, bool use_ge_) :
points_mat(&points_), normTr (NormTransform::create(points_)) {} points_mat(norm_points_), do_norm(false), use_ge(use_ge_), _T1(T1), _T2(T2)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
explicit HomographyNonMinimalSolverImpl (const Mat &points_, bool use_ge_) :
points_mat(points_), do_norm(true), use_ge(use_ge_), normTr (NormTransform::create(points_))
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
/*
 * Find the homography matrix using (weighted) non-minimal estimation:
 * Principal Component Analysis on normalized points.
*/
int estimate (const std::vector<int> &sample, int sample_size, std::vector<Mat> &models, int estimate (const std::vector<int> &sample, int sample_size, std::vector<Mat> &models,
const std::vector<double> &weights) const override { const std::vector<double> &weights) const override {
if (sample_size < getMinimumRequiredSampleSize()) if (sample_size < getMinimumRequiredSampleSize())
@ -92,21 +106,226 @@ public:
Matx33d T1, T2; Matx33d T1, T2;
Mat norm_points_; Mat norm_points_;
normTr->getNormTransformation(norm_points_, sample, sample_size, T1, T2); if (do_norm)
normTr->getNormTransformation(norm_points_, sample, sample_size, T1, T2);
const float * const npts = do_norm ? norm_points_.ptr<float>() : points_mat.ptr<float>();
Mat H;
if (use_ge) {
double a1[8] = {0, 0, -1, 0, 0, 0, 0, 0},
a2[8] = {0, 0, 0, 0, 0, -1, 0, 0};
std::vector<double> AtAb(72, 0); // 8x9
if (weights.empty()) {
for (int i = 0; i < sample_size; i++) {
const int idx = do_norm ? 4*i : 4*sample[i];
const double x1 = npts[idx], y1 = npts[idx+1], x2 = npts[idx+2], y2 = npts[idx+3];
a1[0] = -x1;
a1[1] = -y1;
a1[6] = x2*x1;
a1[7] = x2*y1;
a2[3] = -x1;
a2[4] = -y1;
a2[6] = y2*x1;
a2[7] = y2*y1;
// accumulate the normal equations [A^T A | A^T b] (upper triangle only)
for (int j = 0; j < 8; j++) {
for (int z = j; z < 8; z++)
AtAb[j * 9 + z] += a1[j]*a1[z] + a2[j]*a2[z];
AtAb[j * 9 + 8] += a1[j]*x2 + a2[j]*y2;
}
}
} else { // use weights
for (int i = 0; i < sample_size; i++) {
const double weight = weights[i];
if (weight < FLT_EPSILON) continue;
const int idx = do_norm ? 4*i : 4*sample[i];
const double x1 = npts[idx], y1 = npts[idx+1], x2 = npts[idx+2], y2 = npts[idx+3];
const double minus_weight_times_x1 = -weight * x1,
minus_weight_times_y1 = -weight * y1,
weight_times_x2 = weight * x2,
weight_times_y2 = weight * y2;
a1[0] = minus_weight_times_x1;
a1[1] = minus_weight_times_y1;
a1[2] = -weight;
a1[6] = weight_times_x2 * x1;
a1[7] = weight_times_x2 * y1;
a2[3] = minus_weight_times_x1;
a2[4] = minus_weight_times_y1;
a2[5] = -weight;
a2[6] = weight_times_y2 * x1;
a2[7] = weight_times_y2 * y1;
for (int j = 0; j < 8; j++) {
for (int z = j; z < 8; z++)
AtAb[j * 9 + z] += a1[j]*a1[z] + a2[j]*a2[z];
AtAb[j * 9 + 8] += a1[j]*weight_times_x2 + a2[j]*weight_times_y2;
}
}
}
for (int j = 1; j < 8; j++)
for (int z = 0; z < j; z++)
AtAb[j*9+z] = AtAb[z*9+j];
if (!Math::eliminateUpperTriangular(AtAb, 8, 9))
return 0;
H = Mat_<double>(3,3);
auto * h = (double *) H.data;
h[8] = 1.;
const int m = 8, n = 9;
// start from the last row
for (int i = m-1; i >= 0; i--) {
double acc = 0;
for (int j = i+1; j < n; j++)
acc -= AtAb[i*n+j]*h[j];
h[i] = acc / AtAb[i*n+i];
if (std::isnan(h[i]))
return 0; // numerical imprecision
}
} else {
double a1[9] = {0, 0, -1, 0, 0, 0, 0, 0, 0},
a2[9] = {0, 0, 0, 0, 0, -1, 0, 0, 0}, AtA[81] = {0};
if (weights.empty()) {
for (int i = 0; i < sample_size; i++) {
const int smpl = do_norm ? 4*i : 4*sample[i];
const auto x1 = npts[smpl], y1 = npts[smpl+1], x2 = npts[smpl+2], y2 = npts[smpl+3];
a1[0] = -x1;
a1[1] = -y1;
a1[6] = x2*x1;
a1[7] = x2*y1;
a1[8] = x2;
a2[3] = -x1;
a2[4] = -y1;
a2[6] = y2*x1;
a2[7] = y2*y1;
a2[8] = y2;
for (int j = 0; j < 9; j++)
for (int z = j; z < 9; z++)
AtA[j*9+z] += a1[j]*a1[z] + a2[j]*a2[z];
}
} else { // use weights
for (int i = 0; i < sample_size; i++) {
const double weight = weights[i];
if (weight < FLT_EPSILON) continue;
const int smpl = do_norm ? 4*i : 4*sample[i];
const auto x1 = npts[smpl], y1 = npts[smpl+1], x2 = npts[smpl+2], y2 = npts[smpl+3];
const double minus_weight_times_x1 = -weight * x1,
minus_weight_times_y1 = -weight * y1,
weight_times_x2 = weight * x2,
weight_times_y2 = weight * y2;
a1[0] = minus_weight_times_x1;
a1[1] = minus_weight_times_y1;
a1[2] = -weight;
a1[6] = weight_times_x2 * x1;
a1[7] = weight_times_x2 * y1;
a1[8] = weight_times_x2;
a2[3] = minus_weight_times_x1;
a2[4] = minus_weight_times_y1;
a2[5] = -weight;
a2[6] = weight_times_y2 * x1;
a2[7] = weight_times_y2 * y1;
a2[8] = weight_times_y2;
for (int j = 0; j < 9; j++)
for (int z = j; z < 9; z++)
AtA[j*9+z] += a1[j]*a1[z] + a2[j]*a2[z];
}
}
// copy symmetric part of covariance matrix
for (int j = 1; j < 9; j++)
for (int z = 0; z < j; z++)
AtA[j*9+z] = AtA[z*9+j];
/* #ifdef HAVE_EIGEN
* @norm_points is matrix 4 x inlier_size H = Mat_<double>(3,3);
* @weights is vector of inliers_size // extract the last null-vector
* weights[i] is weight of i-th inlier Eigen::Map<Eigen::Matrix<double, 9, 1>>((double *)H.data) = Eigen::Matrix<double, 9, 9>
*/ (Eigen::HouseholderQR<Eigen::Matrix<double, 9, 9>> (
const auto * const norm_points = (float *) norm_points_.data; (Eigen::Matrix<double, 9, 9> (AtA))).householderQ()).col(8);
#else
Matx<double, 9, 9> Vt;
Vec<double, 9> D;
if (! eigen(Matx<double, 9, 9>(AtA), D, Vt)) return 0;
H = Mat_<double>(3, 3, Vt.val + 72/*=8*9*/);
#endif
}
const auto * const h = (double *) H.data;
const auto * const t1 = do_norm ? T1.val : _T1.val, * const t2 = do_norm ? T2.val : _T2.val;
// H = T2^-1 H T1
models = std::vector<Mat>{ Mat(Matx33d(t1[0]*(h[0]/t2[0] - (h[6]*t2[2])/t2[0]),
t1[0]*(h[1]/t2[0] - (h[7]*t2[2])/t2[0]), h[2]/t2[0] + t1[2]*(h[0]/t2[0] -
(h[6]*t2[2])/t2[0]) + t1[5]*(h[1]/t2[0] - (h[7]*t2[2])/t2[0]) - (h[8]*t2[2])/t2[0],
t1[0]*(h[3]/t2[0] - (h[6]*t2[5])/t2[0]), t1[0]*(h[4]/t2[0] - (h[7]*t2[5])/t2[0]),
h[5]/t2[0] + t1[2]*(h[3]/t2[0] - (h[6]*t2[5])/t2[0]) + t1[5]*(h[4]/t2[0] -
(h[7]*t2[5])/t2[0]) - (h[8]*t2[5])/t2[0], t1[0]*h[6], t1[0]*h[7],
h[8] + h[6]*t1[2] + h[7]*t1[5])) };
return 1;
}
int estimate (const std::vector<bool> &/*mask*/, std::vector<Mat> &/*models*/,
const std::vector<double> &/*weights*/) override {
return 0;
}
int getMinimumRequiredSampleSize() const override { return 4; }
int getMaxNumberOfSolutions () const override { return 1; }
void enforceRankConstraint (bool /*enforce*/) override {}
};
Ptr<HomographyNonMinimalSolver> HomographyNonMinimalSolver::create(const Mat &points_, bool use_ge_) {
return makePtr<HomographyNonMinimalSolverImpl>(points_, use_ge_);
}
Ptr<HomographyNonMinimalSolver> HomographyNonMinimalSolver::create(const Mat &points_, const Matx33d &T1, const Matx33d &T2, bool use_ge) {
return makePtr<HomographyNonMinimalSolverImpl>(points_, T1, T2, use_ge);
}
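The long inlined expression above (which reappears in the covariance and affine solvers below) is the denormalization H = T2^-1 * Hn * T1 written out for the specific structure of the normalization transforms. A minimal reference sketch, assuming T1 and T2 have the isotropic-scale-plus-translation form produced by NormTransform; `denormalizeHomography` is a hypothetical helper, not part of the patch:

```cpp
#include <opencv2/core.hpp>

// Generic (and slower) reference computation of the denormalization step.
// The solver inlines it to avoid forming the explicit inverse, using only the
// scale (t2[0]) and translation (t2[2], t2[5]) entries of T2.
cv::Matx33d denormalizeHomography(const cv::Matx33d& Hn,
                                  const cv::Matx33d& T1, const cv::Matx33d& T2) {
    return T2.inv() * Hn * T1;
}
```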
class CovarianceHomographySolverImpl : public CovarianceHomographySolver {
private:
Mat norm_pts;
Matx33d T1, T2;
float * norm_points;
std::vector<bool> mask;
int points_size;
double covariance[81] = {0}, * t1, * t2;
public:
explicit CovarianceHomographySolverImpl (const Mat &norm_points_, const Matx33d &T1_, const Matx33d &T2_)
: norm_pts(norm_points_), T1(T1_), T2(T2_) {
points_size = norm_points_.rows;
norm_points = (float *) norm_pts.data;
t1 = T1.val; t2 = T2.val;
mask = std::vector<bool>(points_size, false);
}
explicit CovarianceHomographySolverImpl (const Mat &points_) {
points_size = points_.rows;
// normalize points
std::vector<int> sample(points_size);
for (int i = 0; i < points_size; i++) sample[i] = i;
const Ptr<NormTransform> normTr = NormTransform::create(points_);
normTr->getNormTransformation(norm_pts, sample, points_size, T1, T2);
norm_points = (float *) norm_pts.data;
t1 = T1.val; t2 = T2.val;
mask = std::vector<bool>(points_size, false);
}
void reset () override {
// reset covariance matrix to zero and mask to false
std::fill(covariance, covariance+81, 0);
std::fill(mask.begin(), mask.end(), false);
}
/*
* Find homography using 4-point algorithm with covariance matrix and PCA
*/
int estimate (const std::vector<bool> &new_mask, std::vector<Mat> &models,
const std::vector<double> &/*weights*/) override {
double a1[9] = {0, 0, -1, 0, 0, 0, 0, 0, 0}, double a1[9] = {0, 0, -1, 0, 0, 0, 0, 0, 0},
a2[9] = {0, 0, 0, 0, 0, -1, 0, 0, 0}, a2[9] = {0, 0, 0, 0, 0, -1, 0, 0, 0};
AtA[81] = {0};
if (weights.empty()) { for (int i = 0; i < points_size; i++) {
for (int i = 0; i < sample_size; i++) { if (mask[i] != new_mask[i]) {
const int smpl = 4*i; const int smpl = 4*i;
const double x1 = norm_points[smpl ], y1 = norm_points[smpl+1], const double x1 = norm_points[smpl ], y1 = norm_points[smpl+1],
x2 = norm_points[smpl+2], y2 = norm_points[smpl+3]; x2 = norm_points[smpl+2], y2 = norm_points[smpl+3];
@ -123,80 +342,68 @@ public:
a2[7] = y2*y1; a2[7] = y2*y1;
a2[8] = y2; a2[8] = y2;
for (int j = 0; j < 9; j++) if (mask[i]) // if mask[i] is true then new_mask[i] must be false
for (int z = j; z < 9; z++) for (int j = 0; j < 9; j++)
AtA[j*9+z] += a1[j]*a1[z] + a2[j]*a2[z]; for (int z = j; z < 9; z++)
} covariance[j*9+z] +=-a1[j]*a1[z] - a2[j]*a2[z];
} else { else
for (int i = 0; i < sample_size; i++) { for (int j = 0; j < 9; j++)
const int smpl = 4*i; for (int z = j; z < 9; z++)
const double weight = weights[i]; covariance[j*9+z] += a1[j]*a1[z] + a2[j]*a2[z];
const double x1 = norm_points[smpl ], y1 = norm_points[smpl+1],
x2 = norm_points[smpl+2], y2 = norm_points[smpl+3];
const double minus_weight_times_x1 = -weight * x1,
minus_weight_times_y1 = -weight * y1,
weight_times_x2 = weight * x2,
weight_times_y2 = weight * y2;
a1[0] = minus_weight_times_x1;
a1[1] = minus_weight_times_y1;
a1[2] = -weight;
a1[6] = weight_times_x2 * x1;
a1[7] = weight_times_x2 * y1;
a1[8] = weight_times_x2;
a2[3] = minus_weight_times_x1;
a2[4] = minus_weight_times_y1;
a2[5] = -weight;
a2[6] = weight_times_y2 * x1;
a2[7] = weight_times_y2 * y1;
a2[8] = weight_times_y2;
for (int j = 0; j < 9; j++)
for (int z = j; z < 9; z++)
AtA[j*9+z] += a1[j]*a1[z] + a2[j]*a2[z];
} }
} }
mask = new_mask;
// copy symmetric part of covariance matrix // copy symmetric part of covariance matrix
for (int j = 1; j < 9; j++) for (int j = 1; j < 9; j++)
for (int z = 0; z < j; z++) for (int z = 0; z < j; z++)
AtA[j*9+z] = AtA[z*9+j]; covariance[j*9+z] = covariance[z*9+j];
#ifdef HAVE_EIGEN #ifdef HAVE_EIGEN
Mat H = Mat_<double>(3,3); Mat H = Mat_<double>(3,3);
Eigen::HouseholderQR<Eigen::Matrix<double, 9, 9>> qr((Eigen::Matrix<double, 9, 9> (AtA))); // extract the last null-vector
const Eigen::Matrix<double, 9, 9> &Q = qr.householderQ(); Eigen::Map<Eigen::Matrix<double, 9, 1>>((double *)H.data) = Eigen::Matrix<double, 9, 9>
// extract the last nullspace (Eigen::HouseholderQR<Eigen::Matrix<double, 9, 9>> (
Eigen::Map<Eigen::Matrix<double, 9, 1>>((double *)H.data) = Q.col(8); (Eigen::Matrix<double, 9, 9> (covariance))).householderQ()).col(8);
#else #else
Matx<double, 9, 9> Vt; Matx<double, 9, 9> Vt;
Vec<double, 9> D; Vec<double, 9> D;
if (! eigen(Matx<double, 9, 9>(AtA), D, Vt)) return 0; if (! eigen(Matx<double, 9, 9>(covariance), D, Vt)) return 0;
Mat H = Mat_<double>(3, 3, Vt.val + 72/*=8*9*/); Mat H = Mat_<double>(3, 3, Vt.val + 72/*=8*9*/);
#endif #endif
models = std::vector<Mat>{ T2.inv() * H * T1 }; const auto * const h = (double *) H.data;
// H = T2^-1 H T1
models = std::vector<Mat>{ Mat(Matx33d(t1[0]*(h[0]/t2[0] - (h[6]*t2[2])/t2[0]),
t1[0]*(h[1]/t2[0] - (h[7]*t2[2])/t2[0]), h[2]/t2[0] + t1[2]*(h[0]/t2[0] -
(h[6]*t2[2])/t2[0]) + t1[5]*(h[1]/t2[0] - (h[7]*t2[2])/t2[0]) - (h[8]*t2[2])/t2[0],
t1[0]*(h[3]/t2[0] - (h[6]*t2[5])/t2[0]), t1[0]*(h[4]/t2[0] - (h[7]*t2[5])/t2[0]),
h[5]/t2[0] + t1[2]*(h[3]/t2[0] - (h[6]*t2[5])/t2[0]) + t1[5]*(h[4]/t2[0] -
(h[7]*t2[5])/t2[0]) - (h[8]*t2[5])/t2[0], t1[0]*h[6], t1[0]*h[7],
h[8] + h[6]*t1[2] + h[7]*t1[5])) };
return 1; return 1;
} }
void enforceRankConstraint (bool /*enforce*/) override {}
int getMinimumRequiredSampleSize() const override { return 4; } int getMinimumRequiredSampleSize() const override { return 4; }
int getMaxNumberOfSolutions () const override { return 1; } int getMaxNumberOfSolutions () const override { return 1; }
Ptr<NonMinimalSolver> clone () const override {
return makePtr<HomographyNonMinimalSolverImpl>(*points_mat);
}
}; };
Ptr<HomographyNonMinimalSolver> HomographyNonMinimalSolver::create(const Mat &points_) { Ptr<CovarianceHomographySolver> CovarianceHomographySolver::create (const Mat &points) {
return makePtr<HomographyNonMinimalSolverImpl>(points_); return makePtr<CovarianceHomographySolverImpl>(points);
}
Ptr<CovarianceHomographySolver> CovarianceHomographySolver::create (const Mat &points, const Matx33d &T1, const Matx33d &T2) {
return makePtr<CovarianceHomographySolverImpl>(points, T1, T2);
} }
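The covariance solvers reuse work between calls: only points whose inlier flag differs from the previous mask touch the accumulated normal matrix, with removed inliers subtracting their outer-product contribution and new inliers adding it. A simplified sketch of that bookkeeping, assuming a single row per point (`updateNormalMatrix` is a hypothetical helper, not the class above):

```cpp
#include <vector>

// Maintain A^T A across mask changes instead of rebuilding it from scratch.
// 'rows' holds one n-dimensional row per point; only the upper triangle is kept,
// exactly as in the solver above.
void updateNormalMatrix(std::vector<double>& AtA, int n,
                        const std::vector<std::vector<double>>& rows,
                        std::vector<bool>& mask, const std::vector<bool>& new_mask) {
    for (size_t i = 0; i < rows.size(); i++) {
        if (mask[i] == new_mask[i]) continue;         // unchanged points cost nothing
        const double sign = new_mask[i] ? 1.0 : -1.0; // added vs. removed inlier
        const std::vector<double>& a = rows[i];
        for (int j = 0; j < n; j++)
            for (int z = j; z < n; z++)               // upper triangle only
                AtA[j * n + z] += sign * a[j] * a[z];
    }
    mask = new_mask;
}
```

The same sign-dependent accumulation pattern appears again in the CovarianceAffineSolver further down, there for a 6x6 system plus a right-hand side.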
class AffineMinimalSolverImpl : public AffineMinimalSolver { class AffineMinimalSolverImpl : public AffineMinimalSolver {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points;
public: public:
explicit AffineMinimalSolverImpl (const Mat &points_) : explicit AffineMinimalSolverImpl (const Mat &points_) :
points_mat(&points_), points((float *) points_.data) {} points_mat(points_)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
/* /*
Affine transformation Affine transformation
x1 y1 1 0 0 0 a u1 x1 y1 1 0 0 0 a u1
@ -208,6 +415,7 @@ public:
*/ */
int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override { int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override {
const int smpl1 = 4*sample[0], smpl2 = 4*sample[1], smpl3 = 4*sample[2]; const int smpl1 = 4*sample[0], smpl2 = 4*sample[1], smpl3 = 4*sample[2];
const float * points = points_mat.ptr<float>();
const auto const auto
x1 = points[smpl1], y1 = points[smpl1+1], u1 = points[smpl1+2], v1 = points[smpl1+3], x1 = points[smpl1], y1 = points[smpl1+1], u1 = points[smpl1+2], v1 = points[smpl1+3],
x2 = points[smpl2], y2 = points[smpl2+1], u2 = points[smpl2+2], v2 = points[smpl2+3], x2 = points[smpl2], y2 = points[smpl2+1], u2 = points[smpl2+2], v2 = points[smpl2+3],
@ -232,9 +440,6 @@ public:
} }
int getSampleSize() const override { return 3; } int getSampleSize() const override { return 3; }
int getMaxNumberOfSolutions () const override { return 1; } int getMaxNumberOfSolutions () const override { return 1; }
Ptr<MinimalSolver> clone () const override {
return makePtr<AffineMinimalSolverImpl>(*points_mat);
}
}; };
Ptr<AffineMinimalSolver> AffineMinimalSolver::create(const Mat &points_) { Ptr<AffineMinimalSolver> AffineMinimalSolver::create(const Mat &points_) {
return makePtr<AffineMinimalSolverImpl>(points_); return makePtr<AffineMinimalSolverImpl>(points_);
@ -242,23 +447,34 @@ Ptr<AffineMinimalSolver> AffineMinimalSolver::create(const Mat &points_) {
class AffineNonMinimalSolverImpl : public AffineNonMinimalSolver { class AffineNonMinimalSolverImpl : public AffineNonMinimalSolver {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points; Ptr<NormTransform> normTr;
// const NormTransform<double> norm_transform; Matx33d _T1, _T2;
bool do_norm;
public: public:
explicit AffineNonMinimalSolverImpl (const Mat &points_) : explicit AffineNonMinimalSolverImpl (const Mat &points_, InputArray T1, InputArray T2) :
points_mat(&points_), points((float*) points_.data) points_mat(points_) {
/*, norm_transform(points_)*/ {} CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
if (!T1.empty() && !T2.empty()) {
do_norm = false;
_T1 = T1.getMat();
_T2 = T2.getMat();
} else {
do_norm = true;
normTr = NormTransform::create(points_);
}
}
int estimate (const std::vector<int> &sample, int sample_size, std::vector<Mat> &models, int estimate (const std::vector<int> &sample, int sample_size, std::vector<Mat> &models,
const std::vector<double> &weights) const override { const std::vector<double> &weights) const override {
// surprisingly normalization of points does not improve the output model
// Mat norm_points_, T1, T2;
// norm_transform.getNormTransformation(norm_points_, sample, sample_size, T1, T2);
// const auto * const n_pts = (double *) norm_points_.data;
if (sample_size < getMinimumRequiredSampleSize()) if (sample_size < getMinimumRequiredSampleSize())
return 0; return 0;
Matx33d T1, T2;
Mat norm_points_;
if (do_norm)
normTr->getNormTransformation(norm_points_, sample, sample_size, T1, T2);
const float * const pts = normTr ? norm_points_.ptr<float>() : points_mat.ptr<float>();
// do Least Squares // do Least Squares
// Ax = b -> A^T Ax = A^T b // Ax = b -> A^T Ax = A^T b
// x = (A^T A)^-1 A^T b // x = (A^T A)^-1 A^T b
@ -268,12 +484,8 @@ public:
if (weights.empty()) if (weights.empty())
for (int p = 0; p < sample_size; p++) { for (int p = 0; p < sample_size; p++) {
// if (weights != nullptr) weight = weights[sample[p]]; const int idx = do_norm ? 4*p : 4*sample[p];
const auto x1=pts[idx], y1=pts[idx+1], x2=pts[idx+2], y2=pts[idx+3];
const int smpl = 4*sample[p];
const double x1=points[smpl], y1=points[smpl+1], x2=points[smpl+2], y2=points[smpl+3];
// const double x1=n_pts[smpl], y1=n_pts[smpl+1], x2=n_pts[smpl+2], y2=n_pts[smpl+3];
r1[0] = x1; r1[0] = x1;
r1[1] = y1; r1[1] = y1;
@ -288,12 +500,13 @@ public:
} }
else else
for (int p = 0; p < sample_size; p++) { for (int p = 0; p < sample_size; p++) {
const int smpl = 4*sample[p]; const auto weight = weights[p];
const double weight = weights[p]; if (weight < FLT_EPSILON) continue;
const double weight_times_x1 = weight * points[smpl ], const int idx = do_norm ? 4*p : 4*sample[p];
weight_times_y1 = weight * points[smpl+1], const double weight_times_x1 = weight * pts[idx ],
weight_times_x2 = weight * points[smpl+2], weight_times_y1 = weight * pts[idx+1],
weight_times_y2 = weight * points[smpl+3]; weight_times_x2 = weight * pts[idx+2],
weight_times_y2 = weight * pts[idx+3];
r1[0] = weight_times_x1; r1[0] = weight_times_x1;
r1[1] = weight_times_y1; r1[1] = weight_times_y1;
@ -318,21 +531,126 @@ public:
Vec6d aff; Vec6d aff;
if (!solve(Matx66d(AtA), Vec6d(Ab), aff)) if (!solve(Matx66d(AtA), Vec6d(Ab), aff))
return 0; return 0;
models[0] = Mat(Matx33d(aff(0), aff(1), aff(2), const double h[9] = {aff(0), aff(1), aff(2),
aff(3), aff(4), aff(5), aff(3), aff(4), aff(5),
0, 0, 1)); 0, 0, 1};
const auto * const t1 = normTr ? T1.val : _T1.val, * const t2 = normTr ? T2.val : _T2.val;
// models[0] = T2.inv() * models[0] * T1; // A = T2^-1 A T1
models = std::vector<Mat>{ Mat(Matx33d(t1[0]*(h[0]/t2[0] - (h[6]*t2[2])/t2[0]),
t1[0]*(h[1]/t2[0] - (h[7]*t2[2])/t2[0]), h[2]/t2[0] + t1[2]*(h[0]/t2[0] -
(h[6]*t2[2])/t2[0]) + t1[5]*(h[1]/t2[0] - (h[7]*t2[2])/t2[0]) - (h[8]*t2[2])/t2[0],
t1[0]*(h[3]/t2[0] - (h[6]*t2[5])/t2[0]), t1[0]*(h[4]/t2[0] - (h[7]*t2[5])/t2[0]),
h[5]/t2[0] + t1[2]*(h[3]/t2[0] - (h[6]*t2[5])/t2[0]) + t1[5]*(h[4]/t2[0] -
(h[7]*t2[5])/t2[0]) - (h[8]*t2[5])/t2[0], t1[0]*h[6], t1[0]*h[7],
h[8] + h[6]*t1[2] + h[7]*t1[5])) };
return 1; return 1;
} }
int estimate (const std::vector<bool> &/*mask*/, std::vector<Mat> &/*models*/,
const std::vector<double> &/*weights*/) override {
return 0;
}
void enforceRankConstraint (bool /*enforce*/) override {}
int getMinimumRequiredSampleSize() const override { return 3; } int getMinimumRequiredSampleSize() const override { return 3; }
int getMaxNumberOfSolutions () const override { return 1; } int getMaxNumberOfSolutions () const override { return 1; }
Ptr<NonMinimalSolver> clone () const override { };
return makePtr<AffineNonMinimalSolverImpl>(*points_mat); Ptr<AffineNonMinimalSolver> AffineNonMinimalSolver::create(const Mat &points_, InputArray T1, InputArray T2) {
return makePtr<AffineNonMinimalSolverImpl>(points_, T1, T2);
}
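As the comments in the solver note, the non-minimal affine fit solves the normal equations A^T A p = A^T b built from two rows per correspondence. A compact standalone sketch of the same least-squares setup, assuming correspondences stored as (x1, y1, x2, y2) and skipping normalization and weights; `fitAffineLSQ` is illustrative only:

```cpp
#include <opencv2/core.hpp>
#include <vector>

// Each correspondence adds the rows [x1 y1 1 0 0 0 | x2] and [0 0 0 x1 y1 1 | y2]
// to the 6x6 normal equations, solved for p = (a, b, c, d, e, f).
bool fitAffineLSQ(const std::vector<cv::Vec4f>& pts, cv::Matx23d& A_out) {
    cv::Matx66d AtA = cv::Matx66d::zeros();
    cv::Vec6d Atb = cv::Vec6d::all(0);
    for (const auto& p : pts) {
        const double r1[6] = {p[0], p[1], 1, 0, 0, 0};
        const double r2[6] = {0, 0, 0, p[0], p[1], 1};
        for (int j = 0; j < 6; j++) {
            for (int z = 0; z < 6; z++)
                AtA(j, z) += r1[j]*r1[z] + r2[j]*r2[z];
            Atb[j] += r1[j]*p[2] + r2[j]*p[3];
        }
    }
    cv::Vec6d aff;
    if (!cv::solve(AtA, Atb, aff))
        return false;
    A_out = cv::Matx23d(aff(0), aff(1), aff(2), aff(3), aff(4), aff(5));
    return true;
}
```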
class CovarianceAffineSolverImpl : public CovarianceAffineSolver {
private:
Mat norm_pts;
Matx33d T1, T2;
float * norm_points;
std::vector<bool> mask;
int points_size;
double covariance[36] = {0}, Ab[6] = {0}, * t1, * t2;
public:
explicit CovarianceAffineSolverImpl (const Mat &norm_points_, const Matx33d &T1_, const Matx33d &T2_)
: norm_pts(norm_points_), T1(T1_), T2(T2_) {
points_size = norm_points_.rows;
norm_points = (float *) norm_pts.data;
t1 = T1.val; t2 = T2.val;
mask = std::vector<bool>(points_size, false);
}
explicit CovarianceAffineSolverImpl (const Mat &points_) {
points_size = points_.rows;
// normalize points
std::vector<int> sample(points_size);
for (int i = 0; i < points_size; i++) sample[i] = i;
const Ptr<NormTransform> normTr = NormTransform::create(points_);
normTr->getNormTransformation(norm_pts, sample, points_size, T1, T2);
norm_points = (float *) norm_pts.data;
t1 = T1.val; t2 = T2.val;
mask = std::vector<bool>(points_size, false);
}
void reset () override {
std::fill(covariance, covariance+36, 0);
std::fill(Ab, Ab+6, 0);
std::fill(mask.begin(), mask.end(), false);
}
/*
* Find affine transformation using linear method with covariance matrix and PCA
*/
int estimate (const std::vector<bool> &new_mask, std::vector<Mat> &models,
const std::vector<double> &) override {
double r1[6] = {0, 0, 1, 0, 0, 0}; // row 1 of A
double r2[6] = {0, 0, 0, 0, 0, 1}; // row 2 of A
for (int i = 0; i < points_size; i++) {
if (mask[i] != new_mask[i]) {
const int smpl = 4*i;
const double x1 = norm_points[smpl ], y1 = norm_points[smpl+1],
x2 = norm_points[smpl+2], y2 = norm_points[smpl+3];
r1[0] = x1;
r1[1] = y1;
r2[3] = x1;
r2[4] = y1;
if (mask[i]) // if mask[i] is true then new_mask[i] must be false
for (int j = 0; j < 6; j++) {
for (int z = j; z < 6; z++)
covariance[j*6+z] +=-r1[j]*r1[z] - r2[j]*r2[z];
Ab[j] +=-r1[j]*x2 - r2[j]*y2;
}
else
for (int j = 0; j < 6; j++) {
for (int z = j; z < 6; z++)
covariance[j*6+z] += r1[j]*r1[z] + r2[j]*r2[z];
Ab[j] += r1[j]*x2 + r2[j]*y2;
}
}
}
mask = new_mask;
// copy symmetric part of covariance matrix
for (int j = 1; j < 6; j++)
for (int z = 0; z < j; z++)
covariance[j*6+z] = covariance[z*6+j];
Vec6d aff;
if (!solve(Matx66d(covariance), Vec6d(Ab), aff))
return 0;
double a[9] = { aff(0), aff(1), aff(2), aff(3), aff(4), aff(5), 0, 0, 1 };
models = std::vector<Mat>{ Mat(Matx33d(t1[0]*(a[0]/t2[0] - (a[6]*t2[2])/t2[0]),
t1[0]*(a[1]/t2[0] - (a[7]*t2[2])/t2[0]), a[2]/t2[0] + t1[2]*(a[0]/t2[0] -
(a[6]*t2[2])/t2[0]) + t1[5]*(a[1]/t2[0] - (a[7]*t2[2])/t2[0]) - (a[8]*t2[2])/t2[0],
t1[0]*(a[3]/t2[0] - (a[6]*t2[5])/t2[0]), t1[0]*(a[4]/t2[0] - (a[7]*t2[5])/t2[0]),
a[5]/t2[0] + t1[2]*(a[3]/t2[0] - (a[6]*t2[5])/t2[0]) + t1[5]*(a[4]/t2[0] -
(a[7]*t2[5])/t2[0]) - (a[8]*t2[5])/t2[0], t1[0]*a[6], t1[0]*a[7],
a[8] + a[6]*t1[2] + a[7]*t1[5])) };
return 1;
} }
void enforceRankConstraint (bool /*enforce*/) override {}
int getMinimumRequiredSampleSize() const override { return 3; }
int getMaxNumberOfSolutions () const override { return 1; }
}; };
Ptr<AffineNonMinimalSolver> AffineNonMinimalSolver::create(const Mat &points_) { Ptr<CovarianceAffineSolver> CovarianceAffineSolver::create (const Mat &points, const Matx33d &T1, const Matx33d &T2) {
return makePtr<AffineNonMinimalSolverImpl>(points_); return makePtr<CovarianceAffineSolverImpl>(points, T1, T2);
}
Ptr<CovarianceAffineSolver> CovarianceAffineSolver::create (const Mat &points) {
return makePtr<CovarianceAffineSolverImpl>(points);
} }
}} }}

@ -20,16 +20,20 @@ protected:
std::vector<int> labeling_inliers; std::vector<int> labeling_inliers;
std::vector<double> energies, weights; std::vector<double> energies, weights;
std::set<int> used_edges; std::vector<bool> used_edges;
std::vector<Mat> gc_models; std::vector<Mat> gc_models;
Ptr<Termination> termination;
int num_lo_optimizations = 0, current_ransac_iter = 0;
public: public:
void setCurrentRANSACiter (int ransac_iter) override { current_ransac_iter = ransac_iter; }
// In lo_sampler_ the sample size should be set and be equal to gc_sample_size_ // In lo_sampler_ the sample size should be set and be equal to gc_sample_size_
GraphCutImpl (const Ptr<Estimator> &estimator_, const Ptr<Error> &error_, const Ptr<Quality> &quality_, GraphCutImpl (const Ptr<Estimator> &estimator_, const Ptr<Quality> &quality_,
const Ptr<NeighborhoodGraph> &neighborhood_graph_, const Ptr<RandomGenerator> &lo_sampler_, const Ptr<NeighborhoodGraph> &neighborhood_graph_, const Ptr<RandomGenerator> &lo_sampler_,
double threshold_, double spatial_coherence_term, int gc_inner_iteration_number_) : double threshold_, double spatial_coherence_term, int gc_inner_iteration_number_, Ptr<Termination> termination_) :
neighborhood_graph (neighborhood_graph_), estimator (estimator_), quality (quality_), neighborhood_graph (neighborhood_graph_), estimator (estimator_), quality (quality_),
lo_sampler (lo_sampler_), error (error_) { lo_sampler (lo_sampler_), error (quality_->getErrorFnc()), termination(termination_) {
points_size = quality_->getPointsSize(); points_size = quality_->getPointsSize();
spatial_coherence = spatial_coherence_term; spatial_coherence = spatial_coherence_term;
@ -40,7 +44,7 @@ public:
energies = std::vector<double>(points_size); energies = std::vector<double>(points_size);
labeling_inliers = std::vector<int>(points_size); labeling_inliers = std::vector<int>(points_size);
used_edges = std::set<int>(); used_edges = std::vector<bool>(points_size*points_size);
gc_models = std::vector<Mat> (estimator->getMaxNumSolutionsNonMinimal()); gc_models = std::vector<Mat> (estimator->getMaxNumSolutionsNonMinimal());
} }
@ -81,9 +85,13 @@ public:
gc_models[model_idx].copyTo(new_model); gc_models[model_idx].copyTo(new_model);
} }
} }
if (termination != nullptr && is_best_model_updated && current_ransac_iter > termination->update(best_model, best_model_score.inlier_number)) {
is_best_model_updated = false; // to break outer loop
}
} // end of inner GC local optimization } // end of inner GC local optimization
} // end of while loop } // end of while loop
return true; return true;
} }
@ -91,7 +99,6 @@ private:
// find inliers using graph cut algorithm. // find inliers using graph cut algorithm.
int labeling (const Mat& model) { int labeling (const Mat& model) {
const auto &errors = error->getErrors(model); const auto &errors = error->getErrors(model);
detail::GCGraph<double> graph; detail::GCGraph<double> graph;
for (int pt = 0; pt < points_size; pt++) for (int pt = 0; pt < points_size; pt++)
@ -115,7 +122,7 @@ private:
energies[pt] = energy > 1 ? 1 : energy; energies[pt] = energy > 1 ? 1 : energy;
} }
used_edges.clear(); std::fill(used_edges.begin(), used_edges.end(), false);
bool has_edges = false; bool has_edges = false;
// Iterate through all points and set their edges // Iterate through all points and set their edges
@ -125,12 +132,12 @@ private:
// Iterate through all neighbors // Iterate through all neighbors
for (int actual_neighbor_idx : neighborhood_graph->getNeighbors(point_idx)) { for (int actual_neighbor_idx : neighborhood_graph->getNeighbors(point_idx)) {
if (actual_neighbor_idx == point_idx || if (actual_neighbor_idx == point_idx ||
used_edges.count(actual_neighbor_idx*points_size + point_idx) > 0 || used_edges[actual_neighbor_idx*points_size + point_idx] ||
used_edges.count(point_idx*points_size + actual_neighbor_idx) > 0) used_edges[point_idx*points_size + actual_neighbor_idx])
continue; continue;
used_edges.insert(actual_neighbor_idx*points_size + point_idx); used_edges[actual_neighbor_idx*points_size + point_idx] = true;
used_edges.insert(point_idx*points_size + actual_neighbor_idx); used_edges[point_idx*points_size + actual_neighbor_idx] = true;
double a = (0.5 * (energy + energies[actual_neighbor_idx])) * spatial_coherence, double a = (0.5 * (energy + energies[actual_neighbor_idx])) * spatial_coherence,
b = spatial_coherence, c = spatial_coherence, d = 0; b = spatial_coherence, c = spatial_coherence, d = 0;
@ -151,10 +158,8 @@ private:
has_edges = true; has_edges = true;
} }
} }
if (! has_edges)
if (!has_edges)
return quality->getInliers(model, labeling_inliers); return quality->getInliers(model, labeling_inliers);
graph.maxFlow(); graph.maxFlow();
int inlier_number = 0; int inlier_number = 0;
@ -163,23 +168,17 @@ private:
labeling_inliers[inlier_number++] = pt; labeling_inliers[inlier_number++] = pt;
return inlier_number; return inlier_number;
} }
Ptr<LocalOptimization> clone(int state) const override { int getNumLOoptimizations () const override { return num_lo_optimizations; }
return makePtr<GraphCutImpl>(estimator->clone(), error->clone(), quality->clone(),
neighborhood_graph,lo_sampler->clone(state), sqr_trunc_thr / 2.25,
spatial_coherence, lo_inner_iterations);
}
}; };
Ptr<GraphCut> GraphCut::create(const Ptr<Estimator> &estimator_, const Ptr<Error> &error_, Ptr<GraphCut> GraphCut::create(const Ptr<Estimator> &estimator_,
const Ptr<Quality> &quality_, const Ptr<NeighborhoodGraph> &neighborhood_graph_, const Ptr<Quality> &quality_, const Ptr<NeighborhoodGraph> &neighborhood_graph_,
const Ptr<RandomGenerator> &lo_sampler_, double threshold_, const Ptr<RandomGenerator> &lo_sampler_, double threshold_,
double spatial_coherence_term, int gc_inner_iteration_number) { double spatial_coherence_term, int gc_inner_iteration_number, Ptr<Termination> termination_) {
return makePtr<GraphCutImpl>(estimator_, error_, quality_, neighborhood_graph_, lo_sampler_, return makePtr<GraphCutImpl>(estimator_, quality_, neighborhood_graph_, lo_sampler_,
threshold_, spatial_coherence_term, gc_inner_iteration_number); threshold_, spatial_coherence_term, gc_inner_iteration_number, termination_);
} }
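Schematically, the graph-cut labeling below chooses a binary inlier labeling that trades the per-point data term against spatial coherence over the neighborhood graph; a sketch of the objective (notation mine, the exact unary and pairwise weights are those assembled in labeling()):

```latex
% L_p \in \{0,1\} is the inlier label of point p, e_p its truncated residual,
% \lambda the spatial_coherence weight, and N the neighborhood graph.
E(L) = \sum_p D_p(L_p) \;+\; \lambda \sum_{(p,q) \in N} V(L_p, L_q)
```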
/* // http://cmp.felk.cvut.cz/~matas/papers/chum-dagm03.pdf
* http://cmp.felk.cvut.cz/~matas/papers/chum-dagm03.pdf
*/
class InnerIterativeLocalOptimizationImpl : public InnerIterativeLocalOptimization { class InnerIterativeLocalOptimizationImpl : public InnerIterativeLocalOptimization {
private: private:
const Ptr<Estimator> estimator; const Ptr<Estimator> estimator;
@ -197,23 +196,18 @@ private:
double threshold, new_threshold, threshold_step; double threshold, new_threshold, threshold_step;
std::vector<double> weights; std::vector<double> weights;
public: public:
InnerIterativeLocalOptimizationImpl (const Ptr<Estimator> &estimator_, const Ptr<Quality> &quality_, InnerIterativeLocalOptimizationImpl (const Ptr<Estimator> &estimator_, const Ptr<Quality> &quality_,
const Ptr<RandomGenerator> &lo_sampler_, int pts_size, const Ptr<RandomGenerator> &lo_sampler_, int pts_size,
double threshold_, bool is_iterative_, int lo_iter_sample_size_, double threshold_, bool is_iterative_, int lo_iter_sample_size_,
int lo_inner_iterations_=10, int lo_iter_max_iterations_=5, int lo_inner_iterations_=10, int lo_iter_max_iterations_=5,
double threshold_multiplier_=4) double threshold_multiplier_=4)
: estimator (estimator_), quality (quality_), lo_sampler (lo_sampler_) : estimator (estimator_), quality (quality_), lo_sampler (lo_sampler_)
, lo_iter_sample_size(0) , lo_iter_sample_size(0), new_threshold(0), threshold_step(0) {
, new_threshold(0), threshold_step(0)
{
lo_inner_max_iterations = lo_inner_iterations_; lo_inner_max_iterations = lo_inner_iterations_;
lo_iter_max_iterations = lo_iter_max_iterations_; lo_iter_max_iterations = lo_iter_max_iterations_;
threshold = threshold_; threshold = threshold_;
lo_sample_size = lo_sampler->getSubsetSize(); lo_sample_size = lo_sampler->getSubsetSize();
is_iterative = is_iterative_; is_iterative = is_iterative_;
if (is_iterative) { if (is_iterative) {
lo_iter_sample_size = lo_iter_sample_size_; lo_iter_sample_size = lo_iter_sample_size_;
@ -225,7 +219,6 @@ public:
// In the last iteration there will be the original threshold θ. // In the last iteration there will be the original threshold θ.
threshold_step = (new_threshold - threshold) / lo_iter_max_iterations_; threshold_step = (new_threshold - threshold) / lo_iter_max_iterations_;
} }
lo_models = std::vector<Mat>(estimator->getMaxNumSolutionsNonMinimal()); lo_models = std::vector<Mat>(estimator->getMaxNumSolutionsNonMinimal());
// Allocate max memory to avoid reallocation // Allocate max memory to avoid reallocation
@ -327,12 +320,6 @@ public:
} }
return true; return true;
} }
Ptr<LocalOptimization> clone(int state) const override {
return makePtr<InnerIterativeLocalOptimizationImpl>(estimator->clone(), quality->clone(),
lo_sampler->clone(state),(int)inliers_of_best_model.size(), threshold, is_iterative,
lo_iter_sample_size, lo_inner_max_iterations, lo_iter_max_iterations,
new_threshold / threshold);
}
}; };
Ptr<InnerIterativeLocalOptimization> InnerIterativeLocalOptimization::create Ptr<InnerIterativeLocalOptimization> InnerIterativeLocalOptimization::create
(const Ptr<Estimator> &estimator_, const Ptr<Quality> &quality_, (const Ptr<Estimator> &estimator_, const Ptr<Quality> &quality_,
@ -345,293 +332,275 @@ Ptr<InnerIterativeLocalOptimization> InnerIterativeLocalOptimization::create
lo_inner_iterations_, lo_iter_max_iterations_, threshold_multiplier_); lo_inner_iterations_, lo_iter_max_iterations_, threshold_multiplier_);
} }
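For the iterative stage of this LO scheme, the threshold starts at a multiple of the original θ and is reduced by a fixed step so that the final iteration runs with θ itself. Assuming new_threshold = m·θ and k = lo_iter_max_iterations, the schedule implied by threshold_step is:

```latex
\theta_i = m\,\theta - i\,\frac{(m-1)\,\theta}{k}, \qquad i = 0, 1, \dots, k,
\qquad \theta_k = \theta .
```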
class SigmaConsensusImpl : public SigmaConsensus { class SimpleLocalOptimizationImpl : public SimpleLocalOptimization {
private: private:
const Ptr<Estimator> estimator;
const Ptr<Quality> quality; const Ptr<Quality> quality;
const Ptr<Error> error; const Ptr<Error> error;
const Ptr<ModelVerifier> verifier; const Ptr<NonMinimalSolver> estimator;
const GammaValues& gamma_generator; const Ptr<Termination> termination;
// The degrees of freedom of the data from which the model is estimated. const Ptr<RandomGenerator> random_generator;
// E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4. const Ptr<WeightFunction> weight_fnc;
const int degrees_of_freedom; // unlike to @random_generator which has fixed subset size
// A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals // @random_generator_smaller_subset is used to draw smaller
const double k; // amount of points which depends on current number of inliers
// Calculating (DoF - 1) / 2 which will be used for the estimation and, Ptr<RandomGenerator> random_generator_smaller_subset;
// due to being constant, it is better to calculate it a priori. int points_size, max_lo_iters, non_min_sample_size, current_ransac_iter;
double dof_minus_one_per_two; std::vector<double> weights;
const double C; std::vector<int> inliers;
// The size of a minimal sample used for the estimation std::vector<cv::Mat> models;
const int sample_size; double inlier_threshold_sqr;
// Calculating 2^(DoF - 1) which will be used for the estimation and, int num_lo_optimizations = 0;
// due to being constant, it is better to calculate it a priori. bool updated_lo = false;
double two_ad_dof;
// Calculating C * 2^(DoF - 1) which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
double C_times_two_ad_dof;
// Calculating the gamma value of (DoF - 1) / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
double squared_sigma_max_2, one_over_sigma;
// Calculating the upper incomplete gamma value of (DoF - 1) / 2 with k^2 / 2.
const double gamma_k;
// Calculating the lower incomplete gamma value of (DoF - 1) / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
double max_sigma_sqr;
const int points_size, number_of_irwls_iters;
const double maximum_threshold, max_sigma;
std::vector<double> sqr_residuals, sigma_weights;
std::vector<int> sqr_residuals_idxs;
// Models fit by weighted least-squares fitting
std::vector<Mat> sigma_models;
// Points used in the weighted least-squares fitting
std::vector<int> sigma_inliers;
// Weights used in the the weighted least-squares fitting
int max_lo_sample_size, stored_gamma_number_min1;
double scale_of_stored_gammas;
RNG rng;
const std::vector<double> &stored_gamma_values;
public: public:
SimpleLocalOptimizationImpl (const Ptr<Quality> &quality_, const Ptr<NonMinimalSolver> &estimator_,
SigmaConsensusImpl (const Ptr<Estimator> &estimator_, const Ptr<Error> &error_, const Ptr<Termination> termination_, const Ptr<RandomGenerator> &random_gen, Ptr<WeightFunction> weight_fnc_,
const Ptr<Quality> &quality_, const Ptr<ModelVerifier> &verifier_, int max_lo_iters_, double inlier_threshold_sqr_, bool update_lo_) :
int max_lo_sample_size_, int number_of_irwls_iters_, int DoF, quality(quality_), error(quality_->getErrorFnc()), estimator(estimator_), termination(termination_),
double sigma_quantile, double upper_incomplete_of_sigma_quantile, double C_, random_generator(random_gen), weight_fnc(weight_fnc_) {
double maximum_thr) : estimator (estimator_), quality(quality_), max_lo_iters = max_lo_iters_;
error (error_), verifier(verifier_), non_min_sample_size = random_generator->getSubsetSize();
gamma_generator(GammaValues::getSingleton()), current_ransac_iter = 0;
degrees_of_freedom(DoF), k (sigma_quantile), C(C_), inliers = std::vector<int>(quality_->getPointsSize());
sample_size(estimator_->getMinimalSampleSize()), models = std::vector<cv::Mat>(estimator_->getMaxNumberOfSolutions());
gamma_k (upper_incomplete_of_sigma_quantile), points_size (quality_->getPointsSize()), points_size = quality_->getPointsSize();
number_of_irwls_iters (number_of_irwls_iters_), inlier_threshold_sqr = inlier_threshold_sqr_;
maximum_threshold(maximum_thr), max_sigma (maximum_thr), if (weight_fnc != nullptr) weights = std::vector<double>(points_size);
stored_gamma_values(gamma_generator.getGammaValues()) random_generator_smaller_subset = nullptr;
{ updated_lo = update_lo_;
dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0;
two_ad_dof = std::pow(2.0, dof_minus_one_per_two);
C_times_two_ad_dof = C * two_ad_dof;
// Calculate 2 * \sigma_{max}^2 a priori
squared_sigma_max_2 = max_sigma * max_sigma * 2.0;
// Divide C * 2^(DoF - 1) by \sigma_{max} a priori
one_over_sigma = C_times_two_ad_dof / max_sigma;
max_sigma_sqr = squared_sigma_max_2 * 0.5;
sqr_residuals = std::vector<double>(points_size);
sqr_residuals_idxs = std::vector<int>(points_size);
sigma_inliers = std::vector<int>(points_size);
max_lo_sample_size = max_lo_sample_size_;
sigma_weights = std::vector<double>(points_size);
sigma_models = std::vector<Mat>(estimator->getMaxNumSolutionsNonMinimal());
stored_gamma_number_min1 = gamma_generator.getTableSize()-1;
scale_of_stored_gammas = gamma_generator.getScaleOfGammaValues();
} }
void setCurrentRANSACiter (int ransac_iter) override { current_ransac_iter = ransac_iter; }
int getNumLOoptimizations () const override { return num_lo_optimizations; }
bool refineModel (const Mat &best_model, const Score &best_model_score, Mat &new_model, Score &new_model_score) override {
new_model_score = best_model_score;
best_model.copyTo(new_model);
// https://github.com/danini/magsac int num_inliers;
bool refineModel (const Mat &in_model, const Score &best_model_score, if (weights.empty())
Mat &new_model, Score &new_model_score) override { num_inliers = Quality::getInliers(error, best_model, inliers, inlier_threshold_sqr);
int residual_cnt = 0; else num_inliers = weight_fnc->getInliersWeights(error->getErrors(best_model), inliers, weights);
auto update_generator = [&] (int num_inls) {
if (verifier->isModelGood(in_model)) { if (num_inls <= non_min_sample_size) {
if (verifier->hasErrors()) { // add new random generator if number of inliers is fewer than non-minimal sample size
const std::vector<float> &errors = verifier->getErrors(); const int new_sample_size = (int)(0.6*num_inls);
for (int point_idx = 0; point_idx < points_size; ++point_idx) { if (new_sample_size <= estimator->getMinimumRequiredSampleSize())
// Calculate the residual of the current point return false;
const auto residual = sqrtf(errors[point_idx]); if (random_generator_smaller_subset == nullptr)
if (max_sigma > residual) { random_generator_smaller_subset = UniformRandomGenerator::create(num_inls/*state*/, quality->getPointsSize(), new_sample_size);
// Store the residual of the current point and its index else random_generator_smaller_subset->setSubsetSize(new_sample_size);
sqr_residuals[residual_cnt] = residual; }
sqr_residuals_idxs[residual_cnt++] = point_idx; return true;
} };
if (!update_generator(num_inliers))
return false;
// Interrupt if there is no chance of being better int max_lo_iters_ = max_lo_iters, last_update = 0, last_inliers_update = 0;
if (residual_cnt + points_size - point_idx < best_model_score.inlier_number) for (int iter = 0; iter < max_lo_iters_; iter++) {
return false; int num_models;
} if (num_inliers <= non_min_sample_size)
} else { num_models = estimator->estimate(new_model, random_generator_smaller_subset->generateUniqueRandomSubset(inliers, num_inliers),
error->setModelParameters(in_model); random_generator_smaller_subset->getSubsetSize(), models, weights);
else num_models = estimator->estimate(new_model, random_generator->generateUniqueRandomSubset(inliers, num_inliers), non_min_sample_size, models, weights);
for (int point_idx = 0; point_idx < points_size; ++point_idx) { for (int m = 0; m < num_models; m++) {
const double sqr_residual = error->getError(point_idx); const auto score = quality->getScore(models[m]);
if (sqr_residual < max_sigma_sqr) { if (score.isBetter(new_model_score)) {
// Store the residual of the current point and its index last_update = iter; last_inliers_update = new_model_score.inlier_number - score.inlier_number;
sqr_residuals[residual_cnt] = sqr_residual; if (updated_lo) {
sqr_residuals_idxs[residual_cnt++] = point_idx; if (max_lo_iters_ < iter + 5 && last_inliers_update >= 1) {
max_lo_iters_ = iter + 5;
}
} }
models[m].copyTo(new_model);
if (residual_cnt + points_size - point_idx < best_model_score.inlier_number) new_model_score = score;
return false; if (termination != nullptr && current_ransac_iter > termination->update(new_model, new_model_score.inlier_number))
} return true; // terminate LO if max number of iterations reached
} if (score.inlier_number >= best_model_score.inlier_number ||
} else return false; score.inlier_number > non_min_sample_size) {
// update inliers if new model has more than previous best model
in_model.copyTo(new_model); if (weights.empty())
new_model_score = Score(); num_inliers = Quality::getInliers(error, best_model, inliers, inlier_threshold_sqr);
else num_inliers = weight_fnc->getInliersWeights(error->getErrors(best_model), inliers, weights);
// Do the iteratively re-weighted least squares fitting if (!update_generator(num_inliers))
for (int iterations = 0; iterations < number_of_irwls_iters; iterations++) { return true;
int sigma_inliers_cnt = 0;
// If the current iteration is not the first, the set of possibly inliers
// (i.e., points closer than the maximum threshold) have to be recalculated.
if (iterations > 0) {
// error->setModelParameters(polished_model);
error->setModelParameters(new_model);
// Remove everything from the residual vector
residual_cnt = 0;
// Collect the points which are closer than the maximum threshold
for (int point_idx = 0; point_idx < points_size; ++point_idx) {
// Calculate the residual of the current point
const double sqr_residual = error->getError(point_idx);
if (sqr_residual < max_sigma_sqr) {
// Store the residual of the current point and its index
sqr_residuals[residual_cnt] = sqr_residual;
sqr_residuals_idxs[residual_cnt++] = point_idx;
} }
}
sigma_inliers_cnt = 0;
}
// Calculate the weight of each point
for (int i = 0; i < residual_cnt; i++) {
// Get the position of the gamma value in the lookup table
int x = (int)round(scale_of_stored_gammas * sqr_residuals[i]
/ squared_sigma_max_2);
// If the sought gamma value is not stored in the lookup, return the closest element
if (x >= stored_gamma_number_min1 || x < 0 /*overflow*/) // actual number of gamma values is 1 more, so >=
x = stored_gamma_number_min1;
sigma_inliers[sigma_inliers_cnt] = sqr_residuals_idxs[i]; // store index of point for LSQ
sigma_weights[sigma_inliers_cnt++] = one_over_sigma * (stored_gamma_values[x] - gamma_k);
}
// random shuffle sigma inliers
if (sigma_inliers_cnt > max_lo_sample_size)
for (int i = sigma_inliers_cnt-1; i > 0; i--) {
const int idx = rng.uniform(0, i+1);
std::swap(sigma_inliers[i], sigma_inliers[idx]);
std::swap(sigma_weights[i], sigma_weights[idx]);
}
const int num_est_models = estimator->estimateModelNonMinimalSample
(sigma_inliers, std::min(max_lo_sample_size, sigma_inliers_cnt),
sigma_models, sigma_weights);
if (num_est_models == 0)
break; // break iterations
// Update the model parameters
Mat polished_model = sigma_models[0];
if (num_est_models > 1) {
// find best over other models
Score sigma_best_score = quality->getScore(polished_model);
for (int m = 1; m < num_est_models; m++) {
const Score sc = quality->getScore(sigma_models[m]);
if (sc.isBetter(sigma_best_score)) {
polished_model = sigma_models[m];
sigma_best_score = sc;
}
} }
} }
if (updated_lo && iter - last_update >= 10) {
const Score polished_model_score = quality->getScore(polished_model); break;
if (polished_model_score.isBetter(new_model_score)){
new_model_score = polished_model_score;
polished_model.copyTo(new_model);
} }
} }
const Score in_model_score = quality->getScore(in_model);
if (in_model_score.isBetter(new_model_score)) {
new_model_score = in_model_score;
in_model.copyTo(new_model);
}
return true; return true;
} }
Ptr<LocalOptimization> clone(int state) const override { };
return makePtr<SigmaConsensusImpl>(estimator->clone(), error->clone(), quality->clone(), Ptr<SimpleLocalOptimization> SimpleLocalOptimization::create (const Ptr<Quality> &quality_,
verifier->clone(state), max_lo_sample_size, const Ptr<NonMinimalSolver> &estimator_, const Ptr<Termination> termination_, const Ptr<RandomGenerator> &random_gen,
number_of_irwls_iters, degrees_of_freedom, k, gamma_k, C, maximum_threshold); const Ptr<WeightFunction> weight_fnc, int max_lo_iters_, double inlier_thr_sqr, bool updated_lo) {
return makePtr<SimpleLocalOptimizationImpl> (quality_, estimator_, termination_, random_gen, weight_fnc, max_lo_iters_, inlier_thr_sqr, updated_lo);
}
class MagsacWeightFunctionImpl : public MagsacWeightFunction {
private:
const std::vector<double> &stored_gamma_values;
double C, max_sigma, max_sigma_sqr, scale_of_stored_gammas, one_over_sigma, gamma_k, squared_sigma_max_2, rescale_err;
int DoF;
unsigned int stored_gamma_number_min1;
public:
MagsacWeightFunctionImpl (const Ptr<GammaValues> &gamma_generator,
int DoF_, double upper_incomplete_of_sigma_quantile, double C_, double max_sigma_) :
stored_gamma_values (gamma_generator->getGammaValues()) {
gamma_k = upper_incomplete_of_sigma_quantile;
stored_gamma_number_min1 = static_cast<unsigned int>(gamma_generator->getTableSize()-1);
scale_of_stored_gammas = gamma_generator->getScaleOfGammaValues();
DoF = DoF_; C = C_;
max_sigma = max_sigma_;
squared_sigma_max_2 = max_sigma * max_sigma * 2.0;
one_over_sigma = C * pow(2.0, (DoF - 1.0) * 0.5) / max_sigma;
max_sigma_sqr = squared_sigma_max_2 * 0.5;
rescale_err = scale_of_stored_gammas / squared_sigma_max_2;
}
int getInliersWeights (const std::vector<float> &errors, std::vector<int> &inliers, std::vector<double> &weights) const override {
return getInliersWeights(errors, inliers, weights, one_over_sigma, rescale_err, max_sigma_sqr);
}
int getInliersWeights (const std::vector<float> &errors, std::vector<int> &inliers, std::vector<double> &weights, double thr_sqr) const override {
const auto _max_sigma = thr_sqr;
const auto _squared_sigma_max_2 = _max_sigma * _max_sigma * 2.0;
const auto _one_over_sigma = C * pow(2.0, (DoF - 1.0) * 0.5) / _max_sigma;
const auto _max_sigma_sqr = _squared_sigma_max_2 * 0.5;
const auto _rescale_err = scale_of_stored_gammas / _squared_sigma_max_2;
return getInliersWeights(errors, inliers, weights, _one_over_sigma, _rescale_err, _max_sigma_sqr);
}
double getThreshold () const override {
return max_sigma_sqr;
}
private:
int getInliersWeights (const std::vector<float> &errors, std::vector<int> &inliers, std::vector<double> &weights,
double _one_over_sigma, double _rescale_err, double _max_sigma_sqr) const {
int num_inliers = 0, p = 0;
for (const auto &e : errors) {
// check whether the residual of the current point is below the threshold
if (e < _max_sigma_sqr) {
// Get the position of the gamma value in the lookup table
auto x = static_cast<unsigned int>(_rescale_err * e);
if (x > stored_gamma_number_min1)
x = stored_gamma_number_min1;
inliers[num_inliers] = p; // store index of point for LSQ
weights[num_inliers++] = _one_over_sigma * (stored_gamma_values[x] - gamma_k);
}
p++;
}
return num_inliers;
} }
}; };
Ptr<SigmaConsensus> Ptr<MagsacWeightFunction> MagsacWeightFunction::create(const Ptr<GammaValues> &gamma_generator_,
SigmaConsensus::create(const Ptr<Estimator> &estimator_, const Ptr<Error> &error_, int DoF_, double upper_incomplete_of_sigma_quantile, double C_, double max_sigma_) {
const Ptr<Quality> &quality, const Ptr<ModelVerifier> &verifier_, return makePtr<MagsacWeightFunctionImpl>(gamma_generator_, DoF_, upper_incomplete_of_sigma_quantile, C_, max_sigma_);
int max_lo_sample_size, int number_of_irwls_iters_, int DoF,
double sigma_quantile, double upper_incomplete_of_sigma_quantile, double C_,
double maximum_thr) {
return makePtr<SigmaConsensusImpl>(estimator_, error_, quality, verifier_,
max_lo_sample_size, number_of_irwls_iters_, DoF, sigma_quantile,
upper_incomplete_of_sigma_quantile, C_, maximum_thr);
} }
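The weight returned by getInliersWeights follows the MAGSAC marginalization: a squared residual below the threshold is mapped into the precomputed incomplete-gamma table and converted into a weight of the form one_over_sigma * (gamma(x) - gamma_k). A sketch of that lookup with assumed table semantics (`magsacWeight` is illustrative, not the class method):

```cpp
#include <cstddef>
#include <vector>

// e is the squared residual; gamma_table is the precomputed lower incomplete
// gamma table, rescale_err its index scaling, gamma_k the upper incomplete
// gamma value at the quantile, and max_sigma_sqr the inlier threshold.
double magsacWeight(double e, const std::vector<double>& gamma_table,
                    double rescale_err, double one_over_sigma, double gamma_k,
                    double max_sigma_sqr) {
    if (e >= max_sigma_sqr)
        return 0.0;                                   // outlier -> zero weight
    std::size_t x = static_cast<std::size_t>(rescale_err * e); // table index
    if (x >= gamma_table.size())
        x = gamma_table.size() - 1;                   // clamp to the last entry
    return one_over_sigma * (gamma_table[x] - gamma_k);
}
```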
/////////////////////////////////////////// FINAL MODEL POLISHER //////////////////////// /////////////////////////////////////////// FINAL MODEL POLISHER ////////////////////////
class LeastSquaresPolishingImpl : public LeastSquaresPolishing { class NonMinimalPolisherImpl : public NonMinimalPolisher {
private: private:
const Ptr<Estimator> estimator;
const Ptr<Quality> quality; const Ptr<Quality> quality;
int lsq_iterations; const Ptr<NonMinimalSolver> solver;
std::vector<int> inliers; const Ptr<Error> error_fnc;
const Ptr<WeightFunction> weight_fnc;
std::vector<bool> mask, mask_best;
std::vector<Mat> models; std::vector<Mat> models;
std::vector<double> weights; std::vector<double> weights;
std::vector<float> errors_best;
std::vector<int> inliers;
double threshold, iou_thr, max_thr;
int max_iters, points_size;
bool is_covariance, CHANGE_WEIGHTS = true;
public: public:
NonMinimalPolisherImpl (const Ptr<Quality> &quality_, const Ptr<NonMinimalSolver> &solver_,
LeastSquaresPolishingImpl(const Ptr<Estimator> &estimator_, const Ptr<Quality> &quality_, Ptr<WeightFunction> weight_fnc_, int max_iters_, double iou_thr_) :
int lsq_iterations_) : quality(quality_), solver(solver_), error_fnc(quality_->getErrorFnc()), weight_fnc(weight_fnc_) {
estimator(estimator_), quality(quality_) { max_iters = max_iters_;
lsq_iterations = lsq_iterations_; points_size = quality_->getPointsSize();
// allocate memory for inliers array and models threshold = quality_->getThreshold();
inliers = std::vector<int>(quality_->getPointsSize()); iou_thr = iou_thr_;
models = std::vector<Mat>(estimator->getMaxNumSolutionsNonMinimal()); is_covariance = dynamic_cast<const cv::usac::CovarianceSolver*>(solver_.get()) != nullptr;
mask = std::vector<bool>(points_size);
mask_best = std::vector<bool>(points_size);
inliers = std::vector<int>(points_size);
if (weight_fnc != nullptr) {
weights = std::vector<double>(points_size);
max_thr = weight_fnc->getThreshold();
if (is_covariance)
CV_Error(cv::Error::StsBadArg, "Covariance polisher cannot be combined with weights!");
}
} }
bool polishSoFarTheBestModel(const Mat &model, const Score &best_model_score, bool polishSoFarTheBestModel (const Mat &model, const Score &best_model_score,
Mat &new_model, Score &out_score) override { Mat &new_model, Score &new_model_score) override {
// get inliers from input model int num_inliers = 0;
int inlier_number = quality->getInliers(model, inliers); if (weights.empty()) {
if (inlier_number < estimator->getMinimalSampleSize()) quality->getInliers(model, mask_best);
return false; if (!is_covariance)
for (int p = 0; p < points_size; p++)
out_score = Score(); // set the worst case if (mask_best[p]) inliers[num_inliers++] = p;
} else {
// several all-inlier least-squares refines model better than only one but for errors_best = error_fnc->getErrors(model);
// big amount of points may be too time-consuming. num_inliers = weight_fnc->getInliersWeights(errors_best, inliers, weights, max_thr);
for (int lsq_iter = 0; lsq_iter < lsq_iterations; lsq_iter++) { }
bool model_updated = false; new_model_score = best_model_score;
model.copyTo(new_model);
// estimate non minimal models with all inliers int last_update = -1;
const int num_models = estimator->estimateModelNonMinimalSample(inliers, for (int iter = 0; iter < max_iters; iter++) {
inlier_number, models, weights); int num_sols;
for (int model_idx = 0; model_idx < num_models; model_idx++) { if (is_covariance) num_sols = solver->estimate(mask_best, models, weights);
const Score score = quality->getScore(models[model_idx]); else num_sols = solver->estimate(new_model, inliers, num_inliers, models, weights);
if (best_model_score.isBetter(score)) Score prev_score;
continue; for (int i = 0; i < num_sols; i++) {
if (score.isBetter(out_score)) { const auto &errors = error_fnc->getErrors(models[i]);
models[model_idx].copyTo(new_model); const auto score = quality->getScore(errors);
out_score = score; if (score.isBetter(new_model_score)) {
model_updated = true; last_update = iter;
models[i].copyTo(new_model);
errors_best = errors;
prev_score = new_model_score;
new_model_score = score;
} }
} }
if (weights.empty()) {
if (!model_updated) if (iter > last_update)
// if model was not updated at the first iteration then return false break;
// otherwise if all-inliers LSQ has not updated model then no sense else {
// to do it again -> return true (model was updated before). Quality::getInliers(errors_best, mask, threshold);
return lsq_iter > 0; if (Utils::intersectionOverUnion(mask, mask_best) >= iou_thr)
return true;
// if number of inliers doesn't increase more than 5% then break mask_best = mask;
if (fabs(static_cast<double>(out_score.inlier_number) - static_cast<double> num_inliers = 0;
(best_model_score.inlier_number)) / best_model_score.inlier_number < 0.05) if (!is_covariance)
return true; for (int p = 0; p < points_size; p++)
if (mask_best[p]) inliers[num_inliers++] = p;
if (lsq_iter != lsq_iterations - 1) }
// if not the last LSQ normalization then get inliers for next normalization } else {
inlier_number = quality->getInliers(new_model, inliers); if (iter > last_update) {
// new model is worse
if (CHANGE_WEIGHTS) {
// if failed more than 5 times then break
if (iter - std::max(0, last_update) >= 5)
break;
// try to change weights by changing threshold
if (fabs(new_model_score.score - prev_score.score) < FLT_EPSILON) {
// increase threshold if the new model score is the same as the previous one
max_thr *= 1.05; // increase by 5%
} else if (iter > last_update) {
// decrease max threshold if model is worse
max_thr *= 0.9; // decrease by 10%
}
} else break; // break if not changing weights
}
// generate new weights and inliers
num_inliers = weight_fnc->getInliersWeights(errors_best, inliers, weights, max_thr);
}
} }
return true; return last_update >= 0;
} }
}; };
Ptr<LeastSquaresPolishing> LeastSquaresPolishing::create (const Ptr<Estimator> &estimator_, Ptr<NonMinimalPolisher> NonMinimalPolisher::create(const Ptr<Quality> &quality_, const Ptr<NonMinimalSolver> &solver_,
const Ptr<Quality> &quality_, int lsq_iterations_) { Ptr<WeightFunction> weight_fnc_, int max_iters_, double iou_thr_) {
return makePtr<LeastSquaresPolishingImpl>(estimator_, quality_, lsq_iterations_); return makePtr<NonMinimalPolisherImpl>(quality_, solver_, weight_fnc_, max_iters_, iou_thr_);
} }
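The polisher stops iterating once the inlier set stabilizes, i.e. when the intersection over union of consecutive masks reaches iou_thr. A sketch of the assumed semantics of Utils::intersectionOverUnion over boolean masks (`maskIoU` is a hypothetical stand-in):

```cpp
#include <vector>

// IoU of two inlier masks of equal length; 1.0 means the masks are identical
// (or both empty), so polishing can stop.
double maskIoU(const std::vector<bool>& a, const std::vector<bool>& b) {
    int inter = 0, uni = 0;
    for (std::size_t i = 0; i < a.size(); i++) {
        inter += (a[i] && b[i]);
        uni   += (a[i] || b[i]);
    }
    return uni == 0 ? 1.0 : static_cast<double>(inter) / uni;
}
```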
}} }}

@ -28,11 +28,6 @@ public:
return 1; return 1;
} }
Ptr <MinimalSolver> clone() const override
{
return makePtr<PlaneModelMinimalSolverImpl>(*points_mat);
}
/** [a, b, c, d] <--> ax + by + cz + d =0 /** [a, b, c, d] <--> ax + by + cz + d =0
Use the cross product of the vectors in the plane to calculate the plane normal vector. Use the cross product of the vectors in the plane to calculate the plane normal vector.
Use the plane normal vector and the points in the plane to calculate the coefficient d. Use the plane normal vector and the points in the plane to calculate the coefficient d.
@ -98,11 +93,6 @@ public:
return 1; return 1;
} }
Ptr <NonMinimalSolver> clone() const override
{
return makePtr<PlaneModelNonMinimalSolverImpl>(*points_mat);
}
/** [a, b, c, d] <--> ax + by + cz + d =0 /** [a, b, c, d] <--> ax + by + cz + d =0
Use total least squares (PCA can achieve the same calculation) to estimate the plane model equation. Use total least squares (PCA can achieve the same calculation) to estimate the plane model equation.
*/ */
@ -139,6 +129,11 @@ public:
models.emplace_back(cv::Mat(1, 4, CV_64F, plane_coeffs).clone()); models.emplace_back(cv::Mat(1, 4, CV_64F, plane_coeffs).clone());
return 1; return 1;
} }
int estimate (const std::vector<bool> &/*mask*/, std::vector<Mat> &/*models*/,
const std::vector<double> &/*weights*/) override {
return 0;
}
void enforceRankConstraint (bool /*enforce*/) override {}
}; };
Ptr <PlaneModelNonMinimalSolver> PlaneModelNonMinimalSolver::create(const Mat &points_) Ptr <PlaneModelNonMinimalSolver> PlaneModelNonMinimalSolver::create(const Mat &points_)

@ -15,15 +15,17 @@
namespace cv { namespace usac { namespace cv { namespace usac {
class PnPMinimalSolver6PtsImpl : public PnPMinimalSolver6Pts { class PnPMinimalSolver6PtsImpl : public PnPMinimalSolver6Pts {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points;
public: public:
// linear solver: 6 points required (11 equations) // linear solver: 6 points required (11 equations)
int getSampleSize() const override { return 6; } int getSampleSize() const override { return 6; }
int getMaxNumberOfSolutions () const override { return 1; } int getMaxNumberOfSolutions () const override { return 1; }
explicit PnPMinimalSolver6PtsImpl (const Mat &points_) : explicit PnPMinimalSolver6PtsImpl (const Mat &points_) :
points_mat(&points_), points ((float*)points_.data) {} points_mat(points_)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
/* /*
DLT: DLT:
d x = P X, x = (u, v, 1), X = (X, Y, Z, 1), P = K[R t] d x = P X, x = (u, v, 1), X = (X, Y, Z, 1), P = K[R t]
@ -72,6 +74,7 @@ public:
int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override { int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override {
std::vector<double> A1 (60, 0), A2(56, 0); // 5x12, 7x8 std::vector<double> A1 (60, 0), A2(56, 0); // 5x12, 7x8
const float * points = points_mat.ptr<float>();
int cnt1 = 0, cnt2 = 0; int cnt1 = 0, cnt2 = 0;
for (int i = 0; i < 6; i++) { for (int i = 0; i < 6; i++) {
@ -100,7 +103,7 @@ public:
A2[cnt2++] = -v * Z; A2[cnt2++] = -v * Z;
A2[cnt2++] = -v; A2[cnt2++] = -v;
} }
// matrix is sparse -> do not test for singularity
Math::eliminateUpperTriangular(A1, 5, 12); Math::eliminateUpperTriangular(A1, 5, 12);
int offset = 4*12; int offset = 4*12;
@ -139,13 +142,9 @@ public:
if (std::isnan(p[i])) if (std::isnan(p[i]))
return 0; return 0;
} }
models = std::vector<Mat>{P}; models = std::vector<Mat>{P};
return 1; return 1;
} }
Ptr<MinimalSolver> clone () const override {
return makePtr<PnPMinimalSolver6PtsImpl>(*points_mat);
}
}; };
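As the DLT comment in the 6-point solver above notes, each correspondence (u, v) <-> (X, Y, Z) with d·x = P·X contributes two linear equations in the 12 entries of P. A sketch of those two rows (`dltRows` is a hypothetical helper; the exact row ordering and elimination strategy inside the solver may differ):

```cpp
#include <array>

// Two DLT rows for one 2D-3D correspondence; unknowns are the 12 entries of P (row-major).
// From u = (P1·X)/(P3·X) and v = (P2·X)/(P3·X):  P1·X - u(P3·X) = 0,  P2·X - v(P3·X) = 0.
void dltRows(double u, double v, double X, double Y, double Z,
             std::array<double, 12> &r1, std::array<double, 12> &r2) {
    r1 = { X, Y, Z, 1,  0, 0, 0, 0,  -u*X, -u*Y, -u*Z, -u };
    r2 = { 0, 0, 0, 0,  X, Y, Z, 1,  -v*X, -v*Y, -v*Z, -v };
}
```

Stacking such rows for the sampled correspondences and taking the null vector of the resulting homogeneous system (by elimination or SVD) recovers P up to scale.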
Ptr<PnPMinimalSolver6Pts> PnPMinimalSolver6Pts::create(const Mat &points_) { Ptr<PnPMinimalSolver6Pts> PnPMinimalSolver6Pts::create(const Mat &points_) {
return makePtr<PnPMinimalSolver6PtsImpl>(points_); return makePtr<PnPMinimalSolver6PtsImpl>(points_);
@ -153,14 +152,17 @@ Ptr<PnPMinimalSolver6Pts> PnPMinimalSolver6Pts::create(const Mat &points_) {
class PnPNonMinimalSolverImpl : public PnPNonMinimalSolver { class PnPNonMinimalSolverImpl : public PnPNonMinimalSolver {
private: private:
const Mat * points_mat; Mat points_mat;
const float * const points;
public: public:
explicit PnPNonMinimalSolverImpl (const Mat &points_) : explicit PnPNonMinimalSolverImpl (const Mat &points_) :
points_mat(&points_), points ((float*)points_.data){} points_mat(points_)
{
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
}
int estimate (const std::vector<int> &sample, int sample_size, int estimate (const std::vector<int> &sample, int sample_size,
std::vector<Mat> &models, const std::vector<double> &weights) const override { std::vector<Mat> &models, const std::vector<double> &weights) const override {
const float * points = points_mat.ptr<float>();
if (sample_size < 6) if (sample_size < 6)
return 0; return 0;
@ -190,7 +192,7 @@ public:
a2[10] = v * Z; a2[10] = v * Z;
a2[11] = v; a2[11] = v;
// fill covarinace matrix // fill covariance matrix
for (int j = 0; j < 12; j++) for (int j = 0; j < 12; j++)
for (int z = j; z < 12; z++) for (int z = j; z < 12; z++)
AtA[j * 12 + z] += a1[j] * a1[z] + a2[j] * a2[z]; AtA[j * 12 + z] += a1[j] * a1[z] + a2[j] * a2[z];
@ -221,7 +223,7 @@ public:
a2[10] = v * weight_Z; a2[10] = v * weight_Z;
a2[11] = v * weight; a2[11] = v * weight;
// fill covarinace matrix // fill covariance matrix
for (int j = 0; j < 12; j++) for (int j = 0; j < 12; j++)
for (int z = j; z < 12; z++) for (int z = j; z < 12; z++)
AtA[j * 12 + z] += a1[j] * a1[z] + a2[j] * a2[z]; AtA[j * 12 + z] += a1[j] * a1[z] + a2[j] * a2[z];
@ -236,7 +238,7 @@ public:
models = std::vector<Mat>{ Mat_<double>(3,4) }; models = std::vector<Mat>{ Mat_<double>(3,4) };
Eigen::HouseholderQR<Eigen::Matrix<double, 12, 12>> qr((Eigen::Matrix<double, 12, 12>(AtA))); Eigen::HouseholderQR<Eigen::Matrix<double, 12, 12>> qr((Eigen::Matrix<double, 12, 12>(AtA)));
const Eigen::Matrix<double, 12, 12> &Q = qr.householderQ(); const Eigen::Matrix<double, 12, 12> &Q = qr.householderQ();
// extract the last nullspace // extract the last null-vector
Eigen::Map<Eigen::Matrix<double, 12, 1>>((double *)models[0].data) = Q.col(11); Eigen::Map<Eigen::Matrix<double, 12, 1>>((double *)models[0].data) = Q.col(11);
#else #else
Matx<double, 12, 12> Vt; Matx<double, 12, 12> Vt;
@ -246,35 +248,57 @@ public:
#endif #endif
return 1; return 1;
} }
int estimate (const std::vector<bool> &/*mask*/, std::vector<Mat> &/*models*/,
const std::vector<double> &/*weights*/) override {
return 0;
}
void enforceRankConstraint (bool /*enforce*/) override {}
int getMinimumRequiredSampleSize() const override { return 6; } int getMinimumRequiredSampleSize() const override { return 6; }
int getMaxNumberOfSolutions () const override { return 1; } int getMaxNumberOfSolutions () const override { return 1; }
Ptr<NonMinimalSolver> clone () const override {
return makePtr<PnPNonMinimalSolverImpl>(*points_mat);
}
}; };
Ptr<PnPNonMinimalSolver> PnPNonMinimalSolver::create(const Mat &points) { Ptr<PnPNonMinimalSolver> PnPNonMinimalSolver::create(const Mat &points) {
return makePtr<PnPNonMinimalSolverImpl>(points); return makePtr<PnPNonMinimalSolverImpl>(points);
} }
class PnPSVDSolverImpl : public PnPSVDSolver {
private:
std::vector<double> w;
Ptr<PnPNonMinimalSolver> pnp_solver;
public:
int getSampleSize() const override { return 6; }
int getMaxNumberOfSolutions () const override { return 1; }
explicit PnPSVDSolverImpl (const Mat &points_) {
pnp_solver = PnPNonMinimalSolver::create(points_);
}
int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override {
return pnp_solver->estimate(sample, 6, models, w);
}
};
Ptr<PnPSVDSolver> PnPSVDSolver::create(const Mat &points_) {
return makePtr<PnPSVDSolverImpl>(points_);
}
class P3PSolverImpl : public P3PSolver { class P3PSolverImpl : public P3PSolver {
private: private:
/* /*
* calibrated normalized points * calibrated normalized points
* K^-1 [u v 1]^T / ||K^-1 [u v 1]^T|| * K^-1 [u v 1]^T / ||K^-1 [u v 1]^T||
*/ */
const Mat * points_mat, * calib_norm_points_mat; const Mat points_mat, calib_norm_points_mat;
const Matx33d * K_mat, &K; const Matx33d K;
const float * const calib_norm_points, * const points;
const double VAL_THR = 1e-4; const double VAL_THR = 1e-4;
public: public:
/* /*
* @points_ is matrix N x 5 * @points_ is matrix N x 5
* u v x y z. (u,v) is image point, (x y z) is world point * u v x y z. (u,v) is image point, (x y z) is world point
*/ */
P3PSolverImpl (const Mat &points_, const Mat &calib_norm_points_, const Matx33d &K_) : P3PSolverImpl (const Mat &points_, const Mat &calib_norm_points_, const Mat &K_) :
points_mat(&points_), calib_norm_points_mat(&calib_norm_points_), K_mat (&K_), points_mat(points_), calib_norm_points_mat(calib_norm_points_), K(K_)
K(K_), calib_norm_points((float*)calib_norm_points_.data), points((float*)points_.data) {} {
CV_DbgAssert(!points_mat.empty() && points_mat.isContinuous());
CV_DbgAssert(!calib_norm_points_mat.empty() && calib_norm_points_mat.isContinuous());
}
int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override { int estimate (const std::vector<int> &sample, std::vector<Mat> &models) const override {
/* /*
@ -282,6 +306,9 @@ public:
* http://cmp.felk.cvut.cz/~pajdla/gvg/GVG-2016-Lecture.pdf * http://cmp.felk.cvut.cz/~pajdla/gvg/GVG-2016-Lecture.pdf
* pages: 51-59 * pages: 51-59
*/ */
const float * points = points_mat.ptr<float>();
const float * calib_norm_points = calib_norm_points_mat.ptr<float>();
const int idx1 = 5*sample[0], idx2 = 5*sample[1], idx3 = 5*sample[2]; const int idx1 = 5*sample[0], idx2 = 5*sample[1], idx3 = 5*sample[2];
const Vec3d X1 (points[idx1+2], points[idx1+3], points[idx1+4]); const Vec3d X1 (points[idx1+2], points[idx1+3], points[idx1+4]);
const Vec3d X2 (points[idx2+2], points[idx2+3], points[idx2+4]); const Vec3d X2 (points[idx2+2], points[idx2+3], points[idx2+4]);
@ -362,8 +389,8 @@ public:
zw[2] = Z3crZ1w(0); zw[5] = Z3crZ1w(1); zw[8] = Z3crZ1w(2); zw[2] = Z3crZ1w(0); zw[5] = Z3crZ1w(1); zw[8] = Z3crZ1w(2);
const Matx33d R = Math::rotVec2RotMat(Math::rotMat2RotVec(Z * Zw.inv())); const Matx33d R = Math::rotVec2RotMat(Math::rotMat2RotVec(Z * Zw.inv()));
Matx33d KR = K * R;
Mat P, KR = Mat(K * R); Matx34d P;
hconcat(KR, -KR * (X1 - R.t() * nX1), P); hconcat(KR, -KR * (X1 - R.t() * nX1), P);
models.emplace_back(P); models.emplace_back(P);
} }
@ -371,11 +398,8 @@ public:
} }
int getSampleSize() const override { return 3; } int getSampleSize() const override { return 3; }
int getMaxNumberOfSolutions () const override { return 4; } int getMaxNumberOfSolutions () const override { return 4; }
Ptr<MinimalSolver> clone () const override {
return makePtr<P3PSolverImpl>(*points_mat, *calib_norm_points_mat, *K_mat);
}
}; };
Ptr<P3PSolver> P3PSolver::create(const Mat &points_, const Mat &calib_norm_pts, const Matx33d &K) { Ptr<P3PSolver> P3PSolver::create(const Mat &points_, const Mat &calib_norm_pts, const Mat &K) {
return makePtr<P3PSolverImpl>(points_, calib_norm_pts, K); return makePtr<P3PSolverImpl>(points_, calib_norm_pts, K);
} }
}} }}
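The P3P solver above works on "calibrated normalized points", i.e. K^-1 [u v 1]^T scaled to unit length. A sketch of that normalization step for an intrinsic matrix with zero skew (`bearing` is a hypothetical helper, not part of the module):

```cpp
#include <array>
#include <cmath>

// Unit bearing vector for pixel (u, v): normalize(K^{-1} [u v 1]^T).
// Assumes K = [fx 0 cx; 0 fy cy; 0 0 1], i.e. zero skew, for brevity.
std::array<double, 3> bearing(double u, double v, double fx, double fy, double cx, double cy) {
    const double x = (u - cx) / fx, y = (v - cy) / fy, z = 1.0;
    const double n = std::sqrt(x*x + y*y + z*z);
    return { x/n, y/n, z/n };
}
```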

@ -25,6 +25,27 @@ int Quality::getInliers(const Ptr<Error> &error, const Mat &model, std::vector<b
} }
return num_inliers; return num_inliers;
} }
int Quality::getInliers (const std::vector<float> &errors, std::vector<bool> &inliers, double threshold) {
std::fill(inliers.begin(), inliers.end(), false);
int cnt = 0, inls = 0;
for (const auto e : errors) {
if (e < threshold) {
inliers[cnt] = true;
inls++;
}
cnt++;
}
return inls;
}
int Quality::getInliers (const std::vector<float> &errors, std::vector<int> &inliers, double threshold) {
int cnt = 0, inls = 0;
for (const auto e : errors) {
if (e < threshold)
inliers[inls++] = cnt;
cnt++;
}
return inls;
}
class RansacQualityImpl : public RansacQuality { class RansacQualityImpl : public RansacQuality {
private: private:
@ -39,16 +60,25 @@ public:
} }
Score getScore (const Mat &model) const override { Score getScore (const Mat &model) const override {
const std::vector<float>& errors = error->getErrors(model); error->setModelParameters(model);
int inlier_number = 0; int inlier_number = 0;
for (int point = 0; point < points_size; point++) { const auto preemptive_thr = -points_size - best_score;
for (int point = 0; point < points_size; point++)
if (error->getError(point) < threshold)
inlier_number++;
else if (inlier_number - point < preemptive_thr)
break;
// score is negative inlier number! The smaller, the better

return {inlier_number, -static_cast<double>(inlier_number)};
}
Score getScore (const std::vector<float> &errors) const override {
int inlier_number = 0;
for (int point = 0; point < points_size; point++)
if (errors[point] < threshold) if (errors[point] < threshold)
inlier_number++; inlier_number++;
if (inlier_number + (points_size - point) < -best_score)
break;
}
// score is negative inlier number! The smaller, the better // score is negative inlier number! The smaller, the better
return Score(inlier_number, -static_cast<double>(inlier_number)); return {inlier_number, -static_cast<double>(inlier_number)};
} }
void setBestScore(double best_score_) override { void setBestScore(double best_score_) override {
@ -61,11 +91,9 @@ public:
{ return Quality::getInliers(error, model, inliers, thr); } { return Quality::getInliers(error, model, inliers, thr); }
int getInliers (const Mat &model, std::vector<bool> &inliers_mask) const override int getInliers (const Mat &model, std::vector<bool> &inliers_mask) const override
{ return Quality::getInliers(error, model, inliers_mask, threshold); } { return Quality::getInliers(error, model, inliers_mask, threshold); }
double getThreshold () const override { return threshold; }
int getPointsSize () const override { return points_size; } int getPointsSize () const override { return points_size; }
Ptr<Quality> clone () const override { Ptr<Error> getErrorFnc () const override { return error; }
return makePtr<RansacQualityImpl>(points_size, threshold, error->clone());
}
}; };
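The RANSAC quality above counts inliers and stops as soon as even a perfect remaining tail of the data could no longer beat the best score found so far. A simplified sketch of that preemptive count over a plain residual vector (`ransacScore` is a hypothetical helper; the class above additionally folds the bound into `preemptive_thr` so nothing is recomputed per point):

```cpp
#include <vector>

// Count inliers, breaking out once (current inliers + remaining points) can
// no longer exceed the best inlier count seen so far.
int ransacScore(const std::vector<float> &errors, float thr, int best_so_far) {
    const int n = static_cast<int>(errors.size());
    int inliers = 0;
    for (int i = 0; i < n; i++) {
        if (errors[i] < thr)
            inliers++;
        else if (inliers + (n - i) < best_so_far) // preemptive termination
            break;
    }
    return inliers;
}
```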
Ptr<RansacQuality> RansacQuality::create(int points_size_, double threshold_, Ptr<RansacQuality> RansacQuality::create(int points_size_, double threshold_,
@ -77,13 +105,13 @@ class MsacQualityImpl : public MsacQuality {
protected: protected:
const Ptr<Error> error; const Ptr<Error> error;
const int points_size; const int points_size;
const double threshold; const double threshold, k_msac;
double best_score, norm_thr, one_over_thr; double best_score, norm_thr, one_over_thr;
public: public:
MsacQualityImpl (int points_size_, double threshold_, const Ptr<Error> &error_) MsacQualityImpl (int points_size_, double threshold_, const Ptr<Error> &error_, double k_msac_)
: error (error_), points_size (points_size_), threshold (threshold_) { : error (error_), points_size (points_size_), threshold (threshold_), k_msac(k_msac_) {
best_score = std::numeric_limits<double>::max(); best_score = std::numeric_limits<double>::max();
norm_thr = threshold*9/4; norm_thr = threshold*k_msac;
one_over_thr = 1 / norm_thr; one_over_thr = 1 / norm_thr;
} }
@ -91,17 +119,31 @@ public:
error->setModelParameters(model); error->setModelParameters(model);
double err, sum_errors = 0; double err, sum_errors = 0;
int inlier_number = 0; int inlier_number = 0;
const auto preemptive_thr = points_size + best_score;
for (int point = 0; point < points_size; point++) { for (int point = 0; point < points_size; point++) {
err = error->getError(point); err = error->getError(point);
if (err < norm_thr) { if (err < norm_thr) {
sum_errors -= (1 - err * one_over_thr); sum_errors -= (1 - err * one_over_thr);
if (err < threshold) if (err < threshold)
inlier_number++; inlier_number++;
} } else if (sum_errors + point > preemptive_thr)
if (sum_errors - points_size + point > best_score)
break; break;
} }
return Score(inlier_number, sum_errors); return {inlier_number, sum_errors};
}
Score getScore (const std::vector<float> &errors) const override {
double sum_errors = 0;
int inlier_number = 0;
for (int point = 0; point < points_size; point++) {
const auto err = errors[point];
if (err < norm_thr) {
sum_errors -= (1 - err * one_over_thr);
if (err < threshold)
inlier_number++;
}
}
return {inlier_number, sum_errors};
} }
void setBestScore(double best_score_) override { void setBestScore(double best_score_) override {
@ -114,103 +156,61 @@ public:
{ return Quality::getInliers(error, model, inliers, thr); } { return Quality::getInliers(error, model, inliers, thr); }
int getInliers (const Mat &model, std::vector<bool> &inliers_mask) const override int getInliers (const Mat &model, std::vector<bool> &inliers_mask) const override
{ return Quality::getInliers(error, model, inliers_mask, threshold); } { return Quality::getInliers(error, model, inliers_mask, threshold); }
double getThreshold () const override { return threshold; }
int getPointsSize () const override { return points_size; } int getPointsSize () const override { return points_size; }
Ptr<Quality> clone () const override { Ptr<Error> getErrorFnc () const override { return error; }
return makePtr<MsacQualityImpl>(points_size, threshold, error->clone());
}
}; };
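MSAC replaces the 0/1 vote with a truncated loss: each residual below norm_thr = k_msac * threshold contributes -(1 - err / norm_thr), so a perfect fit contributes -1, a residual at the cutoff contributes 0, and the score is minimized. A sketch of the per-point loss using the formerly hard-coded factor 9/4 as the default (`msacScore` is a hypothetical helper and omits the preemptive break shown above):

```cpp
#include <vector>

// Truncated MSAC-style loss over squared residuals; lower is better.
double msacScore(const std::vector<float> &sq_errors, double threshold, double k_msac = 2.25) {
    const double norm_thr = threshold * k_msac;
    double score = 0.0;
    for (float e : sq_errors)
        if (e < norm_thr)
            score -= (1.0 - e / norm_thr);
    return score;
}
```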
Ptr<MsacQuality> MsacQuality::create(int points_size_, double threshold_, Ptr<MsacQuality> MsacQuality::create(int points_size_, double threshold_,
const Ptr<Error> &error_) { const Ptr<Error> &error_, double k_msac) {
return makePtr<MsacQualityImpl>(points_size_, threshold_, error_); return makePtr<MsacQualityImpl>(points_size_, threshold_, error_, k_msac);
} }
class MagsacQualityImpl : public MagsacQuality { class MagsacQualityImpl : public MagsacQuality {
private: private:
const Ptr<Error> error; const Ptr<Error> error;
const GammaValues& gamma_generator; const Ptr<GammaValues> gamma_generator;
const int points_size; const int points_size;
// for example, maximum standard deviation of noise. // for example, maximum standard deviation of noise.
const double maximum_threshold_sqr, tentative_inlier_threshold; const double maximum_threshold_sqr, tentative_inlier_threshold;
// The degrees of freedom of the data from which the model is estimated.
// E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4.
const int degrees_of_freedom;
// A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals
const double k;
// Calculating k^2 / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
double squared_k_per_2;
// Calculating (DoF - 1) / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
double dof_minus_one_per_two;
// Calculating (DoF + 1) / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
double dof_plus_one_per_two;
const double C;
// Calculating 2^(DoF - 1) which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
double two_ad_dof_minus_one;
// Calculating 2^(DoF + 1) which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
double two_ad_dof_plus_one;
// Calculate the gamma value of k // Calculate the gamma value of k
const double gamma_value_of_k; const double gamma_value_of_k;
// Calculate the lower incomplete gamma value of k
const double lower_gamma_value_of_k;
double previous_best_loss; double previous_best_loss;
// Convert the maximum threshold to a sigma value
float maximum_sigma;
// Calculate the squared maximum sigma
float maximum_sigma_2;
// Calculate \sigma_{max}^2 / 2
float maximum_sigma_2_per_2; float maximum_sigma_2_per_2;
// Calculate 2 * \sigma_{max}^2
float maximum_sigma_2_times_2;
// Calculating 2^(DoF + 1) / \sigma_{max} which will be used for the estimation and, // Calculating 2^(DoF + 1) / \sigma_{max} which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori. // due to being constant, it is better to calculate it a priori.
double two_ad_dof_plus_one_per_maximum_sigma; double two_ad_dof_plus_one_per_maximum_sigma, rescale_err, norm_loss;
double scale_of_stored_incomplete_gammas;
double max_loss;
const std::vector<double> &stored_complete_gamma_values, &stored_lower_incomplete_gamma_values; const std::vector<double> &stored_complete_gamma_values, &stored_lower_incomplete_gamma_values;
int stored_incomplete_gamma_number_min1; unsigned int stored_incomplete_gamma_number_min1;
public: public:
MagsacQualityImpl (double maximum_thr, int points_size_, const Ptr<Error> &error_, MagsacQualityImpl (double maximum_thr, int points_size_, const Ptr<Error> &error_,
const Ptr<GammaValues> &gamma_generator_,
double tentative_inlier_threshold_, int DoF, double sigma_quantile, double tentative_inlier_threshold_, int DoF, double sigma_quantile,
double upper_incomplete_of_sigma_quantile, double upper_incomplete_of_sigma_quantile)
double lower_incomplete_of_sigma_quantile, double C_) : error (error_), gamma_generator(gamma_generator_), points_size(points_size_),
: error (error_), gamma_generator(GammaValues::getSingleton()), points_size(points_size_),
maximum_threshold_sqr(maximum_thr*maximum_thr), maximum_threshold_sqr(maximum_thr*maximum_thr),
tentative_inlier_threshold(tentative_inlier_threshold_), degrees_of_freedom(DoF), tentative_inlier_threshold(tentative_inlier_threshold_),
k(sigma_quantile), C(C_), gamma_value_of_k (upper_incomplete_of_sigma_quantile), gamma_value_of_k (upper_incomplete_of_sigma_quantile),
lower_gamma_value_of_k (lower_incomplete_of_sigma_quantile), stored_complete_gamma_values (gamma_generator->getCompleteGammaValues()),
stored_complete_gamma_values(gamma_generator.getCompleteGammaValues()), stored_lower_incomplete_gamma_values (gamma_generator->getIncompleteGammaValues()) {
stored_lower_incomplete_gamma_values(gamma_generator.getIncompleteGammaValues())
{
previous_best_loss = std::numeric_limits<double>::max(); previous_best_loss = std::numeric_limits<double>::max();
squared_k_per_2 = k * k / 2.0; const auto maximum_sigma = (float)sqrt(maximum_threshold_sqr) / sigma_quantile;
dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0; const auto maximum_sigma_2 = (float) (maximum_sigma * maximum_sigma);
dof_plus_one_per_two = (degrees_of_freedom + 1.0) / 2.0;
two_ad_dof_minus_one = std::pow(2.0, dof_minus_one_per_two);
two_ad_dof_plus_one = std::pow(2.0, dof_plus_one_per_two);
maximum_sigma = (float)sqrt(maximum_threshold_sqr) / (float) k;
maximum_sigma_2 = maximum_sigma * maximum_sigma;
maximum_sigma_2_per_2 = maximum_sigma_2 / 2.f; maximum_sigma_2_per_2 = maximum_sigma_2 / 2.f;
maximum_sigma_2_times_2 = maximum_sigma_2 * 2.f; const auto maximum_sigma_2_times_2 = maximum_sigma_2 * 2.f;
two_ad_dof_plus_one_per_maximum_sigma = two_ad_dof_plus_one / maximum_sigma; two_ad_dof_plus_one_per_maximum_sigma = pow(2.0, (DoF + 1.0)*.5)/maximum_sigma;
scale_of_stored_incomplete_gammas = gamma_generator.getScaleOfGammaCompleteValues(); rescale_err = gamma_generator->getScaleOfGammaCompleteValues() / maximum_sigma_2_times_2;
stored_incomplete_gamma_number_min1 = gamma_generator.getTableSize()-1; stored_incomplete_gamma_number_min1 = static_cast<unsigned int>(gamma_generator->getTableSize()-1);
max_loss = 1e-10;
// MAGSAC maximum / minimum loss does not have to be in extrumum residuals double max_loss = 1e-10;
// make 50 iterations to find maximum loss // MAGSAC maximum / minimum loss does not have to be in extremum residuals
// make 30 iterations to find maximum loss
const double step = maximum_threshold_sqr / 30; const double step = maximum_threshold_sqr / 30;
double sqr_res = 0; double sqr_res = 0;
while (sqr_res < maximum_threshold_sqr) { while (sqr_res < maximum_threshold_sqr) {
int x=(int)round(scale_of_stored_incomplete_gammas * sqr_res auto x= static_cast<unsigned int>(rescale_err * sqr_res);
/ maximum_sigma_2_times_2); if (x > stored_incomplete_gamma_number_min1)
if (x >= stored_incomplete_gamma_number_min1 || x < 0 /*overflow*/) x = stored_incomplete_gamma_number_min1;
x = stored_incomplete_gamma_number_min1;
const double loss = two_ad_dof_plus_one_per_maximum_sigma * (maximum_sigma_2_per_2 * const double loss = two_ad_dof_plus_one_per_maximum_sigma * (maximum_sigma_2_per_2 *
stored_lower_incomplete_gamma_values[x] + sqr_res * 0.25 * stored_lower_incomplete_gamma_values[x] + sqr_res * 0.25 *
(stored_complete_gamma_values[x] - gamma_value_of_k)); (stored_complete_gamma_values[x] - gamma_value_of_k));
@ -218,6 +218,7 @@ public:
max_loss = loss; max_loss = loss;
sqr_res += step; sqr_res += step;
} }
norm_loss = two_ad_dof_plus_one_per_maximum_sigma / max_loss;
} }
// https://github.com/danini/magsac // https://github.com/danini/magsac
@ -225,26 +226,25 @@ public:
error->setModelParameters(model); error->setModelParameters(model);
double total_loss = 0.0; double total_loss = 0.0;
int num_tentative_inliers = 0; int num_tentative_inliers = 0;
const auto preemptive_thr = points_size + previous_best_loss;
for (int point_idx = 0; point_idx < points_size; point_idx++) { for (int point_idx = 0; point_idx < points_size; point_idx++) {
const float squared_residual = error->getError(point_idx); const float squared_residual = error->getError(point_idx);
if (squared_residual < tentative_inlier_threshold) if (squared_residual < tentative_inlier_threshold)
num_tentative_inliers++; num_tentative_inliers++;
if (squared_residual < maximum_threshold_sqr) { // consider point as inlier if (squared_residual < maximum_threshold_sqr) { // consider point as inlier
// Get the position of the gamma value in the lookup table // Get the position of the gamma value in the lookup table
int x=(int)round(scale_of_stored_incomplete_gammas * squared_residual auto x = static_cast<unsigned int>(rescale_err * squared_residual);
/ maximum_sigma_2_times_2);
// If the sought gamma value is not stored in the lookup, return the closest element // If the sought gamma value is not stored in the lookup, return the closest element
if (x >= stored_incomplete_gamma_number_min1 || x < 0 /*overflow*/) if (x > stored_incomplete_gamma_number_min1)
x = stored_incomplete_gamma_number_min1; x = stored_incomplete_gamma_number_min1;
// Calculate the loss implied by the current point // Calculate the loss implied by the current point
total_loss -= (1 - two_ad_dof_plus_one_per_maximum_sigma * (maximum_sigma_2_per_2 * total_loss -= (1 - (maximum_sigma_2_per_2 *
stored_lower_incomplete_gamma_values[x] + squared_residual * 0.25 * stored_lower_incomplete_gamma_values[x] + squared_residual * 0.25 *
(stored_complete_gamma_values[x] - gamma_value_of_k)) / max_loss); (stored_complete_gamma_values[x] - gamma_value_of_k)) * norm_loss);
} } else if (total_loss + point_idx > preemptive_thr)
if (total_loss - (points_size - point_idx) > previous_best_loss)
break; break;
} }
return Score(num_tentative_inliers, total_loss); return {num_tentative_inliers, total_loss};
} }
Score getScore (const std::vector<float> &errors) const override { Score getScore (const std::vector<float> &errors) const override {
@ -255,18 +255,15 @@ public:
if (squared_residual < tentative_inlier_threshold) if (squared_residual < tentative_inlier_threshold)
num_tentative_inliers++; num_tentative_inliers++;
if (squared_residual < maximum_threshold_sqr) { if (squared_residual < maximum_threshold_sqr) {
int x=(int)round(scale_of_stored_incomplete_gammas * squared_residual auto x = static_cast<unsigned int>(rescale_err * squared_residual);
/ maximum_sigma_2_times_2); if (x > stored_incomplete_gamma_number_min1)
if (x >= stored_incomplete_gamma_number_min1 || x < 0 /*overflow*/) x = stored_incomplete_gamma_number_min1;
x = stored_incomplete_gamma_number_min1; total_loss -= (1 - (maximum_sigma_2_per_2 *
total_loss -= (1 - two_ad_dof_plus_one_per_maximum_sigma * (maximum_sigma_2_per_2 *
stored_lower_incomplete_gamma_values[x] + squared_residual * 0.25 * stored_lower_incomplete_gamma_values[x] + squared_residual * 0.25 *
(stored_complete_gamma_values[x] - gamma_value_of_k)) / max_loss); (stored_complete_gamma_values[x] - gamma_value_of_k)) * norm_loss);
} }
if (total_loss - (points_size - point_idx) > previous_best_loss)
break;
} }
return Score(num_tentative_inliers, total_loss); return {num_tentative_inliers, total_loss};
} }
void setBestScore (double best_loss) override { void setBestScore (double best_loss) override {
@ -279,20 +276,16 @@ public:
{ return Quality::getInliers(error, model, inliers, thr); } { return Quality::getInliers(error, model, inliers, thr); }
int getInliers (const Mat &model, std::vector<bool> &inliers_mask) const override int getInliers (const Mat &model, std::vector<bool> &inliers_mask) const override
{ return Quality::getInliers(error, model, inliers_mask, tentative_inlier_threshold); } { return Quality::getInliers(error, model, inliers_mask, tentative_inlier_threshold); }
double getThreshold () const override { return tentative_inlier_threshold; }
int getPointsSize () const override { return points_size; } int getPointsSize () const override { return points_size; }
Ptr<Quality> clone () const override { Ptr<Error> getErrorFnc () const override { return error; }
return makePtr<MagsacQualityImpl>(maximum_sigma, points_size, error->clone(),
tentative_inlier_threshold, degrees_of_freedom,
k, gamma_value_of_k, lower_gamma_value_of_k, C);
}
}; };
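The MAGSAC quality turns each squared residual into an index into the precomputed gamma tables; the rewritten code folds the scaling constants into rescale_err and clamps out-of-range indices to the last bucket instead of rounding and range-checking. A sketch of just that clamped lookup (`lossBucket` is a hypothetical helper with a stand-in scale, not the real gamma machinery):

```cpp
#include <cstddef>

// Map a squared residual to a bucket of a precomputed lookup table.
// rescale_err plays the role of table_scale / (2 * sigma_max^2) in the code above.
std::size_t lossBucket(double sq_residual, double rescale_err, std::size_t table_size) {
    auto x = static_cast<std::size_t>(rescale_err * sq_residual);
    if (x > table_size - 1)      // residuals past the table use the last stored value
        x = table_size - 1;
    return x;
}
```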
Ptr<MagsacQuality> MagsacQuality::create(double maximum_thr, int points_size_, const Ptr<Error> &error_, Ptr<MagsacQuality> MagsacQuality::create(double maximum_thr, int points_size_, const Ptr<Error> &error_,
const Ptr<GammaValues> &gamma_generator,
double tentative_inlier_threshold_, int DoF, double sigma_quantile, double tentative_inlier_threshold_, int DoF, double sigma_quantile,
double upper_incomplete_of_sigma_quantile, double upper_incomplete_of_sigma_quantile) {
double lower_incomplete_of_sigma_quantile, double C_) { return makePtr<MagsacQualityImpl>(maximum_thr, points_size_, error_, gamma_generator,
return makePtr<MagsacQualityImpl>(maximum_thr, points_size_, error_, tentative_inlier_threshold_, DoF, sigma_quantile, upper_incomplete_of_sigma_quantile);
tentative_inlier_threshold_, DoF, sigma_quantile, upper_incomplete_of_sigma_quantile,
lower_incomplete_of_sigma_quantile, C_);
} }
class LMedsQualityImpl : public LMedsQuality { class LMedsQualityImpl : public LMedsQuality {
@ -312,7 +305,16 @@ public:
if (errors[point] < threshold) if (errors[point] < threshold)
inlier_number++; inlier_number++;
// score is median of errors // score is median of errors
return Score(inlier_number, Utils::findMedian (errors)); return {inlier_number, Utils::findMedian (errors)};
}
Score getScore (const std::vector<float> &errors_) const override {
std::vector<float> errors = errors_;
int inlier_number = 0;
for (int point = 0; point < points_size; point++)
if (errors[point] < threshold)
inlier_number++;
// score is median of errors
return {inlier_number, Utils::findMedian (errors)};
} }
void setBestScore (double /*best_score*/) override {} void setBestScore (double /*best_score*/) override {}
@ -324,10 +326,8 @@ public:
{ return Quality::getInliers(error, model, inliers, thr); } { return Quality::getInliers(error, model, inliers, thr); }
int getInliers (const Mat &model, std::vector<bool> &inliers_mask) const override int getInliers (const Mat &model, std::vector<bool> &inliers_mask) const override
{ return Quality::getInliers(error, model, inliers_mask, threshold); } { return Quality::getInliers(error, model, inliers_mask, threshold); }
double getThreshold () const override { return threshold; }
Ptr<Quality> clone () const override { Ptr<Error> getErrorFnc () const override { return error; }
return makePtr<LMedsQualityImpl>(points_size, threshold, error->clone());
}
}; };
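LMedS scores a model by the median of its residuals, which is why the getScore overload above copies the error vector before calling Utils::findMedian. A sketch of that selection step (`medianError` is a hypothetical helper; for an even count it returns the upper median, a common simplification):

```cpp
#include <algorithm>
#include <vector>

// Median of residuals; the argument is taken by value because nth_element reorders it.
float medianError(std::vector<float> errors) {
    const auto mid = errors.size() / 2;
    std::nth_element(errors.begin(), errors.begin() + mid, errors.end());
    return errors[mid];
}
```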
Ptr<LMedsQuality> LMedsQuality::create(int points_size_, double threshold_, const Ptr<Error> &error_) { Ptr<LMedsQuality> LMedsQuality::create(int points_size_, double threshold_, const Ptr<Error> &error_) {
return makePtr<LMedsQualityImpl>(points_size_, threshold_, error_); return makePtr<LMedsQualityImpl>(points_size_, threshold_, error_);
@ -335,47 +335,53 @@ Ptr<LMedsQuality> LMedsQuality::create(int points_size_, double threshold_, cons
class ModelVerifierImpl : public ModelVerifier { class ModelVerifierImpl : public ModelVerifier {
private: private:
std::vector<float> errors; Ptr<Quality> quality;
public: public:
inline bool isModelGood(const Mat &/*model*/) override { return true; } ModelVerifierImpl (const Ptr<Quality> &q) : quality(q) {}
inline bool getScore(Score &/*score*/) const override { return false; } inline bool isModelGood(const Mat &model, Score &score) override {
void update (int /*highest_inlier_number*/) override {} score = quality->getScore(model);
const std::vector<float> &getErrors() const override { return errors; } return true;
bool hasErrors () const override { return false; } }
Ptr<ModelVerifier> clone (int /*state*/) const override { return makePtr<ModelVerifierImpl>();} void update (const Score &/*score*/, int /*iteration*/) override {}
void reset() override {}
void updateSPRT (double , double , double , double , double , const Score &) override {}
}; };
Ptr<ModelVerifier> ModelVerifier::create() { Ptr<ModelVerifier> ModelVerifier::create(const Ptr<Quality> &quality) {
return makePtr<ModelVerifierImpl>(); return makePtr<ModelVerifierImpl>(quality);
} }
///////////////////////////////////// SPRT VERIFIER ////////////////////////////////////////// class AdaptiveSPRTImpl : public AdaptiveSPRT {
class SPRTImpl : public SPRT {
private: private:
RNG rng; RNG rng;
const Ptr<Error> err; const Ptr<Error> err;
const Ptr<Quality> quality;
const int points_size; const int points_size;
int highest_inlier_number, current_sprt_idx; // i int highest_inlier_number, last_iteration;
// time t_M needed to instantiate a model hypothesis given a sample // time t_M needed to instantiate a model hypothesis given a sample
// Let m_S be the number of models that are verified per sample // Let m_S be the number of models that are verified per sample
const double inlier_threshold, norm_thr, one_over_thr, t_M, m_S; const double inlier_threshold, norm_thr, one_over_thr;
double lowest_sum_errors, current_epsilon, current_delta, current_A, // alpha is false negative rate, alpha = 1 / A
delta_to_epsilon, complement_delta_to_complement_epsilon; double t_M, lowest_sum_errors, current_epsilon, current_delta, current_A,
delta_to_epsilon, complement_delta_to_complement_epsilon,
time_ver_corr_sprt = 0, time_ver_corr = 0,
one_over_complement_alpha, avg_num_checked_pts;
std::vector<SPRT_history> sprt_histories; std::vector<SPRT_history> sprt_histories, empty;
std::vector<int> points_random_pool; std::vector<int> points_random_pool;
std::vector<float> errors; std::vector<float> errors;
Score score; bool do_sprt, adapt, IS_ADAPTIVE;
const ScoreMethod score_type; const ScoreMethod score_type;
bool last_model_is_good, can_compute_score, has_errors; double m_S;
public: public:
SPRTImpl (int state, const Ptr<Error> &err_, int points_size_, AdaptiveSPRTImpl (int state, const Ptr<Quality> &quality_, int points_size_,
double inlier_threshold_, double prob_pt_of_good_model, double prob_pt_of_bad_model, double inlier_threshold_, double prob_pt_of_good_model, double prob_pt_of_bad_model,
double time_sample, double avg_num_models, ScoreMethod score_type_) : rng(state), err(err_), double time_sample, double avg_num_models, ScoreMethod score_type_,
points_size(points_size_), inlier_threshold (inlier_threshold_), double k_mlesac_, bool is_adaptive) : rng(state), err(quality_->getErrorFnc()),
norm_thr(inlier_threshold_*9/4), one_over_thr (1/norm_thr), t_M (time_sample), quality(quality_), points_size(points_size_), inlier_threshold (quality->getThreshold()),
m_S (avg_num_models), score_type (score_type_) { norm_thr(inlier_threshold_*k_mlesac_), one_over_thr (1/norm_thr), t_M (time_sample),
score_type (score_type_), m_S (avg_num_models) {
// Generate array of random points for randomized evaluation // Generate array of random points for randomized evaluation
points_random_pool = std::vector<int> (points_size_); points_random_pool = std::vector<int> (points_size_);
@ -387,19 +393,23 @@ public:
// reserve (approximately) some space for sprt vector. // reserve (approximately) some space for sprt vector.
sprt_histories.reserve(20); sprt_histories.reserve(20);
createTest(prob_pt_of_good_model, prob_pt_of_bad_model); highest_inlier_number = last_iteration = 0;
highest_inlier_number = 0;
lowest_sum_errors = std::numeric_limits<double>::max(); lowest_sum_errors = std::numeric_limits<double>::max();
last_model_is_good = false; if (score_type_ != ScoreMethod::SCORE_METHOD_MSAC)
can_compute_score = score_type_ == ScoreMethod::SCORE_METHOD_MSAC errors = std::vector<float>(points_size_);
|| score_type_ == ScoreMethod::SCORE_METHOD_RANSAC IS_ADAPTIVE = is_adaptive;
|| score_type_ == ScoreMethod::SCORE_METHOD_LMEDS; delta_to_epsilon = one_over_complement_alpha = complement_delta_to_complement_epsilon = current_A = -1;
// for MSAC and RANSAC errors not needed avg_num_checked_pts = points_size_;
if (score_type_ != ScoreMethod::SCORE_METHOD_MSAC && score_type_ != ScoreMethod::SCORE_METHOD_RANSAC) adapt = IS_ADAPTIVE;
errors = std::vector<float>(points_size_); do_sprt = !IS_ADAPTIVE;
// however return errors only if we can't compute score if (IS_ADAPTIVE) {
has_errors = !can_compute_score; // all these variables will be initialized later
current_epsilon = prob_pt_of_good_model;
current_delta = prob_pt_of_bad_model;
} else {
current_epsilon = current_delta = 1e-5;
createTest(prob_pt_of_good_model, prob_pt_of_bad_model);
}
} }
/* /*
@ -421,153 +431,143 @@ public:
* @current_hypothesis: current RANSAC iteration * @current_hypothesis: current RANSAC iteration
* Return: true if model is good, false - otherwise. * Return: true if model is good, false - otherwise.
*/ */
inline bool isModelGood(const Mat& model) override inline bool isModelGood (const Mat &model, Score &out_score) override {
{
if (model.empty())
return false;
// update error object with current model // update error object with current model
err->setModelParameters(model); bool last_model_is_good = true;
double sum_errors = 0;
double lambda = 1, sum_errors = 0; int tested_inliers = 0;
last_model_is_good = true; if (! do_sprt || adapt) { // if adapt or not sprt then compute model score directly
int random_pool_idx = rng.uniform(0, points_size), tested_point, tested_inliers = 0; out_score = quality->getScore(model);
for (tested_point = 0; tested_point < points_size; tested_point++) { tested_inliers = out_score.inlier_number;
if (random_pool_idx >= points_size) sum_errors = out_score.score;
random_pool_idx = 0; } else { // do sprt and not adapt
const double error = err->getError (points_random_pool[random_pool_idx++]); err->setModelParameters(model);
if (error < inlier_threshold) { double lambda = 1;
tested_inliers++; int random_pool_idx = rng.uniform(0, points_size), tested_point;
lambda *= delta_to_epsilon;
} else {
lambda *= complement_delta_to_complement_epsilon;
// since delta is always higher than epsilon, then lambda can increase only
// when point is not consistent with model
if (lambda > current_A)
break;
}
if (score_type == ScoreMethod::SCORE_METHOD_MSAC) {
if (error < norm_thr)
sum_errors -= (1 - error * one_over_thr);
if (sum_errors - points_size + tested_point > lowest_sum_errors)
break;
} else if (score_type == ScoreMethod::SCORE_METHOD_RANSAC) {
if (tested_inliers + points_size - tested_point < highest_inlier_number)
break;
} else errors[points_random_pool[random_pool_idx-1]] = (float)error;
}
last_model_is_good = tested_point == points_size;
// increase number of samples processed by current test
sprt_histories[current_sprt_idx].tested_samples++;
if (last_model_is_good) {
score.inlier_number = tested_inliers;
if (score_type == ScoreMethod::SCORE_METHOD_MSAC) { if (score_type == ScoreMethod::SCORE_METHOD_MSAC) {
score.score = sum_errors; const auto preemptive_thr = points_size + lowest_sum_errors;
if (lowest_sum_errors > sum_errors) for (tested_point = 0; tested_point < points_size; tested_point++) {
lowest_sum_errors = sum_errors; if (random_pool_idx == points_size)
} else if (score_type == ScoreMethod::SCORE_METHOD_RANSAC) random_pool_idx = 0;
score.score = -static_cast<double>(tested_inliers); const float error = err->getError (points_random_pool[random_pool_idx++]);
else if (score_type == ScoreMethod::SCORE_METHOD_LMEDS) if (error < inlier_threshold) {
score.score = Utils::findMedian(errors); tested_inliers++;
lambda *= delta_to_epsilon;
const double new_epsilon = static_cast<double>(tested_inliers) / points_size; } else {
if (new_epsilon > current_epsilon) { lambda *= complement_delta_to_complement_epsilon;
highest_inlier_number = tested_inliers; // update max inlier number // since delta is always higher than epsilon, then lambda can increase only
/* // when point is not consistent with model
* Model accepted and the largest support so far: if (lambda > current_A)
* design (i+1)-th test (εi + 1= εˆ, δi+1 = δ, i := i + 1). break;
* Store the current model parameters θ }
*/ if (error < norm_thr)
createTest(new_epsilon, current_delta); sum_errors -= (1 - error * one_over_thr);
else if (sum_errors + tested_point > preemptive_thr)
break;
}
} else { // save errors into array here
for (tested_point = 0; tested_point < points_size; tested_point++) {
if (random_pool_idx == points_size)
random_pool_idx = 0;
const int pt = points_random_pool[random_pool_idx++];
const float error = err->getError (pt);
if (error < inlier_threshold) {
tested_inliers++;
lambda *= delta_to_epsilon;
} else {
lambda *= complement_delta_to_complement_epsilon;
if (lambda > current_A)
break;
}
errors[pt] = error;
}
} }
} else { last_model_is_good = tested_point == points_size;
/* }
* Since almost all tested models are bad, the probability if (last_model_is_good && do_sprt) {
* δ can be estimated as the average fraction of consistent data points out_score.inlier_number = tested_inliers;
* in rejected models. if (score_type == ScoreMethod::SCORE_METHOD_MSAC)
*/ out_score.score = sum_errors;
// add 1 to tested_point, because loop over tested_point starts from 0 else if (score_type == ScoreMethod::SCORE_METHOD_RANSAC)
const double delta_estimated = static_cast<double> (tested_inliers) / (tested_point+1); out_score.score = -static_cast<double>(tested_inliers);
if (delta_estimated > 0 && fabs(current_delta - delta_estimated) else out_score = quality->getScore(errors);
/ current_delta > 0.05)
/*
* Model rejected: re-estimate δ. If the estimate δ_ differs
* from δi by more than 5% design (i+1)-th test (εi+1 = εi,
* δi+1 = δˆ, i := i + 1)
*/
createTest(current_epsilon, delta_estimated);
} }
return last_model_is_good; return last_model_is_good;
} }
inline bool getScore (Score &score_) const override { // update SPRT parameters = called only once inside usac
if (!last_model_is_good || !can_compute_score) void updateSPRT (double time_model_est, double time_corr_ver, double new_avg_models, double new_delta, double new_epsilon, const Score &best_score) override {
return false; if (adapt) {
score_ = score; adapt = false;
return true; m_S = new_avg_models;
} t_M = time_model_est / time_corr_ver;
bool hasErrors () const override { return has_errors; } time_ver_corr = time_corr_ver;
const std::vector<float> &getErrors () const override { return errors; } time_ver_corr_sprt = time_corr_ver * 1.05;
const std::vector<SPRT_history> &getSPRTvector () const override { return sprt_histories; } createTest(new_epsilon, new_delta);
void update (int highest_inlier_number_) override { highest_inlier_number = best_score.inlier_number;
const double new_epsilon = static_cast<double>(highest_inlier_number_) / points_size; lowest_sum_errors = best_score.score;
if (new_epsilon > current_epsilon) {
highest_inlier_number = highest_inlier_number_;
if (sprt_histories[current_sprt_idx].tested_samples == 0)
sprt_histories[current_sprt_idx].tested_samples = 1;
// save sprt test and create new one
createTest(new_epsilon, current_delta);
} }
} }
Ptr<ModelVerifier> clone (int state) const override {
return makePtr<SPRTImpl>(state, err->clone(), points_size, inlier_threshold, const std::vector<SPRT_history> &getSPRTvector () const override { return adapt ? empty : sprt_histories; }
sprt_histories[current_sprt_idx].epsilon, void update (const Score &score, int iteration) override {
sprt_histories[current_sprt_idx].delta, t_M, m_S, score_type); if (adapt || highest_inlier_number > score.inlier_number)
return;
if (sprt_histories.size() == 1 && sprt_histories.back().tested_samples == 0)
sprt_histories.back().tested_samples = iteration;
else if (! sprt_histories.empty())
sprt_histories.back().tested_samples += iteration - last_iteration;
SPRT_history new_sprt_history;
new_sprt_history.epsilon = (double)score.inlier_number / points_size;
highest_inlier_number = score.inlier_number;
lowest_sum_errors = score.score;
createTest(static_cast<double>(highest_inlier_number) / points_size, current_delta);
new_sprt_history.delta = current_delta;
new_sprt_history.A = current_A;
sprt_histories.emplace_back(new_sprt_history);
last_iteration = iteration;
}
int avgNumCheckedPts () const override { return do_sprt ? (int)avg_num_checked_pts + 1 : points_size; }
void reset() override {
adapt = true;
do_sprt = false;
highest_inlier_number = last_iteration = 0;
lowest_sum_errors = DBL_MAX;
sprt_histories.clear();
} }
private: private:
// Update current epsilon, delta and threshold (A).
// Saves sprt test to sprt history and update current epsilon, delta and threshold. bool createTest (double epsilon, double delta) {
void createTest (double epsilon, double delta) { if (fabs(current_epsilon - epsilon) < FLT_EPSILON && fabs(current_delta - delta) < FLT_EPSILON)
return false;
// if epsilon is close to 1 then clamp it to 0.999 to avoid numerical problems // if epsilon is close to 1 then clamp it to 0.999 to avoid numerical problems
if (epsilon > 0.999999) epsilon = 0.999; if (epsilon > 0.999999) epsilon = 0.999;
// delta can't be higher than epsilon, because ratio delta / epsilon will be greater than 1 // delta can't be higher than epsilon, because ratio delta / epsilon will be greater than 1
if (epsilon < delta) delta = epsilon-0.0001; if (epsilon < delta) delta = epsilon-0.001;
// avoid delta going too high as it is very unlikely // avoid delta going too high as it is very unlikely
// e.g., 30% of points being consistent with a bad model is not realistic // e.g., 30% of points being consistent with a bad model is not realistic
if (delta > 0.3) delta = 0.3; if (delta > 0.3) delta = 0.3;
SPRT_history new_sprt_history; const auto AC = estimateThresholdA (epsilon, delta);
new_sprt_history.epsilon = epsilon; current_A = AC.first;
new_sprt_history.delta = delta; const auto C = AC.second;
new_sprt_history.A = estimateThresholdA (epsilon, delta);
sprt_histories.emplace_back(new_sprt_history);
current_A = new_sprt_history.A;
current_delta = delta; current_delta = delta;
current_epsilon = epsilon; current_epsilon = epsilon;
one_over_complement_alpha = 1 / (1 - 1 / current_A);
delta_to_epsilon = delta / epsilon; delta_to_epsilon = delta / epsilon;
complement_delta_to_complement_epsilon = (1 - delta) / (1 - epsilon); complement_delta_to_complement_epsilon = (1 - delta) / (1 - epsilon);
current_sprt_idx = static_cast<int>(sprt_histories.size()) - 1;
}
/* if (IS_ADAPTIVE) {
* A(0) = K1/K2 + 1 avg_num_checked_pts = std::min((log(current_A) / C) * one_over_complement_alpha, (double)points_size);
* A(n+1) = K1/K2 + 1 + log (A(n)) do_sprt = time_ver_corr_sprt * avg_num_checked_pts < time_ver_corr * points_size;
* K1 = t_M / P_g }
* K2 = m_S/(P_g*C) return true;
* t_M is time needed to instantiate a model hypotheses given a sample }
* P_g = epsilon ^ m, m is the number of data point in the Ransac sample. std::pair<double,double> estimateThresholdA (double epsilon, double delta) {
* m_S is the number of models that are verified per sample. const double C = (1 - delta) * log ((1 - delta) / (1 - epsilon)) + delta * log (delta / epsilon);
* p (0|Hb) p (1|Hb)
* C = p(0|Hb) log (---------) + p(1|Hb) log (---------)
* p (0|Hg) p (1|Hg)
*/
double estimateThresholdA (double epsilon, double delta) {
const double C = (1 - delta) * log ((1 - delta) / (1 - epsilon)) +
delta * (log(delta / epsilon));
// K = K1/K2 + 1 = (t_M / P_g) / (m_S / (C * P_g)) + 1 = (t_M * C)/m_S + 1 // K = K1/K2 + 1 = (t_M / P_g) / (m_S / (C * P_g)) + 1 = (t_M * C)/m_S + 1
const double K = t_M * C / m_S + 1; const double K = t_M * C / m_S + 1;
double An, An_1 = K; double An, An_1 = K;
@ -579,13 +579,13 @@ private:
break; break;
An_1 = An; An_1 = An;
} }
return An; return std::make_pair(An, C);
} }
}; };
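The adaptive SPRT verifier above keeps a running likelihood ratio over the randomly ordered points: an inlier multiplies it by delta/epsilon, an outlier by (1 - delta)/(1 - epsilon), and the model is rejected as soon as the ratio exceeds the decision threshold A obtained from the fixed-point iteration A <- K + log A. A compact sketch of both pieces, assuming 0 < delta < epsilon < 1 (`sprtThresholdA` and `sprtAccept` are hypothetical helpers over a plain residual vector, not the class above):

```cpp
#include <cmath>
#include <vector>

// Decision threshold A (Chum & Matas' SPRT): A(0) = K, A(n+1) = K + log A(n),
// with K = t_M * C / m_S + 1 and C the relative-entropy term used in the code above.
double sprtThresholdA(double epsilon, double delta, double t_M, double m_S) {
    const double C = (1 - delta) * std::log((1 - delta) / (1 - epsilon))
                   + delta * std::log(delta / epsilon);
    const double K = t_M * C / m_S + 1;
    double A = K;
    for (int i = 0; i < 100; i++) {
        const double next = K + std::log(A);
        if (std::fabs(next - A) < 1e-7) { A = next; break; }
        A = next;
    }
    return A;
}

// Sequential test: true = model survives all points, false = rejected early.
bool sprtAccept(const std::vector<float> &errors, float thr,
                double epsilon, double delta, double A) {
    double lambda = 1.0;
    for (float e : errors) {
        lambda *= (e < thr) ? delta / epsilon : (1 - delta) / (1 - epsilon);
        if (lambda > A)       // lambda only grows on outliers, since delta < epsilon
            return false;
    }
    return true;
}
```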
Ptr<SPRT> SPRT::create (int state, const Ptr<Error> &err_, int points_size_, Ptr<AdaptiveSPRT> AdaptiveSPRT::create (int state, const Ptr<Quality> &quality, int points_size_,
double inlier_threshold_, double prob_pt_of_good_model, double prob_pt_of_bad_model, double inlier_threshold_, double prob_pt_of_good_model, double prob_pt_of_bad_model,
double time_sample, double avg_num_models, ScoreMethod score_type_) { double time_sample, double avg_num_models, ScoreMethod score_type_, double k_mlesac, bool is_adaptive) {
return makePtr<SPRTImpl>(state, err_, points_size_, inlier_threshold_, return makePtr<AdaptiveSPRTImpl>(state, quality, points_size_, inlier_threshold_,
prob_pt_of_good_model, prob_pt_of_bad_model, time_sample, avg_num_models, score_type_); prob_pt_of_good_model, prob_pt_of_bad_model, time_sample, avg_num_models, score_type_, k_mlesac, is_adaptive);
} }
}} }}

File diff suppressed because it is too large

@ -40,9 +40,6 @@ public:
points_random_pool[--random_pool_size]); points_random_pool[--random_pool_size]);
} }
} }
Ptr<Sampler> clone (int state) const override {
return makePtr<UniformSamplerImpl>(state, sample_size, points_size);
}
private: private:
void setPointsSize (int points_size_) { void setPointsSize (int points_size_) {
CV_Assert (sample_size <= points_size_); CV_Assert (sample_size <= points_size_);
@ -143,10 +140,6 @@ public:
points_size = points_size_; points_size = points_size_;
initialize (); initialize ();
} }
Ptr<Sampler> clone (int state) const override {
return makePtr<ProsacSimpleSamplerImpl>(state, points_size, sample_size,
max_prosac_samples_count);
}
private: private:
void initialize () { void initialize () {
largest_sample_size = points_size; // termination length, n* largest_sample_size = points_size; // termination length, n*
@ -266,15 +259,19 @@ public:
// Choice of the hypothesis generation set // Choice of the hypothesis generation set
// if (t = T'_n) & (n < n*) then n = n + 1 (eqn. 4) // if (t = T'_n) & (n < n*) then n = n + 1 (eqn. 4)
if (kth_sample_number == growth_function[subset_size-1] && subset_size < termination_length) if (kth_sample_number >= growth_function[subset_size-1] && subset_size < termination_length)
subset_size++; subset_size++;
// Semi-random sample M_t of size m // Semi-random sample M_t of size m
// if T'n < t then // if T'n < t then
if (growth_function[subset_size-1] < kth_sample_number) { if (growth_function[subset_size-1] < kth_sample_number) {
// The sample contains m-1 points selected from U_(n-1) at random and u_n if (subset_size >= termination_length) {
random_gen->generateUniqueRandomSet(sample, sample_size-1, subset_size-1); random_gen->generateUniqueRandomSet(sample, sample_size, subset_size);
sample[sample_size-1] = subset_size-1; } else {
// The sample contains m-1 points selected from U_(n-1) at random and u_n
random_gen->generateUniqueRandomSet(sample, sample_size-1, subset_size-1);
sample[sample_size-1] = subset_size-1;
}
} else { } else {
// Select m points from U_n at random. // Select m points from U_n at random.
random_gen->generateUniqueRandomSet(sample, sample_size, subset_size); random_gen->generateUniqueRandomSet(sample, sample_size, subset_size);
@ -306,10 +303,6 @@ public:
CV_Error(cv::Error::StsError, "Changing points size in PROSAC requires to change also " CV_Error(cv::Error::StsError, "Changing points size in PROSAC requires to change also "
"termination criteria! Use PROSAC simpler version"); "termination criteria! Use PROSAC simpler version");
} }
Ptr<Sampler> clone (int state) const override {
return makePtr<ProsacSamplerImpl>(state, points_size, sample_size,
growth_max_samples);
}
}; };
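The PROSAC sampler above draws samples according to the growth function T'_n, which determines after how many samples the hypothesis-generation subset may grow from n to n+1 points; the recurrence T_{n+1} = T_n (n+1)/(n+1-m) from the PROSAC paper avoids evaluating binomial coefficients directly. A sketch of precomputing those values (`prosacGrowth` is a hypothetical helper; T_N = 200000 is just a default sample budget, not necessarily what the sampler uses):

```cpp
#include <cmath>
#include <vector>

// Growth function T'_n for sample size m over N points (assumes N > m):
// T'_m = 1, T'_{n+1} = T'_n + ceil(T_{n+1} - T_n), T_{n+1} = T_n (n+1)/(n+1-m).
std::vector<int> prosacGrowth(int N, int m, double T_N = 200000.0) {
    std::vector<int> growth(N, 0);          // entries below index m-1 are unused
    double T_n = T_N;
    for (int i = 0; i < m; i++)             // T_m = T_N * C(m,m)/C(N,m), expanded
        T_n *= static_cast<double>(m - i) / (N - i);
    int T_n_prime = 1;
    for (int n = m; n < N; n++) {
        growth[n - 1] = T_n_prime;
        const double T_next = T_n * (n + 1.0) / (n + 1.0 - m);
        T_n_prime += static_cast<int>(std::ceil(T_next - T_n));
        T_n = T_next;
    }
    growth[N - 1] = T_n_prime;
    return growth;
}
```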
Ptr<ProsacSampler> ProsacSampler::create(int state, int points_size_, int sample_size_, Ptr<ProsacSampler> ProsacSampler::create(int state, int points_size_, int sample_size_,
@ -465,10 +458,6 @@ public:
CV_Error(cv::Error::StsError, "Changing points size requires changing neighborhood graph! " CV_Error(cv::Error::StsError, "Changing points size requires changing neighborhood graph! "
"You must reinitialize P-NAPSAC!"); "You must reinitialize P-NAPSAC!");
} }
Ptr<Sampler> clone (int state) const override {
return makePtr<ProgressiveNapsacImpl>(state, points_size, sample_size, *layers,
sampler_length);
}
}; };
Ptr<ProgressiveNapsac> ProgressiveNapsac::create(int state, int points_size_, int sample_size_, Ptr<ProgressiveNapsac> ProgressiveNapsac::create(int state, int points_size_, int sample_size_,
const std::vector<Ptr<NeighborhoodGraph>> &layers, int sampler_length_) { const std::vector<Ptr<NeighborhoodGraph>> &layers, int sampler_length_) {
@ -537,9 +526,6 @@ public:
CV_Error(cv::Error::StsError, "Changing points size requires changing neighborhood graph!" CV_Error(cv::Error::StsError, "Changing points size requires changing neighborhood graph!"
" You must reinitialize NAPSAC!"); " You must reinitialize NAPSAC!");
} }
Ptr<Sampler> clone (int state) const override {
return makePtr<NapsacSamplerImpl>(state, points_size, sample_size, neighborhood_graph);
}
}; };
Ptr<NapsacSampler> NapsacSampler::create(int state, int points_size_, int sample_size_, Ptr<NapsacSampler> NapsacSampler::create(int state, int points_size_, int sample_size_,
const Ptr<NeighborhoodGraph> &neighborhood_graph_) { const Ptr<NeighborhoodGraph> &neighborhood_graph_) {

@ -27,11 +27,6 @@ public:
return 1; return 1;
} }
Ptr <MinimalSolver> clone() const override
{
return makePtr<SphereModelMinimalSolverImpl>(*points_mat);
}
/** [center_x, center_y, center_z, radius] <--> (x - center_x)^2 + (y - center_y)^2 + (z - center_z)^2 = radius^2 /** [center_x, center_y, center_z, radius] <--> (x - center_x)^2 + (y - center_y)^2 + (z - center_z)^2 = radius^2
Fitting the sphere using Cramer's Rule. Fitting the sphere using Cramer's Rule.
*/ */
@ -132,11 +127,6 @@ public:
return 1; return 1;
} }
Ptr <NonMinimalSolver> clone() const override
{
return makePtr<SphereModelNonMinimalSolverImpl>(*points_mat);
}
/** [center_x, center_y, center_z, radius] <--> (x - center_x)^2 + (y - center_y)^2 + (z - center_z)^2 = radius^2 /** [center_x, center_y, center_z, radius] <--> (x - center_x)^2 + (y - center_y)^2 + (z - center_z)^2 = radius^2
Fitting Sphere Using Differences of Squared Lengths and Squared Radius. Fitting Sphere Using Differences of Squared Lengths and Squared Radius.
Reference https://www.geometrictools.com/Documentation/LeastSquaresFitting.pdf section 5.2. Reference https://www.geometrictools.com/Documentation/LeastSquaresFitting.pdf section 5.2.
@ -261,6 +251,11 @@ public:
} }
} }
int estimate (const std::vector<bool> &/*mask*/, std::vector<Mat> &/*models*/,
const std::vector<double> &/*weights*/) override {
return 0;
}
void enforceRankConstraint (bool /*enforce*/) override {}
}; };
Ptr <SphereModelNonMinimalSolver> SphereModelNonMinimalSolver::create(const Mat &points_) Ptr <SphereModelNonMinimalSolver> SphereModelNonMinimalSolver::create(const Mat &points_)
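The sphere solvers' comments above describe two closely related ideas: the minimal fit via Cramer's rule and the least-squares fit via differences of squared lengths. For the 4-point minimal case the two combine naturally: subtracting the sphere equation of one point from the others cancels the quadratic terms and leaves a 3x3 linear system for the center. A sketch under those assumptions (`fitSphere4` and `det3` are hypothetical helpers, not the OpenCV solver):

```cpp
#include <array>
#include <cmath>

using Pt = std::array<double, 3>;

static double det3(double M[3][3]) {
    return M[0][0]*(M[1][1]*M[2][2] - M[1][2]*M[2][1])
         - M[0][1]*(M[1][0]*M[2][2] - M[1][2]*M[2][0])
         + M[0][2]*(M[1][0]*M[2][1] - M[1][1]*M[2][0]);
}

// [cx, cy, cz, r] from 4 non-coplanar points: |Pi|^2 - |P0|^2 = 2 (Pi - P0)·c, i = 1..3.
std::array<double, 4> fitSphere4(const Pt &p0, const Pt &p1, const Pt &p2, const Pt &p3) {
    const Pt pts[3] = {p1, p2, p3};
    double A[3][3], b[3];
    const double n0 = p0[0]*p0[0] + p0[1]*p0[1] + p0[2]*p0[2];
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) A[i][j] = 2.0 * (pts[i][j] - p0[j]);
        b[i] = pts[i][0]*pts[i][0] + pts[i][1]*pts[i][1] + pts[i][2]*pts[i][2] - n0;
    }
    const double D = det3(A);
    std::array<double, 4> sphere = {0, 0, 0, -1};
    if (std::fabs(D) < 1e-12) return sphere;   // degenerate (coplanar) sample
    for (int k = 0; k < 3; k++) {              // Cramer's rule: replace column k with b
        double M[3][3];
        for (int i = 0; i < 3; i++)
            for (int j = 0; j < 3; j++) M[i][j] = (j == k) ? b[i] : A[i][j];
        sphere[k] = det3(M) / D;
    }
    sphere[3] = std::sqrt((p0[0]-sphere[0])*(p0[0]-sphere[0]) +
                          (p0[1]-sphere[1])*(p0[1]-sphere[1]) +
                          (p0[2]-sphere[2])*(p0[2]-sphere[2]));
    return sphere;
}
```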

@ -28,7 +28,8 @@ public:
* (1 - w^n) is probability that at least one point of N is outlier. * (1 - w^n) is probability that at least one point of N is outlier.
* 1 - p = (1-w^n)^k is probability that in K steps of getting at least one outlier is 1% (5%). * 1 - p = (1-w^n)^k is probability that in K steps of getting at least one outlier is 1% (5%).
*/ */
int update (const Mat &/*model*/, int inlier_number) override { int update (const Mat &model, int inlier_number) const override {
CV_UNUSED(model);
const double predicted_iters = log_confidence / log(1 - std::pow const double predicted_iters = log_confidence / log(1 - std::pow
(static_cast<double>(inlier_number) / points_size, sample_size)); (static_cast<double>(inlier_number) / points_size, sample_size));
@ -40,9 +41,11 @@ public:
return MAX_ITERATIONS; return MAX_ITERATIONS;
} }
Ptr<TerminationCriteria> clone () const override { static int getMaxIterations (int inlier_number, int sample_size, int points_size, double conf) {
return makePtr<StandardTerminationCriteriaImpl>(1-exp(log_confidence), points_size, const double pred_iters = log(1 - conf) / log(1 - pow(static_cast<double>(inlier_number)/points_size, sample_size));
sample_size, MAX_ITERATIONS); if (std::isinf(pred_iters))
return INT_MAX;
return (int) pred_iters + 1;
} }
}; };
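The standard termination criterion above converts the current inlier ratio w into an iteration bound k = log(1 - p) / log(1 - w^m) for confidence p and minimal sample size m. A sketch mirroring the static helper, including the guard for the case where w^m underflows to zero (`maxIterations` is a hypothetical free function):

```cpp
#include <climits>
#include <cmath>

// Upper bound on RANSAC iterations for confidence p (e.g. 0.99),
// inlier ratio w = inliers / points, and minimal sample size m.
int maxIterations(int inliers, int points, int sample_size, double confidence) {
    const double w = static_cast<double>(inliers) / points;
    const double pred = std::log(1.0 - confidence) /
                        std::log(1.0 - std::pow(w, sample_size));
    if (std::isinf(pred))     // w^m is (numerically) zero -> no finite bound
        return INT_MAX;
    return static_cast<int>(pred) + 1;
}
```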
Ptr<StandardTerminationCriteria> StandardTerminationCriteria::create(double confidence, Ptr<StandardTerminationCriteria> StandardTerminationCriteria::create(double confidence,
@ -54,13 +57,13 @@ Ptr<StandardTerminationCriteria> StandardTerminationCriteria::create(double conf
/////////////////////////////////////// SPRT TERMINATION ////////////////////////////////////////// /////////////////////////////////////// SPRT TERMINATION //////////////////////////////////////////
class SPRTTerminationImpl : public SPRTTermination { class SPRTTerminationImpl : public SPRTTermination {
private: private:
const std::vector<SPRT_history> &sprt_histories; const Ptr<AdaptiveSPRT> sprt;
const double log_eta_0; const double log_eta_0;
const int points_size, sample_size, MAX_ITERATIONS; const int points_size, sample_size, MAX_ITERATIONS;
public: public:
SPRTTerminationImpl (const std::vector<SPRT_history> &sprt_histories_, double confidence, SPRTTerminationImpl (const Ptr<AdaptiveSPRT> &sprt_, double confidence,
int points_size_, int sample_size_, int max_iterations_) int points_size_, int sample_size_, int max_iterations_)
: sprt_histories (sprt_histories_), log_eta_0(log(1-confidence)), : sprt (sprt_), log_eta_0(log(1-confidence)),
points_size (points_size_), sample_size (sample_size_),MAX_ITERATIONS(max_iterations_){} points_size (points_size_), sample_size (sample_size_),MAX_ITERATIONS(max_iterations_){}
/* /*
@ -80,9 +83,11 @@ public:
* this equation does not have to be evaluated before nR < n0 * this equation does not have to be evaluated before nR < n0
* nR = (1 - P_g)^k * nR = (1 - P_g)^k
*/ */
int update (const Mat &/*model*/, int inlier_size) override { int update (const Mat &model, int inlier_size) const override {
if (sprt_histories.empty()) CV_UNUSED(model);
return std::min(MAX_ITERATIONS, getStandardUpperBound(inlier_size)); const auto &sprt_histories = sprt->getSPRTvector();
if (sprt_histories.size() <= 1)
return getStandardUpperBound(inlier_size);
const double epsilon = static_cast<double>(inlier_size) / points_size; // inlier probability const double epsilon = static_cast<double>(inlier_size) / points_size; // inlier probability
const double P_g = pow (epsilon, sample_size); // probability of good sample const double P_g = pow (epsilon, sample_size); // probability of good sample
@ -90,23 +95,21 @@ public:
double log_eta_lmin1 = 0; double log_eta_lmin1 = 0;
int total_number_of_tested_samples = 0; int total_number_of_tested_samples = 0;
const int sprts_size_min1 = static_cast<int>(sprt_histories.size())-1;
if (sprts_size_min1 < 0) return getStandardUpperBound(inlier_size);
// compute log n(l-1), l is number of tests // compute log n(l-1), l is number of tests
for (int test = 0; test < sprts_size_min1; test++) { for (const auto &test : sprt_histories) {
log_eta_lmin1 += log (1 - P_g * (1 - pow (sprt_histories[test].A, if (test.tested_samples == 0) continue;
-computeExponentH(sprt_histories[test].epsilon, epsilon,sprt_histories[test].delta)))) log_eta_lmin1 += log (1 - P_g * (1 - pow (test.A,
* sprt_histories[test].tested_samples; -computeExponentH(test.epsilon, epsilon,test.delta)))) * test.tested_samples;
total_number_of_tested_samples += sprt_histories[test].tested_samples; total_number_of_tested_samples += test.tested_samples;
} }
// Implementation note: since η > ηR the equation (9) does not have to be evaluated // Implementation note: since η > ηR the equation (9) does not have to be evaluated
// before ηR < η0 is satisfied. // before ηR < η0 is satisfied.
if (std::pow(1 - P_g, total_number_of_tested_samples) < log_eta_0) if (std::pow(1 - P_g, total_number_of_tested_samples) < log_eta_0)
return std::min(MAX_ITERATIONS, getStandardUpperBound(inlier_size)); return getStandardUpperBound(inlier_size);
// use decision threshold A for last test (l-th) // use decision threshold A for last test (l-th)
const double predicted_iters_sprt = (log_eta_0 - log_eta_lmin1) / const double predicted_iters_sprt = total_number_of_tested_samples + (log_eta_0 - log_eta_lmin1) /
log (1 - P_g * (1 - 1 / sprt_histories[sprts_size_min1].A)); // last A log (1 - P_g * (1 - 1 / sprt_histories.back().A)); // last A
if (std::isnan(predicted_iters_sprt) || std::isinf(predicted_iters_sprt)) if (std::isnan(predicted_iters_sprt) || std::isinf(predicted_iters_sprt))
return getStandardUpperBound(inlier_size); return getStandardUpperBound(inlier_size);
@ -117,11 +120,6 @@ public:
getStandardUpperBound(inlier_size)); getStandardUpperBound(inlier_size));
return getStandardUpperBound(inlier_size); return getStandardUpperBound(inlier_size);
} }
Ptr<TerminationCriteria> clone () const override {
return makePtr<SPRTTerminationImpl>(sprt_histories, 1-exp(log_eta_0), points_size,
sample_size, MAX_ITERATIONS);
}
private: private:
inline int getStandardUpperBound(int inlier_size) const { inline int getStandardUpperBound(int inlier_size) const {
const double predicted_iters = log_eta_0 / log(1 - std::pow const double predicted_iters = log_eta_0 / log(1 - std::pow
@ -157,9 +155,9 @@ private:
return h; return h;
} }
}; };
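A hedged sketch of how the SPRT history feeds the bound above: each past SPRT configuration contributes (1 - P_g * (1 - A^-h))^k to eta. `SprtRun`, `sprtPredictedIterations` and the sample history below are illustrative assumptions that approximate the update shown in the diff:

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// P_g = eps^m is the probability of an all-inlier sample, A the SPRT decision
// threshold, h the Wald exponent, samples the number of draws under that test.
struct SprtRun { double A, h; int samples; };

static int sprtPredictedIterations(const std::vector<SprtRun> &runs,
                                   double epsilon, int sampleSize, double conf) {
    const double P_g = std::pow(epsilon, sampleSize);
    const double log_eta0 = std::log(1.0 - conf);
    double log_eta = 0.0;
    int tested = 0;
    for (const SprtRun &r : runs) {
        log_eta += std::log(1.0 - P_g * (1.0 - std::pow(r.A, -r.h))) * r.samples;
        tested += r.samples;
    }
    // remaining samples are drawn under the last configuration (rejection prob ~ 1/A)
    const double perSample = std::log(1.0 - P_g * (1.0 - 1.0 / runs.back().A));
    const double remaining = (log_eta0 - log_eta) / perSample;
    return tested + static_cast<int>(std::max(0.0, remaining));
}

int main() {
    const std::vector<SprtRun> history = { {25.0, 1.2, 300}, {40.0, 1.0, 200} }; // made up
    std::printf("%d\n", sprtPredictedIterations(history, 0.4, 7, 0.99));         // ~2879
}
```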
Ptr<SPRTTermination> SPRTTermination::create(const std::vector<SPRT_history> &sprt_histories_, Ptr<SPRTTermination> SPRTTermination::create(const Ptr<AdaptiveSPRT> &sprt_,
double confidence, int points_size_, int sample_size_, int max_iterations_) { double confidence, int points_size_, int sample_size_, int max_iterations_) {
return makePtr<SPRTTerminationImpl>(sprt_histories_, confidence, points_size_, sample_size_, return makePtr<SPRTTerminationImpl>(sprt_, confidence, points_size_, sample_size_,
max_iterations_); max_iterations_);
} }
@ -167,21 +165,20 @@ Ptr<SPRTTermination> SPRTTermination::create(const std::vector<SPRT_history> &sp
class SPRTPNapsacTerminationImpl : public SPRTPNapsacTermination { class SPRTPNapsacTerminationImpl : public SPRTPNapsacTermination {
private: private:
SPRTTerminationImpl sprt_termination; SPRTTerminationImpl sprt_termination;
const std::vector<SPRT_history> &sprt_histories;
const double relax_coef, log_confidence; const double relax_coef, log_confidence;
const int points_size, sample_size, MAX_ITERS; const int points_size, sample_size, MAX_ITERS;
public: public:
SPRTPNapsacTerminationImpl (const std::vector<SPRT_history> &sprt_histories_, SPRTPNapsacTerminationImpl (const Ptr<AdaptiveSPRT> &sprt,
double confidence, int points_size_, int sample_size_, double confidence, int points_size_, int sample_size_,
int max_iterations_, double relax_coef_) int max_iterations_, double relax_coef_)
: sprt_termination (sprt_histories_, confidence, points_size_, sample_size_, : sprt_termination (sprt, confidence, points_size_, sample_size_,
max_iterations_), sprt_histories (sprt_histories_), max_iterations_),
relax_coef (relax_coef_), log_confidence(log(1-confidence)), relax_coef (relax_coef_), log_confidence(log(1-confidence)),
points_size (points_size_), sample_size (sample_size_), points_size (points_size_), sample_size (sample_size_),
MAX_ITERS (max_iterations_) {} MAX_ITERS (max_iterations_) {}
int update (const Mat &model, int inlier_number) override { int update (const Mat &model, int inlier_number) const override {
int predicted_iterations = sprt_termination.update(model, inlier_number); int predicted_iterations = sprt_termination.update(model, inlier_number);
const double inlier_prob = static_cast<double>(inlier_number) / points_size + relax_coef; const double inlier_prob = static_cast<double>(inlier_number) / points_size + relax_coef;
@ -192,52 +189,41 @@ public:
if (! std::isinf(predicted_iters) && predicted_iters < predicted_iterations) if (! std::isinf(predicted_iters) && predicted_iters < predicted_iterations)
return static_cast<int>(predicted_iters); return static_cast<int>(predicted_iters);
return predicted_iterations; return std::min(MAX_ITERS, predicted_iterations);
}
Ptr<TerminationCriteria> clone () const override {
return makePtr<SPRTPNapsacTerminationImpl>(sprt_histories, 1-exp(log_confidence),
points_size, sample_size, MAX_ITERS, relax_coef);
} }
}; };
Ptr<SPRTPNapsacTermination> SPRTPNapsacTermination::create(const std::vector<SPRT_history>& Ptr<SPRTPNapsacTermination> SPRTPNapsacTermination::create(const Ptr<AdaptiveSPRT> &
sprt_histories_, double confidence, int points_size_, int sample_size_, sprt, double confidence, int points_size_, int sample_size_,
int max_iterations_, double relax_coef_) { int max_iterations_, double relax_coef_) {
return makePtr<SPRTPNapsacTerminationImpl>(sprt_histories_, confidence, points_size_, return makePtr<SPRTPNapsacTerminationImpl>(sprt, confidence, points_size_,
sample_size_, max_iterations_, relax_coef_); sample_size_, max_iterations_, relax_coef_);
} }
////////////////////////////////////// PROSAC TERMINATION /////////////////////////////////////////
////////////////////////////////////// PROSAC TERMINATION /////////////////////////////////////////
class ProsacTerminationCriteriaImpl : public ProsacTerminationCriteria { class ProsacTerminationCriteriaImpl : public ProsacTerminationCriteria {
private: private:
const double log_confidence, beta, non_randomness_phi, inlier_threshold; const double log_conf, beta, non_randomness_phi, inlier_threshold;
const int MAX_ITERATIONS, points_size, min_termination_length, sample_size; const int MAX_ITERATIONS, points_size, min_termination_length, sample_size;
const Ptr<ProsacSampler> sampler; const Ptr<ProsacSampler> sampler;
std::vector<int> non_random_inliers; std::vector<int> non_random_inliers;
const Ptr<Error> error; const Ptr<Error> error;
public: public:
ProsacTerminationCriteriaImpl (const Ptr<Error> &error_, int points_size_,int sample_size_,
double confidence, int max_iterations, int min_termination_length_, double beta_,
double non_randomness_phi_, double inlier_threshold_) : log_confidence
(log(1-confidence)), beta(beta_), non_randomness_phi(non_randomness_phi_),
inlier_threshold(inlier_threshold_), MAX_ITERATIONS(max_iterations),
points_size (points_size_), min_termination_length (min_termination_length_),
sample_size(sample_size_), error (error_) { init(); }
ProsacTerminationCriteriaImpl (const Ptr<ProsacSampler> &sampler_,const Ptr<Error> &error_, ProsacTerminationCriteriaImpl (const Ptr<ProsacSampler> &sampler_,const Ptr<Error> &error_,
int points_size_, int sample_size_, double confidence, int max_iterations, int points_size_, int sample_size_, double confidence, int max_iterations,
int min_termination_length_, double beta_, double non_randomness_phi_, int min_termination_length_, double beta_, double non_randomness_phi_,
double inlier_threshold_) : log_confidence(log(1-confidence)), beta(beta_), double inlier_threshold_, const std::vector<int> &non_rand_inliers) : log_conf(log(1-confidence)), beta(beta_),
non_randomness_phi(non_randomness_phi_), inlier_threshold(inlier_threshold_), non_randomness_phi(non_randomness_phi_), inlier_threshold(inlier_threshold_),
MAX_ITERATIONS(max_iterations), points_size (points_size_), MAX_ITERATIONS(max_iterations), points_size (points_size_),
min_termination_length (min_termination_length_), sample_size(sample_size_), min_termination_length (min_termination_length_), sample_size(sample_size_),
sampler(sampler_), error (error_) { init(); } sampler(sampler_), error (error_) {
CV_Assert(min_termination_length_ <= points_size_ && min_termination_length_ >= 0);
if (non_rand_inliers.empty())
init();
else non_random_inliers = non_rand_inliers;
}
void init () { void init () {
// m is sample_size // m is sample_size, N is points_size
// N is points_size
// non-randomness constraint // non-randomness constraint
// The non-randomness requirement prevents PROSAC // The non-randomness requirement prevents PROSAC
// from selecting a solution supported by outliers that are // from selecting a solution supported by outliers that are
@ -245,20 +231,15 @@ public:
// checked ex-post in standard approaches [1]. The distribution // checked ex-post in standard approaches [1]. The distribution
// of the cardinalities of sets of random ‘inliers’ is binomial // of the cardinalities of sets of random ‘inliers’ is binomial
// i-th entry - inlier counts for termination up to i-th point (term length = i+1) // i-th entry - inlier counts for termination up to i-th point (term length = i+1)
// ------------------------------------------------------------------------
// initialize the data structures that determine stopping // initialize the data structures that determine stopping
// see probabilities description below. // see probabilities description below.
non_random_inliers = std::vector<int>(points_size, 0); non_random_inliers = std::vector<int>(points_size, 0);
std::vector<double> pn_i_arr(points_size); std::vector<double> pn_i_arr(points_size, 0);
const double beta2compl_beta = beta / (1-beta); const double beta2compl_beta = beta / (1-beta);
const int step_n = 50, max_n = std::min(points_size, 1200); const int step_n = 50, max_n = std::min(points_size, 1200);
for (int n = sample_size; n <= points_size; n+=step_n) { for (int n = sample_size; n < points_size; n+=step_n) {
if (n > max_n) { if (n > max_n) break; // skip expensive calculation
// skip expensive calculation
break;
}
// P^R_n(i) = β^(i−m) (1−β)^(n−i+m) (n−m i−m). (7) i = m,...,N // P^R_n(i) = β^(i−m) (1−β)^(n−i+m) (n−m i−m). (7) i = m,...,N
// initial value for i = m = sample_size // initial value for i = m = sample_size
@ -288,7 +269,7 @@ public:
non_random_inliers[n-1] = i_min; non_random_inliers[n-1] = i_min;
} }
// approximate values of binomial distribution // approximate values of binomial distribution using linear interpolation
for (int n = sample_size; n <= points_size; n+=step_n) { for (int n = sample_size; n <= points_size; n+=step_n) {
if (n-1+step_n >= max_n) { if (n-1+step_n >= max_n) {
// copy rest of the values // copy rest of the values
@ -301,19 +282,24 @@ public:
non_random_inliers[n+i] = (int)(non_rand_n + (i+1)*step); non_random_inliers[n+i] = (int)(non_rand_n + (i+1)*step);
} }
} }
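For readers of the non-randomness constraint above, a simplified standalone sketch: it finds the smallest inlier count whose probability under a plain binomial model of random "inliers" stays below psi. `minNonRandomInliers` is our helper; the patch follows the PROSAC paper's eq. (7) plus the step/interpolation scheme over n shown in the diff:

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Smallest I_min(n): probability of i >= I_min random 'inliers' among n points,
// modelled by a binomial over the n-m non-sample points, is below psi.
static int minNonRandomInliers(int n, int m, double beta, double psi) {
    std::vector<double> p(n + 1, 0.0);
    p[m] = std::pow(1.0 - beta, n - m);                      // P(i = m)
    for (int i = m + 1; i <= n; i++)                         // binomial recurrence
        p[i] = p[i - 1] * (beta / (1.0 - beta)) * double(n - i + 1) / double(i - m);
    double tail = 0.0;
    for (int i = n; i >= m; i--) {
        tail += p[i];
        if (tail > psi) return i + 1;                        // i+1 still satisfied the bound
    }
    return m;
}

int main() {
    // minimal non-random inlier count for n = 200, m = 7, beta = psi = 5%
    std::printf("%d\n", minNonRandomInliers(200, 7, 0.05, 0.05));
}
```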
const std::vector<int> &getNonRandomInliers () const override { return non_random_inliers; }
/* /*
* The PROSAC algorithm terminates if the number of inliers I_n* * The PROSAC algorithm terminates if the number of inliers I_n*
* within the set U_n* satisfies the following conditions: * within the set U_n* satisfies the following conditions:
* *
* non-randomness the probability that I_n* out of n* (termination_length) * non-randomness the probability that I_n* out of n* (termination_length)
* data points are by chance inliers to an arbitrary incorrect model * data points are by chance inliers to an arbitrary incorrect model
* is smaller than Ψ (typically set to 5%) * is smaller than Psi (typically set to 5%)
* *
* maximality the probability that a solution with more than * maximality the probability that a solution with more than
* In* inliers in U_n* exists and was not found after k * In* inliers in U_n* exists and was not found after k
* samples is smaller than η0 (typically set to 5%). * samples is smaller than eta_0 (typically set to 5%).
*/ */
int update (const Mat &model, int inliers_size) override { int update (const Mat &model, int inliers_size) const override {
int t; return updateTerminationLength(model, inliers_size, t);
}
int updateTerminationLength (const Mat &model, int inliers_size, int &found_termination_length) const override {
found_termination_length = points_size;
int predicted_iterations = MAX_ITERATIONS; int predicted_iterations = MAX_ITERATIONS;
/* /*
* The termination length n* is chosen to minimize k_n*(η0) subject to I_n* ≥ I_min n*; * The termination length n* is chosen to minimize k_n*(η0) subject to I_n* ≥ I_min n*;
@ -327,23 +313,22 @@ public:
for (int pt = 0; pt < min_termination_length; pt++) for (int pt = 0; pt < min_termination_length; pt++)
if (errors[pt] < inlier_threshold) if (errors[pt] < inlier_threshold)
num_inliers_under_termination_len++; num_inliers_under_termination_len++;
for (int termination_len = min_termination_length; termination_len < points_size;termination_len++){ for (int termination_len = min_termination_length; termination_len < points_size;termination_len++){
if (errors[termination_len /* = point*/] < inlier_threshold) { if (errors[termination_len /* = point*/] < inlier_threshold) {
num_inliers_under_termination_len++; num_inliers_under_termination_len++;
// non-random constraint must satisfy I_n* ≥ I_min n*. // non-random constraint must satisfy I_n* ≥ I_min n*.
if (num_inliers_under_termination_len < non_random_inliers[termination_len]) if (num_inliers_under_termination_len < non_random_inliers[termination_len] || (double) num_inliers_under_termination_len/(points_size) < 0.2)
continue; continue;
// add 1 to termination length since num_inliers_under_termination_len is updated // add 1 to termination length since num_inliers_under_termination_len is updated
const double new_max_samples = log_confidence / log(1 - const double new_max_samples = log_conf/log(1-pow(static_cast<double>(num_inliers_under_termination_len)
std::pow(static_cast<double>(num_inliers_under_termination_len)
/ (termination_len+1), sample_size)); / (termination_len+1), sample_size));
if (! std::isinf(new_max_samples) && predicted_iterations > new_max_samples) { if (! std::isinf(new_max_samples) && predicted_iterations > new_max_samples) {
predicted_iterations = static_cast<int>(new_max_samples); predicted_iterations = static_cast<int>(new_max_samples);
if (predicted_iterations == 0) break; if (predicted_iterations == 0) break;
found_termination_length = termination_len;
if (sampler != nullptr) if (sampler != nullptr)
sampler->setTerminationLength(termination_len); sampler->setTerminationLength(termination_len);
} }
@ -352,27 +337,22 @@ public:
// compare also when termination length = points_size, // compare also when termination length = points_size,
// so inliers under termination length is total number of inliers: // so inliers under termination length is total number of inliers:
const double predicted_iters = log_confidence / log(1 - std::pow const double predicted_iters = log_conf / log(1 - std::pow
(static_cast<double>(inliers_size) / points_size, sample_size)); (static_cast<double>(inliers_size) / points_size, sample_size));
if (! std::isinf(predicted_iters) && predicted_iters < predicted_iterations) if (! std::isinf(predicted_iters) && predicted_iters < predicted_iterations)
return static_cast<int>(predicted_iters); return static_cast<int>(predicted_iters);
return predicted_iterations; return predicted_iterations;
} }
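The maximality part of the PROSAC stopping rule can be sketched in isolation as below: scan candidate termination lengths n*, count inliers among the first n* (quality-sorted) points, and keep the n* minimising k_{n*} = log(eta0) / log(1 - (I_{n*}/n*)^m). `prosacMaxIterations` and the synthetic mask are illustrative assumptions, not the patch's code:

```cpp
#include <climits>
#include <cmath>
#include <cstdio>
#include <vector>

static int prosacMaxIterations(const std::vector<bool> &inlierMask /*quality-sorted*/,
                               int sampleSize, int minTerminationLength, double conf) {
    const double log_eta0 = std::log(1.0 - conf);
    int bestIters = INT_MAX, inliersSoFar = 0;
    for (int n = 0; n < (int)inlierMask.size(); n++) {
        if (inlierMask[n]) inliersSoFar++;
        if (n + 1 < minTerminationLength) continue;
        const double ratio = double(inliersSoFar) / (n + 1);
        const double k = log_eta0 / std::log(1.0 - std::pow(ratio, sampleSize));
        if (std::isfinite(k) && k < bestIters)
            bestIters = (int)k;
    }
    return bestIters;
}

int main() {
    std::vector<bool> mask(1000, false);
    for (int i = 0; i < 1000; i++)
        mask[i] = (i < 200) ? (i % 5 != 0) : (i % 10 == 0); // 80% inliers in the top 200
    std::printf("%d\n", prosacMaxIterations(mask, 7, 20, 0.99)); // ~19
}
```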
Ptr<TerminationCriteria> clone () const override {
return makePtr<ProsacTerminationCriteriaImpl>(error->clone(),
points_size, sample_size, 1-exp(log_confidence), MAX_ITERATIONS,
min_termination_length, beta, non_randomness_phi, inlier_threshold);
}
}; };
Ptr<ProsacTerminationCriteria> Ptr<ProsacTerminationCriteria>
ProsacTerminationCriteria::create(const Ptr<ProsacSampler> &sampler, const Ptr<Error> &error, ProsacTerminationCriteria::create(const Ptr<ProsacSampler> &sampler, const Ptr<Error> &error,
int points_size_, int sample_size_, double confidence, int max_iterations, int points_size_, int sample_size_, double confidence, int max_iterations,
int min_termination_length_, double beta, double non_randomness_phi, double inlier_thresh) { int min_termination_length_, double beta, double non_randomness_phi, double inlier_thresh,
const std::vector<int> &non_rand_inliers) {
return makePtr<ProsacTerminationCriteriaImpl> (sampler, error, points_size_, sample_size_, return makePtr<ProsacTerminationCriteriaImpl> (sampler, error, points_size_, sample_size_,
confidence, max_iterations, min_termination_length_, confidence, max_iterations, min_termination_length_,
beta, non_randomness_phi, inlier_thresh); beta, non_randomness_phi, inlier_thresh, non_rand_inliers);
} }
}} }}

@ -8,9 +8,234 @@
#include <map> #include <map>
namespace cv { namespace usac { namespace cv { namespace usac {
double Utils::getCalibratedThreshold (double threshold, const Matx33d &K1, const Matx33d &K2) { /*
return threshold / ((K1(0, 0) + K1(1, 1) + SolvePoly is used to find only real roots of N-degree polynomial using Sturm sequence.
K2(0, 0) + K2(1, 1)) / 4.0); It recursively finds interval where a root lies, and the actual root is found using Regula-Falsi method.
*/
class SolvePoly : public SolverPoly {
private:
static int sgn(double val) {
return (double(0) < val) - (val < double(0));
}
class Poly {
public:
Poly () = default;
Poly (const std::vector<double> &coef_) {
coef = coef_;
checkDegree();
}
Poly (const Poly &p) { coef = p.coef; }
// a_n x^n + a_n-1 x^(n-1) + ... + a_1 x + a_0
// coef[i] = a_i
std::vector<double> coef = {0};
inline int degree() const { return (int)coef.size()-1; }
void multiplyScalar (double s) {
// multiplies polynomial by scalar
if (fabs(s) < DBL_EPSILON) { // check if scalar is 0
coef = {0};
return;
}
for (double &c : coef) c *= s;
}
void checkDegree() {
int deg = degree(); // approximate degree
// check if coefficients of the highest power is non-zero
while (fabs(coef[deg]) < DBL_EPSILON) {
coef.pop_back(); // remove last zero element
if (--deg == 0)
break;
}
}
double eval (double x) const {
// Horner method a0 + x (a1 + x (a2 + x (a3 + ... + x (an-1 + x an))))
const int d = degree();
double y = coef[d];
for (int i = d; i >= 1; i--)
y = coef[i-1] + x * y;
return y;
}
// at +inf and -inf
std::pair<int,int> signsAtInf () const {
// lim x->+-inf p(x) = lim x->+-inf a_n x^n
const int d = degree();
const int s = sgn(coef[d]); // sign of the highest coefficient
// compare even and odd degree
return std::make_pair(s, d % 2 == 0 ? s : -s);
}
Poly derivative () const {
Poly deriv;
if (degree() == 0)
return deriv;
// derive.degree = poly.degree-1;
deriv.coef = std::vector<double>(coef.size()-1);
for (int i = degree(); i > 0; i--)
// (a_n * x^n)' = n * a_n * x^(n-1)
deriv.coef[i-1] = i * coef[i];
return deriv;
}
void copyFrom (const Poly &p) { coef = p.coef; }
};
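A minimal standalone illustration of the two `Poly` building blocks used above, Horner evaluation and the term-wise derivative; `hornerEval` and `derivative` are our names, but they follow the same coefficient convention (coef[i] multiplies x^i):

```cpp
#include <cstdio>
#include <vector>

// Horner evaluation: a0 + x (a1 + x (a2 + ...))
static double hornerEval(const std::vector<double> &coef, double x) {
    double y = coef.back();
    for (int i = (int)coef.size() - 2; i >= 0; i--)
        y = coef[i] + x * y;
    return y;
}
// Term-wise derivative: (a_i x^i)' = i * a_i * x^(i-1)
static std::vector<double> derivative(const std::vector<double> &coef) {
    std::vector<double> d(coef.size() > 1 ? coef.size() - 1 : 1, 0.0);
    for (int i = 1; i < (int)coef.size(); i++)
        d[i - 1] = i * coef[i];
    return d;
}

int main() {
    const std::vector<double> p = {-6, 11, -6, 1};   // (x-1)(x-2)(x-3)
    std::printf("p(2) = %g, p'(2) = %g\n", hornerEval(p, 2), hornerEval(derivative(p), 2));
    // p(2) = 0, p'(2) = -1
}
```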
// return remainder
static void dividePoly (const Poly &p1, const Poly &p2, /*Poly &quotient,*/ Poly &remainder) {
remainder.copyFrom(p1);
int p2_degree = p2.degree(), remainder_degree = remainder.degree();
if (p1.degree() < p2_degree)
return;
if (p2_degree == 0) { // special case for dividing polynomial by constant
remainder.multiplyScalar(1/p2.coef[0]);
// quotient.coef[0] = p2.coef[0];
return;
}
// quotient.coef = std::vector<double>(p1.degree() - p2_degree + 1, 0);
const double p2_term = 1/p2.coef[p2_degree];
while (remainder_degree >= p2_degree) {
const double temp = remainder.coef[remainder_degree] * p2_term;
// quotient.coef[remainder_degree-p2_degree] = temp;
// polynomials now have the same degree, but p2 is shorter than remainder
for (int i = p2_degree, j = remainder_degree; i >= 0; i--, j--)
remainder.coef[j] -= temp * p2.coef[i];
remainder.checkDegree();
remainder_degree = remainder.degree();
}
}
constexpr static int REGULA_FALSI_MAX_ITERS = 500, MAX_POWER = 10, MAX_LEVEL = 200;
constexpr static double TOLERANCE = 1e-10, DIFF_TOLERANCE = 1e-7;
static bool findRootRegulaFalsi (const Poly &poly, double min, double max, double &root) {
double f_min = poly.eval(min), f_max = poly.eval(max);
if (f_min * f_max > 0 || min > max) {// conditions are not fulfilled
return false;
}
int sign = 0, iter = 0;
for (; iter < REGULA_FALSI_MAX_ITERS; iter++) {
root = (f_min * max - f_max * min) / (f_min - f_max);
const double f_root = poly.eval(root);
if (fabs(f_root) < TOLERANCE || fabs(min - max) < DIFF_TOLERANCE) {
return true; // root is found
}
if (f_root * f_max > 0) {
max = root; f_max = f_root;
if (sign == -1)
f_min *= 0.5;
sign = -1;
} else if (f_min * f_root > 0) {
min = root; f_min = f_root;
if (sign == 1)
f_max *= 0.5;
sign = 1;
}
}
return false;
}
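`findRootRegulaFalsi` above is a damped (Illinois-style) regula falsi: the retained endpoint's function value is halved when the same side is kept twice, which prevents stalling. A compact standalone sketch of the same idea, with our own names and a toy function:

```cpp
#include <cmath>
#include <cstdio>
#include <functional>

static bool regulaFalsi(const std::function<double(double)> &f,
                        double a, double b, double &root,
                        int maxIters = 500, double tol = 1e-10) {
    double fa = f(a), fb = f(b);
    if (fa * fb > 0) return false;            // root must be bracketed
    int lastSide = 0;
    for (int i = 0; i < maxIters; i++) {
        root = (fa * b - fb * a) / (fa - fb); // secant through (a,fa), (b,fb)
        const double fr = f(root);
        if (std::fabs(fr) < tol || std::fabs(b - a) < tol) return true;
        if (fr * fb > 0) { b = root; fb = fr; if (lastSide == -1) fa *= 0.5; lastSide = -1; }
        else             { a = root; fa = fr; if (lastSide == +1) fb *= 0.5; lastSide = +1; }
    }
    return false;
}

int main() {
    double r;
    if (regulaFalsi([](double x){ return x * x - 2; }, 0, 2, r))
        std::printf("sqrt(2) ~ %.10f\n", r);
}
```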
static int numberOfSignChanges (const std::vector<Poly> &sturm, double x) {
int prev_sign = 0, sign_changes = 0;
for (const auto &poly : sturm) {
const int s = sgn(poly.eval(x));
if (s != 0 && prev_sign != 0 && s != prev_sign)
sign_changes++;
prev_sign = s;
}
return sign_changes;
}
static void findRootsRecursive (const Poly &poly, const std::vector<Poly> &sturm, double min, double max,
int sign_changes_at_min, int sign_changes_at_max, std::vector<double> &roots, int level) {
const int num_roots = sign_changes_at_min - sign_changes_at_max;
if (level == MAX_LEVEL) {
// roots are too close
const double mid = (min + max) * 0.5;
if (fabs(poly.eval(mid)) < DBL_EPSILON) {
roots.emplace_back(mid);
}
} else if (num_roots == 1) {
double root;
if (findRootRegulaFalsi(poly, min, max, root)) {
roots.emplace_back(root);
}
} else if (num_roots > 1) { // at least 2 roots
const double mid = (min + max) * 0.5;
const int sign_changes_at_mid = numberOfSignChanges(sturm, mid);
// try to split interval equally for the roots
if (sign_changes_at_min - sign_changes_at_mid > 0)
findRootsRecursive(poly, sturm, min, mid, sign_changes_at_min, sign_changes_at_mid, roots, level+1);
if (sign_changes_at_mid - sign_changes_at_max > 0)
findRootsRecursive(poly, sturm, mid, max, sign_changes_at_mid, sign_changes_at_max, roots, level+1);
}
}
public:
int getRealRoots (const std::vector<double> &coeffs, std::vector<double> &real_roots) override {
if (coeffs.empty())
return 0;
Poly input(coeffs);
if (input.degree() < 1)
return 0;
// derivative of input polynomial
const Poly input_der = input.derivative();
/////////// build Sturm sequence //////////
Poly p (input), q (input_der), remainder;
std::vector<std::pair<int,int>> signs_at_inf; signs_at_inf.reserve(p.degree()); // +inf, -inf pair
signs_at_inf.emplace_back(p.signsAtInf());
signs_at_inf.emplace_back(q.signsAtInf());
std::vector<Poly> sturm_sequence; sturm_sequence.reserve(input.degree());
sturm_sequence.emplace_back(input);
sturm_sequence.emplace_back(input_der);
while (q.degree() > 0) {
dividePoly(p, q, remainder);
remainder.multiplyScalar(-1);
p.copyFrom(q);
q.copyFrom(remainder);
sturm_sequence.emplace_back(remainder);
signs_at_inf.emplace_back(remainder.signsAtInf());
}
////////// find changes in signs of Sturm sequence /////////
int num_sign_changes_at_pos_inf = 0, num_sign_changes_at_neg_inf = 0;
int prev_sign_pos_inf = signs_at_inf[0].first, prev_sign_neg_inf = signs_at_inf[0].second;
for (int i = 1; i < (int)signs_at_inf.size(); i++) {
const auto s_pos_inf = signs_at_inf[i].first, s_neg_inf = signs_at_inf[i].second;
// zeros must be ignored
if (s_pos_inf != 0) {
if (prev_sign_pos_inf != 0 && prev_sign_pos_inf != s_pos_inf)
num_sign_changes_at_pos_inf++;
prev_sign_pos_inf = s_pos_inf;
}
if (s_neg_inf != 0) {
if (prev_sign_neg_inf != 0 && prev_sign_neg_inf != s_neg_inf)
num_sign_changes_at_neg_inf++;
prev_sign_neg_inf = s_neg_inf;
}
}
////////// find roots' bounds for numerical method for roots finding /////////
double root_neg_bound = -0.01, root_pos_bound = 0.01;
int num_sign_changes_min_x = -1, num_sign_changes_pos_x = -1; // -1 = unknown, trigger next if condition
for (int i = 0; i < MAX_POWER; i++) {
if (num_sign_changes_min_x != num_sign_changes_at_neg_inf) {
root_neg_bound *= 10;
num_sign_changes_min_x = numberOfSignChanges(sturm_sequence, root_neg_bound);
}
if (num_sign_changes_pos_x != num_sign_changes_at_pos_inf) {
root_pos_bound *= 10;
num_sign_changes_pos_x = numberOfSignChanges(sturm_sequence, root_pos_bound);
}
}
/////////// get real roots //////////
real_roots.clear();
findRootsRecursive(input, sturm_sequence, root_neg_bound, root_pos_bound, num_sign_changes_min_x, num_sign_changes_pos_x, real_roots, 0 /*level*/);
///////////////////////////////
if ((int)real_roots.size() > input.degree())
real_roots.resize(input.degree()); // must not happen, unless some roots repeat
return (int) real_roots.size();
}
};
Ptr<SolverPoly> SolverPoly::create() {
return makePtr<SolvePoly>();
}
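A hedged usage sketch of the `SolverPoly` interface introduced here; note `cv::usac` is an internal namespace, so the include path and availability outside the module are assumptions on our side:

```cpp
#include <cstdio>
#include <vector>
// #include "usac.hpp"   // internal header under modules/3d/src (assumption)

int main() {
    // find the real roots of x^3 - 6x^2 + 11x - 6 = (x-1)(x-2)(x-3)
    std::vector<double> coeffs = {-6, 11, -6, 1};   // coeffs[i] multiplies x^i
    std::vector<double> roots;
    cv::Ptr<cv::usac::SolverPoly> solver = cv::usac::SolverPoly::create();
    const int n = solver->getRealRoots(coeffs, roots);
    for (int i = 0; i < n; i++)
        std::printf("root %d: %g\n", i, roots[i]);  // expected: 1, 2, 3 (in some order)
}
```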
double Utils::getCalibratedThreshold (double threshold, const Mat &K1, const Mat &K2) {
const auto * const k1 = (double *) K1.data, * const k2 = (double *) K2.data;
return threshold / ((k1[0] + k1[4] + k2[0] + k2[4]) / 4.0);
} }
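A small worked example of the conversion above: a pixel-space threshold is mapped to normalized (calibrated) coordinates by dividing by the average focal length of both cameras. Focal lengths and threshold below are made-up numbers:

```cpp
#include <cstdio>

int main() {
    const double fx1 = 1200, fy1 = 1180, fx2 = 1000, fy2 = 990; // assumed focal lengths
    const double thr_px = 1.5;                                   // threshold in pixels
    const double thr_calib = thr_px / ((fx1 + fy1 + fx2 + fy2) / 4.0);
    std::printf("%.6f\n", thr_calib);                            // ~0.001373
}
```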
/* /*
@ -20,20 +245,22 @@ double Utils::getCalibratedThreshold (double threshold, const Matx33d &K1, const
* 0 k22 k23 * 0 k22 k23
* 0 0 1] * 0 0 1]
*/ */
void Utils::calibratePoints (const Matx33d &K1, const Matx33d &K2, const Mat &points, Mat &calib_points) { void Utils::calibratePoints (const Mat &K1, const Mat &K2, const Mat &points, Mat &calib_points) {
const auto inv1_k11 = float(1 / K1(0, 0)); // 1 / k11
const auto inv1_k12 = float(-K1(0, 1) / (K1(0, 0)*K1(1, 1))); // -k12 / (k11*k22)
// (-k13*k22 + k12*k23) / (k11*k22)
const auto inv1_k13 = float((-K1(0, 2)*K1(1, 1) + K1(0, 1)*K1(1, 2)) / (K1(0, 0)*K1(1, 1)));
const auto inv1_k22 = float(1 / K1(1, 1)); // 1 / k22
const auto inv1_k23 = float(-K1(1, 2) / K1(1, 1)); // -k23 / k22
const auto inv2_k11 = float(1 / K2(0, 0));
const auto inv2_k12 = float(-K2(0, 1) / (K2(0, 0)*K2(1, 1)));
const auto inv2_k13 = float((-K2(0, 2)*K2(1, 1) + K2(0, 1)*K2(1, 2)) / (K2(0, 0)*K2(1, 1)));
const auto inv2_k22 = float(1 / K2(1, 1));
const auto inv2_k23 = float(-K2(1, 2) / K2(1, 1));
const auto * const points_ = (float *) points.data; const auto * const points_ = (float *) points.data;
const auto * const k1 = (double *) K1.data;
const auto inv1_k11 = float(1 / k1[0]); // 1 / k11
const auto inv1_k12 = float(-k1[1] / (k1[0]*k1[4])); // -k12 / (k11*k22)
// (-k13*k22 + k12*k23) / (k11*k22)
const auto inv1_k13 = float((-k1[2]*k1[4] + k1[1]*k1[5]) / (k1[0]*k1[4]));
const auto inv1_k22 = float(1 / k1[4]); // 1 / k22
const auto inv1_k23 = float(-k1[5] / k1[4]); // -k23 / k22
const auto * const k2 = (double *) K2.data;
const auto inv2_k11 = float(1 / k2[0]);
const auto inv2_k12 = float(-k2[1] / (k2[0]*k2[4]));
const auto inv2_k13 = float((-k2[2]*k2[4] + k2[1]*k2[5]) / (k2[0]*k2[4]));
const auto inv2_k22 = float(1 / k2[4]);
const auto inv2_k23 = float(-k2[5] / k2[4]);
calib_points = Mat ( points.rows, 4, points.type()); calib_points = Mat ( points.rows, 4, points.type());
auto * calib_points_ = (float *) calib_points.data; auto * calib_points_ = (float *) calib_points.data;
@ -52,13 +279,15 @@ void Utils::calibratePoints (const Matx33d &K1, const Matx33d &K2, const Mat &po
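The `inv*_k**` terms above are the closed-form inverse of an upper-triangular intrinsic matrix K = [k11 k12 k13; 0 k22 k23; 0 0 1]. A quick sanity-check sketch with made-up intrinsics (not part of the patch):

```cpp
#include <opencv2/core.hpp>
#include <cstdio>

int main() {
    const double k11 = 1000, k12 = 2, k13 = 640, k22 = 990, k23 = 360;
    const cv::Matx33d K(k11, k12, k13,
                        0,   k22, k23,
                        0,   0,   1);
    // same closed-form entries as in calibratePoints above
    const cv::Matx33d Kinv(1/k11, -k12/(k11*k22), (-k13*k22 + k12*k23)/(k11*k22),
                           0,      1/k22,         -k23/k22,
                           0,      0,              1);
    std::printf("max |K*Kinv - I| = %g\n",
                cv::norm(K * Kinv - cv::Matx33d::eye(), cv::NORM_INF)); // ~0
}
```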
* points is matrix of size |N| x 5, first two columns are image points [u_i, v_i] * points is matrix of size |N| x 5, first two columns are image points [u_i, v_i]
* calib_norm_pts are K^-1 [u v 1]^T / ||K^-1 [u v 1]^T|| * calib_norm_pts are K^-1 [u v 1]^T / ||K^-1 [u v 1]^T||
*/ */
void Utils::calibrateAndNormalizePointsPnP (const Matx33d &K, const Mat &pts, Mat &calib_norm_pts) { void Utils::calibrateAndNormalizePointsPnP (const Mat &K, const Mat &pts, Mat &calib_norm_pts) {
const auto * const points = (float *) pts.data; const auto * const points = (float *) pts.data;
const auto inv_k11 = float(1 / K(0, 0)); const auto * const k = (double *) K.data;
const auto inv_k12 = float(-K(0, 1) / (K(0, 0)*K(1, 1))); const auto inv_k11 = float(1 / k[0]);
const auto inv_k13 = float((-K(0, 2)*K(1, 1) + K(0, 1)*K(1, 2)) / (K(0, 0)*K(1, 1))); const auto inv_k12 = float(-k[1] / (k[0]*k[4]));
const auto inv_k22 = float(1 / K(1, 1)); const auto inv_k13 = float((-k[2]*k[4] + k[1]*k[5]) / (k[0]*k[4]));
const auto inv_k23 = float(-K(1, 2) / K(1, 1)); const auto inv_k22 = float(1 / k[4]);
const auto inv_k23 = float(-k[5] / k[4]);
calib_norm_pts = Mat (pts.rows, 3, pts.type()); calib_norm_pts = Mat (pts.rows, 3, pts.type());
auto * calib_norm_pts_ = (float *) calib_norm_pts.data; auto * calib_norm_pts_ = (float *) calib_norm_pts.data;
@ -73,9 +302,10 @@ void Utils::calibrateAndNormalizePointsPnP (const Matx33d &K, const Mat &pts, Ma
} }
} }
void Utils::normalizeAndDecalibPointsPnP (const Matx33d &K_, Mat &pts, Mat &calib_norm_pts) { void Utils::normalizeAndDecalibPointsPnP (const Mat &K_, Mat &pts, Mat &calib_norm_pts) {
const auto k11 = (float)K_(0, 0), k12 = (float)K_(0, 1), k13 = (float)K_(0, 2), const auto * const K = (double *) K_.data;
k22 = (float)K_(1, 1), k23 = (float)K_(1, 2); const auto k11 = (float)K[0], k12 = (float)K[1], k13 = (float)K[2],
k22 = (float)K[4], k23 = (float)K[5];
calib_norm_pts = Mat (pts.rows, 3, pts.type()); calib_norm_pts = Mat (pts.rows, 3, pts.type());
auto * points = (float *) pts.data; auto * points = (float *) pts.data;
auto * calib_norm_pts_ = (float *) calib_norm_pts.data; auto * calib_norm_pts_ = (float *) calib_norm_pts.data;
@ -98,25 +328,95 @@ void Utils::normalizeAndDecalibPointsPnP (const Matx33d &K_, Mat &pts, Mat &cali
* 0 fy ty * 0 fy ty
* 0 0 1] * 0 0 1]
*/ */
void Utils::decomposeProjection (const Mat &P, Matx33d &K_, Mat &R, Mat &t, bool same_focal) { void Utils::decomposeProjection (const Mat &P, Matx33d &K, Matx33d &R, Vec3d &t, bool same_focal) {
const Mat M = P.colRange(0,3); const Matx33d M = P.colRange(0,3);
double scale = norm(M.row(2)); scale *= scale; double scale = norm(M.row(2)); scale *= scale;
K_ = Matx33d::eye(); K = Matx33d::eye();
K_(1,2) = M.row(1).dot(M.row(2)) / scale; K(1,2) = M.row(1).dot(M.row(2)) / scale;
K_(0,2) = M.row(0).dot(M.row(2)) / scale; K(0,2) = M.row(0).dot(M.row(2)) / scale;
K_(1,1) = sqrt(M.row(1).dot(M.row(1)) / scale - K_(1,2)*K_(1,2)); K(1,1) = sqrt(M.row(1).dot(M.row(1)) / scale - K(1,2)*K(1,2));
K_(0,0) = sqrt(M.row(0).dot(M.row(0)) / scale - K_(0,2)*K_(0,2)); K(0,0) = sqrt(M.row(0).dot(M.row(0)) / scale - K(0,2)*K(0,2));
if (same_focal) if (same_focal)
K_(0,0) = K_(1,1) = (K_(0,0) + K_(1,1)) / 2; K(0,0) = K(1,1) = (K(0,0) + K(1,1)) / 2;
R = K_.inv() * M / sqrt(scale); R = K.inv() * M / sqrt(scale);
if (determinant(M) < 0) R *= -1; if (determinant(M) < 0) R *= -1;
t = R * M.inv() * P.col(3); t = R * M.inv() * Vec3d(P.col(3));
}
double Utils::getPoissonCDF (double lambda, int inliers) {
double exp_lambda = exp(-lambda), cdf = exp_lambda, lambda_i_div_fact_i = 1;
for (int i = 1; i <= inliers; i++) {
lambda_i_div_fact_i *= (lambda / i);
cdf += exp_lambda * lambda_i_div_fact_i;
if (fabs(cdf - 1) < DBL_EPSILON) // cdf is almost 1
break;
}
return cdf;
}
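`getPoissonCDF` accumulates P(X <= k) for X ~ Poisson(lambda) via term_i = term_{i-1} * lambda / i, avoiding explicit factorials. A standalone check with our own `poissonCDF` helper:

```cpp
#include <cmath>
#include <cstdio>

static double poissonCDF(double lambda, int k) {
    double term = 1.0, cdf = 1.0;            // i = 0 term
    for (int i = 1; i <= k; i++) {
        term *= lambda / i;                  // lambda^i / i!
        cdf += term;
    }
    return std::exp(-lambda) * cdf;
}

int main() {
    std::printf("%.6f\n", poissonCDF(3.0, 2));  // 0.423190 = e^-3 * (1 + 3 + 4.5)
}
```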
// since F(E) has rank 2 we use cross product to compute epipole,
// since the third column / row is linearly dependent on two first
// this is faster than SVD
// It is recommended to normalize F, such that ||F|| = 1
Vec3d Utils::getLeftEpipole (const Mat &F/*E*/) {
Vec3d _e = F.col(0).cross(F.col(2)); // F^T e' = 0; e'^T F = 0
const auto * const e = _e.val;
if (e[0] <= DBL_EPSILON && e[0] > -DBL_EPSILON &&
e[1] <= DBL_EPSILON && e[1] > -DBL_EPSILON &&
e[2] <= DBL_EPSILON && e[2] > -DBL_EPSILON)
_e = Vec3d(Mat(F.col(1))).cross(F.col(2)); // if e' is zero
return _e; // e'
}
Vec3d Utils::getRightEpipole (const Mat &F/*E*/) {
Vec3d _e = F.row(0).cross(F.row(2)); // Fe = 0
const auto * const e = _e.val;
if (e[0] <= DBL_EPSILON && e[0] > -DBL_EPSILON &&
e[1] <= DBL_EPSILON && e[1] > -DBL_EPSILON &&
e[2] <= DBL_EPSILON && e[2] > -DBL_EPSILON)
_e = F.row(1).cross(F.row(2)); // if e is zero
return _e;
}
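A sketch of the rank-2 cross-product trick used by `getRightEpipole`/`getLeftEpipole`: the right epipole e satisfies F e = 0, so it can be taken as the cross product of two linearly independent rows of F (with a fallback to another row pair if the product is ~0). The matrix below is a toy skew-symmetric example, not a real fundamental matrix:

```cpp
#include <opencv2/core.hpp>
#include <cstdio>

int main() {
    const cv::Matx33d F( 0, -1,  2,
                         1,  0, -3,
                        -2,  3,  0);          // skew-symmetric => F * (3,2,1)^T = 0
    const cv::Vec3d row0(F(0,0), F(0,1), F(0,2)), row2(F(2,0), F(2,1), F(2,2));
    const cv::Vec3d e = row0.cross(row2);     // null vector of the rank-2 matrix
    std::printf("||F e|| = %g\n", cv::norm(F * e));  // ~0
}
```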
void Utils::densitySort (const Mat &points, int knn, Mat &sorted_points, std::vector<int> &sorted_mask) {
// mask of sorted points (array of indexes)
const int points_size = points.rows, dim = points.cols;
sorted_mask = std::vector<int >(points_size);
for (int i = 0; i < points_size; i++)
sorted_mask[i] = i;
// get neighbors
FlannNeighborhoodGraph &graph = *FlannNeighborhoodGraph::create(points, points_size, knn,
true /*get distances */, 6, 1);
std::vector<double> sum_knn_distances (points_size, 0);
for (int p = 0; p < points_size; p++) {
const std::vector<double> &dists = graph.getNeighborsDistances(p);
for (int k = 0; k < knn; k++)
sum_knn_distances[p] += dists[k];
}
// compare by sum of distances to k nearest neighbors.
std::sort(sorted_mask.begin(), sorted_mask.end(), [&](int a, int b) {
return sum_knn_distances[a] < sum_knn_distances[b];
});
// copy array of points to array with sorted points
// using @sorted_idx mask of sorted points indexes
sorted_points = Mat(points_size, dim, points.type());
const auto * const points_ptr = (float *) points.data;
auto * spoints_ptr = (float *) sorted_points.data;
for (int i = 0; i < points_size; i++) {
const int pt2 = sorted_mask[i] * dim;
for (int j = 0; j < dim; j++)
(*spoints_ptr++) = points_ptr[pt2+j];
}
} }
Matx33d Math::getSkewSymmetric(const Vec3d &v) { Matx33d Math::getSkewSymmetric(const Vec3d &v) {
return Matx33d(0, -v[2], v[1], return {0, -v[2], v[1],
v[2], 0, -v[0], v[2], 0, -v[0],
-v[1], v[0], 0); -v[1], v[0], 0};
} }
Matx33d Math::rotVec2RotMat (const Vec3d &v) { Matx33d Math::rotVec2RotMat (const Vec3d &v) {
@ -124,9 +424,9 @@ Matx33d Math::rotVec2RotMat (const Vec3d &v) {
const double x = v[0] / phi, y = v[1] / phi, z = v[2] / phi; const double x = v[0] / phi, y = v[1] / phi, z = v[2] / phi;
const double a = std::sin(phi), b = std::cos(phi); const double a = std::sin(phi), b = std::cos(phi);
// R = I + sin(phi) * skew(v) + (1 - cos(phi)) * skew(v)^2 // R = I + sin(phi) * skew(v) + (1 - cos(phi)) * skew(v)^2
return Matx33d((b - 1)*y*y + (b - 1)*z*z + 1, -a*z - x*y*(b - 1), a*y - x*z*(b - 1), return {(b - 1)*y*y + (b - 1)*z*z + 1, -a*z - x*y*(b - 1), a*y - x*z*(b - 1),
a*z - x*y*(b - 1), (b - 1)*x*x + (b - 1)*z*z + 1, -a*x - y*z*(b - 1), a*z - x*y*(b - 1), (b - 1)*x*x + (b - 1)*z*z + 1, -a*x - y*z*(b - 1),
-a*y - x*z*(b - 1), a*x - y*z*(b - 1), (b - 1)*x*x + (b - 1)*y*y + 1); -a*y - x*z*(b - 1), a*x - y*z*(b - 1), (b - 1)*x*x + (b - 1)*y*y + 1};
} }
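`rotVec2RotMat` implements the Rodrigues formula R = I + sin(phi) * [u]_x + (1 - cos(phi)) * [u]_x^2, with u the unit rotation axis. A standalone check under the same convention (the test vector and the orthogonality/determinant checks are ours):

```cpp
#include <opencv2/core.hpp>
#include <cmath>
#include <cstdio>

int main() {
    const cv::Vec3d v(0.1, -0.2, 0.3);                  // rotation vector
    const double phi = cv::norm(v);
    const cv::Vec3d u = v / phi;                        // unit axis
    const cv::Matx33d skew( 0,   -u[2],  u[1],
                            u[2], 0,    -u[0],
                           -u[1], u[0],  0);
    const cv::Matx33d R = cv::Matx33d::eye() + std::sin(phi) * skew
                        + (1 - std::cos(phi)) * (skew * skew);
    // a rotation matrix satisfies R^T R = I and det(R) = 1
    std::printf("||R^T R - I|| = %g, det = %g\n",
                cv::norm(R.t() * R - cv::Matx33d::eye()), cv::determinant(R));
}
```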
Vec3d Math::rotMat2RotVec (const Matx33d &R) { Vec3d Math::rotMat2RotVec (const Matx33d &R) {
@ -140,8 +440,8 @@ Vec3d Math::rotMat2RotVec (const Matx33d &R) {
} else if (3 - FLT_EPSILON > trace && trace > -1 + FLT_EPSILON) { } else if (3 - FLT_EPSILON > trace && trace > -1 + FLT_EPSILON) {
double theta = std::acos((trace - 1) / 2); double theta = std::acos((trace - 1) / 2);
rot_vec = (theta / (2 * std::sin(theta))) * Vec3d(R(2,1)-R(1,2), rot_vec = (theta / (2 * std::sin(theta))) * Vec3d(R(2,1)-R(1,2),
R(0,2)-R(2,0), R(0,2)-R(2,0),
R(1,0)-R(0,1)); R(1,0)-R(0,1));
} else { } else {
int a; int a;
if (R(0,0) > R(1,1)) if (R(0,0) > R(1,1))
@ -176,7 +476,7 @@ bool Math::eliminateUpperTriangular (std::vector<double> &a, int m, int n) {
// if pivot value is 0 continue // if pivot value is 0 continue
if (fabs(pivot) < DBL_EPSILON) if (fabs(pivot) < DBL_EPSILON)
return false; // matrix is not full rank -> terminate continue;
// swap row with maximum pivot value with current row // swap row with maximum pivot value with current row
for (int c = r; c < n; c++) for (int c = r; c < n; c++)
@ -194,6 +494,18 @@ bool Math::eliminateUpperTriangular (std::vector<double> &a, int m, int n) {
return true; return true;
} }
double Utils::intersectionOverUnion (const std::vector<bool> &a, const std::vector<bool> &b) {
int intersects = 0, unions = 0;
for (int i = 0; i < (int)a.size(); i++)
if (a[i] || b[i]) {
unions++; // one value is true
if (a[i] && b[i])
intersects++; // a[i] == b[i] and if they both true
}
if (unions == 0) return 0.0;
return (double) intersects / unions;
}
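A tiny worked example of the mask IoU helper above; `iou` is our re-implementation of the same loop:

```cpp
#include <cstdio>
#include <vector>

static double iou(const std::vector<bool> &a, const std::vector<bool> &b) {
    int inter = 0, uni = 0;
    for (size_t i = 0; i < a.size(); i++) {
        if (a[i] || b[i]) uni++;
        if (a[i] && b[i]) inter++;
    }
    return uni ? double(inter) / uni : 0.0;
}

int main() {
    // intersection = 2, union = 4 -> 0.5
    std::printf("%g\n", iou({true, true, false, true,  false},
                            {true, true, true,  false, false}));
}
```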
//////////////////////////////////////// RANDOM GENERATOR ///////////////////////////// //////////////////////////////////////// RANDOM GENERATOR /////////////////////////////
class UniformRandomGeneratorImpl : public UniformRandomGenerator { class UniformRandomGeneratorImpl : public UniformRandomGenerator {
private: private:
@ -209,15 +521,12 @@ public:
max_range = max_range_; max_range = max_range_;
subset = std::vector<int>(subset_size_); subset = std::vector<int>(subset_size_);
} }
int getRandomNumber () override { int getRandomNumber () override {
return rng.uniform(0, max_range); return rng.uniform(0, max_range);
} }
int getRandomNumber (int max_rng) override { int getRandomNumber (int max_rng) override {
return rng.uniform(0, max_rng); return rng.uniform(0, max_rng);
} }
// closed range // closed range
void resetGenerator (int max_range_) override { void resetGenerator (int max_range_) override {
CV_CheckGE(0, max_range_, "max range must be greater than 0"); CV_CheckGE(0, max_range_, "max range must be greater than 0");
@ -243,7 +552,6 @@ public:
// interval is <0; max_range) // interval is <0; max_range)
void generateUniqueRandomSet (std::vector<int>& sample, int max_range_) override { void generateUniqueRandomSet (std::vector<int>& sample, int max_range_) override {
/* /*
* necessary condition:
* if subset size is bigger than range then array cannot be unique, * if subset size is bigger than range then array cannot be unique,
* so function has infinite loop. * so function has infinite loop.
*/ */
@ -282,14 +590,12 @@ public:
} }
return subset; return subset;
} }
void setSubsetSize (int subset_size_) override { void setSubsetSize (int subset_size_) override {
if (subset_size < subset_size_)
subset.resize(subset_size_);
subset_size = subset_size_; subset_size = subset_size_;
} }
int getSubsetSize () const override { return subset_size; } int getSubsetSize () const override { return subset_size; }
Ptr<RandomGenerator> clone (int state) const override {
return makePtr<UniformRandomGeneratorImpl>(state, max_range, subset_size);
}
}; };
Ptr<UniformRandomGenerator> UniformRandomGenerator::create (int state) { Ptr<UniformRandomGenerator> UniformRandomGenerator::create (int state) {
@ -338,19 +644,17 @@ float Utils::findMedian (std::vector<float> &array) {
} else { } else {
// even: return average // even: return average
return (quicksort_median(array, length/2 , 0, length-1) + return (quicksort_median(array, length/2 , 0, length-1) +
quicksort_median(array, length/2+1, 0, length-1))/2; quicksort_median(array, length/2+1, 0, length-1))*.5f;
} }
} }
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Radius Search Graph ///////////////////////////////////////////// ///////////////////////////////// Radius Search Graph /////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
class RadiusSearchNeighborhoodGraphImpl : public RadiusSearchNeighborhoodGraph { class RadiusSearchNeighborhoodGraphImpl : public RadiusSearchNeighborhoodGraph {
private: private:
std::vector<std::vector<int>> graph; std::vector<std::vector<int>> graph;
public: public:
RadiusSearchNeighborhoodGraphImpl (const Mat &container_, int points_size, RadiusSearchNeighborhoodGraphImpl (const Mat &container_, int points_size,
double radius, int flann_search_params, int num_kd_trees) { double radius, int flann_search_params, int num_kd_trees) {
// Radius search OpenCV works only with float data // Radius search OpenCV works only with float data
CV_Assert(container_.type() == CV_32F); CV_Assert(container_.type() == CV_32F);
@ -363,6 +667,8 @@ public:
int pt = 0; int pt = 0;
for (const auto &n : neighbours) { for (const auto &n : neighbours) {
if (n.size() <= 1)
continue;
auto &graph_row = graph[pt]; auto &graph_row = graph[pt];
graph_row = std::vector<int>(n.size()-1); graph_row = std::vector<int>(n.size()-1);
int j = 0; int j = 0;
@ -373,7 +679,7 @@ public:
pt++; pt++;
} }
} }
const std::vector<std::vector<int>> &getGraph () const override { return graph; }
inline const std::vector<int> &getNeighbors(int point_idx) const override { inline const std::vector<int> &getNeighbors(int point_idx) const override {
return graph[point_idx]; return graph[point_idx];
} }
@ -384,9 +690,7 @@ Ptr<RadiusSearchNeighborhoodGraph> RadiusSearchNeighborhoodGraph::create (const
flann_search_params, num_kd_trees); flann_search_params, num_kd_trees);
} }
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// FLANN Graph ///////////////////////////////////////////// ///////////////////////////////// FLANN Graph /////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
class FlannNeighborhoodGraphImpl : public FlannNeighborhoodGraph { class FlannNeighborhoodGraphImpl : public FlannNeighborhoodGraph {
private: private:
std::vector<std::vector<int>> graph; std::vector<std::vector<int>> graph;
@ -425,6 +729,7 @@ public:
const std::vector<double>& getNeighborsDistances (int idx) const override { const std::vector<double>& getNeighborsDistances (int idx) const override {
return distances[idx]; return distances[idx];
} }
const std::vector<std::vector<int>> &getGraph () const override { return graph; }
inline const std::vector<int> &getNeighbors(int point_idx) const override { inline const std::vector<int> &getNeighbors(int point_idx) const override {
// CV_Assert(point_idx_ < num_vertices); // CV_Assert(point_idx_ < num_vertices);
return graph[point_idx]; return graph[point_idx];
@ -438,9 +743,7 @@ Ptr<FlannNeighborhoodGraph> FlannNeighborhoodGraph::create(const Mat &points,
k_nearest_neighbors_, get_distances, flann_search_params_, num_kd_trees); k_nearest_neighbors_, get_distances, flann_search_params_, num_kd_trees);
} }
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Grid Neighborhood Graph ///////////////////////////////////////// ///////////////////////////////// Grid Neighborhood Graph /////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
class GridNeighborhoodGraphImpl : public GridNeighborhoodGraph { class GridNeighborhoodGraphImpl : public GridNeighborhoodGraph {
private: private:
// This struct is used for the nearest neighbors search by griding two images. // This struct is used for the nearest neighbors search by griding two images.
@ -460,13 +763,13 @@ private:
} }
}; };
std::map<CellCoord, std::vector<int >> neighbors_map;
std::vector<std::vector<int>> graph; std::vector<std::vector<int>> graph;
public: public:
GridNeighborhoodGraphImpl (const Mat &container_, int points_size, GridNeighborhoodGraphImpl (const Mat &container_, int points_size,
int cell_size_x_img1, int cell_size_y_img1, int cell_size_x_img2, int cell_size_y_img2, int cell_size_x_img1, int cell_size_y_img1, int cell_size_x_img2, int cell_size_y_img2,
int max_neighbors) { int max_neighbors) {
std::map<CellCoord, std::vector<int >> neighbors_map;
const auto * const container = (float *) container_.data; const auto * const container = (float *) container_.data;
// <int, int, int, int> -> {neighbors set} // <int, int, int, int> -> {neighbors set}
// Key is cell position. The value is indexes of neighbors. // Key is cell position. The value is indexes of neighbors.
@ -490,7 +793,6 @@ public:
// store neighbors cells into graph (2D vector) // store neighbors cells into graph (2D vector)
for (const auto &cell : neighbors_map) { for (const auto &cell : neighbors_map) {
const int neighbors_in_cell = static_cast<int>(cell.second.size()); const int neighbors_in_cell = static_cast<int>(cell.second.size());
// only one point in cell -> no neighbors // only one point in cell -> no neighbors
if (neighbors_in_cell < 2) continue; if (neighbors_in_cell < 2) continue;
@ -510,7 +812,7 @@ public:
} }
} }
} }
const std::vector<std::vector<int>> &getGraph () const override { return graph; }
inline const std::vector<int> &getNeighbors(int point_idx) const override { inline const std::vector<int> &getNeighbors(int point_idx) const override {
// Note, neighbors vector also includes point_idx! // Note, neighbors vector also includes point_idx!
// return neighbors_map[vertices_to_cells[point_idx]]; // return neighbors_map[vertices_to_cells[point_idx]];

@ -228,7 +228,7 @@ TEST_F(SacSegmentationTest, PlaneSacSegmentation)
pt_cloud.release(); pt_cloud.release();
sacSegmentation->setCustomModelConstraints(nullptr); sacSegmentation->setCustomModelConstraints(nullptr);
sacSegmentation->setParallel(true); // sacSegmentation->setParallel(true); // parallel version is not deterministic and should be initialized differently
// Multi-plane segmentation // Multi-plane segmentation
for (int i = 0; i < models_num; i++) for (int i = 0; i < models_num; i++)
@ -298,7 +298,7 @@ TEST_F(SacSegmentationTest, SphereSacSegmentation)
pt_cloud.release(); pt_cloud.release();
sacSegmentation->setCustomModelConstraints(nullptr); sacSegmentation->setCustomModelConstraints(nullptr);
sacSegmentation->setParallel(true); // sacSegmentation->setParallel(true); // parallel version is not deterministic and should be initialized differently
// Multi-sphere segmentation // Multi-sphere segmentation
for (int i = 0; i < models_num; i++) for (int i = 0; i < models_num; i++)

@ -181,12 +181,9 @@ static double getError (TestSolver test_case, int pt_idx, const cv::Mat &pts1, c
if (test_case == TestSolver::Fundam || test_case == TestSolver::Essen) { if (test_case == TestSolver::Fundam || test_case == TestSolver::Essen) {
cv::Mat l2 = model * pt1; cv::Mat l2 = model * pt1;
cv::Mat l1 = model.t() * pt2; cv::Mat l1 = model.t() * pt2;
if (test_case == TestSolver::Fundam) // sampson error // Sampson error
return fabs(pt2.dot(l2)) / sqrt(pow(l1.at<double>(0), 2) + pow(l1.at<double>(1), 2) + return fabs(pt2.dot(l2)) / sqrt(pow(l1.at<double>(0), 2) + pow(l1.at<double>(1), 2) +
pow(l2.at<double>(0), 2) + pow(l2.at<double>(1), 2)); pow(l2.at<double>(0), 2) + pow(l2.at<double>(1), 2));
else // symmetric geometric distance
return sqrt(pow(pt1.dot(l1),2) / (pow(l1.at<double>(0),2) + pow(l1.at<double>(1),2)) +
pow(pt2.dot(l2),2) / (pow(l2.at<double>(0),2) + pow(l2.at<double>(1),2)));
} else } else
if (test_case == TestSolver::PnP) { // PnP, reprojection error if (test_case == TestSolver::PnP) { // PnP, reprojection error
cv::Mat img_pt = model * pt2; img_pt /= img_pt.at<double>(2); cv::Mat img_pt = model * pt2; img_pt /= img_pt.at<double>(2);
@ -340,7 +337,7 @@ TEST_P(usac_Essential, maxiters) {
cv::Mat K1 = cv::Mat(cv::Matx33d(1, 0, 0, cv::Mat K1 = cv::Mat(cv::Matx33d(1, 0, 0,
0, 1, 0, 0, 1, 0,
0, 0, 1.)); 0, 0, 1.));
const double conf = 0.99, thr = 0.5; const double conf = 0.99, thr = 0.25;
int roll_results_sum = 0; int roll_results_sum = 0;
for (int iters = 0; iters < 10; iters++) { for (int iters = 0; iters < 10; iters++) {
@ -361,8 +358,8 @@ TEST_P(usac_Essential, maxiters) {
FAIL() << "Essential matrix estimation failed!\n"; FAIL() << "Essential matrix estimation failed!\n";
else continue; else continue;
} }
EXPECT_NE(roll_results_sum, 0);
} }
EXPECT_NE(roll_results_sum, 0);
} }
INSTANTIATE_TEST_CASE_P(Calib3d, usac_Essential, UsacMethod::all()); INSTANTIATE_TEST_CASE_P(Calib3d, usac_Essential, UsacMethod::all());
@ -416,7 +413,7 @@ TEST (usac_Affine2D, accuracy) {
TEST(usac_testUsacParams, accuracy) { TEST(usac_testUsacParams, accuracy) {
std::vector<int> gt_inliers; std::vector<int> gt_inliers;
const int pts_size = 150000; const int pts_size = 1500;
cv::RNG &rng = cv::theRNG(); cv::RNG &rng = cv::theRNG();
const cv::UsacParams usac_params = cv::UsacParams(); const cv::UsacParams usac_params = cv::UsacParams();
cv::Mat pts1, pts2, K1, K2, mask, model, rvec, tvec, R; cv::Mat pts1, pts2, K1, K2, mask, model, rvec, tvec, R;
