now all the samples and opencv_contrib compile!

pull/3339/head
Vadim Pisarevsky 10 years ago
parent 5e667ee53a
commit d017faa06c
Changed files (changed lines):
  1. modules/features2d/include/opencv2/features2d.hpp (4)
  2. modules/features2d/src/mser.cpp (32)
  3. modules/stitching/src/matchers.cpp (2)
  4. samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h (18)
  5. samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp (173)
  6. samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_registration.cpp (107)
  7. samples/cpp/tutorial_code/features2D/AKAZE_match.cpp (6)
  8. samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp (8)
  9. samples/cpp/videostab.cpp (4)

@@ -197,6 +197,10 @@ public:
+    CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label,
+                                        OutputArray stats=noArray() ) = 0;
+    CV_WRAP virtual void detectAndStore( InputArray image,
+                                         std::vector<std::vector<Point> >& msers,
+                                         OutputArray stats=noArray() ) = 0;
 };
 //! detects corners using FAST algorithm by E. Rosten

@@ -301,6 +301,9 @@ public:
 };
 int detectAndLabel( InputArray _src, OutputArray _labels, OutputArray _bboxes );
+void detectAndStore( InputArray image,
+                     std::vector<std::vector<Point> >& msers,
+                     OutputArray stats );
 void detect( InputArray _src, vector<KeyPoint>& keypoints, InputArray _mask );
 void preprocess1( const Mat& img, int* level_size )
@@ -955,6 +958,35 @@ int MSER_Impl::detectAndLabel( InputArray _src, OutputArray _labels, OutputArray
     return (int)bboxvec.size();
 }
+void MSER_Impl::detectAndStore( InputArray image,
+                                std::vector<std::vector<Point> >& msers,
+                                OutputArray stats )
+{
+    vector<Rect> bboxvec;
+    Mat labels;
+    int i, x, y, nregs = detectAndLabel(image, labels, bboxvec);
+    msers.resize(nregs);
+    for( i = 0; i < nregs; i++ )
+    {
+        Rect r = bboxvec[i];
+        vector<Point>& msers_i = msers[i];
+        msers_i.clear();
+        for( y = r.y; y < r.y + r.height; y++ )
+        {
+            const int* lptr = labels.ptr<int>(y);
+            for( x = r.x; x < r.x + r.width; x++ )
+            {
+                if( lptr[x] == i+1 )
+                    msers_i.push_back(Point(x, y));
+            }
+        }
+    }
+    if( stats.needed() )
+        Mat(bboxvec).copyTo(stats);
+}
 void MSER_Impl::detect( InputArray _image, vector<KeyPoint>& keypoints, InputArray _mask )
 {
     vector<Rect> bboxes;
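
Note: based on the signatures added above, a minimal sketch of how the new MSER batch API might be driven from user code. The image path and scaffolding are placeholders, and later OpenCV releases reworked this interface, so this reflects the API as of this commit only:

    #include <opencv2/core.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/features2d.hpp>
    #include <cstdio>
    #include <vector>

    int main()
    {
        cv::Mat gray = cv::imread("scene.png", cv::IMREAD_GRAYSCALE); // placeholder input
        if( gray.empty() ) return 1;

        cv::Ptr<cv::MSER> mser = cv::MSER::create();

        std::vector<std::vector<cv::Point> > regions; // one point list per region
        cv::Mat boxes;                                // per-region bounding rects (the "stats" output)
        mser->detectAndStore(gray, regions, boxes);

        std::printf("found %d MSER regions\n", (int)regions.size());
        return 0;
    }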

@@ -48,8 +48,6 @@ using namespace cv::cuda;
 #ifdef HAVE_OPENCV_XFEATURES2D
 #include "opencv2/xfeatures2d.hpp"
-static bool makeUseOfXfeatures2d = xfeatures2d::initModule_xfeatures2d();
 #endif
 namespace {
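
Note: the deleted line reflects the 3.0 move away from the Algorithm registry; initModule_*() calls are no longer needed because algorithms are obtained through explicit create() factories. A minimal sketch, assuming opencv_contrib's xfeatures2d module is built and linked:

    #include <opencv2/features2d.hpp>
    #include <opencv2/xfeatures2d.hpp>

    cv::Ptr<cv::Feature2D> makeSurf()
    {
        // No registration side effect is required any more:
        return cv::xfeatures2d::SURF::create();
    }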

@@ -19,23 +19,23 @@ public:
     RobustMatcher() : ratio_(0.8f)
     {
         // ORB is the default feature
-        detector_ = new cv::OrbFeatureDetector();
-        extractor_ = new cv::OrbDescriptorExtractor();
+        detector_ = cv::ORB::create();
+        extractor_ = cv::ORB::create();
         // BruteFroce matcher with Norm Hamming is the default matcher
-        matcher_ = new cv::BFMatcher(cv::NORM_HAMMING, false);
+        matcher_ = cv::makePtr<cv::BFMatcher>(cv::NORM_HAMMING, false);
     }
     virtual ~RobustMatcher();
     // Set the feature detector
-    void setFeatureDetector(cv::FeatureDetector * detect) { detector_ = detect; }
+    void setFeatureDetector(const cv::Ptr<cv::FeatureDetector>& detect) { detector_ = detect; }
     // Set the descriptor extractor
-    void setDescriptorExtractor(cv::DescriptorExtractor * desc) { extractor_ = desc; }
+    void setDescriptorExtractor(const cv::Ptr<cv::DescriptorExtractor>& desc) { extractor_ = desc; }
     // Set the matcher
-    void setDescriptorMatcher(cv::DescriptorMatcher * match) { matcher_ = match; }
+    void setDescriptorMatcher(const cv::Ptr<cv::DescriptorMatcher>& match) { matcher_ = match; }
     // Compute the keypoints of an image
     void computeKeyPoints( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints);
@@ -69,11 +69,11 @@ public:
 private:
     // pointer to the feature point detector object
-    cv::FeatureDetector * detector_;
+    cv::Ptr<cv::FeatureDetector> detector_;
     // pointer to the feature descriptor extractor object
-    cv::DescriptorExtractor * extractor_;
+    cv::Ptr<cv::DescriptorExtractor> extractor_;
     // pointer to the matcher object
-    cv::DescriptorMatcher * matcher_;
+    cv::Ptr<cv::DescriptorMatcher> matcher_;
     // max ratio between 1st and 2nd NN
     float ratio_;
 };
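
Note: with the setters now taking cv::Ptr, ownership is shared and the caller never deletes the objects. A minimal usage sketch against the class as declared above (RobustMatcher is the tutorial's own helper; configureMatcher is a hypothetical caller):

    #include <opencv2/features2d.hpp>
    #include "RobustMatcher.h"

    void configureMatcher(RobustMatcher& rm)
    {
        // One ORB instance can serve as both detector and extractor,
        // since cv::ORB implements the whole Feature2D interface.
        cv::Ptr<cv::ORB> orb = cv::ORB::create();
        rm.setFeatureDetector(orb);
        rm.setDescriptorExtractor(orb);
        rm.setDescriptorMatcher(cv::makePtr<cv::BFMatcher>(cv::NORM_HAMMING, false));
    }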

@@ -18,11 +18,14 @@
 /** GLOBAL VARIABLES **/
-std::string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
+using namespace cv;
+using namespace std;
-std::string video_read_path = tutorial_path + "Data/box.mp4"; // recorded video
-std::string yml_read_path = tutorial_path + "Data/cookies_ORB.yml"; // 3dpts + descriptors
-std::string ply_read_path = tutorial_path + "Data/box.ply"; // mesh
+string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
+string video_read_path = tutorial_path + "Data/box.mp4"; // recorded video
+string yml_read_path = tutorial_path + "Data/cookies_ORB.yml"; // 3dpts + descriptors
+string ply_read_path = tutorial_path + "Data/box.ply"; // mesh
 // Intrinsic camera parameters: UVC WEBCAM
 double f = 55; // focal length in mm
@@ -35,15 +38,15 @@ double params_WEBCAM[] = { width*f/sx, // fx
                            height/2}; // cy
 // Some basic colors
-cv::Scalar red(0, 0, 255);
-cv::Scalar green(0,255,0);
-cv::Scalar blue(255,0,0);
-cv::Scalar yellow(0,255,255);
+Scalar red(0, 0, 255);
+Scalar green(0,255,0);
+Scalar blue(255,0,0);
+Scalar yellow(0,255,255);
 // Robust Matcher parameters
 int numKeyPoints = 2000; // number of detected keypoints
-float ratio = 0.70f; // ratio test
+float ratioTest = 0.70f; // ratio test
 bool fast_match = true; // fastRobustMatch() or robustMatch()
 // RANSAC parameters
@@ -55,16 +58,16 @@ double confidence = 0.95; // ransac successful confidence.
 int minInliersKalman = 30; // Kalman threshold updating
 // PnP parameters
-int pnpMethod = cv::SOLVEPNP_ITERATIVE;
+int pnpMethod = SOLVEPNP_ITERATIVE;
 /** Functions headers **/
 void help();
-void initKalmanFilter( cv::KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt);
-void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurements,
-                         cv::Mat &translation_estimated, cv::Mat &rotation_estimated );
-void fillMeasurements( cv::Mat &measurements,
-                       const cv::Mat &translation_measured, const cv::Mat &rotation_measured);
+void initKalmanFilter( KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt);
+void updateKalmanFilter( KalmanFilter &KF, Mat &measurements,
+                         Mat &translation_estimated, Mat &rotation_estimated );
+void fillMeasurements( Mat &measurements,
+                       const Mat &translation_measured, const Mat &rotation_measured);
 /** Main program **/
@@ -73,7 +76,7 @@ int main(int argc, char *argv[])
     help();
-    const cv::String keys =
+    const String keys =
     "{help h | | print this message }"
    "{video v | | path to recorded video }"
    "{model | | path to yml model }"
@@ -87,7 +90,7 @@ int main(int argc, char *argv[])
    "{method pnp |0 | PnP method: (0) ITERATIVE - (1) EPNP - (2) P3P - (3) DLS}"
    "{fast f |true | use of robust fast match }"
    ;
-    cv::CommandLineParser parser(argc, argv, keys);
+    CommandLineParser parser(argc, argv, keys);
     if (parser.has("help"))
     {
@@ -96,11 +99,11 @@ int main(int argc, char *argv[])
     }
     else
     {
-        video_read_path = parser.get<std::string>("video").size() > 0 ? parser.get<std::string>("video") : video_read_path;
-        yml_read_path = parser.get<std::string>("model").size() > 0 ? parser.get<std::string>("model") : yml_read_path;
-        ply_read_path = parser.get<std::string>("mesh").size() > 0 ? parser.get<std::string>("mesh") : ply_read_path;
+        video_read_path = parser.get<string>("video").size() > 0 ? parser.get<string>("video") : video_read_path;
+        yml_read_path = parser.get<string>("model").size() > 0 ? parser.get<string>("model") : yml_read_path;
+        ply_read_path = parser.get<string>("mesh").size() > 0 ? parser.get<string>("mesh") : ply_read_path;
         numKeyPoints = !parser.has("keypoints") ? parser.get<int>("keypoints") : numKeyPoints;
-        ratio = !parser.has("ratio") ? parser.get<float>("ratio") : ratio;
+        ratioTest = !parser.has("ratio") ? parser.get<float>("ratio") : ratioTest;
         fast_match = !parser.has("fast") ? parser.get<bool>("fast") : fast_match;
         iterationsCount = !parser.has("iterations") ? parser.get<int>("iterations") : iterationsCount;
         reprojectionError = !parser.has("error") ? parser.get<float>("error") : reprojectionError;
@@ -120,45 +123,45 @@ int main(int argc, char *argv[])
     RobustMatcher rmatcher; // instantiate RobustMatcher
-    cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints); // instatiate ORB feature detector
-    cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor(); // instatiate ORB descriptor extractor
+    Ptr<FeatureDetector> orb = ORB::create();
-    rmatcher.setFeatureDetector(detector); // set feature detector
-    rmatcher.setDescriptorExtractor(extractor); // set descriptor extractor
+    rmatcher.setFeatureDetector(orb); // set feature detector
+    rmatcher.setDescriptorExtractor(orb); // set descriptor extractor
-    cv::Ptr<cv::flann::IndexParams> indexParams = cv::makePtr<cv::flann::LshIndexParams>(6, 12, 1); // instantiate LSH index parameters
-    cv::Ptr<cv::flann::SearchParams> searchParams = cv::makePtr<cv::flann::SearchParams>(50); // instantiate flann search parameters
+    Ptr<flann::IndexParams> indexParams = makePtr<flann::LshIndexParams>(6, 12, 1); // instantiate LSH index parameters
+    Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>(50); // instantiate flann search parameters
-    cv::DescriptorMatcher * matcher = new cv::FlannBasedMatcher(indexParams, searchParams); // instantiate FlannBased matcher
+    // instantiate FlannBased matcher
+    Ptr<DescriptorMatcher> matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
     rmatcher.setDescriptorMatcher(matcher); // set matcher
-    rmatcher.setRatio(ratio); // set ratio test parameter
+    rmatcher.setRatio(ratioTest); // set ratio test parameter
-    cv::KalmanFilter KF; // instantiate Kalman Filter
+    KalmanFilter KF; // instantiate Kalman Filter
     int nStates = 18; // the number of states
     int nMeasurements = 6; // the number of measured states
     int nInputs = 0; // the number of control actions
     double dt = 0.125; // time between measurements (1/FPS)
     initKalmanFilter(KF, nStates, nMeasurements, nInputs, dt); // init function
-    cv::Mat measurements(nMeasurements, 1, CV_64F); measurements.setTo(cv::Scalar(0));
+    Mat measurements(nMeasurements, 1, CV_64F); measurements.setTo(Scalar(0));
     bool good_measurement = false;
     // Get the MODEL INFO
-    std::vector<cv::Point3f> list_points3d_model = model.get_points3d(); // list with model 3D coordinates
-    cv::Mat descriptors_model = model.get_descriptors(); // list with descriptors of each 3D coordinate
+    vector<Point3f> list_points3d_model = model.get_points3d(); // list with model 3D coordinates
+    Mat descriptors_model = model.get_descriptors(); // list with descriptors of each 3D coordinate
     // Create & Open Window
-    cv::namedWindow("REAL TIME DEMO", cv::WINDOW_KEEPRATIO);
+    namedWindow("REAL TIME DEMO", WINDOW_KEEPRATIO);
-    cv::VideoCapture cap; // instantiate VideoCapture
+    VideoCapture cap; // instantiate VideoCapture
     cap.open(video_read_path); // open a recorded video
     if(!cap.isOpened()) // check if we succeeded
     {
-        std::cout << "Could not open the camera device" << std::endl;
+        cout << "Could not open the camera device" << endl;
         return -1;
     }
@@ -175,9 +178,9 @@ int main(int argc, char *argv[])
     // start the clock
     time(&start);
-    cv::Mat frame, frame_vis;
+    Mat frame, frame_vis;
-    while(cap.read(frame) && cv::waitKey(30) != 27) // capture frame until ESC is pressed
+    while(cap.read(frame) && waitKey(30) != 27) // capture frame until ESC is pressed
     {
         frame_vis = frame.clone(); // refresh visualisation frame
@@ -185,8 +188,8 @@ int main(int argc, char *argv[])
         // -- Step 1: Robust matching between model descriptors and scene descriptors
-        std::vector<cv::DMatch> good_matches; // to obtain the 3D points of the model
-        std::vector<cv::KeyPoint> keypoints_scene; // to obtain the 2D points of the scene
+        vector<DMatch> good_matches; // to obtain the 3D points of the model
+        vector<KeyPoint> keypoints_scene; // to obtain the 2D points of the scene
         if(fast_match)
@@ -201,13 +204,13 @@ int main(int argc, char *argv[])
         // -- Step 2: Find out the 2D/3D correspondences
-        std::vector<cv::Point3f> list_points3d_model_match; // container for the model 3D coordinates found in the scene
-        std::vector<cv::Point2f> list_points2d_scene_match; // container for the model 2D coordinates found in the scene
+        vector<Point3f> list_points3d_model_match; // container for the model 3D coordinates found in the scene
+        vector<Point2f> list_points2d_scene_match; // container for the model 2D coordinates found in the scene
         for(unsigned int match_index = 0; match_index < good_matches.size(); ++match_index)
         {
-            cv::Point3f point3d_model = list_points3d_model[ good_matches[match_index].trainIdx ]; // 3D point from model
-            cv::Point2f point2d_scene = keypoints_scene[ good_matches[match_index].queryIdx ].pt; // 2D point from the scene
+            Point3f point3d_model = list_points3d_model[ good_matches[match_index].trainIdx ]; // 3D point from model
+            Point2f point2d_scene = keypoints_scene[ good_matches[match_index].queryIdx ].pt; // 2D point from the scene
             list_points3d_model_match.push_back(point3d_model); // add 3D point
             list_points2d_scene_match.push_back(point2d_scene); // add 2D point
         }
@@ -216,8 +219,8 @@ int main(int argc, char *argv[])
         draw2DPoints(frame_vis, list_points2d_scene_match, red);
-        cv::Mat inliers_idx;
-        std::vector<cv::Point2f> list_points2d_inliers;
+        Mat inliers_idx;
+        vector<Point2f> list_points2d_inliers;
         if(good_matches.size() > 0) // None matches, then RANSAC crashes
         {
@@ -231,7 +234,7 @@ int main(int argc, char *argv[])
             for(int inliers_index = 0; inliers_index < inliers_idx.rows; ++inliers_index)
             {
                 int n = inliers_idx.at<int>(inliers_index); // i-inlier
-                cv::Point2f point2d = list_points2d_scene_match[n]; // i-inlier point 2D
+                Point2f point2d = list_points2d_scene_match[n]; // i-inlier point 2D
                 list_points2d_inliers.push_back(point2d); // add i-inlier to list
             }
@@ -248,11 +251,11 @@ int main(int argc, char *argv[])
             {
                 // Get the measured translation
-                cv::Mat translation_measured(3, 1, CV_64F);
+                Mat translation_measured(3, 1, CV_64F);
                 translation_measured = pnp_detection.get_t_matrix();
                 // Get the measured rotation
-                cv::Mat rotation_measured(3, 3, CV_64F);
+                Mat rotation_measured(3, 3, CV_64F);
                 rotation_measured = pnp_detection.get_R_matrix();
                 // fill the measurements vector
@@ -263,8 +266,8 @@ int main(int argc, char *argv[])
             }
             // Instantiate estimated translation and rotation
-            cv::Mat translation_estimated(3, 1, CV_64F);
-            cv::Mat rotation_estimated(3, 3, CV_64F);
+            Mat translation_estimated(3, 1, CV_64F);
+            Mat rotation_estimated(3, 3, CV_64F);
             // update the Kalman filter with good measurements
             updateKalmanFilter( KF, measurements,
@@ -288,11 +291,11 @@ int main(int argc, char *argv[])
         }
         float l = 5;
-        std::vector<cv::Point2f> pose_points2d;
-        pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,0,0))); // axis center
-        pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(l,0,0))); // axis x
-        pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,l,0))); // axis y
-        pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,0,l))); // axis z
+        vector<Point2f> pose_points2d;
+        pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,0,0))); // axis center
+        pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(l,0,0))); // axis x
+        pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,l,0))); // axis y
+        pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,0,l))); // axis z
         draw3DCoordinateAxes(frame_vis, pose_points2d); // draw axes
         // FRAME RATE
@@ -316,49 +319,49 @@ int main(int argc, char *argv[])
         // Draw some debug text
         int inliers_int = inliers_idx.rows;
         int outliers_int = (int)good_matches.size() - inliers_int;
-        std::string inliers_str = IntToString(inliers_int);
-        std::string outliers_str = IntToString(outliers_int);
-        std::string n = IntToString((int)good_matches.size());
-        std::string text = "Found " + inliers_str + " of " + n + " matches";
-        std::string text2 = "Inliers: " + inliers_str + " - Outliers: " + outliers_str;
+        string inliers_str = IntToString(inliers_int);
+        string outliers_str = IntToString(outliers_int);
+        string n = IntToString((int)good_matches.size());
+        string text = "Found " + inliers_str + " of " + n + " matches";
+        string text2 = "Inliers: " + inliers_str + " - Outliers: " + outliers_str;
         drawText(frame_vis, text, green);
         drawText2(frame_vis, text2, red);
-        cv::imshow("REAL TIME DEMO", frame_vis);
+        imshow("REAL TIME DEMO", frame_vis);
     }
     // Close and Destroy Window
-    cv::destroyWindow("REAL TIME DEMO");
+    destroyWindow("REAL TIME DEMO");
-    std::cout << "GOODBYE ..." << std::endl;
+    cout << "GOODBYE ..." << endl;
 }
 /**********************************************************************************************************/
 void help()
 {
-    std::cout
-    << "--------------------------------------------------------------------------" << std::endl
+    cout
+    << "--------------------------------------------------------------------------" << endl
     << "This program shows how to detect an object given its 3D textured model. You can choose to "
-    << "use a recorded video or the webcam." << std::endl
-    << "Usage:" << std::endl
-    << "./cpp-tutorial-pnp_detection -help" << std::endl
-    << "Keys:" << std::endl
-    << "'esc' - to quit." << std::endl
-    << "--------------------------------------------------------------------------" << std::endl
-    << std::endl;
+    << "use a recorded video or the webcam." << endl
+    << "Usage:" << endl
+    << "./cpp-tutorial-pnp_detection -help" << endl
+    << "Keys:" << endl
+    << "'esc' - to quit." << endl
+    << "--------------------------------------------------------------------------" << endl
+    << endl;
 }
 /**********************************************************************************************************/
-void initKalmanFilter(cv::KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt)
+void initKalmanFilter(KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt)
 {
     KF.init(nStates, nMeasurements, nInputs, CV_64F); // init Kalman Filter
-    cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-5)); // set process noise
-    cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-2)); // set measurement noise
-    cv::setIdentity(KF.errorCovPost, cv::Scalar::all(1)); // error covariance
+    setIdentity(KF.processNoiseCov, Scalar::all(1e-5)); // set process noise
+    setIdentity(KF.measurementNoiseCov, Scalar::all(1e-2)); // set measurement noise
+    setIdentity(KF.errorCovPost, Scalar::all(1)); // error covariance
 /** DYNAMIC MODEL **/
@@ -424,15 +427,15 @@ void initKalmanFilter(cv::KalmanFilter &KF, int nStates, int nMeasurements, int
 }
 /**********************************************************************************************************/
-void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurement,
-                         cv::Mat &translation_estimated, cv::Mat &rotation_estimated )
+void updateKalmanFilter( KalmanFilter &KF, Mat &measurement,
+                         Mat &translation_estimated, Mat &rotation_estimated )
 {
     // First predict, to update the internal statePre variable
-    cv::Mat prediction = KF.predict();
+    Mat prediction = KF.predict();
     // The "correct" phase that is going to use the predicted value and our measurement
-    cv::Mat estimated = KF.correct(measurement);
+    Mat estimated = KF.correct(measurement);
     // Estimated translation
     translation_estimated.at<double>(0) = estimated.at<double>(0);
@@ -440,7 +443,7 @@ void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurement,
     translation_estimated.at<double>(2) = estimated.at<double>(2);
     // Estimated euler angles
-    cv::Mat eulers_estimated(3, 1, CV_64F);
+    Mat eulers_estimated(3, 1, CV_64F);
     eulers_estimated.at<double>(0) = estimated.at<double>(9);
     eulers_estimated.at<double>(1) = estimated.at<double>(10);
     eulers_estimated.at<double>(2) = estimated.at<double>(11);
@@ -451,11 +454,11 @@ void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurement,
 }
 /**********************************************************************************************************/
-void fillMeasurements( cv::Mat &measurements,
-                       const cv::Mat &translation_measured, const cv::Mat &rotation_measured)
+void fillMeasurements( Mat &measurements,
+                       const Mat &translation_measured, const Mat &rotation_measured)
 {
     // Convert rotation matrix to euler angles
-    cv::Mat measured_eulers(3, 1, CV_64F);
+    Mat measured_eulers(3, 1, CV_64F);
     measured_eulers = rot2euler(rotation_measured);
     // Set measurement to predict
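
Note: the sample's pnp_detection helper ultimately wraps cv::solvePnPRansac; a minimal sketch of the direct call with the RANSAC knobs configured above (the point lists and the 3x3 intrinsic matrix are assumed to be filled in by the caller; the numeric values here are illustrative, since the sample reads its own from the command line):

    #include <opencv2/calib3d.hpp>
    #include <vector>

    bool estimatePoseRansac(const std::vector<cv::Point3f>& pts3d, // model points
                            const std::vector<cv::Point2f>& pts2d, // matched scene points
                            const cv::Mat& A,                      // camera matrix, assumed known
                            cv::Mat& rvec, cv::Mat& tvec, cv::Mat& inliers)
    {
        cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64F); // the tutorial assumes no lens distortion
        return cv::solvePnPRansac(pts3d, pts2d, A, distCoeffs, rvec, tvec,
                                  false,  // useExtrinsicGuess
                                  500,    // iterationsCount (illustrative)
                                  2.0f,   // reprojectionError (illustrative)
                                  0.95,   // confidence, as defaulted above
                                  inliers, cv::SOLVEPNP_ITERATIVE);
    }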

@@ -13,13 +13,16 @@
 #include "ModelRegistration.h"
 #include "Utils.h"
+using namespace cv;
+using namespace std;
 /** GLOBAL VARIABLES **/
-std::string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
+string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
-std::string img_path = tutorial_path + "Data/resized_IMG_3875.JPG"; // image to register
-std::string ply_read_path = tutorial_path + "Data/box.ply"; // object mesh
-std::string write_path = tutorial_path + "Data/cookies_ORB.yml"; // output file
+string img_path = tutorial_path + "Data/resized_IMG_3875.JPG"; // image to register
+string ply_read_path = tutorial_path + "Data/box.ply"; // object mesh
+string write_path = tutorial_path + "Data/cookies_ORB.yml"; // output file
 // Boolean the know if the registration it's done
 bool end_registration = false;
@@ -39,10 +42,10 @@ int n = 8;
 int pts[] = {1, 2, 3, 4, 5, 6, 7, 8}; // 3 -> 4
 // Some basic colors
-cv::Scalar red(0, 0, 255);
-cv::Scalar green(0,255,0);
-cv::Scalar blue(255,0,0);
-cv::Scalar yellow(0,255,255);
+Scalar red(0, 0, 255);
+Scalar green(0,255,0);
+Scalar blue(255,0,0);
+Scalar yellow(0,255,255);
 /*
  * CREATE MODEL REGISTRATION OBJECT
@@ -61,13 +64,13 @@ void help();
 // Mouse events for model registration
 static void onMouseModelRegistration( int event, int x, int y, int, void* )
 {
-    if ( event == cv::EVENT_LBUTTONUP )
+    if ( event == EVENT_LBUTTONUP )
     {
         int n_regist = registration.getNumRegist();
         int n_vertex = pts[n_regist];
-        cv::Point2f point_2d = cv::Point2f((float)x,(float)y);
-        cv::Point3f point_3d = mesh.getVertex(n_vertex-1);
+        Point2f point_2d = Point2f((float)x,(float)y);
+        Point3f point_3d = mesh.getVertex(n_vertex-1);
         bool is_registrable = registration.is_registrable();
         if (is_registrable)
@@ -92,23 +95,23 @@ int main()
     //Instantiate robust matcher: detector, extractor, matcher
     RobustMatcher rmatcher;
-    cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints);
+    Ptr<FeatureDetector> detector = ORB::create(numKeyPoints);
     rmatcher.setFeatureDetector(detector);
     /** GROUND TRUTH OF THE FIRST IMAGE **/
     // Create & Open Window
-    cv::namedWindow("MODEL REGISTRATION", cv::WINDOW_KEEPRATIO);
+    namedWindow("MODEL REGISTRATION", WINDOW_KEEPRATIO);
     // Set up the mouse events
-    cv::setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 );
+    setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 );
     // Open the image to register
-    cv::Mat img_in = cv::imread(img_path, cv::IMREAD_COLOR);
-    cv::Mat img_vis = img_in.clone();
+    Mat img_in = imread(img_path, IMREAD_COLOR);
+    Mat img_vis = img_in.clone();
     if (!img_in.data) {
-        std::cout << "Could not open or find the image" << std::endl;
+        cout << "Could not open or find the image" << endl;
         return -1;
     }
@@ -116,18 +119,18 @@ int main()
     int num_registrations = n;
     registration.setNumMax(num_registrations);
-    std::cout << "Click the box corners ..." << std::endl;
-    std::cout << "Waiting ..." << std::endl;
+    cout << "Click the box corners ..." << endl;
+    cout << "Waiting ..." << endl;
     // Loop until all the points are registered
-    while ( cv::waitKey(30) < 0 )
+    while ( waitKey(30) < 0 )
     {
         // Refresh debug image
         img_vis = img_in.clone();
         // Current registered points
-        std::vector<cv::Point2f> list_points2d = registration.get_points2d();
-        std::vector<cv::Point3f> list_points3d = registration.get_points3d();
+        vector<Point2f> list_points2d = registration.get_points2d();
+        vector<Point3f> list_points3d = registration.get_points3d();
         // Draw current registered points
         drawPoints(img_vis, list_points2d, list_points3d, red);
@@ -139,7 +142,7 @@ int main()
         // Draw debug text
         int n_regist = registration.getNumRegist();
         int n_vertex = pts[n_regist];
-        cv::Point3f current_poin3d = mesh.getVertex(n_vertex-1);
+        Point3f current_poin3d = mesh.getVertex(n_vertex-1);
         drawQuestion(img_vis, current_poin3d, green);
         drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), red);
@@ -153,43 +156,43 @@ int main()
         }
         // Show the image
-        cv::imshow("MODEL REGISTRATION", img_vis);
+        imshow("MODEL REGISTRATION", img_vis);
     }
     /** COMPUTE CAMERA POSE **/
-    std::cout << "COMPUTING POSE ..." << std::endl;
+    cout << "COMPUTING POSE ..." << endl;
     // The list of registered points
-    std::vector<cv::Point2f> list_points2d = registration.get_points2d();
-    std::vector<cv::Point3f> list_points3d = registration.get_points3d();
+    vector<Point2f> list_points2d = registration.get_points2d();
+    vector<Point3f> list_points3d = registration.get_points3d();
     // Estimate pose given the registered points
-    bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, cv::SOLVEPNP_ITERATIVE);
+    bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, SOLVEPNP_ITERATIVE);
     if ( is_correspondence )
     {
-        std::cout << "Correspondence found" << std::endl;
+        cout << "Correspondence found" << endl;
         // Compute all the 2D points of the mesh to verify the algorithm and draw it
-        std::vector<cv::Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
+        vector<Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
         draw2DPoints(img_vis, list_points2d_mesh, green);
     } else {
-        std::cout << "Correspondence not found" << std::endl << std::endl;
+        cout << "Correspondence not found" << endl << endl;
     }
     // Show the image
-    cv::imshow("MODEL REGISTRATION", img_vis);
+    imshow("MODEL REGISTRATION", img_vis);
     // Show image until ESC pressed
-    cv::waitKey(0);
+    waitKey(0);
     /** COMPUTE 3D of the image Keypoints **/
     // Containers for keypoints and descriptors of the model
-    std::vector<cv::KeyPoint> keypoints_model;
-    cv::Mat descriptors;
+    vector<KeyPoint> keypoints_model;
+    Mat descriptors;
     // Compute keypoints and descriptors
     rmatcher.computeKeyPoints(img_in, keypoints_model);
@@ -197,8 +200,8 @@ int main()
     // Check if keypoints are on the surface of the registration image and add to the model
     for (unsigned int i = 0; i < keypoints_model.size(); ++i) {
-        cv::Point2f point2d(keypoints_model[i].pt);
-        cv::Point3f point3d;
+        Point2f point2d(keypoints_model[i].pt);
+        Point3f point3d;
         bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d);
         if (on_surface)
         {
@@ -219,12 +222,12 @@ int main()
     img_vis = img_in.clone();
     // The list of the points2d of the model
-    std::vector<cv::Point2f> list_points_in = model.get_points2d_in();
-    std::vector<cv::Point2f> list_points_out = model.get_points2d_out();
+    vector<Point2f> list_points_in = model.get_points2d_in();
+    vector<Point2f> list_points_out = model.get_points2d_out();
     // Draw some debug text
-    std::string num = IntToString((int)list_points_in.size());
-    std::string text = "There are " + num + " inliers";
+    string num = IntToString((int)list_points_in.size());
+    string text = "There are " + num + " inliers";
     drawText(img_vis, text, green);
     // Draw some debug text
@@ -240,26 +243,26 @@ int main()
     draw2DPoints(img_vis, list_points_out, red);
     // Show the image
-    cv::imshow("MODEL REGISTRATION", img_vis);
+    imshow("MODEL REGISTRATION", img_vis);
     // Wait until ESC pressed
-    cv::waitKey(0);
+    waitKey(0);
     // Close and Destroy Window
-    cv::destroyWindow("MODEL REGISTRATION");
+    destroyWindow("MODEL REGISTRATION");
-    std::cout << "GOODBYE" << std::endl;
+    cout << "GOODBYE" << endl;
 }
 /**********************************************************************************************************/
 void help()
 {
-    std::cout
-    << "--------------------------------------------------------------------------" << std::endl
-    << "This program shows how to create your 3D textured model. " << std::endl
-    << "Usage:" << std::endl
-    << "./cpp-tutorial-pnp_registration" << std::endl
-    << "--------------------------------------------------------------------------" << std::endl
-    << std::endl;
+    cout
+    << "--------------------------------------------------------------------------" << endl
+    << "This program shows how to create your 3D textured model. " << endl
+    << "Usage:" << endl
+    << "./cpp-tutorial-pnp_registration" << endl
+    << "--------------------------------------------------------------------------" << endl
+    << endl;
 }
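
Note: after this change, the registration sample drives ORB through the same Ptr-based interface. A minimal sketch of computing keypoints and descriptors directly with such a detector (the image path follows the sample; the keypoint budget here is illustrative):

    #include <opencv2/features2d.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <vector>

    int main()
    {
        cv::Mat img = cv::imread("Data/resized_IMG_3875.JPG", cv::IMREAD_COLOR);
        if( img.empty() ) return 1;

        cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create(2000); // illustrative budget

        std::vector<cv::KeyPoint> keypoints;
        cv::Mat descriptors;
        detector->detect(img, keypoints);               // find keypoints
        detector->compute(img, keypoints, descriptors); // describe them
        return 0;
    }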

@@ -22,9 +22,9 @@ int main(void)
     vector<KeyPoint> kpts1, kpts2;
     Mat desc1, desc2;
-    AKAZE akaze;
-    akaze(img1, noArray(), kpts1, desc1);
-    akaze(img2, noArray(), kpts2, desc2);
+    Ptr<AKAZE> akaze = AKAZE::create();
+    akaze->detectAndCompute(img1, noArray(), kpts1, desc1);
+    akaze->detectAndCompute(img2, noArray(), kpts2, desc2);
     BFMatcher matcher(NORM_HAMMING);
     vector< vector<DMatch> > nn_matches;
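
Note: AKAZE is no longer a callable function object in 3.0; detection and description now go through Feature2D::detectAndCompute. A compact sketch of the pattern, including the Hamming-distance matching the tutorial pairs with AKAZE's binary descriptors (input paths are placeholders):

    #include <opencv2/features2d.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <vector>

    int main()
    {
        cv::Mat img1 = cv::imread("graf1.png", cv::IMREAD_GRAYSCALE);
        cv::Mat img2 = cv::imread("graf3.png", cv::IMREAD_GRAYSCALE);
        if( img1.empty() || img2.empty() ) return 1;

        cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
        std::vector<cv::KeyPoint> kpts1, kpts2;
        cv::Mat desc1, desc2;
        akaze->detectAndCompute(img1, cv::noArray(), kpts1, desc1);
        akaze->detectAndCompute(img2, cv::noArray(), kpts2, desc2);

        // 2-NN matching plus a ratio test, as in the tutorial
        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector<std::vector<cv::DMatch> > nn_matches;
        matcher.knnMatch(desc1, desc2, nn_matches, 2);

        std::vector<cv::DMatch> good;
        for( size_t i = 0; i < nn_matches.size(); i++ )
            if( nn_matches[i].size() == 2 &&
                nn_matches[i][0].distance < 0.8f * nn_matches[i][1].distance )
                good.push_back(nn_matches[i][0]);
        return 0;
    }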

@@ -41,7 +41,7 @@ protected:
 void Tracker::setFirstFrame(const Mat frame, vector<Point2f> bb, string title, Stats& stats)
 {
     first_frame = frame.clone();
-    (*detector)(first_frame, noArray(), first_kp, first_desc);
+    detector->detectAndCompute(first_frame, noArray(), first_kp, first_desc);
     stats.keypoints = (int)first_kp.size();
     drawBoundingBox(first_frame, bb);
     putText(first_frame, title, Point(0, 60), FONT_HERSHEY_PLAIN, 5, Scalar::all(0), 4);
@@ -52,7 +52,7 @@ Mat Tracker::process(const Mat frame, Stats& stats)
 {
     vector<KeyPoint> kp;
     Mat desc;
-    (*detector)(frame, noArray(), kp, desc);
+    detector->detectAndCompute(frame, noArray(), kp, desc);
     stats.keypoints = (int)kp.size();
     vector< vector<DMatch> > matches;
@@ -135,9 +135,9 @@ int main(int argc, char **argv)
         return 1;
     }
     fs["bounding_box"] >> bb;
-    Ptr<Feature2D> akaze = Feature2D::create("AKAZE");
+    Ptr<Feature2D> akaze = AKAZE::create();
     akaze->set("threshold", akaze_thresh);
-    Ptr<Feature2D> orb = Feature2D::create("ORB");
+    Ptr<Feature2D> orb = ORB::create();
     Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
     Tracker akaze_tracker(akaze, matcher);
     Tracker orb_tracker(orb, matcher);
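
Note: the factory styles are mixed after this change: Feature2D subclasses use static create() methods, while DescriptorMatcher keeps its string-based factory. A minimal sketch of the creation pattern used above (the Tracker type is the sample's own and is omitted here):

    #include <opencv2/features2d.hpp>

    void makeBackends(cv::Ptr<cv::Feature2D>& akaze,
                      cv::Ptr<cv::Feature2D>& orb,
                      cv::Ptr<cv::DescriptorMatcher>& matcher)
    {
        akaze = cv::AKAZE::create();   // replaces Feature2D::create("AKAZE")
        orb = cv::ORB::create();       // replaces Feature2D::create("ORB")
        matcher = cv::DescriptorMatcher::create("BruteForce-Hamming"); // still string-based
    }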

@@ -227,7 +227,7 @@ public:
 #endif
         Ptr<KeypointBasedMotionEstimator> kbest = makePtr<KeypointBasedMotionEstimator>(est);
-        kbest->setDetector(makePtr<GoodFeaturesToTrackDetector>(argi(prefix + "nkps")));
+        kbest->setDetector(GFTTDetector::create(argi(prefix + "nkps")));
         kbest->setOutlierRejector(outlierRejector);
         return kbest;
     }
@@ -268,7 +268,7 @@ public:
 #endif
         Ptr<KeypointBasedMotionEstimator> kbest = makePtr<KeypointBasedMotionEstimator>(est);
-        kbest->setDetector(makePtr<GoodFeaturesToTrackDetector>(argi(prefix + "nkps")));
+        kbest->setDetector(GFTTDetector::create(argi(prefix + "nkps")));
         kbest->setOutlierRejector(outlierRejector);
         return kbest;
     }
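
Note: the 2.4 GoodFeaturesToTrackDetector class is exposed as GFTTDetector in 3.0 and is created through its factory rather than makePtr. A minimal sketch (the corner budget is illustrative; the sample reads nkps from its command line):

    #include <opencv2/features2d.hpp>
    #include <vector>

    void detectCorners(const cv::Mat& gray, std::vector<cv::KeyPoint>& kps)
    {
        cv::Ptr<cv::GFTTDetector> gftt = cv::GFTTDetector::create(1000); // max corners
        gftt->detect(gray, kps);
    }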
