diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvReader.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvReader.cpp
new file mode 100644
index 0000000000..d97afdb73d
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvReader.cpp
@@ -0,0 +1,80 @@
+#include "CsvReader.h"
+#include "Utils.h"
+
+/** Build a CSV reader bound to the given file path and word separator */
+CsvReader::CsvReader(const std::string &path, const char &separator)
+    : _separator(separator)
+{
+    _file.open(path.c_str(), std::ifstream::in);
+}
+
+/* Read a plain text file with .ply format */
+
+// Parse a floating-point value from a string. The mesh vertices are
+// real-valued coordinates, so they must not be truncated to integers.
+static float stringToFloatValue(const std::string &str)
+{
+    float value = 0.f;
+    stringstream ss(str);
+    ss >> value;
+    return value;
+}
+
+/**
+ * Parse a plain-text *.ply mesh file that was opened by the constructor.
+ *
+ * @param list_vertex - Output container for the mesh vertices
+ * @param list_triangles - Output container for the triangle vertex indices
+ */
+void CsvReader::readPLY(std::vector<cv::Point3f> &list_vertex, std::vector<std::vector<int> > &list_triangles)
+{
+    std::string line, tmp_str, n;
+    // initialised to zero so a malformed header cannot leave them undefined
+    int num_vertex = 0, num_triangles = 0;
+    int count = 0;
+    bool end_header = false;
+    bool end_vertex = false;
+
+    // Read the whole *.ply file line by line
+    while (getline(_file, line)) {
+    stringstream liness(line);
+
+    // read header: pick up the "element vertex N" / "element face N" counts
+    if(!end_header)
+    {
+        getline(liness, tmp_str, _separator);
+        if( tmp_str == "element" )
+        {
+            getline(liness, tmp_str, _separator);
+            getline(liness, n);
+            if(tmp_str == "vertex") num_vertex = StringToInt(n);
+            if(tmp_str == "face") num_triangles = StringToInt(n);
+        }
+        if(tmp_str == "end_header") end_header = true;
+    }
+
+    // read file content: all vertices come first, then all faces
+    else
+    {
+         // read vertex and add into 'list_vertex'
+         if(!end_vertex && count < num_vertex)
+         {
+             string x, y, z;
+             getline(liness, x, _separator);
+             getline(liness, y, _separator);
+             getline(liness, z);
+
+             cv::Point3f tmp_p;
+             // BUGFIX: coordinates are floats; the previous StringToInt()
+             // conversion silently truncated the fractional part.
+             tmp_p.x = stringToFloatValue(x);
+             tmp_p.y = stringToFloatValue(y);
+             tmp_p.z = stringToFloatValue(z);
+             list_vertex.push_back(tmp_p);
+
+             count++;
+             if(count == num_vertex)
+             {
+                 count = 0;
+                 end_vertex = true;
+             }
+         }
+         // read faces and add into 'list_triangles'
+         else if(end_vertex && count < num_triangles)
+         {
+             std::string num_pts_per_face, id0, id1, id2;
+             getline(liness, num_pts_per_face, _separator);
+             getline(liness, id0, _separator);
+             getline(liness, id1, _separator);
+             getline(liness, id2);
+
+             std::vector<int> tmp_triangle(3);
+             tmp_triangle[0] = StringToInt(id0);
+             tmp_triangle[1] = StringToInt(id1);
+             tmp_triangle[2] = StringToInt(id2);
+             list_triangles.push_back(tmp_triangle);
+
+             count++;
+         }
+    }
+  }
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvReader.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvReader.h
new file mode 100644
index 0000000000..ab94e4c1b9
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvReader.h
@@ -0,0 +1,40 @@
+#ifndef CSVREADER_H
+#define	CSVREADER_H
+
+#include <iostream>
+#include <fstream>
+
+#include <opencv2/core/core.hpp>
+
+using namespace std;
+using namespace cv;
+
+/** Small helper that reads separator-delimited text files (used here for *.ply meshes) */
+class CsvReader {
+public:
+  /**
+  * The default constructor of the CSV reader Class.
+  * The default separator is ' ' (empty space)
+  *
+  * @param path - The path of the file to read
+  * @param separator - The separator character between words per line
+  * @return
+  */
+  CsvReader(const std::string &path, const char &separator = ' ');
+
+  /**
+  * Read a plain text file with .ply format
+  *
+  * @param list_vertex - The container of the vertices list of the mesh
+  * @param list_triangles - The container of the triangles list of the mesh
+  * @return
+  */
+  void readPLY(std::vector<cv::Point3f> &list_vertex, std::vector<std::vector<int> > &list_triangles);
+
+private:
+  /** The current stream file for the reader */
+  ifstream _file;
+  /** The separator character between words for each line */
+  char _separator;
+};
+
+#endif
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvWriter.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvWriter.cpp
new file mode 100644
index 0000000000..4671fd098e
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvWriter.cpp
@@ -0,0 +1,50 @@
+#include "CsvWriter.h"
+#include "Utils.h"
+
+/** Open 'path' for writing; values will be joined with 'separator' */
+CsvWriter::CsvWriter(const std::string &path, const std::string &separator)
+    : _separator(separator), _isFirstTerm(true)
+{
+  _file.open(path.c_str(), std::ofstream::out);
+}
+
+/** Flush any pending output and release the file handle */
+CsvWriter::~CsvWriter()
+{
+  _file.flush();
+  _file.close();
+}
+
+/** Dump each 3D point as one "x y z" line */
+void CsvWriter::writeXYZ(const std::vector<cv::Point3f> &list_points3d)
+{
+  for(size_t idx = 0; idx < list_points3d.size(); ++idx)
+  {
+    const cv::Point3f &pt = list_points3d[idx];
+    _file << FloatToString(pt.x) << _separator
+          << FloatToString(pt.y) << _separator
+          << FloatToString(pt.z) << std::endl;
+  }
+}
+
+/**
+ * Dump 2D/3D correspondences plus their descriptors:
+ * one "u v x y z d0 ... dN" line per point.
+ *
+ * Fixes: removed the per-value std::cout debug trace (console spam that
+ * dominated runtime); loop index made unsigned to match size(); descriptor
+ * width taken from the matrix instead of a hard-coded 32.
+ *
+ * NOTE(review): assumes 'descriptors' stores CV_32F values (at<float>) —
+ * confirm with the producer of the matrix.
+ */
+void CsvWriter::writeUVXYZ(const std::vector<cv::Point3f> &list_points3d, const std::vector<cv::Point2f> &list_points2d, const cv::Mat &descriptors)
+{
+  for(size_t i = 0; i < list_points3d.size(); ++i)
+  {
+    _file << FloatToString(list_points2d[i].x) << _separator
+          << FloatToString(list_points2d[i].y) << _separator
+          << FloatToString(list_points3d[i].x) << _separator
+          << FloatToString(list_points3d[i].y) << _separator
+          << FloatToString(list_points3d[i].z);
+
+    for(int j = 0; j < descriptors.cols; ++j)
+    {
+      _file << _separator << FloatToString(descriptors.at<float>((int)i, j));
+    }
+    _file << std::endl;
+  }
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvWriter.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvWriter.h
new file mode 100644
index 0000000000..c2eea752d6
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/CsvWriter.h
@@ -0,0 +1,22 @@
+#ifndef CSVWRITER_H
+#define	CSVWRITER_H
+
+#include <fstream>
+#include <iostream>
+
+#include <opencv2/core/core.hpp>
+
+/** Small helper that writes separator-joined values to a text file */
+class CsvWriter {
+public:
+  /** Open 'path' for writing; 'separator' is placed between values (default ' ') */
+  CsvWriter(const std::string &path, const std::string &separator = " ");
+  /** Flushes and closes the underlying file */
+  ~CsvWriter();
+  /** Write one "x y z" line per 3D point */
+  void writeXYZ(const std::vector<cv::Point3f> &list_points3d);
+  /** Write one "u v x y z d0..dN" line per 2D/3D correspondence with its descriptor row */
+  void writeUVXYZ(const std::vector<cv::Point3f> &list_points3d, const std::vector<cv::Point2f> &list_points2d, const cv::Mat &descriptors);
+
+private:
+  /** Output stream for the file being written */
+  std::ofstream _file;
+  /** Separator placed between consecutive values */
+  std::string _separator;
+  /** Set by the constructor; not read by the methods in this class — TODO confirm it is still needed */
+  bool _isFirstTerm;
+};
+
+#endif
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Mesh.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Mesh.cpp
new file mode 100644
index 0000000000..6458cfde36
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Mesh.cpp
@@ -0,0 +1,82 @@
+/*
+ * Mesh.cpp
+ *
+ *  Created on: Apr 9, 2014
+ *      Author: edgar
+ */
+
+#include "Mesh.h"
+#include "CsvReader.h"
+
+
+// --------------------------------------------------- //
+//                   TRIANGLE CLASS                    //
+// --------------------------------------------------- //
+
+/** Build a triangle from its identifier and three vertices */
+Triangle::Triangle(int id, cv::Point3f V0, cv::Point3f V1, cv::Point3f V2)
+    : id_(id), v0_(V0), v1_(V1), v2_(V2)
+{
+}
+
+/** Triangle destructor: nothing to release */
+Triangle::~Triangle()
+{
+}
+
+
+// --------------------------------------------------- //
+//                     RAY CLASS                       //
+// --------------------------------------------------- //
+
+/** Build a ray from its origin P0 and a second point/direction P1 */
+Ray::Ray(cv::Point3f P0, cv::Point3f P1)
+    : p0_(P0), p1_(P1)
+{
+}
+
+/** Ray destructor: nothing to release */
+Ray::~Ray()
+{
+}
+
+
+// --------------------------------------------------- //
+//                 OBJECT MESH CLASS                   //
+// --------------------------------------------------- //
+
+/** Construct an empty mesh with zeroed counters */
+Mesh::Mesh()
+    : id_(0), num_vertexs_(0), num_triangles_(0),
+      list_vertex_(0), list_triangles_(0)
+{
+}
+
+/** Mesh destructor: the vectors release their storage automatically */
+Mesh::~Mesh()
+{
+}
+
+
+/** Load the mesh from a *.ply file on disk */
+void Mesh::load(const std::string path)
+{
+  // Drop any data left over from a previous load
+  list_vertex_.clear();
+  list_triangles_.clear();
+
+  // Parse the PLY file into the vertex and triangle lists
+  CsvReader csvReader(path);
+  csvReader.readPLY(list_vertex_, list_triangles_);
+
+  // Cache the element counts
+  num_vertexs_ = (int)list_vertex_.size();
+  num_triangles_ = (int)list_triangles_.size();
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Mesh.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Mesh.h
new file mode 100644
index 0000000000..2ca625d3c3
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Mesh.h
@@ -0,0 +1,86 @@
+/*
+ * Mesh.h
+ *
+ *  Created on: Apr 9, 2014
+ *      Author: edgar
+ */
+
+#ifndef MESH_H_
+#define MESH_H_
+
+#include <iostream>
+#include <opencv2/core/core.hpp>
+
+
+// --------------------------------------------------- //
+//                 TRIANGLE CLASS                      //
+// --------------------------------------------------- //
+
+/** A mesh triangle: an identifier plus three 3D vertices */
+class Triangle {
+public:
+
+  explicit Triangle(int id, cv::Point3f V0, cv::Point3f V1, cv::Point3f V2);
+  virtual ~Triangle();
+
+  /** Accessors for the three vertices */
+  cv::Point3f getV0() const { return v0_; }
+  cv::Point3f getV1() const { return v1_; }
+  cv::Point3f getV2() const { return v2_; }
+
+private:
+  /** The identifier number of the triangle */
+  int id_;
+  /** The three vertices that define the triangle */
+  cv::Point3f v0_, v1_, v2_;
+};
+
+
+// --------------------------------------------------- //
+//                     RAY CLASS                       //
+// --------------------------------------------------- //
+
+/** A ray defined by two 3D points.
+ *  NOTE(review): callers in PnPProblem store the ray origin in p0_ and a
+ *  (normalised) direction vector in p1_ — confirm before reusing elsewhere. */
+class Ray {
+public:
+
+  explicit Ray(cv::Point3f P0, cv::Point3f P1);
+  virtual ~Ray();
+
+  cv::Point3f getP0() { return p0_; }
+  cv::Point3f getP1() { return p1_; }
+
+private:
+  /** The two points that define the ray */
+  cv::Point3f p0_, p1_;
+};
+
+
+// --------------------------------------------------- //
+//                OBJECT MESH CLASS                    //
+// --------------------------------------------------- //
+
+/** A triangulated 3D mesh loaded from a *.ply file */
+class Mesh
+{
+public:
+
+  Mesh();
+  virtual ~Mesh();
+
+  /** Triangles as triples of vertex indices into the vertex list */
+  std::vector<std::vector<int> > getTrianglesList() const { return list_triangles_; }
+  /** Vertex at index 'pos' (no bounds checking) */
+  cv::Point3f getVertex(int pos) const { return list_vertex_[pos]; }
+  int getNumVertices() const { return num_vertexs_; }
+
+  /** Parse the given *.ply file and replace the current mesh contents */
+  void load(const std::string path_file);
+
+private:
+  /** The identification number of the mesh */
+  int id_;
+  /** The current number of vertices in the mesh */
+  int num_vertexs_;
+  /** The current number of triangles in the mesh */
+  int num_triangles_;
+  /* The list of vertices of the mesh */
+  std::vector<cv::Point3f> list_vertex_;
+  /* The list of triangles of the mesh */
+  std::vector<std::vector<int> > list_triangles_;
+};
+
+#endif /* MESH_H_ */
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.cpp
new file mode 100644
index 0000000000..3256cef057
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.cpp
@@ -0,0 +1,73 @@
+/*
+ * Model.cpp
+ *
+ *  Created on: Apr 9, 2014
+ *      Author: edgar
+ */
+
+#include "Model.h"
+#include "CsvWriter.h"
+
+/** Construct an empty model with no correspondences */
+Model::Model()
+    : n_correspondences_(0),
+      list_points2d_in_(0), list_points2d_out_(0), list_points3d_in_(0)
+{
+}
+
+/** Model destructor: members clean up after themselves */
+Model::~Model()
+{
+}
+
+/** Register a matched 2D/3D pair and bump the correspondence counter */
+void Model::add_correspondence(const cv::Point2f &point2d, const cv::Point3f &point3d)
+{
+  list_points3d_in_.push_back(point3d);
+  list_points2d_in_.push_back(point2d);
+  ++n_correspondences_;
+}
+
+/** Record a 2D point that lies outside the model surface */
+void Model::add_outlier(const cv::Point2f &point2d)
+{
+  list_points2d_out_.push_back(point2d);
+}
+
+/** Append a descriptor row to the descriptor matrix */
+void Model::add_descriptor(const cv::Mat &descriptor)
+{
+  descriptors_.push_back(descriptor);
+}
+
+/** Append a detected keypoint to the keypoint list */
+void Model::add_keypoint(const cv::KeyPoint &kp)
+{
+  list_keypoints_.push_back(kp);
+}
+
+
+/** Save the model using cv::FileStorage.
+ *  NOTE(review): the previous comment said "CSV", but this writes structured
+ *  data through cv::FileStorage, not comma-separated values. */
+void Model::save(const std::string path)
+{
+  // Pack the point lists into matrices so FileStorage can serialise them
+  cv::Mat points3dmatrix = cv::Mat(list_points3d_in_);
+  cv::Mat points2dmatrix = cv::Mat(list_points2d_in_);
+  //cv::Mat keyPointmatrix = cv::Mat(list_keypoints_);
+
+  cv::FileStorage storage(path, cv::FileStorage::WRITE);
+  storage << "points_3d" << points3dmatrix;
+  storage << "points_2d" << points2dmatrix;
+  storage << "keypoints" << list_keypoints_;
+  storage << "descriptors" << descriptors_;
+
+  storage.release();
+}
+
+/** Load a model previously written by save(). Only the 3D points and the
+ *  descriptors are restored; the 2D point lists and keypoints are untouched. */
+void Model::load(const std::string path)
+{
+  cv::Mat points3d_mat;
+
+  cv::FileStorage storage(path, cv::FileStorage::READ);
+  storage["points_3d"] >> points3d_mat;
+  storage["descriptors"] >> descriptors_;
+
+  // convert the matrix back into the point vector
+  points3d_mat.copyTo(list_points3d_in_);
+
+  storage.release();
+
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.h
new file mode 100644
index 0000000000..79af18f0fb
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Model.h
@@ -0,0 +1,54 @@
+/*
+ * Model.h
+ *
+ *  Created on: Apr 9, 2014
+ *      Author: edgar
+ */
+
+#ifndef MODEL_H_
+#define MODEL_H_
+
+#include <iostream>
+#include <opencv2/core/core.hpp>
+#include <opencv2/features2d/features2d.hpp>
+
+/** Registered 2D/3D correspondences of an object, with their keypoints and
+ *  descriptors; can be saved to / loaded from disk via cv::FileStorage. */
+class Model
+{
+public:
+  Model();
+  virtual ~Model();
+
+  std::vector<cv::Point2f> get_points2d_in() const { return list_points2d_in_; }
+  std::vector<cv::Point2f> get_points2d_out() const { return list_points2d_out_; }
+  std::vector<cv::Point3f> get_points3d() const { return list_points3d_in_; }
+  std::vector<cv::KeyPoint> get_keypoints() const { return list_keypoints_; }
+  cv::Mat get_descriptors() const { return descriptors_; }
+  int get_numDescriptors() const { return descriptors_.rows; }
+
+
+  /** Register a matched 2D/3D pair */
+  void add_correspondence(const cv::Point2f &point2d, const cv::Point3f &point3d);
+  /** Record a 2D point outside the model surface */
+  void add_outlier(const cv::Point2f &point2d);
+  void add_descriptor(const cv::Mat &descriptor);
+  void add_keypoint(const cv::KeyPoint &kp);
+
+
+  void save(const std::string path);
+  void load(const std::string path);
+
+
+private:
+  /** The current number of correspondences */
+  int n_correspondences_;
+  /** The list of detected keypoints */
+  std::vector<cv::KeyPoint> list_keypoints_;
+  /** The list of 2D points on the model surface */
+  std::vector<cv::Point2f> list_points2d_in_;
+  /** The list of 2D points outside the model surface */
+  std::vector<cv::Point2f> list_points2d_out_;
+  /** The list of 3D points on the model surface */
+  std::vector<cv::Point3f> list_points3d_in_;
+  /** The list of 2D points descriptors */
+  cv::Mat descriptors_;
+};
+
+#endif /* MODEL_H_ */
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/ModelRegistration.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/ModelRegistration.cpp
new file mode 100644
index 0000000000..e8da885685
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/ModelRegistration.cpp
@@ -0,0 +1,35 @@
+/*
+ * ModelRegistration.cpp
+ *
+ *  Created on: Apr 18, 2014
+ *      Author: edgar
+ */
+
+#include "ModelRegistration.h"
+
+/** Construct a registration session with zeroed counters */
+ModelRegistration::ModelRegistration()
+    : n_registrations_(0), max_registrations_(0)
+{
+}
+
+/** Destructor: the point vectors release their storage automatically */
+ModelRegistration::~ModelRegistration()
+{
+}
+
+/** Append a 2D/3D correspondence and update the registration counter */
+void ModelRegistration::registerPoint(const cv::Point2f &point2d, const cv::Point3f &point3d)
+{
+  list_points2d_.push_back(point2d);
+  list_points3d_.push_back(point3d);
+  ++n_registrations_;
+}
+
+/** Discard all registered points and reset both counters */
+void ModelRegistration::reset()
+{
+  list_points2d_.clear();
+  list_points3d_.clear();
+  n_registrations_ = 0;
+  max_registrations_ = 0;
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/ModelRegistration.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/ModelRegistration.h
new file mode 100644
index 0000000000..f1e7aca469
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/ModelRegistration.h
@@ -0,0 +1,43 @@
+/*
+ * ModelRegistration.h
+ *
+ *  Created on: Apr 18, 2014
+ *      Author: edgar
+ */
+
+#ifndef MODELREGISTRATION_H_
+#define MODELREGISTRATION_H_
+
+#include <iostream>
+#include <opencv2/core/core.hpp>
+
+/** Collects user-registered 2D/3D point pairs up to a configurable maximum */
+class ModelRegistration
+{
+public:
+
+  ModelRegistration();
+  virtual ~ModelRegistration();
+
+  /** Set how many points have to be registered in total */
+  void setNumMax(int n) { max_registrations_ = n; }
+
+  std::vector<cv::Point2f> get_points2d() const { return list_points2d_; }
+  std::vector<cv::Point3f> get_points3d() const { return list_points3d_; }
+  int getNumMax() const { return max_registrations_; }
+  int getNumRegist() const { return n_registrations_; }
+
+  /** True while fewer points than the maximum have been registered */
+  bool is_registrable() const { return (n_registrations_ < max_registrations_); }
+  void registerPoint(const cv::Point2f &point2d, const cv::Point3f &point3d);
+  void reset();
+
+private:
+  /** The current number of registered points */
+  int n_registrations_;
+  /** The total number of points to register */
+  int max_registrations_;
+  /** The list of 2D points to register the model */
+  std::vector<cv::Point2f> list_points2d_;
+  /** The list of 3D points to register the model */
+  std::vector<cv::Point3f> list_points3d_;
+};
+
+#endif /* MODELREGISTRATION_H_ */
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/PnPProblem.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/PnPProblem.cpp
new file mode 100644
index 0000000000..48c8be576d
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/PnPProblem.cpp
@@ -0,0 +1,312 @@
+/*
+ * PnPProblem.cpp
+ *
+ *  Created on: Mar 28, 2014
+ *      Author: Edgar Riba
+ */
+
+#include <iostream>
+#include <sstream>
+
+#include "PnPProblem.h"
+#include "Mesh.h"
+
+#include <opencv2/calib3d/calib3d.hpp>
+
+
+/* Functions for Möller–Trumbore intersection algorithm
+ * */
+// Cross product of two 3D vectors
+cv::Point3f CROSS(cv::Point3f v1, cv::Point3f v2)
+{
+  cv::Point3f result;
+  result.x = v1.y*v2.z - v1.z*v2.y;
+  result.y = v1.z*v2.x - v1.x*v2.z;
+  result.z = v1.x*v2.y - v1.y*v2.x;
+  return result;
+}
+
+// Dot product of two 3D vectors
+double DOT(cv::Point3f v1, cv::Point3f v2)
+{
+  double product = v1.x*v2.x + v1.y*v2.y + v1.z*v2.z;
+  return product;
+}
+
+// Component-wise difference v1 - v2
+cv::Point3f SUB(cv::Point3f v1, cv::Point3f v2)
+{
+  cv::Point3f diff;
+  diff.x = v1.x - v2.x;
+  diff.y = v1.y - v2.y;
+  diff.z = v1.z - v2.z;
+  return diff;
+}
+
+/* End functions for Möller–Trumbore intersection algorithm
+ *  */
+
+// Function to get the nearest 3D point to the Ray origin.
+// BUGFIX/generalisation: the previous version only compared the first two
+// entries, so a single intersection caused an out-of-bounds read and more
+// than two intersections could return a point that is not the nearest.
+// Precondition (unchanged): points_list is non-empty.
+cv::Point3f get_nearest_3D_point(std::vector<cv::Point3f> &points_list, cv::Point3f origin)
+{
+  cv::Point3f best_point = points_list[0];
+  double best_dist2 = std::pow(best_point.x-origin.x, 2) + std::pow(best_point.y-origin.y, 2) + std::pow(best_point.z-origin.z, 2);
+
+  // squared distances preserve the ordering, so no sqrt is needed
+  for (unsigned int i = 1; i < points_list.size(); ++i)
+  {
+    const cv::Point3f &p = points_list[i];
+    double dist2 = std::pow(p.x-origin.x, 2) + std::pow(p.y-origin.y, 2) + std::pow(p.z-origin.z, 2);
+    if (dist2 < best_dist2)
+    {
+      best_dist2 = dist2;
+      best_point = p;
+    }
+  }
+  return best_point;
+}
+
+// Custom constructor given the intrinsic camera parameters
+// params = { fx, fy, cx, cy }
+PnPProblem::PnPProblem(const double params[])
+{
+  // Intrinsic camera matrix:
+  //   [ fx   0  cx ]
+  //   [  0  fy  cy ]
+  //   [  0   0   1 ]
+  _A_matrix = cv::Mat::zeros(3, 3, CV_64FC1);
+  _A_matrix.at<double>(0, 0) = params[0];  // fx
+  _A_matrix.at<double>(1, 1) = params[1];  // fy
+  _A_matrix.at<double>(0, 2) = params[2];  // cx
+  _A_matrix.at<double>(1, 2) = params[3];  // cy
+  _A_matrix.at<double>(2, 2) = 1;
+
+  _R_matrix = cv::Mat::zeros(3, 3, CV_64FC1);  // rotation matrix
+  _t_matrix = cv::Mat::zeros(3, 1, CV_64FC1);  // translation vector
+  _P_matrix = cv::Mat::zeros(3, 4, CV_64FC1);  // rotation-translation matrix [R|t]
+}
+
+/** Destructor: the cv::Mat members release their storage automatically */
+PnPProblem::~PnPProblem()
+{
+}
+
+/** Compose the 3x4 pose matrix _P_matrix = [R|t].
+ *  BUGFIX: the previous version never copied R(2,2), leaving that element
+ *  of the projection matrix stuck at zero. */
+void PnPProblem::set_P_matrix( const cv::Mat &R_matrix, const cv::Mat &t_matrix)
+{
+  // Rotation-Translation Matrix Definition
+  for (int row = 0; row < 3; ++row)
+  {
+    for (int col = 0; col < 3; ++col)
+    {
+      _P_matrix.at<double>(row, col) = R_matrix.at<double>(row, col);
+    }
+    _P_matrix.at<double>(row, 3) = t_matrix.at<double>(row);
+  }
+}
+
+
+// Estimate the pose given a list of 2D/3D correspondences and the method to use
+bool PnPProblem::estimatePose( const std::vector<cv::Point3f> &list_points3d,
+                               const std::vector<cv::Point2f> &list_points2d,
+                               int flags)
+{
+  // no lens distortion is assumed and no initial extrinsic guess is supplied
+  cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1);
+  cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1);
+  cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1);
+  bool useExtrinsicGuess = false;
+
+  // solve the PnP problem for the given correspondences
+  bool correspondence = cv::solvePnP( list_points3d, list_points2d, _A_matrix, distCoeffs, rvec, tvec,
+                                      useExtrinsicGuess, flags);
+
+  // convert the axis-angle rotation vector to a 3x3 matrix and store the pose
+  Rodrigues(rvec, _R_matrix);
+  _t_matrix = tvec;
+  this->set_P_matrix(_R_matrix, _t_matrix);
+
+  return correspondence;
+}
+
+// Estimate the pose given a list of 2D/3D correspondences with RANSAC and the method to use
+void PnPProblem::estimatePoseRANSAC( const std::vector<cv::Point3f> &list_points3d, // list with model 3D coordinates
+                               const std::vector<cv::Point2f> &list_points2d,     // list with scene 2D coordinates
+                               int flags, cv::Mat &inliers, int iterationsCount,  // PnP method; inliers container
+                               float reprojectionError, float confidence )    // Ransac parameters
+{
+  cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1); // no lens distortion assumed
+  cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1);       // output axis-angle rotation
+  cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1);       // output translation
+  bool useExtrinsicGuess = false;                      // rvec/tvec above are not used as an initial guess
+
+  // robustly solve the PnP problem, filling 'inliers' with the consensus set
+  cv::solvePnPRansac( list_points3d, list_points2d, _A_matrix, distCoeffs, rvec, tvec,
+                useExtrinsicGuess, iterationsCount, reprojectionError, confidence,
+                inliers, flags );
+
+  // store the estimated pose: R from Rodrigues, t directly, then compose [R|t]
+  Rodrigues(rvec, _R_matrix);
+  _t_matrix = tvec;
+  this->set_P_matrix(_R_matrix, _t_matrix);
+}
+
+// Given the mesh, backproject the 3D points to 2D to verify the pose estimation
+std::vector<cv::Point2f> PnPProblem::verify_points(Mesh *mesh)
+{
+  std::vector<cv::Point2f> verified_points_2d;
+  const int num_vertices = mesh->getNumVertices();
+  for (int i = 0; i < num_vertices; ++i)
+  {
+    // project every mesh vertex with the currently estimated pose
+    verified_points_2d.push_back(this->backproject3DPoint(mesh->getVertex(i)));
+  }
+  return verified_points_2d;
+}
+
+
+// Backproject a 3D point to 2D using the estimated pose parameters
+cv::Point2f PnPProblem::backproject3DPoint(const cv::Point3f &point3d)
+{
+  // Homogeneous 3D point [X Y Z 1]'
+  cv::Mat point3d_vec = cv::Mat(4, 1, CV_64FC1);
+  point3d_vec.at<double>(0) = point3d.x;
+  point3d_vec.at<double>(1) = point3d.y;
+  point3d_vec.at<double>(2) = point3d.z;
+  point3d_vec.at<double>(3) = 1;
+
+  // Homogeneous image point [u' v' w]' = A * [R|t] * [X Y Z 1]'
+  // (fix: the previous code first allocated a 4x1 matrix here that was
+  // immediately discarded by this assignment)
+  cv::Mat point2d_vec = _A_matrix * _P_matrix * point3d_vec;
+
+  // Perspective division to obtain [u v]'
+  cv::Point2f point2d;
+  point2d.x = (float)(point2d_vec.at<double>(0) / point2d_vec.at<double>(2));
+  point2d.y = (float)(point2d_vec.at<double>(1) / point2d_vec.at<double>(2));
+
+  return point2d;
+}
+
+// Back project a 2D point to 3D and returns if it's on the object surface.
+// Casts a ray from the camera centre through the given pixel and intersects
+// it with every triangle of the mesh; on a hit, 'point3d' receives the
+// intersection nearest to the ray origin and the function returns true.
+bool PnPProblem::backproject2DPoint(const Mesh *mesh, const cv::Point2f &point2d, cv::Point3f &point3d)
+{
+  // Triangles list of the object mesh
+  std::vector<std::vector<int> > triangles_list = mesh->getTrianglesList();
+
+  // Arbitrary positive depth used to build the homogeneous image point; any
+  // lambda > 0 gives the same ray direction after the normalisation below.
+  double lambda = 8;
+  double u = point2d.x;
+  double v = point2d.y;
+
+  // Point in vector form: [u*lambda v*lambda lambda]'
+  cv::Mat point2d_vec = cv::Mat::ones(3, 1, CV_64F); // 3x1
+  point2d_vec.at<double>(0) = u * lambda;
+  point2d_vec.at<double>(1) = v * lambda;
+  point2d_vec.at<double>(2) = lambda;
+
+  // Point in camera coordinates: X_c = A^-1 * point2d_vec
+  cv::Mat X_c = _A_matrix.inv() * point2d_vec ; // 3x1
+
+  // Point in world coordinates: X_w = R^-1 * (X_c - t)
+  cv::Mat X_w = _R_matrix.inv() * ( X_c - _t_matrix ); // 3x1
+
+  // Center of projection: C = -R^-1 * t  (.mul(-1) negates element-wise)
+  cv::Mat C_op = cv::Mat(_R_matrix.inv()).mul(-1) * _t_matrix; // 3x1
+
+  // Ray direction vector, normalised to unit length
+  cv::Mat ray = X_w - C_op; // 3x1
+  ray = ray / cv::norm(ray); // 3x1
+
+  // Set up Ray: origin is the centre of projection, P1 holds the direction
+  Ray R((cv::Point3f)C_op, (cv::Point3f)ray);
+
+  // A vector to store the intersections found
+  std::vector<cv::Point3f> intersections_list;
+
+  // Loop for all the triangles and check the intersection
+  for (unsigned int i = 0; i < triangles_list.size(); i++)
+  {
+    cv::Point3f V0 = mesh->getVertex(triangles_list[i][0]);
+    cv::Point3f V1 = mesh->getVertex(triangles_list[i][1]);
+    cv::Point3f V2 = mesh->getVertex(triangles_list[i][2]);
+
+    Triangle T(i, V0, V1, V2);
+
+    double out;
+    if(this->intersect_MollerTrumbore(R, T, &out))
+    {
+      cv::Point3f tmp_pt = R.getP0() + out*R.getP1(); // P = O + t*D
+      intersections_list.push_back(tmp_pt);
+    }
+  }
+
+  // If there are intersections, keep the one nearest to the ray origin
+  if (!intersections_list.empty())
+  {
+    point3d = get_nearest_3D_point(intersections_list, R.getP0());
+    return true;
+  }
+  else
+  {
+    return false;
+  }
+}
+
+// Möller–Trumbore intersection algorithm: ray/triangle intersection without
+// precomputing the triangle's plane equation. On a hit, *out receives the
+// ray parameter t (distance along the direction vector) and true is returned.
+bool PnPProblem::intersect_MollerTrumbore(Ray &Ray, Triangle &Triangle, double *out)
+{
+  // tolerance for treating the determinant as zero (ray parallel to triangle)
+  const double EPSILON = 0.000001;
+
+  cv::Point3f e1, e2;
+  cv::Point3f P, Q, T;
+  double det, inv_det, u, v;
+  double t;
+
+  cv::Point3f V1 = Triangle.getV0();  // Triangle vertices
+  cv::Point3f V2 = Triangle.getV1();
+  cv::Point3f V3 = Triangle.getV2();
+
+  cv::Point3f O = Ray.getP0(); // Ray origin
+  cv::Point3f D = Ray.getP1(); // Ray direction
+
+  //Find vectors for two edges sharing V1
+  e1 = SUB(V2, V1);
+  e2 = SUB(V3, V1);
+
+  // Begin calculation determinant - also used to calculate U parameter
+  P = CROSS(D, e2);
+
+  // If determinant is near zero, ray lies in the plane of the triangle
+  det = DOT(e1, P);
+
+  //NOT CULLING: back-facing triangles are accepted as well
+  if(det > -EPSILON && det < EPSILON) return false;
+  inv_det = 1.f / det;
+
+  //calculate distance from V1 to ray origin
+  T = SUB(O, V1);
+
+  //Calculate u parameter (first barycentric coordinate) and test bound
+  u = DOT(T, P) * inv_det;
+
+  //The intersection lies outside of the triangle
+  if(u < 0.f || u > 1.f) return false;
+
+  //Prepare to test v parameter
+  Q = CROSS(T, e1);
+
+  //Calculate V parameter (second barycentric coordinate) and test bound
+  v = DOT(D, Q) * inv_det;
+
+  //The intersection lies outside of the triangle (u + v must not exceed 1)
+  if(v < 0.f || u + v  > 1.f) return false;
+
+  // ray parameter of the intersection point
+  t = DOT(e2, Q) * inv_det;
+
+  if(t > EPSILON) { //ray intersection in front of the origin
+    *out = t;
+    return true;
+  }
+
+  // No hit, no win
+  return false;
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/PnPProblem.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/PnPProblem.h
new file mode 100644
index 0000000000..da189d0a0e
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/PnPProblem.h
@@ -0,0 +1,53 @@
+/*
+ * PnPProblem.h
+ *
+ *  Created on: Mar 28, 2014
+ *      Author: Edgar Riba
+ */
+
+#ifndef PNPPROBLEM_H_
+#define PNPPROBLEM_H_
+
+#include <iostream>
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/highgui/highgui.hpp>
+
+#include "Mesh.h"
+#include "ModelRegistration.h"
+
+/** Pose estimation from 2D/3D correspondences for a calibrated camera,
+ *  plus ray/mesh utilities to map image points onto the object surface. */
+class PnPProblem
+{
+
+public:
+  /** Builds the intrinsic matrix from params = { fx, fy, cx, cy } */
+  explicit PnPProblem(const double param[]);  // custom constructor
+  virtual ~PnPProblem();
+
+  /** Cast a ray through 'point2d'; true if it hits the mesh, filling 'point3d' */
+  bool backproject2DPoint(const Mesh *mesh, const cv::Point2f &point2d, cv::Point3f &point3d);
+  /** Möller–Trumbore ray/triangle intersection; *out receives the ray parameter */
+  bool intersect_MollerTrumbore(Ray &R, Triangle &T, double *out);
+  /** Project every mesh vertex with the current pose */
+  std::vector<cv::Point2f> verify_points(Mesh *mesh);
+  /** Project a single 3D point with the current pose */
+  cv::Point2f backproject3DPoint(const cv::Point3f &point3d);
+  /** Estimate and store the pose with cv::solvePnP; returns its result */
+  bool estimatePose(const std::vector<cv::Point3f> &list_points3d, const std::vector<cv::Point2f> &list_points2d, int flags);
+  /** Estimate and store the pose robustly with cv::solvePnPRansac */
+  void estimatePoseRANSAC( const std::vector<cv::Point3f> &list_points3d, const std::vector<cv::Point2f> &list_points2d,
+                           int flags, cv::Mat &inliers,
+                           int iterationsCount, float reprojectionError, float confidence );
+
+  cv::Mat get_A_matrix() const { return _A_matrix; }
+  cv::Mat get_R_matrix() const { return _R_matrix; }
+  cv::Mat get_t_matrix() const { return _t_matrix; }
+  cv::Mat get_P_matrix() const { return _P_matrix; }
+
+  /** Compose _P_matrix = [R|t] from the given rotation and translation */
+  void set_P_matrix( const cv::Mat &R_matrix, const cv::Mat &t_matrix);
+
+private:
+  /** The calibration matrix */
+  cv::Mat _A_matrix;
+  /** The computed rotation matrix */
+  cv::Mat _R_matrix;
+  /** The computed translation matrix */
+  cv::Mat _t_matrix;
+  /** The computed projection matrix */
+  cv::Mat _P_matrix;
+};
+
+#endif /* PNPPROBLEM_H_ */
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp
new file mode 100644
index 0000000000..7ef9f887a6
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp
@@ -0,0 +1,152 @@
+/*
+ * RobustMatcher.cpp
+ *
+ *  Created on: Jun 4, 2014
+ *      Author: eriba
+ */
+
+#include "RobustMatcher.h"
+#include <time.h>
+
+#include <opencv2/features2d/features2d.hpp>
+
+// Destructor.
+// NOTE(review): detector_, extractor_ and matcher_ are raw pointers allocated
+// with 'new' in the constructor and never deleted here, so they leak; consider
+// cv::Ptr ownership. Left unchanged (behavior preserved).
+RobustMatcher::~RobustMatcher()
+{
+  // TODO Auto-generated destructor stub
+}
+
+// Detect keypoints in 'image' with the configured feature detector (ORB by default).
+void RobustMatcher::computeKeyPoints( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints)
+{
+  detector_->detect(image, keypoints);
+}
+
+// Compute descriptors for the given keypoints with the configured extractor.
+// Note: the extractor may drop keypoints it cannot describe, so 'keypoints'
+// can shrink (standard cv::DescriptorExtractor::compute behavior).
+void RobustMatcher::computeDescriptors( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, cv::Mat& descriptors)
+{
+  extractor_->compute(image, keypoints, descriptors);
+}
+
+// Apply Lowe's ratio test in place: clear every knn-match entry whose
+// best/second-best distance ratio exceeds ratio_, or that has fewer than two
+// neighbours. Cleared entries remain in the vector with size 0 so that index
+// positions are preserved; returns the number of cleared entries.
+int RobustMatcher::ratioTest(std::vector<std::vector<cv::DMatch> > &matches)
+{
+  int removed = 0;
+  // for all matches
+  for ( std::vector<std::vector<cv::DMatch> >::iterator
+        matchIterator= matches.begin(); matchIterator!= matches.end(); ++matchIterator)
+  {
+    // if 2 NN has been identified
+    if (matchIterator->size() > 1)
+    {
+      // check distance ratio
+      if ((*matchIterator)[0].distance / (*matchIterator)[1].distance > ratio_)
+      {
+        matchIterator->clear(); // remove match
+        removed++;
+      }
+    }
+    else
+    { // does not have 2 neighbours
+      matchIterator->clear(); // remove match
+      removed++;
+    }
+  }
+  return removed;
+}
+
+// Cross-check test: a match (i -> j) from image 1 to image 2 survives only if
+// the best match of j back in image 1 is i. Surviving matches are appended to
+// 'symMatches'; entries cleared by ratioTest (size < 2) are skipped.
+void RobustMatcher::symmetryTest( const std::vector<std::vector<cv::DMatch> >& matches1,
+                     const std::vector<std::vector<cv::DMatch> >& matches2,
+                     std::vector<cv::DMatch>& symMatches )
+{
+
+  // for all matches image 1 -> image 2
+   for (std::vector<std::vector<cv::DMatch> >::const_iterator
+       matchIterator1 = matches1.begin(); matchIterator1 != matches1.end(); ++matchIterator1)
+   {
+
+      // ignore deleted matches
+      if (matchIterator1->empty() || matchIterator1->size() < 2)
+         continue;
+
+      // for all matches image 2 -> image 1
+      for (std::vector<std::vector<cv::DMatch> >::const_iterator
+          matchIterator2 = matches2.begin(); matchIterator2 != matches2.end(); ++matchIterator2)
+      {
+        // ignore deleted matches
+        if (matchIterator2->empty() || matchIterator2->size() < 2)
+           continue;
+
+        // Match symmetry test
+        if ((*matchIterator1)[0].queryIdx ==
+            (*matchIterator2)[0].trainIdx &&
+            (*matchIterator2)[0].queryIdx ==
+            (*matchIterator1)[0].trainIdx)
+        {
+            // add symmetrical match
+            symMatches.push_back(
+              cv::DMatch((*matchIterator1)[0].queryIdx,
+                         (*matchIterator1)[0].trainIdx,
+                         (*matchIterator1)[0].distance));
+            break; // next match in image 1 -> image 2
+        }
+      }
+   }
+
+}
+
+// Full robust matching of 'frame' against the model descriptors:
+// ORB detection/extraction, knn-matching in both directions, ratio test and
+// symmetry test. On return 'good_matches' holds the surviving matches and
+// 'keypoints_frame' the keypoints detected in the frame.
+void RobustMatcher::robustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
+              std::vector<cv::KeyPoint>& keypoints_frame, const cv::Mat& descriptors_model )
+{
+  good_matches.clear(); // same contract as fastRobustMatch(): start from empty
+
+  // 1a. Detection of the ORB features
+  this->computeKeyPoints(frame, keypoints_frame);
+
+  // 1b. Extraction of the ORB descriptors
+  cv::Mat descriptors_frame;
+  this->computeDescriptors(frame, keypoints_frame, descriptors_frame);
+
+  // 2. Match the two image descriptors
+  std::vector<std::vector<cv::DMatch> > matches12, matches21;
+
+  // 2a. From image 1 to image 2
+  matcher_->knnMatch(descriptors_frame, descriptors_model, matches12, 2); // return 2 nearest neighbours
+
+  // 2b. From image 2 to image 1
+  matcher_->knnMatch(descriptors_model, descriptors_frame, matches21, 2); // return 2 nearest neighbours
+
+  // 3. Remove matches for which NN ratio is > than threshold.
+  // The removal counters are intentionally ignored; the vectors are cleaned in place.
+  ratioTest(matches12); // clean image 1 -> image 2 matches
+  ratioTest(matches21); // clean image 2 -> image 1 matches
+
+  // 4. Remove non-symmetrical matches
+  symmetryTest(matches12, matches21, good_matches);
+}
+
+// Fast matching of 'frame' against the model descriptors:
+// ORB detection/extraction, one-directional knn-matching and ratio test only
+// (no symmetry test) -- cheaper but less strict than robustMatch().
+void RobustMatcher::fastRobustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
+                                 std::vector<cv::KeyPoint>& keypoints_frame,
+                                 const cv::Mat& descriptors_model )
+{
+  good_matches.clear();
+
+  // 1a. Detection of the ORB features
+  this->computeKeyPoints(frame, keypoints_frame);
+
+  // 1b. Extraction of the ORB descriptors
+  cv::Mat descriptors_frame;
+  this->computeDescriptors(frame, keypoints_frame, descriptors_frame);
+
+  // 2. Match the two image descriptors
+  std::vector<std::vector<cv::DMatch> > matches;
+  matcher_->knnMatch(descriptors_frame, descriptors_model, matches, 2);
+
+  // 3. Remove matches for which NN ratio is > than threshold.
+  // The removal counter is intentionally ignored; 'matches' is cleaned in place.
+  ratioTest(matches);
+
+  // 4. Keep the best (first) neighbour of each surviving knn entry
+  for ( std::vector<std::vector<cv::DMatch> >::iterator
+         matchIterator= matches.begin(); matchIterator!= matches.end(); ++matchIterator)
+  {
+    if (!matchIterator->empty()) good_matches.push_back((*matchIterator)[0]);
+  }
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h
new file mode 100644
index 0000000000..fc03c74faa
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.h
@@ -0,0 +1,83 @@
+/*
+ * RobustMatcher.h
+ *
+ *  Created on: Jun 4, 2014
+ *      Author: eriba
+ */
+
+#ifndef ROBUSTMATCHER_H_
+#define ROBUSTMATCHER_H_
+
+#include <iostream>
+#include <boost/shared_ptr.hpp>
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/features2d/features2d.hpp>
+#include <opencv2/nonfree/nonfree.hpp>
+
+// Feature matcher with outlier rejection: detects/describes keypoints and
+// filters knn matches with a ratio test and (optionally) a symmetry test.
+// NOTE(review): the detector/extractor/matcher members are owning raw
+// pointers; the setters overwrite them without deleting the previous object
+// and the destructor frees nothing, so defaults and replacements leak.
+class RobustMatcher {
+public:
+  RobustMatcher() : ratio_(0.8f)
+  {
+    // ORB is the default feature
+    detector_ = new cv::OrbFeatureDetector();
+    extractor_ = new cv::OrbDescriptorExtractor();
+
+    // BruteForce matcher with Norm Hamming is the default matcher
+    matcher_ = new cv::BFMatcher(cv::NORM_HAMMING, false);
+
+  }
+  virtual ~RobustMatcher();
+
+  // Set the feature detector (takes over the pointer; previous one is leaked)
+  void setFeatureDetector(cv::FeatureDetector * detect) {  detector_ = detect; }
+
+  // Set the descriptor extractor (takes over the pointer; previous one is leaked)
+  void setDescriptorExtractor(cv::DescriptorExtractor * desc) { extractor_ = desc; }
+
+  // Set the matcher (takes over the pointer; previous one is leaked)
+  void setDescriptorMatcher(cv::DescriptorMatcher * match) {  matcher_ = match; }
+
+  // Compute the keypoints of an image
+  void computeKeyPoints( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints);
+
+  // Compute the descriptors of an image given its keypoints
+  void computeDescriptors( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, cv::Mat& descriptors);
+
+  // Set ratio parameter for the ratio test
+  void setRatio( float rat) { ratio_ = rat; }
+
+  // Clear matches for which NN ratio is > than threshold
+  // return the number of removed points
+  // (corresponding entries being cleared,
+  // i.e. size will be 0)
+  int ratioTest(std::vector<std::vector<cv::DMatch> > &matches);
+
+  // Insert symmetrical matches in symMatches vector
+  void symmetryTest( const std::vector<std::vector<cv::DMatch> >& matches1,
+                     const std::vector<std::vector<cv::DMatch> >& matches2,
+                     std::vector<cv::DMatch>& symMatches );
+
+  // Match feature points using ratio and symmetry test
+  void robustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
+                      std::vector<cv::KeyPoint>& keypoints_frame,
+                      const cv::Mat& descriptors_model );
+
+ // Match feature points using ratio test
+ void fastRobustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
+                       std::vector<cv::KeyPoint>& keypoints_frame,
+                       const cv::Mat& descriptors_model );
+
+private:
+  // pointer to the feature point detector object
+  cv::FeatureDetector * detector_;
+  // pointer to the feature descriptor extractor object
+  cv::DescriptorExtractor * extractor_;
+  // pointer to the matcher object
+  cv::DescriptorMatcher * matcher_;
+  // max ratio between 1st and 2nd NN
+  float ratio_;
+};
+
+#endif /* ROBUSTMATCHER_H_ */
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Utils.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Utils.cpp
new file mode 100644
index 0000000000..f66a10dd2a
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Utils.cpp
@@ -0,0 +1,295 @@
+/*
+ * Utils.cpp
+ *
+ *  Created on: Mar 28, 2014
+ *      Author: Edgar Riba
+ */
+
+#include <iostream>
+
+#include "PnPProblem.h"
+#include "ModelRegistration.h"
+#include "Utils.h"
+
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/nonfree/features2d.hpp>
+
+
+// Shared drawing parameters used by all helpers in this file.
+
+// For text
+int fontFace = cv::FONT_ITALIC;
+double fontScale = 0.75;
+double thickness_font = 2;
+
+// For circles
+int lineType = 8;            // 8-connected line type
+int radius = 4;              // circle radius in pixels
+double thickness_circ = -1;  // negative => filled; not referenced in this file (circles pass -1 directly)
+
+// Draw a prompt asking where a 3D point is, e.g. " Where is point (x,y,z) ?",
+// in the upper-left of the image (coordinates truncated to int).
+void drawQuestion(cv::Mat image, cv::Point3f point, cv::Scalar color)
+{
+  std::string x = IntToString((int)point.x);
+  std::string y = IntToString((int)point.y);
+  std::string z = IntToString((int)point.z);
+
+  std::string text = " Where is point (" + x + ","  + y + "," + z + ") ?";
+  cv::putText(image, text, cv::Point(25,50), fontFace, fontScale, color, thickness_font, 8);
+}
+
+// Draw an arbitrary text string in the upper-left of the image (first row).
+void drawText(cv::Mat image, std::string text, cv::Scalar color)
+{
+  cv::putText(image, text, cv::Point(25,50), fontFace, fontScale, color, thickness_font, 8);
+}
+
+// Draw an arbitrary text string in the upper-left of the image (second row).
+void drawText2(cv::Mat image, std::string text, cv::Scalar color)
+{
+  cv::putText(image, text, cv::Point(25,75), fontFace, fontScale, color, thickness_font, 8);
+}
+
+// Draw the current frame rate ("<fps> FPS") in the upper-right of the image.
+void drawFPS(cv::Mat image, double fps, cv::Scalar color)
+{
+  std::string fps_str = IntToString((int)fps);
+  std::string text = fps_str + " FPS";
+  cv::putText(image, text, cv::Point(500,50), fontFace, fontScale, color, thickness_font, 8);
+}
+
+// Draw a confidence percentage ("<confidence> %") below the FPS readout.
+void drawConfidence(cv::Mat image, double confidence, cv::Scalar color)
+{
+  std::string conf_str = IntToString((int)confidence);
+  std::string text = conf_str + " %";
+  cv::putText(image, text, cv::Point(500,75), fontFace, fontScale, color, thickness_font, 8);
+}
+
+// Draw a registration progress counter, "n of n_max points", at the same
+// position drawFPS uses (the two are not meant to be shown together).
+void drawCounter(cv::Mat image, int n, int n_max, cv::Scalar color)
+{
+  std::string n_str = IntToString(n);
+  std::string n_max_str = IntToString(n_max);
+  std::string text = n_str + " of " + n_max_str + " points";
+  cv::putText(image, text, cv::Point(500,50), fontFace, fontScale, color, thickness_font, 8);
+}
+
+// Draw each 2D point as a filled circle and label it "P<i> (x,y,z)" with its
+// 3D coordinates. The two lists must be parallel: element i of list_points_3d
+// is indexed for element i of list_points_2d, so they must be the same length.
+void drawPoints(cv::Mat image, std::vector<cv::Point2f> &list_points_2d, std::vector<cv::Point3f> &list_points_3d, cv::Scalar color)
+{
+  for (unsigned int i = 0; i < list_points_2d.size(); ++i)
+  {
+    cv::Point2f point_2d = list_points_2d[i];
+    cv::Point3f point_3d = list_points_3d[i];
+
+    // Draw Selected points
+    cv::circle(image, point_2d, radius, color, -1, lineType );
+
+    std::string idx = IntToString(i+1);
+    std::string x = IntToString((int)point_3d.x);
+    std::string y = IntToString((int)point_3d.y);
+    std::string z = IntToString((int)point_3d.z);
+    std::string text = "P" + idx + " (" + x + "," + y + "," + z +")";
+
+    // offset the label up-right so it does not overlap the circle
+    point_2d.x = point_2d.x + 10;
+    point_2d.y = point_2d.y - 10;
+    cv::putText(image, text, point_2d, fontFace, fontScale*0.5, color, thickness_font, 8);
+  }
+}
+
+// Draw each 2D point in the list as a small filled circle (no labels).
+void draw2DPoints(cv::Mat image, std::vector<cv::Point2f> &list_points, cv::Scalar color)
+{
+  for( size_t i = 0; i < list_points.size(); i++)
+  {
+    cv::Point2f point_2d = list_points[i];
+
+    // Draw Selected points
+    cv::circle(image, point_2d, radius, color, -1, lineType );
+  }
+}
+
+// Draw an arrow from p to q: the main segment plus two arrow-head segments of
+// length 'arrowMagnitude' at +/-45 degrees around the line direction.
+// 'p' is passed by value and reused as scratch for the head endpoints.
+void drawArrow(cv::Mat image, cv::Point2i p, cv::Point2i q, cv::Scalar color, int arrowMagnitude, int thickness, int line_type, int shift)
+{
+  //Draw the principle line
+  cv::line(image, p, q, color, thickness, line_type, shift);
+  const double PI = 3.141592653;
+  //compute the angle alpha
+  double angle = atan2((double)p.y-q.y, (double)p.x-q.x);
+  //compute the coordinates of the first segment
+  p.x = (int) ( q.x +  arrowMagnitude * cos(angle + PI/4));
+  p.y = (int) ( q.y +  arrowMagnitude * sin(angle + PI/4));
+  //Draw the first segment
+  cv::line(image, p, q, color, thickness, line_type, shift);
+  //compute the coordinates of the second segment
+  p.x = (int) ( q.x +  arrowMagnitude * cos(angle - PI/4));
+  p.y = (int) ( q.y +  arrowMagnitude * sin(angle - PI/4));
+  //Draw the second segment
+  cv::line(image, p, q, color, thickness, line_type, shift);
+}
+
+// Draw the projected 3D coordinate axes. 'list_points2d' must contain at
+// least four points: {origin, X-axis tip, Y-axis tip, Z-axis tip}.
+// X is drawn red, Y blue, Z green, with a black dot at the origin.
+void draw3DCoordinateAxes(cv::Mat image, const std::vector<cv::Point2f> &list_points2d)
+{
+  cv::Scalar red(0, 0, 255);
+  cv::Scalar green(0,255,0);
+  cv::Scalar blue(255,0,0);
+  cv::Scalar black(0,0,0);
+
+  cv::Point2i origin = list_points2d[0];
+  cv::Point2i pointX = list_points2d[1];
+  cv::Point2i pointY = list_points2d[2];
+  cv::Point2i pointZ = list_points2d[3];
+
+  drawArrow(image, origin, pointX, red, 9, 2);
+  drawArrow(image, origin, pointY, blue, 9, 2);
+  drawArrow(image, origin, pointZ, green, 9, 2);
+  cv::circle(image, origin, radius/2, black, -1, lineType );
+}
+
+// Draw the wireframe of the mesh: back-project each triangle's three vertices
+// with the pose currently held by pnpProblem and connect them with lines.
+void drawObjectMesh(cv::Mat image, const Mesh *mesh, PnPProblem *pnpProblem, cv::Scalar color)
+{
+  std::vector<std::vector<int> > list_triangles = mesh->getTrianglesList();
+  for( size_t i = 0; i < list_triangles.size(); i++)
+  {
+    std::vector<int> tmp_triangle = list_triangles.at(i);
+
+    cv::Point3f point_3d_0 = mesh->getVertex(tmp_triangle[0]);
+    cv::Point3f point_3d_1 = mesh->getVertex(tmp_triangle[1]);
+    cv::Point3f point_3d_2 = mesh->getVertex(tmp_triangle[2]);
+
+    cv::Point2f point_2d_0 = pnpProblem->backproject3DPoint(point_3d_0);
+    cv::Point2f point_2d_1 = pnpProblem->backproject3DPoint(point_3d_1);
+    cv::Point2f point_2d_2 = pnpProblem->backproject3DPoint(point_3d_2);
+
+    cv::line(image, point_2d_0, point_2d_1, color, 1);
+    cv::line(image, point_2d_1, point_2d_2, color, 1);
+    cv::line(image, point_2d_2, point_2d_0, color, 1);
+  }
+}
+
+// Exact equality of two 2D points (no epsilon tolerance).
+// NOTE(review): float '==' only makes sense for points copied from the same
+// source; use a tolerance when comparing computed coordinates.
+bool equal_point(const cv::Point2f &p1, const cv::Point2f &p2)
+{
+  return ( (p1.x == p2.x) && (p1.y == p2.y) );
+}
+
+// Translation error: L2 norm of the difference between the ground-truth and
+// the estimated translation vectors.
+double get_translation_error(const cv::Mat &t_true, const cv::Mat &t)
+{
+  return cv::norm( t_true - t );
+}
+
+// Rotation error: the norm of the Rodrigues (axis-angle) vector of the matrix
+// R_true * (-R^-1), where .mul(-1) negates R.inv() elementwise.
+// NOTE(review): the usual geodesic distance is norm(Rodrigues(R_true * R.t()))
+// without the negation -- confirm this formula is intended.
+double get_rotation_error(const cv::Mat &R_true, const cv::Mat &R)
+{
+  cv::Mat error_vec, error_mat;
+  error_mat = R_true * cv::Mat(R.inv()).mul(-1);
+  cv::Rodrigues(error_mat, error_vec);
+
+  return cv::norm(error_vec);
+}
+
+// Convert a 3x3 rotation matrix (CV_64F) to Euler angles, returned as a 3x1
+// CV_64F vector (x, y, z) in radians. Handles the gimbal-lock singularities
+// at y = +/-90 degrees (|m10| > 0.998). Inverse of euler2rot below.
+cv::Mat rot2euler(const cv::Mat & rotationMatrix)
+{
+  cv::Mat euler(3,1,CV_64F);
+
+  double m00 = rotationMatrix.at<double>(0,0);
+  double m01 = rotationMatrix.at<double>(0,1);
+  double m02 = rotationMatrix.at<double>(0,2);
+  double m10 = rotationMatrix.at<double>(1,0);
+  double m11 = rotationMatrix.at<double>(1,1);
+  double m12 = rotationMatrix.at<double>(1,2);
+  double m20 = rotationMatrix.at<double>(2,0);
+  double m21 = rotationMatrix.at<double>(2,1);
+  double m22 = rotationMatrix.at<double>(2,2);
+
+  double x, y, z;
+
+  // Assuming the angles are in radians.
+  if (m10 > 0.998) { // singularity at north pole
+    x = 0;
+    y = CV_PI/2;
+    z = atan2(m02,m22);
+  }
+  else if (m10 < -0.998) { // singularity at south pole
+    x = 0;
+    y = -CV_PI/2;
+    z = atan2(m02,m22);
+  }
+  else
+  {
+    x = atan2(-m12,m11);
+    y = asin(m10);
+    z = atan2(-m20,m00);
+  }
+
+  euler.at<double>(0) = x;
+  euler.at<double>(1) = y;
+  euler.at<double>(2) = z;
+
+  return euler;
+}
+
+// Convert Euler angles (3x1 CV_64F vector: x, y, z in radians) to a 3x3
+// CV_64F rotation matrix. Inverse of rot2euler above; the z/y/x angles act as
+// heading/attitude/bank in the composed matrix.
+cv::Mat euler2rot(const cv::Mat & euler)
+{
+  cv::Mat rotationMatrix(3,3,CV_64F);
+
+  double x = euler.at<double>(0);
+  double y = euler.at<double>(1);
+  double z = euler.at<double>(2);
+
+  // Assuming the angles are in radians.
+  double ch = cos(z);
+  double sh = sin(z);
+  double ca = cos(y);
+  double sa = sin(y);
+  double cb = cos(x);
+  double sb = sin(x);
+
+  double m00, m01, m02, m10, m11, m12, m20, m21, m22;
+
+  m00 = ch * ca;
+  m01 = sh*sb - ch*sa*cb;
+  m02 = ch*sa*sb + sh*cb;
+  m10 = sa;
+  m11 = ca*cb;
+  m12 = -ca*sb;
+  m20 = -sh*ca;
+  m21 = sh*sa*cb + ch*sb;
+  m22 = -sh*sa*sb + ch*cb;
+
+  rotationMatrix.at<double>(0,0) = m00;
+  rotationMatrix.at<double>(0,1) = m01;
+  rotationMatrix.at<double>(0,2) = m02;
+  rotationMatrix.at<double>(1,0) = m10;
+  rotationMatrix.at<double>(1,1) = m11;
+  rotationMatrix.at<double>(1,2) = m12;
+  rotationMatrix.at<double>(2,0) = m20;
+  rotationMatrix.at<double>(2,1) = m21;
+  rotationMatrix.at<double>(2,2) = m22;
+
+  return rotationMatrix;
+}
+
+// Parse an integer from text via stream extraction; returns 0 when the text
+// does not start with a valid integer.
+int StringToInt ( const std::string &Text )
+{
+   std::istringstream ss(Text);
+   int result;
+   return ss >> result ? result : 0;
+}
+
+// Format a float as a decimal string using default ostringstream precision.
+std::string FloatToString ( float Number )
+{
+  std::ostringstream ss;
+  ss << Number;
+  return ss.str();
+}
+
+// Format an int as a decimal string.
+std::string IntToString ( int Number )
+{
+  std::ostringstream ss;
+  ss << Number;
+  return ss.str();
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Utils.h b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Utils.h
new file mode 100644
index 0000000000..d51eb28b39
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/Utils.h
@@ -0,0 +1,75 @@
+/*
+ * Utils.h
+ *
+ *  Created on: Mar 28, 2014
+ *      Author: Edgar Riba
+ */
+
+#ifndef UTILS_H_
+#define UTILS_H_
+
+#include <iostream>
+#include <cv.h>
+
+#include "PnPProblem.h"
+
+// Draw a text with the question point
+void drawQuestion(cv::Mat image, cv::Point3f point, cv::Scalar color);
+
+// Draw a text in the left of the image
+void drawText(cv::Mat image, std::string text, cv::Scalar color);
+void drawText2(cv::Mat image, std::string text, cv::Scalar color);
+void drawFPS(cv::Mat image, double fps, cv::Scalar color);
+void drawConfidence(cv::Mat image, double confidence, cv::Scalar color);
+
+// Draw a text with the number of registered points
+void drawCounter(cv::Mat image, int n, int n_max, cv::Scalar color);
+
+// Draw the points and the coordinates
+void drawPoints(cv::Mat image, std::vector<cv::Point2f> &list_points_2d, std::vector<cv::Point3f> &list_points_3d, cv::Scalar color);
+
+// Draw only the points
+void draw2DPoints(cv::Mat image, std::vector<cv::Point2f> &list_points, cv::Scalar color);
+
+// Draw the object mesh
+void drawObjectMesh(cv::Mat image, const Mesh *mesh, PnPProblem *pnpProblem, cv::Scalar color);
+
+// Draw an arrow into the image
+void drawArrow(cv::Mat image, cv::Point2i p, cv::Point2i q, cv::Scalar color, int arrowMagnitude = 9, int thickness=1, int line_type=8, int shift=0);
+
+// Draw the 3D coordinate axes
+void draw3DCoordinateAxes(cv::Mat image, const std::vector<cv::Point2f> &list_points2d);
+
+// Compute the 2D points with the estimated pose
+std::vector<cv::Point2f> verification_points(PnPProblem *p);
+
+bool equal_point(const cv::Point2f &p1, const cv::Point2f &p2);
+
+double get_translation_error(const cv::Mat &t_true, const cv::Mat &t);
+double get_rotation_error(const cv::Mat &R_true, const cv::Mat &R);
+// NOTE(review): verification_points, get_variance and the quaternion helpers
+// below are declared here but their definitions are not in Utils.cpp in this
+// chunk -- confirm they are defined (or prune the declarations).
+double get_variance(const std::vector<cv::Point3f> list_points3d);
+cv::Mat rot2quat(cv::Mat &rotationMatrix);
+cv::Mat quat2rot(cv::Mat &q);
+cv::Mat euler2rot(const cv::Mat & euler);
+cv::Mat rot2euler(const cv::Mat & rotationMatrix);
+cv::Mat quat2euler(const cv::Mat & q);
+int StringToInt ( const std::string &Text );
+std::string FloatToString ( float Number );
+std::string IntToString ( int Number );
+
+// Simple stopwatch based on clock()/CLOCKS_PER_SEC.
+// NOTE(review): clock() measures CPU time, not wall-clock time; the 'ttime'
+// member is never used; and the Timer() constructor is declared but its
+// definition is not visible in this chunk -- confirm it exists.
+class Timer
+{
+public:
+  Timer();
+    void start() { tstart = (double)clock()/CLOCKS_PER_SEC; }
+    void stop()  { tstop = (double)clock()/CLOCKS_PER_SEC; }
+    void reset()  { tstart = 0;  tstop = 0;}
+
+    double getTimeMilli() const { return (tstop-tstart)*1000; }
+    double getTimeSec()   const {  return tstop-tstart; }
+
+private:
+    double tstart, tstop, ttime;
+};
+
+#endif /* UTILS_H_ */
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp
new file mode 100644
index 0000000000..ff00e24e84
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp
@@ -0,0 +1,472 @@
+#include <iostream>
+#include <time.h>
+
+#include "cv.h"
+#include "highgui.h"
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/video/tracking.hpp>
+
+#include "Mesh.h"
+#include "Model.h"
+#include "PnPProblem.h"
+#include "RobustMatcher.h"
+#include "ModelRegistration.h"
+#include "Utils.h"
+
+// Input data paths, relative to the working directory the binary is run from.
+
+//  COOKIES BOX - ORB
+std::string yml_read_path = "../Data/cookies_ORB.yml"; // 3dpts + descriptors
+
+// COOKIES BOX MESH
+std::string ply_read_path = "../Data/box.ply";   // mesh
+
+// Print the usage banner to stdout.
+void help()
+{
+std::cout
+<< "--------------------------------------------------------------------------"   << std::endl
+<< "This program shows how to detect an object given its 3D textured model. You can choose to "
+<< "use a recorded video or the webcam." << std::endl
+<< "Usage:"                                                                       << std::endl
+<< "./pnp_detection ~/path_to_video/box.mp4"                                      << std::endl
+<< "./pnp_detection "                                                             << std::endl
+<< "--------------------------------------------------------------------------"   << std::endl
+<< std::endl;
+}
+
+
+/*
+ * Set up the intrinsic camera parameters: UVC WEBCAM
+ */
+
+double f = 55;                           // focal length in mm
+double sx = 22.3, sy = 14.9;             // sensor size in mm
+double width = 640, height = 480;        // image size in pixels
+
+// Intrinsics in pixel units: focal lengths scaled by pixels-per-mm,
+// principal point assumed at the image center.
+double params_WEBCAM[] = { width*f/sx,   // fx
+                           height*f/sy,  // fy
+                           width/2,      // cx
+                           height/2};    // cy
+
+
+/*
+ * Set up some basic colors (BGR order, as used by OpenCV drawing functions)
+ */
+cv::Scalar red(0, 0, 255);
+cv::Scalar green(0,255,0);
+cv::Scalar blue(255,0,0);
+cv::Scalar yellow(0,255,255);
+
+
+// Robust Matcher parameters
+
+int numKeyPoints = 2000;      // number of detected keypoints
+float ratio = 0.70f;          // ratio test
+bool fast_match = true;       // fastRobustMatch() or robustMatch()
+
+
+// RANSAC parameters
+
+int iterationsCount = 500;      // number of Ransac iterations.
+float reprojectionError = 2.0;  // maximum allowed distance to consider it an inlier.
+float confidence = 0.95;        // ransac successful confidence.
+
+
+// Kalman Filter parameters
+
+int minInliersKalman = 30;    // Kalman threshold updating
+
+
+/**********************************************************************************************************/
+
+// Initialize the Kalman filter matrices (defined below main()).
+void initKalmanFilter( cv::KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt);
+
+
+/**********************************************************************************************************/
+
+// Predict/correct one Kalman step and extract the estimated pose.
+void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurements,
+                         cv::Mat &translation_estimated, cv::Mat &rotation_estimated );
+
+/**********************************************************************************************************/
+
+// Pack a measured translation and rotation into the measurement vector.
+void fillMeasurements( cv::Mat &measurements,
+                       const cv::Mat &translation_measured, const cv::Mat &rotation_measured);
+
+
+/**********************************************************************************************************/
+
+
+/**
+ * Real-time model-based object detection demo.
+ * Per frame: robust ORB matching against the registered model, RANSAC PnP
+ * pose estimation, Kalman filtering of the pose, and drawing of the mesh,
+ * axes and statistics. Exits when ESC is pressed or the stream ends.
+ */
+int main(int argc, char *argv[])
+{
+
+  help();
+
+  PnPProblem pnp_detection(params_WEBCAM);      // pose from the raw RANSAC measurement
+  PnPProblem pnp_detection_est(params_WEBCAM);  // pose from the Kalman-filtered estimate
+
+  Model model;               // instantiate Model object
+  model.load(yml_read_path); // load a 3D textured object model
+
+  Mesh mesh;                // instantiate Mesh object
+  mesh.load(ply_read_path); // load an object mesh
+
+
+  RobustMatcher rmatcher;                                                     // instantiate RobustMatcher
+
+  cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints);  // instantiate ORB feature detector
+  cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor();     // instantiate ORB descriptor extractor
+
+  rmatcher.setFeatureDetector(detector);                                      // set feature detector
+  rmatcher.setDescriptorExtractor(extractor);                                 // set descriptor extractor
+
+
+  // LSH index parameters suit binary descriptors such as ORB
+  cv::Ptr<cv::flann::IndexParams> indexParams = cv::makePtr<cv::flann::LshIndexParams>(6, 12, 1); // instantiate LSH index parameters
+  cv::Ptr<cv::flann::SearchParams> searchParams = cv::makePtr<cv::flann::SearchParams>(50);       // instantiate flann search parameters
+
+  cv::DescriptorMatcher * matcher = new cv::FlannBasedMatcher(indexParams, searchParams);         // instantiate FlannBased matcher
+  rmatcher.setDescriptorMatcher(matcher);                                                         // set matcher
+
+
+  rmatcher.setRatio(ratio); // set ratio test parameter
+
+
+  cv::KalmanFilter KF;         // instantiate Kalman Filter
+
+  int nStates = 18;            // the number of states
+  int nMeasurements = 6;       // the number of measured states
+  int nInputs = 0;             // the number of action control
+
+  double dt = 0.125;           // time between measurements (1/FPS)
+
+  initKalmanFilter(KF, nStates, nMeasurements, nInputs, dt);    // init function
+
+  cv::Mat measurements(nMeasurements, 1, CV_64F); measurements.setTo(cv::Scalar(0));
+  bool good_measurement = false;
+
+
+  // Get the MODEL INFO
+
+  std::vector<cv::Point3f> list_points3d_model = model.get_points3d();  // list with model 3D coordinates
+  cv::Mat descriptors_model = model.get_descriptors();                  // list with descriptors of each 3D coordinate
+
+
+  // Create & Open Window
+  cv::namedWindow("REAL TIME DEMO", CV_WINDOW_KEEPRATIO);
+
+
+  cv::VideoCapture cap;                           // instantiate VideoCapture
+  (argc < 2) ? cap.open(1) : cap.open(argv[1]);   // open camera device 1 (NOTE(review):
+                                                  // 0 is the usual default -- confirm) or a recorded video
+
+  if(!cap.isOpened())   // check if we succeeded
+  {
+    std::cout << "Could not open the camera device" << std::endl;
+    return -1;
+  }
+
+
+  // start and end times
+  time_t start, end;
+
+  // fps calculated using number of frames / seconds
+  double fps;
+
+  // frame counter
+  int counter = 0;
+
+  // floating point seconds elapsed since start
+  double sec;
+
+  // start the clock
+  time(&start);
+
+  cv::Mat frame, frame_vis;
+
+  while(cap.read(frame) && cv::waitKey(30) != 27) // capture frame until ESC is pressed
+  {
+
+    frame_vis = frame.clone();    // refresh visualisation frame
+
+
+    // -- Step 1: Robust matching between model descriptors and scene descriptors
+
+    std::vector<cv::DMatch> good_matches;       // to obtain the 3D points of the model
+    std::vector<cv::KeyPoint> keypoints_scene;  // to obtain the 2D points of the scene
+
+
+    if(fast_match)
+    {
+      rmatcher.fastRobustMatch(frame, good_matches, keypoints_scene, descriptors_model);
+    }
+    else
+    {
+      rmatcher.robustMatch(frame, good_matches, keypoints_scene, descriptors_model);
+    }
+
+
+    // -- Step 2: Find out the 2D/3D correspondences
+
+    std::vector<cv::Point3f> list_points3d_model_match; // container for the model 3D coordinates found in the scene
+    std::vector<cv::Point2f> list_points2d_scene_match; // container for the model 2D coordinates found in the scene
+
+    for(unsigned int match_index = 0; match_index < good_matches.size(); ++match_index)
+    {
+      cv::Point3f point3d_model = list_points3d_model[ good_matches[match_index].trainIdx ];  // 3D point from model
+      cv::Point2f point2d_scene = keypoints_scene[ good_matches[match_index].queryIdx ].pt; // 2D point from the scene
+      list_points3d_model_match.push_back(point3d_model);         // add 3D point
+      list_points2d_scene_match.push_back(point2d_scene);         // add 2D point
+    }
+
+    // Draw outliers
+    draw2DPoints(frame_vis, list_points2d_scene_match, red);
+
+
+    cv::Mat inliers_idx;
+    std::vector<cv::Point2f> list_points2d_inliers;
+
+    if(good_matches.size() > 0) // None matches, then RANSAC crashes
+    {
+
+      // -- Step 3: Estimate the pose using RANSAC approach
+      pnp_detection.estimatePoseRANSAC( list_points3d_model_match, list_points2d_scene_match,
+                                        cv::ITERATIVE, inliers_idx,
+                                        iterationsCount, reprojectionError, confidence );
+
+      // -- Step 4: Catch the inliers keypoints to draw
+      for(int inliers_index = 0; inliers_index < inliers_idx.rows; ++inliers_index)
+      {
+        int n = inliers_idx.at<int>(inliers_index);         // i-inlier
+        cv::Point2f point2d = list_points2d_scene_match[n]; // i-inlier point 2D
+        list_points2d_inliers.push_back(point2d);           // add i-inlier to list
+      }
+
+      // Draw inliers points 2D
+      draw2DPoints(frame_vis, list_points2d_inliers, blue);
+
+
+      // -- Step 5: Kalman Filter
+
+      good_measurement = false;
+
+      // GOOD MEASUREMENT: feed the filter only when enough inliers support the pose
+      if( inliers_idx.rows >= minInliersKalman )
+      {
+
+        // Get the measured translation
+        cv::Mat translation_measured(3, 1, CV_64F);
+        translation_measured = pnp_detection.get_t_matrix();
+
+        // Get the measured rotation
+        cv::Mat rotation_measured(3, 3, CV_64F);
+        rotation_measured = pnp_detection.get_R_matrix();
+
+        // fill the measurements vector
+        fillMeasurements(measurements, translation_measured, rotation_measured);
+
+        good_measurement = true;
+
+      }
+
+      // Instantiate estimated translation and rotation
+      cv::Mat translation_estimated(3, 1, CV_64F);
+      cv::Mat rotation_estimated(3, 3, CV_64F);
+
+      // update the Kalman filter with good measurements
+      updateKalmanFilter( KF, measurements,
+                          translation_estimated, rotation_estimated);
+
+
+      // -- Step 6: Set estimated projection matrix
+      pnp_detection_est.set_P_matrix(rotation_estimated, translation_estimated);
+
+    }
+
+    // -- Step X: Draw pose
+
+    if(good_measurement)
+    {
+      drawObjectMesh(frame_vis, &mesh, &pnp_detection, green);  // draw current pose
+    }
+    else
+    {
+      drawObjectMesh(frame_vis, &mesh, &pnp_detection_est, yellow); // draw estimated pose
+    }
+
+    double l = 5;
+    std::vector<cv::Point2f> pose_points2d;
+    pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,0,0)));  // axis center
+    pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(l,0,0)));  // axis x
+    pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,l,0)));  // axis y
+    pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,0,l)));  // axis z
+    draw3DCoordinateAxes(frame_vis, pose_points2d);           // draw axes
+
+    // FRAME RATE
+
+    // see how much time has elapsed
+    time(&end);
+
+    // calculate current FPS; guard against a zero elapsed time on the first frames
+    ++counter;
+    sec = difftime (end, start);
+
+    fps = (sec > 0) ? counter / sec : 0;
+
+    drawFPS(frame_vis, fps, yellow); // frame rate
+
+    // detection confidence: inlier percentage (0 when there were no matches,
+    // which previously divided by zero)
+    double detection_ratio = good_matches.empty() ? 0 :
+        ((double)inliers_idx.rows/(double)good_matches.size())*100;
+    drawConfidence(frame_vis, detection_ratio, yellow);
+
+
+    // -- Step X: Draw some debugging text
+
+    // Draw some debug text
+    int inliers_int = inliers_idx.rows;
+    int outliers_int = (int)good_matches.size() - inliers_int;
+    std::string inliers_str = IntToString(inliers_int);
+    std::string outliers_str = IntToString(outliers_int);
+    std::string n = IntToString((int)good_matches.size());
+    std::string text = "Found " + inliers_str + " of " + n + " matches";
+    std::string text2 = "Inliers: " + inliers_str + " - Outliers: " + outliers_str;
+
+    drawText(frame_vis, text, green);
+    drawText2(frame_vis, text2, red);
+
+    cv::imshow("REAL TIME DEMO", frame_vis);
+
+    //cv::waitKey(0);
+
+  }
+
+  // Close and Destroy Window
+  cv::destroyWindow("REAL TIME DEMO");
+
+  std::cout << "GOODBYE ..." << std::endl;
+
+  return 0;
+}
+
+/**********************************************************************************************************/
+/**
+ * Initialise the Kalman filter used to smooth the estimated camera pose.
+ *
+ * The transition matrix (drawn below) implements a constant-acceleration
+ * model over an 18-dimensional state: indices 0-8 hold position, linear
+ * velocity and linear acceleration; indices 9-17 hold the Euler angles,
+ * angular velocity and angular acceleration.  The measurement matrix picks
+ * out the six directly-measured variables: position (0-2) and Euler
+ * angles (9-11), matching the layout written by fillMeasurements().
+ *
+ * @param KF            filter to (re-)initialise in place
+ * @param nStates       number of state variables (18 for the model above)
+ * @param nMeasurements number of measured variables (6: x,y,z,roll,pitch,yaw)
+ * @param nInputs       number of control inputs (unused: 0)
+ * @param dt            time step between consecutive frames, in seconds
+ */
+void initKalmanFilter(cv::KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt)
+{
+
+  KF.init(nStates, nMeasurements, nInputs, CV_64F);                 // init Kalman Filter
+
+  cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-5));       // set process noise
+  cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-2));   // set measurement noise
+  cv::setIdentity(KF.errorCovPost, cv::Scalar::all(1));             // error covariance
+
+
+                     /** DYNAMIC MODEL **/
+
+  //  [1 0 0 dt  0  0 dt2   0   0 0 0 0  0  0  0   0   0   0]
+  //  [0 1 0  0 dt  0   0 dt2   0 0 0 0  0  0  0   0   0   0]
+  //  [0 0 1  0  0 dt   0   0 dt2 0 0 0  0  0  0   0   0   0]
+  //  [0 0 0  1  0  0  dt   0   0 0 0 0  0  0  0   0   0   0]
+  //  [0 0 0  0  1  0   0  dt   0 0 0 0  0  0  0   0   0   0]
+  //  [0 0 0  0  0  1   0   0  dt 0 0 0  0  0  0   0   0   0]
+  //  [0 0 0  0  0  0   1   0   0 0 0 0  0  0  0   0   0   0]
+  //  [0 0 0  0  0  0   0   1   0 0 0 0  0  0  0   0   0   0]
+  //  [0 0 0  0  0  0   0   0   1 0 0 0  0  0  0   0   0   0]
+  //  [0 0 0  0  0  0   0   0   0 1 0 0 dt  0  0 dt2   0   0]
+  //  [0 0 0  0  0  0   0   0   0 0 1 0  0 dt  0   0 dt2   0]
+  //  [0 0 0  0  0  0   0   0   0 0 0 1  0  0 dt   0   0 dt2]
+  //  [0 0 0  0  0  0   0   0   0 0 0 0  1  0  0  dt   0   0]
+  //  [0 0 0  0  0  0   0   0   0 0 0 0  0  1  0   0  dt   0]
+  //  [0 0 0  0  0  0   0   0   0 0 0 0  0  0  1   0   0  dt]
+  //  [0 0 0  0  0  0   0   0   0 0 0 0  0  0  0   1   0   0]
+  //  [0 0 0  0  0  0   0   0   0 0 0 0  0  0  0   0   1   0]
+  //  [0 0 0  0  0  0   0   0   0 0 0 0  0  0  0   0   0   1]
+
+  // position: x' = x + v*dt + 0.5*a*dt^2 ("dt2" above is 0.5*dt^2)
+  KF.transitionMatrix.at<double>(0,3) = dt;
+  KF.transitionMatrix.at<double>(1,4) = dt;
+  KF.transitionMatrix.at<double>(2,5) = dt;
+  KF.transitionMatrix.at<double>(3,6) = dt;
+  KF.transitionMatrix.at<double>(4,7) = dt;
+  KF.transitionMatrix.at<double>(5,8) = dt;
+  KF.transitionMatrix.at<double>(0,6) = 0.5*pow(dt,2);
+  KF.transitionMatrix.at<double>(1,7) = 0.5*pow(dt,2);
+  KF.transitionMatrix.at<double>(2,8) = 0.5*pow(dt,2);
+
+  // orientation: same constant-acceleration model for the Euler angles
+  KF.transitionMatrix.at<double>(9,12) = dt;
+  KF.transitionMatrix.at<double>(10,13) = dt;
+  KF.transitionMatrix.at<double>(11,14) = dt;
+  KF.transitionMatrix.at<double>(12,15) = dt;
+  KF.transitionMatrix.at<double>(13,16) = dt;
+  KF.transitionMatrix.at<double>(14,17) = dt;
+  KF.transitionMatrix.at<double>(9,15) = 0.5*pow(dt,2);
+  KF.transitionMatrix.at<double>(10,16) = 0.5*pow(dt,2);
+  KF.transitionMatrix.at<double>(11,17) = 0.5*pow(dt,2);
+
+
+           /** MEASUREMENT MODEL **/
+
+  //  [1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
+  //  [0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
+  //  [0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
+  //  [0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0]
+  //  [0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0]
+  //  [0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0]
+
+  KF.measurementMatrix.at<double>(0,0) = 1;  // x
+  KF.measurementMatrix.at<double>(1,1) = 1;  // y
+  KF.measurementMatrix.at<double>(2,2) = 1;  // z
+  KF.measurementMatrix.at<double>(3,9) = 1;  // roll
+  KF.measurementMatrix.at<double>(4,10) = 1; // pitch
+  KF.measurementMatrix.at<double>(5,11) = 1; // yaw
+
+  //std::cout << "A " << std::endl << KF.transitionMatrix << std::endl;
+  //std::cout << "C " << std::endl << KF.measurementMatrix << std::endl;
+
+}
+
+/**********************************************************************************************************/
+/**
+ * Run one predict/correct cycle of the Kalman filter and extract the pose.
+ *
+ * @param KF                     filter previously set up by initKalmanFilter()
+ * @param measurement            6x1 measurement vector (x,y,z,roll,pitch,yaw)
+ * @param translation_estimated  output 3x1 CV_64F translation (written in place)
+ * @param rotation_estimated     output 3x3 rotation matrix (overwritten)
+ */
+void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurement,
+                         cv::Mat &translation_estimated, cv::Mat &rotation_estimated )
+{
+
+  // First predict, to update the internal statePre variable
+  // (the returned prediction is not used directly here)
+  cv::Mat prediction = KF.predict();
+
+  // The "correct" phase that is going to use the predicted value and our measurement
+  cv::Mat estimated = KF.correct(measurement);
+
+  // Estimated translation: state indices 0-2
+  translation_estimated.at<double>(0) = estimated.at<double>(0);
+  translation_estimated.at<double>(1) = estimated.at<double>(1);
+  translation_estimated.at<double>(2) = estimated.at<double>(2);
+
+  // Estimated euler angles: state indices 9-11
+  cv::Mat eulers_estimated(3, 1, CV_64F);
+  eulers_estimated.at<double>(0) = estimated.at<double>(9);
+  eulers_estimated.at<double>(1) = estimated.at<double>(10);
+  eulers_estimated.at<double>(2) = estimated.at<double>(11);
+
+  // Convert estimated Euler angles to a rotation matrix
+  rotation_estimated = euler2rot(eulers_estimated);
+
+}
+
+/**********************************************************************************************************/
+/**
+ * Pack a measured pose into the 6x1 measurement vector consumed by
+ * updateKalmanFilter(): translation first, then Euler angles.
+ *
+ * @param measurements          output 6x1 CV_64F vector (written in place)
+ * @param translation_measured  3x1 measured translation
+ * @param rotation_measured     3x3 measured rotation matrix
+ */
+void fillMeasurements( cv::Mat &measurements,
+                       const cv::Mat &translation_measured, const cv::Mat &rotation_measured)
+{
+  // Convert rotation matrix to euler angles
+  cv::Mat measured_eulers(3, 1, CV_64F);
+  measured_eulers = rot2euler(rotation_measured);
+
+  // Set measurement to predict
+  measurements.at<double>(0) = translation_measured.at<double>(0); // x
+  measurements.at<double>(1) = translation_measured.at<double>(1); // y
+  measurements.at<double>(2) = translation_measured.at<double>(2); // z
+  measurements.at<double>(3) = measured_eulers.at<double>(0);      // roll
+  measurements.at<double>(4) = measured_eulers.at<double>(1);      // pitch
+  measurements.at<double>(5) = measured_eulers.at<double>(2);      // yaw
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_registration.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_registration.cpp
new file mode 100644
index 0000000000..3b2517b22f
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_registration.cpp
@@ -0,0 +1,290 @@
+#include <iostream>
+
+#include "Mesh.h"
+#include "Model.h"
+#include "PnPProblem.h"
+#include "RobustMatcher.h"
+#include "ModelRegistration.h"
+#include "Utils.h"
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/features2d/features2d.hpp>
+#include <opencv2/nonfree/features2d.hpp>
+
+  /*
+   * Set up the image, mesh and output paths (relative to the run directory)
+   */
+
+  // COOKIES BOX [718x480]
+  std::string img_path = "../Data/resized_IMG_3875.JPG"; // f 55
+
+  // COOKIES BOX MESH
+  std::string ply_read_path = "../Data/box.ply";
+
+  // YAML writing path: where the registered model is saved
+  std::string write_path = "../Data/cookies_ORB.yml";
+
+  /** Print a short usage banner for the registration sample. */
+  void help()
+  {
+  std::cout
+  << "--------------------------------------------------------------------------"   << std::endl
+  << "This program shows how to create your 3D textured model. "                    << std::endl
+  << "Usage:"                                                                       << std::endl
+  << "./pnp_registration "                                                          << std::endl
+  << "--------------------------------------------------------------------------"   << std::endl
+  << std::endl;
+  }
+
+  // Flag set to true once all the model points have been registered
+  bool end_registration = false;
+
+  /*
+   * Set up the intrinsic camera parameters: CANON
+   */
+  double f = 45; // focal length in mm
+  double sx = 22.3, sy = 14.9;              // sensor size in mm
+  double width = 2592, height = 1944;       // image resolution in pixels
+  double params_CANON[] = { width*f/sx,   // fx
+                            height*f/sy,  // fy
+                            width/2,      // cx
+                            height/2};    // cy
+
+
+  // Setup the points to register in the image
+  // In the order of the *.ply file and starting at 1
+  int n = 8;
+  int pts[] = {1, 2, 3, 4, 5, 6, 7, 8}; // 3 -> 4
+
+  /*
+   * Set up some basic colors (BGR order)
+   */
+  cv::Scalar red(0, 0, 255);
+  cv::Scalar green(0,255,0);
+  cv::Scalar blue(255,0,0);
+  cv::Scalar yellow(0,255,255);
+
+  /*
+   * CREATE MODEL REGISTRATION OBJECT
+   * CREATE OBJECT MESH
+   * CREATE OBJECT MODEL
+   * CREATE PNP OBJECT
+   */
+  ModelRegistration registration;
+  Model model;
+  Mesh mesh;
+  PnPProblem pnp_registration(params_CANON);
+
+
+// Mouse events for model registration
+// Mouse callback for model registration: on left-button release, pair the
+// clicked 2D pixel with the current 3D mesh vertex (pts[] order) and store
+// the correspondence; sets end_registration once all points are collected.
+static void onMouseModelRegistration( int event, int x, int y, int, void* )
+{
+  if  ( event == cv::EVENT_LBUTTONUP )
+  {
+      int n_regist = registration.getNumRegist();
+      int n_vertex = pts[n_regist];
+
+      cv::Point2f point_2d = cv::Point2f(x,y);
+      cv::Point3f point_3d = mesh.getVertex(n_vertex-1);  // pts[] is 1-based
+
+      bool is_registrable = registration.is_registrable();
+      if (is_registrable)
+      {
+        registration.registerPoint(point_2d, point_3d);
+        if( registration.getNumRegist() == registration.getNumMax() ) end_registration = true;
+      }
+  }
+}
+
+/*
+ *   MAIN PROGRAM
+ *
+ */
+
+/**
+ * Registration entry point: lets the user click the box corners to register
+ * 2D/3D correspondences, estimates the camera pose from them, back-projects
+ * all detected ORB keypoints onto the mesh, and saves the textured model to
+ * a YAML file for the detection sample.
+ */
+int main(int argc, char *argv[])
+{
+
+  help();
+
+  // load a mesh given the *.ply file path
+  mesh.load(ply_read_path);
+
+  // set parameters
+  int numKeyPoints = 10000;
+
+  //Instantiate robust matcher: detector, extractor, matcher
+  RobustMatcher rmatcher;
+  // NOTE(review): allocated with new and never deleted — leaks; ownership
+  // semantics of setFeatureDetector are not visible here, TODO confirm.
+  cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints);
+  rmatcher.setFeatureDetector(detector);
+
+  /*
+   * GROUND TRUTH OF THE FIRST IMAGE
+   *
+   * by the moment it is the reference image
+   */
+
+  // Create & Open Window
+  cv::namedWindow("MODEL REGISTRATION", cv::WINDOW_KEEPRATIO);
+
+  // Set up the mouse events
+  cv::setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 );
+
+  // Open the image to register
+  cv::Mat img_in = cv::imread(img_path, cv::IMREAD_COLOR);
+  cv::Mat img_vis = img_in.clone();
+
+  if (!img_in.data) {
+    std::cout << "Could not open or find the image" << std::endl;
+    return -1;
+  }
+
+  // Set the number of points to register
+  int num_registrations = n;
+  registration.setNumMax(num_registrations);
+
+  std::cout << "Click the box corners ..." << std::endl;
+  std::cout << "Waiting ..." << std::endl;
+
+  // Loop until all the points are registered (or a key is pressed)
+  while ( cv::waitKey(30) < 0 )
+  {
+    // Refresh debug image
+    img_vis = img_in.clone();
+
+    // Current registered points
+    std::vector<cv::Point2f> list_points2d = registration.get_points2d();
+    std::vector<cv::Point3f> list_points3d = registration.get_points3d();
+
+    // Draw current registered points
+    drawPoints(img_vis, list_points2d, list_points3d, red);
+
+    // If the registration is not finished, draw which 3D point we have to register.
+    // If the registration is finished, breaks the loop.
+    if (!end_registration)
+    {
+      // Draw debug text
+      int n_regist = registration.getNumRegist();
+      int n_vertex = pts[n_regist];
+      cv::Point3f current_poin3d = mesh.getVertex(n_vertex-1);
+
+      drawQuestion(img_vis, current_poin3d, green);
+      drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), red);
+    }
+    else
+    {
+      // Draw debug text
+      drawText(img_vis, "END REGISTRATION", green);
+      drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), green);
+      break;
+    }
+
+    // Show the image
+    cv::imshow("MODEL REGISTRATION", img_vis);
+  }
+
+
+  /*
+   *
+   * COMPUTE CAMERA POSE from the clicked correspondences
+   *
+   */
+
+  std::cout << "COMPUTING POSE ..." << std::endl;
+
+  // The list of registered points
+  std::vector<cv::Point2f> list_points2d = registration.get_points2d();
+  std::vector<cv::Point3f> list_points3d = registration.get_points3d();
+
+  // Estimate pose given the registered points
+  bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, cv::ITERATIVE);
+  if ( is_correspondence )
+  {
+    std::cout << "Correspondence found" << std::endl;
+
+    // Compute all the 2D points of the mesh to verify the algorithm and draw it
+    std::vector<cv::Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
+    draw2DPoints(img_vis, list_points2d_mesh, green);
+
+  } else {
+    std::cout << "Correspondence not found" << std::endl << std::endl;
+  }
+
+  // Show the image
+  cv::imshow("MODEL REGISTRATION", img_vis);
+
+  // Show image until ESC pressed
+  cv::waitKey(0);
+
+
+   /*
+    *
+    * COMPUTE 3D of the image Keypoints: back-project every detected
+    * keypoint and keep those lying on the mesh surface
+    *
+    */
+
+
+  // Containers for keypoints and descriptors of the model
+  std::vector<cv::KeyPoint> keypoints_model;
+  cv::Mat descriptors;
+
+  // Compute keypoints and descriptors
+  rmatcher.computeKeyPoints(img_in, keypoints_model);
+  rmatcher.computeDescriptors(img_in, keypoints_model, descriptors);
+
+  // Check if keypoints are on the surface of the registration image and add to the model
+  for (unsigned int i = 0; i < keypoints_model.size(); ++i) {
+    cv::Point2f point2d(keypoints_model[i].pt);
+    cv::Point3f point3d;
+    bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d);
+    if (on_surface)
+    {
+        model.add_correspondence(point2d, point3d);
+        model.add_descriptor(descriptors.row(i));
+        model.add_keypoint(keypoints_model[i]);
+    }
+    else
+    {
+        model.add_outlier(point2d);
+    }
+  }
+
+  // save the model into a *.yaml file
+  model.save(write_path);
+
+  // Out image
+  img_vis = img_in.clone();
+
+  // The list of the points2d of the model
+  std::vector<cv::Point2f> list_points_in = model.get_points2d_in();
+  std::vector<cv::Point2f> list_points_out = model.get_points2d_out();
+
+  // Draw some debug text
+  std::string num = IntToString(list_points_in.size());
+  std::string text = "There are " + num + " inliers";
+  drawText(img_vis, text, green);
+
+  // Draw some debug text
+  num = IntToString(list_points_out.size());
+  text = "There are " + num + " outliers";
+  drawText2(img_vis, text, red);
+
+  // Draw the object mesh
+  drawObjectMesh(img_vis, &mesh, &pnp_registration, blue);
+
+  // Draw found keypoints depending on if are or not on the surface
+  draw2DPoints(img_vis, list_points_in, green);
+  draw2DPoints(img_vis, list_points_out, red);
+
+  // Show the image
+  cv::imshow("MODEL REGISTRATION", img_vis);
+
+  // Wait until ESC pressed
+  cv::waitKey(0);
+
+  // Close and Destroy Window
+  cv::destroyWindow("MODEL REGISTRATION");
+
+  std::cout << "GOODBYE" << std::endl;
+
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_verification.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_verification.cpp
new file mode 100644
index 0000000000..75456588de
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_verification.cpp
@@ -0,0 +1,327 @@
+#include <iostream>
+
+#include "cv.h"
+#include "highgui.h"
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/nonfree/features2d.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+
+#include "Mesh.h"
+#include "Model.h"
+#include "PnPProblem.h"
+#include "RobustMatcher.h"
+#include "ModelRegistration.h"
+#include "Utils.h"
+
+#include "CsvWriter.h"
+
+
+  /*
+   * Set up the image, mesh and model paths (relative to the run directory)
+   */
+
+  std::string img_verification_path = "../Data/resized_IMG_3872.JPG";
+  std::string ply_read_path = "../Data/box.ply";
+  std::string yml_read_path = "../Data/cookies_ORB.yml";
+
+  // Flag set to true once all the ground-truth points have been registered
+  bool end_registration = false;
+
+  // Setup the points to register in the image
+  // In the order of the *.ply file and starting at 1 (vertex 4 is skipped)
+  int n = 7;
+  int pts[] = {1, 2, 3, 5, 6, 7, 8};
+
+  /*
+   * Set up the intrinsic camera parameters: CANON
+   */
+  double f = 43;                      // focal length in mm
+  double sx = 22.3, sy = 14.9;        // sensor size in mm
+  double width = 718, height = 480;   // image resolution in pixels
+  double params_CANON[] = { width*f/sx,   // fx
+                            height*f/sy,  // fy
+                            width/2,      // cx
+                            height/2};    // cy
+
+  /*
+   * Set up some basic colors (BGR order)
+   */
+  cv::Scalar red(0, 0, 255);
+  cv::Scalar green(0,255,0);
+  cv::Scalar blue(255,0,0);
+  cv::Scalar yellow(0,255,255);
+
+  /*
+   * CREATE MODEL REGISTRATION OBJECT
+   * CREATE OBJECT MESH
+   * CREATE OBJECT MODEL
+   * CREATE PNP OBJECT (one per PnP method being compared)
+   */
+  Mesh mesh;
+  ModelRegistration registration;
+  PnPProblem pnp_verification_epnp(params_CANON);
+  PnPProblem pnp_verification_iter(params_CANON);
+  PnPProblem pnp_verification_p3p(params_CANON);
+  PnPProblem pnp_verification_dls(params_CANON);
+  PnPProblem pnp_verification_gt(params_CANON); // ground truth
+
+
+// Mouse events for model registration
+// Mouse callback for ground-truth registration: on left-button release, pair
+// the clicked 2D pixel with the current 3D mesh vertex (pts[] order) and
+// store the correspondence; sets end_registration when all points are done.
+static void onMouseModelVerification( int event, int x, int y, int, void* )
+{
+  if  ( event == cv::EVENT_LBUTTONUP )
+  {
+      int n_regist = registration.getNumRegist();
+      int n_vertex = pts[n_regist];
+
+      cv::Point2f point_2d = cv::Point2f(x,y);
+      cv::Point3f point_3d = mesh.getVertex(n_vertex-1);  // pts[] is 1-based
+
+      bool is_registrable = registration.is_registrable();
+      if (is_registrable)
+      {
+        registration.registerPoint(point_2d, point_3d);
+        if( registration.getNumRegist() == registration.getNumMax() ) end_registration = true;
+      }
+  }
+}
+
+
+/*
+ *   MAIN PROGRAM
+ *
+ */
+
+/**
+ * Verification entry point: the user clicks the box corners to build a
+ * ground-truth pose, then the pose estimated from robust feature matching
+ * with several PnP methods (EPNP, ITERATIVE, DLS; P3P is disabled below) is
+ * compared against it and the translation/rotation errors are printed.
+ */
+int main(int, char**)
+{
+
+  std::cout << "!!!Hello Verification!!!" << std::endl; // startup banner
+
+  // load a mesh given the *.ply file path
+  mesh.load(ply_read_path);
+
+  // load the 3D textured object model
+  Model model;
+  model.load(yml_read_path);
+
+  // set parameters
+  int numKeyPoints = 10000;
+
+  //Instantiate robust matcher: detector, extractor, matcher
+  RobustMatcher rmatcher;
+  // NOTE(review): allocated with new and never deleted — leaks; ownership
+  // semantics of setFeatureDetector are not visible here, TODO confirm.
+  cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints);
+  rmatcher.setFeatureDetector(detector);
+  rmatcher.setRatio(0.80);
+
+  // RANSAC parameters
+  // NOTE(review): declared but never used in this function.
+  int iterationsCount = 500;
+  float reprojectionError = 2.0;
+  float confidence = 0.99;
+
+
+  /*
+  * GROUND TRUTH SECOND IMAGE
+  *
+  */
+
+  cv::Mat img_in, img_vis;
+
+  // Setup for new registration
+  registration.setNumMax(n);
+
+  // Create & Open Window
+  cv::namedWindow("MODEL GROUND TRUTH", CV_WINDOW_KEEPRATIO);
+
+  // Set up the mouse events
+  cv::setMouseCallback("MODEL GROUND TRUTH", onMouseModelVerification, 0 );
+
+  // Open the image to register
+  img_in = cv::imread(img_verification_path, cv::IMREAD_COLOR);
+
+  if (!img_in.data)
+  {
+    std::cout << "Could not open or find the image" << std::endl;
+    return -1;
+  }
+
+  std::cout << "Click the box corners ..." << std::endl;
+  std::cout << "Waiting ..." << std::endl;
+
+  // Loop until all the points are registered (or a key is pressed)
+  while ( cv::waitKey(30) < 0 )
+  {
+    // Refresh debug image
+    img_vis = img_in.clone();
+
+    // Current registered points
+    std::vector<cv::Point2f> list_points2d = registration.get_points2d();
+    std::vector<cv::Point3f> list_points3d = registration.get_points3d();
+
+    // Draw current registered points
+    drawPoints(img_vis, list_points2d, list_points3d, red);
+
+    // If the registration is not finished, draw which 3D point we have to register.
+    // If the registration is finished, breaks the loop.
+    if (!end_registration)
+    {
+      // Draw debug text
+      int n_regist = registration.getNumRegist();
+      int n_vertex = pts[n_regist];
+      cv::Point3f current_poin3d = mesh.getVertex(n_vertex-1);
+
+      drawQuestion(img_vis, current_poin3d, green);
+      drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), red);
+    }
+    else
+    {
+      // Draw debug text
+      drawText(img_vis, "GROUND TRUTH", green);
+      drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), green);
+      break;
+    }
+
+    // Show the image
+    cv::imshow("MODEL GROUND TRUTH", img_vis);
+  }
+
+  // The list of registered points
+  std::vector<cv::Point2f> list_points2d = registration.get_points2d();
+  std::vector<cv::Point3f> list_points3d = registration.get_points3d();
+
+  // Estimate pose given the registered points
+  // NOTE(review): is_correspondence is never checked afterwards.
+  bool is_correspondence = pnp_verification_gt.estimatePose(list_points3d, list_points2d, cv::ITERATIVE);
+
+  // Compute and draw all mesh object 2D points
+  std::vector<cv::Point2f> pts_2d_ground_truth = pnp_verification_gt.verify_points(&mesh);
+  draw2DPoints(img_vis, pts_2d_ground_truth, green);
+
+  // Draw the ground truth mesh
+  drawObjectMesh(img_vis, &mesh, &pnp_verification_gt, blue);
+
+  // Show the image
+  cv::imshow("MODEL GROUND TRUTH", img_vis);
+
+  // Show image until ESC pressed
+  cv::waitKey(0);
+
+
+  /*
+   * EXTRACT CORRESPONDENCES
+   *
+   */
+
+   // refresh visualisation image
+  img_vis = img_in.clone();
+
+  // Get the MODEL INFO
+  std::vector<cv::Point2f> list_points2d_model = model.get_points2d_in();
+  std::vector<cv::Point3f> list_points3d_model = model.get_points3d();
+  std::vector<cv::KeyPoint> keypoints_model = model.get_keypoints();
+  cv::Mat descriptors_model = model.get_descriptors();
+
+  // -- Step 1: Robust matching between model descriptors and scene descriptors
+
+  std::vector<cv::DMatch> good_matches;       // to obtain the 3D points of the model
+  std::vector<cv::KeyPoint> keypoints_scene;  // to obtain the 2D points of the scene
+
+  //rmatcher.fastRobustMatch(frame, good_matches, keypoints_scene, descriptors_model);
+  rmatcher.robustMatch(img_vis, good_matches, keypoints_scene, descriptors_model);
+
+  // NOTE(review): the four containers below are declared but never filled
+  // or read in this function.
+  cv::Mat inliers_idx;
+  std::vector<cv::DMatch> matches_inliers;
+  std::vector<cv::Point2f> list_points2d_inliers;
+  std::vector<cv::Point3f> list_points3d_inliers;
+
+    // -- Step 2: Find out the 2D/3D correspondences
+
+    std::vector<cv::Point3f> list_points3d_model_match; // container for the model 3D coordinates found in the scene
+    std::vector<cv::Point2f> list_points2d_scene_match; // container for the model 2D coordinates found in the scene
+
+    for(unsigned int match_index = 0; match_index < good_matches.size(); ++match_index)
+    {
+      cv::Point3f point3d_model = list_points3d_model[ good_matches[match_index].trainIdx ];  // 3D point from model
+      cv::Point2f point2d_scene = keypoints_scene[ good_matches[match_index].queryIdx ].pt; // 2D point from the scene
+      list_points3d_model_match.push_back(point3d_model);         // add 3D point
+      list_points2d_scene_match.push_back(point2d_scene);         // add 2D point
+    }
+
+    // Draw outliers
+    //draw2DPoints(img_vis, list_points2d_scene_match, red);
+
+  /*
+   * COMPUTE PNP ERRORS:
+   * Calculation of the rotation and translation error
+   *
+   */
+
+  pnp_verification_epnp.estimatePose( list_points3d_model_match, list_points2d_scene_match, cv::EPNP);
+  pnp_verification_iter.estimatePose( list_points3d_model_match, list_points2d_scene_match, cv::ITERATIVE);
+  //pnp_verification_p3p.estimatePose( list_points3d_model_match, list_points2d_scene_match, cv::P3P);
+  pnp_verification_dls.estimatePose( list_points3d_model_match, list_points2d_scene_match, cv::DLS);
+
+  // Draw mesh
+  drawObjectMesh(img_vis, &mesh, &pnp_verification_dls, green);
+  drawObjectMesh(img_vis, &mesh, &pnp_verification_gt, yellow);
+
+   cv::imshow("MODEL GROUND TRUTH", img_vis);
+
+  // NOTE(review): the P3P call above is commented out, so t_p3p/R_p3p below
+  // come from an unestimated PnPProblem and the reported P3P errors are
+  // not meaningful.
+  cv::Mat t_true = pnp_verification_gt.get_t_matrix();
+  cv::Mat t_epnp = pnp_verification_epnp.get_t_matrix();
+  cv::Mat t_iter = pnp_verification_iter.get_t_matrix();
+  cv::Mat t_p3p = pnp_verification_p3p.get_t_matrix();
+  cv::Mat t_dls = pnp_verification_dls.get_t_matrix();
+
+  cv::Mat R_true = pnp_verification_gt.get_R_matrix();
+  cv::Mat R_epnp = pnp_verification_epnp.get_R_matrix();
+  cv::Mat R_iter = pnp_verification_iter.get_R_matrix();
+  cv::Mat R_p3p = pnp_verification_p3p.get_R_matrix();
+  cv::Mat R_dls = pnp_verification_dls.get_R_matrix();
+
+  double error_trans_epnp = get_translation_error(t_true, t_epnp);
+  double error_rot_epnp = get_rotation_error(R_true, R_epnp)*180/CV_PI;
+
+  double error_trans_iter = get_translation_error(t_true, t_iter);
+  double error_rot_iter = get_rotation_error(R_true, R_iter)*180/CV_PI;
+
+  double error_trans_p3p = get_translation_error(t_true, t_p3p);
+  double error_rot_p3p = get_rotation_error(R_true, R_p3p)*180/CV_PI;
+
+  double error_trans_dls = get_translation_error(t_true, t_dls);
+  double error_rot_dls = get_rotation_error(R_true, R_dls)*180/CV_PI;
+
+
+  std::cout << std::endl << "****  EPNP ERRORS  **** " << std::endl;
+
+  std::cout << "Translation error: " << error_trans_epnp << " m." << std::endl;
+  std::cout << "Rotation error: " << error_rot_epnp << " deg." << std::endl;
+
+
+  std::cout << std::endl << "****  ITERATIVE ERRORS  **** " << std::endl;
+
+  std::cout << "Translation error: " << error_trans_iter << " m." << std::endl;
+  std::cout << "Rotation error: " << error_rot_iter << " deg." << std::endl;
+
+
+  std::cout << std::endl << "****  P3P ERRORS  **** " << std::endl;
+
+  std::cout << "Translation error: " << error_trans_p3p << " m." << std::endl;
+  std::cout << "Rotation error: " << error_rot_p3p << " deg." << std::endl;
+
+
+  std::cout << std::endl << "****  DLS ERRORS  **** " << std::endl;
+
+  std::cout << "Translation error: " << error_trans_dls << " m." << std::endl;
+  std::cout << "Rotation error: " << error_rot_dls << " deg." << std::endl;
+
+
+  // Show image until ESC pressed
+  cv::waitKey(0);
+
+  // Close and Destroy Window
+  cv::destroyWindow("MODEL GROUND TRUTH");
+
+  std::cout << "GOODBYE" << std::endl;
+
+}
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/test_pnp.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/test_pnp.cpp
new file mode 100644
index 0000000000..4843d32ceb
--- /dev/null
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/test_pnp.cpp
@@ -0,0 +1,143 @@
+#include <opencv2/core/core.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/contrib/contrib.hpp>
+
+#include <iostream>
+#include <fstream>
+
+using namespace std;
+using namespace cv;
+
+// Fill the (pre-sized) points vector with pseudo-random 3D points uniformly
+// distributed in the axis-aligned box [pmin, pmax].
+// NOTE(review): uses rand() and no srand() call is visible in this file, so
+// the sequence repeats across runs — presumably intentional for repeatable
+// experiments; confirm.
+void generate3DPointCloud(vector<Point3f>& points, Point3f pmin = Point3f(-1,
+        -1, 5), Point3f pmax = Point3f(1, 1, 10))
+    {
+        const Point3f delta = pmax - pmin;
+        for (size_t i = 0; i < points.size(); i++)
+        {
+            // p starts as a random point in the unit cube [0,1]^3 ...
+            Point3f p(float(rand()) / RAND_MAX, float(rand()) / RAND_MAX,
+                float(rand()) / RAND_MAX);
+            // ... then is scaled and shifted into [pmin, pmax]
+            p.x *= delta.x;
+            p.y *= delta.y;
+            p.z *= delta.z;
+            p = p + pmin;
+            points[i] = p;
+        }
+    }
+
+// Build a random 3x3 pinhole camera matrix: fx, fy, cx, cy are each drawn
+// uniformly from [1e-3, 100); all other entries are 0 except K(2,2) = 1.
+void generateCameraMatrix(Mat& cameraMatrix, RNG& rng)
+{
+    const double fcMinVal = 1e-3;
+    const double fcMaxVal = 100;
+    cameraMatrix.create(3, 3, CV_64FC1);
+    cameraMatrix.setTo(Scalar(0));
+    cameraMatrix.at<double>(0,0) = rng.uniform(fcMinVal, fcMaxVal);  // fx
+    cameraMatrix.at<double>(1,1) = rng.uniform(fcMinVal, fcMaxVal);  // fy
+    cameraMatrix.at<double>(0,2) = rng.uniform(fcMinVal, fcMaxVal);  // cx
+    cameraMatrix.at<double>(1,2) = rng.uniform(fcMinVal, fcMaxVal);  // cy
+    cameraMatrix.at<double>(2,2) = 1;
+}
+
+// Build a 4x1 distortion-coefficient vector with tiny random values.
+// Only the first 3 coefficients are randomized; the 4th stays zero.
+void generateDistCoeffs(Mat& distCoeffs, RNG& rng)
+{
+    distCoeffs = Mat::zeros(4, 1, CV_64FC1);
+    for (int i = 0; i < 3; i++)
+        distCoeffs.at<double>(i,0) = rng.uniform(0.0, 1.0e-6);
+}
+
+// Generate a random ground-truth pose: each rotation-vector component is
+// drawn from [1e-3, 1.0) and each translation component from [1e-3, 0.1).
+void generatePose(Mat& rvec, Mat& tvec, RNG& rng)
+{
+    const double minVal = 1.0e-3;
+    const double maxVal = 1.0;
+    rvec.create(3, 1, CV_64FC1);
+    tvec.create(3, 1, CV_64FC1);
+    for (int i = 0; i < 3; i++)
+    {
+        rvec.at<double>(i,0) = rng.uniform(minVal, maxVal);
+        tvec.at<double>(i,0) = rng.uniform(minVal, maxVal/10);
+    }
+}
+
+// Append one whitespace-separated line per method (outer vector) to `path`.
+// The file is opened in append mode, so repeated runs accumulate rows.
+// NOTE(review): the loop counters are int compared against size_t sizes
+// (signed/unsigned mismatch warning); harmless for these sizes.
+void data2file(const string& path, const vector<vector<double> >& data)
+{
+  std::fstream fs;
+  fs.open(path.c_str(), std::fstream::in | std::fstream::out | std::fstream::app);
+
+  for (int method = 0; method < data.size(); ++method)
+  {
+    for (int i = 0; i < data[method].size(); ++i)
+    {
+      fs << data[method][i] << " ";
+    }
+    fs << endl;
+  }
+
+  fs.close();
+}
+
+
+/**
+ * Benchmark driver: for point counts from 10 upward, project a random 3D
+ * cloud with a random camera/pose, run solvePnP with four methods, and log
+ * the rotation/translation errors of each method to text files.
+ */
+int main(int argc, char *argv[])
+{
+
+  RNG rng;
+ // TickMeter tm;
+  // One series per PnP method; comp_time is never filled because the
+  // timing code below is commented out, so computation_time.txt ends up
+  // with empty rows.
+  vector<vector<double> > error_trans(4), error_rot(4), comp_time(4);
+
+  int maxpoints = 2000;
+  for (int npoints = 10; npoints < maxpoints+10; ++npoints)
+  {
+    // generate 3D point cloud
+    vector<Point3f> points;
+    points.resize(npoints);
+    generate3DPointCloud(points);
+
+    // generate cameramatrix
+    Mat rvec, tvec;
+    Mat trueRvec, trueTvec;
+    Mat intrinsics, distCoeffs;
+    generateCameraMatrix(intrinsics, rng);
+
+    // generate distorsion coefficients
+    generateDistCoeffs(distCoeffs, rng);
+
+    // generate ground truth pose
+    generatePose(trueRvec, trueTvec, rng);
+
+    for (int method = 0; method < 4; ++method)
+    {
+      std::vector<Point3f> opoints;
+      if (method == 2)
+      {
+        // P3P accepts exactly 4 object points
+        opoints = std::vector<Point3f>(points.begin(), points.begin()+4);
+      }
+      else
+        opoints = points;
+
+      vector<Point2f> projectedPoints;
+      projectedPoints.resize(opoints.size());
+      projectPoints(Mat(opoints), trueRvec, trueTvec, intrinsics, distCoeffs, projectedPoints);
+
+      //tm.reset(); tm.start();
+
+      // assumes the solvePnP flag enum is 0=ITERATIVE, 1=EPNP, 2=P3P,
+      // 3=DLS — TODO confirm against the cv enum of the OpenCV version used
+      solvePnP(opoints, projectedPoints, intrinsics, distCoeffs, rvec, tvec, false, method);
+
+     // tm.stop();
+
+    //  double compTime = tm.getTimeMilli();
+      double rvecDiff = norm(rvec-trueRvec), tvecDiff = norm(tvec-trueTvec);
+
+      error_rot[method].push_back(rvecDiff);
+      error_trans[method].push_back(tvecDiff);
+      //comp_time[method].push_back(compTime);
+
+    }
+
+    //system("clear");
+    // NOTE(review): prints npoints+1 although the loop runs npoints from
+    // 10 to maxpoints+9, so the "x/y" readout is off by the start offset.
+    cout << "Completed " << npoints+1 << "/" << maxpoints << endl;
+
+  }
+
+  data2file("translation_error.txt", error_trans);
+  data2file("rotation_error.txt", error_rot);
+  data2file("computation_time.txt", comp_time);
+
+}