Merge pull request #41 from vpisarev/added_text_module

added "text" module; added linemod to rgbd module, fixed compile errors in rgbd
Vadim Pisarevsky 11 years ago
commit f6b1434987
  1. modules/rgbd/CMakeLists.txt (2 lines changed)
  2. modules/rgbd/include/opencv2/rgbd.hpp (21 lines changed)
  3. modules/rgbd/include/opencv2/rgbd/linemod.hpp (455 lines changed)
  4. modules/rgbd/samples/linemod.cpp (705 lines changed)
  5. modules/rgbd/samples/odometry_evaluation.cpp (35 lines changed)
  6. modules/rgbd/src/depth_cleaner.cpp (50 lines changed)
  7. modules/rgbd/src/depth_to_3d.cpp (14 lines changed)
  8. modules/rgbd/src/depth_to_3d.h (10 lines changed)
  9. modules/rgbd/src/linemod.cpp (1843 lines changed)
  10. modules/rgbd/src/normal.cpp (209 lines changed)
  11. modules/rgbd/src/normal_lut.i (4 lines changed)
  12. modules/rgbd/src/odometry.cpp (94 lines changed)
  13. modules/rgbd/src/plane.cpp (135 lines changed)
  14. modules/rgbd/src/precomp.hpp (56 lines changed)
  15. modules/rgbd/src/rgbd_init.cpp (8 lines changed)
  16. modules/rgbd/src/utils.cpp (4 lines changed)
  17. modules/rgbd/src/utils.h (14 lines changed)
  18. modules/rgbd/test/test_normal.cpp (185 lines changed)
  19. modules/rgbd/test/test_odometry.cpp (19 lines changed)
  20. modules/rgbd/test/test_utils.cpp (38 lines changed)
  21. modules/text/CMakeLists.txt (2 lines changed)
  22. modules/text/doc/erfilter.rst (211 lines changed)
  23. modules/text/doc/pics/component_tree.png (binary)
  24. modules/text/doc/text.rst (10 lines changed)
  25. modules/text/include/opencv2/text.hpp (44 lines changed)
  26. modules/text/include/opencv2/text/erfilter.hpp (269 lines changed)
  27. modules/text/samples/scenetext01.jpg (binary)
  28. modules/text/samples/scenetext02.jpg (binary)
  29. modules/text/samples/scenetext03.jpg (binary)
  30. modules/text/samples/scenetext04.jpg (binary)
  31. modules/text/samples/scenetext05.jpg (binary)
  32. modules/text/samples/scenetext06.jpg (binary)
  33. modules/text/samples/textdetection.cpp (125 lines changed)
  34. modules/text/src/erfilter.cpp (3184 lines changed)
  35. modules/text/src/precomp.hpp (48 lines changed)
  36. modules/xobjdetect/include/opencv2/xobjdetect.hpp (1 line changed)

@@ -1,3 +1,3 @@
set(the_description "RGBD algorithms")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
ocv_define_module(rgbd opencv_core opencv_calib3d opencv_contrib opencv_highgui opencv_imgproc)
ocv_define_module(rgbd opencv_core opencv_calib3d opencv_highgui opencv_imgproc)

@@ -38,12 +38,13 @@
#ifdef __cplusplus
#include <limits>
#include <opencv2/core.hpp>
#include <limits>
namespace cv
{
namespace rgbd
{
/** Checks if the value is a valid depth. For CV_16U or CV_16S, the convention is to be invalid if it is
* a limit. For a float/double, we just check if it is a NaN
* @param depth the depth to check for validity
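A minimal sketch of the convention this comment describes (hypothetical helper names; the documented function itself falls outside the hunk shown here):

#include <limits>

inline bool isValidDepthSketch(unsigned short depth)
{
    // CV_16U convention: a value at either end of the range marks missing data.
    return depth != std::numeric_limits<unsigned short>::min()
        && depth != std::numeric_limits<unsigned short>::max();
}

inline bool isValidDepthSketch(float depth)
{
    // float convention: any non-NaN value is valid (NaN is never equal to itself).
    return depth == depth;
}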
@@ -427,7 +428,7 @@ namespace cv
/** Method to compute a transformation from the source frame to the destination one.
* Some odometry algorithms do not use some of the frame data (e.g. ICP does not use images).
* In such case corresponding arguments can be set as empty cv::Mat.
* In such case corresponding arguments can be set as empty Mat.
* The method returns true if all internal computations were possible (e.g. there were enough correspondences,
* the system of equations had a solution, etc.) and the resulting transformation satisfies the test, if one is provided
* by the Odometry inheritor implementation (e.g. thresholds for maximum translation and rotation).
@@ -507,7 +508,7 @@ namespace cv
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,
const Mat& initRt) const;
// Some params have commented desired type. It's due to cv::AlgorithmInfo::addParams does not support it now.
// Some params have commented desired type. It's due to AlgorithmInfo::addParams does not support it now.
/*float*/
double minDepth, maxDepth, maxDepthDiff;
/*vector<int>*/
@@ -556,7 +557,7 @@ namespace cv
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,
const Mat& initRt) const;
// Some params have commented desired type. It's due to cv::AlgorithmInfo::addParams does not support it now.
// Some params have commented desired type. It's due to AlgorithmInfo::addParams does not support it now.
/*float*/
double minDepth, maxDepth, maxDepthDiff;
/*float*/
@@ -569,7 +570,7 @@ namespace cv
double maxTranslation, maxRotation;
mutable cv::Ptr<cv::RgbdNormals> normalsComputer;
mutable Ptr<RgbdNormals> normalsComputer;
};
/** Odometry that merges RgbdOdometry and ICPOdometry by minimizing the sum of their energy functions.
@@ -610,7 +611,7 @@ namespace cv
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,
const Mat& initRt) const;
// Some params have commented desired type. It's due to cv::AlgorithmInfo::addParams does not support it now.
// Some params have commented desired type. It's due to AlgorithmInfo::addParams does not support it now.
/*float*/
double minDepth, maxDepth, maxDepthDiff;
/*float*/
@@ -625,7 +626,7 @@ namespace cv
double maxTranslation, maxRotation;
mutable cv::Ptr<cv::RgbdNormals> normalsComputer;
mutable Ptr<RgbdNormals> normalsComputer;
};
/** Warp the image: compute 3d points from the depth, transform them using given transformation,
@@ -649,10 +650,12 @@ namespace cv
// TODO Depth interpolation
// Curvature
// Make rescaleDepth return doubles if asked for
} /* namespace rgbd */
} /* namespace cv */
#endif /* __cplusplus */
#include "opencv2/rgbd/linemod.hpp"
#endif /* __cplusplus */
#endif
/* End of file. */

@@ -0,0 +1,455 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_OBJDETECT_LINEMOD_HPP__
#define __OPENCV_OBJDETECT_LINEMOD_HPP__
#include "opencv2/core.hpp"
#include <map>
/****************************************************************************************\
* LINE-MOD *
\****************************************************************************************/
namespace cv {
namespace linemod {
/// @todo Convert doxy comments to rst
/**
* \brief Discriminant feature described by its location and label.
*/
struct CV_EXPORTS Feature
{
int x; ///< x offset
int y; ///< y offset
int label; ///< Quantization
Feature() : x(0), y(0), label(0) {}
Feature(int x, int y, int label);
void read(const FileNode& fn);
void write(FileStorage& fs) const;
};
inline Feature::Feature(int _x, int _y, int _label) : x(_x), y(_y), label(_label) {}
struct CV_EXPORTS Template
{
int width;
int height;
int pyramid_level;
std::vector<Feature> features;
void read(const FileNode& fn);
void write(FileStorage& fs) const;
};
/**
* \brief Represents a modality operating over an image pyramid.
*/
class QuantizedPyramid
{
public:
// Virtual destructor
virtual ~QuantizedPyramid() {}
/**
* \brief Compute quantized image at current pyramid level for online detection.
*
* \param[out] dst The destination 8-bit image. For each pixel at most one bit is set,
* representing its classification.
*/
virtual void quantize(Mat& dst) const =0;
/**
* \brief Extract most discriminant features at current pyramid level to form a new template.
*
* \param[out] templ The new template.
*/
virtual bool extractTemplate(Template& templ) const =0;
/**
* \brief Go to the next pyramid level.
*
* \todo Allow pyramid scale factor other than 2
*/
virtual void pyrDown() =0;
protected:
/// Candidate feature with a score
struct Candidate
{
Candidate(int x, int y, int label, float score);
/// Sort candidates with high score to the front
bool operator<(const Candidate& rhs) const
{
return score > rhs.score;
}
Feature f;
float score;
};
/**
* \brief Choose candidate features so that they are not bunched together.
*
* \param[in] candidates Candidate features sorted by score.
* \param[out] features Destination vector of selected features.
* \param[in] num_features Number of candidates to select.
* \param[in] distance Hint for desired distance between features.
*/
static void selectScatteredFeatures(const std::vector<Candidate>& candidates,
std::vector<Feature>& features,
size_t num_features, float distance);
};
inline QuantizedPyramid::Candidate::Candidate(int x, int y, int label, float _score) : f(x, y, label), score(_score) {}
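A hedged sketch of how such scattered selection can be done greedily: walk the score-sorted candidates and keep one only if it is at least `distance` away from everything kept so far. This illustrates the contract of selectScatteredFeatures above, not necessarily the exact strategy in linemod.cpp (which may, for instance, relax the distance when too few features survive):

#include <vector>
#include <opencv2/core.hpp>

// Greedy scattered selection over score-sorted candidate locations
// (hypothetical free function, for illustration only).
static void selectScatteredSketch(const std::vector<cv::Point>& sorted_candidates,
                                  std::vector<cv::Point>& kept,
                                  size_t num_features, float distance)
{
    const float dist_sq = distance * distance;
    for (size_t i = 0; i < sorted_candidates.size() && kept.size() < num_features; ++i)
    {
        bool scattered = true;
        for (size_t j = 0; j < kept.size(); ++j)
        {
            float dx = (float)(sorted_candidates[i].x - kept[j].x);
            float dy = (float)(sorted_candidates[i].y - kept[j].y);
            if (dx * dx + dy * dy < dist_sq)
            {
                scattered = false;
                break;
            }
        }
        if (scattered)
            kept.push_back(sorted_candidates[i]);
    }
}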
/**
* \brief Interface for modalities that plug into the LINE template matching representation.
*
* \todo Max response, to allow optimization of summing (255/MAX) features as uint8
*/
class CV_EXPORTS Modality
{
public:
// Virtual destructor
virtual ~Modality() {}
/**
* \brief Form a quantized image pyramid from a source image.
*
* \param[in] src The source image. Type depends on the modality.
* \param[in] mask Optional mask. If not empty, unmasked pixels are set to zero
* in quantized image and cannot be extracted as features.
*/
Ptr<QuantizedPyramid> process(const Mat& src,
const Mat& mask = Mat()) const
{
return processImpl(src, mask);
}
virtual String name() const =0;
virtual void read(const FileNode& fn) =0;
virtual void write(FileStorage& fs) const =0;
/**
* \brief Create modality by name.
*
* The following modality types are supported:
* - "ColorGradient"
* - "DepthNormal"
*/
static Ptr<Modality> create(const String& modality_type);
/**
* \brief Load a modality from file.
*/
static Ptr<Modality> create(const FileNode& fn);
protected:
// Indirection is because process() has a default parameter.
virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
const Mat& mask) const =0;
};
/**
* \brief Modality that computes quantized gradient orientations from a color image.
*/
class CV_EXPORTS ColorGradient : public Modality
{
public:
/**
* \brief Default constructor. Uses reasonable default parameter values.
*/
ColorGradient();
/**
* \brief Constructor.
*
* \param weak_threshold When quantizing, discard gradients with magnitude less than this.
* \param num_features How many features a template must contain.
* \param strong_threshold Consider as candidate features only gradients whose norms are
* larger than this.
*/
ColorGradient(float weak_threshold, size_t num_features, float strong_threshold);
virtual String name() const;
virtual void read(const FileNode& fn);
virtual void write(FileStorage& fs) const;
float weak_threshold;
size_t num_features;
float strong_threshold;
protected:
virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
const Mat& mask) const;
};
/**
* \brief Modality that computes quantized surface normals from a dense depth map.
*/
class CV_EXPORTS DepthNormal : public Modality
{
public:
/**
* \brief Default constructor. Uses reasonable default parameter values.
*/
DepthNormal();
/**
* \brief Constructor.
*
* \param distance_threshold Ignore pixels beyond this distance.
* \param difference_threshold When computing normals, ignore contributions of pixels whose
* depth difference with the central pixel is above this threshold.
* \param num_features How many features a template must contain.
* \param extract_threshold Consider as candidate feature only if there are no differing
* orientations within a distance of extract_threshold.
*/
DepthNormal(int distance_threshold, int difference_threshold, size_t num_features,
int extract_threshold);
virtual String name() const;
virtual void read(const FileNode& fn);
virtual void write(FileStorage& fs) const;
int distance_threshold;
int difference_threshold;
size_t num_features;
int extract_threshold;
protected:
virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
const Mat& mask) const;
};
/**
* \brief Debug function to colormap a quantized image for viewing.
*/
void colormap(const Mat& quantized, Mat& dst);
/**
* \brief Represents a successful template match.
*/
struct CV_EXPORTS Match
{
Match()
{
}
Match(int x, int y, float similarity, const String& class_id, int template_id);
/// Sort matches with high similarity to the front
bool operator<(const Match& rhs) const
{
// Secondarily sort on template_id for the sake of duplicate removal
if (similarity != rhs.similarity)
return similarity > rhs.similarity;
else
return template_id < rhs.template_id;
}
bool operator==(const Match& rhs) const
{
return x == rhs.x && y == rhs.y && similarity == rhs.similarity && class_id == rhs.class_id;
}
int x;
int y;
float similarity;
String class_id;
int template_id;
};
inline
Match::Match(int _x, int _y, float _similarity, const String& _class_id, int _template_id)
: x(_x), y(_y), similarity(_similarity), class_id(_class_id), template_id(_template_id)
{}
/**
* \brief Object detector using the LINE template matching algorithm with any set of
* modalities.
*/
class CV_EXPORTS Detector
{
public:
/**
* \brief Empty constructor, initialize with read().
*/
Detector();
/**
* \brief Constructor.
*
* \param modalities Modalities to use (color gradients, depth normals, ...).
* \param T_pyramid Value of the sampling step T at each pyramid level. The
* number of pyramid levels is T_pyramid.size().
*/
Detector(const std::vector< Ptr<Modality> >& modalities, const std::vector<int>& T_pyramid);
/**
* \brief Detect objects by template matching.
*
* Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid.
*
* \param sources Source images, one for each modality.
* \param threshold Similarity threshold, a percentage between 0 and 100.
* \param[out] matches Template matches, sorted by similarity score.
* \param class_ids If non-empty, only search for the desired object classes.
* \param[out] quantized_images Optionally return vector<Mat> of quantized images.
* \param masks The masks for consideration during matching. The masks should be CV_8UC1
* where 255 represents a valid pixel. If non-empty, the vector must be
* the same size as sources. Each element must be
* empty or the same size as its corresponding source.
*/
void match(const std::vector<Mat>& sources, float threshold, std::vector<Match>& matches,
const std::vector<String>& class_ids = std::vector<String>(),
OutputArrayOfArrays quantized_images = noArray(),
const std::vector<Mat>& masks = std::vector<Mat>()) const;
/**
* \brief Add new object template.
*
* \param sources Source images, one for each modality.
* \param class_id Object class ID.
* \param object_mask Mask separating object from background.
* \param[out] bounding_box Optionally return bounding box of the extracted features.
*
* \return Template ID, or -1 if failed to extract a valid template.
*/
int addTemplate(const std::vector<Mat>& sources, const String& class_id,
const Mat& object_mask, Rect* bounding_box = NULL);
/**
* \brief Add a new object template computed by external means.
*/
int addSyntheticTemplate(const std::vector<Template>& templates, const String& class_id);
/**
* \brief Get the modalities used by this detector.
*
* You are not permitted to add/remove modalities, but you may dynamic_cast them to
* tweak parameters.
*/
const std::vector< Ptr<Modality> >& getModalities() const { return modalities; }
/**
* \brief Get sampling step T at pyramid_level.
*/
int getT(int pyramid_level) const { return T_at_level[pyramid_level]; }
/**
* \brief Get number of pyramid levels used by this detector.
*/
int pyramidLevels() const { return pyramid_levels; }
/**
* \brief Get the template pyramid identified by template_id.
*
* For example, with 2 modalities (Gradient, Normal) and two pyramid levels
* (L0, L1), the order is (GradientL0, NormalL0, GradientL1, NormalL1).
*/
const std::vector<Template>& getTemplates(const String& class_id, int template_id) const;
int numTemplates() const;
int numTemplates(const String& class_id) const;
int numClasses() const { return static_cast<int>(class_templates.size()); }
std::vector<String> classIds() const;
void read(const FileNode& fn);
void write(FileStorage& fs) const;
String readClass(const FileNode& fn, const String &class_id_override = "");
void writeClass(const String& class_id, FileStorage& fs) const;
void readClasses(const std::vector<String>& class_ids,
const String& format = "templates_%s.yml.gz");
void writeClasses(const String& format = "templates_%s.yml.gz") const;
protected:
std::vector< Ptr<Modality> > modalities;
int pyramid_levels;
std::vector<int> T_at_level;
typedef std::vector<Template> TemplatePyramid;
typedef std::map<String, std::vector<TemplatePyramid> > TemplatesMap;
TemplatesMap class_templates;
typedef std::vector<Mat> LinearMemories;
// Indexed as [pyramid level][modality][quantized label]
typedef std::vector< std::vector<LinearMemories> > LinearMemoryPyramid;
void matchClass(const LinearMemoryPyramid& lm_pyramid,
const std::vector<Size>& sizes,
float threshold, std::vector<Match>& matches,
const String& class_id,
const std::vector<TemplatePyramid>& template_pyramids) const;
};
/**
* \brief Factory function for detector using LINE algorithm with color gradients.
*
* Default parameter settings suitable for VGA images.
*/
CV_EXPORTS Ptr<Detector> getDefaultLINE();
/**
* \brief Factory function for detector using LINE-MOD algorithm with color gradients
* and depth normals.
*
* Default parameter settings suitable for VGA images.
*/
CV_EXPORTS Ptr<Detector> getDefaultLINEMOD();
} // namespace linemod
} // namespace cv
#endif // __OPENCV_OBJDETECT_LINEMOD_HPP__
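A hedged end-to-end sketch of the API declared above (the file names and the single-modality setup are hypothetical; the full interactive demo follows in samples/linemod.cpp):

#include <opencv2/highgui.hpp>
#include <opencv2/rgbd/linemod.hpp>
#include <cstdio>
#include <vector>

int main()
{
    // Color-gradient-only detector with default VGA-oriented parameters.
    cv::Ptr<cv::linemod::Detector> detector = cv::linemod::getDefaultLINE();

    cv::Mat color = cv::imread("object.png");          // one source image per modality
    cv::Mat mask  = cv::imread("object_mask.png", 0);  // CV_8UC1, 255 marks object pixels

    int template_id = detector->addTemplate(std::vector<cv::Mat>(1, color), "mug", mask);
    if (template_id < 0)
        return 1;  // could not extract a valid template

    std::vector<cv::linemod::Match> matches;
    detector->match(std::vector<cv::Mat>(1, color), 80.0f, matches);  // threshold in percent
    if (!matches.empty())
        std::printf("best: %.1f%% at (%d, %d)\n", matches[0].similarity, matches[0].x, matches[0].y);
    return 0;
}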

@@ -0,0 +1,705 @@
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc/imgproc_c.h> // cvFindContours
#include <opencv2/imgproc.hpp>
#include <opencv2/rgbd.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <iterator>
#include <set>
#include <cstdio>
#include <iostream>
// Function prototypes
void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f);
std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& mask, cv::Mat& dst);
void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& dst);
void drawResponse(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Mat& dst, cv::Point offset, int T);
cv::Mat displayQuantized(const cv::Mat& quantized);
// Copy of cv_mouse from cv_utilities
class Mouse
{
public:
static void start(const std::string& a_img_name)
{
cv::setMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
}
static int event(void)
{
int l_event = m_event;
m_event = -1;
return l_event;
}
static int x(void)
{
return m_x;
}
static int y(void)
{
return m_y;
}
private:
static void cv_on_mouse(int a_event, int a_x, int a_y, int, void *)
{
m_event = a_event;
m_x = a_x;
m_y = a_y;
}
static int m_event;
static int m_x;
static int m_y;
};
int Mouse::m_event;
int Mouse::m_x;
int Mouse::m_y;
static void help()
{
printf("Usage: openni_demo [templates.yml]\n\n"
"Place your object on a planar, featureless surface. With the mouse,\n"
"frame it in the 'color' window and right click to learn a first template.\n"
"Then press 'l' to enter online learning mode, and move the camera around.\n"
"When the match score falls between 90-95%% the demo will add a new template.\n\n"
"Keys:\n"
"\t h -- This help page\n"
"\t l -- Toggle online learning\n"
"\t m -- Toggle printing match result\n"
"\t t -- Toggle printing timings\n"
"\t w -- Write learned templates to disk\n"
"\t [ ] -- Adjust matching threshold: '[' down, ']' up\n"
"\t q -- Quit\n\n");
}
// Adapted from cv_timer in cv_utilities
class Timer
{
public:
Timer() : start_(0), time_(0) {}
void start()
{
start_ = cv::getTickCount();
}
void stop()
{
CV_Assert(start_ != 0);
int64 end = cv::getTickCount();
time_ += end - start_;
start_ = 0;
}
double time()
{
double ret = time_ / cv::getTickFrequency();
time_ = 0;
return ret;
}
private:
int64 start_, time_;
};
// Functions to store detector and templates in single XML/YAML file
static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
cv::Ptr<cv::linemod::Detector> detector = cv::makePtr<cv::linemod::Detector>();
cv::FileStorage fs(filename, cv::FileStorage::READ);
detector->read(fs.root());
cv::FileNode fn = fs["classes"];
for (cv::FileNodeIterator i = fn.begin(), iend = fn.end(); i != iend; ++i)
detector->readClass(*i);
return detector;
}
static void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
{
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
detector->write(fs);
std::vector<cv::String> ids = detector->classIds();
fs << "classes" << "[";
for (int i = 0; i < (int)ids.size(); ++i)
{
fs << "{";
detector->writeClass(ids[i], fs);
fs << "}"; // current class
}
fs << "]"; // classes
}
int main(int argc, char * argv[])
{
// Various settings and flags
bool show_match_result = true;
bool show_timings = false;
bool learn_online = false;
int num_classes = 0;
int matching_threshold = 80;
/// @todo Keys for changing these?
cv::Size roi_size(200, 200);
int learning_lower_bound = 90;
int learning_upper_bound = 95;
// Timers
Timer extract_timer;
Timer match_timer;
// Initialize HighGUI
help();
cv::namedWindow("color");
cv::namedWindow("normals");
Mouse::start("color");
// Initialize LINEMOD data structures
cv::Ptr<cv::linemod::Detector> detector;
std::string filename;
if (argc == 1)
{
filename = "linemod_templates.yml";
detector = cv::linemod::getDefaultLINEMOD();
}
else
{
detector = readLinemod(argv[1]);
std::vector<cv::String> ids = detector->classIds();
num_classes = detector->numClasses();
printf("Loaded %s with %d classes and %d templates\n",
argv[1], num_classes, detector->numTemplates());
if (!ids.empty())
{
printf("Class ids:\n");
std::copy(ids.begin(), ids.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
}
}
int num_modalities = (int)detector->getModalities().size();
// Open Kinect sensor
cv::VideoCapture capture( cv::CAP_OPENNI );
if (!capture.isOpened())
{
printf("Could not open OpenNI-capable sensor\n");
return -1;
}
capture.set(cv::CAP_PROP_OPENNI_REGISTRATION, 1);
double focal_length = capture.get(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
//printf("Focal length = %f\n", focal_length);
// Main loop
cv::Mat color, depth;
for(;;)
{
// Capture next color/depth pair
capture.grab();
capture.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);
capture.retrieve(color, cv::CAP_OPENNI_BGR_IMAGE);
std::vector<cv::Mat> sources;
sources.push_back(color);
sources.push_back(depth);
cv::Mat display = color.clone();
if (!learn_online)
{
cv::Point mouse(Mouse::x(), Mouse::y());
int event = Mouse::event();
// Compute ROI centered on current mouse location
cv::Point roi_offset(roi_size.width / 2, roi_size.height / 2);
cv::Point pt1 = mouse - roi_offset; // top left
cv::Point pt2 = mouse + roi_offset; // bottom right
if (event == cv::EVENT_RBUTTONDOWN)
{
// Compute object mask by subtracting the plane within the ROI
std::vector<CvPoint> chain(4);
chain[0] = pt1;
chain[1] = cv::Point(pt2.x, pt1.y);
chain[2] = pt2;
chain[3] = cv::Point(pt1.x, pt2.y);
cv::Mat mask;
subtractPlane(depth, mask, chain, focal_length);
cv::imshow("mask", mask);
// Extract template
std::string class_id = cv::format("class%d", num_classes);
cv::Rect bb;
extract_timer.start();
int template_id = detector->addTemplate(sources, class_id, mask, &bb);
extract_timer.stop();
if (template_id != -1)
{
printf("*** Added template (id %d) for new object class %d***\n",
template_id, num_classes);
//printf("Extracted at (%d, %d) size %dx%d\n", bb.x, bb.y, bb.width, bb.height);
}
++num_classes;
}
// Draw ROI for display
cv::rectangle(display, pt1, pt2, CV_RGB(0,0,0), 3);
cv::rectangle(display, pt1, pt2, CV_RGB(255,255,0), 1);
}
// Perform matching
std::vector<cv::linemod::Match> matches;
std::vector<cv::String> class_ids;
std::vector<cv::Mat> quantized_images;
match_timer.start();
detector->match(sources, (float)matching_threshold, matches, class_ids, quantized_images);
match_timer.stop();
int classes_visited = 0;
std::set<std::string> visited;
for (int i = 0; (i < (int)matches.size()) && (classes_visited < num_classes); ++i)
{
cv::linemod::Match m = matches[i];
if (visited.insert(m.class_id).second)
{
++classes_visited;
if (show_match_result)
{
printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
}
// Draw matching template
const std::vector<cv::linemod::Template>& templates = detector->getTemplates(m.class_id, m.template_id);
drawResponse(templates, num_modalities, display, cv::Point(m.x, m.y), detector->getT(0));
if (learn_online == true)
{
/// @todo Online learning possibly broken by new gradient feature extraction,
/// which assumes an accurate object outline.
// Compute masks based on convex hull of matched template
cv::Mat color_mask, depth_mask;
std::vector<CvPoint> chain = maskFromTemplate(templates, num_modalities,
cv::Point(m.x, m.y), color.size(),
color_mask, display);
subtractPlane(depth, depth_mask, chain, focal_length);
cv::imshow("mask", depth_mask);
// If pretty sure (but not TOO sure), add new template
if (learning_lower_bound < m.similarity && m.similarity < learning_upper_bound)
{
extract_timer.start();
int template_id = detector->addTemplate(sources, m.class_id, depth_mask);
extract_timer.stop();
if (template_id != -1)
{
printf("*** Added template (id %d) for existing object class %s***\n",
template_id, m.class_id.c_str());
}
}
}
}
}
if (show_match_result && matches.empty())
printf("No matches found...\n");
if (show_timings)
{
printf("Training: %.2fs\n", extract_timer.time());
printf("Matching: %.2fs\n", match_timer.time());
}
if (show_match_result || show_timings)
printf("------------------------------------------------------------\n");
cv::imshow("color", display);
cv::imshow("normals", quantized_images[1]);
cv::FileStorage fs;
char key = (char)cv::waitKey(10);
if( key == 'q' )
break;
switch (key)
{
case 'h':
help();
break;
case 'm':
// toggle printing match result
show_match_result = !show_match_result;
printf("Show match result %s\n", show_match_result ? "ON" : "OFF");
break;
case 't':
// toggle printing timings
show_timings = !show_timings;
printf("Show timings %s\n", show_timings ? "ON" : "OFF");
break;
case 'l':
// toggle online learning
learn_online = !learn_online;
printf("Online learning %s\n", learn_online ? "ON" : "OFF");
break;
case '[':
// decrement threshold
matching_threshold = std::max(matching_threshold - 1, -100);
printf("New threshold: %d\n", matching_threshold);
break;
case ']':
// increment threshold
matching_threshold = std::min(matching_threshold + 1, +100);
printf("New threshold: %d\n", matching_threshold);
break;
case 'w':
// write model to disk
writeLinemod(detector, filename);
printf("Wrote detector and templates to %s\n", filename.c_str());
break;
default:
;
}
}
return 0;
}
static void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
{
real.resize(proj.size());
double f_inv = 1.0 / f;
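// Back-project pixels through an idealized pinhole camera; the demo assumes a
// VGA sensor with the principal point fixed at (320, 240) and a single focal
// length f for both axes.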
for (int i = 0; i < (int)proj.size(); ++i)
{
double Z = proj[i].z;
real[i].x = (proj[i].x - 320.) * (f_inv * Z);
real[i].y = (proj[i].y - 240.) * (f_inv * Z);
real[i].z = Z;
}
}
static void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
{
const int l_num_cost_pts = 200;
float l_thres = 4;
IplImage * lp_mask = cvCreateImage(cvGetSize(ap_depth), IPL_DEPTH_8U, 1);
cvSet(lp_mask, cvRealScalar(0));
std::vector<CvPoint> l_chain_vector;
float l_chain_length = 0;
float * lp_seg_length = new float[a_chain.size()];
for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
{
float x_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x);
float y_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y);
lp_seg_length[l_i] = sqrt(x_diff*x_diff + y_diff*y_diff);
l_chain_length += lp_seg_length[l_i];
}
for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
{
if (lp_seg_length[l_i] > 0)
{
int l_cur_num = cvRound(l_num_cost_pts * lp_seg_length[l_i] / l_chain_length);
float l_cur_len = lp_seg_length[l_i] / l_cur_num;
for (int l_j = 0; l_j < l_cur_num; ++l_j)
{
float l_ratio = (l_cur_len * l_j / lp_seg_length[l_i]);
CvPoint l_pts;
l_pts.x = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x);
l_pts.y = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y);
l_chain_vector.push_back(l_pts);
}
}
}
std::vector<cv::Point3d> lp_src_3Dpts(l_chain_vector.size());
for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
{
lp_src_3Dpts[l_i].x = l_chain_vector[l_i].x;
lp_src_3Dpts[l_i].y = l_chain_vector[l_i].y;
lp_src_3Dpts[l_i].z = CV_IMAGE_ELEM(ap_depth, unsigned short, cvRound(lp_src_3Dpts[l_i].y), cvRound(lp_src_3Dpts[l_i].x));
//CV_IMAGE_ELEM(lp_mask,unsigned char,(int)lp_src_3Dpts[l_i].Y,(int)lp_src_3Dpts[l_i].X)=255;
}
//cv_show_image(lp_mask,"hallo2");
reprojectPoints(lp_src_3Dpts, lp_src_3Dpts, f);
CvMat * lp_pts = cvCreateMat((int)l_chain_vector.size(), 4, CV_32F);
CvMat * lp_v = cvCreateMat(4, 4, CV_32F);
CvMat * lp_w = cvCreateMat(4, 1, CV_32F);
for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
{
CV_MAT_ELEM(*lp_pts, float, l_i, 0) = (float)lp_src_3Dpts[l_i].x;
CV_MAT_ELEM(*lp_pts, float, l_i, 1) = (float)lp_src_3Dpts[l_i].y;
CV_MAT_ELEM(*lp_pts, float, l_i, 2) = (float)lp_src_3Dpts[l_i].z;
CV_MAT_ELEM(*lp_pts, float, l_i, 3) = 1.0f;
}
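// Total least-squares plane fit: each row of lp_pts is a homogeneous point
// (x, y, z, 1), and the plane vector n minimizing ||lp_pts * n|| is the right
// singular vector belonging to the smallest singular value, i.e. the last
// column of V extracted just below.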
cvSVD(lp_pts, lp_w, 0, lp_v);
float l_n[4] = {CV_MAT_ELEM(*lp_v, float, 0, 3),
CV_MAT_ELEM(*lp_v, float, 1, 3),
CV_MAT_ELEM(*lp_v, float, 2, 3),
CV_MAT_ELEM(*lp_v, float, 3, 3)};
float l_norm = sqrt(l_n[0] * l_n[0] + l_n[1] * l_n[1] + l_n[2] * l_n[2]);
l_n[0] /= l_norm;
l_n[1] /= l_norm;
l_n[2] /= l_norm;
l_n[3] /= l_norm;
float l_max_dist = 0;
for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
{
float l_dist = l_n[0] * CV_MAT_ELEM(*lp_pts, float, l_i, 0) +
l_n[1] * CV_MAT_ELEM(*lp_pts, float, l_i, 1) +
l_n[2] * CV_MAT_ELEM(*lp_pts, float, l_i, 2) +
l_n[3] * CV_MAT_ELEM(*lp_pts, float, l_i, 3);
if (fabs(l_dist) > l_max_dist)
l_max_dist = l_dist;
}
//std::cerr << "plane: " << l_n[0] << ";" << l_n[1] << ";" << l_n[2] << ";" << l_n[3] << " maxdist: " << l_max_dist << " end" << std::endl;
int l_minx = ap_depth->width;
int l_miny = ap_depth->height;
int l_maxx = 0;
int l_maxy = 0;
for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
{
l_minx = std::min(l_minx, a_chain[l_i].x);
l_miny = std::min(l_miny, a_chain[l_i].y);
l_maxx = std::max(l_maxx, a_chain[l_i].x);
l_maxy = std::max(l_maxy, a_chain[l_i].y);
}
int l_w = l_maxx - l_minx + 1;
int l_h = l_maxy - l_miny + 1;
int l_nn = (int)a_chain.size();
CvPoint * lp_chain = new CvPoint[l_nn];
for (int l_i = 0; l_i < l_nn; ++l_i)
lp_chain[l_i] = a_chain[l_i];
cvFillPoly(lp_mask, &lp_chain, &l_nn, 1, cvScalar(255, 255, 255));
delete[] lp_chain;
//cv_show_image(lp_mask,"hallo1");
std::vector<cv::Point3d> lp_dst_3Dpts(l_h * l_w);
int l_ind = 0;
for (int l_r = 0; l_r < l_h; ++l_r)
{
for (int l_c = 0; l_c < l_w; ++l_c)
{
lp_dst_3Dpts[l_ind].x = l_c + l_minx;
lp_dst_3Dpts[l_ind].y = l_r + l_miny;
lp_dst_3Dpts[l_ind].z = CV_IMAGE_ELEM(ap_depth, unsigned short, l_r + l_miny, l_c + l_minx);
++l_ind;
}
}
reprojectPoints(lp_dst_3Dpts, lp_dst_3Dpts, f);
l_ind = 0;
for (int l_r = 0; l_r < l_h; ++l_r)
{
for (int l_c = 0; l_c < l_w; ++l_c)
{
float l_dist = (float)(l_n[0] * lp_dst_3Dpts[l_ind].x + l_n[1] * lp_dst_3Dpts[l_ind].y + lp_dst_3Dpts[l_ind].z * l_n[2] + l_n[3]);
++l_ind;
if (CV_IMAGE_ELEM(lp_mask, unsigned char, l_r + l_miny, l_c + l_minx) != 0)
{
if (fabs(l_dist) < std::max(l_thres, (l_max_dist * 2.0f)))
{
for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
{
int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));
CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 0;
}
}
else
{
for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
{
int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));
CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 255;
}
}
}
}
}
cvReleaseImage(&lp_mask);
cvReleaseMat(&lp_pts);
cvReleaseMat(&lp_w);
cvReleaseMat(&lp_v);
}
void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f)
{
mask = cv::Mat::zeros(depth.size(), CV_8U);
std::vector<IplImage*> tmp;
IplImage mask_ipl = mask;
tmp.push_back(&mask_ipl);
IplImage depth_ipl = depth;
filterPlane(&depth_ipl, tmp, chain, f);
}
std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& mask, cv::Mat& dst)
{
templateConvexHull(templates, num_modalities, offset, size, mask);
const int OFFSET = 30;
cv::dilate(mask, mask, cv::Mat(), cv::Point(-1,-1), OFFSET);
CvMemStorage * lp_storage = cvCreateMemStorage(0);
CvTreeNodeIterator l_iterator;
CvSeqReader l_reader;
CvSeq * lp_contour = 0;
cv::Mat mask_copy = mask.clone();
IplImage mask_copy_ipl = mask_copy;
cvFindContours(&mask_copy_ipl, lp_storage, &lp_contour, sizeof(CvContour),
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
std::vector<CvPoint> l_pts1; // to use as input to cv_primesensor::filter_plane
cvInitTreeNodeIterator(&l_iterator, lp_contour, 1);
while ((lp_contour = (CvSeq *)cvNextTreeNode(&l_iterator)) != 0)
{
CvPoint l_pt0;
cvStartReadSeq(lp_contour, &l_reader, 0);
CV_READ_SEQ_ELEM(l_pt0, l_reader);
l_pts1.push_back(l_pt0);
for (int i = 0; i < lp_contour->total; ++i)
{
CvPoint l_pt1;
CV_READ_SEQ_ELEM(l_pt1, l_reader);
/// @todo Really need dst at all? Can just as well do this outside
cv::line(dst, l_pt0, l_pt1, CV_RGB(0, 255, 0), 2);
l_pt0 = l_pt1;
l_pts1.push_back(l_pt0);
}
}
cvReleaseMemStorage(&lp_storage);
return l_pts1;
}
// Adapted from cv_show_angles
cv::Mat displayQuantized(const cv::Mat& quantized)
{
cv::Mat color(quantized.size(), CV_8UC3);
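// Quantized values carry at most one set bit (see QuantizedPyramid::quantize),
// so the switch below assigns each power-of-two label its own gray level,
// shows 255 in red, and flags any unexpected value in green.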
for (int r = 0; r < quantized.rows; ++r)
{
const uchar* quant_r = quantized.ptr(r);
cv::Vec3b* color_r = color.ptr<cv::Vec3b>(r);
for (int c = 0; c < quantized.cols; ++c)
{
cv::Vec3b& bgr = color_r[c];
switch (quant_r[c])
{
case 0: bgr[0]= 0; bgr[1]= 0; bgr[2]= 0; break;
case 1: bgr[0]= 55; bgr[1]= 55; bgr[2]= 55; break;
case 2: bgr[0]= 80; bgr[1]= 80; bgr[2]= 80; break;
case 4: bgr[0]=105; bgr[1]=105; bgr[2]=105; break;
case 8: bgr[0]=130; bgr[1]=130; bgr[2]=130; break;
case 16: bgr[0]=155; bgr[1]=155; bgr[2]=155; break;
case 32: bgr[0]=180; bgr[1]=180; bgr[2]=180; break;
case 64: bgr[0]=205; bgr[1]=205; bgr[2]=205; break;
case 128: bgr[0]=230; bgr[1]=230; bgr[2]=230; break;
case 255: bgr[0]= 0; bgr[1]= 0; bgr[2]=255; break;
default: bgr[0]= 0; bgr[1]=255; bgr[2]= 0; break;
}
}
}
return color;
}
// Adapted from cv_line_template::convex_hull
void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& dst)
{
std::vector<cv::Point> points;
for (int m = 0; m < num_modalities; ++m)
{
for (int i = 0; i < (int)templates[m].features.size(); ++i)
{
cv::linemod::Feature f = templates[m].features[i];
points.push_back(cv::Point(f.x, f.y) + offset);
}
}
std::vector<cv::Point> hull;
cv::convexHull(points, hull);
dst = cv::Mat::zeros(size, CV_8U);
const int hull_count = (int)hull.size();
const cv::Point* hull_pts = &hull[0];
cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
}
void drawResponse(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Mat& dst, cv::Point offset, int T)
{
static const cv::Scalar COLORS[5] = { CV_RGB(0, 0, 255),
CV_RGB(0, 255, 0),
CV_RGB(255, 255, 0),
CV_RGB(255, 140, 0),
CV_RGB(255, 0, 0) };
for (int m = 0; m < num_modalities; ++m)
{
// NOTE: Original demo recalculated max response for each feature in the TxT
// box around it and chose the display color based on that response. Here
// the display color just depends on the modality.
cv::Scalar color = COLORS[m];
for (int i = 0; i < (int)templates[m].features.size(); ++i)
{
cv::linemod::Feature f = templates[m].features[i];
cv::Point pt(f.x + offset.x, f.y + offset.y);
cv::circle(dst, pt, T / 2, color);
}
}
}

@@ -36,18 +36,45 @@
#include <opencv2/rgbd.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/contrib.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/core/utility.hpp>
#include <iostream>
#include <fstream>
using namespace std;
using namespace cv;
using namespace cv::rgbd;
#define BILATERAL_FILTER 0 // if 1 then bilateral filter will be used for the depth
class MyTickMeter
{
public:
MyTickMeter() { reset(); }
void start() { startTime = getTickCount(); }
void stop()
{
int64 time = getTickCount();
if ( startTime == 0 )
return;
++counter;
sumTime += ( time - startTime );
startTime = 0;
}
int64 getTimeTicks() const { return sumTime; }
double getTimeSec() const { return (double)getTimeTicks()/getTickFrequency(); }
int64 getCounter() const { return counter; }
void reset() { startTime = sumTime = 0; counter = 0; }
private:
int64 counter;
int64 sumTime;
int64 startTime;
};
static
void writeResults( const string& filename, const vector<string>& timestamps, const vector<Mat>& Rt )
{
@@ -154,7 +181,7 @@ int main(int argc, char** argv)
}
odometry->set("cameraMatrix", cameraMatrix);
TickMeter gtm;
MyTickMeter gtm;
int count = 0;
for(int i = 0; !file.eof(); i++)
{
@@ -167,7 +194,7 @@ int main(int argc, char** argv)
// Read one pair (rgb and depth)
// example: 1305031453.359684 rgb/1305031453.359684.png 1305031453.374112 depth/1305031453.374112.png
#if BILATERAL_FILTER
TickMeter tm_bilateral_filter;
MyTickMeter tm_bilateral_filter;
#endif
{
string rgbFilename = str.substr(timestampLength + 1, rgbPathLehgth );
@@ -213,7 +240,7 @@ int main(int argc, char** argv)
Mat Rt;
if(!Rts.empty())
{
TickMeter tm;
MyTickMeter tm;
tm.start();
gtm.start();
bool res = odometry->compute(frame_curr, frame_prev, Rt);

@@ -33,19 +33,18 @@
*
*/
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/rgbd.hpp>
#include <iostream>
#include "precomp.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace
namespace cv
{
namespace rgbd
{
class DepthCleanerImpl
{
public:
DepthCleanerImpl(int window_size, int depth, cv::DepthCleaner::DEPTH_CLEANER_METHOD method)
DepthCleanerImpl(int window_size, int depth, DepthCleaner::DEPTH_CLEANER_METHOD method)
:
depth_(depth),
window_size_(window_size),
@@ -69,14 +68,11 @@ namespace
protected:
int depth_;
int window_size_;
cv::DepthCleaner::DEPTH_CLEANER_METHOD method_;
DepthCleaner::DEPTH_CLEANER_METHOD method_;
};
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace
{
/** Given a depth image, compute the normals as detailed in the LINEMOD paper
* ``Gradient Response Maps for Real-Time Detection of Texture-Less Objects``
* by S. Hinterstoisser, C. Cagniart, S. Ilic, P. Sturm, N. Navab, P. Fua, and V. Lepetit
@@ -85,10 +81,10 @@ namespace
class NIL: public DepthCleanerImpl
{
public:
typedef cv::Vec<T, 3> Vec3T;
typedef cv::Matx<T, 3, 3> Mat33T;
typedef Vec<T, 3> Vec3T;
typedef Matx<T, 3, 3> Mat33T;
NIL(int window_size, int depth, cv::DepthCleaner::DEPTH_CLEANER_METHOD method)
NIL(int window_size, int depth, DepthCleaner::DEPTH_CLEANER_METHOD method)
:
DepthCleanerImpl(window_size, depth, method)
{
@@ -106,27 +102,27 @@ namespace
* @return
*/
void
compute(const cv::Mat& depth_in, cv::Mat& depth_out) const
compute(const Mat& depth_in, Mat& depth_out) const
{
switch (depth_in.depth())
{
case CV_16U:
{
const cv::Mat_<unsigned short> &depth(depth_in);
cv::Mat depth_out_tmp;
const Mat_<unsigned short> &depth(depth_in);
Mat depth_out_tmp;
computeImpl<unsigned short, float>(depth, depth_out_tmp, 0.001f);
depth_out_tmp.convertTo(depth_out, CV_16U);
break;
}
case CV_32F:
{
const cv::Mat_<float> &depth(depth_in);
const Mat_<float> &depth(depth_in);
computeImpl<float, float>(depth, depth_out, 1);
break;
}
case CV_64F:
{
const cv::Mat_<double> &depth(depth_in);
const Mat_<double> &depth(depth_in);
computeImpl<double, double>(depth, depth_out, 1);
break;
}
@@ -140,7 +136,7 @@ namespace
*/
template<typename DepthDepth, typename ContainerDepth>
void
computeImpl(const cv::Mat_<DepthDepth> &depth_in, cv::Mat & depth_out, ContainerDepth scale) const
computeImpl(const Mat_<DepthDepth> &depth_in, Mat & depth_out, ContainerDepth scale) const
{
const ContainerDepth theta_mean = (float)(30. * CV_PI / 180);
int rows = depth_in.rows;
@@ -148,14 +144,14 @@ namespace
// Precompute some data
const ContainerDepth sigma_L = (float)(0.8 + 0.035 * theta_mean / (CV_PI / 2 - theta_mean));
cv::Mat_<ContainerDepth> sigma_z(rows, cols);
Mat_<ContainerDepth> sigma_z(rows, cols);
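// Per-pixel axial noise estimate for the Kinect: sigma_z grows quadratically
// with distance from roughly 0.4 m; the constants appear to follow the
// empirical fit of Nguyen et al., "Modeling Kinect Sensor Noise".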
for (int y = 0; y < rows; ++y)
for (int x = 0; x < cols; ++x)
sigma_z(y, x) = (float)(0.0012 + 0.0019 * (depth_in(y, x) * scale - 0.4) * (depth_in(y, x) * scale - 0.4));
ContainerDepth difference_threshold = 10;
cv::Mat_<ContainerDepth> Dw_sum = cv::Mat_<ContainerDepth>::zeros(rows, cols), w_sum =
cv::Mat_<ContainerDepth>::zeros(rows, cols);
Mat_<ContainerDepth> Dw_sum = Mat_<ContainerDepth>::zeros(rows, cols), w_sum =
Mat_<ContainerDepth>::zeros(rows, cols);
for (int y = 0; y < rows - 1; ++y)
{
// Every pixel has had the contribution of previous pixels (in a row-major way)
@@ -192,15 +188,12 @@ namespace
}
}
}
cv::Mat(Dw_sum / w_sum).copyTo(depth_out);
Mat(Dw_sum / w_sum).copyTo(depth_out);
}
};
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cv
{
/** Default constructor of the Algorithm class that computes normals
*/
DepthCleaner::DepthCleaner(int depth, int window_size, int method_in)
@@ -292,12 +285,12 @@ namespace cv
void
DepthCleaner::operator()(InputArray depth_in_array, OutputArray depth_out_array) const
{
cv::Mat depth_in = depth_in_array.getMat();
Mat depth_in = depth_in_array.getMat();
CV_Assert(depth_in.dims == 2);
CV_Assert(depth_in.channels() == 1);
depth_out_array.create(depth_in.size(), depth_);
cv::Mat depth_out = depth_out_array.getMat();
Mat depth_out = depth_out_array.getMat();
// Initialize the pimpl
initialize();
@@ -324,3 +317,4 @@ namespace cv
}
}
}
}

@@ -34,12 +34,13 @@
*/
#include <opencv2/rgbd.hpp>
#include <limits>
#include "depth_to_3d.h"
#include "utils.h"
namespace
namespace cv
{
namespace rgbd
{
/**
* @param K
@@ -47,7 +48,7 @@ namespace
* @param mask the mask of the points to consider (can be empty)
* @param points3d the resulting 3d points, a 3-channel matrix
*/
void
static void
depthTo3d_from_uvz(const cv::Mat& in_K, const cv::Mat& u_mat, const cv::Mat& v_mat, const cv::Mat& z_mat,
cv::Mat& points3d)
{
@@ -89,7 +90,7 @@ namespace
* @param mask the mask of the points to consider (can be empty)
* @param points3d the resulting 3d points
*/
void
static void
depthTo3dMask(const cv::Mat& depth, const cv::Mat& K, const cv::Mat& mask, cv::Mat& points3d)
{
// Create 3D points in one go.
@@ -168,13 +169,9 @@ namespace
}
}
}
}
///////////////////////////////////////////////////////////////////////////////
namespace cv
{
/**
* @param K
* @param depth the depth image
@@ -263,3 +260,4 @@ namespace cv
}
}
}
}
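For reference, the back-projection that depthTo3d and the helpers above implement is the standard pinhole model (a sketch in the usual OpenCV intrinsic naming, ignoring any skew term in K):

$$ X = \frac{(u - c_x)\,Z}{f_x}, \qquad Y = \frac{(v - c_y)\,Z}{f_y}, \qquad Z = \mathrm{depth}(u, v). $$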

@@ -39,7 +39,12 @@
#ifdef __cplusplus
#include <opencv2/core.hpp>
#include <limits>
#include <limits.h>
namespace cv
{
namespace rgbd
{
/**
* @param depth the depth image, containing depth with the value T
@@ -114,6 +119,9 @@ convertDepthToFloat(const cv::Mat& depth, float scale, const cv::Mat &uv_mat, cv
}
}
}
}
#endif /* __cplusplus */
#endif

File diff suppressed because it is too large (modules/rgbd/src/linemod.cpp).

@@ -33,13 +33,12 @@
*
*/
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgproc/types_c.h>
#include <opencv2/rgbd.hpp>
#include "precomp.hpp"
namespace
namespace cv
{
namespace rgbd
{
/** Just compute the norm of a vector
* @param vec a vector of size 3 and any type T
* @return
@@ -47,7 +46,7 @@ namespace
template<typename T>
T
inline
norm_vec(const cv::Vec<T, 3> &vec)
norm_vec(const Vec<T, 3> &vec)
{
return std::sqrt(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]);
}
@@ -57,16 +56,16 @@ namespace
* @return
*/
template<typename T>
cv::Mat_<T>
computeRadius(const cv::Mat &points)
Mat_<T>
computeRadius(const Mat &points)
{
typedef cv::Vec<T, 3> PointT;
typedef Vec<T, 3> PointT;
// Compute the radius image
cv::Size size(points.cols, points.rows);
cv::Mat_<T> r(size);
Size size(points.cols, points.rows);
Mat_<T> r(size);
if (points.isContinuous())
size = cv::Size(points.cols * points.rows, 1);
size = Size(points.cols * points.rows, 1);
for (int y = 0; y < size.height; ++y)
{
const PointT* point = points.ptr < PointT > (y), *point_end = points.ptr < PointT > (y) + size.width;
@@ -83,21 +82,21 @@ namespace
// by H. Badino, D. Huber, Y. Park and T. Kanade
template<typename T>
void
computeThetaPhi(int rows, int cols, const cv::Matx<T, 3, 3>& K, cv::Mat &cos_theta, cv::Mat &sin_theta,
cv::Mat &cos_phi, cv::Mat &sin_phi)
computeThetaPhi(int rows, int cols, const Matx<T, 3, 3>& K, Mat &cos_theta, Mat &sin_theta,
Mat &cos_phi, Mat &sin_phi)
{
// Create some bogus coordinates
cv::Mat depth_image = K(0, 0) * cv::Mat_ < T > ::ones(rows, cols);
cv::Mat points3d;
depthTo3d(depth_image, cv::Mat(K), points3d);
Mat depth_image = K(0, 0) * Mat_ < T > ::ones(rows, cols);
Mat points3d;
depthTo3d(depth_image, Mat(K), points3d);
typedef cv::Vec<T, 3> Vec3T;
typedef Vec<T, 3> Vec3T;
cos_theta = cv::Mat_ < T > (rows, cols);
sin_theta = cv::Mat_ < T > (rows, cols);
cos_phi = cv::Mat_ < T > (rows, cols);
sin_phi = cv::Mat_ < T > (rows, cols);
cv::Mat r = computeRadius<T>(points3d);
cos_theta = Mat_ < T > (rows, cols);
sin_theta = Mat_ < T > (rows, cols);
cos_phi = Mat_ < T > (rows, cols);
sin_phi = Mat_ < T > (rows, cols);
Mat r = computeRadius<T>(points3d);
for (int y = 0; y < rows; ++y)
{
T *row_cos_theta = cos_theta.ptr < T > (y), *row_sin_theta = sin_theta.ptr < T > (y);
@@ -127,9 +126,9 @@ namespace
template<typename T>
inline
void
signNormal(const cv::Vec<T, 3> & normal_in, cv::Vec<T, 3> & normal_out)
signNormal(const Vec<T, 3> & normal_in, Vec<T, 3> & normal_out)
{
cv::Vec<T, 3> res;
Vec<T, 3> res;
if (normal_in[2] > 0)
res = -normal_in / norm_vec(normal_in);
else
@@ -144,7 +143,7 @@ namespace
template<typename T>
inline
void
signNormal(T a, T b, T c, cv::Vec<T, 3> & normal)
signNormal(T a, T b, T c, Vec<T, 3> & normal)
{
T norm = 1 / std::sqrt(a * a + b * b + c * c);
if (c > 0)
@@ -160,16 +159,14 @@ namespace
normal[2] = c * norm;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace
{
class RgbdNormalsImpl
{
public:
RgbdNormalsImpl(int rows, int cols, int window_size, int depth, const cv::Mat &K,
cv::RgbdNormals::RGBD_NORMALS_METHOD method)
RgbdNormalsImpl(int rows, int cols, int window_size, int depth, const Mat &K,
RgbdNormals::RGBD_NORMALS_METHOD method)
:
rows_(rows),
cols_(cols),
@@ -190,19 +187,19 @@ namespace
cache()=0;
bool
validate(int rows, int cols, int depth, const cv::Mat &K_ori, int window_size, int method) const
validate(int rows, int cols, int depth, const Mat &K_ori, int window_size, int method) const
{
if ((K_ori.cols != K_ori_.cols) || (K_ori.rows != K_ori_.rows) || (K_ori.type() != K_ori_.type()))
return false;
bool K_test = !(cv::countNonZero(K_ori != K_ori_));
bool K_test = !(countNonZero(K_ori != K_ori_));
return (rows == rows_) && (cols == cols_) && (window_size == window_size_) && (depth == depth_) && (K_test)
&& (method == method_);
}
protected:
int rows_, cols_, depth_;
cv::Mat K_, K_ori_;
Mat K_, K_ori_;
int window_size_;
cv::RgbdNormals::RGBD_NORMALS_METHOD method_;
RgbdNormals::RGBD_NORMALS_METHOD method_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -216,11 +213,11 @@ namespace
class FALS: public RgbdNormalsImpl
{
public:
typedef cv::Matx<T, 3, 3> Mat33T;
typedef cv::Vec<T, 9> Vec9T;
typedef cv::Vec<T, 3> Vec3T;
typedef Matx<T, 3, 3> Mat33T;
typedef Vec<T, 9> Vec9T;
typedef Vec<T, 3> Vec3T;
FALS(int rows, int cols, int window_size, int depth, const cv::Mat &K, cv::RgbdNormals::RGBD_NORMALS_METHOD method)
FALS(int rows, int cols, int window_size, int depth, const Mat &K, RgbdNormals::RGBD_NORMALS_METHOD method)
:
RgbdNormalsImpl(rows, cols, window_size, depth, K, method)
{
@@ -235,18 +232,18 @@ namespace
cache()
{
// Compute theta and phi according to equation 3
cv::Mat cos_theta, sin_theta, cos_phi, sin_phi;
Mat cos_theta, sin_theta, cos_phi, sin_phi;
computeThetaPhi<T>(rows_, cols_, K_, cos_theta, sin_theta, cos_phi, sin_phi);
// Compute all the v_i for every points
std::vector<cv::Mat> channels(3);
std::vector<Mat> channels(3);
channels[0] = sin_theta.mul(cos_phi);
channels[1] = sin_phi;
channels[2] = cos_theta.mul(cos_phi);
cv::merge(channels, V_);
merge(channels, V_);
// Compute M
cv::Mat_<Vec9T> M(rows_, cols_);
Mat_<Vec9T> M(rows_, cols_);
Mat33T VVt;
const Vec3T * vec = V_[0];
Vec9T * M_ptr = M[0], *M_ptr_end = M_ptr + rows_ * cols_;
@@ -256,7 +253,7 @@ namespace
*M_ptr = Vec9T(VVt.val);
}
cv::boxFilter(M, M, M.depth(), cv::Size(window_size_, window_size_), cv::Point(-1, -1), false);
boxFilter(M, M, M.depth(), Size(window_size_, window_size_), Point(-1, -1), false);
// Compute M's inverse
Mat33T M_inv;
@@ -265,7 +262,7 @@ namespace
for (M_ptr = &M(0); M_ptr != M_ptr_end; ++M_inv_ptr, ++M_ptr)
{
// We have a semi-definite matrix
cv::invert(Mat33T(M_ptr->val), M_inv, cv::DECOMP_CHOLESKY);
invert(Mat33T(M_ptr->val), M_inv, DECOMP_CHOLESKY);
*M_inv_ptr = Vec9T(M_inv.val);
}
}
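For orientation, the per-pixel least-squares problem that cache() and compute() solve together (a sketch of the FALS formulation from the range-image normals paper by Badino et al. cited in this file; the v_i are the cached viewing directions and the r_i the measured radii inside the box-filter window W):

$$ \min_{\mathbf{n}} \sum_{i \in W} \Big( \mathbf{v}_i^\top \mathbf{n} - \frac{1}{r_i} \Big)^2 \;\Longrightarrow\; \underbrace{\Big( \sum_i \mathbf{v}_i \mathbf{v}_i^\top \Big)}_{M\ \text{(box-filtered } VV^\top)} \mathbf{n} = \underbrace{\sum_i \frac{\mathbf{v}_i}{r_i}}_{B}, \qquad \mathbf{n} = M^{-1} B. $$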
@@ -275,10 +272,10 @@ namespace
* @return
*/
virtual void
compute(const cv::Mat&, const cv::Mat &r, cv::Mat & normals) const
compute(const Mat&, const Mat &r, Mat & normals) const
{
// Compute B
cv::Mat_<Vec3T> B(rows_, cols_);
Mat_<Vec3T> B(rows_, cols_);
const T* row_r = r.ptr < T > (0), *row_r_end = row_r + rows_ * cols_;
const Vec3T *row_V = V_[0];
@@ -292,7 +289,7 @@ namespace
}
// Apply a box filter to B
cv::boxFilter(B, B, B.depth(), cv::Size(window_size_, window_size_), cv::Point(-1, -1), false);
boxFilter(B, B, B.depth(), Size(window_size_, window_size_), Point(-1, -1), false);
// compute the Minv*B products
row_r = r.ptr < T > (0);
@@ -318,10 +315,9 @@ namespace
}
private:
cv::Mat_<Vec3T> V_;
cv::Mat_<Vec9T> M_inv_;
Mat_<Vec3T> V_;
Mat_<Vec9T> M_inv_;
};
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -335,15 +331,13 @@ namespace
*/
template<typename T, typename U>
void
multiply_by_K_inv(const cv::Matx<T, 3, 3> & K_inv, U a, U b, U c, cv::Vec<T, 3> &res)
multiply_by_K_inv(const Matx<T, 3, 3> & K_inv, U a, U b, U c, Vec<T, 3> &res)
{
res[0] = (T)(K_inv(0, 0) * a + K_inv(0, 1) * b + K_inv(0, 2) * c);
res[1] = (T)(K_inv(1, 1) * b + K_inv(1, 2) * c);
res[2] = (T)c;
}
namespace
{
/** Given a depth image, compute the normals as detailed in the LINEMOD paper
* ``Gradient Response Maps for Real-Time Detection of Texture-Less Objects``
* by S. Hinterstoisser, C. Cagniart, S. Ilic, P. Sturm, N. Navab, P. Fua, and V. Lepetit
@@ -352,11 +346,11 @@ namespace
class LINEMOD: public RgbdNormalsImpl
{
public:
typedef cv::Vec<T, 3> Vec3T;
typedef cv::Matx<T, 3, 3> Mat33T;
typedef Vec<T, 3> Vec3T;
typedef Matx<T, 3, 3> Mat33T;
LINEMOD(int rows, int cols, int window_size, int depth, const cv::Mat &K,
cv::RgbdNormals::RGBD_NORMALS_METHOD method)
LINEMOD(int rows, int cols, int window_size, int depth, const Mat &K,
RgbdNormals::RGBD_NORMALS_METHOD method)
:
RgbdNormalsImpl(rows, cols, window_size, depth, K, method)
{
@@ -374,25 +368,25 @@ namespace
* @param normals the output normals
*/
void
compute(const cv::Mat& depth_in, cv::Mat & normals) const
compute(const Mat& depth_in, Mat & normals) const
{
switch (depth_in.depth())
{
case CV_16U:
{
const cv::Mat_<unsigned short> &depth(depth_in);
const Mat_<unsigned short> &depth(depth_in);
computeImpl<unsigned short, long>(depth, normals);
break;
}
case CV_32F:
{
const cv::Mat_<float> &depth(depth_in);
const Mat_<float> &depth(depth_in);
computeImpl<float, float>(depth, normals);
break;
}
case CV_64F:
{
const cv::Mat_<double> &depth(depth_in);
const Mat_<double> &depth(depth_in);
computeImpl<double, double>(depth, normals);
break;
}
@@ -405,8 +399,8 @@ namespace
* @return
*/
template<typename DepthDepth, typename ContainerDepth>
cv::Mat
computeImpl(const cv::Mat_<DepthDepth> &depth, cv::Mat & normals) const
Mat
computeImpl(const Mat_<DepthDepth> &depth, Mat & normals) const
{
const int r = 5; // used to be 7
const int sample_step = r;
@@ -429,7 +423,7 @@ namespace
}
// Define K_inv by hand, just for higher accuracy
Mat33T K_inv = cv::Matx<T, 3, 3>::eye(), K;
Mat33T K_inv = Matx<T, 3, 3>::eye(), K;
K_.copyTo(K);
K_inv(0, 0) = 1.0f / K(0, 0);
K_inv(0, 1) = -K(0, 1) / (K(0, 0) * K(1, 1));
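The remaining hand-written entries fall outside this hunk; for reference, they implement the closed-form inverse of an upper-triangular intrinsic matrix (a sketch in the standard naming):

$$ K = \begin{pmatrix} f_x & s & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{pmatrix}, \qquad K^{-1} = \begin{pmatrix} 1/f_x & -s/(f_x f_y) & (s\,c_y - c_x f_y)/(f_x f_y) \\ 0 & 1/f_y & -c_y/f_y \\ 0 & 0 & 1 \end{pmatrix}. $$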
@@ -493,13 +487,9 @@ namespace
return normals;
}
};
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace
{
/** Given a set of 3d points in a depth image, compute the normals at each point
* using the SRI method described in
* ``Fast and Accurate Computation of Surface Normals from Range Images``
@@ -509,11 +499,11 @@ namespace
class SRI: public RgbdNormalsImpl
{
public:
typedef cv::Matx<T, 3, 3> Mat33T;
typedef cv::Vec<T, 9> Vec9T;
typedef cv::Vec<T, 3> Vec3T;
typedef Matx<T, 3, 3> Mat33T;
typedef Vec<T, 9> Vec9T;
typedef Vec<T, 3> Vec3T;
SRI(int rows, int cols, int window_size, int depth, const cv::Mat &K, cv::RgbdNormals::RGBD_NORMALS_METHOD method)
SRI(int rows, int cols, int window_size, int depth, const Mat &K, RgbdNormals::RGBD_NORMALS_METHOD method)
:
RgbdNormalsImpl(rows, cols, window_size, depth, K, method),
phi_step_(0),
@@ -526,7 +516,7 @@ namespace
virtual void
cache()
{
cv::Mat_<T> cos_theta, sin_theta, cos_phi, sin_phi;
Mat_<T> cos_theta, sin_theta, cos_phi, sin_phi;
computeThetaPhi<T>(rows_, cols_, K_, cos_theta, sin_theta, cos_phi, sin_phi);
// Create the derivative kernels
@@ -537,7 +527,7 @@ namespace
float min_theta = (float)std::asin(sin_theta(0, 0)), max_theta = (float)std::asin(sin_theta(0, cols_ - 1));
float min_phi = (float)std::asin(sin_phi(0, cols_/2-1)), max_phi = (float) std::asin(sin_phi(rows_ - 1, cols_/2-1));
std::vector<cv::Point3f> points3d(cols_ * rows_);
std::vector<Point3f> points3d(cols_ * rows_);
R_hat_.create(rows_, cols_);
phi_step_ = float(max_phi - min_phi) / (rows_ - 1);
theta_step_ = float(max_theta - min_theta) / (cols_ - 1);
@@ -548,13 +538,13 @@ namespace
{
float theta = min_theta + theta_int * theta_step_;
// Store the 3d point to project it later
points3d[k] = cv::Point3f(std::sin(theta) * std::cos(phi), std::sin(phi), std::cos(theta) * std::cos(phi));
points3d[k] = Point3f(std::sin(theta) * std::cos(phi), std::sin(phi), std::cos(theta) * std::cos(phi));
// Cache the rotation matrix and negate it
cv::Mat_<T> mat =
(cv::Mat_ < T > (3, 3) << 0, 1, 0, 0, 0, 1, 1, 0, 0) * ((cv::Mat_ < T > (3, 3) << std::cos(theta), -std::sin(
Mat_<T> mat =
(Mat_ < T > (3, 3) << 0, 1, 0, 0, 0, 1, 1, 0, 0) * ((Mat_ < T > (3, 3) << std::cos(theta), -std::sin(
theta), 0, std::sin(theta), std::cos(theta), 0, 0, 0, 1))
* ((cv::Mat_ < T > (3, 3) << std::cos(phi), 0, -std::sin(phi), 0, 1, 0, std::sin(phi), 0, std::cos(phi)));
* ((Mat_ < T > (3, 3) << std::cos(phi), 0, -std::sin(phi), 0, 1, 0, std::sin(phi), 0, std::cos(phi)));
for (unsigned char i = 0; i < 3; ++i)
mat(i, 1) = mat(i, 1) / std::cos(phi);
// The second part of the matrix is never explained in the paper ... but look at the wikipedia normal article
@ -567,9 +557,9 @@ namespace
}
map_.create(rows_, cols_);
cv::projectPoints(points3d, cv::Mat(3,1,CV_32FC1,cv::Scalar::all(0.0f)), cv::Mat(3,1,CV_32FC1,cv::Scalar::all(0.0f)), K_, cv::Mat(), map_);
projectPoints(points3d, Mat(3,1,CV_32FC1,Scalar::all(0.0f)), Mat(3,1,CV_32FC1,Scalar::all(0.0f)), K_, Mat(), map_);
map_ = map_.reshape(2, rows_);
cv::convertMaps(map_, cv::Mat(), xy_, fxy_, CV_16SC2);
convertMaps(map_, Mat(), xy_, fxy_, CV_16SC2);
//map for converting from Spherical coordinate space to Euclidean space
euclideanMap_.create(rows_,cols_);
@ -584,11 +574,11 @@ namespace
float theta = std::atan(x);
float phi = std::asin(y/std::sqrt(x*x+y*y+1.0f));
euclideanMap_(i,j) = cv::Vec2f((theta-min_theta)/theta_step_,(phi-min_phi)/phi_step_);
euclideanMap_(i,j) = Vec2f((theta-min_theta)/theta_step_,(phi-min_phi)/phi_step_);
}
}
//convert the map to 2 maps in short format to speed up the remap function
cv::convertMaps(euclideanMap_, cv::Mat(), invxy_, invfxy_, CV_16SC2);
convertMaps(euclideanMap_, Mat(), invxy_, invfxy_, CV_16SC2);
// Update the kernels: the steps are due to the fact that derivatives will be computed on a grid where
// the step is not 1. Only need to do it on one dimension as it computes derivatives in only one direction
@ -601,10 +591,10 @@ namespace
* @return
*/
virtual void
compute(const cv::Mat& points3d, const cv::Mat &r, cv::Mat & normals) const
compute(const Mat& points3d, const Mat &r, Mat & normals) const
{
const cv::Mat_<T>& r_T(r);
const cv::Mat_<Vec3T> &points3d_T(points3d);
const Mat_<T>& r_T(r);
const Mat_<Vec3T> &points3d_T(points3d);
compute(points3d_T, r_T, normals);
}
@ -613,23 +603,23 @@ namespace
* @return
*/
void
compute(const cv::Mat_<Vec3T> &, const cv::Mat_<T> &r_non_interp, cv::Mat & normals_out) const
compute(const Mat_<Vec3T> &, const Mat_<T> &r_non_interp, Mat & normals_out) const
{
// Interpolate the radial image to make derivatives meaningful
cv::Mat_<T> r;
Mat_<T> r;
// higher quality remapping does not help here
cv::remap(r_non_interp, r, xy_, fxy_, CV_INTER_LINEAR);
remap(r_non_interp, r, xy_, fxy_, INTER_LINEAR);
// Compute the derivatives with respect to theta and phi
// TODO add bilateral filtering (as done in kinfu)
cv::Mat_<T> r_theta, r_phi;
cv::sepFilter2D(r, r_theta, r.depth(), kx_dx_, ky_dx_);
Mat_<T> r_theta, r_phi;
sepFilter2D(r, r_theta, r.depth(), kx_dx_, ky_dx_);
//the current OpenCV version sometimes corrupts the r matrix after the second call of sepFilter2D;
//it depends on the resolution, so be careful
cv::sepFilter2D(r, r_phi, r.depth(), kx_dy_, ky_dy_);
sepFilter2D(r, r_phi, r.depth(), kx_dy_, ky_dy_);
// Fill the result matrix
cv::Mat_<Vec3T> normals(rows_, cols_);
Mat_<Vec3T> normals(rows_, cols_);
const T* r_theta_ptr = r_theta[0], *r_theta_ptr_end = r_theta_ptr + rows_ * cols_;
const T* r_phi_ptr = r_phi[0];
@ -655,7 +645,7 @@ namespace
}
}
cv::remap(normals, normals_out, invxy_, invfxy_, cv::INTER_LINEAR);
remap(normals, normals_out, invxy_, invfxy_, INTER_LINEAR);
normal = normals_out.ptr<Vec3T>(0);
Vec3T * normal_end = normal + rows_ * cols_;
for (; normal != normal_end; ++normal)
@ -663,24 +653,21 @@ namespace
}
private:
/** Stores R */
cv::Mat_<Vec9T> R_hat_;
Mat_<Vec9T> R_hat_;
float phi_step_, theta_step_;
/** Derivative kernels */
cv::Mat kx_dx_, ky_dx_, kx_dy_, ky_dy_;
Mat kx_dx_, ky_dx_, kx_dy_, ky_dy_;
/** mapping function to get an SRI image */
cv::Mat_<cv::Vec2f> map_;
cv::Mat xy_, fxy_;
Mat_<Vec2f> map_;
Mat xy_, fxy_;
cv::Mat_<cv::Vec2f> euclideanMap_;
cv::Mat invxy_, invfxy_;
Mat_<Vec2f> euclideanMap_;
Mat invxy_, invfxy_;
};
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cv
{
/** Default constructor of the Algorithm class that computes normals
*/
RgbdNormals::RgbdNormals(int rows, int cols, int depth, InputArray K_in, int window_size, int method_in)
@ -740,7 +727,7 @@ namespace cv
}
void
RgbdNormals::initialize_normals_impl(int rows, int cols, int depth, const cv::Mat & K, int window_size,
RgbdNormals::initialize_normals_impl(int rows, int cols, int depth, const Mat & K, int window_size,
int method_in) const
{
CV_Assert(rows > 0 && cols > 0 && (depth == CV_32F || depth == CV_64F));
@ -802,7 +789,7 @@ namespace cv
void
RgbdNormals::operator()(InputArray points3d_in, OutputArray normals_out) const
{
cv::Mat points3d_ori = points3d_in.getMat();
Mat points3d_ori = points3d_in.getMat();
CV_Assert(points3d_ori.dims == 2);
// Either we have 3d points or a depth image
@ -831,7 +818,7 @@ namespace cv
initialize();
// Precompute something for RGBD_NORMALS_METHOD_SRI and RGBD_NORMALS_METHOD_FALS
cv::Mat points3d, radius;
Mat points3d, radius;
if ((method_ == RGBD_NORMALS_METHOD_SRI) || (method_ == RGBD_NORMALS_METHOD_FALS))
{
// Make the points have the right depth
@ -852,7 +839,7 @@ namespace cv
if (points3d_in.empty())
return;
cv::Mat normals = normals_out.getMat();
Mat normals = normals_out.getMat();
switch (method_)
{
case (RGBD_NORMALS_METHOD_FALS):
@ -866,11 +853,11 @@ namespace cv
case RGBD_NORMALS_METHOD_LINEMOD:
{
// Only focus on the depth image for LINEMOD
cv::Mat depth;
Mat depth;
if (points3d_ori.channels() == 3)
{
std::vector<cv::Mat> channels;
cv::split(points3d, channels);
std::vector<Mat> channels;
split(points3d, channels);
depth = channels[2];
}
else
@ -893,3 +880,5 @@ namespace cv
}
}
}
}

File diff suppressed because one or more lines are too long

@ -33,16 +33,7 @@
*
*/
#include <opencv2/core.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/rgbd.hpp>
#include <iostream>
#include <limits>
#include "precomp.hpp"
#if defined(HAVE_EIGEN) && EIGEN_WORLD_VERSION == 3
#define HAVE_EIGEN3_HERE
@ -51,7 +42,10 @@
#include <Eigen/Dense>
#endif
using namespace cv;
namespace cv
{
namespace rgbd
{
enum
{
@ -97,20 +91,20 @@ static inline
void checkImage(const Mat& image)
{
if(image.empty())
CV_Error(CV_StsBadSize, "Image is empty.");
CV_Error(Error::StsBadSize, "Image is empty.");
if(image.type() != CV_8UC1)
CV_Error(CV_StsBadSize, "Image type has to be CV_8UC1.");
CV_Error(Error::StsBadSize, "Image type has to be CV_8UC1.");
}
static inline
void checkDepth(const Mat& depth, const Size& imageSize)
{
if(depth.empty())
CV_Error(CV_StsBadSize, "Depth is empty.");
CV_Error(Error::StsBadSize, "Depth is empty.");
if(depth.size() != imageSize)
CV_Error(CV_StsBadSize, "Depth has to have the size equal to the image size.");
CV_Error(Error::StsBadSize, "Depth has to have the size equal to the image size.");
if(depth.type() != CV_32FC1)
CV_Error(CV_StsBadSize, "Depth type has to be CV_32FC1.");
CV_Error(Error::StsBadSize, "Depth type has to be CV_32FC1.");
}
static inline
@ -119,9 +113,9 @@ void checkMask(const Mat& mask, const Size& imageSize)
if(!mask.empty())
{
if(mask.size() != imageSize)
CV_Error(CV_StsBadSize, "Mask has to have the size equal to the image size.");
CV_Error(Error::StsBadSize, "Mask has to have the size equal to the image size.");
if(mask.type() != CV_8UC1)
CV_Error(CV_StsBadSize, "Mask type has to be CV_8UC1.");
CV_Error(Error::StsBadSize, "Mask type has to be CV_8UC1.");
}
}
@ -129,9 +123,9 @@ static inline
void checkNormals(const Mat& normals, const Size& depthSize)
{
if(normals.size() != depthSize)
CV_Error(CV_StsBadSize, "Normals has to have the size equal to the depth size.");
CV_Error(Error::StsBadSize, "Normals has to have the size equal to the depth size.");
if(normals.type() != CV_32FC3)
CV_Error(CV_StsBadSize, "Normals type has to be CV_32FC3.");
CV_Error(Error::StsBadSize, "Normals type has to be CV_32FC3.");
}
static
@ -140,7 +134,7 @@ void preparePyramidImage(const Mat& image, std::vector<Mat>& pyramidImage, size_
if(!pyramidImage.empty())
{
if(pyramidImage.size() < levelCount)
CV_Error(CV_StsBadSize, "Levels count of pyramidImage has to be equal or less than size of iterCounts.");
CV_Error(Error::StsBadSize, "Levels count of pyramidImage has to be equal or less than size of iterCounts.");
CV_Assert(pyramidImage[0].size() == image.size());
for(size_t i = 0; i < pyramidImage.size(); i++)
@ -156,7 +150,7 @@ void preparePyramidDepth(const Mat& depth, std::vector<Mat>& pyramidDepth, size_
if(!pyramidDepth.empty())
{
if(pyramidDepth.size() < levelCount)
CV_Error(CV_StsBadSize, "Levels count of pyramidDepth has to be equal or less than size of iterCounts.");
CV_Error(Error::StsBadSize, "Levels count of pyramidDepth has to be equal or less than size of iterCounts.");
CV_Assert(pyramidDepth[0].size() == depth.size());
for(size_t i = 0; i < pyramidDepth.size(); i++)
@ -176,7 +170,7 @@ void preparePyramidMask(const Mat& mask, const std::vector<Mat>& pyramidDepth, f
if(!pyramidMask.empty())
{
if(pyramidMask.size() != pyramidDepth.size())
CV_Error(CV_StsBadSize, "Levels count of pyramidMask has to be equal to size of pyramidDepth.");
CV_Error(Error::StsBadSize, "Levels count of pyramidMask has to be equal to size of pyramidDepth.");
for(size_t i = 0; i < pyramidMask.size(); i++)
{
@ -227,7 +221,7 @@ void preparePyramidCloud(const std::vector<Mat>& pyramidDepth, const Mat& camera
if(!pyramidCloud.empty())
{
if(pyramidCloud.size() != pyramidDepth.size())
CV_Error(CV_StsBadSize, "Incorrect size of pyramidCloud.");
CV_Error(Error::StsBadSize, "Incorrect size of pyramidCloud.");
for(size_t i = 0; i < pyramidDepth.size(); i++)
{
@ -256,7 +250,7 @@ void preparePyramidSobel(const std::vector<Mat>& pyramidImage, int dx, int dy, s
if(!pyramidSobel.empty())
{
if(pyramidSobel.size() != pyramidImage.size())
CV_Error(CV_StsBadSize, "Incorrect size of pyramidSobel.");
CV_Error(Error::StsBadSize, "Incorrect size of pyramidSobel.");
for(size_t i = 0; i < pyramidSobel.size(); i++)
{
@ -309,7 +303,7 @@ void preparePyramidTexturedMask(const std::vector<Mat>& pyramid_dI_dx, const std
if(!pyramidTexturedMask.empty())
{
if(pyramidTexturedMask.size() != pyramid_dI_dx.size())
CV_Error(CV_StsBadSize, "Incorrect size of pyramidTexturedMask.");
CV_Error(Error::StsBadSize, "Incorrect size of pyramidTexturedMask.");
for(size_t i = 0; i < pyramidTexturedMask.size(); i++)
{
@ -354,7 +348,7 @@ void preparePyramidNormals(const Mat& normals, const std::vector<Mat>& pyramidDe
if(!pyramidNormals.empty())
{
if(pyramidNormals.size() != pyramidDepth.size())
CV_Error(CV_StsBadSize, "Incorrect size of pyramidNormals.");
CV_Error(Error::StsBadSize, "Incorrect size of pyramidNormals.");
for(size_t i = 0; i < pyramidNormals.size(); i++)
{
@ -389,7 +383,7 @@ void preparePyramidNormalsMask(const std::vector<Mat>& pyramidNormals, const std
if(!pyramidNormalsMask.empty())
{
if(pyramidNormalsMask.size() != pyramidMask.size())
CV_Error(CV_StsBadSize, "Incorrect size of pyramidNormalsMask.");
CV_Error(Error::StsBadSize, "Incorrect size of pyramidNormalsMask.");
for(size_t i = 0; i < pyramidNormalsMask.size(); i++)
{
@ -786,12 +780,12 @@ void calcICPLsmMatrices(const Mat& cloud0, const Mat& Rt,
static
bool solveSystem(const Mat& AtA, const Mat& AtB, double detThreshold, Mat& x)
{
double det = cv::determinant(AtA);
double det = determinant(AtA);
if(fabs (det) < detThreshold || cvIsNaN(det) || cvIsInf(det))
return false;
cv::solve(AtA, AtB, x, DECOMP_CHOLESKY);
solve(AtA, AtB, x, DECOMP_CHOLESKY);
return true;
}
@ -813,7 +807,7 @@ static
bool RGBDICPOdometryImpl(Mat& Rt, const Mat& initRt,
const Ptr<OdometryFrame>& srcFrame,
const Ptr<OdometryFrame>& dstFrame,
const cv::Mat& cameraMatrix,
const Mat& cameraMatrix,
float maxDepthDiff, const std::vector<int>& iterCounts,
double maxTranslation, double maxRotation,
int method, int transfromType)
@ -839,7 +833,7 @@ bool RGBDICPOdometryImpl(Mat& Rt, const Mat& initRt,
icpEquationFuncPtr = calcICPEquationCoeffsTranslation;
break;
default:
CV_Error(CV_StsBadArg, "Incorrect transformation type");
CV_Error(Error::StsBadArg, "Incorrect transformation type");
}
const int minOverdetermScale = 20;
@ -945,7 +939,7 @@ bool RGBDICPOdometryImpl(Mat& Rt, const Mat& initRt,
template<class ImageElemType>
static void
warpFrameImpl(const cv::Mat& image, const Mat& depth, const Mat& mask,
warpFrameImpl(const Mat& image, const Mat& depth, const Mat& mask,
const Mat& Rt, const Mat& cameraMatrix, const Mat& distCoeff,
Mat& warpedImage, Mat* warpedDepth, Mat* warpedMask)
{
@ -996,9 +990,6 @@ warpFrameImpl(const cv::Mat& image, const Mat& depth, const Mat& mask,
///////////////////////////////////////////////////////////////////////////////////////////////
namespace cv
{
RgbdFrame::RgbdFrame() : ID(-1)
{}
@ -1065,7 +1056,7 @@ bool Odometry::compute(Ptr<OdometryFrame>& srcFrame, Ptr<OdometryFrame>& dstFram
Size dstSize = prepareFrameCache(dstFrame, OdometryFrame::CACHE_DST);
if(srcSize != dstSize)
CV_Error(CV_StsBadSize, "srcFrame and dstFrame have to have the same size (resolution).");
CV_Error(Error::StsBadSize, "srcFrame and dstFrame have to have the same size (resolution).");
return computeImpl(srcFrame, dstFrame, Rt, initRt);
}
@ -1073,7 +1064,7 @@ bool Odometry::compute(Ptr<OdometryFrame>& srcFrame, Ptr<OdometryFrame>& dstFram
Size Odometry::prepareFrameCache(Ptr<OdometryFrame> &frame, int /*cacheType*/) const
{
if(frame == 0)
CV_Error(CV_StsBadArg, "Null frame pointer.\n");
CV_Error(Error::StsBadArg, "Null frame pointer.\n");
return Size();
}
@ -1122,7 +1113,7 @@ Size RgbdOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) c
if(!frame->pyramidImage.empty())
frame->image = frame->pyramidImage[0];
else
CV_Error(CV_StsBadSize, "Image or pyramidImage have to be set.");
CV_Error(Error::StsBadSize, "Image or pyramidImage have to be set.");
}
checkImage(frame->image);
@ -1134,11 +1125,11 @@ Size RgbdOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) c
{
Mat cloud = frame->pyramidCloud[0];
std::vector<Mat> xyz;
cv::split(cloud, xyz);
split(cloud, xyz);
frame->depth = xyz[2];
}
else
CV_Error(CV_StsBadSize, "Depth or pyramidDepth or pyramidCloud have to be set.");
CV_Error(Error::StsBadSize, "Depth or pyramidDepth or pyramidCloud have to be set.");
}
checkDepth(frame->depth, frame->image.size());
@ -1213,11 +1204,11 @@ Size ICPOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) co
{
Mat cloud = frame->pyramidCloud[0];
std::vector<Mat> xyz;
cv::split(cloud, xyz);
split(cloud, xyz);
frame->depth = xyz[2];
}
else
CV_Error(CV_StsBadSize, "Depth or pyramidDepth or pyramidCloud have to be set.");
CV_Error(Error::StsBadSize, "Depth or pyramidDepth or pyramidCloud have to be set.");
}
checkDepth(frame->depth, frame->depth.size());
@ -1240,8 +1231,8 @@ Size ICPOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) co
if(normalsComputer.empty() ||
normalsComputer->get<int>("rows") != frame->depth.rows ||
normalsComputer->get<int>("cols") != frame->depth.cols ||
cv::norm(normalsComputer->get<Mat>("K"), cameraMatrix) > FLT_EPSILON)
normalsComputer = cv::Ptr<cv::RgbdNormals>(new RgbdNormals(frame->depth.rows, frame->depth.cols, frame->depth.depth(), cameraMatrix, normalWinSize, normalMethod));
norm(normalsComputer->get<Mat>("K"), cameraMatrix) > FLT_EPSILON)
normalsComputer = Ptr<RgbdNormals>(new RgbdNormals(frame->depth.rows, frame->depth.cols, frame->depth.depth(), cameraMatrix, normalWinSize, normalMethod));
(*normalsComputer)(frame->pyramidCloud[0], frame->normals);
}
@ -1308,7 +1299,7 @@ Size RgbdICPOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType
if(!frame->pyramidImage.empty())
frame->image = frame->pyramidImage[0];
else
CV_Error(CV_StsBadSize, "Image or pyramidImage have to be set.");
CV_Error(Error::StsBadSize, "Image or pyramidImage have to be set.");
}
checkImage(frame->image);
@ -1320,11 +1311,11 @@ Size RgbdICPOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType
{
Mat cloud = frame->pyramidCloud[0];
std::vector<Mat> xyz;
cv::split(cloud, xyz);
split(cloud, xyz);
frame->depth = xyz[2];
}
else
CV_Error(CV_StsBadSize, "Depth or pyramidDepth or pyramidCloud have to be set.");
CV_Error(Error::StsBadSize, "Depth or pyramidDepth or pyramidCloud have to be set.");
}
checkDepth(frame->depth, frame->image.size());
@ -1349,8 +1340,8 @@ Size RgbdICPOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType
if(normalsComputer.empty() ||
normalsComputer->get<int>("rows") != frame->depth.rows ||
normalsComputer->get<int>("cols") != frame->depth.cols ||
cv::norm(normalsComputer->get<Mat>("K"), cameraMatrix) > FLT_EPSILON)
normalsComputer = cv::Ptr<cv::RgbdNormals>(new RgbdNormals(frame->depth.rows, frame->depth.cols, frame->depth.depth(), cameraMatrix, normalWinSize, normalMethod));
norm(normalsComputer->get<Mat>("K"), cameraMatrix) > FLT_EPSILON)
normalsComputer = Ptr<RgbdNormals>(new RgbdNormals(frame->depth.rows, frame->depth.cols, frame->depth.depth(), cameraMatrix, normalWinSize, normalMethod));
(*normalsComputer)(frame->pyramidCloud[0], frame->normals);
}
@ -1401,6 +1392,7 @@ warpFrame(const Mat& image, const Mat& depth, const Mat& mask,
else if(image.type() == CV_8UC3)
warpFrameImpl<Point3_<uchar> >(image, depth, mask, Rt, cameraMatrix, distCoeff, warpedImage, warpedDepth, warpedMask);
else
CV_Error(CV_StsBadArg, "Image has to be type of CV_8UC1 or CV_8UC3");
CV_Error(Error::StsBadArg, "Image has to be type of CV_8UC1 or CV_8UC3");
}
}
} // namespace cv

@ -43,22 +43,23 @@
* Houxiang Zhang and Hans Petter Hildre
*/
#include <list>
#include <set>
#include <opencv2/rgbd.hpp>
#include "precomp.hpp"
namespace cv
{
namespace rgbd
{
/** Structure defining a plane. The notations are from the second paper */
class PlaneBase
{
public:
PlaneBase(const cv::Vec3f & m, const cv::Vec3f &n_in, int index)
PlaneBase(const Vec3f & m, const Vec3f &n_in, int index)
:
index_(index),
n_(n_in),
m_sum_(cv::Vec3f(0, 0, 0)),
m_sum_(Vec3f(0, 0, 0)),
m_(m),
Q_(cv::Matx33f::zeros()),
Q_(Matx33f::zeros()),
mse_(0),
K_(0)
{
@ -77,7 +78,7 @@ public:
*/
virtual
float
distance(const cv::Vec3f& p_j) const = 0;
distance(const Vec3f& p_j) const = 0;
/** The d coefficient in the plane equation ax+by+cz+d = 0
* @return
@ -91,7 +92,7 @@ public:
/** The normal to the plane
* @return the normal to the plane
*/
const cv::Vec3f &
const Vec3f &
n() const
{
return n_;
@ -106,11 +107,11 @@ public:
return;
m_ = m_sum_ / K_;
// Compute C
cv::Matx33f C = Q_ - m_sum_ * m_.t();
Matx33f C = Q_ - m_sum_ * m_.t();
// Compute n
cv::SVD svd(C);
n_ = cv::Vec3f(svd.vt.at<float>(2, 0), svd.vt.at<float>(2, 1), svd.vt.at<float>(2, 2));
SVD svd(C);
n_ = Vec3f(svd.vt.at<float>(2, 0), svd.vt.at<float>(2, 1), svd.vt.at<float>(2, 2));
mse_ = svd.w.at<float>(2) / K_;
UpdateD();
@ -119,7 +120,7 @@ public:
/** Update the different sum of point and sum of point*point.t()
*/
void
UpdateStatistics(const cv::Vec3f & point, const cv::Matx33f & Q_local)
UpdateStatistics(const Vec3f & point, const Matx33f & Q_local)
{
m_sum_ += point;
Q_ += Q_local;
@ -143,7 +144,7 @@ protected:
/** The 4th coefficient in the plane equation ax+by+cz+d = 0 */
float d_;
/** Normal of the plane */
cv::Vec3f n_;
Vec3f n_;
private:
inline void
UpdateD()
@ -151,13 +152,13 @@ private:
d_ = -m_.dot(n_);
}
/** The sum of the points */
cv::Vec3f m_sum_;
Vec3f m_sum_;
/** The mean of the points */
cv::Vec3f m_;
Vec3f m_;
/** The sum of pi * pi^\top */
cv::Matx33f Q_;
Matx33f Q_;
/** The different matrices we need to update */
cv::Matx33f C_;
Matx33f C_;
float mse_;
/** the number of points that form the plane */
int K_;
@ -170,7 +171,7 @@ private:
class Plane: public PlaneBase
{
public:
Plane(const cv::Vec3f & m, const cv::Vec3f &n_in, int index)
Plane(const Vec3f & m, const Vec3f &n_in, int index)
:
PlaneBase(m, n_in, index)
{
@ -181,7 +182,7 @@ public:
* @return
*/
float
distance(const cv::Vec3f& p_j) const
distance(const Vec3f& p_j) const
{
return std::abs(float(p_j.dot(n_) + d_));
}
@ -192,7 +193,7 @@ public:
class PlaneABC: public PlaneBase
{
public:
PlaneABC(const cv::Vec3f & m, const cv::Vec3f &n_in, int index, float sensor_error_a, float sensor_error_b,
PlaneABC(const Vec3f & m, const Vec3f &n_in, int index, float sensor_error_a, float sensor_error_b,
float sensor_error_c)
:
PlaneBase(m, n_in, index),
@ -205,7 +206,7 @@ public:
/** The distance is now computed by taking the sensor error into account */
inline
float
distance(const cv::Vec3f& p_j) const
distance(const Vec3f& p_j) const
{
float cst = p_j.dot(n_) + d_;
float err = sensor_error_a_ * p_j[2] * p_j[2] + sensor_error_b_ * p_j[2] + sensor_error_c_;
@ -226,7 +227,7 @@ private:
class PlaneGrid
{
public:
PlaneGrid(const cv::Mat_<cv::Vec3f> & points3d, int block_size)
PlaneGrid(const Mat_<Vec3f> & points3d, int block_size)
:
block_size_(block_size)
{
@ -248,15 +249,15 @@ public:
for (int x = 0; x < mini_cols; ++x)
{
// Update the tiles
cv::Matx33f Q = cv::Matx33f::zeros();
cv::Vec3f m = cv::Vec3f(0, 0, 0);
Matx33f Q = Matx33f::zeros();
Vec3f m = Vec3f(0, 0, 0);
int K = 0;
for (int j = y * block_size; j < std::min((y + 1) * block_size, points3d.rows); ++j)
{
const cv::Vec3f * vec = points3d.ptr < cv::Vec3f > (j, x * block_size), *vec_end;
float * pointpointt = reinterpret_cast<float*>(Q_.ptr < cv::Vec<float, 9> > (j, x * block_size));
const Vec3f * vec = points3d.ptr < Vec3f > (j, x * block_size), *vec_end;
float * pointpointt = reinterpret_cast<float*>(Q_.ptr < Vec<float, 9> > (j, x * block_size));
if (x == mini_cols - 1)
vec_end = points3d.ptr < cv::Vec3f > (j, points3d.cols - 1) + 1;
vec_end = points3d.ptr < Vec3f > (j, points3d.cols - 1) + 1;
else
vec_end = vec + block_size;
for (; vec != vec_end; ++vec, pointpointt += 9)
@ -274,7 +275,7 @@ public:
*(pointpointt + 7) = *(pointpointt + 5);
*(pointpointt + 8) = vec->val[2] * vec->val[2];
Q += *reinterpret_cast<cv::Matx33f*>(pointpointt);
Q += *reinterpret_cast<Matx33f*>(pointpointt);
m += (*vec);
++K;
}
@ -289,21 +290,21 @@ public:
m_(y, x) = m;
// Compute C
cv::Matx33f C = Q - K * m * m.t();
Matx33f C = Q - K * m * m.t();
// Compute n
cv::SVD svd(C);
n_(y, x) = cv::Vec3f(svd.vt.at<float>(2, 0), svd.vt.at<float>(2, 1), svd.vt.at<float>(2, 2));
SVD svd(C);
n_(y, x) = Vec3f(svd.vt.at<float>(2, 0), svd.vt.at<float>(2, 1), svd.vt.at<float>(2, 2));
mse_(y, x) = svd.w.at<float>(2) / K;
}
}
/** The size of the block */
int block_size_;
cv::Mat_<cv::Vec3f> m_;
cv::Mat_<cv::Vec3f> n_;
cv::Mat_<cv::Vec<float, 9> > Q_;
cv::Mat_<float> mse_;
Mat_<Vec3f> m_;
Mat_<Vec3f> n_;
Mat_<Vec<float, 9> > Q_;
Mat_<float> mse_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -334,7 +335,7 @@ public:
TileQueue(const PlaneGrid &plane_grid)
{
done_tiles_ = cv::Mat_<unsigned char>::zeros(plane_grid.mse_.rows, plane_grid.mse_.cols);
done_tiles_ = Mat_<unsigned char>::zeros(plane_grid.mse_.rows, plane_grid.mse_.cols);
tiles_.clear();
for (int y = 0; y < plane_grid.mse_.rows; ++y)
for (int x = 0; x < plane_grid.mse_.cols; ++x)
@ -374,7 +375,7 @@ private:
/** The list of tiles ordered from most planar to least */
std::list<PlaneTile> tiles_;
/** contains 1 when the tile has been studied, 0 otherwise */
cv::Mat_<unsigned char> done_tiles_;
Mat_<unsigned char> done_tiles_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -382,7 +383,7 @@ private:
class InlierFinder
{
public:
InlierFinder(float err, const cv::Mat_<cv::Vec3f> & points3d, const cv::Mat_<cv::Vec3f> & normals,
InlierFinder(float err, const Mat_<Vec3f> & points3d, const Mat_<Vec3f> & normals,
unsigned char plane_index, int block_size)
:
err_(err),
@ -394,39 +395,39 @@ public:
}
void
Find(const PlaneGrid &plane_grid, cv::Ptr<PlaneBase> & plane, TileQueue & tile_queue,
std::set<TileQueue::PlaneTile> & neighboring_tiles, cv::Mat_<unsigned char> & overall_mask,
cv::Mat_<unsigned char> & plane_mask)
Find(const PlaneGrid &plane_grid, Ptr<PlaneBase> & plane, TileQueue & tile_queue,
std::set<TileQueue::PlaneTile> & neighboring_tiles, Mat_<unsigned char> & overall_mask,
Mat_<unsigned char> & plane_mask)
{
// Do not use a reference as we pop it from the set later on
TileQueue::PlaneTile tile = *(neighboring_tiles.begin());
// Figure the part of the image to look at
cv::Range range_x, range_y;
Range range_x, range_y;
int x = tile.x_ * block_size_, y = tile.y_ * block_size_;
if (tile.x_ == plane_mask.cols - 1)
range_x = cv::Range(x, overall_mask.cols);
range_x = Range(x, overall_mask.cols);
else
range_x = cv::Range(x, x + block_size_);
range_x = Range(x, x + block_size_);
if (tile.y_ == plane_mask.rows - 1)
range_y = cv::Range(y, overall_mask.rows);
range_y = Range(y, overall_mask.rows);
else
range_y = cv::Range(y, y + block_size_);
range_y = Range(y, y + block_size_);
int n_valid_points = 0;
for (int yy = range_y.start; yy != range_y.end; ++yy)
{
uchar* data = overall_mask.ptr(yy, range_x.start), *data_end = data + range_x.size();
const cv::Vec3f* point = points3d_.ptr < cv::Vec3f > (yy, range_x.start);
const cv::Matx33f* Q_local = reinterpret_cast<const cv::Matx33f *>(plane_grid.Q_.ptr < cv::Vec<float, 9>
const Vec3f* point = points3d_.ptr < Vec3f > (yy, range_x.start);
const Matx33f* Q_local = reinterpret_cast<const Matx33f *>(plane_grid.Q_.ptr < Vec<float, 9>
> (yy, range_x.start));
// Depending on whether you have a normal, check it
if (!normals_.empty())
{
const cv::Vec3f* normal = normals_.ptr < cv::Vec3f > (yy, range_x.start);
const Vec3f* normal = normals_.ptr < Vec3f > (yy, range_x.start);
for (; data != data_end; ++data, ++point, ++normal, ++Q_local)
{
// Don't do anything if the point already belongs to another plane
@ -518,31 +519,28 @@ public:
private:
float err_;
const cv::Mat_<cv::Vec3f> & points3d_;
const cv::Mat_<cv::Vec3f> & normals_;
const Mat_<Vec3f> & points3d_;
const Mat_<Vec3f> & normals_;
unsigned char plane_index_;
/** The block size as defined in the main algorithm */
int block_size_;
const InlierFinder & operator = (const InlierFinder &);
}
;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cv
{
void
RgbdPlane::operator()(InputArray points3d_in, OutputArray mask_out, OutputArray plane_coefficients)
{
this->operator()(points3d_in, cv::Mat(), mask_out, plane_coefficients);
this->operator()(points3d_in, Mat(), mask_out, plane_coefficients);
}
void
RgbdPlane::operator()(InputArray points3d_in, InputArray normals_in, OutputArray mask_out,
OutputArray plane_coefficients_out)
{
cv::Mat_<cv::Vec3f> points3d, normals;
Mat_<Vec3f> points3d, normals;
if (points3d_in.depth() == CV_32F)
points3d = points3d_in.getMat();
else
@ -557,14 +555,14 @@ namespace cv
// Pre-computations
mask_out.create(points3d.size(), CV_8U);
cv::Mat mask_out_mat = mask_out.getMat();
cv::Mat_<unsigned char> mask_out_uc = (cv::Mat_<unsigned char>&) mask_out_mat;
Mat mask_out_mat = mask_out.getMat();
Mat_<unsigned char> mask_out_uc = (Mat_<unsigned char>&) mask_out_mat;
mask_out_uc.setTo(255);
PlaneGrid plane_grid(points3d, block_size_);
TileQueue plane_queue(plane_grid);
size_t index_plane = 0;
std::vector<cv::Vec4f> plane_coefficients;
std::vector<Vec4f> plane_coefficients;
float mse_min = (float)(threshold_ * threshold_);
while (!plane_queue.empty())
@ -578,15 +576,15 @@ namespace cv
// Construct the plane for the first tile
int x = front_tile.x_, y = front_tile.y_;
const cv::Vec3f & n = plane_grid.n_(y, x);
cv::Ptr<PlaneBase> plane;
const Vec3f & n = plane_grid.n_(y, x);
Ptr<PlaneBase> plane;
if ((sensor_error_a_ == 0) && (sensor_error_b_ == 0) && (sensor_error_c_ == 0))
plane = cv::Ptr<PlaneBase>(new Plane(plane_grid.m_(y, x), n, (int)index_plane));
plane = Ptr<PlaneBase>(new Plane(plane_grid.m_(y, x), n, (int)index_plane));
else
plane = cv::Ptr<PlaneBase>(new PlaneABC(plane_grid.m_(y, x), n, (int)index_plane,
plane = Ptr<PlaneBase>(new PlaneABC(plane_grid.m_(y, x), n, (int)index_plane,
(float)sensor_error_a_, (float)sensor_error_b_, (float)sensor_error_c_));
cv::Mat_<unsigned char> plane_mask = cv::Mat_<unsigned char>::zeros(points3d.rows / block_size_,
Mat_<unsigned char> plane_mask = Mat_<unsigned char>::zeros(points3d.rows / block_size_,
points3d.cols / block_size_);
std::set<TileQueue::PlaneTile> neighboring_tiles;
neighboring_tiles.insert(front_tile);
@ -625,7 +623,7 @@ namespace cv
++index_plane;
if (index_plane >= 255)
break;
cv::Vec4f coeffs(plane->n()[0], plane->n()[1], plane->n()[2], plane->d());
Vec4f coeffs(plane->n()[0], plane->n()[1], plane->n()[2], plane->d());
if (coeffs(2) > 0)
coeffs = -coeffs;
plane_coefficients.push_back(coeffs);
@ -635,10 +633,11 @@ namespace cv
if (plane_coefficients.empty())
return;
plane_coefficients_out.create((int)plane_coefficients.size(), 1, CV_32FC4);
cv::Mat plane_coefficients_mat = plane_coefficients_out.getMat();
Mat plane_coefficients_mat = plane_coefficients_out.getMat();
float* data = plane_coefficients_mat.ptr<float>(0);
for(size_t i=0; i<plane_coefficients.size(); ++i)
for(uchar j=0; j<4; ++j, ++data)
*data = plane_coefficients[i][j];
}
}
}

@ -0,0 +1,56 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/rgbd.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include <iostream>
#include <list>
#include <set>
#include <limits>
#endif

@ -39,6 +39,8 @@
#include "opencv2/core/private.hpp"
namespace cv
{
namespace rgbd
{
CV_INIT_ALGORITHM(DepthCleaner, "RGBD.DepthCleaner",
obj.info()->addParam(obj, "window_size", obj.window_size_);
@ -84,7 +86,7 @@ namespace cv
obj.info()->addParam(obj, "transformType", obj.transformType);
obj.info()->addParam(obj, "maxTranslation", obj.maxTranslation);
obj.info()->addParam(obj, "maxRotation", obj.maxRotation);
obj.info()->addParam<cv::RgbdNormals>(obj, "normalsComputer", obj.normalsComputer, true, NULL, NULL);)
obj.info()->addParam<RgbdNormals>(obj, "normalsComputer", obj.normalsComputer, true, NULL, NULL);)
CV_INIT_ALGORITHM(RgbdICPOdometry, "RGBD.RgbdICPOdometry",
obj.info()->addParam(obj, "cameraMatrix", obj.cameraMatrix);
@ -97,7 +99,7 @@ namespace cv
obj.info()->addParam(obj, "transformType", obj.transformType);
obj.info()->addParam(obj, "maxTranslation", obj.maxTranslation);
obj.info()->addParam(obj, "maxRotation", obj.maxRotation);
obj.info()->addParam<cv::RgbdNormals>(obj, "normalsComputer", obj.normalsComputer, true, NULL, NULL);)
obj.info()->addParam<RgbdNormals>(obj, "normalsComputer", obj.normalsComputer, true, NULL, NULL);)
bool
initModule_rgbd(void);
@ -113,3 +115,5 @@ namespace cv
return all;
}
}
}

@ -40,6 +40,8 @@
namespace cv
{
namespace rgbd
{
/** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided
* by 1000 to get a depth in meters, and the values 0 are converted to std::numeric_limits<float>::quiet_NaN()
* Otherwise, the image is simply converted to floats
@ -75,3 +77,5 @@ namespace cv
in.convertTo(out, depth);
}
}
}

@ -40,6 +40,11 @@
#include <opencv2/rgbd.hpp>
namespace cv
{
namespace rgbd
{
/** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided
* by 1000 to get a depth in meters, and the values 0 are converted to std::numeric_limits<float>::quiet_NaN()
* Otherwise, the image is simply converted to floats
@ -50,22 +55,25 @@
*/
template<typename T>
void
rescaleDepthTemplated(const cv::Mat& in, cv::Mat& out);
rescaleDepthTemplated(const Mat& in, Mat& out);
template<>
inline void
rescaleDepthTemplated<float>(const cv::Mat& in, cv::Mat& out)
rescaleDepthTemplated<float>(const Mat& in, Mat& out)
{
rescaleDepth(in, CV_32F, out);
}
template<>
inline void
rescaleDepthTemplated<double>(const cv::Mat& in, cv::Mat& out)
rescaleDepthTemplated<double>(const Mat& in, Mat& out)
{
rescaleDepth(in, CV_64F, out);
}
}
}
#endif /* __cplusplus */
#endif

@ -33,26 +33,68 @@
*
*/
#include <stdexcept>
#include <opencv2/contrib.hpp>
#include "test_precomp.hpp"
#include <opencv2/rgbd.hpp>
#include "test_precomp.hpp"
namespace cv
{
namespace rgbd
{
class CV_EXPORTS TickMeter
{
public:
TickMeter();
void start();
void stop();
int64 getTimeTicks() const;
double getTimeMicro() const;
double getTimeMilli() const;
double getTimeSec() const;
int64 getCounter() const;
void reset();
private:
int64 counter;
int64 sumTime;
int64 startTime;
};
TickMeter::TickMeter() { reset(); }
int64 TickMeter::getTimeTicks() const { return sumTime; }
double TickMeter::getTimeSec() const { return (double)getTimeTicks()/getTickFrequency(); }
double TickMeter::getTimeMilli() const { return getTimeSec()*1e3; }
double TickMeter::getTimeMicro() const { return getTimeMilli()*1e3; }
int64 TickMeter::getCounter() const { return counter; }
void TickMeter::reset() {startTime = 0; sumTime = 0; counter = 0; }
void TickMeter::start(){ startTime = getTickCount(); }
void TickMeter::stop()
{
int64 time = getTickCount();
if ( startTime == 0 )
return;
++counter;
sumTime += ( time - startTime );
startTime = 0;
}
cv::Point3f
rayPlaneIntersection(cv::Point2f uv, const cv::Mat& centroid, const cv::Mat& normal, const cv::Mat_<float>& Kinv);
Point3f
rayPlaneIntersection(Point2f uv, const Mat& centroid, const Mat& normal, const Mat_<float>& Kinv);
cv::Vec3f
rayPlaneIntersection(const cv::Vec3d& uv1, double centroid_dot_normal, const cv::Vec3d& normal,
const cv::Matx33d& Kinv);
cv::Vec3f
rayPlaneIntersection(const cv::Vec3d& uv1, double centroid_dot_normal, const cv::Vec3d& normal, const cv::Matx33d& Kinv)
Vec3f
rayPlaneIntersection(const Vec3d& uv1, double centroid_dot_normal, const Vec3d& normal,
const Matx33d& Kinv);
Vec3f
rayPlaneIntersection(const Vec3d& uv1, double centroid_dot_normal, const Vec3d& normal, const Matx33d& Kinv)
{
cv::Matx31d L = Kinv * uv1; //a ray passing through camera optical center
Matx31d L = Kinv * uv1; //a ray passing through camera optical center
//and uv.
L = L * (1.0 / cv::norm(L));
L = L * (1.0 / norm(L));
double LdotNormal = L.dot(normal);
double d;
if (std::fabs(LdotNormal) > 1e-9)
@ -63,18 +105,18 @@ rayPlaneIntersection(const cv::Vec3d& uv1, double centroid_dot_normal, const cv:
{
d = 1.0;
std::cout << "warning, LdotNormal nearly 0! " << LdotNormal << std::endl;
std::cout << "contents of L, Normal: " << cv::Mat(L) << ", " << cv::Mat(normal) << std::endl;
std::cout << "contents of L, Normal: " << Mat(L) << ", " << Mat(normal) << std::endl;
}
cv::Vec3f xyz((float)(d * L(0)), (float)(d * L(1)), (float)(d * L(2)));
Vec3f xyz((float)(d * L(0)), (float)(d * L(1)), (float)(d * L(2)));
return xyz;
}
cv::Point3f
rayPlaneIntersection(cv::Point2f uv, const cv::Mat& centroid, const cv::Mat& normal, const cv::Mat_<float>& Kinv)
Point3f
rayPlaneIntersection(Point2f uv, const Mat& centroid, const Mat& normal, const Mat_<float>& Kinv)
{
cv::Matx33d dKinv(Kinv);
cv::Vec3d dNormal(normal);
return rayPlaneIntersection(cv::Vec3d(uv.x, uv.y, 1), centroid.dot(normal), dNormal, dKinv);
Matx33d dKinv(Kinv);
Vec3d dNormal(normal);
return rayPlaneIntersection(Vec3d(uv.x, uv.y, 1), centroid.dot(normal), dNormal, dKinv);
}
const int W = 640;
@ -84,43 +126,43 @@ float focal_length = 525;
float cx = W / 2.f + 0.5f;
float cy = H / 2.f + 0.5f;
cv::Mat K = (cv::Mat_<double>(3, 3) << focal_length, 0, cx, 0, focal_length, cy, 0, 0, 1);
cv::Mat Kinv = K.inv();
Mat K = (Mat_<double>(3, 3) << focal_length, 0, cx, 0, focal_length, cy, 0, 0, 1);
Mat Kinv = K.inv();
static cv::RNG rng;
static RNG rng;
struct Plane
{
cv::Vec3d n, p;
Vec3d n, p;
double p_dot_n;
Plane()
{
n[0] = rng.uniform(-0.5, 0.5);
n[1] = rng.uniform(-0.5, 0.5);
n[2] = -0.3; //rng.uniform(-1.f, 0.5f);
n = n / cv::norm(n);
n = n / norm(n);
set_d((float)rng.uniform(-2.0, 0.6));
}
void
set_d(float d)
{
p = cv::Vec3d(0, 0, d / n[2]);
p = Vec3d(0, 0, d / n[2]);
p_dot_n = p.dot(n);
}
cv::Vec3f
intersection(float u, float v, const cv::Matx33f& Kinv_in) const
Vec3f
intersection(float u, float v, const Matx33f& Kinv_in) const
{
return rayPlaneIntersection(cv::Vec3d(u, v, 1), p_dot_n, n, Kinv_in);
return rayPlaneIntersection(Vec3d(u, v, 1), p_dot_n, n, Kinv_in);
}
};
void
gen_points_3d(std::vector<Plane>& planes_out, cv::Mat_<unsigned char> &plane_mask, cv::Mat& points3d, cv::Mat& normals,
gen_points_3d(std::vector<Plane>& planes_out, Mat_<unsigned char> &plane_mask, Mat& points3d, Mat& normals,
int n_planes);
void
gen_points_3d(std::vector<Plane>& planes_out, cv::Mat_<unsigned char> &plane_mask, cv::Mat& points3d, cv::Mat& normals,
gen_points_3d(std::vector<Plane>& planes_out, Mat_<unsigned char> &plane_mask, Mat& points3d, Mat& normals,
int n_planes)
{
std::vector<Plane> planes;
@ -133,8 +175,8 @@ gen_points_3d(std::vector<Plane>& planes_out, cv::Mat_<unsigned char> &plane_mas
planes.push_back(px);
}
}
cv::Mat_ < cv::Vec3f > outp(H, W);
cv::Mat_ < cv::Vec3f > outn(H, W);
Mat_ < Vec3f > outp(H, W);
Mat_ < Vec3f > outn(H, W);
plane_mask.create(H, W);
// n ( r - r_0) = 0
@ -175,10 +217,10 @@ protected:
{
try
{
cv::Mat_<unsigned char> plane_mask;
Mat_<unsigned char> plane_mask;
for (unsigned char i = 0; i < 3; ++i)
{
cv::RgbdNormals::RGBD_NORMALS_METHOD method;
RgbdNormals::RGBD_NORMALS_METHOD method;
// inner vector: whether it's 1 plane or 3 planes
// outer vector: float or double
std::vector<std::vector<float> > errors(2);
@ -187,7 +229,7 @@ protected:
switch (i)
{
case 0:
method = cv::RgbdNormals::RGBD_NORMALS_METHOD_FALS;
method = RgbdNormals::RGBD_NORMALS_METHOD_FALS;
std::cout << std::endl << "*** FALS" << std::endl;
errors[0][0] = 0.006f;
errors[0][1] = 0.03f;
@ -195,7 +237,7 @@ protected:
errors[1][1] = 0.02f;
break;
case 1:
method = cv::RgbdNormals::RGBD_NORMALS_METHOD_LINEMOD;
method = RgbdNormals::RGBD_NORMALS_METHOD_LINEMOD;
std::cout << std::endl << "*** LINEMOD" << std::endl;
errors[0][0] = 0.04f;
errors[0][1] = 0.07f;
@ -203,7 +245,7 @@ protected:
errors[1][1] = 0.08f;
break;
case 2:
method = cv::RgbdNormals::RGBD_NORMALS_METHOD_SRI;
method = RgbdNormals::RGBD_NORMALS_METHOD_SRI;
std::cout << std::endl << "*** SRI" << std::endl;
errors[0][0] = 0.02f;
errors[0][1] = 0.04f;
@ -211,7 +253,7 @@ protected:
errors[1][1] = 0.04f;
break;
default:
method = (cv::RgbdNormals::RGBD_NORMALS_METHOD)-1;
method = (RgbdNormals::RGBD_NORMALS_METHOD)-1;
CV_Error(0, "");
}
@ -223,11 +265,11 @@ protected:
else
std::cout << "* double" << std::endl;
cv::RgbdNormals normals_computer(H, W, depth, K, 5, method);
RgbdNormals normals_computer(H, W, depth, K, 5, method);
normals_computer.initialize();
std::vector<Plane> plane_params;
cv::Mat points3d, ground_normals;
Mat points3d, ground_normals;
// 1 plane, continuous scene, very low error..
std::cout << "1 plane" << std::endl;
float err_mean = 0;
@ -262,22 +304,22 @@ protected:
}
float
testit(const cv::Mat & points3d, const cv::Mat & in_ground_normals, const cv::RgbdNormals & normals_computer)
testit(const Mat & points3d, const Mat & in_ground_normals, const RgbdNormals & normals_computer)
{
cv::TickMeter tm;
TickMeter tm;
tm.start();
cv::Mat in_normals;
if (normals_computer.method() == cv::RgbdNormals::RGBD_NORMALS_METHOD_LINEMOD)
Mat in_normals;
if (normals_computer.method() == RgbdNormals::RGBD_NORMALS_METHOD_LINEMOD)
{
std::vector<cv::Mat> channels;
cv::split(points3d, channels);
std::vector<Mat> channels;
split(points3d, channels);
normals_computer(channels[2], in_normals);
}
else
normals_computer(points3d, in_normals);
tm.stop();
cv::Mat_<cv::Vec3f> normals, ground_normals;
Mat_<Vec3f> normals, ground_normals;
in_normals.convertTo(normals, CV_32FC3);
in_ground_normals.convertTo(ground_normals, CV_32FC3);
@ -285,9 +327,9 @@ protected:
for (int y = 0; y < normals.rows; ++y)
for (int x = 0; x < normals.cols; ++x)
{
cv::Vec3f vec1 = normals(y, x), vec2 = ground_normals(y, x);
vec1 = vec1 / cv::norm(vec1);
vec2 = vec2 / cv::norm(vec2);
Vec3f vec1 = normals(y, x), vec2 = ground_normals(y, x);
vec1 = vec1 / norm(vec1);
vec2 = vec2 / norm(vec2);
float dot = vec1.dot(vec2);
// Just for rounding errors
@ -318,11 +360,11 @@ protected:
{
try
{
cv::RgbdPlane plane_computer;
RgbdPlane plane_computer;
std::vector<Plane> planes;
cv::Mat points3d, ground_normals;
cv::Mat_<unsigned char> plane_mask;
Mat points3d, ground_normals;
Mat_<unsigned char> plane_mask;
gen_points_3d(planes, plane_mask, points3d, ground_normals, 1);
testit(planes, plane_mask, points3d, plane_computer); // 1 plane, continuous scene, very low error..
for (int ii = 0; ii < 10; ii++)
@ -338,22 +380,22 @@ protected:
}
void
testit(const std::vector<Plane> & gt_planes, const cv::Mat & gt_plane_mask, const cv::Mat & points3d,
cv::RgbdPlane & plane_computer)
testit(const std::vector<Plane> & gt_planes, const Mat & gt_plane_mask, const Mat & points3d,
RgbdPlane & plane_computer)
{
for (char i_test = 0; i_test < 2; ++i_test)
{
cv::TickMeter tm1, tm2;
cv::Mat plane_mask;
std::vector<cv::Vec4f> plane_coefficients;
TickMeter tm1, tm2;
Mat plane_mask;
std::vector<Vec4f> plane_coefficients;
if (i_test == 0)
{
tm1.start();
// First, get the normals
int depth = CV_32F;
cv::RgbdNormals normals_computer(H, W, depth, K, 5, cv::RgbdNormals::RGBD_NORMALS_METHOD_FALS);
cv::Mat normals;
RgbdNormals normals_computer(H, W, depth, K, 5, RgbdNormals::RGBD_NORMALS_METHOD_FALS);
Mat normals;
normals_computer(points3d, normals);
tm1.stop();
@ -371,17 +413,17 @@ protected:
// Compare each found plane to each ground truth plane
int n_planes = (int)plane_coefficients.size();
int n_gt_planes = (int)gt_planes.size();
cv::Mat_<int> matching(n_gt_planes, n_planes);
Mat_<int> matching(n_gt_planes, n_planes);
for (int j = 0; j < n_gt_planes; ++j)
{
cv::Mat gt_mask = gt_plane_mask == j;
int n_gt = cv::countNonZero(gt_mask);
Mat gt_mask = gt_plane_mask == j;
int n_gt = countNonZero(gt_mask);
int n_max = 0, i_max = 0;
for (int i = 0; i < n_planes; ++i)
{
cv::Mat dst;
cv::bitwise_and(gt_mask, plane_mask == i, dst);
matching(j, i) = cv::countNonZero(dst);
Mat dst;
bitwise_and(gt_mask, plane_mask == i, dst);
matching(j, i) = countNonZero(dst);
if (matching(j, i) > n_max)
{
n_max = matching(j, i);
@ -391,7 +433,7 @@ protected:
// Get the best match
ASSERT_LE(float(n_max - n_gt) / n_gt, 0.001);
// Compare the normals
cv::Vec3d normal(plane_coefficients[i_max][0], plane_coefficients[i_max][1], plane_coefficients[i_max][2]);
Vec3d normal(plane_coefficients[i_max][0], plane_coefficients[i_max][1], plane_coefficients[i_max][2]);
ASSERT_GE(std::abs(gt_planes[j].n.dot(normal)), 0.95);
}
@ -403,16 +445,19 @@ protected:
}
};
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Rgbd_Normals, compute)
{
CV_RgbdNormalsTest test;
cv::rgbd::CV_RgbdNormalsTest test;
test.safe_run();
}
TEST(Rgbd_Plane, compute)
{
CV_RgbdPlaneTest test;
cv::rgbd::CV_RgbdPlaneTest test;
test.safe_run();
}

@ -33,13 +33,16 @@
*
*/
#include "test_precomp.hpp"
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include "test_precomp.hpp"
using namespace cv;
namespace cv
{
namespace rgbd
{
#define SHOW_DEBUG_LOG 0
#define SHOW_DEBUG_IMAGES 0
@ -152,7 +155,7 @@ void dilateFrame(Mat& image, Mat& depth)
class CV_OdometryTest : public cvtest::BaseTest
{
public:
CV_OdometryTest(const Ptr<Odometry>& _odometry,
CV_OdometryTest(const Ptr<Odometry>& _odometry,
double _maxError1,
double _maxError5) :
odometry(_odometry),
@ -330,21 +333,23 @@ void CV_OdometryTest::run(int)
/****************************************************************************************\
* Tests registrations *
\****************************************************************************************/
}
}
TEST(RGBD_Odometry_Rgbd, algorithmic)
{
CV_OdometryTest test(Algorithm::create<Odometry>("RGBD.RgbdOdometry"), 0.99, 0.94);
cv::rgbd::CV_OdometryTest test(cv::Algorithm::create<cv::rgbd::Odometry>("RGBD.RgbdOdometry"), 0.99, 0.94);
test.safe_run();
}
TEST(RGBD_Odometry_ICP, algorithmic)
{
CV_OdometryTest test(Algorithm::create<Odometry>("RGBD.ICPOdometry"), 0.99, 0.99);
cv::rgbd::CV_OdometryTest test(cv::Algorithm::create<cv::rgbd::Odometry>("RGBD.ICPOdometry"), 0.99, 0.99);
test.safe_run();
}
TEST(RGBD_Odometry_RgbdICP, algorithmic)
{
CV_OdometryTest test(Algorithm::create<Odometry>("RGBD.RgbdICPOdometry"), 0.99, 0.99);
cv::rgbd::CV_OdometryTest test(cv::Algorithm::create<cv::rgbd::Odometry>("RGBD.RgbdICPOdometry"), 0.99, 0.99);
test.safe_run();
}

@ -1,6 +1,11 @@
#include "test_precomp.hpp"
#include <opencv2/calib3d.hpp>
#include "test_precomp.hpp"
namespace cv
{
namespace rgbd
{
class CV_RgbdDepthTo3dTest: public cvtest::BaseTest
{
@ -18,31 +23,31 @@ protected:
try
{
// K from a VGA Kinect
cv::Mat K = (cv::Mat_<float>(3, 3) << 525., 0., 319.5, 0., 525., 239.5, 0., 0., 1.);
Mat K = (Mat_<float>(3, 3) << 525., 0., 319.5, 0., 525., 239.5, 0., 0., 1.);
// Create a random depth image
cv::RNG rng;
cv::Mat_<float> depth(480, 640);
rng.fill(depth, cv::RNG::UNIFORM, 0, 100);
RNG rng;
Mat_<float> depth(480, 640);
rng.fill(depth, RNG::UNIFORM, 0, 100);
// Create some 3d points on the plane
int rows = depth.rows, cols = depth.cols;
cv::Mat_<cv::Vec3f> points3d;
cv::depthTo3d(depth, K, points3d);
Mat_<Vec3f> points3d;
depthTo3d(depth, K, points3d);
// Make sure the points belong to the plane
cv::Mat points = points3d.reshape(1, rows*cols);
cv::Mat image_points;
cv::Mat rvec;
cv::Rodrigues(cv::Mat::eye(3,3,CV_32F),rvec);
cv::Mat tvec = (cv::Mat_<float>(1,3) << 0, 0, 0);
cv::projectPoints(points, rvec, tvec, K, cv::Mat(), image_points);
Mat points = points3d.reshape(1, rows*cols);
Mat image_points;
Mat rvec;
Rodrigues(Mat::eye(3,3,CV_32F),rvec);
Mat tvec = (Mat_<float>(1,3) << 0, 0, 0);
projectPoints(points, rvec, tvec, K, Mat(), image_points);
image_points = image_points.reshape(2, rows);
float avg_diff = 0;
for (int y = 0; y < rows; ++y)
for (int x = 0; x < cols; ++x)
avg_diff += (float)cv::norm(image_points.at<cv::Vec2f>(y,x) - cv::Vec2f((float)x,(float)y));
avg_diff += (float)norm(image_points.at<Vec2f>(y,x) - Vec2f((float)x,(float)y));
// Verify the function works
ASSERT_LE(avg_diff/rows/cols, 1e-4) << "Average error for ground truth is: " << (avg_diff / rows / cols);
@ -54,8 +59,11 @@ protected:
}
};
}
}
TEST(Rgbd_DepthTo3d, compute)
{
CV_RgbdDepthTo3dTest test;
cv::rgbd::CV_RgbdDepthTo3dTest test;
test.safe_run();
}

@ -0,0 +1,2 @@
set(the_description "Text Detection and Recognition")
ocv_define_module(text opencv_ml opencv_highgui opencv_imgproc opencv_core)

@ -0,0 +1,211 @@
Scene Text Detection
====================
.. highlight:: cpp
Class-specific Extremal Regions for Scene Text Detection
--------------------------------------------------------
The scene text detection algorithm described below has been initially proposed by Lukás Neumann & Jiri Matas [Neumann12]. The main idea behind Class-specific Extremal Regions is similar to the MSER in that suitable Extremal Regions (ERs) are selected from the whole component tree of the image. However, this technique differs from MSER in that selection of suitable ERs is done by a sequential classifier trained for character detection, i.e. dropping the stability requirement of MSERs and selecting class-specific (not necessarily stable) regions.
The component tree of an image is constructed by thresholding by an increasing value step-by-step from 0 to 255 and then linking the obtained connected components from successive levels in a hierarchy by their inclusion relation:
.. image:: pics/component_tree.png
:width: 100%
The component tree may contain a huge number of regions even for a very simple image, as shown in the previous figure. This number can easily reach the order of 1 x 10^6 regions for an average 1 Megapixel image. In order to efficiently select suitable regions among all the ERs, the algorithm makes use of a sequential classifier with two differentiated stages.
In the first stage incrementally computable descriptors (area, perimeter, bounding box, and Euler number) are computed (in O(1)) for each region r and used as features for a classifier which estimates the class-conditional probability p(r|character). Only the ERs which correspond to a local maximum of the probability p(r|character) are selected (if their probability is above a global limit p_min and the difference between local maximum and local minimum is greater than a \delta_min value).
In the second stage, the ERs that passed the first stage are classified into character and non-character classes using more informative but also more computationally expensive features. (Hole area ratio, convex hull ratio, and the number of outer boundary inflexion points).
This ER filtering process is done in different single-channel projections of the input image in order to increase the character localization recall.
After the ER filtering is done on each input channel, character candidates must be grouped into high-level text blocks (i.e. words, text lines, paragraphs, ...). The grouping algorithm used in this implementation has been proposed by Lluis Gomez and Dimosthenis Karatzas in [Gomez13] and basically consists in finding meaningful groups of regions using a perceptual organization based clustering analysis (see :ocv:func:`erGrouping`).
To see the text detector at work, have a look at the textdetection demo: https://github.com/Itseez/opencv/blob/master/samples/cpp/textdetection.cpp
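A condensed sketch of the same pipeline is given below (the classifier file names are the default trained models mentioned in this document; the input image and the 0.5 grouping threshold are only illustrative choices) ::

    #include "opencv2/text.hpp"
    #include "opencv2/highgui.hpp"

    using namespace cv;
    using namespace std;

    int main()
    {
        Mat src = imread("scenetext01.jpg"); // any RGB scene image

        // Extract the channels to be processed individually
        vector<Mat> channels;
        computeNMChannels(src, channels);

        // Create the two stages of the sequential classifier with the default trained models
        Ptr<ERFilter> er_filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"));
        Ptr<ERFilter> er_filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"));

        // Run the cascade on every channel; the 2nd stage filters the 1st stage output in place
        vector<vector<ERStat> > regions(channels.size());
        for (size_t c = 0; c < channels.size(); c++)
        {
            er_filter1->run(channels[c], regions[c]);
            er_filter2->run(channels[c], regions[c]);
        }

        // Group the per-channel character candidates into text blocks
        vector<Rect> groups;
        erGrouping(channels, regions, "trained_classifier_erGrouping.xml", 0.5f, groups);

        return 0;
    }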
.. [Neumann12] Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012. The paper is available online at http://cmp.felk.cvut.cz/~neumalu1/neumann-cvpr2012.pdf
.. [Gomez13] Gomez L. and Karatzas D.: Multi-script Text Extraction from Natural Scenes, ICDAR 2013. The paper is available online at http://158.109.8.37/files/GoK2013.pdf
ERStat
------
.. ocv:struct:: ERStat
The ERStat structure represents a class-specific Extremal Region (ER).
An ER is a 4-connected set of pixels with all its grey-level values smaller than the values in its outer boundary. A class-specific ER is selected (using a classifier) from all the ERs in the component tree of the image. ::
struct CV_EXPORTS ERStat
{
public:
//! Constructor
explicit ERStat(int level = 256, int pixel = 0, int x = 0, int y = 0);
//! Destructor
~ERStat() { }
//! seed point and threshold (max grey-level value)
int pixel;
int level;
//! incrementally computable features
int area;
int perimeter;
int euler; //!< euler number
Rect rect; //!< bounding box
double raw_moments[2]; //!< order 1 raw moments to derive the centroid
double central_moments[3]; //!< order 2 central moments to construct the covariance matrix
std::deque<int> *crossings;//!< horizontal crossings
float med_crossings; //!< median of the crossings at three different height levels
//! 2nd stage features
float hole_area_ratio;
float convex_hull_ratio;
float num_inflexion_points;
//! probability that the ER belongs to the class we are looking for
double probability;
//! pointers preserving the tree structure of the component tree
ERStat* parent;
ERStat* child;
ERStat* next;
ERStat* prev;
};
computeNMChannels
-----------------
Compute the different channels to be processed independently in the N&M algorithm [Neumann12].
.. ocv:function:: void computeNMChannels(InputArray _src, OutputArrayOfArrays _channels, int _mode = ERFILTER_NM_RGBLGrad)
:param _src: Source image. Must be RGB ``CV_8UC3``.
:param _channels: Output vector<Mat> where computed channels are stored.
:param _mode: Mode of operation. Currently the only available options are: **ERFILTER_NM_RGBLGrad** (used by default) and **ERFILTER_NM_IHSGrad**.
In the N&M algorithm, the combination of intensity (I), hue (H), saturation (S), and gradient magnitude (Grad) channels is used in order to obtain high localization recall. This implementation also provides an alternative combination of red (R), green (G), blue (B), lightness (L), and gradient magnitude (Grad).
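For illustration, a hedged sketch of the channel extraction step (assuming an RGB image loaded from disk) ::

    Mat src = imread("scenetext01.jpg");   // must be RGB CV_8UC3
    std::vector<Mat> channels;
    computeNMChannels(src, channels);      // default mode ERFILTER_NM_RGBLGrad
    // channels now holds the R, G, B, lightness and gradient magnitude projections;
    // pass ERFILTER_NM_IHSGrad as the third argument for the I, H, S, Grad combination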
ERFilter
--------
.. ocv:class:: ERFilter : public Algorithm
Base class for 1st and 2nd stages of Neumann and Matas scene text detection algorithm [Neumann12]. ::
class CV_EXPORTS ERFilter : public Algorithm
{
public:
//! callback with the classifier is made a class.
//! By doing it we hide SVM, Boost etc. Developers can provide their own classifiers
class CV_EXPORTS Callback
{
public:
virtual ~Callback() { }
//! The classifier must return probability measure for the region.
virtual double eval(const ERStat& stat) = 0;
};
/*!
the key method. Takes image on input and returns the selected regions in a vector of ERStat
only distinctive ERs which correspond to characters are selected by a sequential classifier
*/
virtual void run( InputArray image, std::vector<ERStat>& regions ) = 0;
(...)
};
ERFilter::Callback
------------------
The classifier callback is wrapped in a class. By doing so we hide the concrete classifier (SVM, Boost, etc.), and developers can provide their own classifiers to the ERFilter algorithm.
.. ocv:class:: ERFilter::Callback
ERFilter::Callback::eval
------------------------
The classifier must return a probability measure for the region.
.. ocv:function:: double ERFilter::Callback::eval(const ERStat& stat)
:param stat: The region to be classified
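As a sketch of this extension point, a hypothetical user-defined callback could look as follows (the aspect-ratio rule is a toy stand-in, not a real character classifier) ::

    class ToyClassifier : public ERFilter::Callback
    {
    public:
        // return a probability-like score for the region
        double eval(const ERStat& stat)
        {
            double aspect = (double)stat.rect.width / stat.rect.height;
            return (aspect > 0.1 && aspect < 10.) ? 0.9 : 0.1; // toy rule
        }
    };

A pointer to such a callback can then be passed to :ocv:func:`createERFilterNM1` or :ocv:func:`createERFilterNM2` in place of the default classifiers.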
ERFilter::run
-------------
The key method of the ERFilter algorithm. It takes an image as input and returns the selected regions in a vector of ERStat; only distinctive ERs which correspond to characters are selected by a sequential classifier.
.. ocv:function:: void ERFilter::run( InputArray image, std::vector<ERStat>& regions )
:param image: Single channel image ``CV_8UC1``
:param regions: Output for the 1st stage and Input/Output for the 2nd. The selected Extremal Regions are stored here.
Extracts the component tree (if needed) and filters the extremal regions (ERs) using a given classifier.
createERFilterNM1
-----------------
Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm [Neumann12].
.. ocv:function:: Ptr<ERFilter> createERFilterNM1( const Ptr<ERFilter::Callback>& cb, int thresholdDelta = 1, float minArea = 0.00025, float maxArea = 0.13, float minProbability = 0.4, bool nonMaxSuppression = true, float minProbabilityDiff = 0.1 )
:param cb: Callback with the classifier. The default classifier can be implicitly loaded with the function :ocv:func:`loadClassifierNM1`, e.g. from the file in samples/cpp/trained_classifierNM1.xml
:param thresholdDelta: Threshold step in subsequent thresholds when extracting the component tree
:param minArea: The minimum area (% of image size) allowed for retrieved ERs
:param maxArea: The maximum area (% of image size) allowed for retrieved ERs
:param minProbability: The minimum probability P(er|character) allowed for retrieved ERs
:param nonMaxSuppression: Whether non-maximum suppression is done over the branch probabilities
:param minProbabilityDiff: The minimum probability difference between local maxima and local minima ERs
The component tree of the image is extracted with a threshold increased step by step from 0 to 255. Incrementally computable descriptors (aspect_ratio, compactness, number of holes, and number of horizontal crossings) are computed for each ER and used as features for a classifier which estimates the class-conditional probability P(er|character). The value of P(er|character) is tracked using the inclusion relation of ERs across all thresholds, and only the ERs which correspond to a local maximum of the probability P(er|character) are selected (if the local maximum of the probability is above a global limit pmin and the difference between local maximum and local minimum is greater than minProbabilityDiff).
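For example, the filter can be created with the documented default parameter values spelled out, purely to make the meaning of each argument concrete ::

    Ptr<ERFilter> er_filter1 = createERFilterNM1(
        loadClassifierNM1("trained_classifierNM1.xml"),
        1,        // thresholdDelta: step between subsequent thresholds
        0.00025f, // minArea, as a fraction of the image size
        0.13f,    // maxArea, as a fraction of the image size
        0.4f,     // minProbability for P(er|character)
        true,     // nonMaxSuppression over the branch probabilities
        0.1f);    // minProbabilityDiff between local maxima and minima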
createERFilterNM2
-----------------
Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm [Neumann12].
.. ocv:function:: Ptr<ERFilter> createERFilterNM2( const Ptr<ERFilter::Callback>& cb, float minProbability = 0.3 )
:param cb: Callback with the classifier. The default classifier can be implicitly loaded with the function :ocv:func:`loadClassifierNM2`, e.g. from the file in samples/cpp/trained_classifierNM2.xml
:param minProbability: The minimum probability P(er|character) allowed for retrieved ER's
In the second stage, the ERs that passed the first stage are classified into character and non-character classes using more informative but also more computationally expensive features. The classifier uses all the features calculated in the first stage and the following additional features: hole area ratio, convex hull ratio, and number of outer inflexion points.
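The 2nd stage consumes the output of the 1st, so both filters share the same ``regions`` vector (a sketch, continuing the example above): ::

    Ptr<ERFilter> er_filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"));
    er_filter1->run(channel, regions); // 1st stage fills the vector
    er_filter2->run(channel, regions); // 2nd stage prunes it in place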
loadClassifierNM1
-----------------
Allows the default classifier to be implicitly loaded when creating an ERFilter object.
.. ocv:function:: Ptr<ERFilter::Callback> loadClassifierNM1(const std::string& filename)
:param filename: The XML or YAML file with the classifier model (e.g. trained_classifierNM1.xml)
Returns a pointer to ERFilter::Callback.
loadClassifierNM2
-----------------
Allows the default classifier to be implicitly loaded when creating an ERFilter object.
.. ocv:function:: Ptr<ERFilter::Callback> loadClassifierNM2(const std::string& filename)
:param filename: The XML or YAML file with the classifier model (e.g. trained_classifierNM2.xml)
Returns a pointer to ERFilter::Callback.
erGrouping
----------
Find groups of Extremal Regions that are organized as text blocks.
.. ocv:function:: void erGrouping( InputArrayOfArrays src, std::vector<std::vector<ERStat> > &regions, const std::string& filename, float minProbability, std::vector<Rect > &groups)
:param src: Vector of single channel images CV_8UC1 from which the regions were extracted
:param regions: Vector of ER's retrieved from the ERFilter algorithm from each channel
:param filename: The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml)
:param minProbability: The minimum probability for accepting a group
:param groups: The output of the algorithm is stored in this parameter as a list of rectangles.
This function implements the grouping algorithm described in [Gomez13]. Notice that this implementation constrains the results to horizontally-aligned text and latin script (since ERFilter classifiers are trained only for latin script detection).
The algorithm combines two different clustering techniques in a single parameter-free procedure to detect groups of regions organized as text. The maximally meaningful groups are first detected in several feature spaces, where each feature space is a combination of proximity information (x,y coordinates) and a similarity measure (intensity, color, size, gradient magnitude, etc.), thus providing a set of hypotheses of text groups. An Evidence Accumulation framework is used to combine all these hypotheses to get the final estimate. Each of the resulting groups is finally validated using a classifier in order to assess if it forms a valid horizontally-aligned text block.
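An end-to-end sketch of the detection pipeline (illustrative; it assumes the trained classifier files are in the working directory, as in the ``textdetection.cpp`` sample): ::

    Mat src = imread("scene.jpg"); // RGB CV_8UC3

    std::vector<Mat> channels;
    computeNMChannels(src, channels);

    std::vector<std::vector<ERStat> > regions(channels.size());
    Ptr<ERFilter> er1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"));
    Ptr<ERFilter> er2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"));
    for (size_t c = 0; c < channels.size(); c++)
    {
        er1->run(channels[c], regions[c]);
        er2->run(channels[c], regions[c]);
    }

    std::vector<Rect> groups;
    erGrouping(channels, regions, "trained_classifier_erGrouping.xml", 0.5f, groups);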

Binary file not shown (106 KiB)

@ -0,0 +1,10 @@
***************************
text. Scene Text Detection
***************************
.. highlight:: cpp
.. toctree::
:maxdepth: 2
erfilter

@ -0,0 +1,44 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#ifndef __OPENCV_TEXT_HPP__
#define __OPENCV_TEXT_HPP__
#include "opencv2/text/erfilter.hpp"
#endif

@ -0,0 +1,269 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_TEXT_ERFILTER_HPP__
#define __OPENCV_TEXT_ERFILTER_HPP__
#include "opencv2/core.hpp"
#include <vector>
#include <deque>
#include <string>
namespace cv
{
namespace text
{
/*!
Extremal Region Stat structure
The ERStat structure represents a class-specific Extremal Region (ER).
An ER is a 4-connected set of pixels with all its grey-level values smaller than the values
in its outer boundary. A class-specific ER is selected (using a classifier) from all the ER's
in the component tree of the image.
*/
struct CV_EXPORTS ERStat
{
public:
//! Constructor
explicit ERStat(int level = 256, int pixel = 0, int x = 0, int y = 0);
//! Destructor
~ERStat() { }
//! seed point and the threshold (max grey-level value)
int pixel;
int level;
//! incrementally computable features
int area;
int perimeter;
int euler; //!< euler number
Rect rect;
double raw_moments[2]; //!< order 1 raw moments to derive the centroid
double central_moments[3]; //!< order 2 central moments to construct the covariance matrix
std::deque<int> *crossings;//!< horizontal crossings
float med_crossings; //!< median of the crossings at three different height levels
//! 2nd stage features
float hole_area_ratio;
float convex_hull_ratio;
float num_inflexion_points;
// TODO Other features can be added (average color, standard deviation, and such)
// TODO shall we include the pixel list whenever available (i.e. after 2nd stage) ?
std::vector<int> *pixels;
//! probability that the ER belongs to the class we are looking for
double probability;
//! pointers preserving the tree structure of the component tree
ERStat* parent;
ERStat* child;
ERStat* next;
ERStat* prev;
//! whether the region is a local maximum of the probability
bool local_maxima;
ERStat* max_probability_ancestor;
ERStat* min_probability_ancestor;
};
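/* Illustrative sketch (not part of the API): parent/child/next encode the
   component tree as first-child/next-sibling links, so a depth-first traversal
   over all regions can be written as:

   void visit(const ERStat* er)
   {
       // ... process *er ...
       for (const ERStat* c = er->child; c != NULL; c = c->next)
           visit(c);
   }
*/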
/*!
Base class for 1st and 2nd stages of Neumann and Matas scene text detection algorithms
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
Extracts the component tree (if needed) and filter the extremal regions (ER's) by using a given classifier.
*/
class CV_EXPORTS ERFilter : public Algorithm
{
public:
//! The classifier callback is wrapped in a class so that the concrete classifier (SVM, Boost, etc.) stays hidden.
class CV_EXPORTS Callback
{
public:
virtual ~Callback() { }
//! The classifier must return probability measure for the region.
virtual double eval(const ERStat& stat) = 0; // TODO: should this be a const method?
};
/*!
The key method. Takes an image as input and returns the selected regions in a vector of ERStat.
Only distinctive ERs which correspond to characters are selected by a sequential classifier.
\param image is the input image
\param regions is the output for the first stage, input/output for the second one.
*/
virtual void run( InputArray image, std::vector<ERStat>& regions ) = 0;
//! set/get methods for the algorithm properties
virtual void setCallback(const Ptr<ERFilter::Callback>& cb) = 0;
virtual void setThresholdDelta(int thresholdDelta) = 0;
virtual void setMinArea(float minArea) = 0;
virtual void setMaxArea(float maxArea) = 0;
virtual void setMinProbability(float minProbability) = 0;
virtual void setMinProbabilityDiff(float minProbabilityDiff) = 0;
virtual void setNonMaxSuppression(bool nonMaxSuppression) = 0;
virtual int getNumRejected() = 0;
};
/*!
Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
The component tree of the image is extracted by a threshold increased step by step
from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness,
number of holes, and number of horizontal crossings) are computed for each ER
and used as features for a classifier which estimates the class-conditional
probability P(er|character). The value of P(er|character) is tracked using the inclusion
relation of ER across all thresholds and only the ERs which correspond to local maximum
of the probability P(er|character) are selected (if the local maximum of the
probability is above a global limit pmin and the difference between local maximum and
local minimum is greater than minProbabilityDiff).
\param cb Callback with the classifier.
The default classifier can be implicitly loaded with the function loadClassifierNM1()
from the file in samples/cpp/trained_classifierNM1.xml
\param thresholdDelta Threshold step in subsequent thresholds when extracting the component tree
\param minArea The minimum area (% of image size) allowed for retrieved ER's
\param maxArea The maximum area (% of image size) allowed for retrieved ER's
\param minProbability The minimum probability P(er|character) allowed for retrieved ER's
\param nonMaxSuppression Whether non-maximum suppression is done over the branch probabilities
\param minProbabilityDiff The minimum probability difference between local maxima and local minima ERs
*/
CV_EXPORTS Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb,
int thresholdDelta = 1, float minArea = 0.00025,
float maxArea = 0.13, float minProbability = 0.4,
bool nonMaxSuppression = true,
float minProbabilityDiff = 0.1);
/*!
Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
In the second stage, the ERs that passed the first stage are classified into character
and non-character classes using more informative but also more computationally expensive
features. The classifier uses all the features calculated in the first stage and the following
additional features: hole area ratio, convex hull ratio, and number of outer inflexion points.
\param cb Callback with the classifier
default classifier can be implicitly load with function loadClassifierNM2()
from file in samples/cpp/trained_classifierNM2.xml
\param minProbability The minimum probability P(er|character) allowed for retreived ER's
*/
CV_EXPORTS Ptr<ERFilter> createERFilterNM2(const Ptr<ERFilter::Callback>& cb,
float minProbability = 0.3);
/*!
Allows the default classifier to be implicitly loaded when creating an ERFilter object.
The function takes as parameter the XML or YAML file with the classifier model
(e.g. trained_classifierNM1.xml) and returns a pointer to ERFilter::Callback.
*/
CV_EXPORTS Ptr<ERFilter::Callback> loadClassifierNM1(const std::string& filename);
/*!
Allows the default classifier to be implicitly loaded when creating an ERFilter object.
The function takes as parameter the XML or YAML file with the classifier model
(e.g. trained_classifierNM2.xml) and returns a pointer to ERFilter::Callback.
*/
CV_EXPORTS Ptr<ERFilter::Callback> loadClassifierNM2(const std::string& filename);
// computeNMChannels operation modes
enum { ERFILTER_NM_RGBLGrad = 0,
ERFILTER_NM_IHSGrad = 1
};
/*!
Compute the different channels to be processed independently in the N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
In N&M algorithm, the combination of intensity (I), hue (H), saturation (S), and gradient
magnitude channels (Grad) are used in order to obtain high localization recall.
This implementation also provides an alternative combination of red (R), green (G), blue (B),
lightness (L), and gradient magnitude (Grad).
\param _src Source image. Must be RGB CV_8UC3.
\param _channels Output vector<Mat> where computed channels are stored.
\param _mode Mode of operation. Currently the only available options are
ERFILTER_NM_RGBLGrad (by default) and ERFILTER_NM_IHSGrad.
*/
CV_EXPORTS void computeNMChannels(InputArray _src, OutputArrayOfArrays _channels, int _mode = ERFILTER_NM_RGBLGrad);
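/* Usage sketch (illustrative): ER- regions (bright regions over dark background)
   can additionally be detected by processing the inverted channels, as the
   textdetection.cpp sample does:

   std::vector<cv::Mat> channels;
   computeNMChannels(src, channels);
   size_t cn = channels.size();
   for (size_t c = 0; c < cn - 1; c++) // skip the last (gradient magnitude) channel
       channels.push_back(255 - channels[c]);
*/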
/*!
Find groups of Extremal Regions that are organized as text blocks. This function implements
the grouping algorithm described in:
Gomez L. and Karatzas D.: Multi-script Text Extraction from Natural Scenes, ICDAR 2013.
Notice that this implementation constrains the results to horizontally-aligned text and
latin script (since ERFilter classifiers are trained only for latin script detection).
The algorithm combines two different clustering techniques in a single parameter-free procedure
to detect groups of regions organized as text. The maximally meaningful groups are first detected
in several feature spaces, where each feature space is a combination of proximity information
(x,y coordinates) and a similarity measure (intensity, color, size, gradient magnitude, etc.),
thus providing a set of hypotheses of text groups. An Evidence Accumulation framework is used to
combine all these hypotheses to get the final estimate. Each of the resulting groups is finally
validated using a classifier in order to assess if it forms a valid horizontally-aligned text block.
\param src Vector of single channel images CV_8UC1 from which the regions were extracted.
\param regions Vector of ER's retrieved from the ERFilter algorithm from each channel
\param filename The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml)
\param minProbability The minimum probability for accepting a group
\param groups The output of the algorithm is stored in this parameter as a list of rectangles.
*/
CV_EXPORTS void erGrouping(InputArrayOfArrays src, std::vector<std::vector<ERStat> > &regions,
const std::string& filename, float minProbability,
std::vector<Rect > &groups);
}
}
#endif // __OPENCV_TEXT_ERFILTER_HPP__

Binary file not shown (95 KiB)

Binary file not shown (93 KiB)

Binary file not shown (59 KiB)

Binary file not shown (97 KiB)

Binary file not shown (111 KiB)

Binary file not shown (69 KiB)

@ -0,0 +1,125 @@
/*
* textdetection.cpp
*
* A demo program of the Extremal Region Filter algorithm described in
* Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
*
* Created on: Sep 23, 2013
* Author: Lluis Gomez i Bigorda <lgomez AT cvc.uab.es>
*/
#include "opencv2/text.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <vector>
#include <iostream>
#include <iomanip>
using namespace std;
using namespace cv;
using namespace cv::text;
void show_help_and_exit(const char *cmd);
void groups_draw(Mat &src, vector<Rect> &groups);
void er_show(vector<Mat> &channels, vector<vector<ERStat> > &regions);
int main(int argc, const char * argv[])
{
cout << endl << argv[0] << endl << endl;
cout << "Demo program of the Extremal Region Filter algorithm described in " << endl;
cout << "Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012" << endl << endl;
if (argc < 2) show_help_and_exit(argv[0]);
Mat src = imread(argv[1]);
// Extract channels to be processed individually
vector<Mat> channels;
computeNMChannels(src, channels);
int cn = (int)channels.size();
// Append negative channels to detect ER- (bright regions over dark background)
for (int c = 0; c < cn-1; c++)
channels.push_back(255-channels[c]);
// Create ERFilter objects with the 1st and 2nd stage default classifiers
Ptr<ERFilter> er_filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),16,0.00015f,0.13f,0.2f,true,0.1f);
Ptr<ERFilter> er_filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);
vector<vector<ERStat> > regions(channels.size());
// Apply the default cascade classifier to each independent channel (could be done in parallel)
cout << "Extracting Class Specific Extremal Regions from " << (int)channels.size() << " channels ..." << endl;
cout << " (...) this may take a while (...)" << endl << endl;
for (int c=0; c<(int)channels.size(); c++)
{
er_filter1->run(channels[c], regions[c]);
er_filter2->run(channels[c], regions[c]);
}
// Detect character groups
cout << "Grouping extracted ERs ... ";
vector<Rect> groups;
erGrouping(channels, regions, "trained_classifier_erGrouping.xml", 0.5, groups);
// draw groups
groups_draw(src, groups);
imshow("grouping",src);
cout << "Done!" << endl << endl;
cout << "Press 'e' to show the extracted Extremal Regions, any other key to exit." << endl << endl;
if( waitKey(-1) == 'e' )
er_show(channels,regions);
// memory clean-up
er_filter1.release();
er_filter2.release();
regions.clear();
if (!groups.empty())
{
groups.clear();
}
}
// helper functions
void show_help_and_exit(const char *cmd)
{
cout << " Usage: " << cmd << " <input_image> " << endl;
cout << " Default classifier files (trained_classifierNM*.xml) must be in current directory" << endl << endl;
exit(-1);
}
void groups_draw(Mat &src, vector<Rect> &groups)
{
for (int i=(int)groups.size()-1; i>=0; i--)
{
if (src.type() == CV_8UC3)
rectangle(src,groups.at(i).tl(),groups.at(i).br(),Scalar( 0, 255, 255 ), 3, 8 );
else
rectangle(src,groups.at(i).tl(),groups.at(i).br(),Scalar( 255 ), 3, 8 );
}
}
void er_show(vector<Mat> &channels, vector<vector<ERStat> > &regions)
{
for (int c=0; c<(int)channels.size(); c++)
{
Mat dst = Mat::zeros(channels[0].rows+2,channels[0].cols+2,CV_8UC1);
for (int r=0; r<(int)regions[c].size(); r++)
{
ERStat er = regions[c][r];
if (er.parent != NULL) // skip the root region
{
int newMaskVal = 255;
int flags = 4 + (newMaskVal << 8) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
floodFill(channels[c],dst,Point(er.pixel%channels[c].cols,er.pixel/channels[c].cols),
Scalar(255),0,Scalar(er.level),Scalar(0),flags);
}
}
char buff[32]; char *buff_ptr = buff; // large enough for "channel %d" with any int
sprintf(buff, "channel %d", c);
imshow(buff_ptr, dst);
}
waitKey(-1);
}

File diff suppressed because it is too large

@ -0,0 +1,48 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/text.hpp"
#endif

@ -175,5 +175,4 @@ public:
} /* namespace xobjdetect */
} /* namespace cv */
#endif /* __OPENCV_XOBJDETECT_XOBJDETECT_HPP__ */
