Merge pull request #2619 from akashsharma02:submap
[GSoC] Add Submaps and PoseGraph optimization for Large Scale Depth Fusion

* Add HashTSDF class; implement Integrate function (untested)
* Integration seems to be working, raycasting does not
* Update integration code
* Integration and raycasting fixes (both work now)
* Format code; clean up comments and a few fixes
* Add Kinect Fusion backup file
* Add interpolation for vertices and normals (slow and unreliable!); format code; delete kinfu_back.cpp
* Bug fix for integration and noisy odometry
* Create volume abstract class; address review comments
* Add getPoints and getNormals functions; fix formatting according to comments; move volume abstract class to include/opencv2/rgbd/; write factory method for creating TSDFVolumes; small bug fixes; minor fixes according to comments
* Add tests for hashTSDF; fix raycasting bug causing it to loop forever; suppress warnings by explicit conversion; disable hashTsdf test until the memory leak is figured out; style changes; add missing license in a few files, correct precomp.hpp usage
* Use CRTP-based static polymorphism to choose between CPU and GPU for HashTSDF volume
* Create submap and submapMgr; implement overlap_ratio check to create new submaps
* Early draft of posegraph and submaps (doesn't even compile)
* Minor cleanup (no compilation)
* Track all submaps (no posegraph update yet)
* Return inliers from ICP for weighting the constraints (Huber-threshold-based inliers pending)
* Add updating constraints between submaps and retain the same current map
* Fix constraints creation between submaps and allow switching between submaps
* Fix bug in allocate volumeUnits; simplify calculation of visibleBlocks
* Remove inlier calculation in fast_icp (not required)
* Modify readFile to allow reading other datasets easily
* Implement posegraph update (Gauss-Newton is unstable); minor changes to Gauss-Newton and sparse matrix; residual still increases slightly over iterations
* Implement simplified Levenberg-Marquardt
* Bug fixes for Levenberg-Marquardt and minor changes
* Minor changes
* Fixes, but optimizer is still not well behaved
* Working Ceres optimizer
* Reorganize IO code for samples in a separate file; minor fix for Ceres preprocessor definition; remove unused generatorJacobian (will be used for the OpenCV implementation of Levenberg-Marquardt); Doxygen docs fix; minor preprocessor fixes
* Reorganize IO code for samples in a separate file; minor fix for Ceres preprocessor definition; remove unused generatorJacobian; Doxygen docs fix; minor preprocessor fixes; move inline functions to header and make function params const references
* Add Python bindings for Volume struct; remove makeVolume(const VolumeParams&) Python binding due to compilation issues; minor changes according to comments
* Remove dynafu::Params() since it is identical to kinfu::Params(); use common functions for dynafu_demo; suppress "unreachable code" in volume.cpp
* Minor API changes
* Minor
* Remove CRTP for HashTSDF class
* Bug fixes for HashTSDF integration
parent ef0c722f56
commit 7022f4e3e0
25 changed files with 2651 additions and 841 deletions
@@ -1,2 +1,11 @@
set(the_description "RGBD algorithms")

find_package(Ceres QUIET)
ocv_define_module(rgbd opencv_core opencv_calib3d opencv_imgproc OPTIONAL opencv_viz WRAP python)
ocv_target_link_libraries(${the_module} ${CERES_LIBRARIES})

if(Ceres_FOUND)
  ocv_target_compile_definitions(${the_module} PUBLIC CERES_FOUND)
else()
  message(STATUS "CERES support is disabled. Ceres Solver is Required for Posegraph optimization")
endif()

@@ -0,0 +1,143 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

// This code is also subject to the license terms in the LICENSE_KinectFusion.md file found in this
// module's directory

#ifndef __OPENCV_RGBD_LARGEKINFU_HPP__
#define __OPENCV_RGBD_LARGEKINFU_HPP__

#include <opencv2/rgbd/volume.hpp>

#include "opencv2/core.hpp"
#include "opencv2/core/affine.hpp"

namespace cv
{
namespace large_kinfu
{
struct CV_EXPORTS_W Params
{
    /** @brief Default parameters
    A set of parameters which provides better model quality, can be very slow.
    */
    CV_WRAP static Ptr<Params> defaultParams();

    /** @brief Coarse parameters
    A set of parameters which provides better speed, can fail to match frames
    in case of rapid sensor motion.
    */
    CV_WRAP static Ptr<Params> coarseParams();

    /** @brief HashTSDF parameters
    A set of parameters suitable for use with HashTSDFVolume
    */
    CV_WRAP static Ptr<Params> hashTSDFParams(bool isCoarse);

    /** @brief frame size in pixels */
    CV_PROP_RW Size frameSize;

    /** @brief camera intrinsics */
    CV_PROP_RW Matx33f intr;

    /** @brief pre-scale per 1 meter for input values
    Typical values are:
      * 5000 per 1 meter for the 16-bit PNG files of TUM database
      * 1000 per 1 meter for Kinect 2 device
      * 1 per 1 meter for the 32-bit float images in the ROS bag files
    */
    CV_PROP_RW float depthFactor;

    /** @brief Depth sigma in meters for bilateral smooth */
    CV_PROP_RW float bilateral_sigma_depth;
    /** @brief Spatial sigma in pixels for bilateral smooth */
    CV_PROP_RW float bilateral_sigma_spatial;
    /** @brief Kernel size in pixels for bilateral smooth */
    CV_PROP_RW int bilateral_kernel_size;

    /** @brief Number of pyramid levels for ICP */
    CV_PROP_RW int pyramidLevels;

    /** @brief Minimal camera movement in meters
    Integrate new depth frame only if camera movement exceeds this value.
    */
    CV_PROP_RW float tsdf_min_camera_movement;

    /** @brief light pose for rendering in meters */
    CV_PROP_RW Vec3f lightPose;

    /** @brief distance threshold for ICP in meters */
    CV_PROP_RW float icpDistThresh;
    /** @brief angle threshold for ICP in radians */
    CV_PROP_RW float icpAngleThresh;
    /** @brief number of ICP iterations for each pyramid level */
    CV_PROP_RW std::vector<int> icpIterations;

    /** @brief Threshold for depth truncation in meters
    All depth values beyond this threshold will be set to zero
    */
    CV_PROP_RW float truncateThreshold;

    /** @brief Volume parameters
    */
    kinfu::VolumeParams volumeParams;
};

/** @brief Large Scale Dense Depth Fusion implementation

    This class implements a 3D reconstruction algorithm for larger environments using
    spatially hashed TSDF volume "Submaps".
    It also runs a periodic pose graph optimization to minimize drift in tracking over long sequences.
    Currently the algorithm does not implement a relocalization or loop closure module.
    Potentially a Bag of Words implementation or RGB-D relocalization as described in
    Glocker et al. ISMAR 2013 will be implemented.

    It takes a sequence of depth images taken from a depth sensor
    (or any depth image source such as a stereo camera matching algorithm or even a raymarching
    renderer). The output can be obtained as a vector of points and their normals or can be
    Phong-rendered from a given camera pose.

    The internal representation of a model is a spatially hashed voxel cube that stores TSDF values
    which represent the distance to the closest surface (for details read the @cite kinectfusion article
    about TSDF). There is no interface to that representation yet.

    For pose graph optimization, a Submap abstraction over the Volume class is created.
    New submaps are added to the model when there is low visibility overlap between the current viewing frustum
    and the existing volume/model. Multiple submaps are tracked simultaneously, and a pose graph is created and
    optimized periodically.

    LargeKinfu does not use any OpenCL acceleration yet.
    To enable or disable it explicitly use cv::setUseOptimized() or cv::ocl::setUseOpenCL().

    This implementation is inspired by Kintinuous, InfiniTAM and other state-of-the-art algorithms.

    You need to set the OPENCV_ENABLE_NONFREE option in CMake to use KinectFusion.
*/
class CV_EXPORTS_W LargeKinfu
{
public:
    CV_WRAP static Ptr<LargeKinfu> create(const Ptr<Params>& _params);
    virtual ~LargeKinfu() = default;

    virtual const Params& getParams() const = 0;

    CV_WRAP virtual void render(OutputArray image,
                                const Matx44f& cameraPose = Matx44f::eye()) const = 0;

    CV_WRAP virtual void getCloud(OutputArray points, OutputArray normals) const = 0;

    CV_WRAP virtual void getPoints(OutputArray points) const = 0;

    CV_WRAP virtual void getNormals(InputArray points, OutputArray normals) const = 0;

    CV_WRAP virtual void reset() = 0;

    virtual const Affine3f getPose() const = 0;

    CV_WRAP virtual bool update(InputArray depth) = 0;
};

}  // namespace large_kinfu
}  // namespace cv
#endif
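A minimal usage sketch for the API declared above, assuming the caller already has 16-bit depth frames (e.g. CV_16U images from a TUM-style dataset) and that OpenCV was built with OPENCV_ENABLE_NONFREE. The frame source is hypothetical; only the Params/LargeKinfu calls come from this header.

#include <opencv2/rgbd/large_kinfu.hpp>
#include <vector>

void runLargeKinfu(const std::vector<cv::Mat>& depthFrames)
{
    using namespace cv;
    Ptr<large_kinfu::Params> params = large_kinfu::Params::hashTSDFParams(/*isCoarse=*/false);
    Ptr<large_kinfu::LargeKinfu> lk = large_kinfu::LargeKinfu::create(params);

    for (const Mat& depth : depthFrames)
    {
        // update() returns false when ICP tracking fails; reset and continue
        if (!lk->update(depth))
            lk->reset();

        UMat rendered;
        lk->render(rendered);  // Phong-render from the current camera pose
    }

    UMat points, normals;
    lk->getCloud(points, normals);  // fetch the reconstructed model
}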
@@ -0,0 +1,313 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#ifndef OPENCV_RGBS_IO_UTILS_HPP
#define OPENCV_RGBS_IO_UTILS_HPP

#include <fstream>
#include <iostream>
#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/rgbd/kinfu.hpp>
#include <opencv2/rgbd/large_kinfu.hpp>

namespace cv
{
namespace io_utils
{

static std::vector<std::string> readDepth(const std::string& fileList)
{
    std::vector<std::string> v;

    std::fstream file(fileList);
    if (!file.is_open())
        throw std::runtime_error("Failed to read depth list");

    std::string dir;
    size_t slashIdx = fileList.rfind('/');
    slashIdx = slashIdx != std::string::npos ? slashIdx : fileList.rfind('\\');
    dir = fileList.substr(0, slashIdx);

    while (!file.eof())
    {
        std::string s, imgPath;
        std::getline(file, s);
        if (s.empty() || s[0] == '#')
            continue;
        std::stringstream ss;
        ss << s;
        double thumb;
        ss >> thumb >> imgPath;
        v.push_back(dir + '/' + imgPath);
    }

    return v;
}

struct DepthWriter
{
    DepthWriter(std::string fileList) : file(fileList, std::ios::out), count(0), dir()
    {
        size_t slashIdx = fileList.rfind('/');
        slashIdx = slashIdx != std::string::npos ? slashIdx : fileList.rfind('\\');
        dir = fileList.substr(0, slashIdx);

        if (!file.is_open())
            throw std::runtime_error("Failed to write depth list");

        file << "# depth maps saved from device" << std::endl;
        file << "# useless_number filename" << std::endl;
    }

    void append(InputArray _depth)
    {
        Mat depth = _depth.getMat();
        std::string depthFname = cv::format("%04d.png", count);
        std::string fullDepthFname = dir + '/' + depthFname;
        if (!imwrite(fullDepthFname, depth))
            throw std::runtime_error("Failed to write depth to file " + fullDepthFname);
        file << count++ << " " << depthFname << std::endl;
    }

    std::fstream file;
    int count;
    std::string dir;
};

namespace Kinect2Params
{
static const Size frameSize = Size(512, 424);
// approximate values, no guarantee to be correct
static const float focal = 366.1f;
static const float cx = 258.2f;
static const float cy = 204.f;
static const float k1 = 0.12f;
static const float k2 = -0.34f;
static const float k3 = 0.12f;
};  // namespace Kinect2Params

struct DepthSource
{
public:
    enum Type
    {
        DEPTH_LIST,
        DEPTH_KINECT2_LIST,
        DEPTH_KINECT2,
        DEPTH_REALSENSE
    };

    DepthSource(int cam) : DepthSource("", cam) {}

    DepthSource(String fileListName) : DepthSource(fileListName, -1) {}

    DepthSource(String fileListName, int cam)
        : depthFileList(fileListName.empty() ? std::vector<std::string>() : readDepth(fileListName)),
          frameIdx(0),
          undistortMap1(),
          undistortMap2()
    {
        if (cam >= 0)
        {
            vc = VideoCapture(VideoCaptureAPIs::CAP_OPENNI2 + cam);
            if (vc.isOpened())
            {
                sourceType = Type::DEPTH_KINECT2;
            }
            else
            {
                vc = VideoCapture(VideoCaptureAPIs::CAP_REALSENSE + cam);
                if (vc.isOpened())
                {
                    sourceType = Type::DEPTH_REALSENSE;
                }
            }
        }
        else
        {
            vc = VideoCapture();
            sourceType = Type::DEPTH_KINECT2_LIST;
        }
    }

    UMat getDepth()
    {
        UMat out;
        if (!vc.isOpened())
        {
            if (frameIdx < depthFileList.size())
            {
                Mat f = cv::imread(depthFileList[frameIdx++], IMREAD_ANYDEPTH);
                f.copyTo(out);
            }
            else
            {
                return UMat();
            }
        }
        else
        {
            vc.grab();
            switch (sourceType)
            {
                case Type::DEPTH_KINECT2: vc.retrieve(out, CAP_OPENNI_DEPTH_MAP); break;
                case Type::DEPTH_REALSENSE: vc.retrieve(out, CAP_INTELPERC_DEPTH_MAP); break;
                default:
                    // unknown depth source
                    vc.retrieve(out);
            }

            // workaround for Kinect 2
            if (sourceType == Type::DEPTH_KINECT2)
            {
                out = out(Rect(Point(), Kinect2Params::frameSize));

                UMat outCopy;
                // linear remap adds gradient between valid and invalid pixels
                // which causes garbage, use nearest instead
                remap(out, outCopy, undistortMap1, undistortMap2, cv::INTER_NEAREST);

                cv::flip(outCopy, out, 1);
            }
        }
        if (out.empty())
            throw std::runtime_error("Matrix is empty");
        return out;
    }

    bool empty() { return depthFileList.empty() && !(vc.isOpened()); }

    void updateIntrinsics(Matx33f& _intrinsics, Size& _frameSize, float& _depthFactor)
    {
        if (vc.isOpened())
        {
            // this should be set according to the user's depth sensor
            int w = (int)vc.get(VideoCaptureProperties::CAP_PROP_FRAME_WIDTH);
            int h = (int)vc.get(VideoCaptureProperties::CAP_PROP_FRAME_HEIGHT);

            // it's recommended to calibrate the sensor to obtain its intrinsics
            float fx, fy, cx, cy;
            float depthFactor = 1000.f;
            Size frameSize;
            if (sourceType == Type::DEPTH_KINECT2)
            {
                fx = fy = Kinect2Params::focal;
                cx = Kinect2Params::cx;
                cy = Kinect2Params::cy;

                frameSize = Kinect2Params::frameSize;
            }
            else
            {
                if (sourceType == Type::DEPTH_REALSENSE)
                {
                    fx = (float)vc.get(CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ);
                    fy = (float)vc.get(CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT);
                    depthFactor = 1.f / (float)vc.get(CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE);
                }
                else
                {
                    fx = fy =
                        (float)vc.get(CAP_OPENNI_DEPTH_GENERATOR | CAP_PROP_OPENNI_FOCAL_LENGTH);
                }

                cx = w / 2 - 0.5f;
                cy = h / 2 - 0.5f;

                frameSize = Size(w, h);
            }

            Matx33f camMatrix = Matx33f(fx, 0, cx, 0, fy, cy, 0, 0, 1);
            _intrinsics = camMatrix;
            _frameSize = frameSize;
            _depthFactor = depthFactor;
        }
    }

    void updateVolumeParams(const Vec3i& _resolution, float& _voxelSize, float& _tsdfTruncDist,
                            Affine3f& _volumePose, float& _depthTruncateThreshold)
    {
        float volumeSize = 3.0f;
        _depthTruncateThreshold = 0.0f;
        // RealSense has shorter depth range, some params should be tuned
        if (sourceType == Type::DEPTH_REALSENSE)
        {
            volumeSize = 1.f;
            _voxelSize = volumeSize / _resolution[0];
            _tsdfTruncDist = 0.01f;
            _depthTruncateThreshold = 2.5f;
        }
        _volumePose = Affine3f().translate(Vec3f(-volumeSize / 2.f, -volumeSize / 2.f, 0.05f));
    }

    void updateICPParams(float& _icpDistThresh, float& _bilateralSigmaDepth)
    {
        _icpDistThresh = 0.1f;
        _bilateralSigmaDepth = 0.04f;
        // RealSense has shorter depth range, some params should be tuned
        if (sourceType == Type::DEPTH_REALSENSE)
        {
            _icpDistThresh = 0.01f;
            _bilateralSigmaDepth = 0.01f;
        }
    }

    void updateParams(large_kinfu::Params& params)
    {
        if (vc.isOpened())
        {
            updateIntrinsics(params.intr, params.frameSize, params.depthFactor);
            updateVolumeParams(params.volumeParams.resolution, params.volumeParams.voxelSize,
                               params.volumeParams.tsdfTruncDist, params.volumeParams.pose,
                               params.truncateThreshold);
            updateICPParams(params.icpDistThresh, params.bilateral_sigma_depth);

            if (sourceType == Type::DEPTH_KINECT2)
            {
                Matx<float, 1, 5> distCoeffs;
                distCoeffs(0) = Kinect2Params::k1;
                distCoeffs(1) = Kinect2Params::k2;
                distCoeffs(4) = Kinect2Params::k3;

                initUndistortRectifyMap(params.intr, distCoeffs, cv::noArray(), params.intr,
                                        params.frameSize, CV_16SC2, undistortMap1, undistortMap2);
            }
        }
    }

    void updateParams(kinfu::Params& params)
    {
        if (vc.isOpened())
        {
            updateIntrinsics(params.intr, params.frameSize, params.depthFactor);
            updateVolumeParams(params.volumeDims, params.voxelSize,
                               params.tsdf_trunc_dist, params.volumePose, params.truncateThreshold);
            updateICPParams(params.icpDistThresh, params.bilateral_sigma_depth);

            if (sourceType == Type::DEPTH_KINECT2)
            {
                Matx<float, 1, 5> distCoeffs;
                distCoeffs(0) = Kinect2Params::k1;
                distCoeffs(1) = Kinect2Params::k2;
                distCoeffs(4) = Kinect2Params::k3;

                initUndistortRectifyMap(params.intr, distCoeffs, cv::noArray(), params.intr,
                                        params.frameSize, CV_16SC2, undistortMap1, undistortMap2);
            }
        }
    }

    std::vector<std::string> depthFileList;
    size_t frameIdx;
    VideoCapture vc;
    UMat undistortMap1, undistortMap2;
    Type sourceType;
};
}  // namespace io_utils

}  // namespace cv
#endif /* ifndef OPENCV_RGBS_IO_UTILS_HPP */
@@ -0,0 +1,260 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

// This code is also subject to the license terms in the LICENSE_KinectFusion.md file found in this
// module's directory

#include <fstream>
#include <iostream>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/rgbd/large_kinfu.hpp>

#include "io_utils.hpp"

using namespace cv;
using namespace cv::kinfu;
using namespace cv::large_kinfu;
using namespace cv::io_utils;

#ifdef HAVE_OPENCV_VIZ
#include <opencv2/viz.hpp>
#endif

#ifdef HAVE_OPENCV_VIZ
const std::string vizWindowName = "cloud";

struct PauseCallbackArgs
{
    PauseCallbackArgs(LargeKinfu& _largeKinfu) : largeKinfu(_largeKinfu) {}

    LargeKinfu& largeKinfu;
};

void pauseCallback(const viz::MouseEvent& me, void* args);
void pauseCallback(const viz::MouseEvent& me, void* args)
{
    if (me.type == viz::MouseEvent::Type::MouseMove ||
        me.type == viz::MouseEvent::Type::MouseScrollDown ||
        me.type == viz::MouseEvent::Type::MouseScrollUp)
    {
        PauseCallbackArgs pca = *((PauseCallbackArgs*)(args));
        viz::Viz3d window(vizWindowName);
        UMat rendered;
        pca.largeKinfu.render(rendered, window.getViewerPose().matrix);
        imshow("render", rendered);
        waitKey(1);
    }
}
#endif

static const char* keys = {
    "{help h usage ? | | print this message }"
    "{depth | | Path to depth.txt file listing a set of depth images }"
    "{camera |0| Index of depth camera to be used as a depth source }"
    "{coarse | | Run on coarse settings (fast but ugly) or on default (slow but looks better),"
    " in coarse mode points and normals are displayed }"
    "{idle | | Do not run LargeKinfu, just display depth frames }"
    "{record | | Write depth frames to specified file list"
    " (the same format as for the 'depth' key) }"
};

static const std::string message =
    "\nThis demo uses live depth input or RGB-D dataset taken from"
    "\nhttps://vision.in.tum.de/data/datasets/rgbd-dataset"
    "\nto demonstrate Submap based large environment reconstruction"
    "\nThis module uses the newer hashtable based TSDFVolume (relatively fast) for larger "
    "reconstructions by default\n";

int main(int argc, char** argv)
{
    bool coarse = false;
    bool idle = false;
    std::string recordPath;

    CommandLineParser parser(argc, argv, keys);
    parser.about(message);

    if (!parser.check())
    {
        parser.printMessage();
        parser.printErrors();
        return -1;
    }

    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    if (parser.has("coarse"))
    {
        coarse = true;
    }
    if (parser.has("record"))
    {
        recordPath = parser.get<String>("record");
    }
    if (parser.has("idle"))
    {
        idle = true;
    }

    Ptr<DepthSource> ds;
    if (parser.has("depth"))
        ds = makePtr<DepthSource>(parser.get<String>("depth"));
    else
        ds = makePtr<DepthSource>(parser.get<int>("camera"));

    if (ds->empty())
    {
        std::cerr << "Failed to open depth source" << std::endl;
        parser.printMessage();
        return -1;
    }

    Ptr<DepthWriter> depthWriter;
    if (!recordPath.empty())
        depthWriter = makePtr<DepthWriter>(recordPath);

    Ptr<large_kinfu::Params> params;
    Ptr<LargeKinfu> largeKinfu;

    params = large_kinfu::Params::hashTSDFParams(coarse);

    // These params can be different for each depth sensor
    ds->updateParams(*params);

    // Disabled until an OpenCL-accelerated HashTSDF is available
    cv::setUseOptimized(false);

    if (!idle)
        largeKinfu = LargeKinfu::create(params);

#ifdef HAVE_OPENCV_VIZ
    cv::viz::Viz3d window(vizWindowName);
    window.setViewerPose(Affine3f::Identity());
    bool pause = false;
#endif

    UMat rendered;
    UMat points;
    UMat normals;

    int64 prevTime = getTickCount();

    for (UMat frame = ds->getDepth(); !frame.empty(); frame = ds->getDepth())
    {
        if (depthWriter)
            depthWriter->append(frame);

#ifdef HAVE_OPENCV_VIZ
        if (pause)
        {
            // doesn't happen in idle mode
            largeKinfu->getCloud(points, normals);
            if (!points.empty() && !normals.empty())
            {
                viz::WCloud cloudWidget(points, viz::Color::white());
                viz::WCloudNormals cloudNormals(points, normals, /*level*/ 1, /*scale*/ 0.05,
                                                viz::Color::gray());
                window.showWidget("cloud", cloudWidget);
                window.showWidget("normals", cloudNormals);

                Vec3d volSize = largeKinfu->getParams().volumeParams.voxelSize *
                                Vec3d(largeKinfu->getParams().volumeParams.resolution);
                window.showWidget("cube", viz::WCube(Vec3d::all(0), volSize),
                                  largeKinfu->getParams().volumeParams.pose);
                PauseCallbackArgs pca(*largeKinfu);
                window.registerMouseCallback(pauseCallback, (void*)&pca);
                window.showWidget("text",
                                  viz::WText(cv::String("Move camera in this window. "
                                                        "Close the window or press Q to resume"),
                                             Point()));
                window.spin();
                window.removeWidget("text");
                window.removeWidget("cloud");
                window.removeWidget("normals");
                window.registerMouseCallback(0);
            }

            pause = false;
        }
        else
#endif
        {
            UMat cvt8;
            float depthFactor = params->depthFactor;
            convertScaleAbs(frame, cvt8, 0.25 * 256. / depthFactor);
            if (!idle)
            {
                imshow("depth", cvt8);

                if (!largeKinfu->update(frame))
                {
                    largeKinfu->reset();
                    std::cout << "reset" << std::endl;
                }
#ifdef HAVE_OPENCV_VIZ
                else
                {
                    if (coarse)
                    {
                        largeKinfu->getCloud(points, normals);
                        if (!points.empty() && !normals.empty())
                        {
                            viz::WCloud cloudWidget(points, viz::Color::white());
                            viz::WCloudNormals cloudNormals(points, normals, /*level*/ 1,
                                                            /*scale*/ 0.05, viz::Color::gray());
                            window.showWidget("cloud", cloudWidget);
                            window.showWidget("normals", cloudNormals);
                        }
                    }

                    // window.showWidget("worldAxes", viz::WCoordinateSystem());
                    Vec3d volSize = largeKinfu->getParams().volumeParams.voxelSize *
                                    largeKinfu->getParams().volumeParams.resolution;
                    window.showWidget("cube", viz::WCube(Vec3d::all(0), volSize),
                                      largeKinfu->getParams().volumeParams.pose);
                    window.setViewerPose(largeKinfu->getPose());
                    window.spinOnce(1, true);
                }
#endif

                largeKinfu->render(rendered);
            }
            else
            {
                rendered = cvt8;
            }
        }

        int64 newTime = getTickCount();
        putText(rendered,
                cv::format("FPS: %2d press R to reset, P to pause, Q to quit",
                           (int)(getTickFrequency() / (newTime - prevTime))),
                Point(0, rendered.rows - 1), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 255));
        prevTime = newTime;
        imshow("render", rendered);

        int c = waitKey(1);
        switch (c)
        {
            case 'r':
                if (!idle)
                    largeKinfu->reset();
                break;
            case 'q': return 0;
#ifdef HAVE_OPENCV_VIZ
            case 'p':
                if (!idle)
                    pause = true;
#endif
            default: break;
        }
    }

    return 0;
}
@@ -0,0 +1,361 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

// This code is also subject to the license terms in the LICENSE_KinectFusion.md file found in this
// module's directory

#include "fast_icp.hpp"
#include "hash_tsdf.hpp"
#include "kinfu_frame.hpp"
#include "precomp.hpp"
#include "submap.hpp"
#include "tsdf.hpp"

namespace cv
{
namespace large_kinfu
{
using namespace kinfu;

Ptr<Params> Params::defaultParams()
{
    Params p;

    //! Frame parameters
    {
        p.frameSize = Size(640, 480);

        float fx, fy, cx, cy;
        fx = fy = 525.f;
        cx = p.frameSize.width / 2.0f - 0.5f;
        cy = p.frameSize.height / 2.0f - 0.5f;
        p.intr = Matx33f(fx, 0, cx, 0, fy, cy, 0, 0, 1);

        // 5000 for the 16-bit PNG files
        // 1 for the 32-bit float images in the ROS bag files
        p.depthFactor = 5000;

        // sigma_depth is scaled by depthFactor when calling bilateral filter
        p.bilateral_sigma_depth = 0.04f;  // meter
        p.bilateral_sigma_spatial = 4.5;  // pixels
        p.bilateral_kernel_size = 7;      // pixels
        p.truncateThreshold = 0.f;        // meters
    }
    //! ICP parameters
    {
        p.icpAngleThresh = (float)(30. * CV_PI / 180.);  // radians
        p.icpDistThresh = 0.1f;                          // meters

        p.icpIterations = { 10, 5, 4 };
        p.pyramidLevels = (int)p.icpIterations.size();
    }
    //! Volume parameters
    {
        float volumeSize = 3.0f;
        p.volumeParams.type = VolumeType::TSDF;
        p.volumeParams.resolution = Vec3i::all(512);
        p.volumeParams.pose = Affine3f().translate(Vec3f(-volumeSize / 2.f, -volumeSize / 2.f, 0.5f));
        p.volumeParams.voxelSize = volumeSize / 512.f;                  // meters
        p.volumeParams.tsdfTruncDist = 7 * p.volumeParams.voxelSize;    // about 0.04f in meters
        p.volumeParams.maxWeight = 64;                                  // frames
        p.volumeParams.raycastStepFactor = 0.25f;                       // in voxel sizes
        p.volumeParams.depthTruncThreshold = p.truncateThreshold;
    }
    //! Unused parameters
    p.tsdf_min_camera_movement = 0.f;  // meters, disabled
    p.lightPose = Vec3f::all(0.f);     // meters

    return makePtr<Params>(p);
}

Ptr<Params> Params::coarseParams()
{
    Ptr<Params> p = defaultParams();

    //! Reduce ICP iterations and pyramid levels
    {
        p->icpIterations = { 5, 3, 2 };
        p->pyramidLevels = (int)p->icpIterations.size();
    }
    //! Make the volume coarse
    {
        float volumeSize = 3.f;
        p->volumeParams.resolution = Vec3i::all(128);  // number of voxels
        p->volumeParams.voxelSize = volumeSize / 128.f;
        p->volumeParams.tsdfTruncDist = 2 * p->volumeParams.voxelSize;  // 0.04f in meters
        p->volumeParams.raycastStepFactor = 0.75f;                      // in voxel sizes
    }
    return p;
}
Ptr<Params> Params::hashTSDFParams(bool isCoarse)
{
    Ptr<Params> p;
    if (isCoarse)
        p = coarseParams();
    else
        p = defaultParams();

    p->volumeParams.type = VolumeType::HASHTSDF;
    p->volumeParams.depthTruncThreshold = rgbd::Odometry::DEFAULT_MAX_DEPTH();
    p->volumeParams.unitResolution = 16;
    return p;
}

// MatType should be Mat or UMat
template<typename MatType>
class LargeKinfuImpl : public LargeKinfu
{
public:
    LargeKinfuImpl(const Params& _params);
    virtual ~LargeKinfuImpl();

    const Params& getParams() const CV_OVERRIDE;

    void render(OutputArray image, const Matx44f& cameraPose) const CV_OVERRIDE;

    virtual void getCloud(OutputArray points, OutputArray normals) const CV_OVERRIDE;
    void getPoints(OutputArray points) const CV_OVERRIDE;
    void getNormals(InputArray points, OutputArray normals) const CV_OVERRIDE;

    void reset() CV_OVERRIDE;

    const Affine3f getPose() const CV_OVERRIDE;

    bool update(InputArray depth) CV_OVERRIDE;

    bool updateT(const MatType& depth);

private:
    Params params;

    cv::Ptr<ICP> icp;
    //! TODO: Submap manager and Pose graph optimizer
    cv::Ptr<SubmapManager<MatType>> submapMgr;

    int frameCounter;
    Affine3f pose;
};

template<typename MatType>
LargeKinfuImpl<MatType>::LargeKinfuImpl(const Params& _params)
    : params(_params)
{
    icp = makeICP(params.intr, params.icpIterations, params.icpAngleThresh, params.icpDistThresh);

    submapMgr = cv::makePtr<SubmapManager<MatType>>(params.volumeParams);
    reset();
    submapMgr->createNewSubmap(true);
}

template<typename MatType>
void LargeKinfuImpl<MatType>::reset()
{
    frameCounter = 0;
    pose = Affine3f::Identity();
    submapMgr->reset();
}

template<typename MatType>
LargeKinfuImpl<MatType>::~LargeKinfuImpl()
{
}

template<typename MatType>
const Params& LargeKinfuImpl<MatType>::getParams() const
{
    return params;
}

template<typename MatType>
const Affine3f LargeKinfuImpl<MatType>::getPose() const
{
    return pose;
}

template<>
bool LargeKinfuImpl<Mat>::update(InputArray _depth)
{
    CV_Assert(!_depth.empty() && _depth.size() == params.frameSize);

    Mat depth;
    if (_depth.isUMat())
    {
        _depth.copyTo(depth);
        return updateT(depth);
    }
    else
    {
        return updateT(_depth.getMat());
    }
}

template<>
bool LargeKinfuImpl<UMat>::update(InputArray _depth)
{
    CV_Assert(!_depth.empty() && _depth.size() == params.frameSize);

    UMat depth;
    if (!_depth.isUMat())
    {
        _depth.copyTo(depth);
        return updateT(depth);
    }
    else
    {
        return updateT(_depth.getUMat());
    }
}

template<typename MatType>
bool LargeKinfuImpl<MatType>::updateT(const MatType& _depth)
{
    CV_TRACE_FUNCTION();

    MatType depth;
    if (_depth.type() != DEPTH_TYPE)
        _depth.convertTo(depth, DEPTH_TYPE);
    else
        depth = _depth;

    std::vector<MatType> newPoints, newNormals;
    makeFrameFromDepth(depth, newPoints, newNormals, params.intr, params.pyramidLevels, params.depthFactor,
                       params.bilateral_sigma_depth, params.bilateral_sigma_spatial, params.bilateral_kernel_size,
                       params.truncateThreshold);

    std::cout << "Current frameID: " << frameCounter << "\n";
    for (const auto& it : submapMgr->activeSubmaps)
    {
        int currTrackingId = it.first;
        auto submapData = it.second;
        Ptr<Submap<MatType>> currTrackingSubmap = submapMgr->getSubmap(currTrackingId);
        Affine3f affine;
        std::cout << "Current tracking ID: " << currTrackingId << std::endl;

        if(frameCounter == 0) //! Only one current tracking map
        {
            currTrackingSubmap->integrate(depth, params.depthFactor, params.intr, frameCounter);
            currTrackingSubmap->pyrPoints = newPoints;
            currTrackingSubmap->pyrNormals = newNormals;
            continue;
        }

        //1. Track
        bool trackingSuccess =
            icp->estimateTransform(affine, currTrackingSubmap->pyrPoints, currTrackingSubmap->pyrNormals, newPoints, newNormals);
        if (trackingSuccess)
            currTrackingSubmap->composeCameraPose(affine);
        else
        {
            std::cout << "Tracking failed" << std::endl;
            continue;
        }

        //2. Integrate
        if(submapData.type == SubmapManager<MatType>::Type::NEW || submapData.type == SubmapManager<MatType>::Type::CURRENT)
        {
            float rnorm = (float)cv::norm(affine.rvec());
            float tnorm = (float)cv::norm(affine.translation());
            // We do not integrate volume if camera does not move
            if ((rnorm + tnorm) / 2 >= params.tsdf_min_camera_movement)
                currTrackingSubmap->integrate(depth, params.depthFactor, params.intr, frameCounter);
        }

        //3. Raycast
        currTrackingSubmap->raycast(currTrackingSubmap->cameraPose, params.intr, params.frameSize, currTrackingSubmap->pyrPoints[0], currTrackingSubmap->pyrNormals[0]);

        currTrackingSubmap->updatePyrPointsNormals(params.pyramidLevels);

        std::cout << "Submap: " << currTrackingId << " Total allocated blocks: " << currTrackingSubmap->getTotalAllocatedBlocks() << "\n";
        std::cout << "Submap: " << currTrackingId << " Visible blocks: " << currTrackingSubmap->getVisibleBlocks(frameCounter) << "\n";
    }
    //4. Update map
    bool isMapUpdated = submapMgr->updateMap(frameCounter, newPoints, newNormals);

    if(isMapUpdated)
    {
        // TODO: Convert constraints to posegraph
        PoseGraph poseGraph = submapMgr->MapToPoseGraph();
        std::cout << "Created posegraph\n";
        Optimizer::optimize(poseGraph);
        submapMgr->PoseGraphToMap(poseGraph);
    }
    std::cout << "Number of submaps: " << submapMgr->submapList.size() << "\n";

    frameCounter++;
    return true;
}

template<typename MatType>
void LargeKinfuImpl<MatType>::render(OutputArray image, const Matx44f& _cameraPose) const
{
    CV_TRACE_FUNCTION();

    Affine3f cameraPose(_cameraPose);

    auto currSubmap = submapMgr->getCurrentSubmap();
    const Affine3f id = Affine3f::Identity();
    if ((cameraPose.rotation() == pose.rotation() && cameraPose.translation() == pose.translation()) ||
        (cameraPose.rotation() == id.rotation() && cameraPose.translation() == id.translation()))
    {
        //! TODO: Can render be dependent on current submap
        renderPointsNormals(currSubmap->pyrPoints[0], currSubmap->pyrNormals[0], image, params.lightPose);
    }
    else
    {
        MatType points, normals;
        currSubmap->raycast(cameraPose, params.intr, params.frameSize, points, normals);
        renderPointsNormals(points, normals, image, params.lightPose);
    }
}

template<typename MatType>
void LargeKinfuImpl<MatType>::getCloud(OutputArray p, OutputArray n) const
{
    auto currSubmap = submapMgr->getCurrentSubmap();
    currSubmap->volume.fetchPointsNormals(p, n);
}

template<typename MatType>
void LargeKinfuImpl<MatType>::getPoints(OutputArray points) const
{
    auto currSubmap = submapMgr->getCurrentSubmap();
    currSubmap->volume.fetchPointsNormals(points, noArray());
}

template<typename MatType>
void LargeKinfuImpl<MatType>::getNormals(InputArray points, OutputArray normals) const
{
    auto currSubmap = submapMgr->getCurrentSubmap();
    currSubmap->volume.fetchNormals(points, normals);
}

// importing class

#ifdef OPENCV_ENABLE_NONFREE

Ptr<LargeKinfu> LargeKinfu::create(const Ptr<Params>& params)
{
    CV_Assert((int)params->icpIterations.size() == params->pyramidLevels);
    CV_Assert(params->intr(0, 1) == 0 && params->intr(1, 0) == 0 && params->intr(2, 0) == 0 && params->intr(2, 1) == 0 &&
              params->intr(2, 2) == 1);
#ifdef HAVE_OPENCL
    if (cv::ocl::useOpenCL())
        return makePtr<LargeKinfuImpl<UMat>>(*params);
#endif
    return makePtr<LargeKinfuImpl<Mat>>(*params);
}

#else
Ptr<LargeKinfu> LargeKinfu::create(const Ptr<Params>& /* params */)
{
    CV_Error(Error::StsNotImplemented,
             "This algorithm is patented and is excluded in this configuration; "
             "Set OPENCV_ENABLE_NONFREE CMake option and rebuild the library");
}
#endif
}  // namespace large_kinfu
}  // namespace cv
@@ -0,0 +1,169 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#include "pose_graph.hpp"

#include <iostream>
#include <limits>
#include <unordered_set>
#include <vector>

#if defined(CERES_FOUND)
#include <ceres/ceres.h>
#endif

namespace cv
{
namespace kinfu
{
bool PoseGraph::isValid() const
{
    int numNodes = getNumNodes();
    int numEdges = getNumEdges();

    if (numNodes <= 0 || numEdges <= 0)
        return false;

    std::unordered_set<int> nodesVisited;
    std::vector<int> nodesToVisit;

    nodesToVisit.push_back(nodes.at(0).getId());

    bool isGraphConnected = false;
    while (!nodesToVisit.empty())
    {
        int currNodeId = nodesToVisit.back();
        nodesToVisit.pop_back();
        std::cout << "Visiting node: " << currNodeId << "\n";
        nodesVisited.insert(currNodeId);
        // Since each node does not maintain its neighbor list
        for (int i = 0; i < numEdges; i++)
        {
            const PoseGraphEdge& potentialEdge = edges.at(i);
            int nextNodeId = -1;

            if (potentialEdge.getSourceNodeId() == currNodeId)
            {
                nextNodeId = potentialEdge.getTargetNodeId();
            }
            else if (potentialEdge.getTargetNodeId() == currNodeId)
            {
                nextNodeId = potentialEdge.getSourceNodeId();
            }
            if (nextNodeId != -1)
            {
                std::cout << "Next node: " << nextNodeId << " " << nodesVisited.count(nextNodeId)
                          << std::endl;
                if (nodesVisited.count(nextNodeId) == 0)
                {
                    nodesToVisit.push_back(nextNodeId);
                }
            }
        }
    }

    isGraphConnected = (int(nodesVisited.size()) == numNodes);
    std::cout << "nodesVisited: " << nodesVisited.size()
              << " IsGraphConnected: " << isGraphConnected << std::endl;
    bool invalidEdgeNode = false;
    for (int i = 0; i < numEdges; i++)
    {
        const PoseGraphEdge& edge = edges.at(i);
        // edges have spurious source/target nodes
        if ((nodesVisited.count(edge.getSourceNodeId()) != 1) ||
            (nodesVisited.count(edge.getTargetNodeId()) != 1))
        {
            invalidEdgeNode = true;
            break;
        }
    }
    return isGraphConnected && !invalidEdgeNode;
}

#if defined(CERES_FOUND) && defined(HAVE_EIGEN)
void Optimizer::createOptimizationProblem(PoseGraph& poseGraph, ceres::Problem& problem)
{
    int numEdges = poseGraph.getNumEdges();
    int numNodes = poseGraph.getNumNodes();
    if (numEdges == 0)
    {
        CV_Error(Error::StsBadArg, "PoseGraph has no edges, no optimization to be done");
        return;
    }

    ceres::LossFunction* lossFunction = nullptr;
    // TODO: Experiment with SE3 parameterization
    ceres::LocalParameterization* quatLocalParameterization =
        new ceres::EigenQuaternionParameterization;

    for (int currEdgeNum = 0; currEdgeNum < numEdges; ++currEdgeNum)
    {
        const PoseGraphEdge& currEdge = poseGraph.edges.at(currEdgeNum);
        int sourceNodeId = currEdge.getSourceNodeId();
        int targetNodeId = currEdge.getTargetNodeId();
        Pose3d& sourcePose = poseGraph.nodes.at(sourceNodeId).se3Pose;
        Pose3d& targetPose = poseGraph.nodes.at(targetNodeId).se3Pose;

        const Matx66f& informationMatrix = currEdge.information;

        ceres::CostFunction* costFunction = Pose3dErrorFunctor::create(
            Pose3d(currEdge.transformation.rotation(), currEdge.transformation.translation()),
            informationMatrix);

        problem.AddResidualBlock(costFunction, lossFunction, sourcePose.t.data(),
                                 sourcePose.r.coeffs().data(), targetPose.t.data(),
                                 targetPose.r.coeffs().data());
        problem.SetParameterization(sourcePose.r.coeffs().data(), quatLocalParameterization);
        problem.SetParameterization(targetPose.r.coeffs().data(), quatLocalParameterization);
    }

    for (int currNodeId = 0; currNodeId < numNodes; ++currNodeId)
    {
        PoseGraphNode& currNode = poseGraph.nodes.at(currNodeId);
        if (currNode.isPoseFixed())
        {
            problem.SetParameterBlockConstant(currNode.se3Pose.t.data());
            problem.SetParameterBlockConstant(currNode.se3Pose.r.coeffs().data());
        }
    }
}
#endif

void Optimizer::optimize(PoseGraph& poseGraph)
{
    PoseGraph poseGraphOriginal = poseGraph;

    if (!poseGraphOriginal.isValid())
    {
        CV_Error(Error::StsBadArg,
                 "Invalid PoseGraph that is either not connected or has invalid nodes");
        return;
    }

    int numNodes = poseGraph.getNumNodes();
    int numEdges = poseGraph.getNumEdges();
    std::cout << "Optimizing PoseGraph with " << numNodes << " nodes and " << numEdges << " edges"
              << std::endl;

#if defined(CERES_FOUND) && defined(HAVE_EIGEN)
    ceres::Problem problem;
    createOptimizationProblem(poseGraph, problem);

    ceres::Solver::Options options;
    options.max_num_iterations = 100;
    options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;

    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);

    std::cout << summary.FullReport() << '\n';

    std::cout << "Is solution usable: " << summary.IsSolutionUsable() << std::endl;
#else
    CV_Error(Error::StsNotImplemented, "Ceres and Eigen required for Pose Graph optimization");
#endif
}

}  // namespace kinfu
}  // namespace cv
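For reference, each residual block added above evaluates the relative-pose error implemented by Pose3dErrorFunctor (declared in pose_graph.hpp below). A sketch of the 6-vector it minimizes, written out from the functor body (up to the square-root information weighting), is:

\[
r(x_s, x_t) = \hat{\Sigma}^{1/2}
\begin{bmatrix}
q_t^{-1}\,(t_t - t_s) - \hat{t} \\
2\,\operatorname{vec}\!\bigl(\hat{q}\,(q_t^{-1} q_s)^{-1}\bigr)
\end{bmatrix}
\]

where \((t_s, q_s)\) and \((t_t, q_t)\) are the source and target node poses, \((\hat{t}, \hat{q})\) is the measured relative transform stored on the edge, and \(\hat{\Sigma}^{1/2}\) is the square-root information matrix.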
@@ -0,0 +1,321 @@
#ifndef OPENCV_RGBD_GRAPH_NODE_H
#define OPENCV_RGBD_GRAPH_NODE_H

#include <map>
#include <unordered_map>

#include "opencv2/core/affine.hpp"
#if defined(HAVE_EIGEN)
#include <Eigen/Core>
#include <Eigen/Geometry>
#include "opencv2/core/eigen.hpp"
#endif

#if defined(CERES_FOUND)
#include <ceres/ceres.h>
#endif

namespace cv
{
namespace kinfu
{
/*! \class GraphNode
 *  \brief Defines a node/variable that is optimizable in a posegraph
 *
 *  Detailed description
 */
#if defined(HAVE_EIGEN)
struct Pose3d
{
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    Eigen::Vector3d t;
    Eigen::Quaterniond r;

    Pose3d()
    {
        t.setZero();
        r.setIdentity();
    };
    Pose3d(const Eigen::Matrix3d& rotation, const Eigen::Vector3d& translation)
        : t(translation), r(Eigen::Quaterniond(rotation))
    {
        normalizeRotation();
    }

    Pose3d(const Matx33d& rotation, const Vec3d& translation)
    {
        Eigen::Matrix3d R;
        cv2eigen(rotation, R);
        cv2eigen(translation, t);
        r = Eigen::Quaterniond(R);
        normalizeRotation();
    }

    explicit Pose3d(const Matx44f& pose)
    {
        Matx33d rotation(pose.val[0], pose.val[1], pose.val[2], pose.val[4], pose.val[5],
                         pose.val[6], pose.val[8], pose.val[9], pose.val[10]);
        Vec3d translation(pose.val[3], pose.val[7], pose.val[11]);
        // Assign to *this; constructing an unnamed temporary here would leave the members untouched
        *this = Pose3d(rotation, translation);
    }

    // NOTE: Eigen overloads quaternion multiplication appropriately
    inline Pose3d operator*(const Pose3d& otherPose) const
    {
        Pose3d out(*this);
        out.t += r * otherPose.t;
        out.r *= otherPose.r;
        out.normalizeRotation();
        return out;
    }

    inline Pose3d& operator*=(const Pose3d& otherPose)
    {
        // Rotate the incoming translation into this frame, consistent with operator*
        t += r * otherPose.t;
        r *= otherPose.r;
        normalizeRotation();
        return *this;
    }

    inline Pose3d inverse() const
    {
        Pose3d out;
        out.r = r.conjugate();
        out.t = out.r * (t * -1.0);
        return out;
    }

    inline void normalizeRotation()
    {
        if (r.w() < 0)
            r.coeffs() *= -1.0;
        r.normalize();
    }
};
#endif

struct PoseGraphNode
{
public:
    explicit PoseGraphNode(int _nodeId, const Affine3f& _pose)
        : nodeId(_nodeId), isFixed(false), pose(_pose)
    {
#if defined(HAVE_EIGEN)
        se3Pose = Pose3d(_pose.rotation(), _pose.translation());
#endif
    }
    virtual ~PoseGraphNode() = default;

    int getId() const { return nodeId; }
    inline Affine3f getPose() const
    {
        return pose;
    }
    void setPose(const Affine3f& _pose)
    {
        pose = _pose;
#if defined(HAVE_EIGEN)
        se3Pose = Pose3d(pose.rotation(), pose.translation());
#endif
    }
#if defined(HAVE_EIGEN)
    void setPose(const Pose3d& _pose)
    {
        se3Pose = _pose;
        const Eigen::Matrix3d& rotation = se3Pose.r.toRotationMatrix();
        const Eigen::Vector3d& translation = se3Pose.t;
        Matx33d rot;
        Vec3d trans;
        eigen2cv(rotation, rot);
        eigen2cv(translation, trans);
        Affine3d poseMatrix(rot, trans);
        pose = poseMatrix;
    }
#endif
    void setFixed(bool val = true) { isFixed = val; }
    bool isPoseFixed() const { return isFixed; }

public:
    int nodeId;
    bool isFixed;
    Affine3f pose;
#if defined(HAVE_EIGEN)
    Pose3d se3Pose;
#endif
};

/*! \class PoseGraphEdge
 *  \brief Defines the constraints between two PoseGraphNodes
 *
 *  Detailed description
 */
struct PoseGraphEdge
{
public:
    PoseGraphEdge(int _sourceNodeId, int _targetNodeId, const Affine3f& _transformation,
                  const Matx66f& _information = Matx66f::eye())
        : sourceNodeId(_sourceNodeId),
          targetNodeId(_targetNodeId),
          transformation(_transformation),
          information(_information)
    {
    }
    virtual ~PoseGraphEdge() = default;

    int getSourceNodeId() const { return sourceNodeId; }
    int getTargetNodeId() const { return targetNodeId; }

    bool operator==(const PoseGraphEdge& edge)
    {
        if ((edge.getSourceNodeId() == sourceNodeId && edge.getTargetNodeId() == targetNodeId) ||
            (edge.getSourceNodeId() == targetNodeId && edge.getTargetNodeId() == sourceNodeId))
            return true;
        return false;
    }

public:
    int sourceNodeId;
    int targetNodeId;
    Affine3f transformation;
    Matx66f information;
};

//! @brief Reference: A tutorial on SE(3) transformation parameterizations and on-manifold
//! optimization Jose Luis Blanco Compactly represents the jacobian of the SE3 generator
// clang-format off
/* static const std::array<Matx44f, 6> generatorJacobian = { */
/*     // alpha */
/*     Matx44f(0, 0, 0, 0, */
/*             0, 0, -1, 0, */
/*             0, 1, 0, 0, */
/*             0, 0, 0, 0), */
/*     // beta */
/*     Matx44f( 0, 0, 1, 0, */
/*              0, 0, 0, 0, */
/*             -1, 0, 0, 0, */
/*              0, 0, 0, 0), */
/*     // gamma */
/*     Matx44f(0, -1, 0, 0, */
/*             1, 0, 0, 0, */
/*             0, 0, 0, 0, */
/*             0, 0, 0, 0), */
/*     // x */
/*     Matx44f(0, 0, 0, 1, */
/*             0, 0, 0, 0, */
/*             0, 0, 0, 0, */
/*             0, 0, 0, 0), */
/*     // y */
/*     Matx44f(0, 0, 0, 0, */
/*             0, 0, 0, 1, */
/*             0, 0, 0, 0, */
/*             0, 0, 0, 0), */
/*     // z */
/*     Matx44f(0, 0, 0, 0, */
/*             0, 0, 0, 0, */
/*             0, 0, 0, 1, */
/*             0, 0, 0, 0) */
/* }; */
// clang-format on

class PoseGraph
{
public:
    typedef std::vector<PoseGraphNode> NodeVector;
    typedef std::vector<PoseGraphEdge> EdgeVector;

    explicit PoseGraph(){};
    virtual ~PoseGraph() = default;

    //! PoseGraph can be copied/cloned
    PoseGraph(const PoseGraph& _poseGraph) = default;
    PoseGraph& operator=(const PoseGraph& _poseGraph) = default;

    void addNode(const PoseGraphNode& node) { nodes.push_back(node); }
    void addEdge(const PoseGraphEdge& edge) { edges.push_back(edge); }

    bool nodeExists(int nodeId) const
    {
        return std::find_if(nodes.begin(), nodes.end(), [nodeId](const PoseGraphNode& currNode) {
                   return currNode.getId() == nodeId;
               }) != nodes.end();
    }

    bool isValid() const;

    int getNumNodes() const { return int(nodes.size()); }
    int getNumEdges() const { return int(edges.size()); }

public:
    NodeVector nodes;
    EdgeVector edges;
};

namespace Optimizer
{
void optimize(PoseGraph& poseGraph);

#if defined(CERES_FOUND)
void createOptimizationProblem(PoseGraph& poseGraph, ceres::Problem& problem);

//! Error Functor required for Ceres to obtain an auto differentiable cost function
class Pose3dErrorFunctor
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    Pose3dErrorFunctor(const Pose3d& _poseMeasurement, const Matx66d& _sqrtInformation)
        : poseMeasurement(_poseMeasurement)
    {
        cv2eigen(_sqrtInformation, sqrtInfo);
    }
    Pose3dErrorFunctor(const Pose3d& _poseMeasurement,
                       const Eigen::Matrix<double, 6, 6>& _sqrtInformation)
        : poseMeasurement(_poseMeasurement), sqrtInfo(_sqrtInformation)
    {
    }

    template<typename T>
    bool operator()(const T* const _pSourceTrans, const T* const _pSourceQuat,
                    const T* const _pTargetTrans, const T* const _pTargetQuat, T* _pResidual) const
    {
        Eigen::Map<const Eigen::Matrix<T, 3, 1>> sourceTrans(_pSourceTrans);
        Eigen::Map<const Eigen::Matrix<T, 3, 1>> targetTrans(_pTargetTrans);
        Eigen::Map<const Eigen::Quaternion<T>> sourceQuat(_pSourceQuat);
        Eigen::Map<const Eigen::Quaternion<T>> targetQuat(_pTargetQuat);
        Eigen::Map<Eigen::Matrix<T, 6, 1>> residual(_pResidual);

        Eigen::Quaternion<T> targetQuatInv = targetQuat.conjugate();

        Eigen::Quaternion<T> relativeQuat = targetQuatInv * sourceQuat;
        Eigen::Matrix<T, 3, 1> relativeTrans = targetQuatInv * (targetTrans - sourceTrans);

        //! Definition should actually be relativeQuat * poseMeasurement.r.conjugate()
        Eigen::Quaternion<T> deltaRot =
            poseMeasurement.r.template cast<T>() * relativeQuat.conjugate();

        residual.template block<3, 1>(0, 0) = relativeTrans - poseMeasurement.t.template cast<T>();
        residual.template block<3, 1>(3, 0) = T(2.0) * deltaRot.vec();

        residual.applyOnTheLeft(sqrtInfo.template cast<T>());

        return true;
    }

    static ceres::CostFunction* create(const Pose3d& _poseMeasurement,
                                       const Matx66f& _sqrtInformation)
    {
        return new ceres::AutoDiffCostFunction<Pose3dErrorFunctor, 6, 3, 4, 3, 4>(
            new Pose3dErrorFunctor(_poseMeasurement, _sqrtInformation));
    }

private:
    const Pose3d poseMeasurement;
    Eigen::Matrix<double, 6, 6> sqrtInfo;
};
#endif

}  // namespace Optimizer

}  // namespace kinfu
}  // namespace cv
#endif /* ifndef OPENCV_RGBD_GRAPH_NODE_H */
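A minimal sketch of building and optimizing a pose graph with the classes declared above. The poses and the measured relative transform are made up for illustration; only the PoseGraph/PoseGraphNode/PoseGraphEdge/Optimizer API comes from this header, and optimize() raises an error unless OpenCV was built with Ceres (and Eigen).

void optimizeTwoNodeGraph()
{
    using namespace cv::kinfu;

    PoseGraph graph;

    cv::Affine3f pose0 = cv::Affine3f::Identity();
    cv::Affine3f pose1 = cv::Affine3f().translate(cv::Vec3f(0.1f, 0.f, 0.f));

    PoseGraphNode node0(0, pose0);
    node0.setFixed();                   // anchor the first submap pose
    PoseGraphNode node1(1, pose1);

    graph.addNode(node0);
    graph.addNode(node1);

    // Relative constraint, e.g. estimated by ICP between the two submaps
    cv::Affine3f measured = cv::Affine3f().translate(cv::Vec3f(0.12f, 0.f, 0.f));
    graph.addEdge(PoseGraphEdge(0, 1, measured));

    Optimizer::optimize(graph);         // updates the node se3Pose values in place
}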
@ -0,0 +1,159 @@ |
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#include <iostream>
#include <unordered_map>

#include "opencv2/core/base.hpp"
#include "opencv2/core/types.hpp"

#if defined(HAVE_EIGEN)
#include <Eigen/Core>
#include <Eigen/Sparse>
#include <Eigen/SparseCholesky>

#include "opencv2/core/eigen.hpp"
#endif

namespace cv
{
namespace kinfu
{
/*!
 * \class BlockSparseMat
 * Naive implementation of Sparse Block Matrix
 */
template<typename _Tp, int blockM, int blockN>
struct BlockSparseMat
{
    struct Point2iHash
    {
        size_t operator()(const cv::Point2i& point) const noexcept
        {
            size_t seed = 0;
            constexpr uint32_t GOLDEN_RATIO = 0x9e3779b9;
            seed ^= std::hash<int>()(point.x) + GOLDEN_RATIO + (seed << 6) + (seed >> 2);
            seed ^= std::hash<int>()(point.y) + GOLDEN_RATIO + (seed << 6) + (seed >> 2);
            return seed;
        }
    };
    typedef Matx<_Tp, blockM, blockN> MatType;
    typedef std::unordered_map<Point2i, MatType, Point2iHash> IDtoBlockValueMap;

    BlockSparseMat(int _nBlocks) : nBlocks(_nBlocks), ijValue() {}

    MatType& refBlock(int i, int j)
    {
        Point2i p(i, j);
        auto it = ijValue.find(p);
        if (it == ijValue.end())
        {
            it = ijValue.insert({ p, Matx<_Tp, blockM, blockN>::zeros() }).first;
        }
        return it->second;
    }

    Mat diagonal()
    {
        // Diagonal max length is the number of columns in the sparse matrix
        int diagLength = blockN * nBlocks;
        cv::Mat diag = cv::Mat::zeros(diagLength, 1, CV_32F);

        for (int i = 0; i < diagLength; i++)
        {
            diag.at<float>(i, 0) = refElem(i, i);
        }
        return diag;
    }

    float& refElem(int i, int j)
    {
        Point2i ib(i / blockM, j / blockN), iv(i % blockM, j % blockN);
        return refBlock(ib.x, ib.y)(iv.x, iv.y);
    }

#if defined(HAVE_EIGEN)
    Eigen::SparseMatrix<_Tp> toEigen() const
    {
        std::vector<Eigen::Triplet<double>> tripletList;
        tripletList.reserve(ijValue.size() * blockM * blockN);
        for (auto ijv : ijValue)
        {
            int xb = ijv.first.x, yb = ijv.first.y;
            MatType vblock = ijv.second;
            for (int i = 0; i < blockM; i++)
            {
                for (int j = 0; j < blockN; j++)
                {
                    float val = vblock(i, j);
                    if (std::abs(val) >= NON_ZERO_VAL_THRESHOLD)
                    {
                        tripletList.push_back(Eigen::Triplet<double>(blockM * xb + i, blockN * yb + j, val));
                    }
                }
            }
        }
        Eigen::SparseMatrix<_Tp> EigenMat(blockM * nBlocks, blockN * nBlocks);
        EigenMat.setFromTriplets(tripletList.begin(), tripletList.end());
        EigenMat.makeCompressed();

        return EigenMat;
    }
#endif
    size_t nonZeroBlocks() const { return ijValue.size(); }

    static constexpr float NON_ZERO_VAL_THRESHOLD = 0.0001f;
    int nBlocks;
    IDtoBlockValueMap ijValue;
};
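//! Minimal usage sketch (illustrative only; the variable names below are hypothetical):
//! build a 6x6-blocked matrix, touch a few entries and read back the diagonal.
/*
    BlockSparseMat<float, 6, 6> H(2);     // 2x2 grid of 6x6 blocks => a 12x12 matrix
    H.refBlock(0, 0) = Matx66f::eye();    // write a whole block
    H.refElem(7, 7)  = 3.0f;              // write a single scalar (allocates block (1,1))
    size_t nnz = H.nonZeroBlocks();       // == 2
    Mat d = H.diagonal();                 // 12x1 CV_32F column of diagonal entries
*/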

//! Function to solve a sparse linear system of equations HX = B
//! Requires Eigen
static bool sparseSolve(const BlockSparseMat<float, 6, 6>& H, const Mat& B, Mat& X, Mat& predB)
{
    bool result = false;
#if defined(HAVE_EIGEN)
    Eigen::SparseMatrix<float> bigA = H.toEigen();
    Eigen::VectorXf bigB;
    cv2eigen(B, bigB);

    Eigen::SparseMatrix<float> bigAtranspose = bigA.transpose();
    if (!bigA.isApprox(bigAtranspose))
    {
        CV_Error(Error::StsBadArg, "H matrix is not symmetrical");
        return result;
    }

    Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>> solver;

    solver.compute(bigA);
    if (solver.info() != Eigen::Success)
    {
        std::cout << "failed to eigen-decompose" << std::endl;
        result = false;
    }
    else
    {
        Eigen::VectorXf solutionX = solver.solve(bigB);
        Eigen::VectorXf predBEigen = bigA * solutionX;
        if (solver.info() != Eigen::Success)
        {
            std::cout << "failed to eigen-solve" << std::endl;
            result = false;
        }
        else
        {
            eigen2cv(solutionX, X);
            eigen2cv(predBEigen, predB);
            result = true;
        }
    }
#else
    std::cout << "no eigen library" << std::endl;
    CV_Error(Error::StsNotImplemented, "Eigen library required for matrix solve, dense solver is not implemented");
#endif
    return result;
}
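//! Illustrative use (assumes HAVE_EIGEN; names are hypothetical): solve a small symmetric
//! system H X = B and get back the predicted right-hand side.
/*
    BlockSparseMat<float, 6, 6> H(1);
    H.refBlock(0, 0) = Matx66f::eye() * 2.0f;   // H = 2*I (symmetric positive definite)
    Mat B = Mat::ones(6, 1, CV_32F);            // right-hand side
    Mat X, predB;
    if (sparseSolve(H, B, X, predB))
    {
        // X should be ~0.5 everywhere and predB ~= B
    }
*/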
}  // namespace kinfu
}  // namespace cv
@ -0,0 +1,544 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#ifndef __OPENCV_RGBD_SUBMAP_HPP__
#define __OPENCV_RGBD_SUBMAP_HPP__

#include <opencv2/core/cvdef.h>

#include <opencv2/core/affine.hpp>
#include <type_traits>
#include <vector>

#include "hash_tsdf.hpp"
#include "opencv2/core/mat.inl.hpp"
#include "pose_graph.hpp"

namespace cv
{
namespace kinfu
{
template<typename MatType>
class Submap
{
public:
    struct PoseConstraint
    {
        Affine3f estimatedPose;
        int weight;

        PoseConstraint() : weight(0) {}

        void accumulatePose(const Affine3f& _pose, int _weight = 1)
        {
            Matx44f accPose = estimatedPose.matrix * weight + _pose.matrix * _weight;
            weight += _weight;
            accPose /= float(weight);
            estimatedPose = Affine3f(accPose);
        }
    };
    typedef std::map<int, PoseConstraint> Constraints;

    Submap(int _id, const VolumeParams& volumeParams, const cv::Affine3f& _pose = cv::Affine3f::Identity(),
           int _startFrameId = 0)
        : id(_id), pose(_pose), cameraPose(Affine3f::Identity()), startFrameId(_startFrameId), volume(volumeParams)
    {
        std::cout << "Created volume\n";
    }
    virtual ~Submap() = default;

    virtual void integrate(InputArray _depth, float depthFactor, const cv::kinfu::Intr& intrinsics, const int currFrameId);
    virtual void raycast(const cv::Affine3f& cameraPose, const cv::kinfu::Intr& intrinsics, cv::Size frameSize,
                         OutputArray points, OutputArray normals);
    virtual void updatePyrPointsNormals(const int pyramidLevels);

    virtual int getTotalAllocatedBlocks() const { return int(volume.getTotalVolumeUnits()); }
    virtual int getVisibleBlocks(int currFrameId) const
    {
        return volume.getVisibleBlocks(currFrameId, FRAME_VISIBILITY_THRESHOLD);
    }

    float calcVisibilityRatio(int currFrameId) const
    {
        int allocatedBlocks = getTotalAllocatedBlocks();
        int visibleBlocks   = getVisibleBlocks(currFrameId);
        return float(visibleBlocks) / float(allocatedBlocks);
    }

    //! TODO: Possibly useless
    virtual void setStartFrameId(int _startFrameId) { startFrameId = _startFrameId; }
    virtual void setStopFrameId(int _stopFrameId) { stopFrameId = _stopFrameId; }

    void composeCameraPose(const cv::Affine3f& _relativePose) { cameraPose = cameraPose * _relativePose; }
    PoseConstraint& getConstraint(const int _id)
    {
        //! Creates the constraint if it doesn't exist yet
        return constraints[_id];
    }

public:
    const int id;
    cv::Affine3f pose;
    cv::Affine3f cameraPose;
    Constraints constraints;

    int startFrameId;
    int stopFrameId;
    //! TODO: Should we support submaps for regular volumes?
    static constexpr int FRAME_VISIBILITY_THRESHOLD = 5;

    //! TODO: Add support for GPU arrays (UMat)
    std::vector<MatType> pyrPoints;
    std::vector<MatType> pyrNormals;
    HashTSDFVolumeCPU volume;
};

template<typename MatType>
void Submap<MatType>::integrate(InputArray _depth, float depthFactor, const cv::kinfu::Intr& intrinsics,
                                const int currFrameId)
{
    CV_Assert(currFrameId >= startFrameId);
    volume.integrate(_depth, depthFactor, cameraPose.matrix, intrinsics, currFrameId);
}

template<typename MatType>
void Submap<MatType>::raycast(const cv::Affine3f& _cameraPose, const cv::kinfu::Intr& intrinsics, cv::Size frameSize,
                              OutputArray points, OutputArray normals)
{
    volume.raycast(_cameraPose.matrix, intrinsics, frameSize, points, normals);
}

template<typename MatType>
void Submap<MatType>::updatePyrPointsNormals(const int pyramidLevels)
{
    MatType& points  = pyrPoints[0];
    MatType& normals = pyrNormals[0];

    buildPyramidPointsNormals(points, normals, pyrPoints, pyrNormals, pyramidLevels);
}

/**
 * @brief Manages all the created submaps for a particular scene
 */
template<typename MatType>
class SubmapManager
{
public:
    enum class Type
    {
        NEW            = 0,
        CURRENT        = 1,
        RELOCALISATION = 2,
        LOOP_CLOSURE   = 3,
        LOST           = 4
    };

    struct ActiveSubmapData
    {
        Type type;
        std::vector<Affine3f> constraints;
        int trackingAttempts;
    };
    typedef Submap<MatType> SubmapT;
    typedef std::map<int, Ptr<SubmapT>> IdToSubmapPtr;
    typedef std::unordered_map<int, ActiveSubmapData> IdToActiveSubmaps;

    SubmapManager(const VolumeParams& _volumeParams) : volumeParams(_volumeParams) {}
    virtual ~SubmapManager() = default;

    void reset() { submapList.clear(); }

    bool shouldCreateSubmap(int frameId);
    bool shouldChangeCurrSubmap(int _frameId, int toSubmapId);

    //! Adds a new submap/volume into the current list of managed/active submaps
    int createNewSubmap(bool isCurrentActiveMap, const int currFrameId = 0, const Affine3f& pose = cv::Affine3f::Identity());

    void removeSubmap(int _id);
    size_t numOfSubmaps(void) const { return submapList.size(); }
    size_t numOfActiveSubmaps(void) const { return activeSubmaps.size(); }

    Ptr<SubmapT> getSubmap(int _id) const;
    Ptr<SubmapT> getCurrentSubmap(void) const;

    int estimateConstraint(int fromSubmapId, int toSubmapId, int& inliers, Affine3f& inlierPose);
    bool updateMap(int _frameId, std::vector<MatType> _framePoints, std::vector<MatType> _frameNormals);

    PoseGraph MapToPoseGraph();
    void PoseGraphToMap(const PoseGraph& updatedPoseGraph);

    VolumeParams volumeParams;

    std::vector<Ptr<SubmapT>> submapList;
    IdToActiveSubmaps activeSubmaps;

    PoseGraph poseGraph;
};

template<typename MatType>
int SubmapManager<MatType>::createNewSubmap(bool isCurrentMap, int currFrameId, const Affine3f& pose)
{
    int newId = int(submapList.size());

    Ptr<SubmapT> newSubmap = cv::makePtr<SubmapT>(newId, volumeParams, pose, currFrameId);
    submapList.push_back(newSubmap);

    ActiveSubmapData newSubmapData;
    newSubmapData.trackingAttempts = 0;
    newSubmapData.type             = isCurrentMap ? Type::CURRENT : Type::NEW;
    activeSubmaps[newId]           = newSubmapData;

    std::cout << "Created new submap\n";

    return newId;
}

template<typename MatType>
Ptr<Submap<MatType>> SubmapManager<MatType>::getSubmap(int _id) const
{
    CV_Assert(submapList.size() > 0);
    CV_Assert(_id >= 0 && _id < int(submapList.size()));
    return submapList.at(_id);
}

template<typename MatType>
Ptr<Submap<MatType>> SubmapManager<MatType>::getCurrentSubmap(void) const
{
    for (const auto& it : activeSubmaps)
    {
        if (it.second.type == Type::CURRENT)
            return getSubmap(it.first);
    }
    return nullptr;
}

template<typename MatType>
bool SubmapManager<MatType>::shouldCreateSubmap(int currFrameId)
{
    int currSubmapId = -1;
    for (const auto& it : activeSubmaps)
    {
        auto submapData = it.second;
        // No more than 1 new submap at a time!
        if (submapData.type == Type::NEW)
        {
            return false;
        }
        if (submapData.type == Type::CURRENT)
        {
            currSubmapId = it.first;
        }
    }
    //! TODO: This shouldn't happen, since there should always be one active current submap
    if (currSubmapId < 0)
    {
        return false;
    }

    Ptr<SubmapT> currSubmap = getSubmap(currSubmapId);
    float ratio             = currSubmap->calcVisibilityRatio(currFrameId);

    std::cout << "Ratio: " << ratio << "\n";

    if (ratio < 0.2f)
        return true;
    return false;
}

template<typename MatType>
int SubmapManager<MatType>::estimateConstraint(int fromSubmapId, int toSubmapId, int& inliers, Affine3f& inlierPose)
{
    static constexpr int MAX_ITER = 10;
    static constexpr float CONVERGE_WEIGHT_THRESHOLD = 0.01f;
    static constexpr float INLIER_WEIGHT_THRESH = 0.8f;
    static constexpr int MIN_INLIERS = 10;
    static constexpr int MAX_TRACKING_ATTEMPTS = 25;

    //! Huber-style IRLS weight (thresh = HUBER_THRESH):
    //! 1 for |residual| < thresh, sqrt(2*thresh*|residual| - thresh^2) / |residual| otherwise
    auto huberWeight = [](float residual, float thresh = 0.1f) -> float {
        float rAbs = std::abs(residual);
        if (rAbs < thresh)
            return 1.0f;
        float numerator = std::sqrt(2 * thresh * rAbs - thresh * thresh);
        return numerator / rAbs;
    };

    Ptr<SubmapT> fromSubmap          = getSubmap(fromSubmapId);
    Ptr<SubmapT> toSubmap            = getSubmap(toSubmapId);
    ActiveSubmapData& fromSubmapData = activeSubmaps.at(fromSubmapId);

    Affine3f TcameraToFromSubmap = fromSubmap->cameraPose;
    Affine3f TcameraToToSubmap   = toSubmap->cameraPose;

    // FromSubmap -> ToSubmap transform
    Affine3f candidateConstraint = TcameraToToSubmap * TcameraToFromSubmap.inv();
    fromSubmapData.trackingAttempts++;
    fromSubmapData.constraints.push_back(candidateConstraint);

    std::vector<float> weights(fromSubmapData.constraints.size() + 1, 1.0f);

    Affine3f prevConstraint = fromSubmap->getConstraint(toSubmap->id).estimatedPose;
    int prevWeight          = fromSubmap->getConstraint(toSubmap->id).weight;

    // Iteratively reweighted least squares with a Huber threshold to find the inliers among the past observations
    for (int i = 0; i < MAX_ITER; i++)
    {
        // Reset the accumulators each iteration so the weighted mean is recomputed from scratch
        Vec6f meanConstraint;
        float sumWeight = 0.0f;
        Vec6f constraintVec;
        for (int j = 0; j < int(weights.size() - 1); j++)
        {
            Affine3f currObservation = fromSubmapData.constraints[j];
            cv::vconcat(currObservation.rvec(), currObservation.translation(), constraintVec);
            meanConstraint += weights[j] * constraintVec;
            sumWeight += weights[j];
        }
        // Heavier weight given to the estimatedPose
        cv::vconcat(prevConstraint.rvec(), prevConstraint.translation(), constraintVec);
        meanConstraint += weights.back() * prevWeight * constraintVec;
        sumWeight += prevWeight;
        meanConstraint /= float(sumWeight);

        float residual = 0.0f;
        float diff     = 0.0f;
        for (int j = 0; j < int(weights.size()); j++)
        {
            int w;
            if (j == int(weights.size() - 1))
            {
                cv::vconcat(prevConstraint.rvec(), prevConstraint.translation(), constraintVec);
                w = prevWeight;
            }
            else
            {
                Affine3f currObservation = fromSubmapData.constraints[j];
                cv::vconcat(currObservation.rvec(), currObservation.translation(), constraintVec);
                w = 1;
            }

            cv::Vec6f residualVec = (constraintVec - meanConstraint);
            residual              = float(norm(residualVec));
            float newWeight       = huberWeight(residual);
            diff += w * std::abs(newWeight - weights[j]);
            weights[j] = newWeight;
        }

        if (diff / (prevWeight + weights.size() - 1) < CONVERGE_WEIGHT_THRESHOLD)
            break;
    }

    int localInliers = 0;
    Matx44f inlierConstraint;
    for (int i = 0; i < int(weights.size()); i++)
    {
        if (weights[i] > INLIER_WEIGHT_THRESH)
        {
            localInliers++;
            if (i == int(weights.size() - 1))
                inlierConstraint += prevConstraint.matrix;
            else
                inlierConstraint += fromSubmapData.constraints[i].matrix;
        }
    }
    inlierConstraint /= float(max(localInliers, 1));
    inlierPose = Affine3f(inlierConstraint);
    inliers    = localInliers;

    /* std::cout << inlierPose.matrix << "\n"; */
    /* std::cout << " inliers: " << inliers << "\n"; */

    if (inliers >= MIN_INLIERS)
    {
        return 1;
    }
    if (fromSubmapData.trackingAttempts - inliers > (MAX_TRACKING_ATTEMPTS - MIN_INLIERS))
    {
        return -1;
    }

    return 0;
}
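//! Note on the return value (as consumed by updateMap below): 1 means enough inliers were found
//! and the accumulated inlier constraint can be trusted, -1 means this submap pair has failed
//! tracking too many times and should be marked LOST, 0 means the decision is still pending.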

template<typename MatType>
bool SubmapManager<MatType>::shouldChangeCurrSubmap(int _frameId, int toSubmapId)
{
    auto toSubmap         = getSubmap(toSubmapId);
    auto toSubmapData     = activeSubmaps.at(toSubmapId);
    auto currActiveSubmap = getCurrentSubmap();

    int blocksInNewMap = toSubmap->getTotalAllocatedBlocks();
    float newRatio     = toSubmap->calcVisibilityRatio(_frameId);

    float currRatio = currActiveSubmap->calcVisibilityRatio(_frameId);

    //! TODO: Check for a specific threshold?
    if (blocksInNewMap <= 0)
        return false;
    if ((newRatio > currRatio) && (toSubmapData.type == Type::NEW))
        return true;

    return false;
}

template<typename MatType>
bool SubmapManager<MatType>::updateMap(int _frameId, std::vector<MatType> _framePoints, std::vector<MatType> _frameNormals)
{
    bool mapUpdated         = false;
    int changedCurrentMapId = -1;

    const int currSubmapId = getCurrentSubmap()->id;

    for (auto& it : activeSubmaps)
    {
        int submapId     = it.first;
        auto& submapData = it.second;
        if (submapData.type == Type::NEW || submapData.type == Type::LOOP_CLOSURE)
        {
            // Check with previous estimate
            int inliers;
            Affine3f inlierPose;
            int constraintUpdate = estimateConstraint(submapId, currSubmapId, inliers, inlierPose);
            std::cout << "SubmapId: " << submapId << " Tracking attempts: " << submapData.trackingAttempts << "\n";
            if (constraintUpdate == 1)
            {
                typename SubmapT::PoseConstraint& submapConstraint = getSubmap(submapId)->getConstraint(currSubmapId);
                submapConstraint.accumulatePose(inlierPose, inliers);
                std::cout << "Submap constraint estimated pose: \n" << submapConstraint.estimatedPose.matrix << "\n";
                submapData.constraints.clear();
                submapData.trackingAttempts = 0;

                if (shouldChangeCurrSubmap(_frameId, submapId))
                {
                    std::cout << "Should change current map to the new map\n";
                    changedCurrentMapId = submapId;
                }
                mapUpdated = true;
            }
            else if (constraintUpdate == -1)
            {
                submapData.type = Type::LOST;
            }
        }
    }

    std::vector<int> createNewConstraintsList;
    for (auto& it : activeSubmaps)
    {
        int submapId     = it.first;
        auto& submapData = it.second;

        if (submapId == changedCurrentMapId)
        {
            submapData.type = Type::CURRENT;
        }
        if ((submapData.type == Type::CURRENT) && (changedCurrentMapId >= 0) && (submapId != changedCurrentMapId))
        {
            submapData.type = Type::LOST;
            createNewConstraintsList.push_back(submapId);
        }
        if ((submapData.type == Type::NEW || submapData.type == Type::LOOP_CLOSURE) && (changedCurrentMapId >= 0))
        {
            //! TODO: Add a new type called NEW_LOST?
            submapData.type = Type::LOST;
            createNewConstraintsList.push_back(submapId);
        }
    }

    for (typename IdToActiveSubmaps::iterator it = activeSubmaps.begin(); it != activeSubmaps.end();)
    {
        auto& submapData = it->second;
        if (submapData.type == Type::LOST)
            it = activeSubmaps.erase(it);
        else
            it++;
    }

    for (std::vector<int>::const_iterator it = createNewConstraintsList.begin(); it != createNewConstraintsList.end(); ++it)
    {
        int dataId = *it;
        ActiveSubmapData newSubmapData;
        newSubmapData.trackingAttempts = 0;
        newSubmapData.type             = Type::LOOP_CLOSURE;
        activeSubmaps[dataId]          = newSubmapData;
    }

    if (shouldCreateSubmap(_frameId))
    {
        Ptr<SubmapT> currActiveSubmap = getCurrentSubmap();
        Affine3f newSubmapPose        = currActiveSubmap->pose * currActiveSubmap->cameraPose;
        int submapId                  = createNewSubmap(false, _frameId, newSubmapPose);
        auto newSubmap                = getSubmap(submapId);
        newSubmap->pyrPoints          = _framePoints;
        newSubmap->pyrNormals         = _frameNormals;
    }

    // Debugging only
    if (_frameId % 100 == 0)
    {
        for (size_t i = 0; i < submapList.size(); i++)
        {
            Ptr<SubmapT> currSubmap = submapList.at(i);
            typename SubmapT::Constraints::const_iterator itBegin = currSubmap->constraints.begin();
            std::cout << "Constraint list for SubmapID: " << currSubmap->id << "\n";
            for (typename SubmapT::Constraints::const_iterator it = itBegin; it != currSubmap->constraints.end(); ++it)
            {
                const typename SubmapT::PoseConstraint& constraint = it->second;
                std::cout << "[" << it->first << "] weight: " << constraint.weight << "\n " << constraint.estimatedPose.matrix << " \n";
            }
        }
    }

    return mapUpdated;
}

template<typename MatType>
PoseGraph SubmapManager<MatType>::MapToPoseGraph()
{
    PoseGraph localPoseGraph;

    for (const Ptr<SubmapT> currSubmap : submapList)
    {
        const typename SubmapT::Constraints& constraintList = currSubmap->constraints;
        for (const auto& currConstraintPair : constraintList)
        {
            // TODO: Handle case with duplicate constraints A -> B and B -> A
            /* Matx66f informationMatrix = Matx66f::eye() * (currConstraintPair.second.weight/10); */
            Matx66f informationMatrix = Matx66f::eye();
            PoseGraphEdge currEdge(currSubmap->id, currConstraintPair.first, currConstraintPair.second.estimatedPose, informationMatrix);
            localPoseGraph.addEdge(currEdge);
        }
    }

    for (const Ptr<SubmapT> currSubmap : submapList)
    {
        PoseGraphNode currNode(currSubmap->id, currSubmap->pose);
        if (currSubmap->id == 0)
        {
            currNode.setFixed();
        }
        localPoseGraph.addNode(currNode);
    }

    return localPoseGraph;
}

template<typename MatType>
void SubmapManager<MatType>::PoseGraphToMap(const PoseGraph& updatedPoseGraph)
{
    for (const Ptr<SubmapT> currSubmap : submapList)
    {
        const PoseGraphNode& currNode = updatedPoseGraph.nodes.at(currSubmap->id);
        if (!currNode.isPoseFixed())
            currSubmap->pose = currNode.getPose();
        std::cout << "Current node: " << currSubmap->id << " Updated Pose: \n" << currSubmap->pose.matrix << std::endl;
    }
}

}  // namespace kinfu
}  // namespace cv
#endif /* ifndef __OPENCV_RGBD_SUBMAP_HPP__ */
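//! End-to-end sketch of how the pieces above are meant to fit together per tracked frame
//! (illustrative only; `volParams`, `framePyrPoints`, `framePyrNormals`, the choice of `Mat`
//! as MatType and the optimizer step are assumptions, not defined in this header):
/*
    cv::kinfu::SubmapManager<Mat> submapMgr(volParams);
    submapMgr.createNewSubmap(true);                      // submap 0 becomes the CURRENT map

    // ... per frame, after ICP has been composed into the active submaps' cameraPose ...
    bool updated = submapMgr.updateMap(frameId, framePyrPoints, framePyrNormals);
    if (updated)
    {
        PoseGraph graph = submapMgr.MapToPoseGraph();     // submap constraints -> pose graph
        // run pose graph optimization here (see pose_graph.hpp), then write poses back:
        submapMgr.PoseGraphToMap(graph);
    }
*/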