Merge pull request #2606 from DumDereDum:tsdf_fix_and_test

TSDF fixes and tests

* fix getMat usage

* min/max fix

* create WeightType

* create normals test

* bug fix

* complete normals test

* fix makeVolume and rewrite tests

* minor fixes

* add new normal tests

* replace operator() with lambda expressions

* make a valid points test

* minor fixes

* getNormalVoxel fix in tsdf and hashTsdf

* create renderPointsNormals

* replace Affine3f with Matx44f in makeVolume

* minor fixes

* minor fix

* tmp

* create function interpolateVoxel for hashTSDF

* tmp

* right interpolation for HashTSDF

* rewrite intrinsics normalize

* minor fix

* rewrite GPU normalize

* start to write perf tests

* makeVolume fix

* GPU normalize fix

* minor fix

* create perf test for raycast

* fix LNK2019 problem in perf test

* made all perf tests

* replace all Affine3f with Matx44f

* replace Point3i with Vec3i

* minor fix

* minor fix

* add CV_EXPORTS_W

* build fix 1

* build fix 2

* build fix 3

* warning fix

* build test

* win test

* tests without HashTSDF

* create non-parallel normals checking

* test without fetch

* test without fetch points normals

* add newline at end of file

* revert rotation() in hash_tsdf

* fix matrix multiplication order

* fetch points normals invoker fix

* warning fix

* warning fix

* Docs fix

* Hash push normals fix

* replace operator() with lambda in PushNormals

* warning fix

* comments fix

Co-authored-by: arsaratovtsev <artem.saratovtsev@intel.com>
Commit 80cdf41e3a (parent 1d6480ae2c), branch pull/2645/head, authored by DumDereDum and committed via GitHub.
Changed files (lines changed):
  1. modules/rgbd/include/opencv2/rgbd/volume.hpp (12)
  2. modules/rgbd/perf/perf_main.cpp (6)
  3. modules/rgbd/perf/perf_precomp.hpp (20)
  4. modules/rgbd/perf/perf_tsdf.cpp (222)
  5. modules/rgbd/src/hash_tsdf.cpp (120)
  6. modules/rgbd/src/hash_tsdf.hpp (10)
  7. modules/rgbd/src/kinfu.cpp (13)
  8. modules/rgbd/src/opencl/tsdf.cl (5)
  9. modules/rgbd/src/tsdf.cpp (100)
  10. modules/rgbd/src/tsdf.hpp (26)
  11. modules/rgbd/src/volume.cpp (7)
  12. modules/rgbd/test/test_tsdf.cpp (513)

@ -15,10 +15,10 @@ namespace cv
{
namespace kinfu
{
class Volume
class CV_EXPORTS_W Volume
{
public:
Volume(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor)
Volume(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor)
: voxelSize(_voxelSize),
voxelSizeInv(1.0f / voxelSize),
pose(_pose),
@ -28,9 +28,9 @@ class Volume
virtual ~Volume(){};
virtual void integrate(InputArray _depth, float depthFactor, const cv::Affine3f& cameraPose,
virtual void integrate(InputArray _depth, float depthFactor, const cv::Matx44f& cameraPose,
const cv::kinfu::Intr& intrinsics) = 0;
virtual void raycast(const cv::Affine3f& cameraPose, const cv::kinfu::Intr& intrinsics,
virtual void raycast(const cv::Matx44f& cameraPose, const cv::kinfu::Intr& intrinsics,
cv::Size frameSize, cv::OutputArray points,
cv::OutputArray normals) const = 0;
virtual void fetchNormals(cv::InputArray points, cv::OutputArray _normals) const = 0;
@ -50,9 +50,9 @@ enum class VolumeType
HASHTSDF = 1
};
cv::Ptr<Volume> makeVolume(VolumeType _volumeType, float _voxelSize, cv::Affine3f _pose,
CV_EXPORTS_W cv::Ptr<Volume> makeVolume(VolumeType _volumeType, float _voxelSize, cv::Matx44f _pose,
float _raycastStepFactor, float _truncDist, int _maxWeight,
float _truncateThreshold, Point3i _resolution);
float _truncateThreshold, Vec3i _resolution);
} // namespace kinfu
} // namespace cv
#endif

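As a quick reference for reviewers, here is a minimal, illustrative sketch of calling the updated factory: makeVolume now takes the pose as a cv::Matx44f and the resolution as a cv::Vec3i instead of cv::Affine3f/cv::Point3i. Parameter values below are made up for the example and are not taken from this patch.

```cpp
// Minimal sketch (illustrative values) of the updated makeVolume signature:
// pose is passed as a Matx44f, resolution as a Vec3i.
#include <opencv2/core/affine.hpp>
#include <opencv2/rgbd/volume.hpp>

int main()
{
    using namespace cv;
    // Affine3f is still convenient for building the 4x4 pose matrix.
    Matx44f pose = Affine3f::Identity().translate(Vec3f(-1.5f, -1.5f, 0.5f)).matrix;
    Ptr<kinfu::Volume> volume = kinfu::makeVolume(
        kinfu::VolumeType::TSDF,
        /*voxelSize*/ 0.004f, pose,
        /*raycastStepFactor*/ 0.25f,
        /*truncDist*/ 0.04f, /*maxWeight*/ 64,
        /*truncateThreshold*/ 0.f, /*resolution*/ Vec3i(128, 128, 128));
    return volume ? 0 : 1;
}
```
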
@ -0,0 +1,6 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "perf_precomp.hpp"
CV_PERF_TEST_MAIN(rgbd)

@ -0,0 +1,20 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include <opencv2/ts.hpp>
#include <opencv2/rgbd.hpp>
#include <opencv2/calib3d.hpp>
#ifdef HAVE_OPENCL
#include <opencv2/core/ocl.hpp>
#endif
namespace opencv_test {
using namespace cv::rgbd;
}
#endif

@ -0,0 +1,222 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "perf_precomp.hpp"
namespace opencv_test { namespace {
using namespace cv;
/** Reprojects screen point to camera space given z coord. */
struct Reprojector
{
Reprojector() {}
inline Reprojector(Matx33f intr)
{
fxinv = 1.f / intr(0, 0), fyinv = 1.f / intr(1, 1);
cx = intr(0, 2), cy = intr(1, 2);
}
template<typename T>
inline cv::Point3_<T> operator()(cv::Point3_<T> p) const
{
T x = p.z * (p.x - cx) * fxinv;
T y = p.z * (p.y - cy) * fyinv;
return cv::Point3_<T>(x, y, p.z);
}
float fxinv, fyinv, cx, cy;
};
template<class Scene>
struct RenderInvoker : ParallelLoopBody
{
RenderInvoker(Mat_<float>& _frame, Affine3f _pose,
Reprojector _reproj,
float _depthFactor) : ParallelLoopBody(),
frame(_frame),
pose(_pose),
reproj(_reproj),
depthFactor(_depthFactor)
{ }
virtual void operator ()(const cv::Range& r) const
{
for (int y = r.start; y < r.end; y++)
{
float* frameRow = frame[y];
for (int x = 0; x < frame.cols; x++)
{
float pix = 0;
Point3f orig = pose.translation();
// direction through pixel
Point3f screenVec = reproj(Point3f((float)x, (float)y, 1.f));
float xyt = 1.f / (screenVec.x * screenVec.x +
screenVec.y * screenVec.y + 1.f);
Point3f dir = normalize(Vec3f(pose.rotation() * screenVec));
// screen space axis
dir.y = -dir.y;
const float maxDepth = 20.f;
const float maxSteps = 256;
float t = 0.f;
for (int step = 0; step < maxSteps && t < maxDepth; step++)
{
Point3f p = orig + dir * t;
float d = Scene::map(p);
if (d < 0.000001f)
{
float depth = std::sqrt(t * t * xyt);
pix = depth * depthFactor;
break;
}
t += d;
}
frameRow[x] = pix;
}
}
}
Mat_<float>& frame;
Affine3f pose;
Reprojector reproj;
float depthFactor;
};
struct Scene
{
virtual ~Scene() {}
static Ptr<Scene> create(Size sz, Matx33f _intr, float _depthFactor);
virtual Mat depth(Affine3f pose) = 0;
virtual std::vector<Affine3f> getPoses() = 0;
};
struct SemisphereScene : Scene
{
const int framesPerCycle = 72;
const float nCycles = 1.0f;
const Affine3f startPose = Affine3f(Vec3f(0.f, 0.f, 0.f), Vec3f(1.5f, 0.3f, -1.5f));
Size frameSize;
Matx33f intr;
float depthFactor;
SemisphereScene(Size sz, Matx33f _intr, float _depthFactor) :
frameSize(sz), intr(_intr), depthFactor(_depthFactor)
{ }
static float map(Point3f p)
{
float plane = p.y + 0.5f;
Point3f boxPose = p - Point3f(-0.0f, 0.3f, 0.5f);
float boxSize = 0.5f;
float roundness = 0.08f;
Point3f boxTmp;
boxTmp.x = max(abs(boxPose.x) - boxSize, 0.0f);
boxTmp.y = max(abs(boxPose.y) - boxSize, 0.0f);
boxTmp.z = max(abs(boxPose.z) - boxSize, 0.0f);
float roundBox = (float)cv::norm(boxTmp) - roundness;
Point3f spherePose = p - Point3f(-0.0f, 0.3f, 0.0f);
float sphereRadius = 0.5f;
float sphere = (float)cv::norm(spherePose) - sphereRadius;
float sphereMinusBox = max(sphere, -roundBox);
float subSphereRadius = 0.05f;
Point3f subSpherePose = p - Point3f(0.3f, -0.1f, -0.3f);
float subSphere = (float)cv::norm(subSpherePose) - subSphereRadius;
float res = min({ sphereMinusBox, subSphere, plane });
return res;
}
Mat depth(Affine3f pose) override
{
Mat_<float> frame(frameSize);
Reprojector reproj(intr);
Range range(0, frame.rows);
parallel_for_(range, RenderInvoker<SemisphereScene>(frame, pose, reproj, depthFactor));
return std::move(frame);
}
std::vector<Affine3f> getPoses() override
{
std::vector<Affine3f> poses;
for (int i = 0; i < framesPerCycle * nCycles; i++)
{
float angle = (float)(CV_2PI * i / framesPerCycle);
Affine3f pose;
pose = pose.rotate(startPose.rotation());
pose = pose.rotate(Vec3f(0.f, -1.f, 0.f) * angle);
pose = pose.translate(Vec3f(startPose.translation()[0] * sin(angle),
startPose.translation()[1],
startPose.translation()[2] * cos(angle)));
poses.push_back(pose);
}
return poses;
}
};
Ptr<Scene> Scene::create(Size sz, Matx33f _intr, float _depthFactor)
{
return makePtr<SemisphereScene>(sz, _intr, _depthFactor);
}
PERF_TEST(Perf_TSDF, integrate)
{
Ptr<kinfu::Params> _params;
_params = kinfu::Params::coarseParams();
Ptr<kinfu::Volume> volume = kinfu::makeVolume(_params->volumeType, _params->voxelSize, _params->volumePose.matrix,
_params->raycast_step_factor, _params->tsdf_trunc_dist, _params->tsdf_max_weight,
_params->truncateThreshold, _params->volumeDims);
Ptr<Scene> scene = Scene::create(_params->frameSize, _params->intr, _params->depthFactor);
std::vector<Affine3f> poses = scene->getPoses();
for (size_t i = 0; i < poses.size(); i++)
{
UMat _points, _normals;
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
startTimer();
volume->integrate(depth, _params->depthFactor, pose, _params->intr);
stopTimer();
}
SANITY_CHECK_NOTHING();
}
PERF_TEST(Perf_TSDF, raycast)
{
Ptr<kinfu::Params> _params;
_params = kinfu::Params::coarseParams();
Ptr<kinfu::Volume> volume = kinfu::makeVolume(_params->volumeType, _params->voxelSize, _params->volumePose.matrix,
_params->raycast_step_factor, _params->tsdf_trunc_dist, _params->tsdf_max_weight,
_params->truncateThreshold, _params->volumeDims);
Ptr<Scene> scene = Scene::create(_params->frameSize, _params->intr, _params->depthFactor);
std::vector<Affine3f> poses = scene->getPoses();
for (size_t i = 0; i < poses.size(); i++)
{
UMat _points, _normals;
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
volume->integrate(depth, _params->depthFactor, pose, _params->intr);
startTimer();
volume->raycast(pose, _params->intr, _params->frameSize, _points, _normals);
stopTimer();
}
SANITY_CHECK_NOTHING();
}
}} // namespace

@ -20,7 +20,7 @@ namespace cv
{
namespace kinfu
{
HashTSDFVolume::HashTSDFVolume(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor,
HashTSDFVolume::HashTSDFVolume(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor,
float _truncDist, int _maxWeight, float _truncateThreshold,
int _volumeUnitRes, bool _zFirstMemOrder)
: Volume(_voxelSize, _pose, _raycastStepFactor),
@ -33,7 +33,7 @@ HashTSDFVolume::HashTSDFVolume(float _voxelSize, cv::Affine3f _pose, float _rayc
truncDist = std::max(_truncDist, 4.0f * voxelSize);
}
HashTSDFVolumeCPU::HashTSDFVolumeCPU(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor,
HashTSDFVolumeCPU::HashTSDFVolumeCPU(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor,
float _truncDist, int _maxWeight, float _truncateThreshold,
int _volumeUnitRes, bool _zFirstMemOrder)
: HashTSDFVolume(_voxelSize, _pose, _raycastStepFactor, _truncDist, _maxWeight,
@ -51,12 +51,12 @@ void HashTSDFVolumeCPU::reset()
struct AllocateVolumeUnitsInvoker : ParallelLoopBody
{
AllocateVolumeUnitsInvoker(HashTSDFVolumeCPU& _volume, const Depth& _depth, Intr intrinsics,
cv::Affine3f cameraPose, float _depthFactor, int _depthStride = 4)
cv::Matx44f cameraPose, float _depthFactor, int _depthStride = 4)
: ParallelLoopBody(),
volume(_volume),
depth(_depth),
reproj(intrinsics.makeReprojector()),
cam2vol(_volume.pose.inv() * cameraPose),
cam2vol(_volume.pose.inv() * Affine3f(cameraPose)),
depthFactor(1.0f / _depthFactor),
depthStride(_depthStride)
{
@ -103,8 +103,8 @@ struct AllocateVolumeUnitsInvoker : ParallelLoopBody
volume.volumeUnitResolution,
volume.volumeUnitResolution);
cv::Affine3f subvolumePose =
volume.pose.translate(volume.volumeUnitIdxToVolume(tsdf_idx));
cv::Matx44f subvolumePose =
volume.pose.translate(volume.volumeUnitIdxToVolume(tsdf_idx)).matrix;
volumeUnit.pVolume = cv::makePtr<TSDFVolumeCPU>(
volume.voxelSize, subvolumePose, volume.raycastStepFactor,
volume.truncDist, volume.maxWeight, volumeDims);
@ -129,7 +129,7 @@ struct AllocateVolumeUnitsInvoker : ParallelLoopBody
void HashTSDFVolumeCPU::integrate(InputArray _depth, float depthFactor,
const cv::Affine3f& cameraPose, const Intr& intrinsics)
const cv::Matx44f& cameraPose, const Intr& intrinsics)
{
CV_TRACE_FUNCTION();
@ -151,7 +151,7 @@ void HashTSDFVolumeCPU::integrate(InputArray _depth, float depthFactor,
//! Mark volumes in the camera frustum as active
Range inFrustumRange(0, (int)volumeUnits.size());
parallel_for_(inFrustumRange, [&](const Range& range) {
const Affine3f vol2cam(cameraPose.inv() * pose);
const Affine3f vol2cam(Affine3f(cameraPose.inv()) * pose);
const Intr::Projector proj(intrinsics.makeProjector());
for (int i = range.start; i < range.end; ++i)
@ -269,6 +269,41 @@ inline TsdfVoxel HashTSDFVolumeCPU::at(const cv::Point3f& point) const
return volumeUnit->at(volUnitLocalIdx);
}
inline TsdfType HashTSDFVolumeCPU::interpolateVoxel(const cv::Point3f& point) const
{
cv::Point3f neighbourCoords[] = {
Point3f(0, 0, 0),
Point3f(0, 0, 1),
Point3f(0, 1, 0),
Point3f(0, 1, 1),
Point3f(1, 0, 0),
Point3f(1, 0, 1),
Point3f(1, 1, 0),
Point3f(1, 1, 1) };
int ix = cvFloor(point.x);
int iy = cvFloor(point.y);
int iz = cvFloor(point.z);
float tx = point.x - ix;
float ty = point.y - iy;
float tz = point.z - iz;
TsdfType vx[8];
for (int i = 0; i < 8; i++)
vx[i] = at(neighbourCoords[i] * voxelSize + point).tsdf;
TsdfType v00 = vx[0] + tz * (vx[1] - vx[0]);
TsdfType v01 = vx[2] + tz * (vx[3] - vx[2]);
TsdfType v10 = vx[4] + tz * (vx[5] - vx[4]);
TsdfType v11 = vx[6] + tz * (vx[7] - vx[6]);
TsdfType v0 = v00 + ty * (v01 - v00);
TsdfType v1 = v10 + ty * (v11 - v10);
return v0 + tx * (v1 - v0);
}
inline Point3f HashTSDFVolumeCPU::getNormalVoxel(Point3f point) const
{
Vec3f pointVec(point);
@ -279,29 +314,33 @@ inline Point3f HashTSDFVolumeCPU::getNormalVoxel(Point3f point) const
for (int c = 0; c < 3; c++)
{
pointPrev[c] -= voxelSize * 0.5f;
pointNext[c] += voxelSize * 0.5f;
pointPrev[c] -= voxelSize;
pointNext[c] += voxelSize;
normal[c] = at(Point3f(pointNext)).tsdf - at(Point3f(pointPrev)).tsdf;
normal[c] *= 0.5f;
//normal[c] = at(Point3f(pointNext)).tsdf - at(Point3f(pointPrev)).tsdf;
normal[c] = interpolateVoxel(Point3f(pointNext)) - interpolateVoxel(Point3f(pointPrev));
pointPrev[c] = pointVec[c];
pointNext[c] = pointVec[c];
}
return normalize(normal);
//std::cout << normal << std::endl;
float nv = sqrt(normal[0] * normal[0] +
normal[1] * normal[1] +
normal[2] * normal[2]);
return nv < 0.0001f ? nan3 : normal/nv;
}
struct HashRaycastInvoker : ParallelLoopBody
{
HashRaycastInvoker(Points& _points, Normals& _normals, const Affine3f& cameraPose,
HashRaycastInvoker(Points& _points, Normals& _normals, const Matx44f& cameraPose,
const Intr& intrinsics, const HashTSDFVolumeCPU& _volume)
: ParallelLoopBody(),
points(_points),
normals(_normals),
volume(_volume),
tstep(_volume.truncDist * _volume.raycastStepFactor),
cam2vol(volume.pose.inv() * cameraPose),
vol2cam(cameraPose.inv() * volume.pose),
cam2vol(volume.pose.inv() * Affine3f(cameraPose)),
vol2cam(Affine3f(cameraPose.inv()) * volume.pose),
reproj(intrinsics.makeReprojector())
{
}
@ -405,7 +444,7 @@ struct HashRaycastInvoker : ParallelLoopBody
const Intr::Reprojector reproj;
};
void HashTSDFVolumeCPU::raycast(const cv::Affine3f& cameraPose, const cv::kinfu::Intr& intrinsics,
void HashTSDFVolumeCPU::raycast(const cv::Matx44f& cameraPose, const cv::kinfu::Intr& intrinsics,
cv::Size frameSize, cv::OutputArray _points,
cv::OutputArray _normals) const
{
@ -424,9 +463,9 @@ void HashTSDFVolumeCPU::raycast(const cv::Affine3f& cameraPose, const cv::kinfu:
parallel_for_(Range(0, points.rows), ri, nstripes);
}
struct FetchPointsNormalsInvoker : ParallelLoopBody
struct HashFetchPointsNormalsInvoker : ParallelLoopBody
{
FetchPointsNormalsInvoker(const HashTSDFVolumeCPU& _volume,
HashFetchPointsNormalsInvoker(const HashTSDFVolumeCPU& _volume,
const std::vector<Vec3i>& _totalVolUnits,
std::vector<std::vector<ptype>>& _pVecs,
std::vector<std::vector<ptype>>& _nVecs, bool _needNormals)
@ -502,7 +541,7 @@ void HashTSDFVolumeCPU::fetchPointsNormals(OutputArray _points, OutputArray _nor
{
totalVolUnits.push_back(keyvalue.first);
}
FetchPointsNormalsInvoker fi(*this, totalVolUnits, pVecs, nVecs, _normals.needed());
HashFetchPointsNormalsInvoker fi(*this, totalVolUnits, pVecs, nVecs, _normals.needed());
Range range(0, (int)totalVolUnits.size());
const int nstripes = -1;
parallel_for_(range, fi, nstripes);
@ -526,29 +565,6 @@ void HashTSDFVolumeCPU::fetchPointsNormals(OutputArray _points, OutputArray _nor
}
}
struct PushNormals
{
PushNormals(const HashTSDFVolumeCPU& _volume, Normals& _normals)
: volume(_volume), normals(_normals), invPose(volume.pose.inv())
{
}
void operator()(const ptype& point, const int* position) const
{
Point3f p = fromPtype(point);
Point3f n = nan3;
if (!isNaN(p))
{
Point3f voxelPoint = invPose * p;
n = volume.pose.rotation() * volume.getNormalVoxel(voxelPoint);
}
normals(position[0], position[1]) = toPtype(n);
}
const HashTSDFVolumeCPU& volume;
Normals& normals;
Affine3f invPose;
};
void HashTSDFVolumeCPU::fetchNormals(cv::InputArray _points, cv::OutputArray _normals) const
{
CV_TRACE_FUNCTION();
@ -561,11 +577,25 @@ void HashTSDFVolumeCPU::fetchNormals(cv::InputArray _points, cv::OutputArray _no
_normals.createSameSize(_points, _points.type());
Normals normals = _normals.getMat();
points.forEach(PushNormals(*this, normals));
const HashTSDFVolumeCPU& _volume = *this;
auto HashPushNormals = [&](const ptype& point, const int* position)
{
const HashTSDFVolumeCPU& volume(_volume);
Affine3f invPose(volume.pose.inv());
Point3f p = fromPtype(point);
Point3f n = nan3;
if (!isNaN(p))
{
Point3f voxelPoint = invPose * p;
n = volume.pose.rotation() * volume.getNormalVoxel(voxelPoint);
}
normals(position[0], position[1]) = toPtype(n);
};
points.forEach(HashPushNormals);
}
}
cv::Ptr<HashTSDFVolume> makeHashTSDFVolume(float _voxelSize, cv::Affine3f _pose,
cv::Ptr<HashTSDFVolume> makeHashTSDFVolume(float _voxelSize, cv::Matx44f _pose,
float _raycastStepFactor, float _truncDist,
int _maxWeight, float _truncateThreshold,
int _volumeUnitResolution)

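For reference while reviewing the new interpolateVoxel/getNormalVoxel pair above, this is the math the code implements (a restatement of the code, not part of the patch): with $t_x, t_y, t_z$ the fractional voxel coordinates and $v_{ijk}$ the TSDF values at the eight surrounding voxels (subscripts are the x, y, z offset bits), the sampled value is the standard trilinear interpolation

$$
\begin{aligned}
v_{ij} &= v_{ij0} + t_z\,(v_{ij1} - v_{ij0}),\\
v_{i}  &= v_{i0}  + t_y\,(v_{i1}  - v_{i0}),\\
v      &= v_{0}   + t_x\,(v_{1}   - v_{0}),
\end{aligned}
$$

and the normal is a central difference of that interpolant,

$$
n_c \propto V(p + \delta\, e_c) - V(p - \delta\, e_c), \qquad c \in \{x, y, z\},\quad \delta = \text{voxelSize},
$$

normalized afterwards, with NaN returned when $\lVert n \rVert < 10^{-4}$ so a near-zero gradient is never divided by.
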
@ -20,7 +20,7 @@ class HashTSDFVolume : public Volume
public:
// dimension in voxels, size in meters
//! Use fixed volume cuboid
HashTSDFVolume(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor, float _truncDist,
HashTSDFVolume(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor, float _truncDist,
int _maxWeight, float _truncateThreshold, int _volumeUnitRes,
bool zFirstMemOrder = true);
@ -67,13 +67,13 @@ class HashTSDFVolumeCPU : public HashTSDFVolume
{
public:
// dimension in voxels, size in meters
HashTSDFVolumeCPU(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor,
HashTSDFVolumeCPU(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor,
float _truncDist, int _maxWeight, float _truncateThreshold,
int _volumeUnitRes, bool zFirstMemOrder = true);
virtual void integrate(InputArray _depth, float depthFactor, const cv::Affine3f& cameraPose,
virtual void integrate(InputArray _depth, float depthFactor, const cv::Matx44f& cameraPose,
const cv::kinfu::Intr& intrinsics) override;
virtual void raycast(const cv::Affine3f& cameraPose, const cv::kinfu::Intr& intrinsics,
virtual void raycast(const cv::Matx44f& cameraPose, const cv::kinfu::Intr& intrinsics,
cv::Size frameSize, cv::OutputArray points,
cv::OutputArray normals) const override;
@ -103,7 +103,7 @@ class HashTSDFVolumeCPU : public HashTSDFVolume
//! Hashtable of individual smaller volume units
VolumeUnitMap volumeUnits;
};
cv::Ptr<HashTSDFVolume> makeHashTSDFVolume(float _voxelSize, cv::Affine3f _pose,
cv::Ptr<HashTSDFVolume> makeHashTSDFVolume(float _voxelSize, cv::Matx44f _pose,
float _raycastStepFactor, float _truncDist,
int _maxWeight, float truncateThreshold,
int volumeUnitResolution = 16);

@ -139,7 +139,7 @@ private:
cv::Ptr<Volume> volume;
int frameCounter;
Affine3f pose;
Matx44f pose;
std::vector<MatType> pyrPoints;
std::vector<MatType> pyrNormals;
};
@ -151,7 +151,7 @@ KinFuImpl<MatType>::KinFuImpl(const Params &_params) :
icp(makeICP(params.intr, params.icpIterations, params.icpAngleThresh, params.icpDistThresh)),
pyrPoints(), pyrNormals()
{
volume = makeVolume(params.volumeType, params.voxelSize, params.volumePose, params.raycast_step_factor,
volume = makeVolume(params.volumeType, params.voxelSize, params.volumePose.matrix, params.raycast_step_factor,
params.tsdf_trunc_dist, params.tsdf_max_weight, params.truncateThreshold, params.volumeDims);
reset();
}
@ -160,7 +160,7 @@ template< typename MatType >
void KinFuImpl<MatType >::reset()
{
frameCounter = 0;
pose = Affine3f::Identity();
pose = Affine3f::Identity().matrix;
volume->reset();
}
@ -251,7 +251,7 @@ bool KinFuImpl<MatType>::updateT(const MatType& _depth)
if(!success)
return false;
pose = pose * affine;
pose = (Affine3f(pose) * affine).matrix;
float rnorm = (float)cv::norm(affine.rvec());
float tnorm = (float)cv::norm(affine.translation());
@ -279,9 +279,10 @@ void KinFuImpl<MatType>::render(OutputArray image, const Matx44f& _cameraPose) c
CV_TRACE_FUNCTION();
Affine3f cameraPose(_cameraPose);
Affine3f _pose(pose);
const Affine3f id = Affine3f::Identity();
if((cameraPose.rotation() == pose.rotation() && cameraPose.translation() == pose.translation()) ||
if((cameraPose.rotation() == _pose.rotation() && cameraPose.translation() == _pose.translation()) ||
(cameraPose.rotation() == id.rotation() && cameraPose.translation() == id.translation()))
{
renderPointsNormals(pyrPoints[0], pyrNormals[0], image, params.lightPose);
@ -289,7 +290,7 @@ void KinFuImpl<MatType>::render(OutputArray image, const Matx44f& _cameraPose) c
else
{
MatType points, normals;
volume->raycast(cameraPose, params.intr, params.frameSize, points, normals);
volume->raycast(_cameraPose, params.intr, params.frameSize, points, normals);
renderPointsNormals(points, normals, image, params.lightPose);
}
}

@ -214,7 +214,10 @@ inline float3 getNormalVoxel(float3 p, __global const float2* volumePtr,
}
//gradientDeltaFactor is fixed at 1.0 of voxel size
return fast_normalize(vload3(0, an));
float3 n = vload3(0, an);
float Norm = sqrt(n.x*n.x + n.y*n.y + n.z*n.z);
return Norm < 0.0001f ? nan((uint)0) : n / Norm;
//return fast_normalize(vload3(0, an));
}
typedef float4 ptype;

@ -12,7 +12,7 @@ namespace cv {
namespace kinfu {
TSDFVolume::TSDFVolume(float _voxelSize, Affine3f _pose, float _raycastStepFactor, float _truncDist,
TSDFVolume::TSDFVolume(float _voxelSize, Matx44f _pose, float _raycastStepFactor, float _truncDist,
int _maxWeight, Point3i _resolution, bool zFirstMemOrder)
: Volume(_voxelSize, _pose, _raycastStepFactor),
volResolution(_resolution),
@ -55,8 +55,8 @@ TSDFVolume::TSDFVolume(float _voxelSize, Affine3f _pose, float _raycastStepFacto
}
// dimension in voxels, size in meters
TSDFVolumeCPU::TSDFVolumeCPU(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor,
float _truncDist, int _maxWeight, Point3i _resolution,
TSDFVolumeCPU::TSDFVolumeCPU(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor,
float _truncDist, WeightType _maxWeight, Vec3i _resolution,
bool zFirstMemOrder)
: TSDFVolume(_voxelSize, _pose, _raycastStepFactor, _truncDist, _maxWeight, _resolution,
zFirstMemOrder)
@ -177,13 +177,13 @@ static inline depthType bilinearDepth(const Depth& m, cv::Point2f pt)
struct IntegrateInvoker : ParallelLoopBody
{
IntegrateInvoker(TSDFVolumeCPU& _volume, const Depth& _depth, const Intr& intrinsics, const cv::Affine3f& cameraPose,
IntegrateInvoker(TSDFVolumeCPU& _volume, const Depth& _depth, const Intr& intrinsics, const cv::Matx44f& cameraPose,
float depthFactor) :
ParallelLoopBody(),
volume(_volume),
depth(_depth),
proj(intrinsics.makeProjector()),
vol2cam(cameraPose.inv() * _volume.pose),
vol2cam(Affine3f(cameraPose.inv()) * _volume.pose),
truncDistInv(1.f/_volume.truncDist),
dfac(1.f/depthFactor)
{
@ -317,7 +317,7 @@ struct IntegrateInvoker : ParallelLoopBody
TsdfType tsdf = fmin(1.f, sdf * truncDistInv);
TsdfVoxel& voxel = volDataY[z*volume.volDims[2]];
int& weight = voxel.weight;
WeightType& weight = voxel.weight;
TsdfType& value = voxel.tsdf;
// update TSDF
@ -403,7 +403,7 @@ struct IntegrateInvoker : ParallelLoopBody
TsdfType tsdf = fmin(1.f, sdf * truncDistInv);
TsdfVoxel& voxel = volDataY[z*volume.volDims[2]];
int& weight = voxel.weight;
WeightType& weight = voxel.weight;
TsdfType& value = voxel.tsdf;
// update TSDF
@ -426,12 +426,13 @@ struct IntegrateInvoker : ParallelLoopBody
};
// use depth instead of distance (optimization)
void TSDFVolumeCPU::integrate(InputArray _depth, float depthFactor, const cv::Affine3f& cameraPose,
void TSDFVolumeCPU::integrate(InputArray _depth, float depthFactor, const cv::Matx44f& cameraPose,
const Intr& intrinsics)
{
CV_TRACE_FUNCTION();
CV_Assert(_depth.type() == DEPTH_TYPE);
CV_Assert(!_depth.empty());
Depth depth = _depth.getMat();
IntegrateInvoker ii(*this, depth, intrinsics, cameraPose, depthFactor);
Range range(0, volResolution.x);
@ -581,8 +582,9 @@ inline v_float32x4 TSDFVolumeCPU::getNormalVoxel(const v_float32x4& p) const
}
v_float32x4 n = v_load_aligned(an);
v_float32x4 invNorm = v_invsqrt(v_setall_f32(v_reduce_sum(n*n)));
return n*invNorm;
v_float32x4 Norm = v_sqrt(v_setall_f32(v_reduce_sum(n*n)));
return Norm.get0() < 0.0001f ? nanv : n/Norm;
}
#else
inline Point3f TSDFVolumeCPU::getNormalVoxel(Point3f p) const
@ -627,13 +629,16 @@ inline Point3f TSDFVolumeCPU::getNormalVoxel(Point3f p) const
nv = v0 + tx*(v1 - v0);
}
return normalize(an);
float nv = sqrt(an[0] * an[0] +
an[1] * an[1] +
an[2] * an[2]);
return nv < 0.0001f ? nan3 : an / nv;
}
#endif
struct RaycastInvoker : ParallelLoopBody
{
RaycastInvoker(Points& _points, Normals& _normals, const Affine3f& cameraPose,
RaycastInvoker(Points& _points, Normals& _normals, const Matx44f& cameraPose,
const Intr& intrinsics, const TSDFVolumeCPU& _volume) :
ParallelLoopBody(),
points(_points),
@ -647,8 +652,8 @@ struct RaycastInvoker : ParallelLoopBody
volume.voxelSize,
volume.voxelSize)),
boxMin(),
cam2vol(volume.pose.inv() * cameraPose),
vol2cam(cameraPose.inv() * volume.pose),
cam2vol(volume.pose.inv() * Affine3f(cameraPose)),
vol2cam(Affine3f(cameraPose.inv()) * volume.pose),
reproj(intrinsics.makeReprojector())
{ }
@ -823,8 +828,10 @@ struct RaycastInvoker : ParallelLoopBody
// near clipping plane
const float clip = 0.f;
float tmin = max(max(max(minAx.x, minAx.y), max(minAx.x, minAx.z)), clip);
float tmax = min(min(maxAx.x, maxAx.y), min(maxAx.x, maxAx.z));
//float tmin = max(max(max(minAx.x, minAx.y), max(minAx.x, minAx.z)), clip);
//float tmax = min(min(maxAx.x, maxAx.y), min(maxAx.x, maxAx.z));
float tmin = max({ minAx.x, minAx.y, minAx.z, clip });
float tmax = min({ maxAx.x, maxAx.y, maxAx.z });
// precautions against getting coordinates out of bounds
tmin = tmin + tstep;
@ -915,7 +922,7 @@ struct RaycastInvoker : ParallelLoopBody
};
void TSDFVolumeCPU::raycast(const cv::Affine3f& cameraPose, const Intr& intrinsics, Size frameSize,
void TSDFVolumeCPU::raycast(const cv::Matx44f& cameraPose, const Intr& intrinsics, Size frameSize,
cv::OutputArray _points, cv::OutputArray _normals) const
{
CV_TRACE_FUNCTION();
@ -1076,34 +1083,10 @@ void TSDFVolumeCPU::fetchPointsNormals(OutputArray _points, OutputArray _normals
}
}
struct PushNormals
{
PushNormals(const TSDFVolumeCPU& _vol, Normals& _nrm) :
vol(_vol), normals(_nrm), invPose(vol.pose.inv())
{ }
void operator ()(const ptype &pp, const int * position) const
{
Point3f p = fromPtype(pp);
Point3f n = nan3;
if(!isNaN(p))
{
Point3f voxPt = (invPose * p);
voxPt = voxPt * vol.voxelSizeInv;
n = vol.pose.rotation() * vol.getNormalVoxel(voxPt);
}
normals(position[0], position[1]) = toPtype(n);
}
const TSDFVolumeCPU& vol;
Normals& normals;
Affine3f invPose;
};
void TSDFVolumeCPU::fetchNormals(InputArray _points, OutputArray _normals) const
{
CV_TRACE_FUNCTION();
CV_Assert(!_points.empty());
if(_normals.needed())
{
Points points = _points.getMat();
@ -1112,14 +1095,29 @@ void TSDFVolumeCPU::fetchNormals(InputArray _points, OutputArray _normals) const
_normals.createSameSize(_points, _points.type());
Normals normals = _normals.getMat();
points.forEach(PushNormals(*this, normals));
const TSDFVolumeCPU& _vol = *this;
auto PushNormals = [&](const ptype& pp, const int* position)
{
const TSDFVolumeCPU& vol(_vol);
Affine3f invPose(vol.pose.inv());
Point3f p = fromPtype(pp);
Point3f n = nan3;
if (!isNaN(p))
{
Point3f voxPt = (invPose * p);
voxPt = voxPt * vol.voxelSizeInv;
n = vol.pose.rotation() * vol.getNormalVoxel(voxPt);
}
normals(position[0], position[1]) = toPtype(n);
};
points.forEach(PushNormals);
}
}
///////// GPU implementation /////////
#ifdef HAVE_OPENCL
TSDFVolumeGPU::TSDFVolumeGPU(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor, float _truncDist, int _maxWeight,
TSDFVolumeGPU::TSDFVolumeGPU(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor, float _truncDist, int _maxWeight,
Point3i _resolution) :
TSDFVolume(_voxelSize, _pose, _raycastStepFactor, _truncDist, _maxWeight, _resolution, false)
{
@ -1140,9 +1138,10 @@ void TSDFVolumeGPU::reset()
// use depth instead of distance (optimization)
void TSDFVolumeGPU::integrate(InputArray _depth, float depthFactor,
const cv::Affine3f& cameraPose, const Intr& intrinsics)
const cv::Matx44f& cameraPose, const Intr& intrinsics)
{
CV_TRACE_FUNCTION();
CV_Assert(!_depth.empty());
UMat depth = _depth.getUMat();
@ -1156,7 +1155,7 @@ void TSDFVolumeGPU::integrate(InputArray _depth, float depthFactor,
if(k.empty())
throw std::runtime_error("Failed to create kernel: " + errorStr);
cv::Affine3f vol2cam(cameraPose.inv() * pose);
cv::Affine3f vol2cam(Affine3f(cameraPose.inv()) * pose);
float dfac = 1.f/depthFactor;
Vec4i volResGpu(volResolution.x, volResolution.y, volResolution.z);
Vec2f fxy(intrinsics.fx, intrinsics.fy), cxy(intrinsics.cx, intrinsics.cy);
@ -1185,7 +1184,7 @@ void TSDFVolumeGPU::integrate(InputArray _depth, float depthFactor,
}
void TSDFVolumeGPU::raycast(const cv::Affine3f& cameraPose, const Intr& intrinsics, Size frameSize,
void TSDFVolumeGPU::raycast(const cv::Matx44f& cameraPose, const Intr& intrinsics, Size frameSize,
cv::OutputArray _points, cv::OutputArray _normals) const
{
CV_TRACE_FUNCTION();
@ -1209,8 +1208,8 @@ void TSDFVolumeGPU::raycast(const cv::Affine3f& cameraPose, const Intr& intrinsi
UMat normals = _normals.getUMat();
UMat vol2camGpu, cam2volGpu;
Affine3f vol2cam = cameraPose.inv() * pose;
Affine3f cam2vol = pose.inv() * cameraPose;
Affine3f vol2cam = Affine3f(cameraPose.inv()) * pose;
Affine3f cam2vol = pose.inv() * Affine3f(cameraPose);
Mat(cam2vol.matrix).copyTo(cam2volGpu);
Mat(vol2cam.matrix).copyTo(vol2camGpu);
Intr::Reprojector r = intrinsics.makeReprojector();
@ -1251,6 +1250,7 @@ void TSDFVolumeGPU::raycast(const cv::Affine3f& cameraPose, const Intr& intrinsi
void TSDFVolumeGPU::fetchNormals(InputArray _points, OutputArray _normals) const
{
CV_TRACE_FUNCTION();
CV_Assert(!_points.empty());
if(_normals.needed())
{
@ -1417,7 +1417,7 @@ void TSDFVolumeGPU::fetchPointsNormals(OutputArray points, OutputArray normals)
#endif
cv::Ptr<TSDFVolume> makeTSDFVolume(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor,
cv::Ptr<TSDFVolume> makeTSDFVolume(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor,
float _truncDist, int _maxWeight, Point3i _resolution)
{
#ifdef HAVE_OPENCL

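The CPU, SIMD, and OpenCL normal paths in this patch all replace an unconditional normalize with the same guard against a near-zero gradient. A standalone sketch of that pattern is below; the helper name safeNormalize is illustrative, while the nan3 sentinel and the 1e-4 threshold mirror the diff.

```cpp
// Sketch of the zero-norm guard used by the normal computations in this patch:
// a degenerate gradient maps to NaN instead of being normalized, so callers
// can discard the point. The helper name is illustrative, not from the patch.
#include <opencv2/core.hpp>
#include <cmath>
#include <limits>

static const float qnan = std::numeric_limits<float>::quiet_NaN();
static const cv::Point3f nan3(qnan, qnan, qnan);

inline cv::Point3f safeNormalize(const cv::Point3f& n)
{
    float norm = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
    return norm < 0.0001f ? nan3 : n * (1.f / norm);
}
```
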
@ -18,12 +18,14 @@ namespace kinfu
{
// TODO: Optimization possible:
// * TsdfType can be FP16
// * weight can be uint16
// * WeightType can be uint16
typedef float TsdfType;
typedef int WeightType;
struct TsdfVoxel
{
TsdfType tsdf;
int weight;
WeightType weight;
};
typedef Vec<uchar, sizeof(TsdfVoxel)> VecTsdfVoxel;
@ -31,13 +33,13 @@ class TSDFVolume : public Volume
{
public:
// dimension in voxels, size in meters
TSDFVolume(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor, float _truncDist,
TSDFVolume(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor, float _truncDist,
int _maxWeight, Point3i _resolution, bool zFirstMemOrder = true);
virtual ~TSDFVolume() = default;
public:
Point3i volResolution;
int maxWeight;
WeightType maxWeight;
Point3f volSize;
float truncDist;
@ -49,12 +51,12 @@ class TSDFVolumeCPU : public TSDFVolume
{
public:
// dimension in voxels, size in meters
TSDFVolumeCPU(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor, float _truncDist,
int _maxWeight, Point3i _resolution, bool zFirstMemOrder = true);
TSDFVolumeCPU(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor, float _truncDist,
WeightType _maxWeight, Vec3i _resolution, bool zFirstMemOrder = true);
virtual void integrate(InputArray _depth, float depthFactor, const cv::Affine3f& cameraPose,
virtual void integrate(InputArray _depth, float depthFactor, const cv::Matx44f& cameraPose,
const cv::kinfu::Intr& intrinsics) override;
virtual void raycast(const cv::Affine3f& cameraPose, const cv::kinfu::Intr& intrinsics,
virtual void raycast(const cv::Matx44f& cameraPose, const cv::kinfu::Intr& intrinsics,
cv::Size frameSize, cv::OutputArray points,
cv::OutputArray normals) const override;
@ -83,12 +85,12 @@ class TSDFVolumeGPU : public TSDFVolume
{
public:
// dimension in voxels, size in meters
TSDFVolumeGPU(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor, float _truncDist,
TSDFVolumeGPU(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor, float _truncDist,
int _maxWeight, Point3i _resolution);
virtual void integrate(InputArray _depth, float depthFactor, const cv::Affine3f& cameraPose,
virtual void integrate(InputArray _depth, float depthFactor, const cv::Matx44f& cameraPose,
const cv::kinfu::Intr& intrinsics) override;
virtual void raycast(const cv::Affine3f& cameraPose, const cv::kinfu::Intr& intrinsics,
virtual void raycast(const cv::Matx44f& cameraPose, const cv::kinfu::Intr& intrinsics,
cv::Size frameSize, cv::OutputArray _points,
cv::OutputArray _normals) const override;
@ -104,7 +106,7 @@ class TSDFVolumeGPU : public TSDFVolume
UMat volume;
};
#endif
cv::Ptr<TSDFVolume> makeTSDFVolume(float _voxelSize, cv::Affine3f _pose, float _raycastStepFactor,
cv::Ptr<TSDFVolume> makeTSDFVolume(float _voxelSize, cv::Matx44f _pose, float _raycastStepFactor,
float _truncDist, int _maxWeight, Point3i _resolution);
} // namespace kinfu
} // namespace cv

@ -12,14 +12,15 @@ namespace cv
{
namespace kinfu
{
cv::Ptr<Volume> makeVolume(VolumeType _volumeType, float _voxelSize, cv::Affine3f _pose,
cv::Ptr<Volume> makeVolume(VolumeType _volumeType, float _voxelSize, cv::Matx44f _pose,
float _raycastStepFactor, float _truncDist, int _maxWeight,
float _truncateThreshold, Point3i _resolution)
float _truncateThreshold, Vec3i _resolution)
{
Point3i _presolution = _resolution;
if (_volumeType == VolumeType::TSDF)
{
return makeTSDFVolume(_voxelSize, _pose, _raycastStepFactor, _truncDist, _maxWeight,
_resolution);
_presolution);
}
else if (_volumeType == VolumeType::HASHTSDF)
{

@ -0,0 +1,513 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
namespace opencv_test { namespace {
using namespace cv;
/** Reprojects screen point to camera space given z coord. */
struct Reprojector
{
Reprojector() {}
inline Reprojector(Matx33f intr)
{
fxinv = 1.f / intr(0, 0), fyinv = 1.f / intr(1, 1);
cx = intr(0, 2), cy = intr(1, 2);
}
template<typename T>
inline cv::Point3_<T> operator()(cv::Point3_<T> p) const
{
T x = p.z * (p.x - cx) * fxinv;
T y = p.z * (p.y - cy) * fyinv;
return cv::Point3_<T>(x, y, p.z);
}
float fxinv, fyinv, cx, cy;
};
template<class Scene>
struct RenderInvoker : ParallelLoopBody
{
RenderInvoker(Mat_<float>& _frame, Affine3f _pose,
Reprojector _reproj,
float _depthFactor) : ParallelLoopBody(),
frame(_frame),
pose(_pose),
reproj(_reproj),
depthFactor(_depthFactor)
{ }
virtual void operator ()(const cv::Range& r) const
{
for (int y = r.start; y < r.end; y++)
{
float* frameRow = frame[y];
for (int x = 0; x < frame.cols; x++)
{
float pix = 0;
Point3f orig = pose.translation();
// direction through pixel
Point3f screenVec = reproj(Point3f((float)x, (float)y, 1.f));
float xyt = 1.f / (screenVec.x * screenVec.x +
screenVec.y * screenVec.y + 1.f);
Point3f dir = normalize(Vec3f(pose.rotation() * screenVec));
// screen space axis
dir.y = -dir.y;
const float maxDepth = 20.f;
const float maxSteps = 256;
float t = 0.f;
for (int step = 0; step < maxSteps && t < maxDepth; step++)
{
Point3f p = orig + dir * t;
float d = Scene::map(p);
if (d < 0.000001f)
{
float depth = std::sqrt(t * t * xyt);
pix = depth * depthFactor;
break;
}
t += d;
}
frameRow[x] = pix;
}
}
}
Mat_<float>& frame;
Affine3f pose;
Reprojector reproj;
float depthFactor;
};
struct Scene
{
virtual ~Scene() {}
static Ptr<Scene> create(Size sz, Matx33f _intr, float _depthFactor);
virtual Mat depth(Affine3f pose) = 0;
virtual std::vector<Affine3f> getPoses() = 0;
};
struct SemisphereScene : Scene
{
const int framesPerCycle = 72;
const float nCycles = 0.25f;
const Affine3f startPose = Affine3f(Vec3f(0.f, 0.f, 0.f), Vec3f(1.5f, 0.3f, -1.5f));
Size frameSize;
Matx33f intr;
float depthFactor;
SemisphereScene(Size sz, Matx33f _intr, float _depthFactor) :
frameSize(sz), intr(_intr), depthFactor(_depthFactor)
{ }
static float map(Point3f p)
{
float plane = p.y + 0.5f;
Point3f boxPose = p - Point3f(-0.0f, 0.3f, 0.5f);
float boxSize = 0.5f;
float roundness = 0.08f;
Point3f boxTmp;
boxTmp.x = max(abs(boxPose.x) - boxSize, 0.0f);
boxTmp.y = max(abs(boxPose.y) - boxSize, 0.0f);
boxTmp.z = max(abs(boxPose.z) - boxSize, 0.0f);
float roundBox = (float)cv::norm(boxTmp) - roundness;
Point3f spherePose = p - Point3f(-0.0f, 0.3f, 0.0f);
float sphereRadius = 0.5f;
float sphere = (float)cv::norm(spherePose) - sphereRadius;
float sphereMinusBox = max(sphere, -roundBox);
float subSphereRadius = 0.05f;
Point3f subSpherePose = p - Point3f(0.3f, -0.1f, -0.3f);
float subSphere = (float)cv::norm(subSpherePose) - subSphereRadius;
float res = min({sphereMinusBox, subSphere, plane});
return res;
}
Mat depth(Affine3f pose) override
{
Mat_<float> frame(frameSize);
Reprojector reproj(intr);
Range range(0, frame.rows);
parallel_for_(range, RenderInvoker<SemisphereScene>(frame, pose, reproj, depthFactor));
return std::move(frame);
}
std::vector<Affine3f> getPoses() override
{
std::vector<Affine3f> poses;
for (int i = 0; i < framesPerCycle * nCycles; i++)
{
float angle = (float)(CV_2PI * i / framesPerCycle);
Affine3f pose;
pose = pose.rotate(startPose.rotation());
pose = pose.rotate(Vec3f(0.f, -1.f, 0.f) * angle);
pose = pose.translate(Vec3f(startPose.translation()[0] * sin(angle),
startPose.translation()[1],
startPose.translation()[2] * cos(angle)));
poses.push_back(pose);
}
return poses;
}
};
Ptr<Scene> Scene::create(Size sz, Matx33f _intr, float _depthFactor)
{
return makePtr<SemisphereScene>(sz, _intr, _depthFactor);
}
// this is a temporary solution
// ----------------------------
typedef cv::Vec4f ptype;
typedef cv::Mat_< ptype > Points;
typedef Points Normals;
typedef Size2i Size;
template<int p>
inline float specPow(float x)
{
if (p % 2 == 0)
{
float v = specPow<p / 2>(x);
return v * v;
}
else
{
float v = specPow<(p - 1) / 2>(x);
return v * v * x;
}
}
template<>
inline float specPow<0>(float /*x*/)
{
return 1.f;
}
template<>
inline float specPow<1>(float x)
{
return x;
}
inline cv::Vec3f fromPtype(const ptype& x)
{
return cv::Vec3f(x[0], x[1], x[2]);
}
inline Point3f normalize(const Vec3f& v)
{
double nv = sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
return v * (nv ? 1. / nv : 0.);
}
void renderPointsNormals(InputArray _points, InputArray _normals, OutputArray image, Affine3f lightPose)
{
Size sz = _points.size();
image.create(sz, CV_8UC4);
Points points = _points.getMat();
Normals normals = _normals.getMat();
Mat_<Vec4b> img = image.getMat();
Range range(0, sz.height);
const int nstripes = -1;
parallel_for_(range, [&](const Range& r)
{
for (int y = r.start; y < r.end; y++)
{
Vec4b* imgRow = img[y];
const ptype* ptsRow = points[y];
const ptype* nrmRow = normals[y];
for (int x = 0; x < sz.width; x++)
{
Point3f p = fromPtype(ptsRow[x]);
Point3f n = fromPtype(nrmRow[x]);
Vec4b color;
if (cvIsNaN(p.x) || cvIsNaN(p.y) || cvIsNaN(p.z) )
{
color = Vec4b(0, 32, 0, 0);
}
else
{
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const int sp = 20; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
Point3f l = normalize(lightPose.translation() - Vec3f(p));
Point3f v = normalize(-Vec3f(p));
Point3f r = normalize(Vec3f(2.f * n * n.dot(l) - l));
uchar ix = (uchar)((Ax * Ka * Dx + Lx * Kd * Dx * max(0.f, n.dot(l)) +
Lx * Ks * Sx * specPow<sp>(max(0.f, r.dot(v)))) * 255.f);
color = Vec4b(ix, ix, ix, 0);
}
imgRow[x] = color;
}
}
}, nstripes);
}
// ----------------------------
static const bool display = false;
static const bool parallelCheck = false;
void normalsCheck(Mat normals)
{
Vec4f vector;
for (auto pvector = normals.begin<Vec4f>(); pvector < normals.end<Vec4f>(); pvector++)
{
vector = *pvector;
if (!cvIsNaN(vector[0]))
{
float length = vector[0] * vector[0] +
vector[1] * vector[1] +
vector[2] * vector[2];
ASSERT_LT(abs(1 - length), 0.0001f);
}
}
}
void normal_test(bool isHashTSDF, bool isRaycast, bool isFetchPointsNormals, bool isFetchNormals)
{
Ptr<kinfu::Params> _params;
if (isHashTSDF)
_params = kinfu::Params::hashTSDFParams(true);
else
_params = kinfu::Params::coarseParams();
Ptr<Scene> scene = Scene::create(_params->frameSize, _params->intr, _params->depthFactor);
std::vector<Affine3f> poses = scene->getPoses();
Mat depth = scene->depth(poses[0]);
UMat _points, _normals, _tmpnormals;
UMat _newPoints, _newNormals;
Mat points, normals;
Mat image;
AccessFlag af = ACCESS_READ;
auto normalCheck = [](Vec4f& vector, const int*)
{
if (!cvIsNaN(vector[0]))
{
float length = vector[0] * vector[0] +
vector[1] * vector[1] +
vector[2] * vector[2];
ASSERT_LT(abs(1 - length), 0.0001f);
}
};
Ptr<kinfu::Volume> volume = kinfu::makeVolume(_params->volumeType, _params->voxelSize, _params->volumePose.matrix,
_params->raycast_step_factor, _params->tsdf_trunc_dist, _params->tsdf_max_weight,
_params->truncateThreshold, _params->volumeDims);
volume->integrate(depth, _params->depthFactor, poses[0].matrix, _params->intr);
if (isRaycast)
{
volume->raycast(poses[0].matrix, _params->intr, _params->frameSize, _points, _normals);
}
if (isFetchPointsNormals)
{
volume->fetchPointsNormals(_points, _normals);
}
if (isFetchNormals)
{
volume->fetchPointsNormals(_points, _tmpnormals);
volume->fetchNormals(_points, _normals);
}
normals = _normals.getMat(af);
if (parallelCheck)
{
normals.forEach<Vec4f>(normalCheck);
}
else
{
normalsCheck(normals);
}
if (isRaycast && display)
{
imshow("depth", depth * (1.f / _params->depthFactor / 4.f));
points = _points.getMat(af);
renderPointsNormals(points, normals, image, _params->lightPose);
imshow("render", image);
waitKey(20000);
}
if (isRaycast)
{
volume->raycast(poses[17].matrix, _params->intr, _params->frameSize, _newPoints, _newNormals);
normals = _newNormals.getMat(af);
normalsCheck(normals);
if (parallelCheck)
{
normals.forEach<Vec4f>(normalCheck);
}
else
{
normalsCheck(normals);
}
if (display)
{
imshow("depth", depth * (1.f / _params->depthFactor / 4.f));
points = _newPoints.getMat(af);
renderPointsNormals(points, normals, image, _params->lightPose);
imshow("render", image);
waitKey(20000);
}
}
points.release(); normals.release();
}
int counterOfValid(Mat points)
{
Vec4f* v;
int i, j;
int count = 0;
for (i = 0; i < points.rows; ++i)
{
v = (points.ptr<Vec4f>(i));
for (j = 0; j < points.cols; ++j)
{
if ((v[j])[0] != 0 ||
(v[j])[1] != 0 ||
(v[j])[2] != 0)
{
count++;
}
}
}
return count;
}
void valid_points_test(bool isHashTSDF)
{
Ptr<kinfu::Params> _params;
if (isHashTSDF)
_params = kinfu::Params::hashTSDFParams(true);
else
_params = kinfu::Params::coarseParams();
Ptr<Scene> scene = Scene::create(_params->frameSize, _params->intr, _params->depthFactor);
std::vector<Affine3f> poses = scene->getPoses();
Mat depth = scene->depth(poses[0]);
UMat _points, _normals;
UMat _newPoints, _newNormals;
Mat points, normals;
Mat image;
int anfas, profile;
AccessFlag af = ACCESS_READ;
Ptr<kinfu::Volume> volume = kinfu::makeVolume(_params->volumeType, _params->voxelSize, _params->volumePose.matrix,
_params->raycast_step_factor, _params->tsdf_trunc_dist, _params->tsdf_max_weight,
_params->truncateThreshold, _params->volumeDims);
volume->integrate(depth, _params->depthFactor, poses[0].matrix, _params->intr);
volume->raycast(poses[0].matrix, _params->intr, _params->frameSize, _points, _normals);
normals = _normals.getMat(af);
points = _points.getMat(af);
patchNaNs(points);
anfas = counterOfValid(points);
if (display)
{
imshow("depth", depth * (1.f / _params->depthFactor / 4.f));
renderPointsNormals(points, normals, image, _params->lightPose);
imshow("render", image);
waitKey(20000);
}
volume->raycast(poses[17].matrix, _params->intr, _params->frameSize, _newPoints, _newNormals);
normals = _newNormals.getMat(af);
points = _newPoints.getMat(af);
patchNaNs(points);
profile = counterOfValid(points);
if (display)
{
imshow("depth", depth * (1.f / _params->depthFactor / 4.f));
renderPointsNormals(points, normals, image, _params->lightPose);
imshow("render", image);
waitKey(20000);
}
float percentValidity = float(profile) / float(anfas);
ASSERT_LT(0.5 - percentValidity, 0.3);
}
TEST(TSDF, raycast_normals)
{
normal_test(false, true, false, false);
}
TEST(HashTSDF, raycast_normals)
{
normal_test(true, true, false, false);
}
TEST(TSDF, fetch_points_normals)
{
normal_test(false, false, true, false);
}
TEST(HashTSDF, fetch_points_normals)
{
normal_test(true, false, true, false);
}
TEST(TSDF, fetch_normals)
{
normal_test(false, false, false, true);
}
TEST(HashTSDF, fetch_normals)
{
normal_test(true, false, false, true);
}
TEST(TSDF, valid_points)
{
valid_points_test(false);
}
TEST(HashTSDF, valid_points)
{
valid_points_test(true);
}
}} // namespace