refactored WImage3D

Branch: pull/2173/head
Author: Anatoly Baksheev (11 years ago)
Parent: f37c31742a
Commit: e3ff28dacc
Changed files (changed line counts in parentheses):
  1. modules/viz/doc/widget.rst (34)
  2. modules/viz/include/opencv2/viz/widgets.hpp (15)
  3. modules/viz/src/precomp.hpp (45)
  4. modules/viz/src/shapes.cpp (119)
  5. modules/viz/src/vizcore.cpp (3)
  6. modules/viz/src/vizimpl.cpp (2)
  7. modules/viz/src/vizimpl.hpp (2)
  8. modules/viz/src/vtk/vtkCloudMatSource.cpp (1)
  9. modules/viz/src/widget.cpp (11)
  10. modules/viz/test/tests_simple.cpp (36)

@@ -170,13 +170,16 @@ Base class of all 3D widgets. ::
public:
Widget3D() {}
//! widget position manipulation, i.e. place where it is rendered.
void setPose(const Affine3d &pose);
void updatePose(const Affine3d &pose);
Affine3d getPose() const;
//! updates internal widget data, i.e. points, normals, etc.
void applyTransform(const Affine3d &transform);
void setColor(const Color &color);
private:
/* hidden */
};
viz::Widget3D::setPose
@@ -201,6 +204,15 @@ Returns the current pose of the widget.
.. ocv:function:: Affine3d getWidgetPose() const
viz::Widget3D::applyTransform
-------------------------------
Transforms internal widget data (i.e. points, normals) using the given transform.
.. ocv:function:: void applyTransform(const Affine3d &transform);
:param transform: Specified transformation to apply.
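For illustration, a minimal sketch (not part of this patch) contrasting ``applyTransform`` with ``setPose``, assuming a ``Viz3d`` window named ``viz``: ::

    cv::viz::WCube cube(cv::Point3d(-0.5, -0.5, -0.5), cv::Point3d(0.5, 0.5, 0.5));
    // applyTransform rewrites the underlying polydata (points, normals)
    cube.applyTransform(cv::Affine3d().translate(cv::Vec3d(1.0, 0.0, 0.0)));
    // setPose only repositions the rendered actor; the widget data stays untouched
    cube.setPose(cv::Affine3d(cv::Vec3d(0.0, 0.0, CV_PI / 4), cv::Vec3d()));
    viz.showWidget("cube", cube);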
viz::Widget3D::setColor
-----------------------
Sets the color of the widget.
@@ -598,25 +610,25 @@ This 3D Widget represents an image in 3D space. ::
class CV_EXPORTS WImage3D : public Widget3D
{
public:
//! Creates 3D image at the origin
WImage3D(const Mat &image, const Size &size);
//! Creates 3D image at a given position, pointing in the direction of the normal, and having the up_vector orientation
WImage3D(const Vec3d &position, const Vec3d &normal, const Vec3d &up_vector, const Mat &image, const Size &size);
public:
//! Creates 3D image at the origin
WImage3D(const Mat &image, const Size2d &size);
//! Creates 3D image at a given position, pointing in the direction of the normal, and having the up_vector orientation
WImage3D(const Mat &image, const Size2d &size, const Vec3d &position, const Vec3d &normal, const Vec3d &up_vector);
void setImage(const Mat &image);
};
viz::WImage3D::WImage3D
-----------------------
Constructs a WImage3D.
.. ocv:function:: WImage3D(const Mat &image, const Size &size)
.. ocv:function:: WImage3D(const Mat &image, const Size2d &size)
:param image: BGR or Gray-Scale image.
:param size: Size of the image.
.. ocv:function:: WImage3D(const Vec3d &position, const Vec3d &normal, const Vec3d &up_vector, const Mat &image, const Size &size)
.. ocv:function:: WImage3D(const Mat &image, const Size2d &size, const Vec3d &position, const Vec3d &normal, const Vec3d &up_vector)
:param position: Position of the image.
:param normal: Normal of the plane that represents the image.
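For illustration, a short sketch of both constructors with the new argument order (not part of the patch text itself, but mirroring the updated test in tests_simple.cpp), assuming a ``Viz3d`` window named ``viz``: ::

    cv::Mat lena = cv::imread("lena.png");

    // axis-aligned image plane, 1x1 units, centered at the origin
    cv::viz::WImage3D img0(lena, cv::Size2d(1.0, 1.0));

    // oriented image plane: center position, plane normal and up vector given explicitly
    cv::viz::WImage3D img1(lena, cv::Size2d(1.0, 1.0),
                           cv::Vec3d(-0.5, -0.5, 0.0),   // position (plane center)
                           cv::Vec3d( 1.0,  1.0, 0.0),   // normal
                           cv::Vec3d( 0.0,  1.0, 0.0));  // up vector

    viz.showWidget("img0", img0);
    viz.showWidget("img1", img1);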

@@ -111,10 +111,14 @@ namespace cv
public:
Widget3D() {}
//! widget position manipulation, i.e. place where it is rendered
void setPose(const Affine3d &pose);
void updatePose(const Affine3d &pose);
Affine3d getPose() const;
//! update internal widget data, i.e. points, normals, etc.
void applyTransform(const Affine3d &transform);
void setColor(const Color &color);
};
@@ -172,7 +176,8 @@ namespace cv
class CV_EXPORTS WCube : public Widget3D
{
public:
WCube(const Point3d& pt_min, const Point3d& pt_max, bool wire_frame = true, const Color &color = Color::white());
WCube(const Point3d& min_point = Vec3d::all(-0.5), const Point3d& max_point = Vec3d::all(0.5),
bool wire_frame = true, const Color &color = Color::white());
};
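With the defaulted arguments added above, WCube() now yields a wireframe unit cube centered at the origin. A minimal sketch (not part of the patch), assuming a Viz3d window named viz:

    cv::viz::WCube unit_cube;                               // wireframe, spans -0.5..0.5 on every axis
    cv::viz::WCube solid(cv::Point3d(0, 0, 0), cv::Point3d(1, 2, 3),
                         false, cv::viz::Color::blue());    // solid cube with explicit bounds
    viz.showWidget("unit_cube", unit_cube);
    viz.showWidget("solid", solid);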
class CV_EXPORTS WPolyLine : public Widget3D
@@ -213,10 +218,12 @@ namespace cv
class CV_EXPORTS WImage3D : public Widget3D
{
public:
//! Creates 3D image at the origin
WImage3D(const Mat &image, const Size &size);
//! Creates 3D image in a plane centered at the origin with normal orientation along z-axis,
//! image x- and y-axes are oriented along x- and y-axes of 3d world
WImage3D(const Mat &image, const Size2d &size);
//! Creates 3D image at a given position, pointing in the direction of the normal, and having the up_vector orientation
WImage3D(const Vec3d &position, const Vec3d &normal, const Vec3d &up_vector, const Mat &image, const Size &size);
WImage3D(const Mat &image, const Size2d &size, const Vec3d &center, const Vec3d &normal, const Vec3d &up_vector);
void setImage(const Mat &image);
};

@@ -126,6 +126,7 @@
#include <vtkVRMLExporter.h>
#include <vtkTensorGlyph.h>
#include <vtkImageAlgorithm.h>
#include <vtkTransformFilter.h>
#if !defined(_WIN32) || defined(__CYGWIN__)
# include <unistd.h> /* unlink */
@@ -175,6 +176,8 @@ namespace cv
friend class Viz3d;
};
template<typename _Tp> inline _Tp normalized(const _Tp& v) { return v * 1/norm(v); }
template<typename _Tp> inline bool isNan(const _Tp* data)
{
return isNan(data[0]) || isNan(data[1]) || isNan(data[2]);
@@ -187,10 +190,24 @@ namespace cv
return vtkPolyData::SafeDownCast(mapper->GetInput());
}
inline vtkSmartPointer<vtkMatrix4x4> vtkmatrix(const cv::Matx44d &matrix)
{
vtkSmartPointer<vtkMatrix4x4> vtk_matrix = vtkSmartPointer<vtkMatrix4x4>::New();
vtk_matrix->DeepCopy(matrix.val);
return vtk_matrix;
}
inline Color vtkcolor(const Color& color)
{
Color scaled_color = color * (1.0/255.0);
std::swap(scaled_color[0], scaled_color[2]);
return scaled_color;
}
struct VtkUtils
{
template<class Filter>
static void SetInputData(vtkSmartPointer<Filter> filter, vtkPolyData *polydata)
static void SetInputData(vtkSmartPointer<Filter> filter, vtkPolyData* polydata)
{
#if VTK_MAJOR_VERSION <= 5
filter->SetInput(polydata);
@@ -238,23 +255,19 @@ namespace cv
normals_generator->Update();
return normals_generator->GetOutput();
}
};
inline vtkSmartPointer<vtkMatrix4x4> vtkmatrix(const cv::Matx44d &matrix)
{
vtkSmartPointer<vtkMatrix4x4> vtk_matrix = vtkSmartPointer<vtkMatrix4x4>::New();
vtk_matrix->DeepCopy(matrix.val);
return vtk_matrix;
}
inline Color vtkcolor(const Color& color)
{
Color scaled_color = color * (1.0/255.0);
std::swap(scaled_color[0], scaled_color[2]);
return scaled_color;
}
static vtkSmartPointer<vtkPolyData> TransformPolydata(vtkSmartPointer<vtkPolyData> polydata, const Affine3d& pose)
{
vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
transform->SetMatrix(vtkmatrix(pose.matrix));
template<typename _Tp> inline _Tp normalized(const _Tp& v) { return v * 1/norm(v); }
vtkSmartPointer<vtkTransformPolyDataFilter> transform_filter = vtkSmartPointer<vtkTransformPolyDataFilter>::New();
transform_filter->SetTransform(transform);
transform_filter->SetInputConnection(polydata->GetProducerPort());
transform_filter->Update();
return transform_filter->GetOutput();
}
};
}
}

@@ -294,18 +294,18 @@ template<> cv::viz::WCylinder cv::viz::Widget::cast<cv::viz::WCylinder>()
///////////////////////////////////////////////////////////////////////////////////////////////
/// cube widget implementation
cv::viz::WCube::WCube(const Point3d& pt_min, const Point3d& pt_max, bool wire_frame, const Color &color)
cv::viz::WCube::WCube(const Point3d& min_point, const Point3d& max_point, bool wire_frame, const Color &color)
{
vtkSmartPointer<vtkPolyDataAlgorithm> cube;
if (wire_frame)
{
cube = vtkSmartPointer<vtkOutlineSource>::New();
vtkOutlineSource::SafeDownCast(cube)->SetBounds(pt_min.x, pt_max.x, pt_min.y, pt_max.y, pt_min.z, pt_max.z);
vtkOutlineSource::SafeDownCast(cube)->SetBounds(min_point.x, max_point.x, min_point.y, max_point.y, min_point.z, max_point.z);
}
else
{
cube = vtkSmartPointer<vtkCubeSource>::New();
vtkCubeSource::SafeDownCast(cube)->SetBounds(pt_min.x, pt_max.x, pt_min.y, pt_max.y, pt_min.z, pt_max.z);
vtkCubeSource::SafeDownCast(cube)->SetBounds(min_point.x, max_point.x, min_point.y, max_point.y, min_point.z, max_point.z);
}
cube->Update();
@@ -620,10 +620,9 @@ cv::viz::WImageOverlay::WImageOverlay(const Mat &image, const Rect &rect)
vtkSmartPointer<vtkImageMatSource> source = vtkSmartPointer<vtkImageMatSource>::New();
source->SetImage(image);
// Need to flip the image as the coordinates are different in OpenCV and VTK
vtkSmartPointer<vtkImageFlip> flip_filter = vtkSmartPointer<vtkImageFlip>::New();
flip_filter->SetFilteredAxis(1); // Vertical flip
flip_filter->SetInputConnection(source->GetOutputPort());
flip_filter->SetFilteredAxis(1);
// Scale the image based on the Rect
vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
@@ -663,13 +662,11 @@ void cv::viz::WImageOverlay::setImage(const Mat &image)
vtkSmartPointer<vtkImageMatSource> source = vtkSmartPointer<vtkImageMatSource>::New();
source->SetImage(image);
// Need to flip the image as the coordinates are different in OpenCV and VTK
vtkSmartPointer<vtkImageFlip> flipFilter = vtkSmartPointer<vtkImageFlip>::New();
flipFilter->SetFilteredAxis(1); // Vertical flip
flipFilter->SetInputConnection(source->GetOutputPort());
flipFilter->Update();
vtkSmartPointer<vtkImageFlip> flip_filter = vtkSmartPointer<vtkImageFlip>::New();
flip_filter->SetInputConnection(source->GetOutputPort());
flip_filter->SetFilteredAxis(1);
mapper->SetInputConnection(flipFilter->GetOutputPort());
mapper->SetInputConnection(flip_filter->GetOutputPort());
}
template<> cv::viz::WImageOverlay cv::viz::Widget::cast<cv::viz::WImageOverlay>()
@@ -681,104 +678,49 @@ template<> cv::viz::WImageOverlay cv::viz::Widget::cast<cv::viz::WImageOverlay>()
///////////////////////////////////////////////////////////////////////////////////////////////
/// image 3D widget implementation
cv::viz::WImage3D::WImage3D(const Mat &image, const Size &size)
cv::viz::WImage3D::WImage3D(const Mat &image, const Size2d &size)
{
CV_Assert(!image.empty() && image.depth() == CV_8U);
vtkSmartPointer<vtkImageMatSource> source = vtkSmartPointer<vtkImageMatSource>::New();
source->SetImage(image);
// Need to flip the image as the coordinates are different in OpenCV and VTK
vtkSmartPointer<vtkImageFlip> flipFilter = vtkSmartPointer<vtkImageFlip>::New();
flipFilter->SetFilteredAxis(1); // Vertical flip
flipFilter->SetInputConnection(source->GetOutputPort());
flipFilter->Update();
Vec3d plane_center(size.width * 0.5, size.height * 0.5, 0.0);
vtkSmartPointer<vtkTexture> texture = vtkSmartPointer<vtkTexture>::New();
texture->SetInputConnection(source->GetOutputPort());
vtkSmartPointer<vtkPlaneSource> plane = vtkSmartPointer<vtkPlaneSource>::New();
plane->SetCenter(plane_center[0], plane_center[1], plane_center[2]);
plane->SetNormal(0.0, 0.0, 1.0);
vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
transform->PreMultiply();
transform->Translate(plane_center[0], plane_center[1], plane_center[2]);
transform->Scale(size.width, size.height, 1.0);
transform->Translate(-plane_center[0], -plane_center[1], -plane_center[2]);
vtkSmartPointer<vtkTransformPolyDataFilter> transform_filter = vtkSmartPointer<vtkTransformPolyDataFilter>::New();
transform_filter->SetTransform(transform);
transform_filter->SetInputConnection(plane->GetOutputPort());
transform_filter->Update();
// Apply the texture
vtkSmartPointer<vtkTexture> texture = vtkSmartPointer<vtkTexture>::New();
texture->SetInputConnection(flipFilter->GetOutputPort());
plane->SetOrigin(-0.5 * size.width, -0.5 * size.height, 0.0);
plane->SetPoint1( 0.5 * size.width, -0.5 * size.height, 0.0);
plane->SetPoint2(-0.5 * size.width, 0.5 * size.height, 0.0);
vtkSmartPointer<vtkTextureMapToPlane> texturePlane = vtkSmartPointer<vtkTextureMapToPlane>::New();
texturePlane->SetInputConnection(transform_filter->GetOutputPort());
vtkSmartPointer<vtkTextureMapToPlane> textured_plane = vtkSmartPointer<vtkTextureMapToPlane>::New();
textured_plane->SetInputConnection(plane->GetOutputPort());
vtkSmartPointer<vtkPolyDataMapper> planeMapper = vtkSmartPointer<vtkPolyDataMapper>::New();
planeMapper->SetInputConnection(texturePlane->GetOutputPort());
vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(textured_plane->GetOutputPort());
vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
actor->SetMapper(planeMapper);
actor->SetMapper(mapper);
actor->SetTexture(texture);
actor->GetProperty()->ShadingOff();
actor->GetProperty()->LightingOff();
WidgetAccessor::setProp(*this, actor);
}
cv::viz::WImage3D::WImage3D(const Vec3d &position, const Vec3d &normal, const Vec3d &up_vector, const Mat &image, const Size &size)
cv::viz::WImage3D::WImage3D(const Mat &image, const Size2d &size, const Vec3d &center, const Vec3d &normal, const Vec3d &up_vector)
{
CV_Assert(!image.empty() && image.depth() == CV_8U);
// Create the vtk image and set its parameters based on input image
vtkSmartPointer<vtkImageMatSource> source = vtkSmartPointer<vtkImageMatSource>::New();
source->SetImage(image);
// Need to flip the image as the coordinates are different in OpenCV and VTK
vtkSmartPointer<vtkImageFlip> flipFilter = vtkSmartPointer<vtkImageFlip>::New();
flipFilter->SetFilteredAxis(1); // Vertical flip
flipFilter->SetInputConnection(source->GetOutputPort());
flipFilter->Update();
vtkSmartPointer<vtkPlaneSource> plane = vtkSmartPointer<vtkPlaneSource>::New();
plane->SetCenter(0.0, 0.0, 0.0);
plane->SetNormal(0.0, 0.0, 1.0);
// Compute the transformation matrix for drawing the camera frame in a scene
Vec3d n = normalize(normal);
Vec3d u = normalize(up_vector.cross(n));
Vec3d v = n.cross(u);
Affine3d pose = makeTransformToGlobal(u, v, n, center);
Affine3d pose = makeTransformToGlobal(u, v, n, position);
// Apply the texture
vtkSmartPointer<vtkTexture> texture = vtkSmartPointer<vtkTexture>::New();
texture->SetInputConnection(flipFilter->GetOutputPort());
vtkSmartPointer<vtkTextureMapToPlane> texturePlane = vtkSmartPointer<vtkTextureMapToPlane>::New();
texturePlane->SetInputConnection(plane->GetOutputPort());
// Apply the transform after texture mapping
vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
transform->PreMultiply();
transform->SetMatrix(vtkmatrix(pose.matrix));
transform->Scale(size.width, size.height, 1.0);
vtkSmartPointer<vtkTransformPolyDataFilter> transform_filter = vtkSmartPointer<vtkTransformPolyDataFilter>::New();
transform_filter->SetTransform(transform);
transform_filter->SetInputConnection(texturePlane->GetOutputPort());
transform_filter->Update();
vtkSmartPointer<vtkPolyDataMapper> planeMapper = vtkSmartPointer<vtkPolyDataMapper>::New();
planeMapper->SetInputConnection(transform_filter->GetOutputPort());
vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
actor->SetMapper(planeMapper);
actor->SetTexture(texture);
WidgetAccessor::setProp(*this, actor);
WImage3D image3d(image, size);
image3d.applyTransform(pose);
*this = image3d;
}
void cv::viz::WImage3D::setImage(const Mat &image)
@@ -791,15 +733,8 @@ void cv::viz::WImage3D::setImage(const Mat &image)
vtkSmartPointer<vtkImageMatSource> source = vtkSmartPointer<vtkImageMatSource>::New();
source->SetImage(image);
// Need to flip the image as the coordinates are different in OpenCV and VTK
vtkSmartPointer<vtkImageFlip> flipFilter = vtkSmartPointer<vtkImageFlip>::New();
flipFilter->SetFilteredAxis(1); // Vertical flip
flipFilter->SetInputConnection(source->GetOutputPort());
flipFilter->Update();
// Apply the texture
vtkSmartPointer<vtkTexture> texture = vtkSmartPointer<vtkTexture>::New();
texture->SetInputConnection(flipFilter->GetOutputPort());
texture->SetInputConnection(source->GetOutputPort());
actor->SetTexture(texture);
}

@@ -305,6 +305,3 @@ void cv::viz::computeNormals(const Mesh& mesh, OutputArray _normals)

@@ -271,7 +271,7 @@ void cv::viz::Viz3d::VizImpl::removeAllWidgets()
}
/////////////////////////////////////////////////////////////////////////////////////////////
bool cv::viz::Viz3d::VizImpl::removeActorFromRenderer(const vtkSmartPointer<vtkProp> &actor)
bool cv::viz::Viz3d::VizImpl::removeActorFromRenderer(vtkSmartPointer<vtkProp> actor)
{
vtkProp* actor_to_remove = vtkProp::SafeDownCast(actor);

@@ -176,7 +176,7 @@ private:
/** \brief Boolean that holds whether or not the camera parameters were manually initialized*/
bool camera_set_;
bool removeActorFromRenderer(const vtkSmartPointer<vtkProp> &actor);
bool removeActorFromRenderer(vtkSmartPointer<vtkProp> actor);
};
#endif

@@ -212,6 +212,7 @@ template<typename _Tn, typename _Msk>
void cv::viz::vtkCloudMatSource::filterNanNormalsCopy(const Mat& cloud_normals, const Mat& mask, int total)
{
normals = vtkSmartPointer< VtkDepthTraits<_Tn>::array_type >::New();
normals->SetName("Normals");
normals->SetNumberOfComponents(3);
normals->SetNumberOfTuples(total);

@@ -269,6 +269,17 @@ cv::Affine3d cv::viz::Widget3D::getPose() const
return Affine3d(*actor->GetUserMatrix()->Element);
}
void cv::viz::Widget3D::applyTransform(const Affine3d &transform)
{
vtkActor *actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this));
CV_Assert("Widget is not 3D actor." && actor);
vtkSmartPointer<vtkPolyDataMapper> mapper = vtkPolyDataMapper::SafeDownCast(actor->GetMapper());
CV_Assert("Widget doesn't have a polydata mapper" && mapper);
VtkUtils::SetInputData(mapper, VtkUtils::TransformPolydata(mapper->GetInput(), transform));
}
void cv::viz::Widget3D::setColor(const Color &color)
{
// Cast to actor instead of prop3d since prop3d doesn't provide getproperty

@@ -229,22 +229,44 @@ TEST(Viz, show_overlay_image)
Viz3d viz("show_overlay_image");
viz.showWidget("coos", WCoordinateSystem());
viz.showWidget("img1", WImageOverlay(lena, Rect(Point(0, 0), Size_<double>(viz.getWindowSize()) * 0.5)));
viz.showWidget("cube", WCube(Vec3d::all(-0.5), Vec3d::all(0.5)));
viz.showWidget("img1", WImageOverlay(lena, Rect(Point(0, 400), Size_<double>(viz.getWindowSize()) * 0.5)));
viz.showWidget("img2", WImageOverlay(gray, Rect(Point(640, 0), Size_<double>(viz.getWindowSize()) * 0.5)));
viz.spin();
int i = 0;
while(!viz.wasStopped())
{
double a = ++i % 360;
Vec3d pose(sin(a * CV_PI/180), 0.7, cos(a * CV_PI/180));
viz.setViewerPose(makeCameraPose(pose * 3, Vec3d(0.0, 0.5, 0.0), Vec3d(0.0, 0.1, 0.0)));
viz.getWidget("img1").cast<WImageOverlay>().setImage(lena * pow(sin(i*10*CV_PI/180) * 0.5 + 0.5, 1.0));
//viz.getWidget("img1").cast<WImageOverlay>().setImage(gray);
viz.spinOnce(1, true);
}
//viz.spin();
}
TEST(Viz, show_image_3d)
TEST(Viz, DISABLED_show_image_3d)
{
Mat lena = imread(Path::combine(cvtest::TS::ptr()->get_data_path(), "lena.png"));
Mat gray = make_gray(lena);
Viz3d viz("show_image_3d");
viz.showWidget("coos", WCoordinateSystem(100));
viz.showWidget("img1", WImage3D(lena, Size(lena.cols, lena.rows/2)), makeCameraPose(Vec3d(1.0, 1.0, 1.0), Vec3d::all(0.0), Vec3d(0.0, -1.0, 0.0)));
viz.showWidget("img2", WImage3D(Vec3d(1.0, -1.0, 1.0), Vec3d(-1, 1, -1), Vec3d(0.0, -1.0, 0.0), gray, lena.size()));
viz.showWidget("coos", WCoordinateSystem());
viz.showWidget("cube", WCube(Vec3d::all(-0.5), Vec3d::all(0.5)));
viz.showWidget("arr0", WArrow(Vec3d(0.5, 0.0, 0.0), Vec3d(1.5, 0.0, 0.0), 0.009, Color::raspberry()));
viz.showWidget("img0", WImage3D(lena, Size2d(1.0, 1.0)), Affine3d(Vec3d(0.0, CV_PI/2, 0.0), Vec3d(.5, 0.0, 0.0)));
viz.showWidget("arr1", WArrow(Vec3d(-0.5, -0.5, 0.0), Vec3d(0.2, 0.2, 0.0), 0.009, Color::raspberry()));
viz.showWidget("img1", WImage3D(gray, Size2d(1.0, 1.0), Vec3d(-0.5, -0.5, 0.0), Vec3d(1.0, 1.0, 0.0), Vec3d(0.0, 1.0, 0.0)));
viz.spin();
int i = 0;
while(!viz.wasStopped())
{
viz.getWidget("img0").cast<WImage3D>().setImage(lena * pow(sin(i++*7.5*CV_PI/180) * 0.5 + 0.5, 1.0));
viz.spinOnce(1, true);
}
//viz.spin();
}
TEST(Viz, DISABLED_spin_twice_____________________________TODO_UI_BUG)
