Added a translation parameter to the stitching module warpers. For now, only the PlaneWarper and PlaneWarperGpu warpers support it.

Alexey Spizhevoy 14 years ago
parent 0aaaad1ea8
commit 4a5abc7552
  1. modules/stitching/include/opencv2/stitching/detail/warpers.hpp (26 changed lines)
  2. modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp (34 changed lines)
  3. modules/stitching/src/stitcher.cpp (4 changed lines)
  4. modules/stitching/src/warpers.cpp (66 changed lines)
  5. samples/cpp/stitching_detailed.cpp (4 changed lines)
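
For reference, a minimal usage sketch of the new translation-aware PlaneWarper API (illustration only, not part of this commit; the file name, matrix values and console output are placeholders, and K/R/T follow the 3x3 / 3x1 CV_32F layout asserted in setCameraParams):

#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/stitching/detail/warpers.hpp>

using namespace cv;

int main()
{
    // Placeholder camera parameters: identity intrinsics/rotation, small x-translation.
    Mat img = imread("image.jpg");
    if (img.empty()) return 1;
    Mat K = Mat::eye(3, 3, CV_32F);
    Mat R = Mat::eye(3, 3, CV_32F);
    Mat T = (Mat_<float>(3, 1) << 0.1f, 0.f, 0.f);

    detail::PlaneWarper warper(1.f);

    // New overload: the translation T is applied in addition to K and R.
    Mat warped;
    Point tl = warper.warp(img, K, R, T, warped, INTER_LINEAR, BORDER_REFLECT);

    // Bounding rectangle of the warped image in the plane coordinate frame.
    Rect roi = warper.warpRoi(img.size(), K, R, T);

    std::cout << "top-left: (" << tl.x << ", " << tl.y << "), roi: "
              << roi.width << "x" << roi.height << std::endl;
    return 0;
}

Note that this commit removes the interface-level defaults for interp_mode/border_mode, so call sites pass INTER_LINEAR/BORDER_REFLECT explicitly, as the stitcher.cpp and sample hunks below do.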

@@ -57,20 +57,26 @@ class CV_EXPORTS Warper
 public:
     virtual ~Warper() {}
     virtual Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
-                       int interp_mode = INTER_LINEAR, int border_mode = BORDER_REFLECT) = 0;
+                       int interp_mode, int border_mode) = 0;
+    virtual Point warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, Mat &dst,
+                       int interp_mode, int border_mode) = 0;
     virtual Rect warpRoi(const Size &sz, const Mat &K, const Mat &R) = 0;
+    virtual Rect warpRoi(const Size &sz, const Mat &K, const Mat &R, const Mat &T) = 0;
 };
 struct CV_EXPORTS ProjectorBase
 {
-    void setCameraParams(const Mat &K, const Mat &R);
+    void setCameraParams(const Mat &K = Mat::eye(3, 3, CV_32F),
+                         const Mat &R = Mat::eye(3, 3, CV_32F),
+                         const Mat &T = Mat::zeros(3, 1, CV_32F));
     float scale;
     float k[9];
     float rinv[9];
     float r_kinv[9];
     float k_rinv[9];
+    float t[3];
 };

@@ -78,10 +84,12 @@ template <class P>
 class CV_EXPORTS WarperBase : public Warper
 {
 public:
-    virtual Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
-                       int interp_mode, int border_mode);
-    virtual Rect warpRoi(const Size &sz, const Mat &K, const Mat &R);
+    Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
+               int interp_mode, int border_mode);
+    Point warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, Mat &dst,
+               int interp_mode, int border_mode);
+    Rect warpRoi(const Size &sz, const Mat &K, const Mat &R);
+    Rect warpRoi(const Size &sz, const Mat &K, const Mat &R, const Mat &T);

 protected:
     // Detects ROI of the destination image. It's correct for any projection.

@@ -108,6 +116,10 @@ class CV_EXPORTS PlaneWarper : public WarperBase<PlaneProjector>
 {
 public:
     PlaneWarper(float scale = 1.f) { projector_.scale = scale; }
+    void setScale(float scale) { projector_.scale = scale; }
+    Point warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, Mat &dst,
+               int interp_mode, int border_mode);
+    Rect warpRoi(const Size &sz, const Mat &K, const Mat &R, const Mat &T);

 protected:
     void detectResultRoi(Point &dst_tl, Point &dst_br);

@@ -120,6 +132,8 @@ public:
     PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {}
     Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
                int interp_mode, int border_mode);
+    Point warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, Mat &dst,
+               int interp_mode, int border_mode);

 private:
     gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;

@@ -80,6 +80,15 @@ Point WarperBase<P>::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
 }
+template <class P>
+Point WarperBase<P>::warp(const Mat &/*src*/, const Mat &/*K*/, const Mat &/*R*/, const Mat &/*T*/, Mat &/*dst*/,
+                          int /*interp_mode*/, int /*border_mode*/)
+{
+    CV_Error(CV_StsNotImplemented, "translation support isn't implemented");
+    return Point();
+}
 template <class P>
 Rect WarperBase<P>::warpRoi(const Size &sz, const Mat &K, const Mat &R)
 {

@@ -93,6 +102,14 @@ Rect WarperBase<P>::warpRoi(const Size &sz, const Mat &K, const Mat &R)
 }
+template <class P>
+Rect WarperBase<P>::warpRoi(const Size &sz, const Mat &K, const Mat &R, const Mat &T)
+{
+    CV_Error(CV_StsNotImplemented, "translation support isn't implemented");
+    return Rect();
+}
 template <class P>
 void WarperBase<P>::detectResultRoi(Point &dst_tl, Point &dst_br)
 {

@@ -163,21 +180,24 @@ void PlaneProjector::mapForward(float x, float y, float &u, float &v)
     float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
     float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
-    u = scale * x_ / z_;
-    v = scale * y_ / z_;
+    x_ = t[0] + x_ / z_ * (1 - t[2]);
+    y_ = t[1] + y_ / z_ * (1 - t[2]);
+    u = scale * x_;
+    v = scale * y_;
 }
 inline
 void PlaneProjector::mapBackward(float u, float v, float &x, float &y)
 {
-    u /= scale;
-    v /= scale;
+    u = u / scale - t[0];
+    v = v / scale - t[1];
     float z;
-    x = k_rinv[0] * u + k_rinv[1] * v + k_rinv[2];
-    y = k_rinv[3] * u + k_rinv[4] * v + k_rinv[5];
-    z = k_rinv[6] * u + k_rinv[7] * v + k_rinv[8];
+    x = k_rinv[0] * u + k_rinv[1] * v + k_rinv[2] * (1 - t[2]);
+    y = k_rinv[3] * u + k_rinv[4] * v + k_rinv[5] * (1 - t[2]);
+    z = k_rinv[6] * u + k_rinv[7] * v + k_rinv[8] * (1 - t[2]);
     x /= z;
     y /= z;
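
In equation form, the plane mapping above is as follows (a sketch derived from mapForward/mapBackward; it assumes r_kinv = R*K^-1 and k_rinv = K*R^-1, as the member names suggest, with s the projector scale and t = (t_0, t_1, t_2) the translation):

\[
\begin{aligned}
% mapForward: source pixel (x, y) -> plane coordinates (u, v)
(x',\, y',\, z')^\top &= R K^{-1}\,(x,\; y,\; 1)^\top, \\
u &= s\Big(t_0 + \tfrac{x'}{z'}\,(1 - t_2)\Big), \qquad
v = s\Big(t_1 + \tfrac{y'}{z'}\,(1 - t_2)\Big), \\[4pt]
% mapBackward: plane coordinates (u, v) -> source pixel (x, y)
(x,\, y,\, z)^\top &= K R^{-1}\,\Big(\tfrac{u}{s} - t_0,\; \tfrac{v}{s} - t_1,\; 1 - t_2\Big)^\top, \\
(x,\, y) &\leftarrow (x/z,\; y/z).
\end{aligned}
\]

Setting t = 0 reduces both maps to the previous behaviour (u = s*x'/z', v = s*y'/z').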

@@ -237,7 +237,7 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
         K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
         K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
-        corners[i] = warper->warp(seam_est_imgs[i], K, cameras[i].R, images_warped[i]);
+        corners[i] = warper->warp(seam_est_imgs[i], K, cameras[i].R, images_warped[i], INTER_LINEAR, BORDER_REFLECT);
         sizes[i] = images_warped[i].size();
         warper->warp(masks[i], K, cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);

@@ -321,7 +321,7 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
         cameras[img_idx].K().convertTo(K, CV_32F);
         // Warp the current image
-        warper->warp(img, K, cameras[img_idx].R, img_warped);
+        warper->warp(img, K, cameras[img_idx].R, img_warped, INTER_LINEAR, BORDER_REFLECT);
         // Warp the current image mask
         mask.create(img_size, CV_8U);

@@ -47,10 +47,11 @@ using namespace std;
 namespace cv {
 namespace detail {
-void ProjectorBase::setCameraParams(const Mat &K, const Mat &R)
+void ProjectorBase::setCameraParams(const Mat &K, const Mat &R, const Mat &T)
 {
     CV_Assert(K.size() == Size(3, 3) && K.type() == CV_32F);
     CV_Assert(R.size() == Size(3, 3) && R.type() == CV_32F);
+    CV_Assert((T.size() == Size(1, 3) || T.size() == Size(3, 1)) && T.type() == CV_32F);
     Mat_<float> K_(K);
     k[0] = K_(0,0); k[1] = K_(0,1); k[2] = K_(0,2);

@@ -71,6 +72,51 @@ void ProjectorBase::setCameraParams(const Mat &K, const Mat &R)
     k_rinv[0] = K_Rinv(0,0); k_rinv[1] = K_Rinv(0,1); k_rinv[2] = K_Rinv(0,2);
     k_rinv[3] = K_Rinv(1,0); k_rinv[4] = K_Rinv(1,1); k_rinv[5] = K_Rinv(1,2);
     k_rinv[6] = K_Rinv(2,0); k_rinv[7] = K_Rinv(2,1); k_rinv[8] = K_Rinv(2,2);
+
+    Mat_<float> T_(T.reshape(0, 3));
+    t[0] = T_(0,0); t[1] = T_(1,0); t[2] = T_(2,0);
 }
+
+
+Point PlaneWarper::warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, Mat &dst,
+                        int interp_mode, int border_mode)
+{
+    src_size_ = src.size();
+    projector_.setCameraParams(K, R, T);
+
+    Point dst_tl, dst_br;
+    detectResultRoi(dst_tl, dst_br);
+
+    Mat xmap(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
+    Mat ymap(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
+
+    float x, y;
+    for (int v = dst_tl.y; v <= dst_br.y; ++v)
+    {
+        for (int u = dst_tl.x; u <= dst_br.x; ++u)
+        {
+            projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);
+            xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;
+            ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;
+        }
+    }
+
+    dst.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type());
+    remap(src, dst, xmap, ymap, interp_mode, border_mode);
+
+    return dst_tl;
+}
+
+
+Rect PlaneWarper::warpRoi(const Size &sz, const Mat &K, const Mat &R, const Mat &T)
+{
+    src_size_ = sz;
+    projector_.setCameraParams(K, R, T);
+
+    Point dst_tl, dst_br;
+    detectResultRoi(dst_tl, dst_br);
+
+    return Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1));
+}

@@ -105,11 +151,20 @@ void PlaneWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
     dst_br.y = static_cast<int>(br_vf);
 }
 #ifndef ANDROID
-Point PlaneWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst, int interp_mode, int border_mode)
+Point PlaneWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
+                           int interp_mode, int border_mode)
 {
+    return warp(src, K, R, Mat::zeros(3, 1, CV_32F), dst, interp_mode, border_mode);
+}
+
+
+Point PlaneWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, Mat &dst,
+                           int interp_mode, int border_mode)
+{
     src_size_ = src.size();
-    projector_.setCameraParams(K, R);
+    projector_.setCameraParams(K, R, T);
     Point dst_tl, dst_br;
     detectResultRoi(dst_tl, dst_br);

@@ -145,8 +200,6 @@ void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
     float z = projector_.rinv[7];
     if (y > 0.f)
     {
-        //x = projector_.focal * x / z + src_size_.width * 0.5f;
-        //y = projector_.focal * y / z + src_size_.height * 0.5f;
         float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
         float y_ = projector_.k[4] * y / z + projector_.k[5];
         if (x_ > 0.f && x_ < src_size_.width && y_ > 0.f && y_ < src_size_.height)

@@ -161,8 +214,6 @@ void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
     z = projector_.rinv[7];
     if (y > 0.f)
     {
-        //x = projector_.focal * x / z + src_size_.width * 0.5f;
-        //y = projector_.focal * y / z + src_size_.height * 0.5f;
         float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
         float y_ = projector_.k[4] * y / z + projector_.k[5];
         if (x_ > 0.f && x_ < src_size_.width && y_ > 0.f && y_ < src_size_.height)

@@ -178,6 +229,7 @@ void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
     dst_br.y = static_cast<int>(br_vf);
 }
+
 #ifndef ANDROID
 Point SphericalWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
                                int interp_mode, int border_mode)

@@ -485,7 +485,7 @@ int main(int argc, char* argv[])
         K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
         K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
-        corners[i] = warper->warp(images[i], K, cameras[i].R, images_warped[i]);
+        corners[i] = warper->warp(images[i], K, cameras[i].R, images_warped[i], INTER_LINEAR, BORDER_REFLECT);
         sizes[i] = images_warped[i].size();
         warper->warp(masks[i], K, cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);

@@ -572,7 +572,7 @@ int main(int argc, char* argv[])
         cameras[img_idx].K().convertTo(K, CV_32F);
         // Warp the current image
-        warper->warp(img, K, cameras[img_idx].R, img_warped);
+        warper->warp(img, K, cameras[img_idx].R, img_warped, INTER_LINEAR, BORDER_REFLECT);
         // Warp the current image mask
         mask.create(img_size, CV_8U);
