Merge pull request #13267 from LaurentBerger:StitchPython

* Python wrapper for detail

* hide pyrotationwrapper

* copy code in pyopencv_rotationwarper.hpp

* move ImageFeatures MatchInfo and CameraParams in core/misc/

* add python test for detail

* move test_detail in test_stitching

* rename
Authored by LaurentBerger, committed by Alexander Alekhin
parent fd27d5ea00
commit 2fb409b286
Changed files (lines changed):
  modules/core/misc/python/pyopencv_rotationwarper.hpp (8)
  modules/python/src2/cv2.cpp (55)
  modules/python/test/test_stitching.py (63)
  modules/stitching/include/opencv2/stitching/detail/autocalib.hpp (4)
  modules/stitching/include/opencv2/stitching/detail/blenders.hpp (56)
  modules/stitching/include/opencv2/stitching/detail/camera.hpp (16)
  modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp (44)
  modules/stitching/include/opencv2/stitching/detail/matchers.hpp (75)
  modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp (60)
  modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp (36)
  modules/stitching/include/opencv2/stitching/detail/timelapsers.hpp (12)
  modules/stitching/include/opencv2/stitching/detail/util.hpp (14)
  modules/stitching/include/opencv2/stitching/detail/warpers.hpp (24)
  modules/stitching/include/opencv2/stitching/warpers.hpp (97)
  modules/stitching/src/blenders.cpp (2)
  modules/stitching/src/exposure_compensate.cpp (126)
  modules/stitching/src/matchers.cpp (6)
  modules/stitching/src/seam_finders.cpp (45)
  modules/stitching/src/warpers.cpp (74)
  samples/python/stitching_detailed.py (387)

@@ -0,0 +1,8 @@
#ifdef HAVE_OPENCV_STITCHING
typedef std::vector<detail::ImageFeatures> vector_ImageFeatures;
typedef std::vector<detail::MatchesInfo> vector_MatchesInfo;
typedef std::vector<detail::CameraParams> vector_CameraParams;
#endif
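These typedefs let the generated bindings round-trip plain Python lists for the new detail types. A minimal sketch of what that enables (assumes a build with this patch; the image is synthetic):

    import numpy as np
    import cv2 as cv

    img = np.random.randint(0, 255, (200, 200), np.uint8)  # synthetic test image
    finder = cv.ORB.create()
    features = cv.detail.computeImageFeatures2(finder, img)  # detail::ImageFeatures

    # A Python list of ImageFeatures maps to vector_ImageFeatures, and the
    # pairwise result comes back as a list of MatchesInfo (vector_MatchesInfo).
    matcher = cv.detail_BestOf2NearestMatcher(False, 0.3)
    pairwise = matcher.apply2([features, features])
    print(len(pairwise))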

@@ -197,6 +197,7 @@ typedef std::vector<size_t> vector_size_t;
typedef std::vector<Point> vector_Point;
typedef std::vector<Point2f> vector_Point2f;
typedef std::vector<Point3f> vector_Point3f;
typedef std::vector<Size> vector_Size;
typedef std::vector<Vec2f> vector_Vec2f;
typedef std::vector<Vec3f> vector_Vec3f;
typedef std::vector<Vec4f> vector_Vec4f;
@@ -1338,6 +1339,19 @@ template<> struct pyopencvVecConverter<Mat>
}
};
template<> struct pyopencvVecConverter<UMat>
{
static bool to(PyObject* obj, std::vector<UMat>& value, const ArgInfo info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
static PyObject* from(const std::vector<UMat>& value)
{
return pyopencv_from_generic_vec(value);
}
};
template<> struct pyopencvVecConverter<KeyPoint>
{
static bool to(PyObject* obj, std::vector<KeyPoint>& value, const ArgInfo info)
@@ -1364,6 +1378,47 @@ template<> struct pyopencvVecConverter<DMatch>
}
};
template<> struct pyopencvVecConverter<detail::ImageFeatures>
{
static bool to(PyObject* obj, std::vector<detail::ImageFeatures>& value, const ArgInfo info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
static PyObject* from(const std::vector<detail::ImageFeatures>& value)
{
return pyopencv_from_generic_vec(value);
}
};
template<> struct pyopencvVecConverter<detail::MatchesInfo>
{
static bool to(PyObject* obj, std::vector<detail::MatchesInfo>& value, const ArgInfo info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
static PyObject* from(const std::vector<detail::MatchesInfo>& value)
{
return pyopencv_from_generic_vec(value);
}
};
template<> struct pyopencvVecConverter<detail::CameraParams>
{
static bool to(PyObject* obj, std::vector<detail::CameraParams>& value, const ArgInfo info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
static PyObject* from(const std::vector<detail::CameraParams>& value)
{
return pyopencv_from_generic_vec(value);
}
};
template<> struct pyopencvVecConverter<String>
{
static bool to(PyObject* obj, std::vector<String>& value, const ArgInfo info)

@@ -19,5 +19,68 @@ class stitching_test(NewOpenCVTests):
self.assertAlmostEqual(pano.shape[0], 685, delta=100, msg="rows: %r" % list(pano.shape))
self.assertAlmostEqual(pano.shape[1], 1025, delta=100, msg="cols: %r" % list(pano.shape))
class stitching_detail_test(NewOpenCVTests):
def test_simple(self):
img = self.get_sample('stitching/a1.png')
finder= cv.ORB.create()
imgFea = cv.detail.computeImageFeatures2(finder,img)
self.assertIsNotNone(imgFea)
matcher = cv.detail_BestOf2NearestMatcher(False, 0.3)
self.assertIsNotNone(matcher)
matcher = cv.detail_AffineBestOf2NearestMatcher(False, False, 0.3)
self.assertIsNotNone(matcher)
matcher = cv.detail_BestOf2NearestRangeMatcher(2, False, 0.3)
self.assertIsNotNone(matcher)
estimator = cv.detail_AffineBasedEstimator()
self.assertIsNotNone(estimator)
estimator = cv.detail_HomographyBasedEstimator()
self.assertIsNotNone(estimator)
adjuster = cv.detail_BundleAdjusterReproj()
self.assertIsNotNone(adjuster)
adjuster = cv.detail_BundleAdjusterRay()
self.assertIsNotNone(adjuster)
adjuster = cv.detail_BundleAdjusterAffinePartial()
self.assertIsNotNone(adjuster)
adjuster = cv.detail_NoBundleAdjuster()
self.assertIsNotNone(adjuster)
compensator=cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_NO)
self.assertIsNotNone(compensator)
compensator=cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN)
self.assertIsNotNone(compensator)
compensator=cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN_BLOCKS)
self.assertIsNotNone(compensator)
seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR")
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail_DpSeamFinder("COLOR")
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail_DpSeamFinder("COLOR_GRAD")
self.assertIsNotNone(seam_finder)
blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
self.assertIsNotNone(blender)
blender = cv.detail.Blender_createDefault(cv.detail.Blender_FEATHER)
self.assertIsNotNone(blender)
blender = cv.detail.Blender_createDefault(cv.detail.Blender_MULTI_BAND)
self.assertIsNotNone(blender)
timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_AS_IS);
self.assertIsNotNone(timelapser)
timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_CROP);
self.assertIsNotNone(timelapser)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

@@ -64,7 +64,7 @@ undergoes rotations around its centre only.
See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
by Heung-Yeung Shum and Richard Szeliski.
*/
void CV_EXPORTS focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);
void CV_EXPORTS_W focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);
/** @brief Estimates focal lengths for each given camera.
@@ -76,7 +76,7 @@ void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<double> &focals);
bool CV_EXPORTS calibrateRotatingCamera(const std::vector<Mat> &Hs, Mat &K);
bool CV_EXPORTS_W calibrateRotatingCamera(const std::vector<Mat> &Hs,CV_OUT Mat &K);
//! @} stitching_autocalib
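With CV_EXPORTS_W plus the CV_OUT on K, calibrateRotatingCamera should be callable from Python roughly as below (a sketch; the homographies are hypothetical stand-ins for the MatchesInfo.H of matched pairs). Note that focalsFromHomography is exported too, but its plain double& outputs carry no CV_OUT, so they may not surface as return values.

    import numpy as np
    import cv2 as cv

    Hs = [np.eye(3), np.diag([1.1, 1.1, 1.0])]     # hypothetical homographies
    ok, K = cv.detail.calibrateRotatingCamera(Hs)  # K is returned via CV_OUT
    if ok:
        print(K)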

@@ -60,35 +60,35 @@ namespace detail {
Simple blender which puts one image over another
*/
class CV_EXPORTS Blender
class CV_EXPORTS_W Blender
{
public:
virtual ~Blender() {}
enum { NO, FEATHER, MULTI_BAND };
static Ptr<Blender> createDefault(int type, bool try_gpu = false);
CV_WRAP static Ptr<Blender> createDefault(int type, bool try_gpu = false);
/** @brief Prepares the blender for blending.
@param corners Source images top-left corners
@param sizes Source image sizes
*/
void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_WRAP void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);
/** @overload */
virtual void prepare(Rect dst_roi);
CV_WRAP virtual void prepare(Rect dst_roi);
/** @brief Processes the image.
@param img Source image
@param mask Source image mask
@param tl Source image top-left corners
*/
virtual void feed(InputArray img, InputArray mask, Point tl);
CV_WRAP virtual void feed(InputArray img, InputArray mask, Point tl);
/** @brief Blends and returns the final pano.
@param dst Final pano
@param dst_mask Final pano mask
*/
virtual void blend(InputOutputArray dst, InputOutputArray dst_mask);
CV_WRAP virtual void blend(CV_IN_OUT InputOutputArray dst,CV_IN_OUT InputOutputArray dst_mask);
protected:
UMat dst_, dst_mask_;
@@ -97,22 +97,22 @@ protected:
/** @brief Simple blender which mixes images at its borders.
*/
class CV_EXPORTS FeatherBlender : public Blender
class CV_EXPORTS_W FeatherBlender : public Blender
{
public:
FeatherBlender(float sharpness = 0.02f);
CV_WRAP FeatherBlender(float sharpness = 0.02f);
float sharpness() const { return sharpness_; }
void setSharpness(float val) { sharpness_ = val; }
CV_WRAP float sharpness() const { return sharpness_; }
CV_WRAP void setSharpness(float val) { sharpness_ = val; }
void prepare(Rect dst_roi) CV_OVERRIDE;
void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE;
void blend(InputOutputArray dst, InputOutputArray dst_mask) CV_OVERRIDE;
CV_WRAP void prepare(Rect dst_roi) CV_OVERRIDE;
CV_WRAP void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE;
CV_WRAP void blend(InputOutputArray dst, InputOutputArray dst_mask) CV_OVERRIDE;
//! Creates weight maps for fixed set of source images by their masks and top-left corners.
//! Final image can be obtained by simple weighting of the source images.
Rect createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
std::vector<UMat> &weight_maps);
CV_WRAP Rect createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
CV_IN_OUT std::vector<UMat> &weight_maps);
private:
float sharpness_;
@@ -124,17 +124,17 @@ inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpnes
/** @brief Blender which uses multi-band blending algorithm (see @cite BA83).
*/
class CV_EXPORTS MultiBandBlender : public Blender
class CV_EXPORTS_W MultiBandBlender : public Blender
{
public:
MultiBandBlender(int try_gpu = false, int num_bands = 5, int weight_type = CV_32F);
CV_WRAP MultiBandBlender(int try_gpu = false, int num_bands = 5, int weight_type = CV_32F);
int numBands() const { return actual_num_bands_; }
void setNumBands(int val) { actual_num_bands_ = val; }
CV_WRAP int numBands() const { return actual_num_bands_; }
CV_WRAP void setNumBands(int val) { actual_num_bands_ = val; }
void prepare(Rect dst_roi) CV_OVERRIDE;
void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE;
void blend(InputOutputArray dst, InputOutputArray dst_mask) CV_OVERRIDE;
CV_WRAP void prepare(Rect dst_roi) CV_OVERRIDE;
CV_WRAP void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE;
CV_WRAP void blend(CV_IN_OUT InputOutputArray dst, CV_IN_OUT InputOutputArray dst_mask) CV_OVERRIDE;
private:
int actual_num_bands_, num_bands_;
@@ -165,16 +165,16 @@ private:
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
void CV_EXPORTS normalizeUsingWeightMap(InputArray weight, InputOutputArray src);
void CV_EXPORTS_W normalizeUsingWeightMap(InputArray weight, CV_IN_OUT InputOutputArray src);
void CV_EXPORTS createWeightMap(InputArray mask, float sharpness, InputOutputArray weight);
void CV_EXPORTS_W createWeightMap(InputArray mask, float sharpness, CV_IN_OUT InputOutputArray weight);
void CV_EXPORTS createLaplacePyr(InputArray img, int num_levels, std::vector<UMat>& pyr);
void CV_EXPORTS createLaplacePyrGpu(InputArray img, int num_levels, std::vector<UMat>& pyr);
void CV_EXPORTS_W createLaplacePyr(InputArray img, int num_levels, CV_IN_OUT std::vector<UMat>& pyr);
void CV_EXPORTS_W createLaplacePyrGpu(InputArray img, int num_levels, CV_IN_OUT std::vector<UMat>& pyr);
// Restores source image
void CV_EXPORTS restoreImageFromLaplacePyr(std::vector<UMat>& pyr);
void CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector<UMat>& pyr);
void CV_EXPORTS_W restoreImageFromLaplacePyr(CV_IN_OUT std::vector<UMat>& pyr);
void CV_EXPORTS_W restoreImageFromLaplacePyrGpu(CV_IN_OUT std::vector<UMat>& pyr);
//! @}
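A sketch of the blending flow these CV_WRAP annotations open up (single synthetic image; the blenders expect CV_16SC3 input, matching the C++ pipeline):

    import numpy as np
    import cv2 as cv

    img = np.full((100, 100, 3), 128, np.int16)   # blender input is CV_16SC3
    mask = np.full((100, 100), 255, np.uint8)

    blender = cv.detail.Blender_createDefault(cv.detail.Blender_FEATHER)
    blender.prepare([(0, 0)], [(100, 100)])       # corners, sizes
    blender.feed(img, mask, (0, 0))
    pano, pano_mask = blender.blend(None, None)   # dst/dst_mask are CV_IN_OUT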

@@ -55,19 +55,19 @@ namespace detail {
@note Translation is assumed to be zero during the whole stitching pipeline. :
*/
struct CV_EXPORTS CameraParams
struct CV_EXPORTS_W_SIMPLE CameraParams
{
CameraParams();
CameraParams(const CameraParams& other);
CameraParams& operator =(const CameraParams& other);
Mat K() const;
CV_WRAP Mat K() const;
double focal; // Focal length
double aspect; // Aspect ratio
double ppx; // Principal point X
double ppy; // Principal point Y
Mat R; // Rotation
Mat t; // Translation
CV_PROP_RW double focal; // Focal length
CV_PROP_RW double aspect; // Aspect ratio
CV_PROP_RW double ppx; // Principal point X
CV_PROP_RW double ppy; // Principal point Y
CV_PROP_RW Mat R; // Rotation
CV_PROP_RW Mat t; // Translation
};
//! @}
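With CV_EXPORTS_W_SIMPLE and CV_PROP_RW, CameraParams becomes a plain value type on the Python side; a sketch (assuming the default constructor the generator emits for simple types):

    import numpy as np
    import cv2 as cv

    cam = cv.detail_CameraParams()
    cam.focal, cam.aspect = 800.0, 1.0
    cam.ppx, cam.ppy = 320.0, 240.0
    cam.R = np.eye(3, dtype=np.float32)
    print(cam.K())  # 3x3 intrinsic matrix assembled from the fields above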

@@ -57,54 +57,64 @@ namespace detail {
/** @brief Base class for all exposure compensators.
*/
class CV_EXPORTS ExposureCompensator
class CV_EXPORTS_W ExposureCompensator
{
public:
virtual ~ExposureCompensator() {}
enum { NO, GAIN, GAIN_BLOCKS };
static Ptr<ExposureCompensator> createDefault(int type);
CV_WRAP static Ptr<ExposureCompensator> createDefault(int type);
/**
@param corners Source image top-left corners
@param images Source images
@param masks Image masks to update (second value in pair specifies the value which should be used
to detect where image is)
*/
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<UMat> &masks);
*/
CV_WRAP void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<UMat> &masks);
/** @overload */
virtual void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) = 0;
const std::vector<std::pair<UMat, uchar> > &masks) = 0;
/** @brief Compensate exposure in the specified image.
@param index Image index
@param corner Image top-left corner
@param image Image to process
@param mask Image mask
*/
virtual void apply(int index, Point corner, InputOutputArray image, InputArray mask) = 0;
*/
CV_WRAP virtual void apply(int index, Point corner, InputOutputArray image, InputArray mask) = 0;
CV_WRAP virtual void getMatGains(CV_OUT std::vector<Mat>& ) {CV_Error(Error::StsInternal, "");};
CV_WRAP virtual void setMatGains(std::vector<Mat>& ) { CV_Error(Error::StsInternal, ""); };
CV_WRAP void setUpdateGain(bool b) { updateGain = b; };
CV_WRAP bool getUpdateGain() { return updateGain; };
protected :
bool updateGain;
};
/** @brief Stub exposure compensator which does nothing.
*/
class CV_EXPORTS NoExposureCompensator : public ExposureCompensator
class CV_EXPORTS_W NoExposureCompensator : public ExposureCompensator
{
public:
void feed(const std::vector<Point> &/*corners*/, const std::vector<UMat> &/*images*/,
const std::vector<std::pair<UMat,uchar> > &/*masks*/) CV_OVERRIDE { }
void apply(int /*index*/, Point /*corner*/, InputOutputArray /*image*/, InputArray /*mask*/) CV_OVERRIDE { }
CV_WRAP void apply(int /*index*/, Point /*corner*/, InputOutputArray /*image*/, InputArray /*mask*/) CV_OVERRIDE { }
CV_WRAP void getMatGains(CV_OUT std::vector<Mat>& umv) CV_OVERRIDE { umv.clear(); return; };
CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE { umv.clear(); return; };
};
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image
intensities, see @cite BL07 and @cite WJ10 for details.
*/
class CV_EXPORTS GainCompensator : public ExposureCompensator
class CV_EXPORTS_W GainCompensator : public ExposureCompensator
{
public:
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
CV_WRAP void getMatGains(CV_OUT std::vector<Mat>& umv) CV_OVERRIDE ;
CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE ;
std::vector<double> gains() const;
private:
@@ -114,14 +124,16 @@ private:
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image block
intensities, see @cite UES01 for details.
*/
class CV_EXPORTS BlocksGainCompensator : public ExposureCompensator
class CV_EXPORTS_W BlocksGainCompensator : public ExposureCompensator
{
public:
BlocksGainCompensator(int bl_width = 32, int bl_height = 32)
: bl_width_(bl_width), bl_height_(bl_height) {}
CV_WRAP BlocksGainCompensator(int bl_width = 32, int bl_height = 32)
: bl_width_(bl_width), bl_height_(bl_height) {setUpdateGain(true);}
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
CV_WRAP void getMatGains(CV_OUT std::vector<Mat>& umv) CV_OVERRIDE;
CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE;
private:
int bl_width_, bl_height_;
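A sketch of the compensator flow after these changes, including the new getMatGains/setMatGains round trip (synthetic images with a deliberate brightness mismatch):

    import numpy as np
    import cv2 as cv

    corners = [(0, 0), (50, 0)]
    images = [np.full((100, 100, 3), v, np.uint8) for v in (100, 140)]
    masks = [np.full((100, 100), 255, np.uint8)] * 2

    comp = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN)
    comp.feed(corners=corners, images=images, masks=masks)
    comp.apply(0, corners[0], images[0], masks[0])  # gain-corrects the image
    gains = comp.getMatGains()                      # new CV_OUT getter
    comp.setMatGains(gains)                         # matching setter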

@@ -55,24 +55,38 @@ namespace detail {
//! @{
/** @brief Structure containing image keypoints and descriptors. */
struct CV_EXPORTS ImageFeatures
struct CV_EXPORTS_W_SIMPLE ImageFeatures
{
int img_idx;
Size img_size;
CV_PROP_RW int img_idx;
CV_PROP_RW Size img_size;
std::vector<KeyPoint> keypoints;
UMat descriptors;
CV_PROP_RW UMat descriptors;
CV_WRAP std::vector<KeyPoint> getKeypoints() { return keypoints; };
};
/** @brief
CV_EXPORTS void computeImageFeatures(
@param featuresFinder
@param images
@param features
@param masks
*/
CV_EXPORTS_W void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArrayOfArrays images,
std::vector<ImageFeatures> &features,
CV_OUT std::vector<ImageFeatures> &features,
InputArrayOfArrays masks = noArray());
CV_EXPORTS void computeImageFeatures(
/** @brief
@param featuresFinder
@param image
@param features
@param mask
*/
CV_EXPORTS_AS(computeImageFeatures2) void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArray image,
ImageFeatures &features,
CV_OUT ImageFeatures &features,
InputArray mask = noArray());
/** @brief Structure containing information about matches between two images.
@@ -82,33 +96,36 @@ homography or affine transformation based on selected matcher.
@sa detail::FeaturesMatcher
*/
struct CV_EXPORTS MatchesInfo
struct CV_EXPORTS_W_SIMPLE MatchesInfo
{
MatchesInfo();
MatchesInfo(const MatchesInfo &other);
MatchesInfo& operator =(const MatchesInfo &other);
int src_img_idx, dst_img_idx; //!< Images indices (optional)
CV_PROP_RW int src_img_idx;
CV_PROP_RW int dst_img_idx; //!< Images indices (optional)
std::vector<DMatch> matches;
std::vector<uchar> inliers_mask; //!< Geometrically consistent matches mask
int num_inliers; //!< Number of geometrically consistent matches
Mat H; //!< Estimated transformation
double confidence; //!< Confidence two images are from the same panorama
CV_PROP_RW int num_inliers; //!< Number of geometrically consistent matches
CV_PROP_RW Mat H; //!< Estimated transformation
CV_PROP_RW double confidence; //!< Confidence two images are from the same panorama
CV_WRAP std::vector<DMatch> getMatches() { return matches; };
CV_WRAP std::vector<uchar> getInliers() { return inliers_mask; };
};
/** @brief Feature matchers base class. */
class CV_EXPORTS FeaturesMatcher
class CV_EXPORTS_W FeaturesMatcher
{
public:
virtual ~FeaturesMatcher() {}
CV_WRAP virtual ~FeaturesMatcher() {}
/** @overload
@param features1 First image features
@param features2 Second image features
@param matches_info Found matches
*/
void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) { match(features1, features2, matches_info); }
CV_WRAP_AS(apply) void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,
CV_OUT MatchesInfo& matches_info) { match(features1, features2, matches_info); }
/** @brief Performs images matching.
@@ -120,16 +137,16 @@ public:
@sa detail::MatchesInfo
*/
void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
CV_WRAP_AS(apply2) void operator ()(const std::vector<ImageFeatures> &features, CV_OUT std::vector<MatchesInfo> &pairwise_matches,
const cv::UMat &mask = cv::UMat());
/** @return True, if it's possible to use the same matcher instance in parallel, false otherwise
*/
bool isThreadSafe() const { return is_thread_safe_; }
CV_WRAP bool isThreadSafe() const { return is_thread_safe_; }
/** @brief Frees unused memory allocated before if there is any.
*/
virtual void collectGarbage() {}
CV_WRAP virtual void collectGarbage() {}
protected:
FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}
@@ -152,7 +169,7 @@ ratio between descriptor distances is greater than the threshold match_conf
@sa detail::FeaturesMatcher
*/
class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher
class CV_EXPORTS_W BestOf2NearestMatcher : public FeaturesMatcher
{
public:
/** @brief Constructs a "best of 2 nearest" matcher.
@@ -164,23 +181,25 @@ public:
@param num_matches_thresh2 Minimum number of matches required for the 2D projective transform
re-estimation on inliers
*/
BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,
CV_WRAP BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,
int num_matches_thresh2 = 6);
void collectGarbage() CV_OVERRIDE;
CV_WRAP void collectGarbage() CV_OVERRIDE;
CV_WRAP static Ptr<BestOf2NearestMatcher> create(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,
int num_matches_thresh2 = 6);
protected:
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info) CV_OVERRIDE;
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info) CV_OVERRIDE;
int num_matches_thresh1_;
int num_matches_thresh2_;
Ptr<FeaturesMatcher> impl_;
};
class CV_EXPORTS BestOf2NearestRangeMatcher : public BestOf2NearestMatcher
class CV_EXPORTS_W BestOf2NearestRangeMatcher : public BestOf2NearestMatcher
{
public:
BestOf2NearestRangeMatcher(int range_width = 5, bool try_use_gpu = false, float match_conf = 0.3f,
CV_WRAP BestOf2NearestRangeMatcher(int range_width = 5, bool try_use_gpu = false, float match_conf = 0.3f,
int num_matches_thresh1 = 6, int num_matches_thresh2 = 6);
void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
@@ -200,7 +219,7 @@ transformation (affine trasformation estimate will be placed in matches_info).
@sa cv::detail::FeaturesMatcher cv::detail::BestOf2NearestMatcher
*/
class CV_EXPORTS AffineBestOf2NearestMatcher : public BestOf2NearestMatcher
class CV_EXPORTS_W AffineBestOf2NearestMatcher : public BestOf2NearestMatcher
{
public:
/** @brief Constructs a "best of 2 nearest" matcher that expects affine trasformation
@@ -215,7 +234,7 @@ public:
@sa cv::estimateAffine2D cv::estimateAffinePartial2D
*/
AffineBestOf2NearestMatcher(bool full_affine = false, bool try_use_gpu = false,
CV_WRAP AffineBestOf2NearestMatcher(bool full_affine = false, bool try_use_gpu = false,
float match_conf = 0.3f, int num_matches_thresh1 = 6) :
BestOf2NearestMatcher(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh1),
full_affine_(full_affine) {}
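Since operator() cannot be wrapped directly, the patch exposes it as apply (one pair) and apply2 (whole set). A sketch on synthetic images:

    import numpy as np
    import cv2 as cv

    rng = np.random.default_rng(0)
    imgs = [rng.integers(0, 255, (200, 200), np.uint8) for _ in range(2)]
    finder = cv.ORB.create()
    feats = [cv.detail.computeImageFeatures2(finder, im) for im in imgs]

    matcher = cv.detail_BestOf2NearestMatcher(False, 0.3)
    info = matcher.apply(feats[0], feats[1])   # one MatchesInfo
    all_info = matcher.apply2(feats)           # full pairwise list
    matcher.collectGarbage()
    print(info.confidence, len(info.getMatches()))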

@@ -62,7 +62,7 @@ cameras.
@note The coordinate system origin is implementation-dependent, but you can always normalize the
rotations in respect to the first camera, for instance. :
*/
class CV_EXPORTS Estimator
class CV_EXPORTS_W Estimator
{
public:
virtual ~Estimator() {}
@@ -74,10 +74,12 @@ public:
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
bool operator ()(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras)
{ return estimate(features, pairwise_matches, cameras); }
CV_WRAP_AS(apply) bool operator ()(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
CV_OUT CV_IN_OUT std::vector<CameraParams> &cameras)
{
return estimate(features, pairwise_matches, cameras);
}
protected:
/** @brief This method must implement camera parameters estimation logic in order to make the wrapper
@@ -90,15 +92,15 @@
*/
virtual bool estimate(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras) = 0;
CV_OUT std::vector<CameraParams> &cameras) = 0;
};
/** @brief Homography based rotation estimator.
*/
class CV_EXPORTS HomographyBasedEstimator : public Estimator
class CV_EXPORTS_W HomographyBasedEstimator : public Estimator
{
public:
HomographyBasedEstimator(bool is_focals_estimated = false)
CV_WRAP HomographyBasedEstimator(bool is_focals_estimated = false)
: is_focals_estimated_(is_focals_estimated) {}
private:
@@ -116,7 +118,7 @@ final transformation for each camera.
@sa cv::detail::HomographyBasedEstimator
*/
class CV_EXPORTS AffineBasedEstimator : public Estimator
class CV_EXPORTS_W AffineBasedEstimator : public Estimator
{
private:
virtual bool estimate(const std::vector<ImageFeatures> &features,
@@ -126,21 +128,21 @@ private:
/** @brief Base class for all camera parameters refinement methods.
*/
class CV_EXPORTS BundleAdjusterBase : public Estimator
class CV_EXPORTS_W BundleAdjusterBase : public Estimator
{
public:
const Mat refinementMask() const { return refinement_mask_.clone(); }
void setRefinementMask(const Mat &mask)
CV_WRAP const Mat refinementMask() const { return refinement_mask_.clone(); }
CV_WRAP void setRefinementMask(const Mat &mask)
{
CV_Assert(mask.type() == CV_8U && mask.size() == Size(3, 3));
refinement_mask_ = mask.clone();
}
double confThresh() const { return conf_thresh_; }
void setConfThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }
CV_WRAP double confThresh() const { return conf_thresh_; }
CV_WRAP void setConfThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }
TermCriteria termCriteria() { return term_criteria_; }
void setTermCriteria(const TermCriteria& term_criteria) { term_criteria_ = term_criteria; }
CV_WRAP TermCriteria termCriteria() { return term_criteria_; }
CV_WRAP void setTermCriteria(const TermCriteria& term_criteria) { term_criteria_ = term_criteria; }
protected:
/** @brief Construct a bundle adjuster base instance.
@@ -214,10 +216,10 @@ protected:
/** @brief Stub bundle adjuster that does nothing.
*/
class CV_EXPORTS NoBundleAdjuster : public BundleAdjusterBase
class CV_EXPORTS_W NoBundleAdjuster : public BundleAdjusterBase
{
public:
NoBundleAdjuster() : BundleAdjusterBase(0, 0) {}
CV_WRAP NoBundleAdjuster() : BundleAdjusterBase(0, 0) {}
private:
bool estimate(const std::vector<ImageFeatures> &, const std::vector<MatchesInfo> &,
@@ -238,10 +240,10 @@ error squares
It can estimate focal length, aspect ratio, principal point.
You can affect only on them via the refinement mask.
*/
class CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase
class CV_EXPORTS_W BundleAdjusterReproj : public BundleAdjusterBase
{
public:
BundleAdjusterReproj() : BundleAdjusterBase(7, 2) {}
CV_WRAP BundleAdjusterReproj() : BundleAdjusterBase(7, 2) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
@@ -258,10 +260,10 @@ between the rays passing through the camera center and a feature. :
It can estimate focal length. It ignores the refinement mask for now.
*/
class CV_EXPORTS BundleAdjusterRay : public BundleAdjusterBase
class CV_EXPORTS_W BundleAdjusterRay : public BundleAdjusterBase
{
public:
BundleAdjusterRay() : BundleAdjusterBase(4, 3) {}
CV_WRAP BundleAdjusterRay() : BundleAdjusterBase(4, 3) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
@@ -282,10 +284,10 @@ It estimates all transformation parameters. Refinement mask is ignored.
@sa AffineBasedEstimator AffineBestOf2NearestMatcher BundleAdjusterAffinePartial
*/
class CV_EXPORTS BundleAdjusterAffine : public BundleAdjusterBase
class CV_EXPORTS_W BundleAdjusterAffine : public BundleAdjusterBase
{
public:
BundleAdjusterAffine() : BundleAdjusterBase(6, 2) {}
CV_WRAP BundleAdjusterAffine() : BundleAdjusterBase(6, 2) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
@@ -306,10 +308,10 @@ It estimates all transformation parameters. Refinement mask is ignored.
@sa AffineBasedEstimator AffineBestOf2NearestMatcher BundleAdjusterAffine
*/
class CV_EXPORTS BundleAdjusterAffinePartial : public BundleAdjusterBase
class CV_EXPORTS_W BundleAdjusterAffinePartial : public BundleAdjusterBase
{
public:
BundleAdjusterAffinePartial() : BundleAdjusterBase(4, 2) {}
CV_WRAP BundleAdjusterAffinePartial() : BundleAdjusterBase(4, 2) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
@@ -332,17 +334,17 @@ enum WaveCorrectKind
@param rmats Camera rotation matrices.
@param kind Correction kind, see detail::WaveCorrectKind.
*/
void CV_EXPORTS waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind);
void CV_EXPORTS_W waveCorrect(CV_IN_OUT std::vector<Mat> &rmats, WaveCorrectKind kind);
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
// Returns matches graph representation in DOT language
String CV_EXPORTS matchesGraphAsString(std::vector<String> &pathes, std::vector<MatchesInfo> &pairwise_matches,
String CV_EXPORTS_W matchesGraphAsString(std::vector<String> &pathes, std::vector<MatchesInfo> &pairwise_matches,
float conf_threshold);
std::vector<int> CV_EXPORTS leaveBiggestComponent(
CV_EXPORTS_W std::vector<int> leaveBiggestComponent(
std::vector<ImageFeatures> &features,
std::vector<MatchesInfo> &pairwise_matches,
float conf_threshold);
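The estimators follow the same apply pattern; cameras is CV_OUT CV_IN_OUT, so it is both passed in and returned. A sketch on two overlapping crops of a synthetic image (NoBundleAdjuster keeps it trivially safe; swap in detail_BundleAdjusterRay for real refinement):

    import numpy as np
    import cv2 as cv

    rng = np.random.default_rng(0)
    canvas = rng.integers(0, 255, (300, 400), np.uint8)
    imgs = [canvas[:, :300], canvas[:, 100:]]          # 200-px overlap
    finder = cv.ORB.create()
    features = [cv.detail.computeImageFeatures2(finder, im) for im in imgs]
    pairwise = cv.detail_BestOf2NearestMatcher(False, 0.3).apply2(features)

    estimator = cv.detail_HomographyBasedEstimator()
    ok, cameras = estimator.apply(features, pairwise, None)   # CV_WRAP_AS(apply)

    for cam in cameras:
        cam.R = cam.R.astype(np.float32)   # adjusters expect CV_32F rotations

    adjuster = cv.detail_NoBundleAdjuster()
    adjuster.setConfThresh(1.0)
    ok, cameras = adjuster.apply(features, pairwise, cameras)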

@@ -55,35 +55,37 @@ namespace detail {
/** @brief Base class for a seam estimator.
*/
class CV_EXPORTS SeamFinder
class CV_EXPORTS_W SeamFinder
{
public:
virtual ~SeamFinder() {}
CV_WRAP virtual ~SeamFinder() {}
enum { NO, VORONOI_SEAM, DP_SEAM };
/** @brief Estimates seams.
@param src Source images
@param corners Source image top-left corners
@param masks Source image masks to update
*/
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) = 0;
CV_WRAP virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
CV_IN_OUT std::vector<UMat> &masks) = 0;
CV_WRAP static Ptr<SeamFinder> createDefault(int type);
};
/** @brief Stub seam estimator which does nothing.
*/
class CV_EXPORTS NoSeamFinder : public SeamFinder
class CV_EXPORTS_W NoSeamFinder : public SeamFinder
{
public:
void find(const std::vector<UMat>&, const std::vector<Point>&, std::vector<UMat>&) CV_OVERRIDE {}
CV_WRAP void find(const std::vector<UMat>&, const std::vector<Point>&, CV_IN_OUT std::vector<UMat>&) CV_OVERRIDE {}
};
/** @brief Base class for all pairwise seam estimators.
*/
class CV_EXPORTS PairwiseSeamFinder : public SeamFinder
class CV_EXPORTS_W PairwiseSeamFinder : public SeamFinder
{
public:
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) CV_OVERRIDE;
CV_WRAP virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
CV_IN_OUT std::vector<UMat> &masks) CV_OVERRIDE;
protected:
void run();
@@ -103,11 +105,11 @@ protected:
/** @brief Voronoi diagram-based seam estimator.
*/
class CV_EXPORTS VoronoiSeamFinder : public PairwiseSeamFinder
class CV_EXPORTS_W VoronoiSeamFinder : public PairwiseSeamFinder
{
public:
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) CV_OVERRIDE;
CV_WRAP virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
CV_IN_OUT std::vector<UMat> &masks) CV_OVERRIDE;
virtual void find(const std::vector<Size> &size, const std::vector<Point> &corners,
std::vector<UMat> &masks);
private:
@@ -115,15 +117,17 @@ private:
};
class CV_EXPORTS DpSeamFinder : public SeamFinder
class CV_EXPORTS_W DpSeamFinder : public SeamFinder
{
public:
enum CostFunction { COLOR, COLOR_GRAD };
DpSeamFinder(CostFunction costFunc = COLOR);
CV_WRAP DpSeamFinder(String costFunc );
CostFunction costFunction() const { return costFunc_; }
void setCostFunction(CostFunction val) { costFunc_ = val; }
CV_WRAP void setCostFunction(String val);
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) CV_OVERRIDE;
@@ -233,15 +237,17 @@ public:
/** @brief Minimum graph cut-based seam estimator. See details in @cite V03 .
*/
class CV_EXPORTS GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder
class CV_EXPORTS_W GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder
{
public:
GraphCutSeamFinder(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f,
float bad_region_penalty = 1000.f);
CV_WRAP GraphCutSeamFinder(String cost_type,float terminal_cost = 10000.f,
float bad_region_penalty = 1000.f);
~GraphCutSeamFinder();
void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
CV_WRAP void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) CV_OVERRIDE;
private:
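The finders now construct from cost-function strings in Python (the mapping is in the seam_finders.cpp hunk further down). A sketch; the base find carries CV_IN_OUT masks, so updated masks are returned:

    import numpy as np
    import cv2 as cv

    srcs = [np.zeros((100, 100, 3), np.float32)] * 2   # seam finders work on CV_32F images
    corners = [(0, 0), (50, 0)]
    masks = [np.full((100, 100), 255, np.uint8)] * 2

    # String-based constructors added by this patch (as exercised in the test above):
    gc = cv.detail_GraphCutSeamFinder("COST_COLOR")
    dp = cv.detail_DpSeamFinder("COLOR_GRAD")

    finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
    masks = finder.find(srcs, corners, masks)   # elements may be cv.UMat; use .get() for numpy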

@@ -54,7 +54,7 @@ namespace detail {
// Base Timelapser class, takes a sequence of images, applies appropriate shift, stores result in dst_.
class CV_EXPORTS Timelapser
class CV_EXPORTS_W Timelapser
{
public:
@@ -62,11 +62,11 @@ public:
virtual ~Timelapser() {}
static Ptr<Timelapser> createDefault(int type);
CV_WRAP static Ptr<Timelapser> createDefault(int type);
virtual void initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes);
virtual void process(InputArray img, InputArray mask, Point tl);
virtual const UMat& getDst() {return dst_;}
CV_WRAP virtual void initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_WRAP virtual void process(InputArray img, InputArray mask, Point tl);
CV_WRAP virtual const UMat& getDst() {return dst_;}
protected:
@@ -77,7 +77,7 @@ protected:
};
class CV_EXPORTS TimelapserCrop : public Timelapser
class CV_EXPORTS_W TimelapserCrop : public Timelapser
{
public:
virtual void initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes) CV_OVERRIDE;
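Timelapser sketch under the same conventions (the pipeline feeds it CV_16SC3 frames, like the blenders):

    import numpy as np
    import cv2 as cv

    tl = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_AS_IS)
    corners = [(0, 0), (20, 10)]
    sizes = [(100, 100), (100, 100)]
    tl.initialize(corners, sizes)

    frame = np.zeros((100, 100, 3), np.int16)
    mask = np.full((100, 100), 255, np.uint8)
    tl.process(frame, mask, corners[0])
    result = tl.getDst().get()   # getDst() yields a UMat; .get() converts to numpy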

@@ -100,16 +100,16 @@ private:
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
CV_EXPORTS bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi);
CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<UMat> &images);
CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_EXPORTS Rect resultRoiIntersection(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_EXPORTS Point resultTl(const std::vector<Point> &corners);
CV_EXPORTS_W bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi);
CV_EXPORTS_W Rect resultRoi(const std::vector<Point> &corners, const std::vector<UMat> &images);
CV_EXPORTS_W Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_EXPORTS_W Rect resultRoiIntersection(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_EXPORTS_W Point resultTl(const std::vector<Point> &corners);
// Returns random 'count' element subset of the {0,1,...,size-1} set
CV_EXPORTS void selectRandomSubset(int count, int size, std::vector<int> &subset);
CV_EXPORTS_W void selectRandomSubset(int count, int size, std::vector<int> &subset);
CV_EXPORTS int& stitchingLogLevel();
CV_EXPORTS_W int& stitchingLogLevel();
//! @}
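Of these, the functions whose results are plain return values translate directly; the ones filling non-const refs without CV_OUT (overlapRoi, selectRandomSubset) may not round-trip their outputs yet. A sketch:

    import cv2 as cv

    corners = [(0, 0), (50, 0)]
    sizes = [(100, 100), (100, 100)]
    print(cv.detail.resultRoi(corners=corners, sizes=sizes))  # union ROI as (x, y, w, h)
    print(cv.detail.resultTl(corners))                        # top-left of the union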

@@ -92,7 +92,7 @@ public:
@return Project image top-left corner
*/
virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst) = 0;
CV_OUT OutputArray dst) = 0;
/** @brief Projects the image backward.
@@ -105,7 +105,7 @@ public:
@param dst Backward-projected image
*/
virtual void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, OutputArray dst) = 0;
Size dst_size, CV_OUT OutputArray dst) = 0;
/**
@param src_size Source image bounding box
@@ -121,7 +121,7 @@ public:
/** @brief Base class for warping logic implementation.
*/
struct CV_EXPORTS ProjectorBase
struct CV_EXPORTS_W_SIMPLE ProjectorBase
{
void setCameraParams(InputArray K = Mat::eye(3, 3, CV_32F),
InputArray R = Mat::eye(3, 3, CV_32F),
@@ -189,13 +189,13 @@ public:
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T);
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap);
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, CV_OUT OutputArray xmap, CV_OUT OutputArray ymap);
Rect buildMaps(Size src_size, InputArray K, InputArray R, CV_OUT OutputArray xmap, CV_OUT OutputArray ymap) CV_OVERRIDE;
Point warp(InputArray src, InputArray K, InputArray R,
int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE;
int interp_mode, int border_mode, CV_OUT OutputArray dst) CV_OVERRIDE;
virtual Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
OutputArray dst);
CV_OUT OutputArray dst);
Rect warpRoi(Size src_size, InputArray K, InputArray R) CV_OVERRIDE;
Rect warpRoi(Size src_size, InputArray K, InputArray R, InputArray T);
@@ -220,9 +220,9 @@ public:
AffineWarper(float scale = 1.f) : PlaneWarper(scale) {}
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
Rect buildMaps(Size src_size, InputArray K, InputArray R, CV_OUT OutputArray xmap, CV_OUT OutputArray ymap) CV_OVERRIDE;
Point warp(InputArray src, InputArray K, InputArray R,
int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE;
int interp_mode, int border_mode, CV_OUT OutputArray dst) CV_OVERRIDE;
Rect warpRoi(Size src_size, InputArray K, InputArray R) CV_OVERRIDE;
protected:
@@ -233,10 +233,10 @@ protected:
};
struct CV_EXPORTS SphericalProjector : ProjectorBase
struct CV_EXPORTS_W_SIMPLE SphericalProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
CV_WRAP void mapForward(float x, float y, float &u, float &v);
CV_WRAP void mapBackward(float u, float v, float &x, float &y);
};

@@ -44,25 +44,94 @@
#define OPENCV_STITCHING_WARPER_CREATORS_HPP
#include "opencv2/stitching/detail/warpers.hpp"
#include <string>
namespace cv {
class CV_EXPORTS_W PyRotationWarper
{
Ptr<detail::RotationWarper> rw;
public:
CV_WRAP PyRotationWarper(String type, float scale);
CV_WRAP PyRotationWarper() {};
~PyRotationWarper() {}
/** @brief Projects the image point.
@param pt Source point
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected point
*/
CV_WRAP Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R);
/** @brief Builds the projection maps according to the given camera data.
@param src_size Source image size
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param xmap Projection map for the x axis
@param ymap Projection map for the y axis
@return Projected image minimum bounding box
*/
CV_WRAP Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
/** @brief Projects the image.
@param src Source image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst Projected image
@return Project image top-left corner
*/
CV_WRAP Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
CV_OUT OutputArray dst);
/** @brief Projects the image backward.
@param src Projected image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst_size Backward-projected image size
@param dst Backward-projected image
*/
CV_WRAP void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, CV_OUT OutputArray dst);
/**
@param src_size Source image bounding box
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected image minimum bounding box
*/
CV_WRAP Rect warpRoi(Size src_size, InputArray K, InputArray R);
CV_WRAP float getScale() const { return 1.f; }
CV_WRAP void setScale(float) {}
};
//! @addtogroup stitching_warp
//! @{
/** @brief Image warper factories base class.
*/
class WarperCreator
class CV_EXPORTS_W WarperCreator
{
public:
virtual ~WarperCreator() {}
CV_WRAP virtual ~WarperCreator() {}
virtual Ptr<detail::RotationWarper> create(float scale) const = 0;
};
/** @brief Plane warper factory class.
@sa detail::PlaneWarper
*/
class PlaneWarper : public WarperCreator
class CV_EXPORTS PlaneWarper : public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PlaneWarper>(scale); }
@@ -71,7 +140,7 @@ public:
/** @brief Affine warper factory class.
@sa detail::AffineWarper
*/
class AffineWarper : public WarperCreator
class CV_EXPORTS AffineWarper : public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::AffineWarper>(scale); }
@@ -80,32 +149,32 @@ public:
/** @brief Cylindrical warper factory class.
@sa detail::CylindricalWarper
*/
class CylindricalWarper: public WarperCreator
class CV_EXPORTS CylindricalWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CylindricalWarper>(scale); }
};
/** @brief Spherical warper factory class */
class SphericalWarper: public WarperCreator
class CV_EXPORTS SphericalWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::SphericalWarper>(scale); }
};
class FisheyeWarper : public WarperCreator
class CV_EXPORTS FisheyeWarper : public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::FisheyeWarper>(scale); }
};
class StereographicWarper: public WarperCreator
class CV_EXPORTS StereographicWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::StereographicWarper>(scale); }
};
class CompressedRectilinearWarper: public WarperCreator
class CV_EXPORTS CompressedRectilinearWarper: public WarperCreator
{
float a, b;
public:
@@ -116,7 +185,7 @@ public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CompressedRectilinearWarper>(scale, a, b); }
};
class CompressedRectilinearPortraitWarper: public WarperCreator
class CV_EXPORTS CompressedRectilinearPortraitWarper: public WarperCreator
{
float a, b;
public:
@@ -127,7 +196,7 @@ public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CompressedRectilinearPortraitWarper>(scale, a, b); }
};
class PaniniWarper: public WarperCreator
class CV_EXPORTS PaniniWarper: public WarperCreator
{
float a, b;
public:
@@ -138,7 +207,7 @@ public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PaniniWarper>(scale, a, b); }
};
class PaniniPortraitWarper: public WarperCreator
class CV_EXPORTS PaniniPortraitWarper: public WarperCreator
{
float a, b;
public:
@@ -149,13 +218,13 @@ public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PaniniPortraitWarper>(scale, a, b); }
};
class MercatorWarper: public WarperCreator
class CV_EXPORTS MercatorWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::MercatorWarper>(scale); }
};
class TransverseMercatorWarper: public WarperCreator
class CV_EXPORTS TransverseMercatorWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::TransverseMercatorWarper>(scale); }
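PyRotationWarper is the Python-facing facade over detail::RotationWarper; the warp-type strings map to these factories as shown in the warpers.cpp hunk further down. A sketch (note getScale/setScale are stubs in this revision):

    import numpy as np
    import cv2 as cv

    K = np.array([[800, 0, 160],
                  [0, 800, 120],
                  [0,   0,   1]], np.float32)   # warpers expect CV_32F K and R
    R = np.eye(3, dtype=np.float32)

    warper = cv.PyRotationWarper("spherical", 800.0)   # (warp type, scale)
    src = np.zeros((240, 320, 3), np.uint8)
    corner, warped = warper.warp(src, K, R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
    print(corner, warped.shape, warper.warpPoint((160.0, 120.0), K, R))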

@@ -70,7 +70,7 @@ Ptr<Blender> Blender::createDefault(int type, bool try_gpu)
if (type == NO)
return makePtr<Blender>();
if (type == FEATHER)
return makePtr<FeatherBlender>();
return makePtr<FeatherBlender>(try_gpu);
if (type == MULTI_BAND)
return makePtr<MultiBandBlender>(try_gpu);
CV_Error(Error::StsBadArg, "unsupported blending method");

@@ -47,12 +47,18 @@ namespace detail {
Ptr<ExposureCompensator> ExposureCompensator::createDefault(int type)
{
Ptr<ExposureCompensator> e;
if (type == NO)
return makePtr<NoExposureCompensator>();
if (type == GAIN)
return makePtr<GainCompensator>();
e = makePtr<NoExposureCompensator>();
else if (type == GAIN)
e = makePtr<GainCompensator>();
if (type == GAIN_BLOCKS)
return makePtr<BlocksGainCompensator>();
e = makePtr<BlocksGainCompensator>();
if (e.get() != nullptr)
{
e->setUpdateGain(true);
return e;
}
CV_Error(Error::StsBadArg, "unsupported exposure compensation method");
}
@@ -120,25 +126,27 @@ void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<
}
}
}
double alpha = 0.01;
double beta = 100;
Mat_<double> A(num_images, num_images); A.setTo(0);
Mat_<double> b(num_images, 1); b.setTo(0);
for (int i = 0; i < num_images; ++i)
if (getUpdateGain() || gains_.rows != num_images)
{
for (int j = 0; j < num_images; ++j)
double alpha = 0.01;
double beta = 100;
Mat_<double> A(num_images, num_images); A.setTo(0);
Mat_<double> b(num_images, 1); b.setTo(0);
for (int i = 0; i < num_images; ++i)
{
b(i, 0) += beta * N(i, j);
A(i, i) += beta * N(i, j);
if (j == i) continue;
A(i, i) += 2 * alpha * I(i, j) * I(i, j) * N(i, j);
A(i, j) -= 2 * alpha * I(i, j) * I(j, i) * N(i, j);
for (int j = 0; j < num_images; ++j)
{
b(i, 0) += beta * N(i, j);
A(i, i) += beta * N(i, j);
if (j == i) continue;
A(i, i) += 2 * alpha * I(i, j) * I(i, j) * N(i, j);
A(i, j) -= 2 * alpha * I(i, j) * I(j, i) * N(i, j);
}
}
}
solve(A, b, gains_);
solve(A, b, gains_);
}
LOGLN("Exposure compensation, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
}
@@ -160,6 +168,24 @@ std::vector<double> GainCompensator::gains() const
return gains_vec;
}
void GainCompensator::getMatGains(std::vector<Mat>& umv)
{
umv.clear();
for (int i = 0; i < gains_.rows; ++i)
umv.push_back(Mat(1,1,CV_64FC1,Scalar(gains_(i, 0))));
}
void GainCompensator::setMatGains(std::vector<Mat>& umv)
{
gains_=Mat_<double>(static_cast<int>(umv.size()),1);
for (int i = 0; i < static_cast<int>(umv.size()); i++)
{
int type = umv[i].type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
CV_CheckType(type, depth == CV_64F && cn == 1, "Only double images are supported for gain");
CV_Assert(umv[i].rows == 1 && umv[i].cols == 1);
gains_(i, 0) = umv[i].at<double>(0, 0);
}
}
void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks)
@@ -197,29 +223,32 @@ void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::v
}
}
GainCompensator compensator;
compensator.feed(block_corners, block_images, block_masks);
std::vector<double> gains = compensator.gains();
gain_maps_.resize(num_images);
Mat_<float> ker(1, 3);
ker(0,0) = 0.25; ker(0,1) = 0.5; ker(0,2) = 0.25;
int bl_idx = 0;
for (int img_idx = 0; img_idx < num_images; ++img_idx)
if (getUpdateGain())
{
Size bl_per_img = bl_per_imgs[img_idx];
gain_maps_[img_idx].create(bl_per_img, CV_32F);
GainCompensator compensator;
compensator.feed(block_corners, block_images, block_masks);
std::vector<double> gains = compensator.gains();
gain_maps_.resize(num_images);
Mat_<float> ker(1, 3);
ker(0, 0) = 0.25; ker(0, 1) = 0.5; ker(0, 2) = 0.25;
int bl_idx = 0;
for (int img_idx = 0; img_idx < num_images; ++img_idx)
{
Mat_<float> gain_map = gain_maps_[img_idx].getMat(ACCESS_WRITE);
for (int by = 0; by < bl_per_img.height; ++by)
for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
gain_map(by, bx) = static_cast<float>(gains[bl_idx]);
}
Size bl_per_img = bl_per_imgs[img_idx];
gain_maps_[img_idx].create(bl_per_img, CV_32F);
sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker);
sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker);
{
Mat_<float> gain_map = gain_maps_[img_idx].getMat(ACCESS_WRITE);
for (int by = 0; by < bl_per_img.height; ++by)
for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
gain_map(by, bx) = static_cast<float>(gains[bl_idx]);
}
sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker);
sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker);
}
}
}
@@ -251,5 +280,26 @@ void BlocksGainCompensator::apply(int index, Point /*corner*/, InputOutputArray
}
}
void BlocksGainCompensator::getMatGains(std::vector<Mat>& umv)
{
umv.clear();
for (int i = 0; i < static_cast<int>(gain_maps_.size()); ++i)
{
Mat m;
gain_maps_[i].copyTo(m);
umv.push_back(m);
}
}
void BlocksGainCompensator::setMatGains(std::vector<Mat>& umv)
{
for (int i = 0; i < static_cast<int>(umv.size()); i++)
{
UMat m;
umv[i].copyTo(m);
gain_maps_.push_back(m);
}
}
} // namespace detail
} // namespace cv

@@ -384,6 +384,12 @@ BestOf2NearestMatcher::BestOf2NearestMatcher(bool try_use_gpu, float match_conf,
num_matches_thresh2_ = num_matches_thresh2;
}
Ptr<BestOf2NearestMatcher> BestOf2NearestMatcher::create(bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2)
{
return makePtr<BestOf2NearestMatcher>(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh2);
}
void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo &matches_info)

@@ -47,6 +47,18 @@
namespace cv {
namespace detail {
Ptr<SeamFinder> SeamFinder::createDefault(int type)
{
if (type == NO)
return makePtr<NoSeamFinder>();
if (type == VORONOI_SEAM)
return makePtr<VoronoiSeamFinder>();
if (type == DP_SEAM)
return makePtr<DpSeamFinder>();
CV_Error(Error::StsBadArg, "unsupported exposure compensation method");
}
void PairwiseSeamFinder::find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks)
{
@@ -165,6 +177,26 @@ void VoronoiSeamFinder::findInPair(size_t first, size_t second, Rect roi)
DpSeamFinder::DpSeamFinder(CostFunction costFunc) : costFunc_(costFunc), ncomps_(0) {}
DpSeamFinder::DpSeamFinder(String costFunc)
{
ncomps_ = 0;
if (costFunc == "COLOR")
costFunc_ = COLOR;
else if (costFunc == "COLOR_GRAD")
costFunc_ = COLOR_GRAD;
else
CV_Error(-1, "Unknown cost function");
}
void DpSeamFinder::setCostFunction(String costFunc)
{
if (costFunc == "COLOR")
costFunc_ = COLOR;
else if (costFunc == "COLOR_GRAD")
costFunc_ = COLOR_GRAD;
else
CV_Error(-1, "Unknown cost function");
}
void DpSeamFinder::find(const std::vector<UMat> &src, const std::vector<Point> &corners, std::vector<UMat> &masks)
{
@@ -1324,6 +1356,19 @@ void GraphCutSeamFinder::Impl::findInPair(size_t first, size_t second, Rect roi)
}
}
GraphCutSeamFinder::GraphCutSeamFinder(String cost_type, float terminal_cost, float bad_region_penalty)
{
CostType t;
if (cost_type == "COST_COLOR")
t = COST_COLOR;
else if (cost_type == "COST_COLOR_GRAD")
t = COST_COLOR_GRAD;
else
CV_Error(Error::StsBadFunc, "Unknown cost type function");
impl_ = new Impl(t, terminal_cost, bad_region_penalty);
}
GraphCutSeamFinder::GraphCutSeamFinder(int cost_type, float terminal_cost, float bad_region_penalty)
: impl_(new Impl(cost_type, terminal_cost, bad_region_penalty)) {}

@@ -42,8 +42,79 @@
#include "precomp.hpp"
#include "opencl_kernels_stitching.hpp"
#include <iostream>
namespace cv {
PyRotationWarper::PyRotationWarper(String warp_type, float scale)
{
Ptr<WarperCreator> warper_creator;
if (warp_type == "plane")
warper_creator = makePtr<cv::PlaneWarper>();
else if (warp_type == "affine")
warper_creator = makePtr<cv::AffineWarper>();
else if (warp_type == "cylindrical")
warper_creator = makePtr<cv::CylindricalWarper>();
else if (warp_type == "spherical")
warper_creator = makePtr<cv::SphericalWarper>();
else if (warp_type == "fisheye")
warper_creator = makePtr<cv::FisheyeWarper>();
else if (warp_type == "stereographic")
warper_creator = makePtr<cv::StereographicWarper>();
else if (warp_type == "compressedPlaneA2B1")
warper_creator = makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);
else if (warp_type == "compressedPlaneA1.5B1")
warper_creator = makePtr<cv::CompressedRectilinearWarper>(1.5f, 1.0f);
else if (warp_type == "compressedPlanePortraitA2B1")
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(2.0f, 1.0f);
else if (warp_type == "compressedPlanePortraitA1.5B1")
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
else if (warp_type == "paniniA2B1")
warper_creator = makePtr<cv::PaniniWarper>(2.0f, 1.0f);
else if (warp_type == "paniniA1.5B1")
warper_creator = makePtr<cv::PaniniWarper>(1.5f, 1.0f);
else if (warp_type == "paniniPortraitA2B1")
warper_creator = makePtr<cv::PaniniPortraitWarper>(2.0f, 1.0f);
else if (warp_type == "paniniPortraitA1.5B1")
warper_creator = makePtr<cv::PaniniPortraitWarper>(1.5f, 1.0f);
else if (warp_type == "mercator")
warper_creator = makePtr<cv::MercatorWarper>();
else if (warp_type == "transverseMercator")
warper_creator = makePtr<cv::TransverseMercatorWarper>();
if (warper_creator.get() != nullptr)
{
rw = warper_creator->create(scale);
}
else
CV_Error(Error::StsError, "unknown warper :" + warp_type);
}
Point2f PyRotationWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R)
{
return rw.get()->warpPoint(pt, K, R);
}
Rect PyRotationWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
return rw.get()->buildMaps(src_size, K, R, xmap, ymap);
}
Point PyRotationWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst)
{
if (rw.get() == nullptr)
CV_Error(Error::StsError, "Warper is null");
Point p = rw.get()->warp(src, K, R, interp_mode, border_mode, dst);
return p;
}
void PyRotationWarper::warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, OutputArray dst)
{
return rw.get()->warpBackward(src, K, R, interp_mode, border_mode, dst_size, dst);
}
Rect PyRotationWarper::warpRoi(Size src_size, InputArray K, InputArray R)
{
return rw.get()->warpRoi(src_size, K, R);
}
namespace detail {
void ProjectorBase::setCameraParams(InputArray _K, InputArray _R, InputArray _T)
@@ -157,7 +228,6 @@ Point PlaneWarper::warp(InputArray src, InputArray K, InputArray R, InputArray T
{
UMat uxmap, uymap;
Rect dst_roi = buildMaps(src.size(), K, R, T, uxmap, uymap);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
remap(src, dst, uxmap, uymap, interp_mode, border_mode);

@@ -0,0 +1,387 @@
"""Rotation model images stitcher.
stitching_detailed img1 img2 [...imgN] [flags]
Flags:
--preview
Run stitching in the preview mode. Works faster than usual mode,
but output image will have lower resolution.
--try_cuda (yes|no)
Try to use CUDA. The default value is 'no'. All default values
are for CPU mode.
\nMotion Estimation Flags:
--work_megapix <float>
Resolution for image registration step. The default is 0.6 Mpx.
--features (surf|orb|sift)
Type of features used for images matching. The default is surf.
--matcher (homography|affine)
Matcher used for pairwise image matching.
--estimator (homography|affine)
Type of estimator used for transformation estimation.
--match_conf <float>
Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.
--conf_thresh <float>
Threshold for the confidence that two images are from the same panorama.
The default is 1.0.
--ba (no|reproj|ray|affine)
Bundle adjustment cost function. The default is ray.
--ba_refine_mask (mask)
    Set refinement mask for bundle adjustment. It looks like 'x_xxx',
    where 'x' means refine the respective parameter and '_' means don't
    refine it. The format is <fx><skew><ppx><aspect><ppy>. The default
    mask is 'xxxxx'. If bundle adjustment doesn't support estimation of
    the selected parameter, the respective flag is ignored.
--wave_correct (no|horiz|vert)
Perform wave effect correction. The default is 'horiz'.
--save_graph <file_name>
Save matches graph represented in DOT language to <file_name> file.
Labels description: Nm is number of matches, Ni is number of inliers,
C is confidence.
\nCompositing Flags:
--warp (affine|plane|cylindrical|spherical|fisheye|stereographic|compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1|paniniPortraitA1.5B1|mercator|transverseMercator)
Warp surface type. The default is 'spherical'.
--seam_megapix <float>
Resolution for seam estimation step. The default is 0.1 Mpx.
--seam (no|voronoi|gc_color|gc_colorgrad|dp_color|dp_colorgrad)
    Seam estimation method. The default is 'gc_color'.
--compose_megapix <float>
Resolution for compositing step. Use -1 for original resolution.
The default is -1.
--expos_comp (no|gain|gain_blocks)
Exposure compensation method. The default is 'gain_blocks'.
--blend (no|feather|multiband)
Blending method. The default is 'multiband'.
--blend_strength <float>
Blending strength from [0,100] range. The default is 5.
--output <result_img>
The default is 'result.jpg'.
--timelapse (as_is|crop)
Output warped images separately as frames of a time lapse movie, with 'fixed_' prepended to input file names.
--rangewidth <int>
    Limit the number of images to match with each other. The default
    is -1 (no limit).
"""
import numpy as np
import cv2 as cv
import sys
import argparse
parser = argparse.ArgumentParser(description='stitching_detailed')
parser.add_argument('img_names', nargs='+', help='files to stitch', type=str)
parser.add_argument('--preview', action='store_true', dest='preview', help='Run stitching in the preview mode. Works faster than usual mode but output image will have lower resolution.')
parser.add_argument('--try_cuda', action='store_true', dest='try_cuda', help='Try to use CUDA. The default is CPU mode.')
parser.add_argument('--work_megapix', action='store', default=0.6, type=float, dest='work_megapix', help='Resolution for image registration step. The default is 0.6 Mpx.')
parser.add_argument('--features', action='store', default='orb', type=str, dest='features', help='Type of features used for image matching. The default is orb.')
parser.add_argument('--matcher', action='store', default='homography', type=str, dest='matcher', help='Matcher used for pairwise image matching.')
parser.add_argument('--estimator', action='store', default='homography', type=str, dest='estimator', help='Type of estimator used for transformation estimation.')
parser.add_argument('--match_conf', action='store', default=0.3, type=float, dest='match_conf', help='Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.')
parser.add_argument('--conf_thresh', action='store', default=1.0, type=float, dest='conf_thresh', help='Confidence threshold for deciding that two images are from the same panorama. The default is 1.0.')
parser.add_argument('--ba', action='store', default='ray', type=str, dest='ba', help='Bundle adjustment cost function. The default is ray.')
parser.add_argument('--ba_refine_mask', action='store', default='xxxxx', type=str, dest='ba_refine_mask', help='Set refinement mask for bundle adjustment. The default mask is "xxxxx".')
parser.add_argument('--wave_correct', action='store', default='horiz', type=str, dest='wave_correct', help='Perform wave effect correction. The default is "horiz".')
parser.add_argument('--save_graph', action='store', default=None, type=str, dest='save_graph', help='Save matches graph represented in DOT language to <file_name> file.')
parser.add_argument('--warp', action='store', default='spherical', type=str, dest='warp', help='Warp surface type. The default is "spherical".')
parser.add_argument('--seam_megapix', action='store', default=0.1, type=float, dest='seam_megapix', help='Resolution for seam estimation step. The default is 0.1 Mpx.')
parser.add_argument('--seam', action='store', default='gc_color', type=str, dest='seam', help='Seam estimation method. The default is "gc_color".')
parser.add_argument('--compose_megapix', action='store', default=-1, type=float, dest='compose_megapix', help='Resolution for compositing step. Use -1 for original resolution. The default is -1.')
parser.add_argument('--expos_comp', action='store', default='gain_blocks', type=str, dest='expos_comp', help='Exposure compensation method. The default is "gain_blocks".')
parser.add_argument('--blend', action='store', default='multiband', type=str, dest='blend', help='Blending method. The default is "multiband".')
parser.add_argument('--blend_strength', action='store', default=5, type=int, dest='blend_strength', help='Blending strength from [0,100] range. The default is 5.')
parser.add_argument('--output', action='store', default='result.jpg', type=str, dest='output', help='The default is "result.jpg".')
parser.add_argument('--timelapse', action='store', default=None, type=str, dest='timelapse', help='Output warped images separately as frames of a time lapse movie, with "fixed_" prepended to input file names.')
parser.add_argument('--rangewidth', action='store', default=-1, type=int, dest='rangewidth', help='Limit the number of images to match with each other. The default is -1 (no limit).')
args = parser.parse_args()
img_names = args.img_names
print(img_names)
preview = args.preview
try_cuda = args.try_cuda
work_megapix = args.work_megapix
seam_megapix = args.seam_megapix
compose_megapix = args.compose_megapix
conf_thresh = args.conf_thresh
features_type = args.features
matcher_type = args.matcher
estimator_type = args.estimator
ba_cost_func = args.ba
ba_refine_mask = args.ba_refine_mask
wave_correct = args.wave_correct
do_wave_correct = wave_correct != 'no'
save_graph = args.save_graph is not None
save_graph_to = args.save_graph
warp_type = args.warp
if args.expos_comp == 'no':
    expos_comp_type = cv.detail.ExposureCompensator_NO
elif args.expos_comp == 'gain':
    expos_comp_type = cv.detail.ExposureCompensator_GAIN
elif args.expos_comp == 'gain_blocks':
    expos_comp_type = cv.detail.ExposureCompensator_GAIN_BLOCKS
else:
    print("Bad exposure compensation method")
    exit()
match_conf = args.match_conf
seam_find_type = args.seam
blend_type = args.blend
blend_strength = args.blend_strength
result_name = args.output
if args.timelapse is not None:
    timelapse = True
    if args.timelapse == "as_is":
        timelapse_type = cv.detail.Timelapser_AS_IS
    elif args.timelapse == "crop":
        timelapse_type = cv.detail.Timelapser_CROP
    else:
        print("Bad timelapse method")
        exit()
else:
    timelapse = False
range_width = args.rangewidth
if features_type == 'orb':
    finder = cv.ORB.create()
elif features_type == 'surf':
    finder = cv.xfeatures2d_SURF.create()  # requires the opencv_contrib xfeatures2d module
elif features_type == 'sift':
    finder = cv.xfeatures2d_SIFT.create()  # requires the opencv_contrib xfeatures2d module
else:
    print("Unknown descriptor type")
    exit()
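# The pipeline runs at up to three resolutions: a work scale for feature
# detection and registration (--work_megapix), a smaller seam scale for seam
# estimation (--seam_megapix), and a compose scale for the final panorama
# (--compose_megapix, where -1 keeps the original resolution).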
seam_work_aspect = 1
full_img_sizes = []
features = []
images = []
is_work_scale_set = False
is_seam_scale_set = False
is_compose_scale_set = False
for name in img_names:
    full_img = cv.imread(name)
    if full_img is None:
        print("Cannot read image ", name)
        exit()
    full_img_sizes.append((full_img.shape[1], full_img.shape[0]))
    if work_megapix < 0:
        img = full_img
        work_scale = 1
        is_work_scale_set = True
else:
        if not is_work_scale_set:
            work_scale = min(1.0, np.sqrt(work_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
            is_work_scale_set = True
        img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale, interpolation=cv.INTER_LINEAR_EXACT)
    if not is_seam_scale_set:
        seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
        seam_work_aspect = seam_scale / work_scale
        is_seam_scale_set = True
    imgFea = cv.detail.computeImageFeatures2(finder, img)
features.append(imgFea)
img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale, interpolation=cv.INTER_LINEAR_EXACT)
images.append(img)
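# At this point features[] holds the per-image features detected at work
# scale, while images[] holds seam-scale copies used later for seam finding.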
if matcher_type== "affine":
matcher = cv.detail.AffineBestOf2NearestMatcher_create(False, try_cuda, match_conf)
elif range_width==-1:
matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
else:
matcher = cv.detail.BestOf2NearestRangeMatcher_create(range_width, try_cuda, match_conf)
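# apply2 matches every feasible image pair and returns one MatchesInfo per
# pair; collectGarbage then frees the matcher's internal buffers.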
p = matcher.apply2(features)
matcher.collectGarbage()
if save_graph:
    f = open(save_graph_to, "w")
    # matchesGraphAsString is not exposed to Python here, so only an empty file is written:
    # f.write(matchesGraphAsString(img_names, pairwise_matches, conf_thresh))
    f.close()
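# Keep only the images belonging to the biggest connected component of
# confidently matched pairs; the rest are left out of the panorama.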
indices = cv.detail.leaveBiggestComponent(features, p, conf_thresh)
img_subset = []
img_names_subset = []
full_img_sizes_subset = []
num_images = len(indices)
for i in range(0, num_images):
    img_names_subset.append(img_names[indices[i, 0]])
    img_subset.append(images[indices[i, 0]])
    full_img_sizes_subset.append(full_img_sizes[indices[i, 0]])
images = img_subset
img_names = img_names_subset
full_img_sizes = full_img_sizes_subset
num_images = len(img_names)
if num_images < 2:
print("Need more images")
exit()
if estimator_type == "affine":
estimator = cv.detail_AffineBasedEstimator()
else:
estimator = cv.detail_HomographyBasedEstimator()
b, cameras =estimator.apply(features,p,None)
if not b:
print("Homography estimation failed.")
exit()
for cam in cameras:
cam.R=cam.R.astype(np.float32)
if ba_cost_func == "reproj":
adjuster = cv.detail_BundleAdjusterReproj()
elif ba_cost_func == "ray":
adjuster = cv.detail_BundleAdjusterRay()
elif ba_cost_func == "affine":
adjuster = cv.detail_BundleAdjusterAffinePartial()
elif ba_cost_func == "no":
adjuster = cv.detail_NoBundleAdjuster()
else:
print( "Unknown bundle adjustment cost function: ", ba_cost_func )
exit()
adjuster.setConfThresh(1)
refine_mask = np.zeros((3, 3), np.uint8)
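# The mask string <fx><skew><ppx><aspect><ppy> maps onto the upper triangle
# of the 3x3 camera matrix: fx->(0,0), skew->(0,1), ppx->(0,2),
# aspect->(1,1), ppy->(1,2).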
if ba_refine_mask[0] == 'x':
refine_mask[0,0] = 1
if ba_refine_mask[1] == 'x':
refine_mask[0,1] = 1
if ba_refine_mask[2] == 'x':
refine_mask[0,2] = 1
if ba_refine_mask[3] == 'x':
refine_mask[1,1] = 1
if ba_refine_mask[4] == 'x':
refine_mask[1,2] = 1
adjuster.setRefinementMask(refine_mask)
b,cameras = adjuster.apply(features,p,cameras)
if not b:
print("Camera parameters adjusting failed.")
exit()
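# Use the median focal length among all cameras as the warped image scale.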
focals = []
for cam in cameras:
    focals.append(cam.focal)
focals.sort()
if len(focals) % 2 == 1:
    warped_image_scale = focals[len(focals) // 2]
else:
    warped_image_scale = (focals[len(focals) // 2] + focals[len(focals) // 2 - 1]) / 2
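# Wave correction straightens the estimated camera rotations so the panorama
# does not come out wavy from accumulated registration errors.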
if do_wave_correct:
    rmats = []
    for cam in cameras:
        rmats.append(np.copy(cam.R))
    if wave_correct == 'vert':
        rmats = cv.detail.waveCorrect(rmats, cv.detail.WAVE_CORRECT_VERT)
    else:
        rmats = cv.detail.waveCorrect(rmats, cv.detail.WAVE_CORRECT_HORIZ)
    for idx, cam in enumerate(cameras):
        cam.R = rmats[idx]
corners = []
masks_warped = []
images_warped = []
sizes = []
masks = []
for i in range(0, num_images):
    um = cv.UMat(255 * np.ones((images[i].shape[0], images[i].shape[1]), np.uint8))
    masks.append(um)
warper = cv.PyRotationWarper(warp_type, warped_image_scale * seam_work_aspect)  # could the warper be nullptr?
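# Warp the seam-scale images and masks; the intrinsics are rescaled by
# seam_work_aspect so that K matches the seam resolution.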
for i in range(0, num_images):
    K = cameras[i].K().astype(np.float32)
    swa = seam_work_aspect
    K[0, 0] *= swa
    K[0, 2] *= swa
    K[1, 1] *= swa
    K[1, 2] *= swa
    corner, image_wp = warper.warp(images[i], K, cameras[i].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
    corners.append(corner)
    sizes.append((image_wp.shape[1], image_wp.shape[0]))
    images_warped.append(image_wp)
    p, mask_wp = warper.warp(masks[i], K, cameras[i].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
    masks_warped.append(mask_wp)
images_warped_f = []
for img in images_warped:
    imgf = img.astype(np.float32)
    images_warped_f.append(imgf)
compensator = cv.detail.ExposureCompensator_createDefault(expos_comp_type)
compensator.feed(corners, images_warped, masks_warped)
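# feed() estimates the per-image exposure corrections; they are applied to
# each image later during compositing via compensator.apply().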
if seam_find_type == "no":
seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
elif seam_find_type == "voronoi":
seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM);
elif seam_find_type == "gc_color":
seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR")
elif seam_find_type == "gc_colorgrad":
seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
elif seam_find_type == "dp_color":
seam_finder = cv.detail_DpSeamFinder("COLOR")
elif seam_find_type == "dp_colorgrad":
seam_finder = cv.detail_DpSeamFinder("COLOR_GRAD")
if seam_finder is None:
print("Can't create the following seam finder ",seam_find_type)
exit()
masks_warped = seam_finder.find(images_warped_f, corners, masks_warped)
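# Seam masks are estimated at seam scale; during compositing they are
# dilated and resized up to compose scale before masking the warped images.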
compose_scale = 1
corners = []
sizes = []
images_warped = []
images_warped_f = []
masks = []
blender = None
timelapser = None
compose_work_aspect = 1
for idx, name in enumerate(img_names):  # https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
full_img = cv.imread(name)
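    # On the first iteration, rescale the cameras and rebuild the warper at
    # compositing resolution, and precompute corners and sizes of all warped
    # images for the blender.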
    if not is_compose_scale_set:
        if compose_megapix > 0:
            compose_scale = min(1.0, np.sqrt(compose_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
        is_compose_scale_set = True
        compose_work_aspect = compose_scale / work_scale
        warped_image_scale *= compose_work_aspect
        warper = cv.PyRotationWarper(warp_type, warped_image_scale)
        for i in range(0, len(img_names)):
            cameras[i].focal *= compose_work_aspect
            cameras[i].ppx *= compose_work_aspect
            cameras[i].ppy *= compose_work_aspect
            sz = (int(round(full_img_sizes[i][0] * compose_scale)), int(round(full_img_sizes[i][1] * compose_scale)))
            K = cameras[i].K().astype(np.float32)
            roi = warper.warpRoi(sz, K, cameras[i].R)
            corners.append(roi[0:2])
            sizes.append(roi[2:4])
    if abs(compose_scale - 1) > 1e-1:
        img = cv.resize(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale, interpolation=cv.INTER_LINEAR_EXACT)
    else:
        img = full_img
    img_size = (img.shape[1], img.shape[0])
    K = cameras[idx].K().astype(np.float32)
    corner, image_warped = warper.warp(img, K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
    mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8)
    p, mask_warped = warper.warp(mask, K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
    compensator.apply(idx, corners[idx], image_warped, mask_warped)
    image_warped_s = image_warped.astype(np.int16)
    dilated_mask = cv.dilate(masks_warped[idx], None)
    seam_mask = cv.resize(dilated_mask, (mask_warped.shape[1], mask_warped.shape[0]), interpolation=cv.INTER_LINEAR_EXACT)
    mask_warped = cv.bitwise_and(seam_mask, mask_warped)
    if blender is None and not timelapse:
        blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
        dst_sz = cv.detail.resultRoi(corners, sizes)
        blend_width = np.sqrt(dst_sz[2] * dst_sz[3]) * blend_strength / 100
        if blend_width < 1:
            blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
        elif blend_type == "multiband":
            blender = cv.detail.Blender_createDefault(cv.detail.Blender_MULTIBAND)
            blender.setNumBands(int(np.log(blend_width) / np.log(2.) - 1.))
        elif blend_type == "feather":
            blender = cv.detail.Blender_createDefault(cv.detail.Blender_FEATHER)
            blender.setSharpness(1. / blend_width)
        blender.prepare(corners, sizes)
    elif timelapser is None and timelapse:
        timelapser = cv.detail.Timelapser_createDefault(timelapse_type)
        timelapser.initialize(corners, sizes)
    if timelapse:
        matones = np.ones((image_warped_s.shape[0], image_warped_s.shape[1]), np.uint8)
        timelapser.process(image_warped_s, matones, corners[idx])
        pos_s = img_names[idx].rfind("/")
        if pos_s == -1:
            fixedFileName = "fixed_" + img_names[idx]
        else:
            fixedFileName = img_names[idx][:pos_s + 1] + "fixed_" + img_names[idx][pos_s + 1:]
        cv.imwrite(fixedFileName, timelapser.getDst())
else:
blender.feed(image_warped_s, mask_warped, corners[idx])
if not timelapse:
    result = None
    result_mask = None
    result, result_mask = blender.blend(result, result_mask)
    cv.imwrite(result_name, result)