diff --git a/modules/video/include/opencv2/video/tracking.hpp b/modules/video/include/opencv2/video/tracking.hpp
index c6ead3a42c..a96a2e53cf 100644
--- a/modules/video/include/opencv2/video/tracking.hpp
+++ b/modules/video/include/opencv2/video/tracking.hpp
@@ -297,7 +297,7 @@ row is ignored.
 Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an
 area-based alignment that builds on intensity similarities. In essence, the function updates the
 initial transformation that roughly aligns the images. If this information is missing, the identity
-warp (unity matrix) should be given as input. Note that if images undergo strong
+warp (unity matrix) is used as an initialization. Note that if images undergo strong
 displacements/rotations, an initial transformation that roughly aligns the images is necessary
 (e.g., a simple euclidean/similarity transform that allows for the images showing the same image
 content approximately). Use inverse warping in the second image to take an image close to the first
diff --git a/modules/video/src/ecc.cpp b/modules/video/src/ecc.cpp
index d11419e402..377b775072 100644
--- a/modules/video/src/ecc.cpp
+++ b/modules/video/src/ecc.cpp
@@ -325,6 +325,16 @@ double cv::findTransformECC(InputArray templateImage,
     CV_Assert(!src.empty());
     CV_Assert(!dst.empty());
 
+    // If the user passed an un-initialized warpMatrix, initialize to identity
+    if(map.empty()) {
+        int rowCount = 2;
+        if(motionType == MOTION_HOMOGRAPHY)
+            rowCount = 3;
+
+        warpMatrix.create(rowCount, 3, CV_32FC1);
+        map = warpMatrix.getMat();
+        map = Mat::eye(rowCount, 3, CV_32F);
+    }
     if( ! (src.type()==dst.type()))
         CV_Error( Error::StsUnmatchedFormats, "Both input images must have the same data type" );
 
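
For context, a minimal usage sketch of the behaviour this patch enables (not part of the patch itself; the image file names and termination criteria below are illustrative): with the change applied, the warpMatrix argument may be left empty, and findTransformECC initializes it to an identity warp of the size required by the motion type.

#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/video/tracking.hpp>

int main()
{
    // Illustrative inputs: any pair of roughly aligned single-channel images.
    cv::Mat templ = cv::imread("template.png", cv::IMREAD_GRAYSCALE);
    cv::Mat input = cv::imread("input.png", cv::IMREAD_GRAYSCALE);

    // Previously this matrix had to be pre-filled with an identity warp;
    // with this patch an empty Mat is initialized to identity inside findTransformECC.
    cv::Mat warp;

    double rho = cv::findTransformECC(
        templ, input, warp, cv::MOTION_EUCLIDEAN,
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, 1e-6));

    // warp now holds the estimated 2x3 CV_32F transform (3x3 for MOTION_HOMOGRAPHY).
    std::cout << "ECC correlation: " << rho << "\n" << warp << std::endl;
    return 0;
}

Note that map = Mat::eye(rowCount, 3, CV_32F) in the patch assigns a MatExpr into a Mat that already has the matching size and type, so the identity values are written into the buffer created by warpMatrix.create() and the output argument still shares data with map.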