diff --git a/modules/optflow/CMakeLists.txt b/modules/optflow/CMakeLists.txt
index 9887a3828..41029bcf4 100644
--- a/modules/optflow/CMakeLists.txt
+++ b/modules/optflow/CMakeLists.txt
@@ -1,2 +1,2 @@
 set(the_description "Optical Flow Algorithms")
-ocv_define_module(optflow opencv_core opencv_imgproc opencv_video opencv_highgui WRAP python)
+ocv_define_module(optflow opencv_core opencv_imgproc opencv_video opencv_highgui opencv_ximgproc WRAP python)
diff --git a/modules/optflow/include/opencv2/optflow.hpp b/modules/optflow/include/opencv2/optflow.hpp
index a01b0c8ff..667adcb6a 100644
--- a/modules/optflow/include/opencv2/optflow.hpp
+++ b/modules/optflow/include/opencv2/optflow.hpp
@@ -109,7 +109,30 @@ CV_EXPORTS_W void calcOpticalFlowSF( InputArray from, InputArray to, OutputArray
                                      double sigma_dist, double sigma_color, int postprocess_window,
                                      double sigma_dist_fix, double sigma_color_fix, double occ_thr,
                                      int upscale_averaging_radius, double upscale_sigma_dist,
-                                     double upscale_sigma_color, double speed_up_thr );
+                                     double upscale_sigma_color, double speed_up_thr );
+
+/** @brief Fast dense optical flow based on PyrLK sparse matches interpolation.
+
+@param from first 8-bit 3-channel or 1-channel image.
+@param to second 8-bit 3-channel or 1-channel image of the same size as from.
+@param flow computed flow image that has the same size as from and CV_32FC2 type.
+@param grid_step stride used in sparse match computation. Lower values usually
+       result in higher quality but slow down the algorithm.
+@param k number of nearest-neighbor matches considered when fitting a locally affine
+       model. Lower values can make the algorithm noticeably faster at the cost of
+       some quality degradation.
+@param sigma parameter defining how fast the weights decrease in the locally-weighted affine
+       fitting. Higher values can help preserve fine details, lower values can help to get rid
+       of the noise in the output flow.
+@param use_post_proc defines whether the ximgproc::fastGlobalSmootherFilter() is used
+       for post-processing after interpolation.
+@param fgs_lambda see the respective parameter of the ximgproc::fastGlobalSmootherFilter().
+@param fgs_sigma see the respective parameter of the ximgproc::fastGlobalSmootherFilter().
+ */
+CV_EXPORTS_W void calcOpticalFlowSparseToDense ( InputArray from, InputArray to, OutputArray flow,
+                                                 int grid_step = 8, int k = 128, float sigma = 0.05f,
+                                                 bool use_post_proc = true, float fgs_lambda = 500.0f,
+                                                 float fgs_sigma = 1.5f );
 
 /** @brief Read a .flo file
 
@@ -165,6 +188,9 @@ CV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_SimpleFlow();
 //! Additional interface to the Farneback's algorithm - calcOpticalFlowFarneback()
 CV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_Farneback();
 
+//! Additional interface to the SparseToDenseFlow algorithm - calcOpticalFlowSparseToDense()
+CV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_SparseToDense();
+
 //! @}
 
 } //optflow
diff --git a/modules/optflow/samples/optical_flow_evaluation.cpp b/modules/optflow/samples/optical_flow_evaluation.cpp
index c65412a3e..4ecc798d5 100644
--- a/modules/optflow/samples/optical_flow_evaluation.cpp
+++ b/modules/optflow/samples/optical_flow_evaluation.cpp
@@ -10,7 +10,7 @@ using namespace optflow;
 const String keys = "{help h usage ?
| | print this message }" "{@image1 | | image1 }" "{@image2 | | image2 }" - "{@algorithm | | [farneback, simpleflow, tvl1 or deepflow] }" + "{@algorithm | | [farneback, simpleflow, tvl1, deepflow or sparsetodenseflow] }" "{@groundtruth | | path to the .flo file (optional), Middlebury format }" "{m measure |endpoint| error measure - [endpoint or angular] }" "{r region |all | region to compute stats about [all, discontinuities, untextured] }" @@ -249,6 +249,8 @@ int main( int argc, char** argv ) algorithm = createOptFlow_DualTVL1(); else if ( method == "deepflow" ) algorithm = createOptFlow_DeepFlow(); + else if ( method == "sparsetodenseflow" ) + algorithm = createOptFlow_SparseToDense(); else { printf("Wrong method!\n"); diff --git a/modules/optflow/src/interfaces.cpp b/modules/optflow/src/interfaces.cpp index df510ebc3..d2830eb14 100644 --- a/modules/optflow/src/interfaces.cpp +++ b/modules/optflow/src/interfaces.cpp @@ -177,5 +177,42 @@ Ptr createOptFlow_Farneback() { return makePtr(); } + +class OpticalFlowSparseToDense : public DenseOpticalFlow +{ +public: + OpticalFlowSparseToDense(int _grid_step, int _k, float _sigma, bool _use_post_proc, float _fgs_lambda, float _fgs_sigma); + void calc(InputArray I0, InputArray I1, InputOutputArray flow); + void collectGarbage(); +protected: + int grid_step; + int k; + float sigma; + bool use_post_proc; + float fgs_lambda; + float fgs_sigma; +}; + +OpticalFlowSparseToDense::OpticalFlowSparseToDense(int _grid_step, int _k, float _sigma, bool _use_post_proc, float _fgs_lambda, float _fgs_sigma) +{ + grid_step = _grid_step; + k = _k; + sigma = _sigma; + use_post_proc = _use_post_proc; + fgs_lambda = _fgs_lambda; + fgs_sigma = _fgs_sigma; +} + +void OpticalFlowSparseToDense::calc(InputArray I0, InputArray I1, InputOutputArray flow) +{ + calcOpticalFlowSparseToDense(I0,I1,flow,grid_step,k,sigma,use_post_proc,fgs_lambda,fgs_sigma); +} + +void OpticalFlowSparseToDense::collectGarbage() {} + +Ptr createOptFlow_SparseToDense() +{ + return makePtr(8,128,0.05f,true,500.0f,1.5f); +} } } diff --git a/modules/optflow/src/sparsetodenseflow.cpp b/modules/optflow/src/sparsetodenseflow.cpp new file mode 100644 index 000000000..ad896b228 --- /dev/null +++ b/modules/optflow/src/sparsetodenseflow.cpp @@ -0,0 +1,112 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#include "opencv2/ximgproc/sparse_match_interpolator.hpp" +using namespace std; + +namespace cv { +namespace optflow { + +CV_EXPORTS_W void calcOpticalFlowSparseToDense(InputArray from, InputArray to, OutputArray flow, + int grid_step, int k, + float sigma, bool use_post_proc, + float fgs_lambda, float fgs_sigma) +{ + CV_Assert( grid_step>1 && k>3 && sigma>0.0001f && fgs_lambda>1.0f && fgs_sigma>0.01f ); + CV_Assert( !from.empty() && from.depth() == CV_8U && (from.channels() == 3 || from.channels() == 1) ); + CV_Assert( !to .empty() && to .depth() == CV_8U && (to .channels() == 3 || to .channels() == 1) ); + CV_Assert( from.sameSize(to) ); + + Mat prev = from.getMat(); + Mat cur = to.getMat(); + Mat prev_grayscale, cur_grayscale; + + while( (prev.cols/grid_step)*(prev.rows/grid_step) > SHRT_MAX ) //ensure that the number matches is not too big + grid_step*=2; + + if(prev.channels()==3) + { + cvtColor(prev,prev_grayscale,COLOR_BGR2GRAY); + cvtColor(cur, cur_grayscale, COLOR_BGR2GRAY); + } + else + { + prev.copyTo(prev_grayscale); + cur .copyTo(cur_grayscale); + } + + vector points; + vector dst_points; + vector status; + vector err; + vector points_filtered, dst_points_filtered; + + for(int i=0;i gd = ximgproc::createEdgeAwareInterpolator(); + gd->setK(k); + gd->setSigma(sigma); + gd->setUsePostProcessing(use_post_proc); + gd->setFGSLambda(fgs_lambda); + gd->setFGSSigma (fgs_sigma); + gd->interpolate(prev,points_filtered,cur,dst_points_filtered,dense_flow); +} + +} +} \ No newline at end of file diff --git a/modules/optflow/test/test_sparsetodenseflow.cpp b/modules/optflow/test/test_sparsetodenseflow.cpp new file mode 100644 index 000000000..a8d645d30 --- /dev/null +++ b/modules/optflow/test/test_sparsetodenseflow.cpp @@ -0,0 +1,146 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "test_precomp.hpp" + +#include + +using namespace std; +using namespace cv; + +/* ///////////////////// sparsetodenseflow_test ///////////////////////// */ + +class CV_SparseToDenseFlowTest : public cvtest::BaseTest +{ +protected: + void run(int); +}; + +static bool isFlowCorrect(float u) { + return !cvIsNaN(u) && (fabs(u) < 1e9); +} + +static float calc_rmse(Mat flow1, Mat flow2) { + float sum = 0; + int counter = 0; + const int rows = flow1.rows; + const int cols = flow1.cols; + + for (int y = 0; y < rows; ++y) { + for (int x = 0; x < cols; ++x) { + Vec2f flow1_at_point = flow1.at(y, x); + Vec2f flow2_at_point = flow2.at(y, x); + + float u1 = flow1_at_point[0]; + float v1 = flow1_at_point[1]; + float u2 = flow2_at_point[0]; + float v2 = flow2_at_point[1]; + + if (isFlowCorrect(u1) && isFlowCorrect(u2) && isFlowCorrect(v1) && isFlowCorrect(v2)) { + sum += (u1-u2)*(u1-u2) + (v1-v2)*(v1-v2); + counter++; + } + } + } + return (float)sqrt(sum / (1e-9 + counter)); +} + +void CV_SparseToDenseFlowTest::run(int) { + const float MAX_RMSE = 0.6f; + const string frame1_path = ts->get_data_path() + "optflow/RubberWhale1.png"; + const string frame2_path = ts->get_data_path() + "optflow/RubberWhale2.png"; + const string gt_flow_path = ts->get_data_path() + "optflow/RubberWhale.flo"; + + Mat frame1 = imread(frame1_path); + Mat frame2 = imread(frame2_path); + + if (frame1.empty()) { + ts->printf(cvtest::TS::LOG, "could not read image %s\n", frame2_path.c_str()); + ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA); + return; + } + + if (frame2.empty()) { + ts->printf(cvtest::TS::LOG, "could not read image %s\n", frame2_path.c_str()); + ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA); + return; + } + + if (frame1.rows != frame2.rows && frame1.cols != frame2.cols) { + ts->printf(cvtest::TS::LOG, "images should be of equal sizes (%s and %s)", + frame1_path.c_str(), frame2_path.c_str()); + ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA); + return; + } + + if (frame1.type() != 16 || frame2.type() != 
16) { + ts->printf(cvtest::TS::LOG, "images should be of equal type CV_8UC3 (%s and %s)", + frame1_path.c_str(), frame2_path.c_str()); + ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA); + return; + } + + Mat flow_gt = optflow::readOpticalFlow(gt_flow_path); + if(flow_gt.empty()) { + ts->printf(cvtest::TS::LOG, "error while reading flow data from file %s", + gt_flow_path.c_str()); + ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA); + return; + } + + Mat flow; + optflow::calcOpticalFlowSparseToDense(frame1, frame2, flow); + + float rmse = calc_rmse(flow_gt, flow); + + ts->printf(cvtest::TS::LOG, "Optical flow estimation RMSE for SparseToDenseFlow algorithm : %lf\n", + rmse); + + if (rmse > MAX_RMSE) { + ts->printf( cvtest::TS::LOG, + "Too big rmse error : %lf ( >= %lf )\n", rmse, MAX_RMSE); + ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); + return; + } +} + + +TEST(Video_OpticalFlowSparseToDenseFlow, accuracy) { CV_SparseToDenseFlowTest test; test.safe_run(); } diff --git a/modules/ximgproc/doc/ximgproc.bib b/modules/ximgproc/doc/ximgproc.bib index 322b37121..75a59331b 100644 --- a/modules/ximgproc/doc/ximgproc.bib +++ b/modules/ximgproc/doc/ximgproc.bib @@ -77,3 +77,11 @@ year={2008}, organization={ACM} } + +@inproceedings{Revaud2015, + title={EpicFlow: Edge-Preserving Interpolation of Correspondences for Optical Flow}, + author={Revaud, Jerome and Weinzaepfel, Philippe and Harchaoui, Zaid and Schmid, Cordelia}, + booktitle={Computer Vision and Pattern Recognition (CVPR), IEEE Conference on}, + pages={1164--1172}, + year={2015} +} diff --git a/modules/ximgproc/include/opencv2/ximgproc.hpp b/modules/ximgproc/include/opencv2/ximgproc.hpp index 41b726096..cb55da078 100644 --- a/modules/ximgproc/include/opencv2/ximgproc.hpp +++ b/modules/ximgproc/include/opencv2/ximgproc.hpp @@ -39,6 +39,7 @@ #include "ximgproc/edge_filter.hpp" #include "ximgproc/disparity_filter.hpp" +#include "ximgproc/sparse_match_interpolator.hpp" #include "ximgproc/structured_edge_detection.hpp" #include "ximgproc/seeds.hpp" #include "ximgproc/fast_hough_transform.hpp" diff --git a/modules/ximgproc/include/opencv2/ximgproc/sparse_match_interpolator.hpp b/modules/ximgproc/include/opencv2/ximgproc/sparse_match_interpolator.hpp new file mode 100644 index 000000000..4821aba25 --- /dev/null +++ b/modules/ximgproc/include/opencv2/ximgproc/sparse_match_interpolator.hpp @@ -0,0 +1,132 @@ +/* + * By downloading, copying, installing or using the software you agree to this license. + * If you do not agree to this license, do not download, install, + * copy or use the software. + * + * + * License Agreement + * For Open Source Computer Vision Library + * (3 - clause BSD License) + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met : + * + * *Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and / or other materials provided with the distribution. + * + * * Neither the names of the copyright holders nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * This software is provided by the copyright holders and contributors "as is" and + * any express or implied warranties, including, but not limited to, the implied + * warranties of merchantability and fitness for a particular purpose are disclaimed. + * In no event shall copyright holders or contributors be liable for any direct, + * indirect, incidental, special, exemplary, or consequential damages + * (including, but not limited to, procurement of substitute goods or services; + * loss of use, data, or profits; or business interruption) however caused + * and on any theory of liability, whether in contract, strict liability, + * or tort(including negligence or otherwise) arising in any way out of + * the use of this software, even if advised of the possibility of such damage. + */ + +#ifndef __OPENCV_SPARSEMATCHINTERPOLATOR_HPP__ +#define __OPENCV_SPARSEMATCHINTERPOLATOR_HPP__ +#ifdef __cplusplus + +#include + +namespace cv { +namespace ximgproc { + +//! @addtogroup ximgproc_filters +//! @{ + +/** @brief Main interface for all filters, that take sparse matches as an +input and produce a dense per-pixel matching (optical flow) as an output. + */ +class CV_EXPORTS_W SparseMatchInterpolator : public Algorithm +{ +public: + /** @brief Interpolate input sparse matches. + + @param from_image first of the two matched images, 8-bit single-channel or three-channel. + + @param from_points points of the from_image for which there are correspondences in the + to_image (Point2f vector, size shouldn't exceed 32767) + + @param to_image second of the two matched images, 8-bit single-channel or three-channel. + + @param to_points points in the to_image corresponding to from_points + (Point2f vector, size shouldn't exceed 32767) + + @param dense_flow output dense matching (two-channel CV_32F image) + */ + CV_WRAP virtual void interpolate(InputArray from_image, InputArray from_points, + InputArray to_image , InputArray to_points, + OutputArray dense_flow) = 0; +}; + +/** @brief Sparse match interpolation algorithm based on modified locally-weighted affine +estimator from @cite Revaud2015 and Fast Global Smoother as post-processing filter. + */ +class CV_EXPORTS_W EdgeAwareInterpolator : public SparseMatchInterpolator +{ +public: + /** @brief K is a number of nearest-neighbor matches considered, when fitting a locally affine + model. Usually it should be around 128. However, lower values would make the interpolation + noticeably faster. + */ + CV_WRAP virtual void setK(int _k) = 0; + /** @see setK */ + CV_WRAP virtual int getK() = 0; + + /** @brief Sigma is a parameter defining how fast the weights decrease in the locally-weighted affine + fitting. Higher values can help preserve fine details, lower values can help to get rid of noise in the + output flow. + */ + CV_WRAP virtual void setSigma(float _sigma) = 0; + /** @see setSigma */ + CV_WRAP virtual float getSigma() = 0; + + /** @brief Lambda is a parameter defining the weight of the edge-aware term in geodesic distance, + should be in the range of 0 to 1000. + */ + CV_WRAP virtual void setLambda(float _lambda) = 0; + /** @see setLambda */ + CV_WRAP virtual float getLambda() = 0; + + /** @brief Sets whether the fastGlobalSmootherFilter() post-processing is employed. It is turned on by + default. + */ + CV_WRAP virtual void setUsePostProcessing(bool _use_post_proc) = 0; + /** @see setUsePostProcessing */ + CV_WRAP virtual bool getUsePostProcessing() = 0; + + /** @brief Sets the respective fastGlobalSmootherFilter() parameter. 
+ */ + CV_WRAP virtual void setFGSLambda(float _lambda) = 0; + /** @see setFGSLambda */ + CV_WRAP virtual float getFGSLambda() = 0; + + /** @see setFGSLambda */ + CV_WRAP virtual void setFGSSigma(float _sigma) = 0; + /** @see setFGSLambda */ + CV_WRAP virtual float getFGSSigma() = 0; +}; + +/** @brief Factory method that creates an instance of the +EdgeAwareInterpolator. +*/ +CV_EXPORTS_W +Ptr createEdgeAwareInterpolator(); + +//! @} +} +} +#endif +#endif diff --git a/modules/ximgproc/src/sparse_match_interpolators.cpp b/modules/ximgproc/src/sparse_match_interpolators.cpp new file mode 100644 index 000000000..d5d8d1f1d --- /dev/null +++ b/modules/ximgproc/src/sparse_match_interpolators.cpp @@ -0,0 +1,881 @@ +/* + * By downloading, copying, installing or using the software you agree to this license. + * If you do not agree to this license, do not download, install, + * copy or use the software. + * + * + * License Agreement + * For Open Source Computer Vision Library + * (3 - clause BSD License) + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met : + * + * *Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and / or other materials provided with the distribution. + * + * * Neither the names of the copyright holders nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * This software is provided by the copyright holders and contributors "as is" and + * any express or implied warranties, including, but not limited to, the implied + * warranties of merchantability and fitness for a particular purpose are disclaimed. + * In no event shall copyright holders or contributors be liable for any direct, + * indirect, incidental, special, exemplary, or consequential damages + * (including, but not limited to, procurement of substitute goods or services; + * loss of use, data, or profits; or business interruption) however caused + * and on any theory of liability, whether in contract, strict liability, + * or tort(including negligence or otherwise) arising in any way out of + * the use of this software, even if advised of the possibility of such damage. 
+ */ + +#include "precomp.hpp" +#include "opencv2/ximgproc/sparse_match_interpolator.hpp" +#include +#include + +using namespace std; + +#define INF 1E+20F + +namespace cv { +namespace ximgproc { + +struct SparseMatch +{ + Point2f reference_image_pos; + Point2f target_image_pos; + SparseMatch(){} + SparseMatch(Point2f ref_point, Point2f target_point): reference_image_pos(ref_point), target_image_pos(target_point) {} +}; + +bool operator<(const SparseMatch& lhs,const SparseMatch& rhs); + +void weightedLeastSquaresAffineFit(short* labels, float* weights, int count, float lambda, SparseMatch* matches, Mat& dst); +void generateHypothesis(short* labels, int count, RNG& rng, unsigned char* is_used, SparseMatch* matches, Mat& dst); +void verifyHypothesis(short* labels, float* weights, int count, SparseMatch* matches, float eps, float lambda, Mat& hypothesis_transform, Mat& old_transform, float& old_weighted_num_inliers); + +struct node +{ + float dist; + short label; + node() {} + node(short l,float d): dist(d), label(l) {} +}; + +class EdgeAwareInterpolatorImpl : public EdgeAwareInterpolator +{ +public: + static Ptr create(); + void interpolate(InputArray from_image, InputArray from_points, InputArray to_image, InputArray to_points, OutputArray dense_flow); + +protected: + int w,h; + int match_num; + + //internal buffers: + vector* g; + Mat labels; + Mat NNlabels; + Mat NNdistances; + + //tunable parameters: + float lambda; + int k; + float sigma; + bool use_post_proc; + float fgs_lambda; + float fgs_sigma; + + // static parameters: + static const int distance_transform_num_iter = 1; + static const int ransac_interpolation_num_iter = 1; + float regularization_coef; + static const int ransac_num_stripes = 4; + RNG rngs[ransac_num_stripes]; + + void init(); + void preprocessData(Mat& src, vector& matches); + void computeGradientMagnitude(Mat& src, Mat& dst); + void geodesicDistanceTransform(Mat& distances, Mat& cost_map); + void buildGraph(Mat& distances, Mat& cost_map); + void ransacInterpolation(vector& matches, Mat& dst_dense_flow); + +protected: + struct GetKNNMatches_ParBody : public ParallelLoopBody + { + EdgeAwareInterpolatorImpl* inst; + int num_stripes; + int stripe_sz; + + GetKNNMatches_ParBody(EdgeAwareInterpolatorImpl& _inst, int _num_stripes); + void operator () (const Range& range) const; + }; + + struct RansacInterpolation_ParBody : public ParallelLoopBody + { + EdgeAwareInterpolatorImpl* inst; + Mat* transforms; + float* weighted_inlier_nums; + float* eps; + SparseMatch* matches; + int num_stripes; + int stripe_sz; + int inc; + + RansacInterpolation_ParBody(EdgeAwareInterpolatorImpl& _inst, Mat* _transforms, float* _weighted_inlier_nums, float* _eps, SparseMatch* _matches, int _num_stripes, int _inc); + void operator () (const Range& range) const; + }; + +public: + void setK(int _k) {k=_k;} + int getK() {return k;} + void setSigma(float _sigma) {sigma=_sigma;} + float getSigma() {return sigma;} + void setLambda(float _lambda) {lambda=_lambda;} + float getLambda() {return lambda;} + void setUsePostProcessing(bool _use_post_proc) {use_post_proc=_use_post_proc;} + bool getUsePostProcessing() {return use_post_proc;} + void setFGSLambda(float _lambda) {fgs_lambda=_lambda;} + float getFGSLambda() {return fgs_lambda;} + void setFGSSigma(float _sigma) {fgs_sigma = _sigma;} + float getFGSSigma() {return fgs_sigma;} +}; + +void EdgeAwareInterpolatorImpl::init() +{ + lambda = 999.0f; + k = 128; + sigma = 0.05f; + use_post_proc = true; + fgs_lambda = 500.0f; + fgs_sigma = 1.5f; + 
regularization_coef = 0.01f; +} + +Ptr EdgeAwareInterpolatorImpl::create() +{ + EdgeAwareInterpolatorImpl *eai = new EdgeAwareInterpolatorImpl(); + eai->init(); + return Ptr(eai); +} + +void EdgeAwareInterpolatorImpl::interpolate(InputArray from_image, InputArray from_points, InputArray, InputArray to_points, OutputArray dense_flow) +{ + CV_Assert( !from_image.empty() && (from_image.depth() == CV_8U) && (from_image.channels() == 3 || from_image.channels() == 1) ); + CV_Assert( !from_points.empty() && from_points.isVector() && + !to_points .empty() && to_points .isVector() && + from_points.sameSize(to_points) ); + + w = from_image.cols(); + h = from_image.rows(); + + vector from_vector = *(const vector*)from_points.getObj(); + vector to_vector = *(const vector*)to_points .getObj(); + vector matches_vector(from_vector.size()); + for(unsigned int i=0;i[match_num]; + + preprocessData(src,matches_vector); + + dense_flow.create(from_image.size(),CV_32FC2); + Mat dst = dense_flow.getMat(); + ransacInterpolation(matches_vector,dst); + if(use_post_proc) + fastGlobalSmootherFilter(src,dst,dst,fgs_lambda,fgs_sigma); + + delete[] g; +} + +void EdgeAwareInterpolatorImpl::preprocessData(Mat& src, vector& matches) +{ + Mat distances(h,w,CV_32F); + Mat cost_map (h,w,CV_32F); + distances = Scalar(INF); + + int x,y; + for(unsigned int i=0;i(y,x) = 0.0f; + labels.at(y,x) = (short)i; + } + + computeGradientMagnitude(src,cost_map); + cost_map = (1000.0f-lambda) + lambda*cost_map; + + geodesicDistanceTransform(distances,cost_map); + buildGraph(distances,cost_map); + parallel_for_(Range(0,getNumThreads()),GetKNNMatches_ParBody(*this,getNumThreads())); +} + +void EdgeAwareInterpolatorImpl::computeGradientMagnitude(Mat& src, Mat& dst) +{ + Mat dx,dy; + Sobel(src, dx, CV_16S, 1, 0); + Sobel(src, dy, CV_16S, 0, 1); + float norm_coef = src.channels()*4.0f*255.0f; + + if(src.channels()==1) + { + for(int i=0;i(i); + short* dy_row = dy.ptr(i); + float* dst_row = dst.ptr(i); + + for(int j=0;j(i); + Vec3s* dy_row = dy.ptr(i); + float* dst_row = dst.ptr(i); + + for(int j=0;jd){\ + cur_dist=d;\ + cur_label = prev_label;}\ +} + + for(int it=0;it(0); + label_row = labels.ptr(0); + cost_row = cost_map.ptr(0); + for(j=1;j(i); + dist_row_prev = distances.ptr(i-1); + + label_row = labels.ptr(i); + label_row_prev = labels.ptr(i-1); + + cost_row = cost_map.ptr(i); + cost_row_prev = cost_map.ptr(i-1); + + j=0; + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j] ,label_row_prev[j] ,cost_row_prev[j] ,c1); + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j+1],label_row_prev[j+1],cost_row_prev[j+1],c2); + j++; + for(;j(h-1); + label_row = labels.ptr(h-1); + cost_row = cost_map.ptr(h-1); + for(j=w-2;j>=0;j--) + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row[j+1],label_row[j+1],cost_row[j+1],c1); + + for(i=h-2;i>=0;i--) + { + dist_row = distances.ptr(i); + dist_row_prev = distances.ptr(i+1); + + label_row = labels.ptr(i); + label_row_prev = labels.ptr(i+1); + + cost_row = cost_map.ptr(i); + cost_row_prev = cost_map.ptr(i+1); + + j=w-1; + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j] ,label_row_prev[j] ,cost_row_prev[j] ,c1); + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j-1],label_row_prev[j-1],cost_row_prev[j-1],c2); + j--; + for(;j>0;j--) + { + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row[j+1] ,label_row[j+1] ,cost_row[j+1] ,c1); + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j+1],label_row_prev[j+1],cost_row_prev[j+1],c2); + 
CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j] ,label_row_prev[j] ,cost_row_prev[j] ,c1); + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j-1],label_row_prev[j-1],cost_row_prev[j-1],c2); + } + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row[j+1] ,label_row[j+1] ,cost_row[j+1] ,c1); + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j+1],label_row_prev[j+1],cost_row_prev[j+1],c2); + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j] ,label_row_prev[j] ,cost_row_prev[j] ,c1); + } + } +#undef CHECK +} + +void EdgeAwareInterpolatorImpl::buildGraph(Mat& distances, Mat& cost_map) +{ + float *dist_row, *cost_row; + float *dist_row_prev, *cost_row_prev; + short *label_row; + short *label_row_prev; + int i,j; + const float c1 = 1.0f/2.0f; + const float c2 = sqrt(2.0f)/2.0f; + float d; + bool found; + +#define CHECK(cur_dist,cur_label,cur_cost,prev_dist,prev_label,prev_cost,coef)\ + if(cur_label!=prev_label)\ + {\ + d = prev_dist + cur_dist + coef*(cur_cost+prev_cost);\ + found = false;\ + for(unsigned int n=0;n(0); + label_row = labels.ptr(0); + cost_row = cost_map.ptr(0); + for(j=1;j(i); + dist_row_prev = distances.ptr(i-1); + + label_row = labels.ptr(i); + label_row_prev = labels.ptr(i-1); + + cost_row = cost_map.ptr(i); + cost_row_prev = cost_map.ptr(i-1); + + j=0; + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j] ,label_row_prev[j] ,cost_row_prev[j] ,c1); + CHECK(dist_row[j],label_row[j],cost_row[j],dist_row_prev[j+1],label_row_prev[j+1],cost_row_prev[j+1],c2); + j++; + for(;j>1 + node* heap; + short* heap_pos; + node tmp_node; + short size; + short num_labels; + + nodeHeap(short _num_labels) + { + num_labels = _num_labels; + heap = new node[num_labels+1]; + heap[0] = node(-1,-1.0f); + heap_pos = new short[num_labels]; + memset(heap_pos,0,sizeof(short)*num_labels); + size=0; + } + + ~nodeHeap() + { + delete[] heap; + delete[] heap_pos; + } + + void clear() + { + size=0; + memset(heap_pos,0,sizeof(short)*num_labels); + } + + inline bool empty() + { + return (size==0); + } + + inline void nodeSwap(short idx1, short idx2) + { + heap_pos[heap[idx1].label] = idx2; + heap_pos[heap[idx2].label] = idx1; + + tmp_node = heap[idx1]; + heap[idx1] = heap[idx2]; + heap[idx2] = tmp_node; + } + + void add(node n) + { + size++; + heap[size] = n; + heap_pos[n.label] = size; + short i = size; + short parent_i = i>>1; + while(heap[i].dist>1; + } + } + + node getMin() + { + node res = heap[1]; + heap_pos[res.label] = 0; + + short i=1; + short left,right; + while( (left=i<<1) < size ) + { + right = left+1; + if(heap[left].dist>1; + while(heap[i].dist>1; + } + + size--; + return res; + } + + //checks if node is already in the heap + //if not - add it + //if it is - update it with the min dist of the two + void updateNode(node n) + { + if(heap_pos[n.label]) + { + short i = heap_pos[n.label]; + heap[i].dist = min(heap[i].dist,n.dist); + short parent_i = i>>1; + while(heap[i].dist>1; + } + } + else + add(n); + } +}; + +EdgeAwareInterpolatorImpl::GetKNNMatches_ParBody::GetKNNMatches_ParBody(EdgeAwareInterpolatorImpl& _inst, int _num_stripes): +inst(&_inst),num_stripes(_num_stripes) +{ + stripe_sz = (int)ceil(inst->match_num/(double)num_stripes); +} + +void EdgeAwareInterpolatorImpl::GetKNNMatches_ParBody::operator() (const Range& range) const +{ + int start = std::min(range.start * stripe_sz, inst->match_num); + int end = std::min(range.end * stripe_sz, inst->match_num); + nodeHeap q((short)inst->match_num); + int num_expanded_vertices; + unsigned char* 
expanded_flag = new unsigned char[inst->match_num]; + node* neighbors; + + for(int i=start;ig[i].empty()) + continue; + + num_expanded_vertices = 0; + memset(expanded_flag,0,inst->match_num); + q.clear(); + q.add(node((short)i,0.0f)); + short* NNlabels_row = inst->NNlabels.ptr(i); + float* NNdistances_row = inst->NNdistances.ptr(i); + while(num_expanded_verticesk && !q.empty()) + { + node vert_for_expansion = q.getMin(); + expanded_flag[vert_for_expansion.label] = 1; + + //write the expanded vertex to the dst: + NNlabels_row[num_expanded_vertices] = vert_for_expansion.label; + NNdistances_row[num_expanded_vertices] = vert_for_expansion.dist; + num_expanded_vertices++; + + //update the heap: + neighbors = &inst->g[vert_for_expansion.label].front(); + for(int j=0;j<(int)inst->g[vert_for_expansion.label].size();j++) + { + if(!expanded_flag[neighbors[j].label]) + q.updateNode(node(neighbors[j].label,vert_for_expansion.dist+neighbors[j].dist)); + } + } + } + delete[] expanded_flag; +} + +void weightedLeastSquaresAffineFit(short* labels, float* weights, int count, float lambda, SparseMatch* matches, Mat& dst) +{ + double sa[6][6]={{0.}}, sb[6]={0.}; + Mat A (6, 6, CV_64F, &sa[0][0]), + B (6, 1, CV_64F, sb), + MM(1, 6, CV_64F); + Point2f a,b; + float w; + + for( int i = 0; i < count; i++ ) + { + a = matches[labels[i]].reference_image_pos; + b = matches[labels[i]].target_image_pos; + w = weights[i]; + + sa[0][0] += w*a.x*a.x; + sa[0][1] += w*a.y*a.x; + sa[0][2] += w*a.x; + sa[1][1] += w*a.y*a.y; + sa[1][2] += w*a.y; + sa[2][2] += w; + + sb[0] += w*a.x*b.x; + sb[1] += w*a.y*b.x; + sb[2] += w*b.x; + sb[3] += w*a.x*b.y; + sb[4] += w*a.y*b.y; + sb[5] += w*b.y; + } + sa[0][0] += lambda; + sa[1][1] += lambda; + + sa[3][4] = sa[4][3] = sa[1][0] = sa[0][1]; + sa[3][5] = sa[5][3] = sa[2][0] = sa[0][2]; + sa[4][5] = sa[5][4] = sa[2][1] = sa[1][2]; + + sa[3][3] = sa[0][0]; + sa[4][4] = sa[1][1]; + sa[5][5] = sa[2][2]; + + sb[0] += lambda; + sb[4] += lambda; + + solve(A, B, MM, DECOMP_EIG); + MM.reshape(2,3).convertTo(dst,CV_32F); +} + +void generateHypothesis(short* labels, int count, RNG& rng, unsigned char* is_used, SparseMatch* matches, Mat& dst) +{ + int idx; + Point2f src_points[3]; + Point2f dst_points[3]; + memset(is_used,0,count); + + // randomly get 3 distinct matches + idx = rng.uniform(0,count-2); + is_used[idx] = true; + src_points[0] = matches[labels[idx]].reference_image_pos; + dst_points[0] = matches[labels[idx]].target_image_pos; + + idx = rng.uniform(0,count-1); + if (is_used[idx]) + idx = count-2; + is_used[idx] = true; + src_points[1] = matches[labels[idx]].reference_image_pos; + dst_points[1] = matches[labels[idx]].target_image_pos; + + idx = rng.uniform(0,count); + if (is_used[idx]) + idx = count-1; + is_used[idx] = true; + src_points[2] = matches[labels[idx]].reference_image_pos; + dst_points[2] = matches[labels[idx]].target_image_pos; + + // compute an affine transform: + getAffineTransform(src_points,dst_points).convertTo(dst,CV_32F); +} + +void verifyHypothesis(short* labels, float* weights, int count, SparseMatch* matches, float eps, float lambda, Mat& hypothesis_transform, Mat& old_transform, float& old_weighted_num_inliers) +{ + float* tr = hypothesis_transform.ptr(0); + Point2f a,b; + float weighted_num_inliers = -lambda*((tr[0]-1)*(tr[0]-1)+tr[1]*tr[1]+tr[3]*tr[3]+(tr[4]-1)*(tr[4]-1)); + + for(int i=0;i=old_weighted_num_inliers) + { + old_weighted_num_inliers = weighted_num_inliers; + hypothesis_transform.copyTo(old_transform); + } +} + 
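
The three helpers above implement the per-neighborhood RANSAC that RansacInterpolation_ParBody drives below: sample three matches, build an affine hypothesis with getAffineTransform(), and keep the hypothesis with the best weighted, regularized inlier score. For orientation, a condensed standalone sketch of that pattern follows; the helper names randomAffineHypothesis and inlierScore are illustrative and not part of the patch, and plain unit inlier weights stand in for the exp(-dist/sigma) weights used in verifyHypothesis().

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>
#include <cmath>

// Illustrative only: build one random affine hypothesis from three correspondences.
static cv::Mat randomAffineHypothesis(const std::vector<cv::Point2f>& src,
                                      const std::vector<cv::Point2f>& dst, cv::RNG& rng)
{
    CV_Assert(src.size() >= 3 && src.size() == dst.size());
    cv::Point2f s[3], d[3];
    for (int n = 0; n < 3; n++)
    {
        int idx = rng.uniform(0, (int)src.size()); // no distinctness check, for brevity
        s[n] = src[idx];
        d[n] = dst[idx];
    }
    cv::Mat T;
    cv::getAffineTransform(s, d).convertTo(T, CV_32F); // 2x3 transform, CV_32F
    return T;
}

// Illustrative only: count correspondences mapped within eps (L1 reprojection error)
// by the 2x3 transform T, analogous to verifyHypothesis() above but with unit weights.
static float inlierScore(const cv::Mat& T, const std::vector<cv::Point2f>& src,
                         const std::vector<cv::Point2f>& dst, float eps)
{
    const float* t = T.ptr<float>(0); // contiguous 2x3 matrix, as in the patch
    float score = 0.f;
    for (size_t i = 0; i < src.size(); i++)
    {
        const cv::Point2f a = src[i], b = dst[i];
        const float err = std::abs(t[0]*a.x + t[1]*a.y + t[2] - b.x)
                        + std::abs(t[3]*a.x + t[4]*a.y + t[5] - b.y);
        if (err < eps)
            score += 1.f;
    }
    return score;
}

In the patch itself the best-scoring transform is stored per match, so the forward and backward passes can also propagate hypotheses from already-processed graph neighbors instead of re-sampling from scratch.
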
+EdgeAwareInterpolatorImpl::RansacInterpolation_ParBody::RansacInterpolation_ParBody(EdgeAwareInterpolatorImpl& _inst, Mat* _transforms, float* _weighted_inlier_nums, float* _eps, SparseMatch* _matches, int _num_stripes, int _inc): +inst(&_inst), transforms(_transforms), weighted_inlier_nums(_weighted_inlier_nums), eps(_eps), matches(_matches), num_stripes(_num_stripes), inc(_inc) +{ + stripe_sz = (int)ceil(inst->match_num/(double)num_stripes); +} + +void EdgeAwareInterpolatorImpl::RansacInterpolation_ParBody::operator() (const Range& range) const +{ + if(range.end>range.start+1) + { + for(int n=range.start;nmatch_num); + int end = std::min(range.end * stripe_sz, inst->match_num); + if(inc<0) + { + int tmp = end; + end = start-1; + start = tmp-1; + } + + short* KNNlabels; + float* KNNdistances; + unsigned char* is_used = new unsigned char[inst->k]; + Mat hypothesis_transform; + + short* inlier_labels = new short[inst->k]; + float* inlier_distances = new float[inst->k]; + float* tr; + int num_inliers; + Point2f a,b; + + for(int i=start;i!=end;i+=inc) + { + if(inst->g[i].empty()) + continue; + + KNNlabels = inst->NNlabels.ptr(i); + KNNdistances = inst->NNdistances.ptr(i); + if(inc>0) //forward pass + { + hal::exp(KNNdistances,KNNdistances,inst->k); + + Point2f average = Point2f(0.0f,0.0f); + for(int j=0;jk;j++) + average += matches[KNNlabels[j]].target_image_pos - matches[KNNlabels[j]].reference_image_pos; + average/=inst->k; + float average_dist = 0.0; + Point2f vec; + for(int j=0;jk;j++) + { + vec = (matches[KNNlabels[j]].target_image_pos - matches[KNNlabels[j]].reference_image_pos); + average_dist += abs(vec.x-average.x) + abs(vec.y-average.y); + } + eps[i] = min(0.5f*(average_dist/inst->k),2.0f); + } + + for(int it=0;itransac_interpolation_num_iter;it++) + { + generateHypothesis(KNNlabels,inst->k,inst->rngs[range.start],is_used,matches,hypothesis_transform); + verifyHypothesis(KNNlabels,KNNdistances,inst->k,matches,eps[i],inst->regularization_coef,hypothesis_transform,transforms[i],weighted_inlier_nums[i]); + } + + //propagate hypotheses from neighbors: + node* neighbors = &inst->g[i].front(); + for(int j=0;j<(int)inst->g[i].size();j++) + { + if((inc*neighbors[j].label)<(inc*i) && (inc*neighbors[j].label)>=(inc*start)) //already processed this neighbor + verifyHypothesis(KNNlabels,KNNdistances,inst->k,matches,eps[i],inst->regularization_coef,transforms[neighbors[j].label],transforms[i],weighted_inlier_nums[i]); + } + + if(inc<0) //backward pass + { + // determine inliers and compute a least squares fit: + tr = transforms[i].ptr(0); + num_inliers = 0; + + for(int j=0;jk;j++) + { + a = matches[KNNlabels[j]].reference_image_pos; + b = matches[KNNlabels[j]].target_image_pos; + if(abs(tr[0]*a.x + tr[1]*a.y + tr[2] - b.x) + + abs(tr[3]*a.x + tr[4]*a.y + tr[5] - b.y) < eps[i]) + { + inlier_labels[num_inliers] = KNNlabels[j]; + inlier_distances[num_inliers] = KNNdistances[j]; + num_inliers++; + } + } + + weightedLeastSquaresAffineFit(inlier_labels,inlier_distances,num_inliers,inst->regularization_coef,matches,transforms[i]); + } + } + + delete[] inlier_labels; + delete[] inlier_distances; + delete[] is_used; +} + +void EdgeAwareInterpolatorImpl::ransacInterpolation(vector& matches, Mat& dst_dense_flow) +{ + NNdistances *= (-sigma*sigma); + + Mat* transforms = new Mat[match_num]; + float* weighted_inlier_nums = new float[match_num]; + float* eps = new float[match_num]; + for(int i=0;i(i); + Point2f* dst_row = dst_dense_flow.ptr(i); + for(int j=0;j(0); + dst_row[j] = 
Point2f(tr[0]*j+tr[1]*i+tr[2],tr[3]*j+tr[4]*i+tr[5]) - Point2f((float)j,(float)i); + } + } + + delete[] transforms; + delete[] weighted_inlier_nums; + delete[] eps; +} + +CV_EXPORTS_W +Ptr createEdgeAwareInterpolator() +{ + return Ptr(EdgeAwareInterpolatorImpl::create()); +} + +bool operator<(const SparseMatch& lhs,const SparseMatch& rhs) +{ + if((int)(lhs.reference_image_pos.y+0.5f)!=(int)(rhs.reference_image_pos.y+0.5f)) + return (lhs.reference_image_pos.y + +namespace cvtest +{ + +using namespace std; +using namespace std::tr1; +using namespace testing; +using namespace perf; +using namespace cv; +using namespace cv::ximgproc; + +static string getDataDir() +{ + return cvtest::TS::ptr()->get_data_path(); +} + +const float FLOW_TAG_FLOAT = 202021.25f; +Mat readOpticalFlow( const String& path ) +{ +// CV_Assert(sizeof(float) == 4); + //FIXME: ensure right sizes of int and float - here and in writeOpticalFlow() + + Mat_ flow; + ifstream file(path.c_str(), std::ios_base::binary); + if ( !file.good() ) + return flow; // no file - return empty matrix + + float tag; + file.read((char*) &tag, sizeof(float)); + if ( tag != FLOW_TAG_FLOAT ) + return flow; + + int width, height; + + file.read((char*) &width, 4); + file.read((char*) &height, 4); + + flow.create(height, width); + + for ( int i = 0; i < flow.rows; ++i ) + { + for ( int j = 0; j < flow.cols; ++j ) + { + Point2f u; + file.read((char*) &u.x, sizeof(float)); + file.read((char*) &u.y, sizeof(float)); + if ( !file.good() ) + { + flow.release(); + return flow; + } + + flow(i, j) = u; + } + } + file.close(); + return flow; +} + +CV_ENUM(GuideTypes, CV_8UC1, CV_8UC3) +typedef tuple InterpolatorParams; +typedef TestWithParam InterpolatorTest; + +TEST(InterpolatorTest, ReferenceAccuracy) +{ + double MAX_DIF = 1.0; + double MAX_MEAN_DIF = 1.0 / 256.0; + string dir = getDataDir() + "cv/sparse_match_interpolator"; + + Mat src = imread(getDataDir() + "cv/optflow/RubberWhale1.png",IMREAD_COLOR); + ASSERT_FALSE(src.empty()); + + Mat ref_flow = readOpticalFlow(dir + "/RubberWhale_reference_result.flo"); + ASSERT_FALSE(ref_flow.empty()); + + ifstream file((dir + "/RubberWhale_sparse_matches.txt").c_str()); + float from_x,from_y,to_x,to_y; + vector from_points; + vector to_points; + + while(file >> from_x >> from_y >> to_x >> to_y) + { + from_points.push_back(Point2f(from_x,from_y)); + to_points.push_back(Point2f(to_x,to_y)); + } + + cv::setNumThreads(cv::getNumberOfCPUs()); + Mat res_flow; + + Ptr interpolator = createEdgeAwareInterpolator(); + interpolator->setK(128); + interpolator->setSigma(0.05f); + interpolator->setUsePostProcessing(true); + interpolator->setFGSLambda(500.0f); + interpolator->setFGSSigma(1.5f); + interpolator->interpolate(src,from_points,Mat(),to_points,res_flow); + + EXPECT_LE(cv::norm(res_flow, ref_flow, NORM_INF), MAX_DIF); + EXPECT_LE(cv::norm(res_flow, ref_flow, NORM_L1) , MAX_MEAN_DIF*res_flow.total()); +} + +TEST_P(InterpolatorTest, MultiThreadReproducibility) +{ + if (cv::getNumberOfCPUs() == 1) + return; + + double MAX_DIF = 1.0; + double MAX_MEAN_DIF = 1.0 / 256.0; + int loopsCount = 2; + RNG rng(0); + + InterpolatorParams params = GetParam(); + Size size = get<0>(params); + int guideType = get<1>(params); + + Mat from(size, guideType); + randu(from, 0, 255); + + int num_matches = rng.uniform(5,SHRT_MAX-1); + vector from_points; + vector to_points; + + for(int i=0;i interpolator = createEdgeAwareInterpolator(); + interpolator->setK(K); + interpolator->setSigma(sigma); + interpolator->setUsePostProcessing(true); + 
interpolator->setFGSLambda(FGSlambda);
+        interpolator->setFGSSigma(FGSsigma);
+
+        cv::setNumThreads(cv::getNumberOfCPUs());
+        Mat resMultiThread;
+        interpolator->interpolate(from,from_points,Mat(),to_points,resMultiThread);
+
+        cv::setNumThreads(1);
+        Mat resSingleThread;
+        interpolator->interpolate(from,from_points,Mat(),to_points,resSingleThread);
+
+        EXPECT_LE(cv::norm(resSingleThread, resMultiThread, NORM_INF), MAX_DIF);
+        EXPECT_LE(cv::norm(resSingleThread, resMultiThread, NORM_L1) , MAX_MEAN_DIF*resMultiThread.total());
+    }
+}
+INSTANTIATE_TEST_CASE_P(FullSet,InterpolatorTest, Combine(Values(szODD,szVGA), GuideTypes::all()));
+}
\ No newline at end of file
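
For completeness, here is a minimal usage sketch of the two entry points added by this patch. It is an illustration only: the image paths are placeholders, error handling is minimal, and the parameter values simply repeat the defaults declared above.

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/optflow.hpp>
#include <opencv2/ximgproc/sparse_match_interpolator.hpp>
#include <vector>

int main()
{
    // Placeholder paths; any pair of same-size 8-bit 1- or 3-channel images works.
    cv::Mat from = cv::imread("frame1.png");
    cv::Mat to   = cv::imread("frame2.png");
    if (from.empty() || to.empty())
        return 1;

    // 1) One-call dense flow: PyrLK matches on a regular grid, edge-aware interpolation,
    //    optional fastGlobalSmootherFilter() post-processing.
    cv::Mat flow; // CV_32FC2, same size as 'from'
    cv::optflow::calcOpticalFlowSparseToDense(from, to, flow,
                                              /*grid_step*/ 8, /*k*/ 128, /*sigma*/ 0.05f,
                                              /*use_post_proc*/ true,
                                              /*fgs_lambda*/ 500.0f, /*fgs_sigma*/ 1.5f);

    // 2) Interpolating custom sparse matches directly (at most SHRT_MAX correspondences).
    std::vector<cv::Point2f> from_pts, to_pts;
    // ... fill from_pts/to_pts with correspondences from any sparse matcher ...
    if (!from_pts.empty())
    {
        cv::Ptr<cv::ximgproc::EdgeAwareInterpolator> interp =
            cv::ximgproc::createEdgeAwareInterpolator();
        interp->setK(128);
        interp->setSigma(0.05f);
        interp->setUsePostProcessing(true);
        cv::Mat dense; // CV_32FC2
        interp->interpolate(from, from_pts, to, to_pts, dense);
    }
    return 0;
}

Note that the current EdgeAwareInterpolatorImpl::interpolate() ignores the second image argument, as its unnamed InputArray parameter in the implementation above shows, so passing Mat() there (as the tests do) is equally valid.
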