diff --git a/modules/ximgproc/src/adaptive_manifold_filter_n.cpp b/modules/ximgproc/src/adaptive_manifold_filter_n.cpp
index 931baa2f9..98ee98f3a 100644
--- a/modules/ximgproc/src/adaptive_manifold_filter_n.cpp
+++ b/modules/ximgproc/src/adaptive_manifold_filter_n.cpp
@@ -40,6 +40,10 @@
 #include
 #include
 
+#ifdef _MSC_VER
+# pragma warning(disable: 4512)
+#endif
+
 namespace
 {
 
@@ -182,24 +186,20 @@ private: /*inline functions*/
         CV_DbgAssert(dst.size() == smallSize);
     }
 
-    void downsample(const vector<Mat>& srcv, vector<Mat>& dstv)
-    {
-        dstv.resize(srcv.size());
-        for (int i = 0; i < (int)srcv.size(); i++)
-            downsample(srcv[i], dstv[i]);
-    }
-
     void upsample(const Mat& src, Mat& dst)
     {
         CV_DbgAssert(src.empty() || src.size() == smallSize);
         resize(src, dst, srcSize, 0, 0);
     }
 
+    void downsample(const vector<Mat>& srcv, vector<Mat>& dstv)
+    {
+        mapParallel(&AdaptiveManifoldFilterN::downsample, srcv, dstv);
+    }
+
     void upsample(const vector<Mat>& srcv, vector<Mat>& dstv)
     {
-        dstv.resize(srcv.size());
-        for (int i = 0; i < (int)srcv.size(); i++)
-            upsample(srcv[i], dstv[i]);
+        mapParallel(&AdaptiveManifoldFilterN::upsample, srcv, dstv);
     }
 
 private:
@@ -230,6 +230,35 @@ private:
     static void computeEigenVector(const vector<Mat>& X, const Mat1b& mask, Mat1f& vecDst, int num_pca_iterations, const Mat1f& vecRand);
 
     static void computeOrientation(const vector<Mat>& X, const Mat1f& vec, Mat1f& dst);
+
+private: /*Parallelization routines*/
+
+    typedef void (AdaptiveManifoldFilterN::*MapFunc)(const Mat& src, Mat& dst);
+
+    void mapParallel(MapFunc func, const vector<Mat>& srcv, vector<Mat>& dstv)
+    {
+        dstv.resize(srcv.size());
+        parallel_for_(Range(0, (int)srcv.size()), MapPrallelLoopBody(this, func, srcv, dstv));
+    }
+
+    struct MapPrallelLoopBody : public cv::ParallelLoopBody
+    {
+        MapPrallelLoopBody(AdaptiveManifoldFilterN *_instancePtr, MapFunc _transform, const vector<Mat>& _srcv, vector<Mat>& _dstv)
+        : instancePtr(_instancePtr), transform(_transform), srcv(_srcv), dstv(_dstv)
+        {}
+
+        AdaptiveManifoldFilterN *instancePtr;
+        MapFunc transform;
+        const vector<Mat>& srcv;
+        vector<Mat>& dstv;
+
+        void operator () (const Range& range) const
+        {
+            for (int i = range.start; i < range.end; i++)
+                (instancePtr->*transform)(srcv[i], dstv[i]);
+        }
+    };
+
 };
 
 CV_INIT_ALGORITHM(AdaptiveManifoldFilterN, "AdaptiveManifoldFilter",
diff --git a/modules/ximgproc/test/test_adaptive_manifold.cpp b/modules/ximgproc/test/test_adaptive_manifold.cpp
index 38f122512..a8970041c 100644
--- a/modules/ximgproc/test/test_adaptive_manifold.cpp
+++ b/modules/ximgproc/test/test_adaptive_manifold.cpp
@@ -67,6 +67,8 @@ TEST(AdaptiveManifoldTest, SplatSurfaceAccuracy)
 {
     RNG rnd(0);
 
+    cv::setNumThreads(cv::getNumberOfCPUs());
+
     for (int i = 0; i < 10; i++)
     {
         Size sz(rnd.uniform(512, 1024), rnd.uniform(512, 1024));
@@ -126,6 +128,8 @@ TEST(AdaptiveManifoldTest, AuthorsReferenceAccuracy)
     Mat srcImg = imread(getOpenCVExtraDir() + srcImgPath);
     ASSERT_TRUE(!srcImg.empty());
 
+    cv::setNumThreads(cv::getNumberOfCPUs());
+
     for (int i = 0; i < 3; i++)
     {
         Mat refRes = imread(getOpenCVExtraDir() + refPaths[i]);
@@ -190,9 +194,11 @@ TEST_P(AdaptiveManifoldRefImplTest, RefImplAccuracy)
         double sigma_r = rnd.uniform(0.1, 0.9);
         bool adjust_outliers = (iter % 2 == 0);
 
+        cv::setNumThreads(cv::getNumberOfCPUs());
         Mat res;
         amFilter(guide, src, res, sigma_s, sigma_r, adjust_outliers);
+        cv::setNumThreads(1);
 
         Mat resRef;
         Ptr<AdaptiveManifoldFilter> amf = createAMFilterRefImpl(sigma_s, sigma_r, adjust_outliers);
         amf->filter(src, resRef, guide);