Merge pull request #14013 from alalek:imgproc_dispatch_filter

Alexander Alekhin 6 years ago
commit f3074fd559
Changed files (lines changed):
  1. modules/imgproc/CMakeLists.txt (6)
  2. modules/imgproc/src/bilateral_filter.dispatch.cpp (427)
  3. modules/imgproc/src/bilateral_filter.simd.hpp (392)
  4. modules/imgproc/src/box_filter.dispatch.cpp (557)
  5. modules/imgproc/src/box_filter.simd.hpp (546)
  6. modules/imgproc/src/filter.avx2.cpp (197)
  7. modules/imgproc/src/filter.dispatch.cpp (1432)
  8. modules/imgproc/src/filter.hpp (8)
  9. modules/imgproc/src/filter.simd.hpp (1668)
  10. modules/imgproc/src/fixedpoint.inl.hpp (5)
  11. modules/imgproc/src/median_blur.dispatch.cpp (317)
  12. modules/imgproc/src/median_blur.simd.hpp (288)
  13. modules/imgproc/src/morph.dispatch.cpp (794)
  14. modules/imgproc/src/morph.simd.hpp (846)
  15. modules/imgproc/src/smooth.dispatch.cpp (582)
  16. modules/imgproc/src/smooth.simd.hpp (541)

@@ -1,6 +1,12 @@
set(the_description "Image Processing")
ocv_add_dispatched_file(accum SSE4_1 AVX AVX2)
ocv_add_dispatched_file(bilateral_filter SSE2 AVX2)
ocv_add_dispatched_file(box_filter SSE2 SSE4_1 AVX2)
ocv_add_dispatched_file(filter SSE2 SSE4_1 AVX2)
ocv_add_dispatched_file(color_hsv SSE2 SSE4_1 AVX2)
ocv_add_dispatched_file(color_rgb SSE2 SSE4_1 AVX2)
ocv_add_dispatched_file(color_yuv SSE2 SSE4_1 AVX2)
ocv_add_dispatched_file(median_blur SSE2 SSE4_1 AVX2)
ocv_add_dispatched_file(morph SSE2 SSE4_1 AVX2)
ocv_add_dispatched_file(smooth SSE2 SSE4_1 AVX2)
ocv_define_module(imgproc opencv_core WRAP java python js)
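The ocv_add_dispatched_file(<name> <ISA...>) entries above pair a <name>.dispatch.cpp translation unit with a <name>.simd.hpp body that is recompiled for every listed ISA; the build also generates <name>.simd_declarations.hpp, which defines CV_CPU_DISPATCH_MODES_ALL. A minimal sketch of that pattern, using a hypothetical "example" kernel that is not part of this patch:

// example.simd.hpp -- compiled once per enabled ISA
namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
void exampleKernel(Mat& dst);  // forward declaration, always visible
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
void exampleKernel(Mat& dst)
{
    // SIMD body written with universal intrinsics goes here
}
#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace cv

// example.dispatch.cpp -- picks the best compiled variant at run time
#include "example.simd.hpp"
#include "example.simd_declarations.hpp"  // defines CV_CPU_DISPATCH_MODES_ALL
namespace cv {
void exampleEntryPoint(Mat& dst)
{
    CV_CPU_DISPATCH(exampleKernel, (dst), CV_CPU_DISPATCH_MODES_ALL);
}
} // namespace cv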

@@ -0,0 +1,427 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, 2018, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "bilateral_filter.simd.hpp"
#include "bilateral_filter.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
/****************************************************************************************\
Bilateral Filtering
\****************************************************************************************/
namespace cv {
#ifdef HAVE_OPENCL
static bool ocl_bilateralFilter_8u(InputArray _src, OutputArray _dst, int d,
double sigma_color, double sigma_space,
int borderType)
{
CV_INSTRUMENT_REGION();
#ifdef __ANDROID__
if (ocl::Device::getDefault().isNVidia())
return false;
#endif
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
int i, j, maxk, radius;
if (depth != CV_8U || cn > 4)
return false;
if (sigma_color <= 0)
sigma_color = 1;
if (sigma_space <= 0)
sigma_space = 1;
double gauss_color_coeff = -0.5 / (sigma_color * sigma_color);
double gauss_space_coeff = -0.5 / (sigma_space * sigma_space);
if ( d <= 0 )
radius = cvRound(sigma_space * 1.5);
else
radius = d / 2;
radius = MAX(radius, 1);
d = radius * 2 + 1;
UMat src = _src.getUMat(), dst = _dst.getUMat(), temp;
if (src.u == dst.u)
return false;
copyMakeBorder(src, temp, radius, radius, radius, radius, borderType);
std::vector<float> _space_weight(d * d);
std::vector<int> _space_ofs(d * d);
float * const space_weight = &_space_weight[0];
int * const space_ofs = &_space_ofs[0];
// initialize space-related bilateral filter coefficients
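// (each kernel tap at offset (i, j) within the radius gets the Gaussian weight
//  space_weight[k] = exp(-(i^2 + j^2) / (2 * sigma_space^2)), and space_ofs[k]
//  stores its byte offset into the bordered image `temp`)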
for( i = -radius, maxk = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
double r = std::sqrt((double)i * i + (double)j * j);
if ( r > radius )
continue;
space_weight[maxk] = (float)std::exp(r * r * gauss_space_coeff);
space_ofs[maxk++] = (int)(i * temp.step + j * cn);
}
char cvt[3][40];
String cnstr = cn > 1 ? format("%d", cn) : "";
String kernelName("bilateral");
size_t sizeDiv = 1;
if ((ocl::Device::getDefault().isIntel()) &&
(ocl::Device::getDefault().type() == ocl::Device::TYPE_GPU))
{
//Intel GPU
if (dst.cols % 4 == 0 && cn == 1) // For single channel x4 sized images.
{
kernelName = "bilateral_float4";
sizeDiv = 4;
}
}
ocl::Kernel k(kernelName.c_str(), ocl::imgproc::bilateral_oclsrc,
format("-D radius=%d -D maxk=%d -D cn=%d -D int_t=%s -D uint_t=uint%s -D convert_int_t=%s"
" -D uchar_t=%s -D float_t=%s -D convert_float_t=%s -D convert_uchar_t=%s -D gauss_color_coeff=(float)%f",
radius, maxk, cn, ocl::typeToStr(CV_32SC(cn)), cnstr.c_str(),
ocl::convertTypeStr(CV_8U, CV_32S, cn, cvt[0]),
ocl::typeToStr(type), ocl::typeToStr(CV_32FC(cn)),
ocl::convertTypeStr(CV_32S, CV_32F, cn, cvt[1]),
ocl::convertTypeStr(CV_32F, CV_8U, cn, cvt[2]), gauss_color_coeff));
if (k.empty())
return false;
Mat mspace_weight(1, d * d, CV_32FC1, space_weight);
Mat mspace_ofs(1, d * d, CV_32SC1, space_ofs);
UMat ucolor_weight, uspace_weight, uspace_ofs;
mspace_weight.copyTo(uspace_weight);
mspace_ofs.copyTo(uspace_ofs);
k.args(ocl::KernelArg::ReadOnlyNoSize(temp), ocl::KernelArg::WriteOnly(dst),
ocl::KernelArg::PtrReadOnly(uspace_weight),
ocl::KernelArg::PtrReadOnly(uspace_ofs));
size_t globalsize[2] = { (size_t)dst.cols / sizeDiv, (size_t)dst.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
static void
bilateralFilter_8u( const Mat& src, Mat& dst, int d,
double sigma_color, double sigma_space,
int borderType )
{
CV_INSTRUMENT_REGION();
int cn = src.channels();
int i, j, maxk, radius;
CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && src.data != dst.data );
if( sigma_color <= 0 )
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
if( d <= 0 )
radius = cvRound(sigma_space*1.5);
else
radius = d/2;
radius = MAX(radius, 1);
d = radius*2 + 1;
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
std::vector<float> _color_weight(cn*256);
std::vector<float> _space_weight(d*d);
std::vector<int> _space_ofs(d*d);
float* color_weight = &_color_weight[0];
float* space_weight = &_space_weight[0];
int* space_ofs = &_space_ofs[0];
// initialize color-related bilateral filter coefficients
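// (the LUT is indexed by the absolute intensity difference, summed over channels,
//  so color_weight[diff] = exp(-diff^2 / (2 * sigma_color^2)))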
for( i = 0; i < 256*cn; i++ )
color_weight[i] = (float)std::exp(i*i*gauss_color_coeff);
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
{
j = -radius;
for( ; j <= radius; j++ )
{
double r = std::sqrt((double)i*i + (double)j*j);
if( r > radius )
continue;
space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
space_ofs[maxk++] = (int)(i*temp.step + j*cn);
}
}
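// CV_CPU_DISPATCH forwards to the most specific bilateralFilterInvoker_8u variant that
// was both compiled (see the bilateral_filter entry in modules/imgproc/CMakeLists.txt)
// and is supported by the current CPU; CV_CPU_DISPATCH_MODES_ALL comes from the
// generated bilateral_filter.simd_declarations.hpp included above.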
CV_CPU_DISPATCH(bilateralFilterInvoker_8u, (dst, temp, radius, maxk, space_ofs, space_weight, color_weight),
CV_CPU_DISPATCH_MODES_ALL);
}
static void
bilateralFilter_32f( const Mat& src, Mat& dst, int d,
double sigma_color, double sigma_space,
int borderType )
{
CV_INSTRUMENT_REGION();
int cn = src.channels();
int i, j, maxk, radius;
double minValSrc=-1, maxValSrc=1;
const int kExpNumBinsPerChannel = 1 << 12;
int kExpNumBins = 0;
float lastExpVal = 1.f;
float len, scale_index;
CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) && src.data != dst.data );
if( sigma_color <= 0 )
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
if( d <= 0 )
radius = cvRound(sigma_space*1.5);
else
radius = d/2;
radius = MAX(radius, 1);
d = radius*2 + 1;
// compute the min/max range for the input image (even if multichannel)
minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
if(std::abs(minValSrc - maxValSrc) < FLT_EPSILON)
{
src.copyTo(dst);
return;
}
// temporary copy of the image with borders for easy processing
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
// allocate lookup tables
std::vector<float> _space_weight(d*d);
std::vector<int> _space_ofs(d*d);
float* space_weight = &_space_weight[0];
int* space_ofs = &_space_ofs[0];
// assign a length which is slightly more than needed
len = (float)(maxValSrc - minValSrc) * cn;
kExpNumBins = kExpNumBinsPerChannel * cn;
std::vector<float> _expLUT(kExpNumBins+2);
float* expLUT = &_expLUT[0];
scale_index = kExpNumBins/len;
// initialize the exp LUT
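// (expLUT[idx] approximates exp(-diff^2 / (2 * sigma_color^2)) for a float intensity
//  difference diff, where idx = diff * scale_index; once exp() underflows to zero,
//  the remaining entries are simply zeroed)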
for( i = 0; i < kExpNumBins+2; i++ )
{
if( lastExpVal > 0.f )
{
double val = i / scale_index;
expLUT[i] = (float)std::exp(val * val * gauss_color_coeff);
lastExpVal = expLUT[i];
}
else
expLUT[i] = 0.f;
}
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
double r = std::sqrt((double)i*i + (double)j*j);
if( r > radius || ( i == 0 && j == 0 ) )
continue;
space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn);
}
// parallel_for usage
CV_CPU_DISPATCH(bilateralFilterInvoker_32f, (cn, radius, maxk, space_ofs, temp, dst, scale_index, space_weight, expLUT),
CV_CPU_DISPATCH_MODES_ALL);
}
#ifdef HAVE_IPP
#define IPP_BILATERAL_PARALLEL 1
#ifdef HAVE_IPP_IW
class ipp_bilateralFilterParallel: public ParallelLoopBody
{
public:
ipp_bilateralFilterParallel(::ipp::IwiImage &_src, ::ipp::IwiImage &_dst, int _radius, Ipp32f _valSquareSigma, Ipp32f _posSquareSigma, ::ipp::IwiBorderType _borderType, bool *_ok):
src(_src), dst(_dst)
{
pOk = _ok;
radius = _radius;
valSquareSigma = _valSquareSigma;
posSquareSigma = _posSquareSigma;
borderType = _borderType;
*pOk = true;
}
~ipp_bilateralFilterParallel() {}
virtual void operator() (const Range& range) const CV_OVERRIDE
{
if(*pOk == false)
return;
try
{
::ipp::IwiTile tile = ::ipp::IwiRoi(0, range.start, dst.m_size.width, range.end - range.start);
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, src, dst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), borderType, tile);
}
catch(const ::ipp::IwException &)
{
*pOk = false;
return;
}
}
private:
::ipp::IwiImage &src;
::ipp::IwiImage &dst;
int radius;
Ipp32f valSquareSigma;
Ipp32f posSquareSigma;
::ipp::IwiBorderType borderType;
bool *pOk;
const ipp_bilateralFilterParallel& operator= (const ipp_bilateralFilterParallel&);
};
#endif
static bool ipp_bilateralFilter(Mat &src, Mat &dst, int d, double sigmaColor, double sigmaSpace, int borderType)
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
int radius = IPP_MAX(((d <= 0)?cvRound(sigmaSpace*1.5):d/2), 1);
Ipp32f valSquareSigma = (Ipp32f)((sigmaColor <= 0)?1:sigmaColor*sigmaColor);
Ipp32f posSquareSigma = (Ipp32f)((sigmaSpace <= 0)?1:sigmaSpace*sigmaSpace);
// Acquire data and begin processing
try
{
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiBorderSize borderSize(radius);
::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
if(!ippBorder)
return false;
const int threads = ippiSuggestThreadsNum(iwDst, 2);
if(IPP_BILATERAL_PARALLEL && threads > 1) {
bool ok = true;
Range range(0, (int)iwDst.m_size.height);
ipp_bilateralFilterParallel invoker(iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ippBorder, &ok);
if(!ok)
return false;
parallel_for_(range, invoker, threads*4);
if(!ok)
return false;
} else {
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), ippBorder);
}
}
catch (const ::ipp::IwException &)
{
return false;
}
return true;
#else
CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(d); CV_UNUSED(sigmaColor); CV_UNUSED(sigmaSpace); CV_UNUSED(borderType);
return false;
#endif
}
#endif
void bilateralFilter( InputArray _src, OutputArray _dst, int d,
double sigmaColor, double sigmaSpace,
int borderType )
{
CV_INSTRUMENT_REGION();
_dst.create( _src.size(), _src.type() );
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_bilateralFilter_8u(_src, _dst, d, sigmaColor, sigmaSpace, borderType))
Mat src = _src.getMat(), dst = _dst.getMat();
CV_IPP_RUN_FAST(ipp_bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, borderType));
if( src.depth() == CV_8U )
bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );
else if( src.depth() == CV_32F )
bilateralFilter_32f( src, dst, d, sigmaColor, sigmaSpace, borderType );
else
CV_Error( CV_StsUnsupportedFormat,
"Bilateral filtering is only implemented for 8u and 32f images" );
}
} // namespace
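A minimal usage sketch of the dispatching wrapper above (parameter values are illustrative, not taken from this patch):

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png"), dst;
    if (src.empty())
        return 1;
    // d = 9 pixel neighborhood; sigmaColor/sigmaSpace = 75 smooths flat regions
    // while preserving strong edges.
    cv::bilateralFilter(src, dst, 9, 75.0, 75.0, cv::BORDER_DEFAULT);
    return cv::imwrite("output.png", dst) ? 0 : 1;
}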

@@ -43,18 +43,25 @@
#include "precomp.hpp"
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
/****************************************************************************************\
Bilateral Filtering
\****************************************************************************************/
namespace cv
{
namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
// forward declarations
void bilateralFilterInvoker_8u(
Mat& dst, const Mat& temp, int radius, int maxk,
int* space_ofs, float *space_weight, float *color_weight);
void bilateralFilterInvoker_32f(
int cn, int radius, int maxk, int *space_ofs,
const Mat& temp, Mat& dst, float scale_index, float *space_weight, float *expLUT);
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
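// When this header is included through bilateral_filter.simd_declarations.hpp, the macro
// above is defined, so only the forward declarations are emitted; the implementations
// below are compiled separately for each ISA requested in CMakeLists.txt.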
namespace {
class BilateralFilter_8u_Invoker :
public ParallelLoopBody
{
@@ -68,6 +75,8 @@ public:
virtual void operator() (const Range& range) const CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int i, j, cn = dest->channels(), k;
Size size = dest->size();
@@ -536,161 +545,20 @@ private:
float *space_weight, *color_weight;
};
#ifdef HAVE_OPENCL
static bool ocl_bilateralFilter_8u(InputArray _src, OutputArray _dst, int d,
double sigma_color, double sigma_space,
int borderType)
{
#ifdef __ANDROID__
if (ocl::Device::getDefault().isNVidia())
return false;
#endif
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
int i, j, maxk, radius;
if (depth != CV_8U || cn > 4)
return false;
if (sigma_color <= 0)
sigma_color = 1;
if (sigma_space <= 0)
sigma_space = 1;
double gauss_color_coeff = -0.5 / (sigma_color * sigma_color);
double gauss_space_coeff = -0.5 / (sigma_space * sigma_space);
if ( d <= 0 )
radius = cvRound(sigma_space * 1.5);
else
radius = d / 2;
radius = MAX(radius, 1);
d = radius * 2 + 1;
UMat src = _src.getUMat(), dst = _dst.getUMat(), temp;
if (src.u == dst.u)
return false;
} // namespace anon
copyMakeBorder(src, temp, radius, radius, radius, radius, borderType);
std::vector<float> _space_weight(d * d);
std::vector<int> _space_ofs(d * d);
float * const space_weight = &_space_weight[0];
int * const space_ofs = &_space_ofs[0];
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
double r = std::sqrt((double)i * i + (double)j * j);
if ( r > radius )
continue;
space_weight[maxk] = (float)std::exp(r * r * gauss_space_coeff);
space_ofs[maxk++] = (int)(i * temp.step + j * cn);
}
char cvt[3][40];
String cnstr = cn > 1 ? format("%d", cn) : "";
String kernelName("bilateral");
size_t sizeDiv = 1;
if ((ocl::Device::getDefault().isIntel()) &&
(ocl::Device::getDefault().type() == ocl::Device::TYPE_GPU))
{
//Intel GPU
if (dst.cols % 4 == 0 && cn == 1) // For single channel x4 sized images.
{
kernelName = "bilateral_float4";
sizeDiv = 4;
}
}
ocl::Kernel k(kernelName.c_str(), ocl::imgproc::bilateral_oclsrc,
format("-D radius=%d -D maxk=%d -D cn=%d -D int_t=%s -D uint_t=uint%s -D convert_int_t=%s"
" -D uchar_t=%s -D float_t=%s -D convert_float_t=%s -D convert_uchar_t=%s -D gauss_color_coeff=(float)%f",
radius, maxk, cn, ocl::typeToStr(CV_32SC(cn)), cnstr.c_str(),
ocl::convertTypeStr(CV_8U, CV_32S, cn, cvt[0]),
ocl::typeToStr(type), ocl::typeToStr(CV_32FC(cn)),
ocl::convertTypeStr(CV_32S, CV_32F, cn, cvt[1]),
ocl::convertTypeStr(CV_32F, CV_8U, cn, cvt[2]), gauss_color_coeff));
if (k.empty())
return false;
Mat mspace_weight(1, d * d, CV_32FC1, space_weight);
Mat mspace_ofs(1, d * d, CV_32SC1, space_ofs);
UMat ucolor_weight, uspace_weight, uspace_ofs;
mspace_weight.copyTo(uspace_weight);
mspace_ofs.copyTo(uspace_ofs);
k.args(ocl::KernelArg::ReadOnlyNoSize(temp), ocl::KernelArg::WriteOnly(dst),
ocl::KernelArg::PtrReadOnly(uspace_weight),
ocl::KernelArg::PtrReadOnly(uspace_ofs));
size_t globalsize[2] = { (size_t)dst.cols / sizeDiv, (size_t)dst.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
static void
bilateralFilter_8u( const Mat& src, Mat& dst, int d,
double sigma_color, double sigma_space,
int borderType )
void bilateralFilterInvoker_8u(
Mat& dst, const Mat& temp, int radius, int maxk,
int* space_ofs, float *space_weight, float *color_weight)
{
int cn = src.channels();
int i, j, maxk, radius;
Size size = src.size();
CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && src.data != dst.data );
if( sigma_color <= 0 )
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
if( d <= 0 )
radius = cvRound(sigma_space*1.5);
else
radius = d/2;
radius = MAX(radius, 1);
d = radius*2 + 1;
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
std::vector<float> _color_weight(cn*256);
std::vector<float> _space_weight(d*d);
std::vector<int> _space_ofs(d*d);
float* color_weight = &_color_weight[0];
float* space_weight = &_space_weight[0];
int* space_ofs = &_space_ofs[0];
// initialize color-related bilateral filter coefficients
for( i = 0; i < 256*cn; i++ )
color_weight[i] = (float)std::exp(i*i*gauss_color_coeff);
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
{
j = -radius;
for( ; j <= radius; j++ )
{
double r = std::sqrt((double)i*i + (double)j*j);
if( r > radius )
continue;
space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
space_ofs[maxk++] = (int)(i*temp.step + j*cn);
}
}
CV_INSTRUMENT_REGION();
BilateralFilter_8u_Invoker body(dst, temp, radius, maxk, space_ofs, space_weight, color_weight);
parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16));
parallel_for_(Range(0, dst.rows), body, dst.total()/(double)(1<<16));
}
namespace {
class BilateralFilter_32f_Invoker :
public ParallelLoopBody
{
@@ -705,6 +573,8 @@ public:
virtual void operator() (const Range& range) const CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int i, j, k;
Size size = dest->size();
@@ -1153,216 +1023,18 @@ private:
float scale_index, *space_weight, *expLUT;
};
} // namespace anon
static void
bilateralFilter_32f( const Mat& src, Mat& dst, int d,
double sigma_color, double sigma_space,
int borderType )
void bilateralFilterInvoker_32f(
int cn, int radius, int maxk, int *space_ofs,
const Mat& temp, Mat& dst, float scale_index, float *space_weight, float *expLUT)
{
int cn = src.channels();
int i, j, maxk, radius;
double minValSrc=-1, maxValSrc=1;
const int kExpNumBinsPerChannel = 1 << 12;
int kExpNumBins = 0;
float lastExpVal = 1.f;
float len, scale_index;
Size size = src.size();
CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) && src.data != dst.data );
if( sigma_color <= 0 )
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
if( d <= 0 )
radius = cvRound(sigma_space*1.5);
else
radius = d/2;
radius = MAX(radius, 1);
d = radius*2 + 1;
// compute the min/max range for the input image (even if multichannel)
minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
if(std::abs(minValSrc - maxValSrc) < FLT_EPSILON)
{
src.copyTo(dst);
return;
}
// temporary copy of the image with borders for easy processing
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
// allocate lookup tables
std::vector<float> _space_weight(d*d);
std::vector<int> _space_ofs(d*d);
float* space_weight = &_space_weight[0];
int* space_ofs = &_space_ofs[0];
// assign a length which is slightly more than needed
len = (float)(maxValSrc - minValSrc) * cn;
kExpNumBins = kExpNumBinsPerChannel * cn;
std::vector<float> _expLUT(kExpNumBins+2);
float* expLUT = &_expLUT[0];
scale_index = kExpNumBins/len;
// initialize the exp LUT
for( i = 0; i < kExpNumBins+2; i++ )
{
if( lastExpVal > 0.f )
{
double val = i / scale_index;
expLUT[i] = (float)std::exp(val * val * gauss_color_coeff);
lastExpVal = expLUT[i];
}
else
expLUT[i] = 0.f;
}
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
double r = std::sqrt((double)i*i + (double)j*j);
if( r > radius || ( i == 0 && j == 0 ) )
continue;
space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn);
}
// parallel_for usage
CV_INSTRUMENT_REGION();
BilateralFilter_32f_Invoker body(cn, radius, maxk, space_ofs, temp, dst, scale_index, space_weight, expLUT);
parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16));
parallel_for_(Range(0, dst.rows), body, dst.total()/(double)(1<<16));
}
#ifdef HAVE_IPP
#define IPP_BILATERAL_PARALLEL 1
#ifdef HAVE_IPP_IW
class ipp_bilateralFilterParallel: public ParallelLoopBody
{
public:
ipp_bilateralFilterParallel(::ipp::IwiImage &_src, ::ipp::IwiImage &_dst, int _radius, Ipp32f _valSquareSigma, Ipp32f _posSquareSigma, ::ipp::IwiBorderType _borderType, bool *_ok):
src(_src), dst(_dst)
{
pOk = _ok;
radius = _radius;
valSquareSigma = _valSquareSigma;
posSquareSigma = _posSquareSigma;
borderType = _borderType;
*pOk = true;
}
~ipp_bilateralFilterParallel() {}
virtual void operator() (const Range& range) const CV_OVERRIDE
{
if(*pOk == false)
return;
try
{
::ipp::IwiTile tile = ::ipp::IwiRoi(0, range.start, dst.m_size.width, range.end - range.start);
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, src, dst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), borderType, tile);
}
catch(const ::ipp::IwException &)
{
*pOk = false;
return;
}
}
private:
::ipp::IwiImage &src;
::ipp::IwiImage &dst;
int radius;
Ipp32f valSquareSigma;
Ipp32f posSquareSigma;
::ipp::IwiBorderType borderType;
bool *pOk;
const ipp_bilateralFilterParallel& operator= (const ipp_bilateralFilterParallel&);
};
#endif
static bool ipp_bilateralFilter(Mat &src, Mat &dst, int d, double sigmaColor, double sigmaSpace, int borderType)
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
int radius = IPP_MAX(((d <= 0)?cvRound(sigmaSpace*1.5):d/2), 1);
Ipp32f valSquareSigma = (Ipp32f)((sigmaColor <= 0)?1:sigmaColor*sigmaColor);
Ipp32f posSquareSigma = (Ipp32f)((sigmaSpace <= 0)?1:sigmaSpace*sigmaSpace);
// Acquire data and begin processing
try
{
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiBorderSize borderSize(radius);
::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
if(!ippBorder)
return false;
const int threads = ippiSuggestThreadsNum(iwDst, 2);
if(IPP_BILATERAL_PARALLEL && threads > 1) {
bool ok = true;
Range range(0, (int)iwDst.m_size.height);
ipp_bilateralFilterParallel invoker(iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ippBorder, &ok);
if(!ok)
return false;
parallel_for_(range, invoker, threads*4);
if(!ok)
return false;
} else {
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), ippBorder);
}
}
catch (const ::ipp::IwException &)
{
return false;
}
return true;
#else
CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(d); CV_UNUSED(sigmaColor); CV_UNUSED(sigmaSpace); CV_UNUSED(borderType);
return false;
#endif
}
#endif
}
void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d,
double sigmaColor, double sigmaSpace,
int borderType )
{
CV_INSTRUMENT_REGION();
_dst.create( _src.size(), _src.type() );
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_bilateralFilter_8u(_src, _dst, d, sigmaColor, sigmaSpace, borderType))
Mat src = _src.getMat(), dst = _dst.getMat();
CV_IPP_RUN_FAST(ipp_bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, borderType));
if( src.depth() == CV_8U )
bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );
else if( src.depth() == CV_32F )
bilateralFilter_32f( src, dst, d, sigmaColor, sigmaSpace, borderType );
else
CV_Error( CV_StsUnsupportedFormat,
"Bilateral filtering is only implemented for 8u and 32f images" );
}
/* End of file. */
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace

@@ -0,0 +1,557 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, 2018, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "box_filter.simd.hpp"
#include "box_filter.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
namespace cv {
#ifdef HAVE_OPENCL
static bool ocl_boxFilter3x3_8UC1( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor, int borderType, bool normalize )
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
if (ddepth < 0)
ddepth = sdepth;
if (anchor.x < 0)
anchor.x = ksize.width / 2;
if (anchor.y < 0)
anchor.y = ksize.height / 2;
if ( !(dev.isIntel() && (type == CV_8UC1) &&
(_src.offset() == 0) && (_src.step() % 4 == 0) &&
(_src.cols() % 16 == 0) && (_src.rows() % 2 == 0) &&
(anchor.x == 1) && (anchor.y == 1) &&
(ksize.width == 3) && (ksize.height == 3)) )
return false;
float alpha = 1.0f / (ksize.height * ksize.width);
Size size = _src.size();
size_t globalsize[2] = { 0, 0 };
size_t localsize[2] = { 0, 0 };
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
globalsize[0] = size.width / 16;
globalsize[1] = size.height / 2;
char build_opts[1024];
sprintf(build_opts, "-D %s %s", borderMap[borderType], normalize ? "-D NORMALIZE" : "");
ocl::Kernel kernel("boxFilter3x3_8UC1_cols16_rows2", cv::ocl::imgproc::boxFilter3x3_oclsrc, build_opts);
if (kernel.empty())
return false;
UMat src = _src.getUMat();
_dst.create(size, CV_MAKETYPE(ddepth, cn));
if (!(_dst.offset() == 0 && _dst.step() % 4 == 0))
return false;
UMat dst = _dst.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
idxArg = kernel.set(idxArg, (int)src.step);
idxArg = kernel.set(idxArg, ocl::KernelArg::PtrWriteOnly(dst));
idxArg = kernel.set(idxArg, (int)dst.step);
idxArg = kernel.set(idxArg, (int)dst.rows);
idxArg = kernel.set(idxArg, (int)dst.cols);
if (normalize)
idxArg = kernel.set(idxArg, (float)alpha);
return kernel.run(2, globalsize, (localsize[0] == 0) ? NULL : localsize, false);
}
static bool ocl_boxFilter( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor, int borderType, bool normalize, bool sqr = false )
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
bool doubleSupport = dev.doubleFPConfig() > 0;
if (ddepth < 0)
ddepth = sdepth;
if (cn > 4 || (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F)) ||
_src.offset() % esz != 0 || _src.step() % esz != 0)
return false;
if (anchor.x < 0)
anchor.x = ksize.width / 2;
if (anchor.y < 0)
anchor.y = ksize.height / 2;
int computeUnits = ocl::Device::getDefault().maxComputeUnits();
float alpha = 1.0f / (ksize.height * ksize.width);
Size size = _src.size(), wholeSize;
bool isolated = (borderType & BORDER_ISOLATED) != 0;
borderType &= ~BORDER_ISOLATED;
int wdepth = std::max(CV_32F, std::max(ddepth, sdepth)),
wtype = CV_MAKE_TYPE(wdepth, cn), dtype = CV_MAKE_TYPE(ddepth, cn);
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
size_t globalsize[2] = { (size_t)size.width, (size_t)size.height };
size_t localsize_general[2] = { 0, 1 }, * localsize = NULL;
UMat src = _src.getUMat();
if (!isolated)
{
Point ofs;
src.locateROI(wholeSize, ofs);
}
int h = isolated ? size.height : wholeSize.height;
int w = isolated ? size.width : wholeSize.width;
size_t maxWorkItemSizes[32];
ocl::Device::getDefault().maxWorkItemSizes(maxWorkItemSizes);
int tryWorkItems = (int)maxWorkItemSizes[0];
ocl::Kernel kernel;
if (dev.isIntel() && !(dev.type() & ocl::Device::TYPE_CPU) &&
((ksize.width < 5 && ksize.height < 5 && esz <= 4) ||
(ksize.width == 5 && ksize.height == 5 && cn == 1)))
{
if (w < ksize.width || h < ksize.height)
return false;
// Figure out what vector size to use for loading the pixels.
int pxLoadNumPixels = cn != 1 || size.width % 4 ? 1 : 4;
int pxLoadVecSize = cn * pxLoadNumPixels;
// Figure out how many pixels per work item to compute in X and Y
// directions. Too many and we run out of registers.
int pxPerWorkItemX = 1, pxPerWorkItemY = 1;
if (cn <= 2 && ksize.width <= 4 && ksize.height <= 4)
{
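// Pick the largest of {8, 4, 2, 1} that evenly divides the image width (and {2, 1}
// for the height), so each work item processes a whole number of pixels.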
pxPerWorkItemX = size.width % 8 ? size.width % 4 ? size.width % 2 ? 1 : 2 : 4 : 8;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
else if (cn < 4 || (ksize.width <= 4 && ksize.height <= 4))
{
pxPerWorkItemX = size.width % 2 ? 1 : 2;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
globalsize[0] = size.width / pxPerWorkItemX;
globalsize[1] = size.height / pxPerWorkItemY;
// Need some padding in the private array for pixels
int privDataWidth = roundUp(pxPerWorkItemX + ksize.width - 1, pxLoadNumPixels);
// Make the global size a nice round number so the runtime can pick
// from reasonable choices for the workgroup size
const int wgRound = 256;
globalsize[0] = roundUp(globalsize[0], wgRound);
char build_options[1024], cvt[2][40];
sprintf(build_options, "-D cn=%d "
"-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
"-D PX_LOAD_VEC_SIZE=%d -D PX_LOAD_NUM_PX=%d "
"-D PX_PER_WI_X=%d -D PX_PER_WI_Y=%d -D PRIV_DATA_WIDTH=%d -D %s -D %s "
"-D PX_LOAD_X_ITERATIONS=%d -D PX_LOAD_Y_ITERATIONS=%d "
"-D srcT=%s -D srcT1=%s -D dstT=%s -D dstT1=%s -D WT=%s -D WT1=%s "
"-D convertToWT=%s -D convertToDstT=%s%s%s -D PX_LOAD_FLOAT_VEC_CONV=convert_%s -D OP_BOX_FILTER",
cn, anchor.x, anchor.y, ksize.width, ksize.height,
pxLoadVecSize, pxLoadNumPixels,
pxPerWorkItemX, pxPerWorkItemY, privDataWidth, borderMap[borderType],
isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
privDataWidth / pxLoadNumPixels, pxPerWorkItemY + ksize.height - 1,
ocl::typeToStr(type), ocl::typeToStr(sdepth), ocl::typeToStr(dtype),
ocl::typeToStr(ddepth), ocl::typeToStr(wtype), ocl::typeToStr(wdepth),
ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]),
ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
normalize ? " -D NORMALIZE" : "", sqr ? " -D SQR" : "",
ocl::typeToStr(CV_MAKE_TYPE(wdepth, pxLoadVecSize)) //PX_LOAD_FLOAT_VEC_CONV
);
if (!kernel.create("filterSmall", cv::ocl::imgproc::filterSmall_oclsrc, build_options))
return false;
}
else
{
localsize = localsize_general;
for ( ; ; )
{
int BLOCK_SIZE_X = tryWorkItems, BLOCK_SIZE_Y = std::min(ksize.height * 10, size.height);
while (BLOCK_SIZE_X > 32 && BLOCK_SIZE_X >= ksize.width * 2 && BLOCK_SIZE_X > size.width * 2)
BLOCK_SIZE_X /= 2;
while (BLOCK_SIZE_Y < BLOCK_SIZE_X / 8 && BLOCK_SIZE_Y * computeUnits * 32 < size.height)
BLOCK_SIZE_Y *= 2;
if (ksize.width > BLOCK_SIZE_X || w < ksize.width || h < ksize.height)
return false;
char cvt[2][50];
String opts = format("-D LOCAL_SIZE_X=%d -D BLOCK_SIZE_Y=%d -D ST=%s -D DT=%s -D WT=%s -D convertToDT=%s -D convertToWT=%s"
" -D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d -D %s%s%s%s%s"
" -D ST1=%s -D DT1=%s -D cn=%d",
BLOCK_SIZE_X, BLOCK_SIZE_Y, ocl::typeToStr(type), ocl::typeToStr(CV_MAKE_TYPE(ddepth, cn)),
ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
ocl::convertTypeStr(wdepth, ddepth, cn, cvt[0]),
ocl::convertTypeStr(sdepth, wdepth, cn, cvt[1]),
anchor.x, anchor.y, ksize.width, ksize.height, borderMap[borderType],
isolated ? " -D BORDER_ISOLATED" : "", doubleSupport ? " -D DOUBLE_SUPPORT" : "",
normalize ? " -D NORMALIZE" : "", sqr ? " -D SQR" : "",
ocl::typeToStr(sdepth), ocl::typeToStr(ddepth), cn);
localsize[0] = BLOCK_SIZE_X;
globalsize[0] = divUp(size.width, BLOCK_SIZE_X - (ksize.width - 1)) * BLOCK_SIZE_X;
globalsize[1] = divUp(size.height, BLOCK_SIZE_Y);
kernel.create("boxFilter", cv::ocl::imgproc::boxFilter_oclsrc, opts);
if (kernel.empty())
return false;
size_t kernelWorkGroupSize = kernel.workGroupSize();
if (localsize[0] <= kernelWorkGroupSize)
break;
if (BLOCK_SIZE_X < (int)kernelWorkGroupSize)
return false;
tryWorkItems = (int)kernelWorkGroupSize;
}
}
_dst.create(size, CV_MAKETYPE(ddepth, cn));
UMat dst = _dst.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
idxArg = kernel.set(idxArg, (int)src.step);
int srcOffsetX = (int)((src.offset % src.step) / src.elemSize());
int srcOffsetY = (int)(src.offset / src.step);
int srcEndX = isolated ? srcOffsetX + size.width : wholeSize.width;
int srcEndY = isolated ? srcOffsetY + size.height : wholeSize.height;
idxArg = kernel.set(idxArg, srcOffsetX);
idxArg = kernel.set(idxArg, srcOffsetY);
idxArg = kernel.set(idxArg, srcEndX);
idxArg = kernel.set(idxArg, srcEndY);
idxArg = kernel.set(idxArg, ocl::KernelArg::WriteOnly(dst));
if (normalize)
idxArg = kernel.set(idxArg, (float)alpha);
return kernel.run(2, globalsize, localsize, false);
}
#endif
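// The thin wrappers below are the exported entry points; CV_CPU_DISPATCH routes each
// call to the best per-ISA implementation compiled from box_filter.simd.hpp (see the
// box_filter entry in modules/imgproc/CMakeLists.txt).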
Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(getRowSumFilter, (srcType, sumType, ksize, anchor),
CV_CPU_DISPATCH_MODES_ALL);
}
Ptr<BaseColumnFilter> getColumnSumFilter(int sumType, int dstType, int ksize, int anchor, double scale)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(getColumnSumFilter, (sumType, dstType, ksize, anchor, scale),
CV_CPU_DISPATCH_MODES_ALL);
}
Ptr<FilterEngine> createBoxFilter(int srcType, int dstType, Size ksize,
Point anchor, bool normalize, int borderType)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(createBoxFilter, (srcType, dstType, ksize, anchor, normalize, borderType),
CV_CPU_DISPATCH_MODES_ALL);
}
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_BOX_3x3>(int w, int h) { return w*h < 640 * 480; }
}
static bool openvx_boxfilter(InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType)
{
if (ddepth < 0)
ddepth = CV_8UC1;
if (_src.type() != CV_8UC1 || ddepth != CV_8U || !normalize ||
_src.cols() < 3 || _src.rows() < 3 ||
ksize.width != 3 || ksize.height != 3 ||
(anchor.x >= 0 && anchor.x != 1) ||
(anchor.y >= 0 && anchor.y != 1) ||
ovx::skipSmallImages<VX_KERNEL_BOX_3x3>(_src.cols(), _src.rows()))
return false;
Mat src = _src.getMat();
if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
return false; //Process isolated borders only
vx_enum border;
switch (borderType & ~BORDER_ISOLATED)
{
case BORDER_CONSTANT:
border = VX_BORDER_CONSTANT;
break;
case BORDER_REPLICATE:
border = VX_BORDER_REPLICATE;
break;
default:
return false;
}
_dst.create(src.size(), CV_8UC1);
Mat dst = _dst.getMat();
try
{
ivx::Context ctx = ovx::getOpenVXContext();
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuBox3x3(ctx, ia, ib));
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#if defined(HAVE_IPP)
static bool ipp_boxfilter(Mat &src, Mat &dst, Size ksize, Point anchor, bool normalize, int borderType)
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201801
// Problem with SSE42 optimization for 16s and some 8u modes
if(ipp::getIppTopFeatures() == ippCPUID_SSE42 && (((src.depth() == CV_16S || src.depth() == CV_16U) && (src.channels() == 3 || src.channels() == 4)) || (src.depth() == CV_8U && src.channels() == 3 && (ksize.width > 5 || ksize.height > 5))))
return false;
// Other optimizations has some degradations too
if((((src.depth() == CV_16S || src.depth() == CV_16U) && (src.channels() == 4)) || (src.depth() == CV_8U && src.channels() == 1 && (ksize.width > 5 || ksize.height > 5))))
return false;
#endif
if(!normalize)
return false;
if(!ippiCheckAnchor(anchor, ksize))
return false;
try
{
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiSize iwKSize = ippiGetSize(ksize);
::ipp::IwiBorderSize borderSize(iwKSize);
::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
if(!ippBorder)
return false;
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBox, iwSrc, iwDst, iwKSize, ::ipp::IwDefault(), ippBorder);
}
catch (const ::ipp::IwException &)
{
return false;
}
return true;
#else
CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(ksize); CV_UNUSED(anchor); CV_UNUSED(normalize); CV_UNUSED(borderType);
return false;
#endif
}
#endif
void boxFilter(InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType)
{
CV_INSTRUMENT_REGION();
CV_OCL_RUN(_dst.isUMat() &&
(borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT ||
borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101),
ocl_boxFilter3x3_8UC1(_src, _dst, ddepth, ksize, anchor, borderType, normalize))
CV_OCL_RUN(_dst.isUMat(), ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize))
Mat src = _src.getMat();
int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
if( ddepth < 0 )
ddepth = sdepth;
_dst.create( src.size(), CV_MAKETYPE(ddepth, cn) );
Mat dst = _dst.getMat();
if( borderType != BORDER_CONSTANT && normalize && (borderType & BORDER_ISOLATED) != 0 )
{
if( src.rows == 1 )
ksize.height = 1;
if( src.cols == 1 )
ksize.width = 1;
}
Point ofs;
Size wsz(src.cols, src.rows);
if(!(borderType&BORDER_ISOLATED))
src.locateROI( wsz, ofs );
CALL_HAL(boxFilter, cv_hal_boxFilter, src.ptr(), src.step, dst.ptr(), dst.step, src.cols, src.rows, sdepth, ddepth, cn,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, ksize.width, ksize.height,
anchor.x, anchor.y, normalize, borderType&~BORDER_ISOLATED);
CV_OVX_RUN(true,
openvx_boxfilter(src, dst, ddepth, ksize, anchor, normalize, borderType))
CV_IPP_RUN_FAST(ipp_boxfilter(src, dst, ksize, anchor, normalize, borderType));
borderType = (borderType&~BORDER_ISOLATED);
Ptr<FilterEngine> f = createBoxFilter( src.type(), dst.type(),
ksize, anchor, normalize, borderType );
f->apply( src, dst, wsz, ofs );
}
void blur(InputArray src, OutputArray dst,
Size ksize, Point anchor, int borderType)
{
CV_INSTRUMENT_REGION();
boxFilter( src, dst, -1, ksize, anchor, true, borderType );
}
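// Example (illustrative kernel size): the two calls below are equivalent, since blur()
// is simply a normalized box filter with the default centered anchor:
//   cv::blur(src, dst, cv::Size(5, 5));
//   cv::boxFilter(src, dst, -1, cv::Size(5, 5), cv::Point(-1, -1), true, cv::BORDER_DEFAULT);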
/****************************************************************************************\
Squared Box Filter
\****************************************************************************************/
static Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize, int anchor)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(getSqrRowSumFilter, (srcType, sumType, ksize, anchor),
CV_CPU_DISPATCH_MODES_ALL);
}
void sqrBoxFilter(InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType)
{
CV_INSTRUMENT_REGION();
int srcType = _src.type(), sdepth = CV_MAT_DEPTH(srcType), cn = CV_MAT_CN(srcType);
Size size = _src.size();
if( ddepth < 0 )
ddepth = sdepth < CV_32F ? CV_32F : CV_64F;
if( borderType != BORDER_CONSTANT && normalize )
{
if( size.height == 1 )
ksize.height = 1;
if( size.width == 1 )
ksize.width = 1;
}
CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize, true))
int sumDepth = CV_64F;
if( sdepth == CV_8U )
sumDepth = CV_32S;
int sumType = CV_MAKETYPE( sumDepth, cn ), dstType = CV_MAKETYPE(ddepth, cn);
Mat src = _src.getMat();
_dst.create( size, dstType );
Mat dst = _dst.getMat();
Ptr<BaseRowFilter> rowFilter = getSqrRowSumFilter(srcType, sumType, ksize.width, anchor.x );
Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
dstType, ksize.height, anchor.y,
normalize ? 1./(ksize.width*ksize.height) : 1);
Ptr<FilterEngine> f = makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
srcType, dstType, sumType, borderType );
Point ofs;
Size wsz(src.cols, src.rows);
src.locateROI( wsz, ofs );
f->apply( src, dst, wsz, ofs );
}
} // namespace
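One common use of sqrBoxFilter() together with blur() is a per-pixel local variance map; a short sketch under the assumption of a CV_32F input and an illustrative 5x5 window:

#include <opencv2/imgproc.hpp>

// variance(p) = E[I^2](p) - (E[I](p))^2 over the ksize window around p
static cv::Mat localVariance(const cv::Mat& src32f, cv::Size ksize = cv::Size(5, 5))
{
    cv::Mat mean, meanOfSquares;
    cv::blur(src32f, mean, ksize);                            // E[I]
    cv::sqrBoxFilter(src32f, meanOfSquares, CV_32F, ksize);   // E[I^2] (normalized by default)
    return meanOfSquares - mean.mul(mean);
}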

@@ -42,21 +42,25 @@
//M*/
#include "precomp.hpp"
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
// forward declarations
Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType, int ksize, int anchor);
Ptr<BaseColumnFilter> getColumnSumFilter(int sumType, int dstType, int ksize, int anchor, double scale);
Ptr<FilterEngine> createBoxFilter(int srcType, int dstType, Size ksize,
Point anchor, bool normalize, int borderType);
namespace cv
{
Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize, int anchor);
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
/****************************************************************************************\
Box Filter
\****************************************************************************************/
namespace {
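// RowSum/ColumnSum below implement the separable box filter as two sliding-window sums:
// each output is updated by adding the sample entering the window and subtracting the
// one leaving it, so the per-pixel cost is independent of the kernel size.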
template<typename T, typename ST>
struct RowSum :
public BaseRowFilter
@@ -70,6 +74,8 @@ struct RowSum :
virtual void operator()(const uchar* src, uchar* dst, int width, int cn) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
const T* S = (const T*)src;
ST* D = (ST*)dst;
int i = 0, k, ksz_cn = ksize*cn;
@@ -183,6 +189,8 @@ struct ColumnSum :
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int i;
ST* SUM;
bool haveScale = scale != 1;
@@ -281,6 +289,8 @@ struct ColumnSum<int, uchar> :
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int* SUM;
bool haveScale = scale != 1;
double _scale = scale;
@@ -408,9 +418,6 @@ struct ColumnSum<int, uchar> :
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
@@ -452,6 +459,8 @@ public BaseColumnFilter
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
const int ds = divScale;
const int dd = divDelta;
ushort* SUM;
@@ -586,9 +595,6 @@ public BaseColumnFilter
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
@@ -616,6 +622,8 @@ struct ColumnSum<int, short> :
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int i;
int* SUM;
bool haveScale = scale != 1;
@@ -739,9 +747,6 @@ struct ColumnSum<int, short> :
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
@@ -767,6 +772,8 @@ struct ColumnSum<int, ushort> :
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int* SUM;
bool haveScale = scale != 1;
double _scale = scale;
@@ -888,9 +895,6 @@ struct ColumnSum<int, ushort> :
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
@@ -915,6 +919,8 @@ struct ColumnSum<int, int> :
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int* SUM;
bool haveScale = scale != 1;
double _scale = scale;
@@ -1022,9 +1028,6 @@ struct ColumnSum<int, int> :
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
@@ -1050,6 +1053,8 @@ struct ColumnSum<int, float> :
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int* SUM;
bool haveScale = scale != 1;
double _scale = scale;
@@ -1154,9 +1159,6 @@ struct ColumnSum<int, float> :
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
@@ -1164,243 +1166,13 @@
std::vector<int> sum;
};
#ifdef HAVE_OPENCL
} // namespace anon
static bool ocl_boxFilter3x3_8UC1( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor, int borderType, bool normalize )
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
if (ddepth < 0)
ddepth = sdepth;
if (anchor.x < 0)
anchor.x = ksize.width / 2;
if (anchor.y < 0)
anchor.y = ksize.height / 2;
if ( !(dev.isIntel() && (type == CV_8UC1) &&
(_src.offset() == 0) && (_src.step() % 4 == 0) &&
(_src.cols() % 16 == 0) && (_src.rows() % 2 == 0) &&
(anchor.x == 1) && (anchor.y == 1) &&
(ksize.width == 3) && (ksize.height == 3)) )
return false;
float alpha = 1.0f / (ksize.height * ksize.width);
Size size = _src.size();
size_t globalsize[2] = { 0, 0 };
size_t localsize[2] = { 0, 0 };
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
globalsize[0] = size.width / 16;
globalsize[1] = size.height / 2;
char build_opts[1024];
sprintf(build_opts, "-D %s %s", borderMap[borderType], normalize ? "-D NORMALIZE" : "");
ocl::Kernel kernel("boxFilter3x3_8UC1_cols16_rows2", cv::ocl::imgproc::boxFilter3x3_oclsrc, build_opts);
if (kernel.empty())
return false;
UMat src = _src.getUMat();
_dst.create(size, CV_MAKETYPE(ddepth, cn));
if (!(_dst.offset() == 0 && _dst.step() % 4 == 0))
return false;
UMat dst = _dst.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
idxArg = kernel.set(idxArg, (int)src.step);
idxArg = kernel.set(idxArg, ocl::KernelArg::PtrWriteOnly(dst));
idxArg = kernel.set(idxArg, (int)dst.step);
idxArg = kernel.set(idxArg, (int)dst.rows);
idxArg = kernel.set(idxArg, (int)dst.cols);
if (normalize)
idxArg = kernel.set(idxArg, (float)alpha);
return kernel.run(2, globalsize, (localsize[0] == 0) ? NULL : localsize, false);
}
static bool ocl_boxFilter( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor, int borderType, bool normalize, bool sqr = false )
Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
bool doubleSupport = dev.doubleFPConfig() > 0;
if (ddepth < 0)
ddepth = sdepth;
if (cn > 4 || (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F)) ||
_src.offset() % esz != 0 || _src.step() % esz != 0)
return false;
if (anchor.x < 0)
anchor.x = ksize.width / 2;
if (anchor.y < 0)
anchor.y = ksize.height / 2;
int computeUnits = ocl::Device::getDefault().maxComputeUnits();
float alpha = 1.0f / (ksize.height * ksize.width);
Size size = _src.size(), wholeSize;
bool isolated = (borderType & BORDER_ISOLATED) != 0;
borderType &= ~BORDER_ISOLATED;
int wdepth = std::max(CV_32F, std::max(ddepth, sdepth)),
wtype = CV_MAKE_TYPE(wdepth, cn), dtype = CV_MAKE_TYPE(ddepth, cn);
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
size_t globalsize[2] = { (size_t)size.width, (size_t)size.height };
size_t localsize_general[2] = { 0, 1 }, * localsize = NULL;
UMat src = _src.getUMat();
if (!isolated)
{
Point ofs;
src.locateROI(wholeSize, ofs);
}
int h = isolated ? size.height : wholeSize.height;
int w = isolated ? size.width : wholeSize.width;
size_t maxWorkItemSizes[32];
ocl::Device::getDefault().maxWorkItemSizes(maxWorkItemSizes);
int tryWorkItems = (int)maxWorkItemSizes[0];
ocl::Kernel kernel;
if (dev.isIntel() && !(dev.type() & ocl::Device::TYPE_CPU) &&
((ksize.width < 5 && ksize.height < 5 && esz <= 4) ||
(ksize.width == 5 && ksize.height == 5 && cn == 1)))
{
if (w < ksize.width || h < ksize.height)
return false;
// Figure out what vector size to use for loading the pixels.
int pxLoadNumPixels = cn != 1 || size.width % 4 ? 1 : 4;
int pxLoadVecSize = cn * pxLoadNumPixels;
// Figure out how many pixels per work item to compute in X and Y
// directions. Too many and we run out of registers.
int pxPerWorkItemX = 1, pxPerWorkItemY = 1;
if (cn <= 2 && ksize.width <= 4 && ksize.height <= 4)
{
pxPerWorkItemX = size.width % 8 ? size.width % 4 ? size.width % 2 ? 1 : 2 : 4 : 8;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
else if (cn < 4 || (ksize.width <= 4 && ksize.height <= 4))
{
pxPerWorkItemX = size.width % 2 ? 1 : 2;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
globalsize[0] = size.width / pxPerWorkItemX;
globalsize[1] = size.height / pxPerWorkItemY;
// Need some padding in the private array for pixels
int privDataWidth = roundUp(pxPerWorkItemX + ksize.width - 1, pxLoadNumPixels);
// Make the global size a nice round number so the runtime can pick
// from reasonable choices for the workgroup size
const int wgRound = 256;
globalsize[0] = roundUp(globalsize[0], wgRound);
char build_options[1024], cvt[2][40];
sprintf(build_options, "-D cn=%d "
"-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
"-D PX_LOAD_VEC_SIZE=%d -D PX_LOAD_NUM_PX=%d "
"-D PX_PER_WI_X=%d -D PX_PER_WI_Y=%d -D PRIV_DATA_WIDTH=%d -D %s -D %s "
"-D PX_LOAD_X_ITERATIONS=%d -D PX_LOAD_Y_ITERATIONS=%d "
"-D srcT=%s -D srcT1=%s -D dstT=%s -D dstT1=%s -D WT=%s -D WT1=%s "
"-D convertToWT=%s -D convertToDstT=%s%s%s -D PX_LOAD_FLOAT_VEC_CONV=convert_%s -D OP_BOX_FILTER",
cn, anchor.x, anchor.y, ksize.width, ksize.height,
pxLoadVecSize, pxLoadNumPixels,
pxPerWorkItemX, pxPerWorkItemY, privDataWidth, borderMap[borderType],
isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
privDataWidth / pxLoadNumPixels, pxPerWorkItemY + ksize.height - 1,
ocl::typeToStr(type), ocl::typeToStr(sdepth), ocl::typeToStr(dtype),
ocl::typeToStr(ddepth), ocl::typeToStr(wtype), ocl::typeToStr(wdepth),
ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]),
ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
normalize ? " -D NORMALIZE" : "", sqr ? " -D SQR" : "",
ocl::typeToStr(CV_MAKE_TYPE(wdepth, pxLoadVecSize)) //PX_LOAD_FLOAT_VEC_CONV
);
if (!kernel.create("filterSmall", cv::ocl::imgproc::filterSmall_oclsrc, build_options))
return false;
}
else
{
localsize = localsize_general;
for ( ; ; )
{
int BLOCK_SIZE_X = tryWorkItems, BLOCK_SIZE_Y = std::min(ksize.height * 10, size.height);
while (BLOCK_SIZE_X > 32 && BLOCK_SIZE_X >= ksize.width * 2 && BLOCK_SIZE_X > size.width * 2)
BLOCK_SIZE_X /= 2;
while (BLOCK_SIZE_Y < BLOCK_SIZE_X / 8 && BLOCK_SIZE_Y * computeUnits * 32 < size.height)
BLOCK_SIZE_Y *= 2;
if (ksize.width > BLOCK_SIZE_X || w < ksize.width || h < ksize.height)
return false;
char cvt[2][50];
String opts = format("-D LOCAL_SIZE_X=%d -D BLOCK_SIZE_Y=%d -D ST=%s -D DT=%s -D WT=%s -D convertToDT=%s -D convertToWT=%s"
" -D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d -D %s%s%s%s%s"
" -D ST1=%s -D DT1=%s -D cn=%d",
BLOCK_SIZE_X, BLOCK_SIZE_Y, ocl::typeToStr(type), ocl::typeToStr(CV_MAKE_TYPE(ddepth, cn)),
ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
ocl::convertTypeStr(wdepth, ddepth, cn, cvt[0]),
ocl::convertTypeStr(sdepth, wdepth, cn, cvt[1]),
anchor.x, anchor.y, ksize.width, ksize.height, borderMap[borderType],
isolated ? " -D BORDER_ISOLATED" : "", doubleSupport ? " -D DOUBLE_SUPPORT" : "",
normalize ? " -D NORMALIZE" : "", sqr ? " -D SQR" : "",
ocl::typeToStr(sdepth), ocl::typeToStr(ddepth), cn);
localsize[0] = BLOCK_SIZE_X;
globalsize[0] = divUp(size.width, BLOCK_SIZE_X - (ksize.width - 1)) * BLOCK_SIZE_X;
globalsize[1] = divUp(size.height, BLOCK_SIZE_Y);
kernel.create("boxFilter", cv::ocl::imgproc::boxFilter_oclsrc, opts);
if (kernel.empty())
return false;
size_t kernelWorkGroupSize = kernel.workGroupSize();
if (localsize[0] <= kernelWorkGroupSize)
break;
if (BLOCK_SIZE_X < (int)kernelWorkGroupSize)
return false;
tryWorkItems = (int)kernelWorkGroupSize;
}
}
_dst.create(size, CV_MAKETYPE(ddepth, cn));
UMat dst = _dst.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
idxArg = kernel.set(idxArg, (int)src.step);
int srcOffsetX = (int)((src.offset % src.step) / src.elemSize());
int srcOffsetY = (int)(src.offset / src.step);
int srcEndX = isolated ? srcOffsetX + size.width : wholeSize.width;
int srcEndY = isolated ? srcOffsetY + size.height : wholeSize.height;
idxArg = kernel.set(idxArg, srcOffsetX);
idxArg = kernel.set(idxArg, srcOffsetY);
idxArg = kernel.set(idxArg, srcEndX);
idxArg = kernel.set(idxArg, srcEndY);
idxArg = kernel.set(idxArg, ocl::KernelArg::WriteOnly(dst));
if (normalize)
idxArg = kernel.set(idxArg, (float)alpha);
return kernel.run(2, globalsize, localsize, false);
}
#endif
}
CV_INSTRUMENT_REGION();
cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
{
int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
@ -1434,9 +1206,10 @@ cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksi
}
cv::Ptr<cv::BaseColumnFilter> cv::getColumnSumFilter(int sumType, int dstType, int ksize,
int anchor, double scale)
Ptr<BaseColumnFilter> getColumnSumFilter(int sumType, int dstType, int ksize, int anchor, double scale)
{
CV_INSTRUMENT_REGION();
int sdepth = CV_MAT_DEPTH(sumType), ddepth = CV_MAT_DEPTH(dstType);
CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(dstType) );
@ -1474,9 +1247,11 @@ cv::Ptr<cv::BaseColumnFilter> cv::getColumnSumFilter(int sumType, int dstType, i
}
cv::Ptr<cv::FilterEngine> cv::createBoxFilter( int srcType, int dstType, Size ksize,
Point anchor, bool normalize, int borderType )
Ptr<FilterEngine> createBoxFilter(int srcType, int dstType, Size ksize,
Point anchor, bool normalize, int borderType)
{
CV_INSTRUMENT_REGION();
int sdepth = CV_MAT_DEPTH(srcType);
int cn = CV_MAT_CN(srcType), sumType = CV_64F;
if( sdepth == CV_8U && CV_MAT_DEPTH(dstType) == CV_8U &&
@ -1496,199 +1271,12 @@ cv::Ptr<cv::FilterEngine> cv::createBoxFilter( int srcType, int dstType, Size ks
srcType, dstType, sumType, borderType );
}
#ifdef HAVE_OPENVX
namespace cv
{
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_BOX_3x3>(int w, int h) { return w*h < 640 * 480; }
}
static bool openvx_boxfilter(InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType)
{
if (ddepth < 0)
ddepth = CV_8UC1;
if (_src.type() != CV_8UC1 || ddepth != CV_8U || !normalize ||
_src.cols() < 3 || _src.rows() < 3 ||
ksize.width != 3 || ksize.height != 3 ||
(anchor.x >= 0 && anchor.x != 1) ||
(anchor.y >= 0 && anchor.y != 1) ||
ovx::skipSmallImages<VX_KERNEL_BOX_3x3>(_src.cols(), _src.rows()))
return false;
Mat src = _src.getMat();
if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
return false; //Process isolated borders only
vx_enum border;
switch (borderType & ~BORDER_ISOLATED)
{
case BORDER_CONSTANT:
border = VX_BORDER_CONSTANT;
break;
case BORDER_REPLICATE:
border = VX_BORDER_REPLICATE;
break;
default:
return false;
}
_dst.create(src.size(), CV_8UC1);
Mat dst = _dst.getMat();
try
{
ivx::Context ctx = ovx::getOpenVXContext();
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuBox3x3(ctx, ia, ib));
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
}
#endif
#if defined(HAVE_IPP)
namespace cv
{
static bool ipp_boxfilter(Mat &src, Mat &dst, Size ksize, Point anchor, bool normalize, int borderType)
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201801
// Problem with SSE42 optimization for 16s and some 8u modes
if(ipp::getIppTopFeatures() == ippCPUID_SSE42 && (((src.depth() == CV_16S || src.depth() == CV_16U) && (src.channels() == 3 || src.channels() == 4)) || (src.depth() == CV_8U && src.channels() == 3 && (ksize.width > 5 || ksize.height > 5))))
return false;
// Other optimizations have some degradations too
if((((src.depth() == CV_16S || src.depth() == CV_16U) && (src.channels() == 4)) || (src.depth() == CV_8U && src.channels() == 1 && (ksize.width > 5 || ksize.height > 5))))
return false;
#endif
if(!normalize)
return false;
if(!ippiCheckAnchor(anchor, ksize))
return false;
try
{
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiSize iwKSize = ippiGetSize(ksize);
::ipp::IwiBorderSize borderSize(iwKSize);
::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
if(!ippBorder)
return false;
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBox, iwSrc, iwDst, iwKSize, ::ipp::IwDefault(), ippBorder);
}
catch (const ::ipp::IwException &)
{
return false;
}
return true;
#else
CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(ksize); CV_UNUSED(anchor); CV_UNUSED(normalize); CV_UNUSED(borderType);
return false;
#endif
}
}
#endif
void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType )
{
CV_INSTRUMENT_REGION();
CV_OCL_RUN(_dst.isUMat() &&
(borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT ||
borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101),
ocl_boxFilter3x3_8UC1(_src, _dst, ddepth, ksize, anchor, borderType, normalize))
CV_OCL_RUN(_dst.isUMat(), ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize))
Mat src = _src.getMat();
int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
if( ddepth < 0 )
ddepth = sdepth;
_dst.create( src.size(), CV_MAKETYPE(ddepth, cn) );
Mat dst = _dst.getMat();
if( borderType != BORDER_CONSTANT && normalize && (borderType & BORDER_ISOLATED) != 0 )
{
if( src.rows == 1 )
ksize.height = 1;
if( src.cols == 1 )
ksize.width = 1;
}
Point ofs;
Size wsz(src.cols, src.rows);
if(!(borderType&BORDER_ISOLATED))
src.locateROI( wsz, ofs );
CALL_HAL(boxFilter, cv_hal_boxFilter, src.ptr(), src.step, dst.ptr(), dst.step, src.cols, src.rows, sdepth, ddepth, cn,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, ksize.width, ksize.height,
anchor.x, anchor.y, normalize, borderType&~BORDER_ISOLATED);
CV_OVX_RUN(true,
openvx_boxfilter(src, dst, ddepth, ksize, anchor, normalize, borderType))
CV_IPP_RUN_FAST(ipp_boxfilter(src, dst, ksize, anchor, normalize, borderType));
borderType = (borderType&~BORDER_ISOLATED);
Ptr<FilterEngine> f = createBoxFilter( src.type(), dst.type(),
ksize, anchor, normalize, borderType );
f->apply( src, dst, wsz, ofs );
}
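// cv::boxFilter tries the accelerated backends in order (OpenCL for UMat
// destinations, then the HAL hook, OpenVX for the normalized 3x3 8U case, then
// IPP) and only falls back to the generic separable FilterEngine when none of
// them applies. Illustrative usage (a sketch, not taken from this file):
//     cv::Mat src = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
//     cv::Mat dst;
//     cv::boxFilter(src, dst, -1, cv::Size(5, 5), cv::Point(-1, -1),
//                   /*normalize=*/true, cv::BORDER_DEFAULT);
// which, with normalize=true, is what cv::blur(src, dst, cv::Size(5, 5)) does below.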
void cv::blur( InputArray src, OutputArray dst,
Size ksize, Point anchor, int borderType )
{
CV_INSTRUMENT_REGION();
boxFilter( src, dst, -1, ksize, anchor, true, borderType );
}
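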
/****************************************************************************************\
Squared Box Filter
\****************************************************************************************/
namespace cv
{
namespace {
template<typename T, typename ST>
struct SqrRowSum :
@ -1703,6 +1291,8 @@ struct SqrRowSum :
virtual void operator()(const uchar* src, uchar* dst, int width, int cn) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
const T* S = (const T*)src;
ST* D = (ST*)dst;
int i = 0, k, ksz_cn = ksize*cn;
@ -1727,7 +1317,9 @@ struct SqrRowSum :
}
};
static Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize, int anchor)
} // namespace anon
Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize, int anchor)
{
int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
@ -1753,52 +1345,6 @@ static Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize
srcType, sumType));
}
}
void cv::sqrBoxFilter( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType )
{
CV_INSTRUMENT_REGION();
int srcType = _src.type(), sdepth = CV_MAT_DEPTH(srcType), cn = CV_MAT_CN(srcType);
Size size = _src.size();
if( ddepth < 0 )
ddepth = sdepth < CV_32F ? CV_32F : CV_64F;
if( borderType != BORDER_CONSTANT && normalize )
{
if( size.height == 1 )
ksize.height = 1;
if( size.width == 1 )
ksize.width = 1;
}
CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize, true))
int sumDepth = CV_64F;
if( sdepth == CV_8U )
sumDepth = CV_32S;
int sumType = CV_MAKETYPE( sumDepth, cn ), dstType = CV_MAKETYPE(ddepth, cn);
Mat src = _src.getMat();
_dst.create( size, dstType );
Mat dst = _dst.getMat();
Ptr<BaseRowFilter> rowFilter = getSqrRowSumFilter(srcType, sumType, ksize.width, anchor.x );
Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
dstType, ksize.height, anchor.y,
normalize ? 1./(ksize.width*ksize.height) : 1);
Ptr<FilterEngine> f = makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
srcType, dstType, sumType, borderType );
Point ofs;
Size wsz(src.cols, src.rows);
src.locateROI( wsz, ofs );
f->apply( src, dst, wsz, ofs );
}
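// sqrBoxFilter accumulates sums of squared pixel values over the same window;
// combined with boxFilter it gives a cheap local variance estimate, e.g. (a sketch):
//     cv::Mat mean, meanOfSq;
//     cv::boxFilter(img, mean, CV_32F, ksize);
//     cv::sqrBoxFilter(img, meanOfSq, CV_32F, ksize);
//     cv::Mat localVariance = meanOfSq - mean.mul(mean);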
/* End of file. */
#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace

@ -1,197 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "filter.hpp"
namespace cv
{
int RowVec_32f_AVX(const float* src0, const float* _kx, float* dst, int width, int cn, int _ksize)
{
int i = 0, k;
for (; i <= width - 8; i += 8)
{
const float* src = src0 + i;
__m256 f, x0;
__m256 s0 = _mm256_set1_ps(0.0f);
for (k = 0; k < _ksize; k++, src += cn)
{
f = _mm256_set1_ps(_kx[k]);
x0 = _mm256_loadu_ps(src);
#if CV_FMA3
s0 = _mm256_fmadd_ps(x0, f, s0);
#else
s0 = _mm256_add_ps(s0, _mm256_mul_ps(x0, f));
#endif
}
_mm256_storeu_ps(dst + i, s0);
}
_mm256_zeroupper();
return i;
}
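// RowVec_32f_AVX convolves the row in blocks of 8 floats and returns the number
// of elements it produced; the caller is expected to finish the remaining tail
// with scalar code along the lines of:
//     for (; i < width; i++)
//     {
//         float s = 0.f;
//         for (int k = 0; k < _ksize; k++)
//             s += _kx[k] * src0[i + k*cn];
//         dst[i] = s;
//     }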
int SymmColumnVec_32f_Symm_AVX(const float** src, const float* ky, float* dst, float delta, int width, int ksize2)
{
int i = 0, k;
const float *S, *S2;
const __m128 d4 = _mm_set1_ps(delta);
const __m256 d8 = _mm256_set1_ps(delta);
for( ; i <= width - 16; i += 16 )
{
__m256 f = _mm256_set1_ps(ky[0]);
__m256 s0, s1;
__m256 x0;
S = src[0] + i;
s0 = _mm256_loadu_ps(S);
#if CV_FMA3
s0 = _mm256_fmadd_ps(s0, f, d8);
#else
s0 = _mm256_add_ps(_mm256_mul_ps(s0, f), d8);
#endif
s1 = _mm256_loadu_ps(S+8);
#if CV_FMA3
s1 = _mm256_fmadd_ps(s1, f, d8);
#else
s1 = _mm256_add_ps(_mm256_mul_ps(s1, f), d8);
#endif
for( k = 1; k <= ksize2; k++ )
{
S = src[k] + i;
S2 = src[-k] + i;
f = _mm256_set1_ps(ky[k]);
x0 = _mm256_add_ps(_mm256_loadu_ps(S), _mm256_loadu_ps(S2));
#if CV_FMA3
s0 = _mm256_fmadd_ps(x0, f, s0);
#else
s0 = _mm256_add_ps(s0, _mm256_mul_ps(x0, f));
#endif
x0 = _mm256_add_ps(_mm256_loadu_ps(S+8), _mm256_loadu_ps(S2+8));
#if CV_FMA3
s1 = _mm256_fmadd_ps(x0, f, s1);
#else
s1 = _mm256_add_ps(s1, _mm256_mul_ps(x0, f));
#endif
}
_mm256_storeu_ps(dst + i, s0);
_mm256_storeu_ps(dst + i + 8, s1);
}
for( ; i <= width - 4; i += 4 )
{
__m128 f = _mm_set1_ps(ky[0]);
__m128 x0, s0 = _mm_load_ps(src[0] + i);
s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4);
for( k = 1; k <= ksize2; k++ )
{
f = _mm_set1_ps(ky[k]);
x0 = _mm_add_ps(_mm_load_ps(src[k]+i), _mm_load_ps(src[-k] + i));
s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
}
_mm_storeu_ps(dst + i, s0);
}
_mm256_zeroupper();
return i;
}
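// For a symmetric kernel the column pass above evaluates, per output element,
//     dst[i] = delta + ky[0]*src[0][i] + sum_{k=1..ksize2} ky[k]*(src[k][i] + src[-k][i])
// folding the mirrored rows together before the multiply; the AVX loop does this
// for 16 floats at a time and the SSE tail for 4.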
int SymmColumnVec_32f_Unsymm_AVX(const float** src, const float* ky, float* dst, float delta, int width, int ksize2)
{
int i = 0, k;
const float *S2;
const __m128 d4 = _mm_set1_ps(delta);
const __m256 d8 = _mm256_set1_ps(delta);
for (; i <= width - 16; i += 16)
{
__m256 f, s0 = d8, s1 = d8;
__m256 x0;
for (k = 1; k <= ksize2; k++)
{
const float *S = src[k] + i;
S2 = src[-k] + i;
f = _mm256_set1_ps(ky[k]);
x0 = _mm256_sub_ps(_mm256_loadu_ps(S), _mm256_loadu_ps(S2));
#if CV_FMA3
s0 = _mm256_fmadd_ps(x0, f, s0);
#else
s0 = _mm256_add_ps(s0, _mm256_mul_ps(x0, f));
#endif
x0 = _mm256_sub_ps(_mm256_loadu_ps(S + 8), _mm256_loadu_ps(S2 + 8));
#if CV_FMA3
s1 = _mm256_fmadd_ps(x0, f, s1);
#else
s1 = _mm256_add_ps(s1, _mm256_mul_ps(x0, f));
#endif
}
_mm256_storeu_ps(dst + i, s0);
_mm256_storeu_ps(dst + i + 8, s1);
}
for (; i <= width - 4; i += 4)
{
__m128 f, x0, s0 = d4;
for (k = 1; k <= ksize2; k++)
{
f = _mm_set1_ps(ky[k]);
x0 = _mm_sub_ps(_mm_load_ps(src[k] + i), _mm_load_ps(src[-k] + i));
s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
}
_mm_storeu_ps(dst + i, s0);
}
_mm256_zeroupper();
return i;
}
}
/* End of file. */

File diff suppressed because it is too large

@ -45,17 +45,13 @@
namespace cv
{
#if CV_TRY_AVX2
int RowVec_32f_AVX(const float* src0, const float* _kx, float* dst, int width, int cn, int _ksize);
int SymmColumnVec_32f_Symm_AVX(const float** src, const float* ky, float* dst, float delta, int width, int ksize2);
int SymmColumnVec_32f_Unsymm_AVX(const float** src, const float* ky, float* dst, float delta, int width, int ksize2);
#endif
#ifdef HAVE_OPENCL
bool ocl_sepFilter2D( InputArray _src, OutputArray _dst, int ddepth,
InputArray _kernelX, InputArray _kernelY, Point anchor,
double delta, int borderType );
#endif
void preprocess2DKernel(const Mat& kernel, std::vector<Point>& coords, std::vector<uchar>& coeffs);
}
#endif

@ -9,10 +9,7 @@
#ifndef _CV_FIXEDPOINT_HPP_
#define _CV_FIXEDPOINT_HPP_
#include "opencv2/core/softfloat.hpp"
namespace
{
namespace {
class fixedpoint64
{

@ -0,0 +1,317 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, 2018, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "median_blur.simd.hpp"
#include "median_blur.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
namespace cv {
#ifdef HAVE_OPENCL
#define DIVUP(total, grain) ((total + grain - 1) / (grain))
static bool ocl_medianFilter(InputArray _src, OutputArray _dst, int m)
{
size_t localsize[2] = { 16, 16 };
size_t globalsize[2];
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
if ( !((depth == CV_8U || depth == CV_16U || depth == CV_16S || depth == CV_32F) && cn <= 4 && (m == 3 || m == 5)) )
return false;
Size imgSize = _src.size();
bool useOptimized = (1 == cn) &&
(size_t)imgSize.width >= localsize[0] * 8 &&
(size_t)imgSize.height >= localsize[1] * 8 &&
imgSize.width % 4 == 0 &&
imgSize.height % 4 == 0 &&
(ocl::Device::getDefault().isIntel());
cv::String kname = format( useOptimized ? "medianFilter%d_u" : "medianFilter%d", m) ;
cv::String kdefs = useOptimized ?
format("-D T=%s -D T1=%s -D T4=%s%d -D cn=%d -D USE_4OPT", ocl::typeToStr(type),
ocl::typeToStr(depth), ocl::typeToStr(depth), cn*4, cn)
:
format("-D T=%s -D T1=%s -D cn=%d", ocl::typeToStr(type), ocl::typeToStr(depth), cn) ;
ocl::Kernel k(kname.c_str(), ocl::imgproc::medianFilter_oclsrc, kdefs.c_str() );
if (k.empty())
return false;
UMat src = _src.getUMat();
_dst.create(src.size(), type);
UMat dst = _dst.getUMat();
k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst));
if( useOptimized )
{
globalsize[0] = DIVUP(src.cols / 4, localsize[0]) * localsize[0];
globalsize[1] = DIVUP(src.rows / 4, localsize[1]) * localsize[1];
}
else
{
globalsize[0] = (src.cols + localsize[0] + 2) / localsize[0] * localsize[0];
globalsize[1] = (src.rows + localsize[1] - 1) / localsize[1] * localsize[1];
}
return k.run(2, globalsize, localsize, false);
}
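// In the optimized (USE_4OPT) path both dimensions are divided by 4, i.e. each
// work-item is expected to produce a 4x4 block of outputs, and the global size is
// rounded up to a multiple of the 16x16 local size. For a 1920x1080 CV_8UC1 image
// this gives, roughly:
//     globalsize[0] = DIVUP(1920 / 4, 16) * 16 = 480
//     globalsize[1] = DIVUP(1080 / 4, 16) * 16 = 272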
#undef DIVUP
#endif
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_MEDIAN_3x3>(int w, int h) { return w*h < 1280 * 720; }
}
static bool openvx_medianFilter(InputArray _src, OutputArray _dst, int ksize)
{
if (_src.type() != CV_8UC1 || _dst.type() != CV_8U
#ifndef VX_VERSION_1_1
|| ksize != 3
#endif
)
return false;
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if (
#ifdef VX_VERSION_1_1
ksize != 3 ? ovx::skipSmallImages<VX_KERNEL_NON_LINEAR_FILTER>(src.cols, src.rows) :
#endif
ovx::skipSmallImages<VX_KERNEL_MEDIAN_3x3>(src.cols, src.rows)
)
return false;
try
{
ivx::Context ctx = ovx::getOpenVXContext();
#ifdef VX_VERSION_1_1
if ((vx_size)ksize > ctx.nonlinearMaxDimension())
return false;
#endif
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(VX_BORDER_REPLICATE);
#ifdef VX_VERSION_1_1
if (ksize == 3)
#endif
{
ivx::IVX_CHECK_STATUS(vxuMedian3x3(ctx, ia, ib));
}
#ifdef VX_VERSION_1_1
else
{
ivx::Matrix mtx;
if(ksize == 5)
mtx = ivx::Matrix::createFromPattern(ctx, VX_PATTERN_BOX, ksize, ksize);
else
{
vx_size supportedSize;
ivx::IVX_CHECK_STATUS(vxQueryContext(ctx, VX_CONTEXT_NONLINEAR_MAX_DIMENSION, &supportedSize, sizeof(supportedSize)));
if ((vx_size)ksize > supportedSize)
{
ctx.setImmediateBorder(prevBorder);
return false;
}
Mat mask(ksize, ksize, CV_8UC1, Scalar(255));
mtx = ivx::Matrix::create(ctx, VX_TYPE_UINT8, ksize, ksize);
mtx.copyFrom(mask);
}
ivx::IVX_CHECK_STATUS(vxuNonLinearFilter(ctx, VX_NONLINEAR_FILTER_MEDIAN, ia, mtx, ib));
}
#endif
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#ifdef HAVE_IPP
static bool ipp_medianFilter(Mat &src0, Mat &dst, int ksize)
{
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201801
// Degradations for big kernel
if(ksize > 7)
return false;
#endif
{
int bufSize;
IppiSize dstRoiSize = ippiSize(dst.cols, dst.rows), maskSize = ippiSize(ksize, ksize);
IppDataType ippType = ippiGetDataType(src0.type());
int channels = src0.channels();
IppAutoBuffer<Ipp8u> buffer;
if(src0.isSubmatrix())
return false;
Mat src;
if(dst.data != src0.data)
src = src0;
else
src0.copyTo(src);
if(ippiFilterMedianBorderGetBufferSize(dstRoiSize, maskSize, ippType, channels, &bufSize) < 0)
return false;
buffer.allocate(bufSize);
switch(ippType)
{
case ipp8u:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_8u_C1R, src.ptr<Ipp8u>(), (int)src.step, dst.ptr<Ipp8u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 3)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_8u_C3R, src.ptr<Ipp8u>(), (int)src.step, dst.ptr<Ipp8u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 4)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_8u_C4R, src.ptr<Ipp8u>(), (int)src.step, dst.ptr<Ipp8u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
case ipp16u:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16u_C1R, src.ptr<Ipp16u>(), (int)src.step, dst.ptr<Ipp16u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 3)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16u_C3R, src.ptr<Ipp16u>(), (int)src.step, dst.ptr<Ipp16u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 4)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16u_C4R, src.ptr<Ipp16u>(), (int)src.step, dst.ptr<Ipp16u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
case ipp16s:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16s_C1R, src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 3)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16s_C3R, src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 4)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16s_C4R, src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
case ipp32f:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_32f_C1R, src.ptr<Ipp32f>(), (int)src.step, dst.ptr<Ipp32f>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
default:
return false;
}
}
}
#endif
void medianBlur( InputArray _src0, OutputArray _dst, int ksize )
{
CV_INSTRUMENT_REGION();
CV_Assert( (ksize % 2 == 1) && (_src0.dims() <= 2 ));
if( ksize <= 1 || _src0.empty() )
{
_src0.copyTo(_dst);
return;
}
CV_OCL_RUN(_dst.isUMat(),
ocl_medianFilter(_src0,_dst, ksize))
Mat src0 = _src0.getMat();
_dst.create( src0.size(), src0.type() );
Mat dst = _dst.getMat();
CALL_HAL(medianBlur, cv_hal_medianBlur, src0.data, src0.step, dst.data, dst.step, src0.cols, src0.rows, src0.depth(),
src0.channels(), ksize);
CV_OVX_RUN(true,
openvx_medianFilter(_src0, _dst, ksize))
CV_IPP_RUN_FAST(ipp_medianFilter(src0, dst, ksize));
#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::useTegra() && tegra::medianBlur(src0, dst, ksize))
return;
#endif
CV_CPU_DISPATCH(medianBlur, (src0, dst, ksize),
CV_CPU_DISPATCH_MODES_ALL);
}
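// CV_CPU_DISPATCH selects, at run time, the best medianBlur variant among those
// compiled from median_blur.simd.hpp (see CV_CPU_DISPATCH_MODES_ALL above).
// Conceptually it expands to something like:
//     if (CV_CPU_HAS_SUPPORT_AVX2)
//         return opt_AVX2::medianBlur(src0, dst, ksize);
//     return cpu_baseline::medianBlur(src0, dst, ksize);
// with the exact namespace names generated by the build system, so treat this as
// a sketch rather than the literal expansion.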
} // namespace
/* End of file. */

@ -46,9 +46,11 @@
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#ifdef _MSC_VER
#pragma warning(disable: 4244) // warning C4244: 'argument': conversion from 'int' to 'ushort', possible loss of data
// triggered on intrinsic code from medianBlur_8u_O1()
#endif
/*
* This file includes the code, contributed by Simon Perreault
@ -71,12 +73,18 @@
Median Filter
\****************************************************************************************/
namespace cv
{
namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
// forward declarations
void medianBlur(const Mat& src0, /*const*/ Mat& dst, int ksize);
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
static void
medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
{
CV_INSTRUMENT_REGION();
typedef ushort HT;
/**
@ -330,9 +338,6 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
}
}
}
#if CV_SIMD
vx_cleanup();
#endif
}
#undef HOP
@ -342,6 +347,8 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
static void
medianBlur_8u_Om( const Mat& _src, Mat& _dst, int m )
{
CV_INSTRUMENT_REGION();
#define N 16
int zone0[4][N];
int zone1[4][N*N];
@ -671,6 +678,8 @@ template<class Op, class VecOp>
static void
medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
{
CV_INSTRUMENT_REGION();
typedef typename Op::value_type T;
typedef typename Op::arg_type WT;
typedef typename VecOp::arg_type VT;
@ -770,9 +779,6 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
limit = size.width;
}
}
#if CV_SIMD
vx_cleanup();
#endif
}
else if( m == 5 )
{
@ -934,266 +940,15 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
limit = size.width;
}
}
#if CV_SIMD
vx_cleanup();
#endif
}
}
#ifdef HAVE_OPENCL
#define DIVUP(total, grain) ((total + grain - 1) / (grain))
static bool ocl_medianFilter(InputArray _src, OutputArray _dst, int m)
{
size_t localsize[2] = { 16, 16 };
size_t globalsize[2];
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
if ( !((depth == CV_8U || depth == CV_16U || depth == CV_16S || depth == CV_32F) && cn <= 4 && (m == 3 || m == 5)) )
return false;
Size imgSize = _src.size();
bool useOptimized = (1 == cn) &&
(size_t)imgSize.width >= localsize[0] * 8 &&
(size_t)imgSize.height >= localsize[1] * 8 &&
imgSize.width % 4 == 0 &&
imgSize.height % 4 == 0 &&
(ocl::Device::getDefault().isIntel());
cv::String kname = format( useOptimized ? "medianFilter%d_u" : "medianFilter%d", m) ;
cv::String kdefs = useOptimized ?
format("-D T=%s -D T1=%s -D T4=%s%d -D cn=%d -D USE_4OPT", ocl::typeToStr(type),
ocl::typeToStr(depth), ocl::typeToStr(depth), cn*4, cn)
:
format("-D T=%s -D T1=%s -D cn=%d", ocl::typeToStr(type), ocl::typeToStr(depth), cn) ;
ocl::Kernel k(kname.c_str(), ocl::imgproc::medianFilter_oclsrc, kdefs.c_str() );
if (k.empty())
return false;
UMat src = _src.getUMat();
_dst.create(src.size(), type);
UMat dst = _dst.getUMat();
k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst));
if( useOptimized )
{
globalsize[0] = DIVUP(src.cols / 4, localsize[0]) * localsize[0];
globalsize[1] = DIVUP(src.rows / 4, localsize[1]) * localsize[1];
}
else
{
globalsize[0] = (src.cols + localsize[0] + 2) / localsize[0] * localsize[0];
globalsize[1] = (src.rows + localsize[1] - 1) / localsize[1] * localsize[1];
}
return k.run(2, globalsize, localsize, false);
}
#undef DIVUP
#endif
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_MEDIAN_3x3>(int w, int h) { return w*h < 1280 * 720; }
}
static bool openvx_medianFilter(InputArray _src, OutputArray _dst, int ksize)
{
if (_src.type() != CV_8UC1 || _dst.type() != CV_8U
#ifndef VX_VERSION_1_1
|| ksize != 3
#endif
)
return false;
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if (
#ifdef VX_VERSION_1_1
ksize != 3 ? ovx::skipSmallImages<VX_KERNEL_NON_LINEAR_FILTER>(src.cols, src.rows) :
#endif
ovx::skipSmallImages<VX_KERNEL_MEDIAN_3x3>(src.cols, src.rows)
)
return false;
try
{
ivx::Context ctx = ovx::getOpenVXContext();
#ifdef VX_VERSION_1_1
if ((vx_size)ksize > ctx.nonlinearMaxDimension())
return false;
#endif
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(VX_BORDER_REPLICATE);
#ifdef VX_VERSION_1_1
if (ksize == 3)
#endif
{
ivx::IVX_CHECK_STATUS(vxuMedian3x3(ctx, ia, ib));
}
#ifdef VX_VERSION_1_1
else
{
ivx::Matrix mtx;
if(ksize == 5)
mtx = ivx::Matrix::createFromPattern(ctx, VX_PATTERN_BOX, ksize, ksize);
else
{
vx_size supportedSize;
ivx::IVX_CHECK_STATUS(vxQueryContext(ctx, VX_CONTEXT_NONLINEAR_MAX_DIMENSION, &supportedSize, sizeof(supportedSize)));
if ((vx_size)ksize > supportedSize)
{
ctx.setImmediateBorder(prevBorder);
return false;
}
Mat mask(ksize, ksize, CV_8UC1, Scalar(255));
mtx = ivx::Matrix::create(ctx, VX_TYPE_UINT8, ksize, ksize);
mtx.copyFrom(mask);
}
ivx::IVX_CHECK_STATUS(vxuNonLinearFilter(ctx, VX_NONLINEAR_FILTER_MEDIAN, ia, mtx, ib));
}
#endif
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#ifdef HAVE_IPP
static bool ipp_medianFilter(Mat &src0, Mat &dst, int ksize)
{
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201801
// Degradations for big kernel
if(ksize > 7)
return false;
#endif
{
int bufSize;
IppiSize dstRoiSize = ippiSize(dst.cols, dst.rows), maskSize = ippiSize(ksize, ksize);
IppDataType ippType = ippiGetDataType(src0.type());
int channels = src0.channels();
IppAutoBuffer<Ipp8u> buffer;
if(src0.isSubmatrix())
return false;
Mat src;
if(dst.data != src0.data)
src = src0;
else
src0.copyTo(src);
if(ippiFilterMedianBorderGetBufferSize(dstRoiSize, maskSize, ippType, channels, &bufSize) < 0)
return false;
buffer.allocate(bufSize);
} // namespace anon
switch(ippType)
{
case ipp8u:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_8u_C1R, src.ptr<Ipp8u>(), (int)src.step, dst.ptr<Ipp8u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 3)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_8u_C3R, src.ptr<Ipp8u>(), (int)src.step, dst.ptr<Ipp8u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 4)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_8u_C4R, src.ptr<Ipp8u>(), (int)src.step, dst.ptr<Ipp8u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
case ipp16u:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16u_C1R, src.ptr<Ipp16u>(), (int)src.step, dst.ptr<Ipp16u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 3)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16u_C3R, src.ptr<Ipp16u>(), (int)src.step, dst.ptr<Ipp16u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 4)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16u_C4R, src.ptr<Ipp16u>(), (int)src.step, dst.ptr<Ipp16u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
case ipp16s:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16s_C1R, src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 3)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16s_C3R, src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 4)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16s_C4R, src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
case ipp32f:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_32f_C1R, src.ptr<Ipp32f>(), (int)src.step, dst.ptr<Ipp32f>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
default:
return false;
}
}
}
#endif
}
void medianBlur( InputArray _src0, OutputArray _dst, int ksize )
void medianBlur(const Mat& src0, /*const*/ Mat& dst, int ksize)
{
CV_INSTRUMENT_REGION();
CV_Assert( (ksize % 2 == 1) && (_src0.dims() <= 2 ));
if( ksize <= 1 || _src0.empty() )
{
_src0.copyTo(_dst);
return;
}
CV_OCL_RUN(_dst.isUMat(),
ocl_medianFilter(_src0,_dst, ksize))
Mat src0 = _src0.getMat();
_dst.create( src0.size(), src0.type() );
Mat dst = _dst.getMat();
CALL_HAL(medianBlur, cv_hal_medianBlur, src0.data, src0.step, dst.data, dst.step, src0.cols, src0.rows, src0.depth(),
src0.channels(), ksize);
CV_OVX_RUN(true,
openvx_medianFilter(_src0, _dst, ksize))
CV_IPP_RUN_FAST(ipp_medianFilter(src0, dst, ksize));
#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::useTegra() && tegra::medianBlur(src0, dst, ksize))
return;
#endif
bool useSortNet = ksize == 3 || (ksize == 5
#if !(CV_SIMD)
&& ( src0.depth() > CV_8U || src0.channels() == 2 || src0.channels() > 4 )
@ -1223,6 +978,7 @@ void medianBlur( InputArray _src0, OutputArray _dst, int ksize )
}
else
{
// TODO AVX guard (external call)
cv::copyMakeBorder( src0, src, 0, 0, ksize/2, ksize/2, BORDER_REPLICATE|BORDER_ISOLATED);
int cn = src0.channels();
@ -1237,6 +993,6 @@ void medianBlur( InputArray _src0, OutputArray _dst, int ksize )
}
}
}
/* End of file. */
#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace

@ -48,779 +48,49 @@
#include "opencv2/core/hal/intrin.hpp"
#include <opencv2/core/utils/configuration.private.hpp>
#include "morph.simd.hpp"
#include "morph.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
/****************************************************************************************\
Basic Morphological Operations: Erosion & Dilation
\****************************************************************************************/
using namespace std;
namespace cv
{
template<typename T> struct MinOp
{
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(const T a, const T b) const { return std::min(a, b); }
};
template<typename T> struct MaxOp
{
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(const T a, const T b) const { return std::max(a, b); }
};
#undef CV_MIN_8U
#undef CV_MAX_8U
#define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b)))
#define CV_MAX_8U(a,b) ((a) + CV_FAST_CAST_8U((b) - (a)))
template<> inline uchar MinOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MIN_8U(a, b); }
template<> inline uchar MaxOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MAX_8U(a, b); }
struct MorphRowNoVec
{
MorphRowNoVec(int, int) {}
int operator()(const uchar*, uchar*, int, int) const { return 0; }
};
struct MorphColumnNoVec
{
MorphColumnNoVec(int, int) {}
int operator()(const uchar**, uchar*, int, int, int) const { return 0; }
};
struct MorphNoVec
{
int operator()(uchar**, int, uchar*, int) const { return 0; }
};
#if CV_SIMD
template<class VecUpdate> struct MorphRowVec
{
typedef typename VecUpdate::vtype vtype;
typedef typename vtype::lane_type stype;
MorphRowVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
int operator()(const uchar* src, uchar* dst, int width, int cn) const
{
int i, k, _ksize = ksize*cn;
width *= cn;
VecUpdate updateOp;
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes )
{
vtype s0 = vx_load((const stype*)src + i);
vtype s1 = vx_load((const stype*)src + i + vtype::nlanes);
vtype s2 = vx_load((const stype*)src + i + 2*vtype::nlanes);
vtype s3 = vx_load((const stype*)src + i + 3*vtype::nlanes);
for (k = cn; k < _ksize; k += cn)
{
s0 = updateOp(s0, vx_load((const stype*)src + i + k));
s1 = updateOp(s1, vx_load((const stype*)src + i + k + vtype::nlanes));
s2 = updateOp(s2, vx_load((const stype*)src + i + k + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load((const stype*)src + i + k + 3*vtype::nlanes));
}
v_store((stype*)dst + i, s0);
v_store((stype*)dst + i + vtype::nlanes, s1);
v_store((stype*)dst + i + 2*vtype::nlanes, s2);
v_store((stype*)dst + i + 3*vtype::nlanes, s3);
}
if( i <= width - 2*vtype::nlanes )
{
vtype s0 = vx_load((const stype*)src + i);
vtype s1 = vx_load((const stype*)src + i + vtype::nlanes);
for( k = cn; k < _ksize; k += cn )
{
s0 = updateOp(s0, vx_load((const stype*)src + i + k));
s1 = updateOp(s1, vx_load((const stype*)src + i + k + vtype::nlanes));
}
v_store((stype*)dst + i, s0);
v_store((stype*)dst + i + vtype::nlanes, s1);
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s = vx_load((const stype*)src + i);
for( k = cn; k < _ksize; k += cn )
s = updateOp(s, vx_load((const stype*)src + i + k));
v_store((stype*)dst + i, s);
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s = vx_load_low((const stype*)src + i);
for( k = cn; k < _ksize; k += cn )
s = updateOp(s, vx_load_low((const stype*)src + i + k));
v_store_low((stype*)dst + i, s);
i += vtype::nlanes/2;
}
return i - i % cn;
}
int ksize, anchor;
};
template<class VecUpdate> struct MorphColumnVec
{
typedef typename VecUpdate::vtype vtype;
typedef typename vtype::lane_type stype;
MorphColumnVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
int operator()(const uchar** _src, uchar* _dst, int dststep, int count, int width) const
{
int i = 0, k, _ksize = ksize;
VecUpdate updateOp;
for( i = 0; i < count + ksize - 1; i++ )
CV_Assert( ((size_t)_src[i] & (CV_SIMD_WIDTH-1)) == 0 );
const stype** src = (const stype**)_src;
stype* dst = (stype*)_dst;
dststep /= sizeof(dst[0]);
for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
{
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes)
{
const stype* sptr = src[1] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
vtype s2 = vx_load_aligned(sptr + 2*vtype::nlanes);
vtype s3 = vx_load_aligned(sptr + 3*vtype::nlanes);
for( k = 2; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
s2 = updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes));
}
sptr = src[0] + i;
v_store(dst + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
v_store(dst + i + 2*vtype::nlanes, updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes)));
v_store(dst + i + 3*vtype::nlanes, updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes)));
sptr = src[k] + i;
v_store(dst + dststep + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + dststep + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
v_store(dst + dststep + i + 2*vtype::nlanes, updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes)));
v_store(dst + dststep + i + 3*vtype::nlanes, updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes)));
}
if( i <= width - 2*vtype::nlanes )
{
const stype* sptr = src[1] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
for( k = 2; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
}
sptr = src[0] + i;
v_store(dst + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
sptr = src[k] + i;
v_store(dst + dststep + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + dststep + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s0 = vx_load_aligned(src[1] + i);
for( k = 2; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_aligned(src[k] + i));
v_store(dst + i, updateOp(s0, vx_load_aligned(src[0] + i)));
v_store(dst + dststep + i, updateOp(s0, vx_load_aligned(src[k] + i)));
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s0 = vx_load_low(src[1] + i);
for( k = 2; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_low(src[k] + i));
v_store_low(dst + i, updateOp(s0, vx_load_low(src[0] + i)));
v_store_low(dst + dststep + i, updateOp(s0, vx_load_low(src[k] + i)));
i += vtype::nlanes/2;
}
}
for( ; count > 0; count--, dst += dststep, src++ )
{
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes)
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
vtype s2 = vx_load_aligned(sptr + 2*vtype::nlanes);
vtype s3 = vx_load_aligned(sptr + 3*vtype::nlanes);
for( k = 1; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
s2 = updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
v_store(dst + i + 2*vtype::nlanes, s2);
v_store(dst + i + 3*vtype::nlanes, s3);
}
if( i <= width - 2*vtype::nlanes )
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
for( k = 1; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s0 = vx_load_aligned(src[0] + i);
for( k = 1; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_aligned(src[k] + i));
v_store(dst + i, s0);
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s0 = vx_load_low(src[0] + i);
for( k = 1; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_low(src[k] + i));
v_store_low(dst + i, s0);
i += vtype::nlanes/2;
}
}
return i;
}
int ksize, anchor;
};
template<class VecUpdate> struct MorphVec
{
typedef typename VecUpdate::vtype vtype;
typedef typename vtype::lane_type stype;
int operator()(uchar** _src, int nz, uchar* _dst, int width) const
{
const stype** src = (const stype**)_src;
stype* dst = (stype*)_dst;
int i, k;
VecUpdate updateOp;
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes )
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load(sptr);
vtype s1 = vx_load(sptr + vtype::nlanes);
vtype s2 = vx_load(sptr + 2*vtype::nlanes);
vtype s3 = vx_load(sptr + 3*vtype::nlanes);
for( k = 1; k < nz; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load(sptr));
s1 = updateOp(s1, vx_load(sptr + vtype::nlanes));
s2 = updateOp(s2, vx_load(sptr + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load(sptr + 3*vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
v_store(dst + i + 2*vtype::nlanes, s2);
v_store(dst + i + 3*vtype::nlanes, s3);
}
if( i <= width - 2*vtype::nlanes )
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load(sptr);
vtype s1 = vx_load(sptr + vtype::nlanes);
for( k = 1; k < nz; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load(sptr));
s1 = updateOp(s1, vx_load(sptr + vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s0 = vx_load(src[0] + i);
for( k = 1; k < nz; k++ )
s0 = updateOp(s0, vx_load(src[k] + i));
v_store(dst + i, s0);
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s0 = vx_load_low(src[0] + i);
for( k = 1; k < nz; k++ )
s0 = updateOp(s0, vx_load_low(src[k] + i));
v_store_low(dst + i, s0);
i += vtype::nlanes/2;
}
return i;
}
};
template <typename T> struct VMin
{
typedef T vtype;
vtype operator()(const vtype& a, const vtype& b) const { return v_min(a,b); }
};
template <typename T> struct VMax
{
typedef T vtype;
vtype operator()(const vtype& a, const vtype& b) const { return v_max(a,b); }
};
typedef MorphRowVec<VMin<v_uint8> > ErodeRowVec8u;
typedef MorphRowVec<VMax<v_uint8> > DilateRowVec8u;
typedef MorphRowVec<VMin<v_uint16> > ErodeRowVec16u;
typedef MorphRowVec<VMax<v_uint16> > DilateRowVec16u;
typedef MorphRowVec<VMin<v_int16> > ErodeRowVec16s;
typedef MorphRowVec<VMax<v_int16> > DilateRowVec16s;
typedef MorphRowVec<VMin<v_float32> > ErodeRowVec32f;
typedef MorphRowVec<VMax<v_float32> > DilateRowVec32f;
typedef MorphColumnVec<VMin<v_uint8> > ErodeColumnVec8u;
typedef MorphColumnVec<VMax<v_uint8> > DilateColumnVec8u;
typedef MorphColumnVec<VMin<v_uint16> > ErodeColumnVec16u;
typedef MorphColumnVec<VMax<v_uint16> > DilateColumnVec16u;
typedef MorphColumnVec<VMin<v_int16> > ErodeColumnVec16s;
typedef MorphColumnVec<VMax<v_int16> > DilateColumnVec16s;
typedef MorphColumnVec<VMin<v_float32> > ErodeColumnVec32f;
typedef MorphColumnVec<VMax<v_float32> > DilateColumnVec32f;
typedef MorphVec<VMin<v_uint8> > ErodeVec8u;
typedef MorphVec<VMax<v_uint8> > DilateVec8u;
typedef MorphVec<VMin<v_uint16> > ErodeVec16u;
typedef MorphVec<VMax<v_uint16> > DilateVec16u;
typedef MorphVec<VMin<v_int16> > ErodeVec16s;
typedef MorphVec<VMax<v_int16> > DilateVec16s;
typedef MorphVec<VMin<v_float32> > ErodeVec32f;
typedef MorphVec<VMax<v_float32> > DilateVec32f;
#else
typedef MorphRowNoVec ErodeRowVec8u;
typedef MorphRowNoVec DilateRowVec8u;
typedef MorphColumnNoVec ErodeColumnVec8u;
typedef MorphColumnNoVec DilateColumnVec8u;
typedef MorphRowNoVec ErodeRowVec16u;
typedef MorphRowNoVec DilateRowVec16u;
typedef MorphRowNoVec ErodeRowVec16s;
typedef MorphRowNoVec DilateRowVec16s;
typedef MorphRowNoVec ErodeRowVec32f;
typedef MorphRowNoVec DilateRowVec32f;
typedef MorphColumnNoVec ErodeColumnVec16u;
typedef MorphColumnNoVec DilateColumnVec16u;
typedef MorphColumnNoVec ErodeColumnVec16s;
typedef MorphColumnNoVec DilateColumnVec16s;
typedef MorphColumnNoVec ErodeColumnVec32f;
typedef MorphColumnNoVec DilateColumnVec32f;
typedef MorphNoVec ErodeVec8u;
typedef MorphNoVec DilateVec8u;
typedef MorphNoVec ErodeVec16u;
typedef MorphNoVec DilateVec16u;
typedef MorphNoVec ErodeVec16s;
typedef MorphNoVec DilateVec16s;
typedef MorphNoVec ErodeVec32f;
typedef MorphNoVec DilateVec32f;
#endif
typedef MorphRowNoVec ErodeRowVec64f;
typedef MorphRowNoVec DilateRowVec64f;
typedef MorphColumnNoVec ErodeColumnVec64f;
typedef MorphColumnNoVec DilateColumnVec64f;
typedef MorphNoVec ErodeVec64f;
typedef MorphNoVec DilateVec64f;
template<class Op, class VecOp> struct MorphRowFilter : public BaseRowFilter
{
typedef typename Op::rtype T;
MorphRowFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
{
ksize = _ksize;
anchor = _anchor;
}
void operator()(const uchar* src, uchar* dst, int width, int cn) CV_OVERRIDE
{
int i, j, k, _ksize = ksize*cn;
const T* S = (const T*)src;
Op op;
T* D = (T*)dst;
if( _ksize == cn )
{
for( i = 0; i < width*cn; i++ )
D[i] = S[i];
return;
}
int i0 = vecOp(src, dst, width, cn);
width *= cn;
for( k = 0; k < cn; k++, S++, D++ )
{
for( i = i0; i <= width - cn*2; i += cn*2 )
{
const T* s = S + i;
T m = s[cn];
for( j = cn*2; j < _ksize; j += cn )
m = op(m, s[j]);
D[i] = op(m, s[0]);
D[i+cn] = op(m, s[j]);
}
for( ; i < width; i += cn )
{
const T* s = S + i;
T m = s[0];
for( j = cn; j < _ksize; j += cn )
m = op(m, s[j]);
D[i] = m;
}
}
}
VecOp vecOp;
};
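// The unrolled row loop above produces two outputs per iteration: m holds the
// min/max over the shared part of the window, s[cn] .. s[_ksize-cn], so only s[0]
// has to be folded in for D[i] and only s[_ksize] for D[i+cn], roughly halving
// the work compared with recomputing every window from scratch.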
template<class Op, class VecOp> struct MorphColumnFilter : public BaseColumnFilter
{
typedef typename Op::rtype T;
MorphColumnFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
{
ksize = _ksize;
anchor = _anchor;
}
void operator()(const uchar** _src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
int i, k, _ksize = ksize;
const T** src = (const T**)_src;
T* D = (T*)dst;
Op op;
int i0 = vecOp(_src, dst, dststep, count, width);
dststep /= sizeof(D[0]);
for( ; _ksize > 1 && count > 1; count -= 2, D += dststep*2, src += 2 )
{
i = i0;
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
const T* sptr = src[1] + i;
T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
for( k = 2; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
}
sptr = src[0] + i;
D[i] = op(s0, sptr[0]);
D[i+1] = op(s1, sptr[1]);
D[i+2] = op(s2, sptr[2]);
D[i+3] = op(s3, sptr[3]);
sptr = src[k] + i;
D[i+dststep] = op(s0, sptr[0]);
D[i+dststep+1] = op(s1, sptr[1]);
D[i+dststep+2] = op(s2, sptr[2]);
D[i+dststep+3] = op(s3, sptr[3]);
}
#endif
for( ; i < width; i++ )
{
T s0 = src[1][i];
for( k = 2; k < _ksize; k++ )
s0 = op(s0, src[k][i]);
D[i] = op(s0, src[0][i]);
D[i+dststep] = op(s0, src[k][i]);
}
}
for( ; count > 0; count--, D += dststep, src++ )
{
i = i0;
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
const T* sptr = src[0] + i;
T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
for( k = 1; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
}
D[i] = s0; D[i+1] = s1;
D[i+2] = s2; D[i+3] = s3;
}
#endif
for( ; i < width; i++ )
{
T s0 = src[0][i];
for( k = 1; k < _ksize; k++ )
s0 = op(s0, src[k][i]);
D[i] = s0;
}
}
}
VecOp vecOp;
};
template<class Op, class VecOp> struct MorphFilter : BaseFilter
{
typedef typename Op::rtype T;
MorphFilter( const Mat& _kernel, Point _anchor )
{
anchor = _anchor;
ksize = _kernel.size();
CV_Assert( _kernel.type() == CV_8U );
std::vector<uchar> coeffs; // we do not really need the values of non-zero
// kernel elements, just their locations
preprocess2DKernel( _kernel, coords, coeffs );
ptrs.resize( coords.size() );
}
void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn) CV_OVERRIDE
{
const Point* pt = &coords[0];
const T** kp = (const T**)&ptrs[0];
int i, k, nz = (int)coords.size();
Op op;
width *= cn;
for( ; count > 0; count--, dst += dststep, src++ )
{
T* D = (T*)dst;
for( k = 0; k < nz; k++ )
kp[k] = (const T*)src[pt[k].y] + pt[k].x*cn;
i = vecOp(&ptrs[0], nz, dst, width);
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
const T* sptr = kp[0] + i;
T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
for( k = 1; k < nz; k++ )
{
sptr = kp[k] + i;
s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
}
D[i] = s0; D[i+1] = s1;
D[i+2] = s2; D[i+3] = s3;
}
#endif
for( ; i < width; i++ )
{
T s0 = kp[0][i];
for( k = 1; k < nz; k++ )
s0 = op(s0, kp[k][i]);
D[i] = s0;
}
}
}
std::vector<Point> coords;
std::vector<uchar*> ptrs;
VecOp vecOp;
};
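// MorphFilter covers arbitrary (non-rectangular) structuring elements: the
// non-zero kernel positions are flattened into coords by preprocess2DKernel,
// ptrs[] is re-pointed at the corresponding source pixels for every output row,
// and the min/max is folded over those nz pointers per output pixel.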
}
namespace cv {
/////////////////////////////////// External Interface /////////////////////////////////////
cv::Ptr<cv::BaseRowFilter> cv::getMorphologyRowFilter(int op, int type, int ksize, int anchor)
Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor)
{
int depth = CV_MAT_DEPTH(type);
if( anchor < 0 )
anchor = ksize/2;
CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
return makePtr<MorphRowFilter<MinOp<uchar>,
ErodeRowVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphRowFilter<MinOp<ushort>,
ErodeRowVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphRowFilter<MinOp<short>,
ErodeRowVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphRowFilter<MinOp<float>,
ErodeRowVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphRowFilter<MinOp<double>,
ErodeRowVec64f> >(ksize, anchor);
}
else
{
if( depth == CV_8U )
return makePtr<MorphRowFilter<MaxOp<uchar>,
DilateRowVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphRowFilter<MaxOp<ushort>,
DilateRowVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphRowFilter<MaxOp<short>,
DilateRowVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphRowFilter<MaxOp<float>,
DilateRowVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphRowFilter<MaxOp<double>,
DilateRowVec64f> >(ksize, anchor);
}
CV_INSTRUMENT_REGION();
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
CV_CPU_DISPATCH(getMorphologyRowFilter, (op, type, ksize, anchor),
CV_CPU_DISPATCH_MODES_ALL);
}
cv::Ptr<cv::BaseColumnFilter> cv::getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
{
int depth = CV_MAT_DEPTH(type);
if( anchor < 0 )
anchor = ksize/2;
CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
return makePtr<MorphColumnFilter<MinOp<uchar>,
ErodeColumnVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphColumnFilter<MinOp<ushort>,
ErodeColumnVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphColumnFilter<MinOp<short>,
ErodeColumnVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphColumnFilter<MinOp<float>,
ErodeColumnVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphColumnFilter<MinOp<double>,
ErodeColumnVec64f> >(ksize, anchor);
}
else
{
if( depth == CV_8U )
return makePtr<MorphColumnFilter<MaxOp<uchar>,
DilateColumnVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphColumnFilter<MaxOp<ushort>,
DilateColumnVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphColumnFilter<MaxOp<short>,
DilateColumnVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphColumnFilter<MaxOp<float>,
DilateColumnVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphColumnFilter<MaxOp<double>,
DilateColumnVec64f> >(ksize, anchor);
}
CV_INSTRUMENT_REGION();
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
CV_CPU_DISPATCH(getMorphologyColumnFilter, (op, type, ksize, anchor),
CV_CPU_DISPATCH_MODES_ALL);
}
cv::Ptr<cv::BaseFilter> cv::getMorphologyFilter(int op, int type, InputArray _kernel, Point anchor)
Ptr<BaseFilter> getMorphologyFilter(int op, int type, InputArray _kernel, Point anchor)
{
Mat kernel = _kernel.getMat();
int depth = CV_MAT_DEPTH(type);
anchor = normalizeAnchor(anchor, kernel.size());
CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
return makePtr<MorphFilter<MinOp<uchar>, ErodeVec8u> >(kernel, anchor);
if( depth == CV_16U )
return makePtr<MorphFilter<MinOp<ushort>, ErodeVec16u> >(kernel, anchor);
if( depth == CV_16S )
return makePtr<MorphFilter<MinOp<short>, ErodeVec16s> >(kernel, anchor);
if( depth == CV_32F )
return makePtr<MorphFilter<MinOp<float>, ErodeVec32f> >(kernel, anchor);
if( depth == CV_64F )
return makePtr<MorphFilter<MinOp<double>, ErodeVec64f> >(kernel, anchor);
}
else
{
if( depth == CV_8U )
return makePtr<MorphFilter<MaxOp<uchar>, DilateVec8u> >(kernel, anchor);
if( depth == CV_16U )
return makePtr<MorphFilter<MaxOp<ushort>, DilateVec16u> >(kernel, anchor);
if( depth == CV_16S )
return makePtr<MorphFilter<MaxOp<short>, DilateVec16s> >(kernel, anchor);
if( depth == CV_32F )
return makePtr<MorphFilter<MaxOp<float>, DilateVec32f> >(kernel, anchor);
if( depth == CV_64F )
return makePtr<MorphFilter<MaxOp<double>, DilateVec64f> >(kernel, anchor);
}
CV_INSTRUMENT_REGION();
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
Mat kernel = _kernel.getMat();
CV_CPU_DISPATCH(getMorphologyFilter, (op, type, kernel, anchor),
CV_CPU_DISPATCH_MODES_ALL);
}
cv::Ptr<cv::FilterEngine> cv::createMorphologyFilter( int op, int type, InputArray _kernel,
Point anchor, int _rowBorderType, int _columnBorderType,
const Scalar& _borderValue )
Ptr<FilterEngine> createMorphologyFilter(
int op, int type, InputArray _kernel,
Point anchor, int _rowBorderType, int _columnBorderType,
const Scalar& _borderValue)
{
Mat kernel = _kernel.getMat();
anchor = normalizeAnchor(anchor, kernel.size());
@@ -862,7 +132,7 @@ cv::Ptr<cv::FilterEngine> cv::createMorphologyFilter( int op, int type, InputArr
}
cv::Mat cv::getStructuringElement(int shape, Size ksize, Point anchor)
Mat getStructuringElement(int shape, Size ksize, Point anchor)
{
int i, j;
int r = 0, c = 0;
@@ -915,9 +185,6 @@ cv::Mat cv::getStructuringElement(int shape, Size ksize, Point anchor)
return elem;
}
namespace cv
{
// ===== 1. replacement implementation
static bool halMorph(int op, int src_type, int dst_type,
@@ -1732,9 +999,7 @@ static void morphOp( int op, InputArray _src, OutputArray _dst,
(src.isSubmatrix() && !isolated));
}
}
void cv::erode( InputArray src, OutputArray dst, InputArray kernel,
void erode( InputArray src, OutputArray dst, InputArray kernel,
Point anchor, int iterations,
int borderType, const Scalar& borderValue )
{
@@ -1744,7 +1009,7 @@ void cv::erode( InputArray src, OutputArray dst, InputArray kernel,
}
void cv::dilate( InputArray src, OutputArray dst, InputArray kernel,
void dilate( InputArray src, OutputArray dst, InputArray kernel,
Point anchor, int iterations,
int borderType, const Scalar& borderValue )
{
@@ -1755,8 +1020,6 @@ void cv::dilate( InputArray src, OutputArray dst, InputArray kernel,
#ifdef HAVE_OPENCL
namespace cv {
static bool ocl_morphologyEx(InputArray _src, OutputArray _dst, int op,
InputArray kernel, Point anchor, int iterations,
int borderType, const Scalar& borderValue)
@@ -1813,13 +1076,11 @@ static bool ocl_morphologyEx(InputArray _src, OutputArray _dst, int op,
return true;
}
}
#endif
#define IPP_DISABLE_MORPH_ADV 1
#ifdef HAVE_IPP
#if !IPP_DISABLE_MORPH_ADV
namespace cv {
static bool ipp_morphologyEx(int op, InputArray _src, OutputArray _dst,
InputArray _kernel,
Point anchor, int iterations,
@@ -1884,11 +1145,10 @@ static bool ipp_morphologyEx(int op, InputArray _src, OutputArray _dst,
return false;
#endif
}
}
#endif
#endif
void cv::morphologyEx( InputArray _src, OutputArray _dst, int op,
void morphologyEx( InputArray _src, OutputArray _dst, int op,
InputArray _kernel, Point anchor, int iterations,
int borderType, const Scalar& borderValue )
{
@@ -1985,6 +1245,8 @@ void cv::morphologyEx( InputArray _src, OutputArray _dst, int op,
}
}
} // namespace cv
CV_IMPL IplConvKernel *
cvCreateStructuringElementEx( int cols, int rows,
int anchorX, int anchorY,

@@ -0,0 +1,846 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <limits.h>
#include "opencv2/core/hal/intrin.hpp"
/****************************************************************************************\
Basic Morphological Operations: Erosion & Dilation
\****************************************************************************************/
namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
// forward declarations
Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor);
Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor);
Ptr<BaseFilter> getMorphologyFilter(int op, int type, const Mat& kernel, Point anchor);
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
namespace {
template<typename T> struct MinOp
{
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(const T a, const T b) const { return std::min(a, b); }
};
template<typename T> struct MaxOp
{
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(const T a, const T b) const { return std::max(a, b); }
};
#if !defined(CV_SIMD) // min/max operations are usually fast enough (no control-flow 'if' statements needed)
#undef CV_MIN_8U
#undef CV_MAX_8U
#define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b)))
#define CV_MAX_8U(a,b) ((a) + CV_FAST_CAST_8U((b) - (a)))
template<> inline uchar MinOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MIN_8U(a, b); }
template<> inline uchar MaxOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MAX_8U(a, b); }
#endif
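// Editorial note (illustration, not part of the original patch): CV_FAST_CAST_8U(x)
// saturates x into [0, 255] (in OpenCV it is backed by a small lookup table), which
// makes the macros above branchless. For example:
//   CV_MIN_8U(10, 200) = 10  - sat(10 - 200) = 10  - 0   = 10
//   CV_MIN_8U(200, 10) = 200 - sat(200 - 10) = 200 - 190 = 10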
struct MorphRowNoVec
{
MorphRowNoVec(int, int) {}
int operator()(const uchar*, uchar*, int, int) const { return 0; }
};
struct MorphColumnNoVec
{
MorphColumnNoVec(int, int) {}
int operator()(const uchar**, uchar*, int, int, int) const { return 0; }
};
struct MorphNoVec
{
int operator()(uchar**, int, uchar*, int) const { return 0; }
};
#if CV_SIMD
template<class VecUpdate> struct MorphRowVec
{
typedef typename VecUpdate::vtype vtype;
typedef typename vtype::lane_type stype;
MorphRowVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
int operator()(const uchar* src, uchar* dst, int width, int cn) const
{
CV_INSTRUMENT_REGION();
int i, k, _ksize = ksize*cn;
width *= cn;
VecUpdate updateOp;
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes )
{
vtype s0 = vx_load((const stype*)src + i);
vtype s1 = vx_load((const stype*)src + i + vtype::nlanes);
vtype s2 = vx_load((const stype*)src + i + 2*vtype::nlanes);
vtype s3 = vx_load((const stype*)src + i + 3*vtype::nlanes);
for (k = cn; k < _ksize; k += cn)
{
s0 = updateOp(s0, vx_load((const stype*)src + i + k));
s1 = updateOp(s1, vx_load((const stype*)src + i + k + vtype::nlanes));
s2 = updateOp(s2, vx_load((const stype*)src + i + k + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load((const stype*)src + i + k + 3*vtype::nlanes));
}
v_store((stype*)dst + i, s0);
v_store((stype*)dst + i + vtype::nlanes, s1);
v_store((stype*)dst + i + 2*vtype::nlanes, s2);
v_store((stype*)dst + i + 3*vtype::nlanes, s3);
}
if( i <= width - 2*vtype::nlanes )
{
vtype s0 = vx_load((const stype*)src + i);
vtype s1 = vx_load((const stype*)src + i + vtype::nlanes);
for( k = cn; k < _ksize; k += cn )
{
s0 = updateOp(s0, vx_load((const stype*)src + i + k));
s1 = updateOp(s1, vx_load((const stype*)src + i + k + vtype::nlanes));
}
v_store((stype*)dst + i, s0);
v_store((stype*)dst + i + vtype::nlanes, s1);
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s = vx_load((const stype*)src + i);
for( k = cn; k < _ksize; k += cn )
s = updateOp(s, vx_load((const stype*)src + i + k));
v_store((stype*)dst + i, s);
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s = vx_load_low((const stype*)src + i);
for( k = cn; k < _ksize; k += cn )
s = updateOp(s, vx_load_low((const stype*)src + i + k));
v_store_low((stype*)dst + i, s);
i += vtype::nlanes/2;
}
return i - i % cn;
}
int ksize, anchor;
};
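// Editorial note (not part of the original patch): the row pass above keeps a
// running min/max over the horizontal kernel window entirely in registers. It
// consumes the row in blocks of 4, 2, 1 and 1/2 vector widths and returns the
// number of fully processed elements, so the scalar MorphRowFilter loop further
// below only has to finish the remaining tail.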
template<class VecUpdate> struct MorphColumnVec
{
typedef typename VecUpdate::vtype vtype;
typedef typename vtype::lane_type stype;
MorphColumnVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
int operator()(const uchar** _src, uchar* _dst, int dststep, int count, int width) const
{
CV_INSTRUMENT_REGION();
int i = 0, k, _ksize = ksize;
VecUpdate updateOp;
for( i = 0; i < count + ksize - 1; i++ )
CV_Assert( ((size_t)_src[i] & (CV_SIMD_WIDTH-1)) == 0 );
const stype** src = (const stype**)_src;
stype* dst = (stype*)_dst;
dststep /= sizeof(dst[0]);
for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
{
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes)
{
const stype* sptr = src[1] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
vtype s2 = vx_load_aligned(sptr + 2*vtype::nlanes);
vtype s3 = vx_load_aligned(sptr + 3*vtype::nlanes);
for( k = 2; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
s2 = updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes));
}
sptr = src[0] + i;
v_store(dst + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
v_store(dst + i + 2*vtype::nlanes, updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes)));
v_store(dst + i + 3*vtype::nlanes, updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes)));
sptr = src[k] + i;
v_store(dst + dststep + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + dststep + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
v_store(dst + dststep + i + 2*vtype::nlanes, updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes)));
v_store(dst + dststep + i + 3*vtype::nlanes, updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes)));
}
if( i <= width - 2*vtype::nlanes )
{
const stype* sptr = src[1] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
for( k = 2; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
}
sptr = src[0] + i;
v_store(dst + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
sptr = src[k] + i;
v_store(dst + dststep + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + dststep + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s0 = vx_load_aligned(src[1] + i);
for( k = 2; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_aligned(src[k] + i));
v_store(dst + i, updateOp(s0, vx_load_aligned(src[0] + i)));
v_store(dst + dststep + i, updateOp(s0, vx_load_aligned(src[k] + i)));
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s0 = vx_load_low(src[1] + i);
for( k = 2; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_low(src[k] + i));
v_store_low(dst + i, updateOp(s0, vx_load_low(src[0] + i)));
v_store_low(dst + dststep + i, updateOp(s0, vx_load_low(src[k] + i)));
i += vtype::nlanes/2;
}
}
for( ; count > 0; count--, dst += dststep, src++ )
{
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes)
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
vtype s2 = vx_load_aligned(sptr + 2*vtype::nlanes);
vtype s3 = vx_load_aligned(sptr + 3*vtype::nlanes);
for( k = 1; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
s2 = updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
v_store(dst + i + 2*vtype::nlanes, s2);
v_store(dst + i + 3*vtype::nlanes, s3);
}
if( i <= width - 2*vtype::nlanes )
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
for( k = 1; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s0 = vx_load_aligned(src[0] + i);
for( k = 1; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_aligned(src[k] + i));
v_store(dst + i, s0);
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s0 = vx_load_low(src[0] + i);
for( k = 1; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_low(src[k] + i));
v_store_low(dst + i, s0);
i += vtype::nlanes/2;
}
}
return i;
}
int ksize, anchor;
};
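// Editorial note (not part of the original patch): while ksize > 1 the column pass
// produces two output rows per iteration. Consecutive output rows share ksize-1
// input rows, so the reduction over src[1..ksize-1] is computed once and then
// combined with src[0] for the first output row and with src[ksize] for the second.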
template<class VecUpdate> struct MorphVec
{
typedef typename VecUpdate::vtype vtype;
typedef typename vtype::lane_type stype;
int operator()(uchar** _src, int nz, uchar* _dst, int width) const
{
CV_INSTRUMENT_REGION();
const stype** src = (const stype**)_src;
stype* dst = (stype*)_dst;
int i, k;
VecUpdate updateOp;
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes )
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load(sptr);
vtype s1 = vx_load(sptr + vtype::nlanes);
vtype s2 = vx_load(sptr + 2*vtype::nlanes);
vtype s3 = vx_load(sptr + 3*vtype::nlanes);
for( k = 1; k < nz; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load(sptr));
s1 = updateOp(s1, vx_load(sptr + vtype::nlanes));
s2 = updateOp(s2, vx_load(sptr + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load(sptr + 3*vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
v_store(dst + i + 2*vtype::nlanes, s2);
v_store(dst + i + 3*vtype::nlanes, s3);
}
if( i <= width - 2*vtype::nlanes )
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load(sptr);
vtype s1 = vx_load(sptr + vtype::nlanes);
for( k = 1; k < nz; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load(sptr));
s1 = updateOp(s1, vx_load(sptr + vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s0 = vx_load(src[0] + i);
for( k = 1; k < nz; k++ )
s0 = updateOp(s0, vx_load(src[k] + i));
v_store(dst + i, s0);
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s0 = vx_load_low(src[0] + i);
for( k = 1; k < nz; k++ )
s0 = updateOp(s0, vx_load_low(src[k] + i));
v_store_low(dst + i, s0);
i += vtype::nlanes/2;
}
return i;
}
};
template <typename T> struct VMin
{
typedef T vtype;
vtype operator()(const vtype& a, const vtype& b) const { return v_min(a,b); }
};
template <typename T> struct VMax
{
typedef T vtype;
vtype operator()(const vtype& a, const vtype& b) const { return v_max(a,b); }
};
typedef MorphRowVec<VMin<v_uint8> > ErodeRowVec8u;
typedef MorphRowVec<VMax<v_uint8> > DilateRowVec8u;
typedef MorphRowVec<VMin<v_uint16> > ErodeRowVec16u;
typedef MorphRowVec<VMax<v_uint16> > DilateRowVec16u;
typedef MorphRowVec<VMin<v_int16> > ErodeRowVec16s;
typedef MorphRowVec<VMax<v_int16> > DilateRowVec16s;
typedef MorphRowVec<VMin<v_float32> > ErodeRowVec32f;
typedef MorphRowVec<VMax<v_float32> > DilateRowVec32f;
typedef MorphColumnVec<VMin<v_uint8> > ErodeColumnVec8u;
typedef MorphColumnVec<VMax<v_uint8> > DilateColumnVec8u;
typedef MorphColumnVec<VMin<v_uint16> > ErodeColumnVec16u;
typedef MorphColumnVec<VMax<v_uint16> > DilateColumnVec16u;
typedef MorphColumnVec<VMin<v_int16> > ErodeColumnVec16s;
typedef MorphColumnVec<VMax<v_int16> > DilateColumnVec16s;
typedef MorphColumnVec<VMin<v_float32> > ErodeColumnVec32f;
typedef MorphColumnVec<VMax<v_float32> > DilateColumnVec32f;
typedef MorphVec<VMin<v_uint8> > ErodeVec8u;
typedef MorphVec<VMax<v_uint8> > DilateVec8u;
typedef MorphVec<VMin<v_uint16> > ErodeVec16u;
typedef MorphVec<VMax<v_uint16> > DilateVec16u;
typedef MorphVec<VMin<v_int16> > ErodeVec16s;
typedef MorphVec<VMax<v_int16> > DilateVec16s;
typedef MorphVec<VMin<v_float32> > ErodeVec32f;
typedef MorphVec<VMax<v_float32> > DilateVec32f;
#else
typedef MorphRowNoVec ErodeRowVec8u;
typedef MorphRowNoVec DilateRowVec8u;
typedef MorphColumnNoVec ErodeColumnVec8u;
typedef MorphColumnNoVec DilateColumnVec8u;
typedef MorphRowNoVec ErodeRowVec16u;
typedef MorphRowNoVec DilateRowVec16u;
typedef MorphRowNoVec ErodeRowVec16s;
typedef MorphRowNoVec DilateRowVec16s;
typedef MorphRowNoVec ErodeRowVec32f;
typedef MorphRowNoVec DilateRowVec32f;
typedef MorphColumnNoVec ErodeColumnVec16u;
typedef MorphColumnNoVec DilateColumnVec16u;
typedef MorphColumnNoVec ErodeColumnVec16s;
typedef MorphColumnNoVec DilateColumnVec16s;
typedef MorphColumnNoVec ErodeColumnVec32f;
typedef MorphColumnNoVec DilateColumnVec32f;
typedef MorphNoVec ErodeVec8u;
typedef MorphNoVec DilateVec8u;
typedef MorphNoVec ErodeVec16u;
typedef MorphNoVec DilateVec16u;
typedef MorphNoVec ErodeVec16s;
typedef MorphNoVec DilateVec16s;
typedef MorphNoVec ErodeVec32f;
typedef MorphNoVec DilateVec32f;
#endif
typedef MorphRowNoVec ErodeRowVec64f;
typedef MorphRowNoVec DilateRowVec64f;
typedef MorphColumnNoVec ErodeColumnVec64f;
typedef MorphColumnNoVec DilateColumnVec64f;
typedef MorphNoVec ErodeVec64f;
typedef MorphNoVec DilateVec64f;
template<class Op, class VecOp> struct MorphRowFilter : public BaseRowFilter
{
typedef typename Op::rtype T;
MorphRowFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
{
ksize = _ksize;
anchor = _anchor;
}
void operator()(const uchar* src, uchar* dst, int width, int cn) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int i, j, k, _ksize = ksize*cn;
const T* S = (const T*)src;
Op op;
T* D = (T*)dst;
if( _ksize == cn )
{
for( i = 0; i < width*cn; i++ )
D[i] = S[i];
return;
}
int i0 = vecOp(src, dst, width, cn);
width *= cn;
for( k = 0; k < cn; k++, S++, D++ )
{
for( i = i0; i <= width - cn*2; i += cn*2 )
{
const T* s = S + i;
T m = s[cn];
for( j = cn*2; j < _ksize; j += cn )
m = op(m, s[j]);
D[i] = op(m, s[0]);
D[i+cn] = op(m, s[j]);
}
for( ; i < width; i += cn )
{
const T* s = S + i;
T m = s[0];
for( j = cn; j < _ksize; j += cn )
m = op(m, s[j]);
D[i] = m;
}
}
}
VecOp vecOp;
};
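// Editorial sketch (not part of the original patch): the scalar tail above uses the
// same window-sharing trick per channel. With cn = 1 and ksize = 3, the windows for
// outputs i and i+1 are {s[0], s[1], s[2]} and {s[1], s[2], s[3]}; m = op(s[1], s[2])
// is computed once, then D[i] = op(m, s[0]) and D[i+1] = op(m, s[3]).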
template<class Op, class VecOp> struct MorphColumnFilter : public BaseColumnFilter
{
typedef typename Op::rtype T;
MorphColumnFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
{
ksize = _ksize;
anchor = _anchor;
}
void operator()(const uchar** _src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
int i, k, _ksize = ksize;
const T** src = (const T**)_src;
T* D = (T*)dst;
Op op;
int i0 = vecOp(_src, dst, dststep, count, width);
dststep /= sizeof(D[0]);
for( ; _ksize > 1 && count > 1; count -= 2, D += dststep*2, src += 2 )
{
i = i0;
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
const T* sptr = src[1] + i;
T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
for( k = 2; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
}
sptr = src[0] + i;
D[i] = op(s0, sptr[0]);
D[i+1] = op(s1, sptr[1]);
D[i+2] = op(s2, sptr[2]);
D[i+3] = op(s3, sptr[3]);
sptr = src[k] + i;
D[i+dststep] = op(s0, sptr[0]);
D[i+dststep+1] = op(s1, sptr[1]);
D[i+dststep+2] = op(s2, sptr[2]);
D[i+dststep+3] = op(s3, sptr[3]);
}
#endif
for( ; i < width; i++ )
{
T s0 = src[1][i];
for( k = 2; k < _ksize; k++ )
s0 = op(s0, src[k][i]);
D[i] = op(s0, src[0][i]);
D[i+dststep] = op(s0, src[k][i]);
}
}
for( ; count > 0; count--, D += dststep, src++ )
{
i = i0;
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
const T* sptr = src[0] + i;
T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
for( k = 1; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
}
D[i] = s0; D[i+1] = s1;
D[i+2] = s2; D[i+3] = s3;
}
#endif
for( ; i < width; i++ )
{
T s0 = src[0][i];
for( k = 1; k < _ksize; k++ )
s0 = op(s0, src[k][i]);
D[i] = s0;
}
}
}
VecOp vecOp;
};
template<class Op, class VecOp> struct MorphFilter : BaseFilter
{
typedef typename Op::rtype T;
MorphFilter( const Mat& _kernel, Point _anchor )
{
anchor = _anchor;
ksize = _kernel.size();
CV_Assert( _kernel.type() == CV_8U );
std::vector<uchar> coeffs; // we do not really need the values of the non-zero
// kernel elements, just their locations
preprocess2DKernel( _kernel, coords, coeffs );
ptrs.resize( coords.size() );
}
void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
const Point* pt = &coords[0];
const T** kp = (const T**)&ptrs[0];
int i, k, nz = (int)coords.size();
Op op;
width *= cn;
for( ; count > 0; count--, dst += dststep, src++ )
{
T* D = (T*)dst;
for( k = 0; k < nz; k++ )
kp[k] = (const T*)src[pt[k].y] + pt[k].x*cn;
i = vecOp(&ptrs[0], nz, dst, width);
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
const T* sptr = kp[0] + i;
T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
for( k = 1; k < nz; k++ )
{
sptr = kp[k] + i;
s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
}
D[i] = s0; D[i+1] = s1;
D[i+2] = s2; D[i+3] = s3;
}
#endif
for( ; i < width; i++ )
{
T s0 = kp[0][i];
for( k = 1; k < nz; k++ )
s0 = op(s0, kp[k][i]);
D[i] = s0;
}
}
}
std::vector<Point> coords;
std::vector<uchar*> ptrs;
VecOp vecOp;
};
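// Editorial note (not part of the original patch): MorphFilter handles arbitrary
// (non-rectangular) structuring elements. preprocess2DKernel records the
// coordinates of the non-zero kernel cells; for every output row the filter
// gathers one source pointer per recorded cell and reduces across them with
// min (erode) or max (dilate).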
} // namespace anon
/////////////////////////////////// External Interface /////////////////////////////////////
Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor)
{
CV_INSTRUMENT_REGION();
int depth = CV_MAT_DEPTH(type);
if( anchor < 0 )
anchor = ksize/2;
CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
return makePtr<MorphRowFilter<MinOp<uchar>,
ErodeRowVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphRowFilter<MinOp<ushort>,
ErodeRowVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphRowFilter<MinOp<short>,
ErodeRowVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphRowFilter<MinOp<float>,
ErodeRowVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphRowFilter<MinOp<double>,
ErodeRowVec64f> >(ksize, anchor);
}
else
{
if( depth == CV_8U )
return makePtr<MorphRowFilter<MaxOp<uchar>,
DilateRowVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphRowFilter<MaxOp<ushort>,
DilateRowVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphRowFilter<MaxOp<short>,
DilateRowVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphRowFilter<MaxOp<float>,
DilateRowVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphRowFilter<MaxOp<double>,
DilateRowVec64f> >(ksize, anchor);
}
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
}
Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
{
CV_INSTRUMENT_REGION();
int depth = CV_MAT_DEPTH(type);
if( anchor < 0 )
anchor = ksize/2;
CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
return makePtr<MorphColumnFilter<MinOp<uchar>,
ErodeColumnVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphColumnFilter<MinOp<ushort>,
ErodeColumnVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphColumnFilter<MinOp<short>,
ErodeColumnVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphColumnFilter<MinOp<float>,
ErodeColumnVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphColumnFilter<MinOp<double>,
ErodeColumnVec64f> >(ksize, anchor);
}
else
{
if( depth == CV_8U )
return makePtr<MorphColumnFilter<MaxOp<uchar>,
DilateColumnVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphColumnFilter<MaxOp<ushort>,
DilateColumnVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphColumnFilter<MaxOp<short>,
DilateColumnVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphColumnFilter<MaxOp<float>,
DilateColumnVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphColumnFilter<MaxOp<double>,
DilateColumnVec64f> >(ksize, anchor);
}
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
}
Ptr<BaseFilter> getMorphologyFilter(int op, int type, const Mat& kernel, Point anchor)
{
CV_INSTRUMENT_REGION();
int depth = CV_MAT_DEPTH(type);
anchor = normalizeAnchor(anchor, kernel.size());
CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
return makePtr<MorphFilter<MinOp<uchar>, ErodeVec8u> >(kernel, anchor);
if( depth == CV_16U )
return makePtr<MorphFilter<MinOp<ushort>, ErodeVec16u> >(kernel, anchor);
if( depth == CV_16S )
return makePtr<MorphFilter<MinOp<short>, ErodeVec16s> >(kernel, anchor);
if( depth == CV_32F )
return makePtr<MorphFilter<MinOp<float>, ErodeVec32f> >(kernel, anchor);
if( depth == CV_64F )
return makePtr<MorphFilter<MinOp<double>, ErodeVec64f> >(kernel, anchor);
}
else
{
if( depth == CV_8U )
return makePtr<MorphFilter<MaxOp<uchar>, DilateVec8u> >(kernel, anchor);
if( depth == CV_16U )
return makePtr<MorphFilter<MaxOp<ushort>, DilateVec16u> >(kernel, anchor);
if( depth == CV_16S )
return makePtr<MorphFilter<MaxOp<short>, DilateVec16s> >(kernel, anchor);
if( depth == CV_32F )
return makePtr<MorphFilter<MaxOp<float>, DilateVec32f> >(kernel, anchor);
if( depth == CV_64F )
return makePtr<MorphFilter<MaxOp<double>, DilateVec64f> >(kernel, anchor);
}
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
}
#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace

@@ -0,0 +1,582 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "filter.hpp"
#include "opencv2/core/softfloat.hpp"
namespace cv {
#include "fixedpoint.inl.hpp"
}
#include "smooth.simd.hpp"
#include "smooth.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
namespace cv {
/****************************************************************************************\
Gaussian Blur
\****************************************************************************************/
Mat getGaussianKernel(int n, double sigma, int ktype)
{
CV_Assert(n > 0);
const int SMALL_GAUSSIAN_SIZE = 7;
static const float small_gaussian_tab[][SMALL_GAUSSIAN_SIZE] =
{
{1.f},
{0.25f, 0.5f, 0.25f},
{0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f},
{0.03125f, 0.109375f, 0.21875f, 0.28125f, 0.21875f, 0.109375f, 0.03125f}
};
const float* fixed_kernel = n % 2 == 1 && n <= SMALL_GAUSSIAN_SIZE && sigma <= 0 ?
small_gaussian_tab[n>>1] : 0;
CV_Assert( ktype == CV_32F || ktype == CV_64F );
Mat kernel(n, 1, ktype);
float* cf = kernel.ptr<float>();
double* cd = kernel.ptr<double>();
double sigmaX = sigma > 0 ? sigma : ((n-1)*0.5 - 1)*0.3 + 0.8;
double scale2X = -0.5/(sigmaX*sigmaX);
double sum = 0;
int i;
for( i = 0; i < n; i++ )
{
double x = i - (n-1)*0.5;
double t = fixed_kernel ? (double)fixed_kernel[i] : std::exp(scale2X*x*x);
if( ktype == CV_32F )
{
cf[i] = (float)t;
sum += cf[i];
}
else
{
cd[i] = t;
sum += cd[i];
}
}
CV_DbgAssert(fabs(sum) > 0);
sum = 1./sum;
for( i = 0; i < n; i++ )
{
if( ktype == CV_32F )
cf[i] = (float)(cf[i]*sum);
else
cd[i] *= sum;
}
return kernel;
}
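// Editorial usage sketch (not part of the original patch): the returned kernel is an
// n x 1 column vector whose coefficients sum to 1, so a separable blur can be
// assembled by hand, e.g.:
//   Mat k = getGaussianKernel(5, 1.2, CV_32F);   // 5x1 kernel
//   sepFilter2D(src, dst, -1, k, k);             // matches GaussianBlur(src, dst, Size(5,5), 1.2) up to rounding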
template <typename T>
static std::vector<T> getFixedpointGaussianKernel( int n, double sigma )
{
if (sigma <= 0)
{
if(n == 1)
return std::vector<T>(1, softdouble(1.0));
else if(n == 3)
{
T v3[] = { softdouble(0.25), softdouble(0.5), softdouble(0.25) };
return std::vector<T>(v3, v3 + 3);
}
else if(n == 5)
{
T v5[] = { softdouble(0.0625), softdouble(0.25), softdouble(0.375), softdouble(0.25), softdouble(0.0625) };
return std::vector<T>(v5, v5 + 5);
}
else if(n == 7)
{
T v7[] = { softdouble(0.03125), softdouble(0.109375), softdouble(0.21875), softdouble(0.28125), softdouble(0.21875), softdouble(0.109375), softdouble(0.03125) };
return std::vector<T>(v7, v7 + 7);
}
}
softdouble sigmaX = sigma > 0 ? softdouble(sigma) : mulAdd(softdouble(n),softdouble(0.15),softdouble(0.35));// softdouble(((n-1)*0.5 - 1)*0.3 + 0.8)
softdouble scale2X = softdouble(-0.5*0.25)/(sigmaX*sigmaX);
std::vector<softdouble> values(n);
softdouble sum(0.);
for(int i = 0, x = 1 - n; i < n; i++, x+=2 )
{
// x = i - (n - 1)*0.5
// t = std::exp(scale2X*x*x)
values[i] = exp(softdouble(x*x)*scale2X);
sum += values[i];
}
sum = softdouble::one()/sum;
std::vector<T> kernel(n);
for(int i = 0; i < n; i++ )
{
kernel[i] = values[i] * sum;
}
return kernel;
};
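// Editorial note (not part of the original patch): this mirrors getGaussianKernel()
// in softdouble arithmetic. The sigma fallback mulAdd(n, 0.15, 0.35) equals
// ((n-1)*0.5 - 1)*0.3 + 0.8 = 0.15*n + 0.35, and scale2X carries an extra factor of
// 0.25 because the loop variable x = 2*i - (n-1) is twice the usual offset
// i - (n-1)*0.5, so x*x is four times larger.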
static void getGaussianKernel(int n, double sigma, int ktype, Mat& res) { res = getGaussianKernel(n, sigma, ktype); }
template <typename T> static void getGaussianKernel(int n, double sigma, int, std::vector<T>& res) { res = getFixedpointGaussianKernel<T>(n, sigma); }
template <typename T>
static void createGaussianKernels( T & kx, T & ky, int type, Size &ksize,
double sigma1, double sigma2 )
{
int depth = CV_MAT_DEPTH(type);
if( sigma2 <= 0 )
sigma2 = sigma1;
// automatic detection of kernel size from sigma
if( ksize.width <= 0 && sigma1 > 0 )
ksize.width = cvRound(sigma1*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
if( ksize.height <= 0 && sigma2 > 0 )
ksize.height = cvRound(sigma2*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
CV_Assert( ksize.width > 0 && ksize.width % 2 == 1 &&
ksize.height > 0 && ksize.height % 2 == 1 );
sigma1 = std::max( sigma1, 0. );
sigma2 = std::max( sigma2, 0. );
getGaussianKernel( ksize.width, sigma1, std::max(depth, CV_32F), kx );
if( ksize.height == ksize.width && std::abs(sigma1 - sigma2) < DBL_EPSILON )
ky = kx;
else
getGaussianKernel( ksize.height, sigma2, std::max(depth, CV_32F), ky );
}
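// Editorial example (not part of the original patch): with an unspecified kernel
// size the width/height are derived from sigma. For an 8-bit image and sigma1 = 1.5,
// ksize.width = cvRound(1.5*3*2 + 1) | 1 = 10 | 1 = 11; deeper types use a factor
// of 4 instead of 3, giving cvRound(1.5*4*2 + 1) | 1 = 13.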
Ptr<FilterEngine> createGaussianFilter( int type, Size ksize,
double sigma1, double sigma2,
int borderType )
{
Mat kx, ky;
createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);
return createSeparableLinearFilter( type, type, kx, ky, Point(-1,-1), 0, borderType );
}
#ifdef HAVE_OPENCL
static bool ocl_GaussianBlur_8UC1(InputArray _src, OutputArray _dst, Size ksize, int ddepth,
InputArray _kernelX, InputArray _kernelY, int borderType)
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
if ( !(dev.isIntel() && (type == CV_8UC1) &&
(_src.offset() == 0) && (_src.step() % 4 == 0) &&
((ksize.width == 5 && (_src.cols() % 4 == 0)) ||
(ksize.width == 3 && (_src.cols() % 16 == 0) && (_src.rows() % 2 == 0)))) )
return false;
Mat kernelX = _kernelX.getMat().reshape(1, 1);
if (kernelX.cols % 2 != 1)
return false;
Mat kernelY = _kernelY.getMat().reshape(1, 1);
if (kernelY.cols % 2 != 1)
return false;
if (ddepth < 0)
ddepth = sdepth;
Size size = _src.size();
size_t globalsize[2] = { 0, 0 };
size_t localsize[2] = { 0, 0 };
if (ksize.width == 3)
{
globalsize[0] = size.width / 16;
globalsize[1] = size.height / 2;
}
else if (ksize.width == 5)
{
globalsize[0] = size.width / 4;
globalsize[1] = size.height / 1;
}
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
char build_opts[1024];
sprintf(build_opts, "-D %s %s%s", borderMap[borderType & ~BORDER_ISOLATED],
ocl::kernelToStr(kernelX, CV_32F, "KERNEL_MATRIX_X").c_str(),
ocl::kernelToStr(kernelY, CV_32F, "KERNEL_MATRIX_Y").c_str());
ocl::Kernel kernel;
if (ksize.width == 3)
kernel.create("gaussianBlur3x3_8UC1_cols16_rows2", cv::ocl::imgproc::gaussianBlur3x3_oclsrc, build_opts);
else if (ksize.width == 5)
kernel.create("gaussianBlur5x5_8UC1_cols4", cv::ocl::imgproc::gaussianBlur5x5_oclsrc, build_opts);
if (kernel.empty())
return false;
UMat src = _src.getUMat();
_dst.create(size, CV_MAKETYPE(ddepth, cn));
if (!(_dst.offset() == 0 && _dst.step() % 4 == 0))
return false;
UMat dst = _dst.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
idxArg = kernel.set(idxArg, (int)src.step);
idxArg = kernel.set(idxArg, ocl::KernelArg::PtrWriteOnly(dst));
idxArg = kernel.set(idxArg, (int)dst.step);
idxArg = kernel.set(idxArg, (int)dst.rows);
idxArg = kernel.set(idxArg, (int)dst.cols);
return kernel.run(2, globalsize, (localsize[0] == 0) ? NULL : localsize, false);
}
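// Editorial note (not part of the original patch): for these dedicated 8UC1 kernels
// each OpenCL work-item covers a tile of the image (16 columns x 2 rows for the 3x3
// kernel, 4 columns x 1 row for the 5x5 one), which is why the source dimensions
// are required above to be multiples of those tile sizes.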
#endif
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_GAUSSIAN_3x3>(int w, int h) { return w*h < 320 * 240; }
}
static bool openvx_gaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
double sigma1, double sigma2, int borderType)
{
if (sigma2 <= 0)
sigma2 = sigma1;
// automatic detection of kernel size from sigma
if (ksize.width <= 0 && sigma1 > 0)
ksize.width = cvRound(sigma1*6 + 1) | 1;
if (ksize.height <= 0 && sigma2 > 0)
ksize.height = cvRound(sigma2*6 + 1) | 1;
if (_src.type() != CV_8UC1 ||
_src.cols() < 3 || _src.rows() < 3 ||
ksize.width != 3 || ksize.height != 3)
return false;
sigma1 = std::max(sigma1, 0.);
sigma2 = std::max(sigma2, 0.);
if (!(sigma1 == 0.0 || (sigma1 - 0.8) < DBL_EPSILON) || !(sigma2 == 0.0 || (sigma2 - 0.8) < DBL_EPSILON) ||
ovx::skipSmallImages<VX_KERNEL_GAUSSIAN_3x3>(_src.cols(), _src.rows()))
return false;
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
return false; //Process isolated borders only
vx_enum border;
switch (borderType & ~BORDER_ISOLATED)
{
case BORDER_CONSTANT:
border = VX_BORDER_CONSTANT;
break;
case BORDER_REPLICATE:
border = VX_BORDER_REPLICATE;
break;
default:
return false;
}
try
{
ivx::Context ctx = ovx::getOpenVXContext();
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: changing the VX_CONTEXT_IMMEDIATE_BORDER attribute could lead to strange issues in multi-threaded environments,
//since the OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuGaussian3x3(ctx, ia, ib));
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#ifdef HAVE_IPP
// IW 2017u2 has a bug that doesn't allow the use of partial inMem with tiling
#if IPP_DISABLE_GAUSSIANBLUR_PARALLEL
#define IPP_GAUSSIANBLUR_PARALLEL 0
#else
#define IPP_GAUSSIANBLUR_PARALLEL 1
#endif
#ifdef HAVE_IPP_IW
class ipp_gaussianBlurParallel: public ParallelLoopBody
{
public:
ipp_gaussianBlurParallel(::ipp::IwiImage &src, ::ipp::IwiImage &dst, int kernelSize, float sigma, ::ipp::IwiBorderType &border, bool *pOk):
m_src(src), m_dst(dst), m_kernelSize(kernelSize), m_sigma(sigma), m_border(border), m_pOk(pOk) {
*m_pOk = true;
}
~ipp_gaussianBlurParallel()
{
}
virtual void operator() (const Range& range) const CV_OVERRIDE
{
CV_INSTRUMENT_REGION_IPP();
if(!*m_pOk)
return;
try
{
::ipp::IwiTile tile = ::ipp::IwiRoi(0, range.start, m_dst.m_size.width, range.end - range.start);
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterGaussian, m_src, m_dst, m_kernelSize, m_sigma, ::ipp::IwDefault(), m_border, tile);
}
catch(const ::ipp::IwException &)
{
*m_pOk = false;
return;
}
}
private:
::ipp::IwiImage &m_src;
::ipp::IwiImage &m_dst;
int m_kernelSize;
float m_sigma;
::ipp::IwiBorderType &m_border;
volatile bool *m_pOk;
const ipp_gaussianBlurParallel& operator= (const ipp_gaussianBlurParallel&);
};
#endif
static bool ipp_GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
double sigma1, double sigma2, int borderType )
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201800 && ((defined _MSC_VER && defined _M_IX86) || (defined __GNUC__ && defined __i386__))
CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(ksize); CV_UNUSED(sigma1); CV_UNUSED(sigma2); CV_UNUSED(borderType);
return false; // bug on ia32
#else
if(sigma1 != sigma2)
return false;
if(sigma1 < FLT_EPSILON)
return false;
if(ksize.width != ksize.height)
return false;
// Acquire data and begin processing
try
{
Mat src = _src.getMat();
Mat dst = _dst.getMat();
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiBorderSize borderSize = ::ipp::iwiSizeToBorderSize(ippiGetSize(ksize));
::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
if(!ippBorder)
return false;
const int threads = ippiSuggestThreadsNum(iwDst, 2);
if(IPP_GAUSSIANBLUR_PARALLEL && threads > 1) {
bool ok;
ipp_gaussianBlurParallel invoker(iwSrc, iwDst, ksize.width, (float) sigma1, ippBorder, &ok);
if(!ok)
return false;
const Range range(0, (int) iwDst.m_size.height);
parallel_for_(range, invoker, threads*4);
if(!ok)
return false;
} else {
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterGaussian, iwSrc, iwDst, ksize.width, sigma1, ::ipp::IwDefault(), ippBorder);
}
}
catch (const ::ipp::IwException &)
{
return false;
}
return true;
#endif
#else
CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(ksize); CV_UNUSED(sigma1); CV_UNUSED(sigma2); CV_UNUSED(borderType);
return false;
#endif
}
#endif
void GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
double sigma1, double sigma2,
int borderType)
{
CV_INSTRUMENT_REGION();
int type = _src.type();
Size size = _src.size();
_dst.create( size, type );
if( (borderType & ~BORDER_ISOLATED) != BORDER_CONSTANT &&
((borderType & BORDER_ISOLATED) != 0 || !_src.getMat().isSubmatrix()) )
{
if( size.height == 1 )
ksize.height = 1;
if( size.width == 1 )
ksize.width = 1;
}
if( ksize.width == 1 && ksize.height == 1 )
{
_src.copyTo(_dst);
return;
}
bool useOpenCL = (ocl::isOpenCLActivated() && _dst.isUMat() && _src.dims() <= 2 &&
((ksize.width == 3 && ksize.height == 3) ||
(ksize.width == 5 && ksize.height == 5)) &&
_src.rows() > ksize.height && _src.cols() > ksize.width);
CV_UNUSED(useOpenCL);
int sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
Mat kx, ky;
createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);
CV_OCL_RUN(useOpenCL, ocl_GaussianBlur_8UC1(_src, _dst, ksize, CV_MAT_DEPTH(type), kx, ky, borderType));
CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2 && (size_t)_src.rows() > kx.total() && (size_t)_src.cols() > kx.total(),
ocl_sepFilter2D(_src, _dst, sdepth, kx, ky, Point(-1, -1), 0, borderType))
Mat src = _src.getMat();
Mat dst = _dst.getMat();
Point ofs;
Size wsz(src.cols, src.rows);
if(!(borderType & BORDER_ISOLATED))
src.locateROI( wsz, ofs );
CALL_HAL(gaussianBlur, cv_hal_gaussianBlur, src.ptr(), src.step, dst.ptr(), dst.step, src.cols, src.rows, sdepth, cn,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, ksize.width, ksize.height,
sigma1, sigma2, borderType&~BORDER_ISOLATED);
CV_OVX_RUN(true,
openvx_gaussianBlur(src, dst, ksize, sigma1, sigma2, borderType))
CV_IPP_RUN_FAST(ipp_GaussianBlur(src, dst, ksize, sigma1, sigma2, borderType));
if(sdepth == CV_8U && ((borderType & BORDER_ISOLATED) || !_src.getMat().isSubmatrix()))
{
std::vector<ufixedpoint16> fkx, fky;
createGaussianKernels(fkx, fky, type, ksize, sigma1, sigma2);
if (src.data == dst.data)
src = src.clone();
CV_CPU_DISPATCH(GaussianBlurFixedPoint, (src, dst, (const uint16_t*)&fkx[0], (int)fkx.size(), (const uint16_t*)&fky[0], (int)fky.size(), borderType),
CV_CPU_DISPATCH_MODES_ALL);
return;
}
sepFilter2D(src, dst, sdepth, kx, ky, Point(-1, -1), 0, borderType);
}
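// Editorial usage sketch (not part of the original patch):
//   Mat img = imread("input.png", IMREAD_GRAYSCALE);  // hypothetical file name, CV_8UC1
//   Mat blurred;
//   GaussianBlur(img, blurred, Size(5, 5), 1.5);
// An 8-bit, non-submatrix input like this takes the fixed-point
// GaussianBlurFixedPoint dispatch branch above; other depths fall back to
// sepFilter2D with the floating-point kernels.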
} // namespace
//////////////////////////////////////////////////////////////////////////////////////////
CV_IMPL void
cvSmooth( const void* srcarr, void* dstarr, int smooth_type,
int param1, int param2, double param3, double param4 )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0;
CV_Assert( dst.size() == src.size() &&
(smooth_type == CV_BLUR_NO_SCALE || dst.type() == src.type()) );
if( param2 <= 0 )
param2 = param1;
if( smooth_type == CV_BLUR || smooth_type == CV_BLUR_NO_SCALE )
cv::boxFilter( src, dst, dst.depth(), cv::Size(param1, param2), cv::Point(-1,-1),
smooth_type == CV_BLUR, cv::BORDER_REPLICATE );
else if( smooth_type == CV_GAUSSIAN )
cv::GaussianBlur( src, dst, cv::Size(param1, param2), param3, param4, cv::BORDER_REPLICATE );
else if( smooth_type == CV_MEDIAN )
cv::medianBlur( src, dst, param1 );
else
cv::bilateralFilter( src, dst, param1, param3, param4, cv::BORDER_REPLICATE );
if( dst.data != dst0.data )
CV_Error( CV_StsUnmatchedFormats, "The destination image does not have the proper type" );
}
/* End of file. */

@@ -46,120 +46,28 @@
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
#include "filter.hpp"
#include "fixedpoint.inl.hpp"
/****************************************************************************************\
Gaussian Blur
\****************************************************************************************/
cv::Mat cv::getGaussianKernel( int n, double sigma, int ktype )
{
CV_Assert(n > 0);
const int SMALL_GAUSSIAN_SIZE = 7;
static const float small_gaussian_tab[][SMALL_GAUSSIAN_SIZE] =
{
{1.f},
{0.25f, 0.5f, 0.25f},
{0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f},
{0.03125f, 0.109375f, 0.21875f, 0.28125f, 0.21875f, 0.109375f, 0.03125f}
};
const float* fixed_kernel = n % 2 == 1 && n <= SMALL_GAUSSIAN_SIZE && sigma <= 0 ?
small_gaussian_tab[n>>1] : 0;
CV_Assert( ktype == CV_32F || ktype == CV_64F );
Mat kernel(n, 1, ktype);
float* cf = kernel.ptr<float>();
double* cd = kernel.ptr<double>();
double sigmaX = sigma > 0 ? sigma : ((n-1)*0.5 - 1)*0.3 + 0.8;
double scale2X = -0.5/(sigmaX*sigmaX);
double sum = 0;
int i;
for( i = 0; i < n; i++ )
{
double x = i - (n-1)*0.5;
double t = fixed_kernel ? (double)fixed_kernel[i] : std::exp(scale2X*x*x);
if( ktype == CV_32F )
{
cf[i] = (float)t;
sum += cf[i];
}
else
{
cd[i] = t;
sum += cd[i];
}
}
CV_DbgAssert(fabs(sum) > 0);
sum = 1./sum;
for( i = 0; i < n; i++ )
{
if( ktype == CV_32F )
cf[i] = (float)(cf[i]*sum);
else
cd[i] *= sum;
}
return kernel;
}
#include "opencv2/core/softfloat.hpp"
namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
// forward declarations
void GaussianBlurFixedPoint(const Mat& src, /*const*/ Mat& dst,
const uint16_t/*ufixedpoint16*/* fkx, int fkx_size,
const uint16_t/*ufixedpoint16*/* fky, int fky_size,
int borderType);
template <typename T>
static std::vector<T> getFixedpointGaussianKernel( int n, double sigma )
{
if (sigma <= 0)
{
if(n == 1)
return std::vector<T>(1, softdouble(1.0));
else if(n == 3)
{
T v3[] = { softdouble(0.25), softdouble(0.5), softdouble(0.25) };
return std::vector<T>(v3, v3 + 3);
}
else if(n == 5)
{
T v5[] = { softdouble(0.0625), softdouble(0.25), softdouble(0.375), softdouble(0.25), softdouble(0.0625) };
return std::vector<T>(v5, v5 + 5);
}
else if(n == 7)
{
T v7[] = { softdouble(0.03125), softdouble(0.109375), softdouble(0.21875), softdouble(0.28125), softdouble(0.21875), softdouble(0.109375), softdouble(0.03125) };
return std::vector<T>(v7, v7 + 7);
}
}
softdouble sigmaX = sigma > 0 ? softdouble(sigma) : mulAdd(softdouble(n),softdouble(0.15),softdouble(0.35));// softdouble(((n-1)*0.5 - 1)*0.3 + 0.8)
softdouble scale2X = softdouble(-0.5*0.25)/(sigmaX*sigmaX);
std::vector<softdouble> values(n);
softdouble sum(0.);
for(int i = 0, x = 1 - n; i < n; i++, x+=2 )
{
// x = i - (n - 1)*0.5
// t = std::exp(scale2X*x*x)
values[i] = exp(softdouble(x*x)*scale2X);
sum += values[i];
}
sum = softdouble::one()/sum;
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
std::vector<T> kernel(n);
for(int i = 0; i < n; i++ )
{
kernel[i] = values[i] * sum;
}
#if defined(CV_CPU_BASELINE_MODE)
// included in dispatch.cpp
#else
#include "fixedpoint.inl.hpp"
#endif
return kernel;
};
namespace {
template <typename ET, typename FT>
void hlineSmooth1N(const ET* src, int cn, const FT* m, int, FT* dst, int len, int)
@@ -2119,418 +2027,27 @@ private:
fixedSmoothInvoker& operator=(const fixedSmoothInvoker&);
};
static void getGaussianKernel(int n, double sigma, int ktype, Mat& res) { res = getGaussianKernel(n, sigma, ktype); }
template <typename T> static void getGaussianKernel(int n, double sigma, int, std::vector<T>& res) { res = getFixedpointGaussianKernel<T>(n, sigma); }
template <typename T>
static void createGaussianKernels( T & kx, T & ky, int type, Size &ksize,
double sigma1, double sigma2 )
{
int depth = CV_MAT_DEPTH(type);
if( sigma2 <= 0 )
sigma2 = sigma1;
// automatic detection of kernel size from sigma
if( ksize.width <= 0 && sigma1 > 0 )
ksize.width = cvRound(sigma1*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
if( ksize.height <= 0 && sigma2 > 0 )
ksize.height = cvRound(sigma2*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
CV_Assert( ksize.width > 0 && ksize.width % 2 == 1 &&
ksize.height > 0 && ksize.height % 2 == 1 );
sigma1 = std::max( sigma1, 0. );
sigma2 = std::max( sigma2, 0. );
} // namespace anon
getGaussianKernel( ksize.width, sigma1, std::max(depth, CV_32F), kx );
if( ksize.height == ksize.width && std::abs(sigma1 - sigma2) < DBL_EPSILON )
ky = kx;
else
getGaussianKernel( ksize.height, sigma2, std::max(depth, CV_32F), ky );
}
}
cv::Ptr<cv::FilterEngine> cv::createGaussianFilter( int type, Size ksize,
double sigma1, double sigma2,
int borderType )
{
Mat kx, ky;
createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);
return createSeparableLinearFilter( type, type, kx, ky, Point(-1,-1), 0, borderType );
}
namespace cv
{
#ifdef HAVE_OPENCL
static bool ocl_GaussianBlur_8UC1(InputArray _src, OutputArray _dst, Size ksize, int ddepth,
InputArray _kernelX, InputArray _kernelY, int borderType)
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
if ( !(dev.isIntel() && (type == CV_8UC1) &&
(_src.offset() == 0) && (_src.step() % 4 == 0) &&
((ksize.width == 5 && (_src.cols() % 4 == 0)) ||
(ksize.width == 3 && (_src.cols() % 16 == 0) && (_src.rows() % 2 == 0)))) )
return false;
Mat kernelX = _kernelX.getMat().reshape(1, 1);
if (kernelX.cols % 2 != 1)
return false;
Mat kernelY = _kernelY.getMat().reshape(1, 1);
if (kernelY.cols % 2 != 1)
return false;
if (ddepth < 0)
ddepth = sdepth;
Size size = _src.size();
size_t globalsize[2] = { 0, 0 };
size_t localsize[2] = { 0, 0 };
if (ksize.width == 3)
{
globalsize[0] = size.width / 16;
globalsize[1] = size.height / 2;
}
else if (ksize.width == 5)
{
globalsize[0] = size.width / 4;
globalsize[1] = size.height / 1;
}
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
char build_opts[1024];
sprintf(build_opts, "-D %s %s%s", borderMap[borderType & ~BORDER_ISOLATED],
ocl::kernelToStr(kernelX, CV_32F, "KERNEL_MATRIX_X").c_str(),
ocl::kernelToStr(kernelY, CV_32F, "KERNEL_MATRIX_Y").c_str());
ocl::Kernel kernel;
if (ksize.width == 3)
kernel.create("gaussianBlur3x3_8UC1_cols16_rows2", cv::ocl::imgproc::gaussianBlur3x3_oclsrc, build_opts);
else if (ksize.width == 5)
kernel.create("gaussianBlur5x5_8UC1_cols4", cv::ocl::imgproc::gaussianBlur5x5_oclsrc, build_opts);
if (kernel.empty())
return false;
UMat src = _src.getUMat();
_dst.create(size, CV_MAKETYPE(ddepth, cn));
if (!(_dst.offset() == 0 && _dst.step() % 4 == 0))
return false;
UMat dst = _dst.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
idxArg = kernel.set(idxArg, (int)src.step);
idxArg = kernel.set(idxArg, ocl::KernelArg::PtrWriteOnly(dst));
idxArg = kernel.set(idxArg, (int)dst.step);
idxArg = kernel.set(idxArg, (int)dst.rows);
idxArg = kernel.set(idxArg, (int)dst.cols);
return kernel.run(2, globalsize, (localsize[0] == 0) ? NULL : localsize, false);
}
#endif
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_GAUSSIAN_3x3>(int w, int h) { return w*h < 320 * 240; }
}
static bool openvx_gaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
double sigma1, double sigma2, int borderType)
{
if (sigma2 <= 0)
sigma2 = sigma1;
// automatic detection of kernel size from sigma
if (ksize.width <= 0 && sigma1 > 0)
ksize.width = cvRound(sigma1*6 + 1) | 1;
if (ksize.height <= 0 && sigma2 > 0)
ksize.height = cvRound(sigma2*6 + 1) | 1;
if (_src.type() != CV_8UC1 ||
_src.cols() < 3 || _src.rows() < 3 ||
ksize.width != 3 || ksize.height != 3)
return false;
sigma1 = std::max(sigma1, 0.);
sigma2 = std::max(sigma2, 0.);
if (!(sigma1 == 0.0 || (sigma1 - 0.8) < DBL_EPSILON) || !(sigma2 == 0.0 || (sigma2 - 0.8) < DBL_EPSILON) ||
ovx::skipSmallImages<VX_KERNEL_GAUSSIAN_3x3>(_src.cols(), _src.rows()))
return false;
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
return false; //Process isolated borders only
vx_enum border;
switch (borderType & ~BORDER_ISOLATED)
{
case BORDER_CONSTANT:
border = VX_BORDER_CONSTANT;
break;
case BORDER_REPLICATE:
border = VX_BORDER_REPLICATE;
break;
default:
return false;
}
try
{
ivx::Context ctx = ovx::getOpenVXContext();
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: changing the VX_CONTEXT_IMMEDIATE_BORDER attribute could lead to strange issues in multi-threaded environments,
//since the OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuGaussian3x3(ctx, ia, ib));
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#ifdef HAVE_IPP
// IW 2017u2 has a bug that doesn't allow the use of partial inMem with tiling
#if IPP_DISABLE_GAUSSIANBLUR_PARALLEL
#define IPP_GAUSSIANBLUR_PARALLEL 0
#else
#define IPP_GAUSSIANBLUR_PARALLEL 1
#endif
#ifdef HAVE_IPP_IW
class ipp_gaussianBlurParallel: public ParallelLoopBody
{
public:
ipp_gaussianBlurParallel(::ipp::IwiImage &src, ::ipp::IwiImage &dst, int kernelSize, float sigma, ::ipp::IwiBorderType &border, bool *pOk):
m_src(src), m_dst(dst), m_kernelSize(kernelSize), m_sigma(sigma), m_border(border), m_pOk(pOk) {
*m_pOk = true;
}
~ipp_gaussianBlurParallel()
{
}
virtual void operator() (const Range& range) const CV_OVERRIDE
{
CV_INSTRUMENT_REGION_IPP();
if(!*m_pOk)
return;
try
{
::ipp::IwiTile tile = ::ipp::IwiRoi(0, range.start, m_dst.m_size.width, range.end - range.start);
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterGaussian, m_src, m_dst, m_kernelSize, m_sigma, ::ipp::IwDefault(), m_border, tile);
}
catch(const ::ipp::IwException &)
{
*m_pOk = false;
return;
}
}
private:
::ipp::IwiImage &m_src;
::ipp::IwiImage &m_dst;
int m_kernelSize;
float m_sigma;
::ipp::IwiBorderType &m_border;
volatile bool *m_pOk;
const ipp_gaussianBlurParallel& operator= (const ipp_gaussianBlurParallel&);
};
#endif
static bool ipp_GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
double sigma1, double sigma2, int borderType )
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201800 && ((defined _MSC_VER && defined _M_IX86) || (defined __GNUC__ && defined __i386__))
CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(ksize); CV_UNUSED(sigma1); CV_UNUSED(sigma2); CV_UNUSED(borderType);
return false; // bug on ia32
#else
if(sigma1 != sigma2)
return false;
if(sigma1 < FLT_EPSILON)
return false;
if(ksize.width != ksize.height)
return false;
// Acquire data and begin processing
try
{
Mat src = _src.getMat();
Mat dst = _dst.getMat();
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiBorderSize borderSize = ::ipp::iwiSizeToBorderSize(ippiGetSize(ksize));
::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
if(!ippBorder)
return false;
const int threads = ippiSuggestThreadsNum(iwDst, 2);
if(IPP_GAUSSIANBLUR_PARALLEL && threads > 1) {
bool ok;
ipp_gaussianBlurParallel invoker(iwSrc, iwDst, ksize.width, (float) sigma1, ippBorder, &ok);
if(!ok)
return false;
const Range range(0, (int) iwDst.m_size.height);
parallel_for_(range, invoker, threads*4);
if(!ok)
return false;
} else {
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterGaussian, iwSrc, iwDst, ksize.width, sigma1, ::ipp::IwDefault(), ippBorder);
}
}
catch (const ::ipp::IwException &)
{
return false;
}
return true;
#endif
#else
CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(ksize); CV_UNUSED(sigma1); CV_UNUSED(sigma2); CV_UNUSED(borderType);
return false;
#endif
}
#endif
}
void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
double sigma1, double sigma2,
int borderType )
void GaussianBlurFixedPoint(const Mat& src, /*const*/ Mat& dst,
const uint16_t/*ufixedpoint16*/* fkx, int fkx_size,
const uint16_t/*ufixedpoint16*/* fky, int fky_size,
int borderType)
{
CV_INSTRUMENT_REGION();
int type = _src.type();
Size size = _src.size();
_dst.create( size, type );
if( (borderType & ~BORDER_ISOLATED) != BORDER_CONSTANT &&
((borderType & BORDER_ISOLATED) != 0 || !_src.getMat().isSubmatrix()) )
CV_Assert(src.depth() == CV_8U && ((borderType & BORDER_ISOLATED) || !src.isSubmatrix()));
fixedSmoothInvoker<uint8_t, ufixedpoint16> invoker(
src.ptr<uint8_t>(), src.step1(),
dst.ptr<uint8_t>(), dst.step1(), dst.cols, dst.rows, dst.channels(),
(const ufixedpoint16*)fkx, fkx_size, (const ufixedpoint16*)fky, fky_size,
borderType & ~BORDER_ISOLATED);
{
if( size.height == 1 )
ksize.height = 1;
if( size.width == 1 )
ksize.width = 1;
}
if( ksize.width == 1 && ksize.height == 1 )
{
_src.copyTo(_dst);
return;
}
bool useOpenCL = (ocl::isOpenCLActivated() && _dst.isUMat() && _src.dims() <= 2 &&
((ksize.width == 3 && ksize.height == 3) ||
(ksize.width == 5 && ksize.height == 5)) &&
_src.rows() > ksize.height && _src.cols() > ksize.width);
CV_UNUSED(useOpenCL);
int sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
Mat kx, ky;
createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);
CV_OCL_RUN(useOpenCL, ocl_GaussianBlur_8UC1(_src, _dst, ksize, CV_MAT_DEPTH(type), kx, ky, borderType));
CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2 && (size_t)_src.rows() > kx.total() && (size_t)_src.cols() > kx.total(),
ocl_sepFilter2D(_src, _dst, sdepth, kx, ky, Point(-1, -1), 0, borderType))
Mat src = _src.getMat();
Mat dst = _dst.getMat();
Point ofs;
Size wsz(src.cols, src.rows);
if(!(borderType & BORDER_ISOLATED))
src.locateROI( wsz, ofs );
CALL_HAL(gaussianBlur, cv_hal_gaussianBlur, src.ptr(), src.step, dst.ptr(), dst.step, src.cols, src.rows, sdepth, cn,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, ksize.width, ksize.height,
sigma1, sigma2, borderType&~BORDER_ISOLATED);
CV_OVX_RUN(true,
openvx_gaussianBlur(src, dst, ksize, sigma1, sigma2, borderType))
CV_IPP_RUN_FAST(ipp_GaussianBlur(src, dst, ksize, sigma1, sigma2, borderType));
if(sdepth == CV_8U && ((borderType & BORDER_ISOLATED) || !_src.getMat().isSubmatrix()))
{
std::vector<ufixedpoint16> fkx, fky;
createGaussianKernels(fkx, fky, type, ksize, sigma1, sigma2);
if (src.data == dst.data)
src = src.clone();
fixedSmoothInvoker<uint8_t, ufixedpoint16> invoker(src.ptr<uint8_t>(), src.step1(), dst.ptr<uint8_t>(), dst.step1(), dst.cols, dst.rows, dst.channels(), &fkx[0], (int)fkx.size(), &fky[0], (int)fky.size(), borderType & ~BORDER_ISOLATED);
// TODO AVX guard (external call)
parallel_for_(Range(0, dst.rows), invoker, std::max(1, std::min(getNumThreads(), getNumberOfCPUs())));
return;
}
sepFilter2D(src, dst, sdepth, kx, ky, Point(-1, -1), 0, borderType);
}
//////////////////////////////////////////////////////////////////////////////////////////
CV_IMPL void
cvSmooth( const void* srcarr, void* dstarr, int smooth_type,
int param1, int param2, double param3, double param4 )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0;
CV_Assert( dst.size() == src.size() &&
(smooth_type == CV_BLUR_NO_SCALE || dst.type() == src.type()) );
if( param2 <= 0 )
param2 = param1;
if( smooth_type == CV_BLUR || smooth_type == CV_BLUR_NO_SCALE )
cv::boxFilter( src, dst, dst.depth(), cv::Size(param1, param2), cv::Point(-1,-1),
smooth_type == CV_BLUR, cv::BORDER_REPLICATE );
else if( smooth_type == CV_GAUSSIAN )
cv::GaussianBlur( src, dst, cv::Size(param1, param2), param3, param4, cv::BORDER_REPLICATE );
else if( smooth_type == CV_MEDIAN )
cv::medianBlur( src, dst, param1 );
else
cv::bilateralFilter( src, dst, param1, param3, param4, cv::BORDER_REPLICATE );
if( dst.data != dst0.data )
CV_Error( CV_StsUnmatchedFormats, "The destination image does not have the proper type" );
}
/* End of file. */
#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace