|
|
|
@@ -96,12 +96,6 @@ cv::gpu::GpuMat cv::gpu::operator ^ (const GpuMat&, const GpuMat&) { throw_nogpu |
|
|
|
|
|
|
|
|
|
#else /* !defined (HAVE_CUDA) */ |
|
|
|
|
|
|
|
|
|
#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR) |
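// NPP_VERSION folds the major and minor NPP version into one comparable integer, e.g. NPP 3.2 -> 32.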
|
|
|
|
|
|
|
|
|
#if (defined(_WIN32) || defined(_WIN64)) && (NPP_VERSION >= 32) |
|
|
|
|
# define NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
#endif |
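// Complex-valued (Npp32fc) code paths below are compiled only for Windows builds against NPP 3.2 or newer.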
|
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
|
|
|
// add subtract multiply divide
|
|
|
|
|
|
|
|
|
@@ -120,11 +114,7 @@ namespace |
|
|
|
|
{ |
|
|
|
|
CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type()); |
|
|
|
|
|
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1); |
|
|
|
|
#else |
|
|
|
|
CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32FC1); |
|
|
|
|
#endif |
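// CV_32SC1 input is accepted only with NPP >= 3.2; the matching 32s case in the switch below is guarded the same way.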
|
|
|
|
|
|
|
|
|
dst.create( src1.size(), src1.type() ); |
|
|
|
|
|
|
|
|
@@ -144,13 +134,11 @@ namespace |
|
|
|
|
src2.ptr<Npp8u>(), src2.step, |
|
|
|
|
dst.ptr<Npp8u>(), dst.step, sz, 0) ); |
|
|
|
|
break; |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
case CV_32SC1: |
|
|
|
|
nppSafeCall( npp_func_32sc1(src1.ptr<Npp32s>(), src1.step, |
|
|
|
|
src2.ptr<Npp32s>(), src2.step, |
|
|
|
|
dst.ptr<Npp32s>(), dst.step, sz) ); |
|
|
|
|
break; |
|
|
|
|
#endif |
|
|
|
|
case CV_32FC1: |
|
|
|
|
nppSafeCall( npp_func_32fc1(src1.ptr<Npp32f>(), src1.step, |
|
|
|
|
src2.ptr<Npp32f>(), src2.step, |
|
|
|
@@ -167,13 +155,11 @@ namespace |
|
|
|
|
typedef NppStatus (*func_ptr)(const Npp32f *pSrc, int nSrcStep, Npp32f nValue, Npp32f *pDst, |
|
|
|
|
int nDstStep, NppiSize oSizeROI); |
|
|
|
|
}; |
|
|
|
|
#ifdef NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
template<> struct NppArithmScalarFunc<2> |
|
|
|
|
{ |
|
|
|
|
typedef NppStatus (*func_ptr)(const Npp32fc *pSrc, int nSrcStep, Npp32fc nValue, Npp32fc *pDst, |
|
|
|
|
int nDstStep, NppiSize oSizeROI); |
|
|
|
|
}; |
|
|
|
|
#endif |
|
|
|
|
|
|
|
|
|
template<int SCN, typename NppArithmScalarFunc<SCN>::func_ptr func> struct NppArithmScalar; |
|
|
|
|
template<typename NppArithmScalarFunc<1>::func_ptr func> struct NppArithmScalar<1, func> |
|
|
|
@@ -189,7 +175,6 @@ namespace |
|
|
|
|
nppSafeCall( func(src.ptr<Npp32f>(), src.step, (Npp32f)sc[0], dst.ptr<Npp32f>(), dst.step, sz) ); |
|
|
|
|
} |
|
|
|
|
}; |
|
|
|
|
#ifdef NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
template<typename NppArithmScalarFunc<2>::func_ptr func> struct NppArithmScalar<2, func> |
|
|
|
|
{ |
|
|
|
|
static void calc(const GpuMat& src, const Scalar& sc, GpuMat& dst) |
|
|
|
@@ -207,119 +192,66 @@ namespace |
|
|
|
|
nppSafeCall( func(src.ptr<Npp32fc>(), src.step, nValue, dst.ptr<Npp32fc>(), dst.step, sz) ); |
|
|
|
|
} |
|
|
|
|
}; |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::add(const GpuMat& src1, const GpuMat& src2, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, nppiAdd_32s_C1R, nppiAdd_32f_C1R); |
|
|
|
|
#else |
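// Before NPP 3.2 there is no 32s add kernel, so a null pointer fills that slot; nppArithmCaller never reaches it because its type assert excludes CV_32SC1 in that configuration.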
|
|
|
|
nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, 0, nppiAdd_32f_C1R); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, nppiSub_32s_C1R, nppiSub_32f_C1R); |
|
|
|
|
#else |
|
|
|
|
nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, 0, nppiSub_32f_C1R); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, nppiMul_32s_C1R, nppiMul_32f_C1R); |
|
|
|
|
#else |
|
|
|
|
nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, 0, nppiMul_32f_C1R); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, nppiDiv_32s_C1R, nppiDiv_32f_C1R); |
|
|
|
|
#else |
|
|
|
|
nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, 0, nppiDiv_32f_C1R); |
|
|
|
|
#endif |
|
|
|
|
} |
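// A minimal usage sketch for the element-wise functions above (not part of this patch; assumes the
// GpuMat(rows, cols, type) constructor and that the inputs were uploaded to the device beforehand):
//   cv::gpu::GpuMat a(480, 640, CV_32FC1), b(480, 640, CV_32FC1), c;
//   cv::gpu::add(a, b, c);   // c is created with the same size and type as the inputs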
|
|
|
|
|
|
|
|
|
void cv::gpu::add(const GpuMat& src, const Scalar& sc, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#ifdef NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst); |
|
|
|
|
static const caller_t callers[] = {0, NppArithmScalar<1, nppiAddC_32f_C1R>::calc, NppArithmScalar<2, nppiAddC_32fc_C1R>::calc}; |
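// Indexed by src.channels(): slot 1 is the real CV_32FC1 kernel, slot 2 the complex CV_32FC2 kernel; slot 0 is an unused placeholder.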
|
|
|
|
|
|
|
|
|
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2); |
|
|
|
|
|
|
|
|
|
callers[src.channels()](src, sc, dst); |
|
|
|
|
#else |
|
|
|
|
# if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(src.type() == CV_32FC1); |
|
|
|
|
NppArithmScalar<1, nppiAddC_32f_C1R>::calc(src, sc, dst); |
|
|
|
|
# else |
|
|
|
|
CV_Assert(!"This function doesn't supported"); |
|
|
|
|
# endif |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::subtract(const GpuMat& src, const Scalar& sc, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#ifdef NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst); |
|
|
|
|
static const caller_t callers[] = {0, NppArithmScalar<1, nppiSubC_32f_C1R>::calc, NppArithmScalar<2, nppiSubC_32fc_C1R>::calc}; |
|
|
|
|
|
|
|
|
|
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2); |
|
|
|
|
|
|
|
|
|
callers[src.channels()](src, sc, dst); |
|
|
|
|
#else |
|
|
|
|
# if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(src.type() == CV_32FC1); |
|
|
|
|
NppArithmScalar<1, nppiSubC_32f_C1R>::calc(src, sc, dst); |
|
|
|
|
# else |
|
|
|
|
CV_Assert(!"This function doesn't supported"); |
|
|
|
|
# endif |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::multiply(const GpuMat& src, const Scalar& sc, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#ifdef NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst); |
|
|
|
|
static const caller_t callers[] = {0, NppArithmScalar<1, nppiMulC_32f_C1R>::calc, NppArithmScalar<2, nppiMulC_32fc_C1R>::calc}; |
|
|
|
|
|
|
|
|
|
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2); |
|
|
|
|
|
|
|
|
|
callers[src.channels()](src, sc, dst); |
|
|
|
|
#else |
|
|
|
|
# if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(src.type() == CV_32FC1); |
|
|
|
|
NppArithmScalar<1, nppiMulC_32f_C1R>::calc(src, sc, dst); |
|
|
|
|
# else |
|
|
|
|
CV_Assert(!"This function doesn't supported"); |
|
|
|
|
# endif |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::divide(const GpuMat& src, const Scalar& sc, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#ifdef NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst); |
|
|
|
|
static const caller_t callers[] = {0, NppArithmScalar<1, nppiDivC_32f_C1R>::calc, NppArithmScalar<2, nppiDivC_32fc_C1R>::calc}; |
|
|
|
|
|
|
|
|
|
CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2); |
|
|
|
|
|
|
|
|
|
callers[src.channels()](src, sc, dst); |
|
|
|
|
#else |
|
|
|
|
# if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(src.type() == CV_32FC1); |
|
|
|
|
NppArithmScalar<1, nppiDivC_32f_C1R>::calc(src, sc, dst); |
|
|
|
|
# else |
|
|
|
|
CV_Assert(!"This function doesn't supported"); |
|
|
|
|
# endif |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
|
|
@@ -345,11 +277,7 @@ void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type()); |
|
|
|
|
|
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1); |
|
|
|
|
#else |
|
|
|
|
CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32FC1); |
|
|
|
|
#endif |
|
|
|
|
|
|
|
|
|
dst.create( src1.size(), src1.type() ); |
|
|
|
|
|
|
|
|
@@ -369,13 +297,11 @@ void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst) |
|
|
|
|
src2.ptr<Npp8u>(), src2.step, |
|
|
|
|
dst.ptr<Npp8u>(), dst.step, sz) ); |
|
|
|
|
break; |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
case CV_32SC1: |
|
|
|
|
nppSafeCall( nppiAbsDiff_32s_C1R(src1.ptr<Npp32s>(), src1.step, |
|
|
|
|
src2.ptr<Npp32s>(), src2.step, |
|
|
|
|
dst.ptr<Npp32s>(), dst.step, sz) ); |
|
|
|
|
break; |
|
|
|
|
#endif |
|
|
|
|
case CV_32FC1: |
|
|
|
|
nppSafeCall( nppiAbsDiff_32f_C1R(src1.ptr<Npp32f>(), src1.step, |
|
|
|
|
src2.ptr<Npp32f>(), src2.step, |
|
|
|
@@ -388,7 +314,6 @@ void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst) |
|
|
|
|
|
|
|
|
|
void cv::gpu::absdiff(const GpuMat& src, const Scalar& s, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(src.type() == CV_32FC1); |
|
|
|
|
|
|
|
|
|
dst.create( src.size(), src.type() ); |
|
|
|
@@ -398,9 +323,6 @@ void cv::gpu::absdiff(const GpuMat& src, const Scalar& s, GpuMat& dst) |
|
|
|
|
sz.height = src.rows; |
|
|
|
|
|
|
|
|
|
nppSafeCall( nppiAbsDiffC_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz, (Npp32f)s[0]) ); |
|
|
|
|
#else |
|
|
|
|
CV_Assert(!"This function doesn't supported"); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
|
|
@@ -534,6 +456,8 @@ void cv::gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode) |
|
|
|
|
|
|
|
|
|
Scalar cv::gpu::sum(const GpuMat& src) |
|
|
|
|
{ |
|
|
|
|
CV_Assert(!"disabled until fix crash"); |
|
|
|
|
|
|
|
|
|
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4); |
|
|
|
|
|
|
|
|
|
NppiSize sz; |
|
|
|
@@ -541,8 +465,6 @@ Scalar cv::gpu::sum(const GpuMat& src) |
|
|
|
|
sz.height = src.rows; |
|
|
|
|
|
|
|
|
|
Scalar res; |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(!"disabled until fix crash"); |
|
|
|
|
|
|
|
|
|
int bufsz; |
|
|
|
|
|
|
|
|
@@ -560,12 +482,6 @@ Scalar cv::gpu::sum(const GpuMat& src) |
|
|
|
|
|
|
|
|
|
nppSafeCall( nppiSum_8u_C4R(src.ptr<Npp8u>(), src.step, sz, buf.ptr<Npp32s>(), res.val) ); |
|
|
|
|
} |
|
|
|
|
#else |
|
|
|
|
if (src.type() == CV_8UC1) |
|
|
|
|
nppSafeCall( nppiSum_8u_C1R(src.ptr<Npp8u>(), src.step, sz, res.val) ); |
|
|
|
|
else |
|
|
|
|
nppSafeCall( nppiSum_8u_C4R(src.ptr<Npp8u>(), src.step, sz, res.val) ); |
|
|
|
|
#endif |
|
|
|
|
|
|
|
|
|
return res; |
|
|
|
|
} |
|
|
|
@@ -600,11 +516,7 @@ namespace |
|
|
|
|
|
|
|
|
|
Npp8u* cuMem; |
|
|
|
|
|
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
cuMem = nppsMalloc_8u(8); |
|
|
|
|
#else |
|
|
|
|
cudaSafeCall( cudaMalloc((void**)&cuMem, 8 * sizeof(Npp8u)) ); |
|
|
|
|
#endif |
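// cuMem holds 8 device bytes: the four per-channel minima land at cuMem, the four maxima at cuMem + 4.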
|
|
|
|
|
|
|
|
|
nppSafeCall( nppiMinMax_8u_C4R(src.ptr<Npp8u>(), src.step, sz, cuMem, cuMem + 4) ); |
|
|
|
|
|
|
|
|
@@ -613,11 +525,7 @@ namespace |
|
|
|
|
if (maxVal) |
|
|
|
|
cudaMemcpy(maxVal, cuMem + 4, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost); |
|
|
|
|
|
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
nppsFree(cuMem); |
|
|
|
|
#else |
|
|
|
|
cudaSafeCall( cudaFree(cuMem) ); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
@@ -696,7 +604,6 @@ void cv::gpu::LUT(const GpuMat& src, const Mat& lut, GpuMat& dst) |
|
|
|
|
|
|
|
|
|
void cv::gpu::exp(const GpuMat& src, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(src.type() == CV_32FC1); |
|
|
|
|
|
|
|
|
|
dst.create(src.size(), src.type()); |
|
|
|
@@ -706,9 +613,6 @@ void cv::gpu::exp(const GpuMat& src, GpuMat& dst) |
|
|
|
|
sz.height = src.rows; |
|
|
|
|
|
|
|
|
|
nppSafeCall( nppiExp_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) ); |
|
|
|
|
#else |
|
|
|
|
CV_Assert(!"This function doesn't supported"); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
|
|
@@ -716,7 +620,6 @@ void cv::gpu::exp(const GpuMat& src, GpuMat& dst) |
|
|
|
|
|
|
|
|
|
void cv::gpu::log(const GpuMat& src, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#if NPP_VERSION >= 32 |
|
|
|
|
CV_Assert(src.type() == CV_32FC1); |
|
|
|
|
|
|
|
|
|
dst.create(src.size(), src.type()); |
|
|
|
@@ -726,15 +629,11 @@ void cv::gpu::log(const GpuMat& src, GpuMat& dst) |
|
|
|
|
sz.height = src.rows; |
|
|
|
|
|
|
|
|
|
nppSafeCall( nppiLn_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) ); |
|
|
|
|
#else |
|
|
|
|
CV_Assert(!"This function doesn't supported"); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
|
|
|
// NPP magnitude
|
|
|
|
|
|
|
|
|
|
#ifdef NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
namespace |
|
|
|
|
{ |
|
|
|
|
typedef NppStatus (*nppMagnitude_t)(const Npp32fc* pSrc, int nSrcStep, Npp32f* pDst, int nDstStep, NppiSize oSizeROI); |
|
|
|
@@ -752,24 +651,15 @@ namespace |
|
|
|
|
nppSafeCall( func(src.ptr<Npp32fc>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) ); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
#endif |
|
|
|
|
|
|
|
|
|
void cv::gpu::magnitude(const GpuMat& src, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#ifdef NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
::npp_magnitude(src, dst, nppiMagnitude_32fc32f_C1R); |
|
|
|
|
#else |
|
|
|
|
CV_Assert(!"This function doesn't supported"); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::magnitudeSqr(const GpuMat& src, GpuMat& dst) |
|
|
|
|
{ |
|
|
|
|
#ifdef NPP_HAVE_COMPLEX_TYPE |
|
|
|
|
::npp_magnitude(src, dst, nppiMagnitudeSqr_32fc32f_C1R); |
|
|
|
|
#else |
|
|
|
|
CV_Assert(!"This function doesn't supported"); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
|
|
@@ -871,7 +761,7 @@ void cv::gpu::polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& |
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
// Per-element bit-wise logical matrix operations
|
|
|
|
|
|
|
|
|
|
namespace cv { namespace gpu { namespace mathfunc
|
|
|
|
|
namespace cv { namespace gpu { namespace mathfunc |
|
|
|
|
{ |
|
|
|
|
void bitwise_not_caller(int rows, int cols, const PtrStep src, int elemSize, PtrStep dst, cudaStream_t stream); |
|
|
|
|
void bitwise_not_caller(int rows, int cols, const PtrStep src, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream); |
|
|
|
@@ -887,7 +777,7 @@ namespace cv { namespace gpu { namespace mathfunc |
|
|
|
|
void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, int elem_size, Mask mask, cudaStream_t stream); |
|
|
|
|
}}} |
|
|
|
|
|
|
|
|
|
namespace
|
|
|
|
|
namespace |
|
|
|
|
{ |
|
|
|
|
void bitwise_not_caller(const GpuMat& src, GpuMat& dst, cudaStream_t stream) |
|
|
|
|
{ |
|
|
|
@@ -951,7 +841,7 @@ namespace |
|
|
|
|
|
|
|
|
|
void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask) |
|
|
|
|
{ |
|
|
|
|
if (mask.empty())
|
|
|
|
|
if (mask.empty()) |
|
|
|
|
::bitwise_not_caller(src, dst, 0); |
|
|
|
|
else |
|
|
|
|
::bitwise_not_caller(src, dst, mask, 0); |
|
|
|
|