@@ -84,6 +84,12 @@ void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool,

 #else /* !defined (HAVE_CUDA) */

+#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)
+
+#if (defined(_WIN32) || defined(_WIN64)) && (NPP_VERSION >= 32)
+# define NPP_HAVE_COMPLEX_TYPE
+#endif
+
 ////////////////////////////////////////////////////////////////////////
 // add subtract multiply divide
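The NPP_VERSION macro packs the library's major and minor version into a two-digit
integer, so NPP 3.2 becomes 10 * 3 + 2 == 32 and every "NPP_VERSION >= 32" check in
the hunks below reads as "NPP 3.2 or newer" (the encoding stays ordered as long as
minor versions remain below ten). NPP_HAVE_COMPLEX_TYPE is defined only for Windows
builds on such versions, matching the guard above. A minimal sketch of the gate
(the comments are illustrative, not part of the patch):

    // With NPP 3.2: NPP_VERSION_MAJOR == 3, NPP_VERSION_MINOR == 2
    #define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)   // == 32

    #if NPP_VERSION >= 32
        // 3.2-only entry points (32s arithmetic, reduction buffers, ...) usable here
    #else
        // restrict to the 8u/32f subset that older NPP releases provide
    #endif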
@@ -102,7 +108,11 @@ namespace
     {
         CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type());
+#if NPP_VERSION >= 32
         CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1);
+#else
+        CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32FC1);
+#endif

         dst.create( src1.size(), src1.type() );
@@ -122,11 +132,13 @@ namespace
                                        src2.ptr<Npp8u>(), src2.step,
                                        dst.ptr<Npp8u>(), dst.step, sz, 0) );
             break;
+#if NPP_VERSION >= 32
         case CV_32SC1:
             nppSafeCall( npp_func_32sc1(src1.ptr<Npp32s>(), src1.step,
                                         src2.ptr<Npp32s>(), src2.step,
                                         dst.ptr<Npp32s>(), dst.step, sz) );
             break;
+#endif
         case CV_32FC1:
             nppSafeCall( npp_func_32fc1(src1.ptr<Npp32f>(), src1.step,
                                         src2.ptr<Npp32f>(), src2.step,
@@ -143,11 +155,13 @@ namespace
         typedef NppStatus (*func_ptr)(const Npp32f *pSrc, int nSrcStep, Npp32f nValue, Npp32f *pDst,
                                       int nDstStep, NppiSize oSizeROI);
     };
+#ifdef NPP_HAVE_COMPLEX_TYPE
     template<> struct NppArithmScalarFunc<2>
     {
         typedef NppStatus (*func_ptr)(const Npp32fc *pSrc, int nSrcStep, Npp32fc nValue, Npp32fc *pDst,
                                       int nDstStep, NppiSize oSizeROI);
     };
+#endif

     template<int SCN, typename NppArithmScalarFunc<SCN>::func_ptr func> struct NppArithmScalar;
     template<typename NppArithmScalarFunc<1>::func_ptr func> struct NppArithmScalar<1, func>
@@ -163,6 +177,7 @@ namespace
             nppSafeCall( func(src.ptr<Npp32f>(), src.step, (Npp32f)sc[0], dst.ptr<Npp32f>(), dst.step, sz) );
         }
     };
+#ifdef NPP_HAVE_COMPLEX_TYPE
     template<typename NppArithmScalarFunc<2>::func_ptr func> struct NppArithmScalar<2, func>
     {
         static void calc(const GpuMat& src, const Scalar& sc, GpuMat& dst)
@@ -180,66 +195,119 @@ namespace
             nppSafeCall( func(src.ptr<Npp32fc>(), src.step, nValue, dst.ptr<Npp32fc>(), dst.step, sz) );
         }
     };
+#endif
 }

 void cv::gpu::add(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, nppiAdd_32s_C1R, nppiAdd_32f_C1R);
+#else
+    nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, 0, nppiAdd_32f_C1R);
+#endif
 }

 void cv::gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, nppiSub_32s_C1R, nppiSub_32f_C1R);
+#else
+    nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, 0, nppiSub_32f_C1R);
+#endif
 }

 void cv::gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, nppiMul_32s_C1R, nppiMul_32f_C1R);
+#else
+    nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, 0, nppiMul_32f_C1R);
+#endif
 }

 void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, nppiDiv_32s_C1R, nppiDiv_32f_C1R);
+#else
+    nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, 0, nppiDiv_32f_C1R);
+#endif
 }

 void cv::gpu::add(const GpuMat& src, const Scalar& sc, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);
-    static const caller_t callers[] = {NppArithmScalar<1, nppiAddC_32f_C1R>::calc, NppArithmScalar<2, nppiAddC_32fc_C1R>::calc};
+    static const caller_t callers[] = {0, NppArithmScalar<1, nppiAddC_32f_C1R>::calc, NppArithmScalar<2, nppiAddC_32fc_C1R>::calc};

     CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);

     callers[src.channels()](src, sc, dst);
+#else
+# if NPP_VERSION >= 32
+    CV_Assert(src.type() == CV_32FC1);
+    NppArithmScalar<1, nppiAddC_32f_C1R>::calc(src, sc, dst);
+# else
+    CV_Assert(!"This function doesn't supported");
+# endif
+#endif
 }

 void cv::gpu::subtract(const GpuMat& src, const Scalar& sc, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);
-    static const caller_t callers[] = {NppArithmScalar<1, nppiSubC_32f_C1R>::calc, NppArithmScalar<2, nppiSubC_32fc_C1R>::calc};
+    static const caller_t callers[] = {0, NppArithmScalar<1, nppiSubC_32f_C1R>::calc, NppArithmScalar<2, nppiSubC_32fc_C1R>::calc};

     CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);

     callers[src.channels()](src, sc, dst);
+#else
+# if NPP_VERSION >= 32
+    CV_Assert(src.type() == CV_32FC1);
+    NppArithmScalar<1, nppiSubC_32f_C1R>::calc(src, sc, dst);
+# else
+    CV_Assert(!"This function doesn't supported");
+# endif
+#endif
 }

 void cv::gpu::multiply(const GpuMat& src, const Scalar& sc, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);
-    static const caller_t callers[] = {NppArithmScalar<1, nppiMulC_32f_C1R>::calc, NppArithmScalar<2, nppiMulC_32fc_C1R>::calc};
+    static const caller_t callers[] = {0, NppArithmScalar<1, nppiMulC_32f_C1R>::calc, NppArithmScalar<2, nppiMulC_32fc_C1R>::calc};

     CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);

     callers[src.channels()](src, sc, dst);
+#else
+# if NPP_VERSION >= 32
+    CV_Assert(src.type() == CV_32FC1);
+    NppArithmScalar<1, nppiMulC_32f_C1R>::calc(src, sc, dst);
+# else
+    CV_Assert(!"This function doesn't supported");
+# endif
+#endif
 }

 void cv::gpu::divide(const GpuMat& src, const Scalar& sc, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);
-    static const caller_t callers[] = {NppArithmScalar<1, nppiDivC_32f_C1R>::calc, NppArithmScalar<2, nppiDivC_32fc_C1R>::calc};
+    static const caller_t callers[] = {0, NppArithmScalar<1, nppiDivC_32f_C1R>::calc, NppArithmScalar<2, nppiDivC_32fc_C1R>::calc};

     CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);

     callers[src.channels()](src, sc, dst);
+#else
+# if NPP_VERSION >= 32
+    CV_Assert(src.type() == CV_32FC1);
+    NppArithmScalar<1, nppiDivC_32f_C1R>::calc(src, sc, dst);
+# else
+    CV_Assert(!"This function doesn't supported");
+# endif
+#endif
 }
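A note on the four scalar overloads above: the unchanged dispatch line
"callers[src.channels()](src, sc, dst);" indexes the table with the raw channel
count, so the old two-entry table picked the complex variant for one-channel images
and read past the end for two-channel ones. The added leading 0 realigns the table
with the channel count; condensed from the add overload:

    typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);

    static const caller_t callers[] =
    {
        0,                                           // index 0: no 0-channel images
        NppArithmScalar<1, nppiAddC_32f_C1R>::calc,  // index 1: CV_32FC1
        NppArithmScalar<2, nppiAddC_32fc_C1R>::calc  // index 2: CV_32FC2 (complex)
    };

    CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);
    callers[src.channels()](src, sc, dst);           // channels() is 1 or 2 here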
 ////////////////////////////////////////////////////////////////////////
@@ -265,7 +333,11 @@ void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
     CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type());

+#if NPP_VERSION >= 32
     CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1);
+#else
+    CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32FC1);
+#endif

     dst.create( src1.size(), src1.type() );
@@ -285,11 +357,13 @@ void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
                                     src2.ptr<Npp8u>(), src2.step,
                                     dst.ptr<Npp8u>(), dst.step, sz) );
         break;
+#if NPP_VERSION >= 32
     case CV_32SC1:
         nppSafeCall( nppiAbsDiff_32s_C1R(src1.ptr<Npp32s>(), src1.step,
                                          src2.ptr<Npp32s>(), src2.step,
                                          dst.ptr<Npp32s>(), dst.step, sz) );
         break;
+#endif
     case CV_32FC1:
         nppSafeCall( nppiAbsDiff_32f_C1R(src1.ptr<Npp32f>(), src1.step,
                                          src2.ptr<Npp32f>(), src2.step,
@@ -302,6 +376,7 @@ void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)

 void cv::gpu::absdiff(const GpuMat& src, const Scalar& s, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_32FC1);

     dst.create( src.size(), src.type() );
@@ -311,6 +386,9 @@ void cv::gpu::absdiff(const GpuMat& src, const Scalar& s, GpuMat& dst)
     sz.height = src.rows;

     nppSafeCall( nppiAbsDiffC_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz, (Npp32f)s[0]) );
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 ////////////////////////////////////////////////////////////////////////
@@ -444,13 +522,16 @@ void cv::gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode)

 Scalar cv::gpu::sum(const GpuMat& src)
 {
-    CV_Assert(!"disabled until fix crash");
     CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4);

     NppiSize sz;
     sz.width = src.cols;
     sz.height = src.rows;

+    Scalar res;
+#if NPP_VERSION >= 32
+    CV_Assert(!"disabled until fix crash");
+
     int bufsz;

     if (src.type() == CV_8UC1)
@@ -458,19 +539,23 @@ Scalar cv::gpu::sum(const GpuMat& src)
         nppiReductionGetBufferHostSize_8u_C1R(sz, &bufsz);
         GpuMat buf(1, bufsz, CV_32S);

-        Scalar res;
         nppSafeCall( nppiSum_8u_C1R(src.ptr<Npp8u>(), src.step, sz, buf.ptr<Npp32s>(), res.val) );
-        return res;
     }
     else
     {
         nppiReductionGetBufferHostSize_8u_C4R(sz, &bufsz);
         GpuMat buf(1, bufsz, CV_32S);

-        Scalar res;
         nppSafeCall( nppiSum_8u_C4R(src.ptr<Npp8u>(), src.step, sz, buf.ptr<Npp32s>(), res.val) );
-        return res;
     }
+#else
+    if (src.type() == CV_8UC1)
+        nppSafeCall( nppiSum_8u_C1R(src.ptr<Npp8u>(), src.step, sz, res.val) );
+    else
+        nppSafeCall( nppiSum_8u_C4R(src.ptr<Npp8u>(), src.step, sz, res.val) );
+#endif
+
+    return res;
 }
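sum() now follows the NPP 3.2 reduction protocol: query on the host how much device
scratch memory the reduction needs, allocate it (here as a one-row GpuMat), then pass
it to nppiSum_*; the pre-3.2 overloads in the #else branch take no scratch buffer.
The 3.2 call sequence in isolation, a sketch mirroring the hunk above:

    NppiSize sz;
    sz.width = src.cols;
    sz.height = src.rows;

    int bufsz = 0;
    nppiReductionGetBufferHostSize_8u_C1R(sz, &bufsz);  // host-side size query
    GpuMat buf(1, bufsz, CV_32S);                       // device scratch space

    Scalar res;
    nppSafeCall( nppiSum_8u_C1R(src.ptr<Npp8u>(), src.step, sz,
                                buf.ptr<Npp32s>(), res.val) );  // sum lands in res.val
    return res;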
 ////////////////////////////////////////////////////////////////////////
@@ -501,18 +586,26 @@ namespace
         sz.width = src.cols;
         sz.height = src.rows;

-        Npp8u* cuMin = nppsMalloc_8u(4);
-        Npp8u* cuMax = nppsMalloc_8u(4);
+        Npp8u* cuMem;
+
+#if NPP_VERSION >= 32
+        cuMem = nppsMalloc_8u(8);
+#else
+        cudaSafeCall( cudaMalloc((void**)&cuMem, 8 * sizeof(Npp8u)) );
+#endif

-        nppSafeCall( nppiMinMax_8u_C4R(src.ptr<Npp8u>(), src.step, sz, cuMin, cuMax) );
+        nppSafeCall( nppiMinMax_8u_C4R(src.ptr<Npp8u>(), src.step, sz, cuMem, cuMem + 4) );

         if (minVal)
-            cudaMemcpy(minVal, cuMin, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost);
+            cudaMemcpy(minVal, cuMem, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost);
         if (maxVal)
-            cudaMemcpy(maxVal, cuMax, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost);
+            cudaMemcpy(maxVal, cuMem + 4, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost);

-        nppsFree(cuMin);
-        nppsFree(cuMax);
+#if NPP_VERSION >= 32
+        nppsFree(cuMem);
+#else
+        cudaSafeCall( cudaFree(cuMem) );
+#endif
     }
 }
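The minMax helper now makes a single 8-byte device allocation instead of two 4-byte
ones: nppiMinMax_8u_C4R writes the four per-channel minima at cuMem and the four
maxima at cuMem + 4, and only the allocator/deallocator pair depends on the NPP
version. The buffer layout, spelled out:

    // cuMem layout after nppiMinMax_8u_C4R (one Npp8u per channel):
    //   cuMem + 0 .. cuMem + 3 : minima of channels 0..3
    //   cuMem + 4 .. cuMem + 7 : maxima of channels 0..3
    // The two cudaMemcpy calls above each read back 4 bytes from one half.
    nppSafeCall( nppiMinMax_8u_C4R(src.ptr<Npp8u>(), src.step, sz, cuMem, cuMem + 4) );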
@@ -591,6 +684,7 @@ void cv::gpu::LUT(const GpuMat& src, const Mat& lut, GpuMat& dst)

 void cv::gpu::exp(const GpuMat& src, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_32FC1);

     dst.create(src.size(), src.type());
@@ -600,6 +694,9 @@ void cv::gpu::exp(const GpuMat& src, GpuMat& dst)
     sz.height = src.rows;

     nppSafeCall( nppiExp_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) );
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 ////////////////////////////////////////////////////////////////////////
@@ -607,6 +704,7 @@ void cv::gpu::exp(const GpuMat& src, GpuMat& dst)

 void cv::gpu::log(const GpuMat& src, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_32FC1);

     dst.create(src.size(), src.type());
@@ -616,11 +714,15 @@ void cv::gpu::log(const GpuMat& src, GpuMat& dst)
     sz.height = src.rows;

     nppSafeCall( nppiLn_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) );
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 ////////////////////////////////////////////////////////////////////////
 // NPP magnitude

+#ifdef NPP_HAVE_COMPLEX_TYPE
 namespace
 {
     typedef NppStatus (*nppMagnitude_t)(const Npp32fc* pSrc, int nSrcStep, Npp32f* pDst, int nDstStep, NppiSize oSizeROI);
@@ -638,15 +740,24 @@ namespace
         nppSafeCall( func(src.ptr<Npp32fc>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) );
     }
 }
+#endif

 void cv::gpu::magnitude(const GpuMat& src, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     ::npp_magnitude(src, dst, nppiMagnitude_32fc32f_C1R);
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 void cv::gpu::magnitudeSqr(const GpuMat& src, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     ::npp_magnitude(src, dst, nppiMagnitudeSqr_32fc32f_C1R);
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }
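Both wrappers exist only where NPP_HAVE_COMPLEX_TYPE is defined (Windows with
NPP >= 3.2), since they reinterpret the image data as NPP's interleaved Npp32fc
complex values; on other configurations they fail the assert. A hypothetical call
site (sizes and variable names are illustrative), with the two channels of a
CV_32FC2 image serving as real and imaginary parts:

    cv::gpu::GpuMat z(480, 640, CV_32FC2);    // channel 0 = Re, channel 1 = Im
    cv::gpu::GpuMat mag, magSqr;

    cv::gpu::magnitude(z, mag);               // |z|   via nppiMagnitude_32fc32f_C1R
    cv::gpu::magnitudeSqr(z, magSqr);         // |z|^2 via nppiMagnitudeSqr_32fc32f_C1R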

 ////////////////////////////////////////////////////////////////////////