@@ -58,35 +58,47 @@ namespace cv { namespace gpu { namespace device
 
     template<> __device__ __forceinline__ uchar saturate_cast<uchar>(schar v)
     {
-        return (uchar) ::max((int)v, 0);
+        uint res = 0;
+        int vi = v;
+        asm("cvt.sat.u8.s8 %0, %1;" : "=r"(res) : "r"(vi));
+        return res;
     }
+    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(short v)
+    {
+        uint res = 0;
+        asm("cvt.sat.u8.s16 %0, %1;" : "=r"(res) : "h"(v));
+        return res;
+    }
     template<> __device__ __forceinline__ uchar saturate_cast<uchar>(ushort v)
     {
-        return (uchar) ::min((uint)v, (uint)UCHAR_MAX);
+        uint res = 0;
+        asm("cvt.sat.u8.u16 %0, %1;" : "=r"(res) : "h"(v));
+        return res;
     }
     template<> __device__ __forceinline__ uchar saturate_cast<uchar>(int v)
     {
-        return (uchar)((uint)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0);
+        uint res = 0;
+        asm("cvt.sat.u8.s32 %0, %1;" : "=r"(res) : "r"(v));
+        return res;
     }
     template<> __device__ __forceinline__ uchar saturate_cast<uchar>(uint v)
     {
-        return (uchar) ::min(v, (uint)UCHAR_MAX);
+        uint res = 0;
+        asm("cvt.sat.u8.u32 %0, %1;" : "=r"(res) : "r"(v));
+        return res;
     }
-    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(short v)
-    {
-        return saturate_cast<uchar>((uint)v);
-    }
-
     template<> __device__ __forceinline__ uchar saturate_cast<uchar>(float v)
     {
-        int iv = __float2int_rn(v);
-        return saturate_cast<uchar>(iv);
+        uint res = 0;
+        asm("cvt.rni.sat.u8.f32 %0, %1;" : "=r"(res) : "f"(v));
+        return res;
     }
     template<> __device__ __forceinline__ uchar saturate_cast<uchar>(double v)
     {
-    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
-        int iv = __double2int_rn(v);
-        return saturate_cast<uchar>(iv);
+    #if __CUDA_ARCH__ >= 130
+        uint res = 0;
+        asm("cvt.rni.sat.u8.f64 %0, %1;" : "=r"(res) : "d"(v));
+        return res;
     #else
         return saturate_cast<uchar>((float)v);
     #endif
@@ -94,35 +106,47 @@ namespace cv { namespace gpu { namespace device
 
     template<> __device__ __forceinline__ schar saturate_cast<schar>(uchar v)
     {
-        return (schar) ::min((int)v, SCHAR_MAX);
+        uint res = 0;
+        uint vi = v;
+        asm("cvt.sat.s8.u8 %0, %1;" : "=r"(res) : "r"(vi));
+        return res;
     }
-    template<> __device__ __forceinline__ schar saturate_cast<schar>(ushort v)
+    template<> __device__ __forceinline__ schar saturate_cast<schar>(short v)
     {
-        return (schar) ::min((uint)v, (uint)SCHAR_MAX);
+        uint res = 0;
+        asm("cvt.sat.s8.s16 %0, %1;" : "=r"(res) : "h"(v));
+        return res;
     }
-    template<> __device__ __forceinline__ schar saturate_cast<schar>(int v)
+    template<> __device__ __forceinline__ schar saturate_cast<schar>(ushort v)
     {
-        return (schar)((uint)(v-SCHAR_MIN) <= (uint)UCHAR_MAX ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN);
+        uint res = 0;
+        asm("cvt.sat.s8.u16 %0, %1;" : "=r"(res) : "h"(v));
+        return res;
     }
-    template<> __device__ __forceinline__ schar saturate_cast<schar>(short v)
+    template<> __device__ __forceinline__ schar saturate_cast<schar>(int v)
     {
-        return saturate_cast<schar>((int)v);
+        uint res = 0;
+        asm("cvt.sat.s8.s32 %0, %1;" : "=r"(res) : "r"(v));
+        return res;
     }
     template<> __device__ __forceinline__ schar saturate_cast<schar>(uint v)
     {
-        return (schar) ::min(v, (uint)SCHAR_MAX);
+        uint res = 0;
+        asm("cvt.sat.s8.u32 %0, %1;" : "=r"(res) : "r"(v));
+        return res;
     }
-
     template<> __device__ __forceinline__ schar saturate_cast<schar>(float v)
     {
-        int iv = __float2int_rn(v);
-        return saturate_cast<schar>(iv);
+        uint res = 0;
+        asm("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(res) : "f"(v));
+        return res;
     }
     template<> __device__ __forceinline__ schar saturate_cast<schar>(double v)
     {
-    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
-        int iv = __double2int_rn(v);
-        return saturate_cast<schar>(iv);
+    #if __CUDA_ARCH__ >= 130
+        uint res = 0;
+        asm("cvt.rni.sat.s8.f64 %0, %1;" : "=r"(res) : "d"(v));
+        return res;
     #else
         return saturate_cast<schar>((float)v);
     #endif
@@ -130,30 +154,41 @@ namespace cv { namespace gpu { namespace device
 
     template<> __device__ __forceinline__ ushort saturate_cast<ushort>(schar v)
    {
-        return (ushort) ::max((int)v, 0);
+        ushort res = 0;
+        int vi = v;
+        asm("cvt.sat.u16.s8 %0, %1;" : "=h"(res) : "r"(vi));
+        return res;
     }
     template<> __device__ __forceinline__ ushort saturate_cast<ushort>(short v)
     {
-        return (ushort) ::max((int)v, 0);
+        ushort res = 0;
+        asm("cvt.sat.u16.s16 %0, %1;" : "=h"(res) : "h"(v));
+        return res;
     }
     template<> __device__ __forceinline__ ushort saturate_cast<ushort>(int v)
     {
-        return (ushort)((uint)v <= (uint)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0);
+        ushort res = 0;
+        asm("cvt.sat.u16.s32 %0, %1;" : "=h"(res) : "r"(v));
+        return res;
     }
     template<> __device__ __forceinline__ ushort saturate_cast<ushort>(uint v)
     {
-        return (ushort) ::min(v, (uint)USHRT_MAX);
+        ushort res = 0;
+        asm("cvt.sat.u16.u32 %0, %1;" : "=h"(res) : "r"(v));
+        return res;
     }
     template<> __device__ __forceinline__ ushort saturate_cast<ushort>(float v)
     {
-        int iv = __float2int_rn(v);
-        return saturate_cast<ushort>(iv);
+        ushort res = 0;
+        asm("cvt.rni.sat.u16.f32 %0, %1;" : "=h"(res) : "f"(v));
+        return res;
     }
     template<> __device__ __forceinline__ ushort saturate_cast<ushort>(double v)
     {
-    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
-        int iv = __double2int_rn(v);
-        return saturate_cast<ushort>(iv);
+    #if __CUDA_ARCH__ >= 130
+        ushort res = 0;
+        asm("cvt.rni.sat.u16.f64 %0, %1;" : "=h"(res) : "d"(v));
+        return res;
     #else
         return saturate_cast<ushort>((float)v);
     #endif
@@ -161,31 +196,45 @@ namespace cv { namespace gpu { namespace device
 
     template<> __device__ __forceinline__ short saturate_cast<short>(ushort v)
     {
-        return (short) ::min((int)v, SHRT_MAX);
+        short res = 0;
+        asm("cvt.sat.s16.u16 %0, %1;" : "=h"(res) : "h"(v));
+        return res;
     }
     template<> __device__ __forceinline__ short saturate_cast<short>(int v)
     {
-        return (short)((uint)(v - SHRT_MIN) <= (uint)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN);
+        short res = 0;
+        asm("cvt.sat.s16.s32 %0, %1;" : "=h"(res) : "r"(v));
+        return res;
     }
     template<> __device__ __forceinline__ short saturate_cast<short>(uint v)
     {
-        return (short) ::min(v, (uint)SHRT_MAX);
+        short res = 0;
+        asm("cvt.sat.s16.u32 %0, %1;" : "=h"(res) : "r"(v));
+        return res;
     }
     template<> __device__ __forceinline__ short saturate_cast<short>(float v)
     {
-        int iv = __float2int_rn(v);
-        return saturate_cast<short>(iv);
+        short res = 0;
+        asm("cvt.rni.sat.s16.f32 %0, %1;" : "=h"(res) : "f"(v));
+        return res;
     }
     template<> __device__ __forceinline__ short saturate_cast<short>(double v)
     {
-    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
-        int iv = __double2int_rn(v);
-        return saturate_cast<short>(iv);
+    #if __CUDA_ARCH__ >= 130
+        short res = 0;
+        asm("cvt.rni.sat.s16.f64 %0, %1;" : "=h"(res) : "d"(v));
+        return res;
     #else
         return saturate_cast<short>((float)v);
     #endif
     }
 
+    template<> __device__ __forceinline__ int saturate_cast<int>(uint v)
+    {
+        int res = 0;
+        asm("cvt.sat.s32.u32 %0, %1;" : "=r"(res) : "r"(v));
+        return res;
+    }
     template<> __device__ __forceinline__ int saturate_cast<int>(float v)
     {
         return __float2int_rn(v);
@@ -199,6 +248,25 @@ namespace cv { namespace gpu { namespace device
     #endif
     }
 
+    template<> __device__ __forceinline__ uint saturate_cast<uint>(schar v)
+    {
+        uint res = 0;
+        int vi = v;
+        asm("cvt.sat.u32.s8 %0, %1;" : "=r"(res) : "r"(vi));
+        return res;
+    }
+    template<> __device__ __forceinline__ uint saturate_cast<uint>(short v)
+    {
+        uint res = 0;
+        asm("cvt.sat.u32.s16 %0, %1;" : "=r"(res) : "h"(v));
+        return res;
+    }
+    template<> __device__ __forceinline__ uint saturate_cast<uint>(int v)
+    {
+        uint res = 0;
+        asm("cvt.sat.u32.s32 %0, %1;" : "=r"(res) : "r"(v));
+        return res;
+    }
     template<> __device__ __forceinline__ uint saturate_cast<uint>(float v)
     {
         return __float2uint_rn(v);
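Usage note: the new PTX-based specializations keep the existing saturate_cast call signatures, so callers of this header do not change. Below is a minimal, illustrative sketch of how device code typically relies on the float-to-uchar path; the kernel name, the include path (assumed to be the 2.4-era opencv2/gpu/device/saturate_cast.hpp), and the scaling parameter are placeholders, not part of this patch.

    #include <opencv2/gpu/device/saturate_cast.hpp>

    using namespace cv::gpu::device;

    // Converts a scaled float buffer to 8-bit. saturate_cast<uchar>(float) clamps to
    // [0, 255] and rounds to nearest, which after this patch compiles to cvt.rni.sat.u8.f32.
    __global__ void scaleTo8u(const float* src, uchar* dst, int n, float alpha)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
            dst[i] = saturate_cast<uchar>(src[i] * alpha);
    }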