diff --git a/modules/core/CMakeLists.txt b/modules/core/CMakeLists.txt
index 992d09de0b..a5a75efc41 100644
--- a/modules/core/CMakeLists.txt
+++ b/modules/core/CMakeLists.txt
@@ -3,6 +3,7 @@ set(the_description "The Core Functionality")
 ocv_add_dispatched_file(mathfuncs_core SSE2 AVX AVX2)
 ocv_add_dispatched_file(stat SSE4_2 AVX2)
 ocv_add_dispatched_file(arithm SSE2 SSE4_1 AVX2 VSX3)
+ocv_add_dispatched_file(convert SSE2 AVX2)
 # dispatching for accuracy tests
 ocv_add_dispatched_file_force_all(test_intrin128 TEST SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 AVX FP16 AVX2)
diff --git a/modules/core/src/convert.dispatch.cpp b/modules/core/src/convert.dispatch.cpp
index 42d0c7d468..89848c2513 100644
--- a/modules/core/src/convert.dispatch.cpp
+++ b/modules/core/src/convert.dispatch.cpp
@@ -4,238 +4,11 @@
 
 #include "precomp.hpp"
 #include "opencl_kernels_core.hpp"
-#include "convert.hpp"
 
-namespace cv {
-
-/*namespace hal {
-
-void cvt16f32f( const float16_t* src, float* dst, int len )
-{
-    int j = 0;
-#if CV_SIMD
-    const int VECSZ = v_float32::nlanes;
-    for( ; j < len; j += VECSZ )
-    {
-        if( j > len - VECSZ )
-        {
-            if( j == 0 )
-                break;
-            j = len - VECSZ;
-        }
-        v_store(dst + j, vx_load_expand(src + j));
-    }
-#endif
-    for( ; j < len; j++ )
-        dst[j] = (float)src[j];
-}
-
-void cvt32f16f( const float* src, float16_t* dst, int len )
-{
-    int j = 0;
-#if CV_SIMD
-    const int VECSZ = v_float32::nlanes;
-    for( ; j < len; j += VECSZ )
-    {
-        if( j > len - VECSZ )
-        {
-            if( j == 0 )
-                break;
-            j = len - VECSZ;
-        }
-        v_pack_store(dst + j, vx_load(src + j));
-    }
-#endif
-    for( ; j < len; j++ )
-        dst[j] = float16_t(src[j]);
-}
-
-/*void addRNGBias32f( float* arr, const float* scaleBiasPairs, int len )
-{
-    // the loop is simple enough, so we let the compiler to vectorize it
-    for( int i = 0; i < len; i++ )
-        arr[i] = scaleBiasPairs[i*2 + 1];
-}
-
-void addRNGBias64f( double* arr, const double* scaleBiasPairs, int len )
-{
-    // the loop is simple enough, so we let the compiler to vectorize it
-    for( int i = 0; i < len; i++ )
-        arr[i] = scaleBiasPairs[i*2 + 1];
-}
-
-}*/
-
-template<typename _Ts, typename _Td, typename _Twvec> inline void
-cvt_( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size )
-{
-    sstep /= sizeof(src[0]);
-    dstep /= sizeof(dst[0]);
-
-    for( int i = 0; i < size.height; i++, src += sstep, dst += dstep )
-    {
-        int j = 0;
-#if CV_SIMD
-        const int VECSZ = _Twvec::nlanes*2;
-        for( ; j < size.width; j += VECSZ )
-        {
-            if( j > size.width - VECSZ )
-            {
-                if( j == 0 || src == (_Ts*)dst )
-                    break;
-                j = size.width - VECSZ;
-            }
-            _Twvec v0, v1;
-            vx_load_pair_as(src + j, v0, v1);
-            v_store_pair_as(dst + j, v0, v1);
-        }
-#endif
-        for( ; j < size.width; j++ )
-            dst[j] = saturate_cast<_Td>(src[j]);
-    }
-}
-
-// in order to reduce the code size, for (16f <-> ...) conversions
-// we add a conversion function without loop unrolling
-template<typename _Ts, typename _Td, typename _Twvec> inline void
-cvt1_( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size )
-{
-    sstep /= sizeof(src[0]);
-    dstep /= sizeof(dst[0]);
-
-    for( int i = 0; i < size.height; i++, src += sstep, dst += dstep )
-    {
-        int j = 0;
-#if CV_SIMD
-        const int VECSZ = _Twvec::nlanes;
-        for( ; j < size.width; j += VECSZ )
-        {
-            if( j > size.width - VECSZ )
-            {
-                if( j == 0 || src == (_Ts*)dst )
-                    break;
-                j = size.width - VECSZ;
-            }
-            _Twvec v;
-            vx_load_as(src + j, v);
-            v_store_as(dst + j, v);
-        }
-        vx_cleanup();
-#endif
-        for( ; j < size.width; j++ )
-            dst[j] = saturate_cast<_Td>(src[j]);
-    }
-}
-
-static void cvtCopy( const uchar* src, size_t sstep,
-                     uchar* dst, size_t dstep, Size size, size_t elemsize)
-{
-    size_t len = size.width*elemsize;
-    for( int i = 0; i < size.height; i++, src += sstep, dst += dstep )
-    {
-        memcpy( dst, src, len );
-    }
-}
-
-#define DEF_CVT_FUNC(suffix, cvtfunc, _Ts, _Td, _Twvec) \
-static void cvt##suffix(const _Ts* src, size_t sstep, uchar*, size_t, \
-                        _Td* dst, size_t dstep, Size size, void*) \
-{ cvtfunc<_Ts, _Td, _Twvec>(src, sstep, dst, dstep, size); }
-
-////////////////////// 8u -> ... ////////////////////////
-
-DEF_CVT_FUNC(8u8s, cvt_, uchar, schar, v_int16)
-DEF_CVT_FUNC(8u16u, cvt_, uchar, ushort, v_uint16)
-DEF_CVT_FUNC(8u16s, cvt_, uchar, short, v_int16)
-DEF_CVT_FUNC(8u32s, cvt_, uchar, int, v_int32)
-DEF_CVT_FUNC(8u32f, cvt_, uchar, float, v_float32)
-DEF_CVT_FUNC(8u64f, cvt_, uchar, double, v_int32)
-//DEF_CVT_FUNC(8u16f, cvt1_, uchar, float16_t, v_float32)
-
-////////////////////// 8s -> ... ////////////////////////
-
-DEF_CVT_FUNC(8s8u, cvt_, schar, uchar, v_int16)
-DEF_CVT_FUNC(8s16u, cvt_, schar, ushort, v_uint16)
-DEF_CVT_FUNC(8s16s, cvt_, schar, short, v_int16)
-DEF_CVT_FUNC(8s32s, cvt_, schar, int, v_int32)
-DEF_CVT_FUNC(8s32f, cvt_, schar, float, v_float32)
-DEF_CVT_FUNC(8s64f, cvt_, schar, double, v_int32)
-//DEF_CVT_FUNC(8s16f, cvt1_, schar, float16_t, v_float32)
-
-////////////////////// 16u -> ... ////////////////////////
-
-DEF_CVT_FUNC(16u8u, cvt_, ushort, uchar, v_uint16)
-DEF_CVT_FUNC(16u8s, cvt_, ushort, schar, v_uint16)
-DEF_CVT_FUNC(16u16s, cvt_, ushort, short, v_int32)
-DEF_CVT_FUNC(16u32s, cvt_, ushort, int, v_int32)
-DEF_CVT_FUNC(16u32f, cvt_, ushort, float, v_float32)
-DEF_CVT_FUNC(16u64f, cvt_, ushort, double, v_int32)
-//DEF_CVT_FUNC(16u16f, cvt1_,ushort, float16_t, v_float32)
-
-////////////////////// 16s -> ... ////////////////////////
-
-DEF_CVT_FUNC(16s8u, cvt_, short, uchar, v_int16)
-DEF_CVT_FUNC(16s8s, cvt_, short, schar, v_int16)
-DEF_CVT_FUNC(16s16u, cvt_, short, ushort, v_int32)
-DEF_CVT_FUNC(16s32s, cvt_, short, int, v_int32)
-DEF_CVT_FUNC(16s32f, cvt_, short, float, v_float32)
-DEF_CVT_FUNC(16s64f, cvt_, short, double, v_int32)
-//DEF_CVT_FUNC(16s16f, cvt1_,short, float16_t, v_float32)
-
-////////////////////// 32s -> ... ////////////////////////
-
-DEF_CVT_FUNC(32s8u, cvt_, int, uchar, v_int32)
-DEF_CVT_FUNC(32s8s, cvt_, int, schar, v_int32)
-DEF_CVT_FUNC(32s16u, cvt_, int, ushort, v_int32)
-DEF_CVT_FUNC(32s16s, cvt_, int, short, v_int32)
-DEF_CVT_FUNC(32s32f, cvt_, int, float, v_float32)
-DEF_CVT_FUNC(32s64f, cvt_, int, double, v_int32)
-//DEF_CVT_FUNC(32s16f, cvt1_,int, float16_t, v_float32)
-
-////////////////////// 32f -> ... ////////////////////////
-
-DEF_CVT_FUNC(32f8u, cvt_, float, uchar, v_float32)
-DEF_CVT_FUNC(32f8s, cvt_, float, schar, v_float32)
-DEF_CVT_FUNC(32f16u, cvt_, float, ushort, v_float32)
-DEF_CVT_FUNC(32f16s, cvt_, float, short, v_float32)
-DEF_CVT_FUNC(32f32s, cvt_, float, int, v_float32)
-DEF_CVT_FUNC(32f64f, cvt_, float, double, v_float32)
-DEF_CVT_FUNC(32f16f, cvt1_,float, float16_t, v_float32)
-
-////////////////////// 64f -> ... ////////////////////////
-
-DEF_CVT_FUNC(64f8u, cvt_, double, uchar, v_int32)
-DEF_CVT_FUNC(64f8s, cvt_, double, schar, v_int32)
-DEF_CVT_FUNC(64f16u, cvt_, double, ushort, v_int32)
-DEF_CVT_FUNC(64f16s, cvt_, double, short, v_int32)
-DEF_CVT_FUNC(64f32s, cvt_, double, int, v_int32)
-DEF_CVT_FUNC(64f32f, cvt_, double, float, v_float32)
-//DEF_CVT_FUNC(64f16f, cvt1_,double, float16_t, v_float32)
-
-////////////////////// 16f -> ... ////////////////////////
-
-//DEF_CVT_FUNC(16f8u, cvt_, float16_t, uchar, v_float32)
-//DEF_CVT_FUNC(16f8s, cvt_, float16_t, schar, v_float32)
-//DEF_CVT_FUNC(16f16u, cvt1_, float16_t, ushort, v_float32)
-//DEF_CVT_FUNC(16f16s, cvt1_, float16_t, short, v_float32)
-//DEF_CVT_FUNC(16f32s, cvt1_, float16_t, int, v_float32)
-DEF_CVT_FUNC(16f32f, cvt1_, float16_t, float, v_float32)
-//DEF_CVT_FUNC(16f64f, cvt1_, float16_t, double, v_float32)
-
-///////////// "conversion" w/o conversion ///////////////
-
-static void cvt8u(const uchar* src, size_t sstep, uchar*, size_t, uchar* dst, size_t dstep, Size size, void*)
-{ cvtCopy(src, sstep, dst, dstep, size, 1); }
-
-static void cvt16u(const ushort* src, size_t sstep, uchar*, size_t, ushort* dst, size_t dstep, Size size, void*)
-{ cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 2); }
-
-static void cvt32s(const int* src, size_t sstep, uchar*, size_t, int* dst, size_t dstep, Size size, void*)
-{ cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 4); }
-
-static void cvt64s(const int64* src, size_t sstep, uchar*, size_t, int64* dst, size_t dstep, Size size, void*)
-{ cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 8); }
+#include "convert.simd.hpp"
+#include "convert.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
 
+namespace cv {
 
 /* [TODO] Recover IPP calls
 #if defined(HAVE_IPP)
@@ -334,50 +107,22 @@ DEF_CPY_FUNC(64s, int64)
 
 BinaryFunc getConvertFunc(int sdepth, int ddepth)
 {
-    static BinaryFunc cvtTab[][8] =
-    {
-        {
-            (BinaryFunc)(cvt8u), (BinaryFunc)GET_OPTIMIZED(cvt8s8u), (BinaryFunc)GET_OPTIMIZED(cvt16u8u),
-            (BinaryFunc)GET_OPTIMIZED(cvt16s8u), (BinaryFunc)GET_OPTIMIZED(cvt32s8u), (BinaryFunc)GET_OPTIMIZED(cvt32f8u),
-            (BinaryFunc)GET_OPTIMIZED(cvt64f8u), 0 //(BinaryFunc)(cvt16f8u)
-        },
-        {
-            (BinaryFunc)GET_OPTIMIZED(cvt8u8s), (BinaryFunc)cvt8u, (BinaryFunc)GET_OPTIMIZED(cvt16u8s),
-            (BinaryFunc)GET_OPTIMIZED(cvt16s8s), (BinaryFunc)GET_OPTIMIZED(cvt32s8s), (BinaryFunc)GET_OPTIMIZED(cvt32f8s),
-            (BinaryFunc)GET_OPTIMIZED(cvt64f8s), 0 //(BinaryFunc)(cvt16f8s)
-        },
-        {
-            (BinaryFunc)GET_OPTIMIZED(cvt8u16u), (BinaryFunc)GET_OPTIMIZED(cvt8s16u), (BinaryFunc)cvt16u,
-            (BinaryFunc)GET_OPTIMIZED(cvt16s16u), (BinaryFunc)GET_OPTIMIZED(cvt32s16u), (BinaryFunc)GET_OPTIMIZED(cvt32f16u),
-            (BinaryFunc)GET_OPTIMIZED(cvt64f16u), 0 //(BinaryFunc)(cvt16f16u)
-        },
-        {
-            (BinaryFunc)GET_OPTIMIZED(cvt8u16s), (BinaryFunc)GET_OPTIMIZED(cvt8s16s), (BinaryFunc)GET_OPTIMIZED(cvt16u16s),
-            (BinaryFunc)cvt16u, (BinaryFunc)GET_OPTIMIZED(cvt32s16s), (BinaryFunc)GET_OPTIMIZED(cvt32f16s),
-            (BinaryFunc)GET_OPTIMIZED(cvt64f16s), 0 //(BinaryFunc)(cvt16f16s)
-        },
-        {
-            (BinaryFunc)GET_OPTIMIZED(cvt8u32s), (BinaryFunc)GET_OPTIMIZED(cvt8s32s), (BinaryFunc)GET_OPTIMIZED(cvt16u32s),
-            (BinaryFunc)GET_OPTIMIZED(cvt16s32s), (BinaryFunc)cvt32s, (BinaryFunc)GET_OPTIMIZED(cvt32f32s),
-            (BinaryFunc)GET_OPTIMIZED(cvt64f32s), 0 //(BinaryFunc)(cvt16f32s)
-        },
-        {
-            (BinaryFunc)GET_OPTIMIZED(cvt8u32f), (BinaryFunc)GET_OPTIMIZED(cvt8s32f), (BinaryFunc)GET_OPTIMIZED(cvt16u32f),
-            (BinaryFunc)GET_OPTIMIZED(cvt16s32f), (BinaryFunc)GET_OPTIMIZED(cvt32s32f), (BinaryFunc)cvt32s,
-            (BinaryFunc)GET_OPTIMIZED(cvt64f32f), 0 //(BinaryFunc)(cvt16f32f)
-        },
-        {
-            (BinaryFunc)GET_OPTIMIZED(cvt8u64f), (BinaryFunc)GET_OPTIMIZED(cvt8s64f), (BinaryFunc)GET_OPTIMIZED(cvt16u64f),
-            (BinaryFunc)GET_OPTIMIZED(cvt16s64f), (BinaryFunc)GET_OPTIMIZED(cvt32s64f), (BinaryFunc)GET_OPTIMIZED(cvt32f64f),
-            (BinaryFunc)(cvt64s), 0 //(BinaryFunc)(cvt16f64f)
-        },
-        {
-            0, 0, 0, 0, 0, 0, 0, 0
-            //(BinaryFunc)(cvt8u16f), (BinaryFunc)(cvt8s16f), (BinaryFunc)(cvt16u16f), (BinaryFunc)(cvt16s16f),
-            //(BinaryFunc)(cvt32s16f), (BinaryFunc)(cvt32f16f), (BinaryFunc)(cvt64f16f), (BinaryFunc)(cvt16u)
-        }
-    };
-    return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
+    CV_INSTRUMENT_REGION();
+    CV_CPU_DISPATCH(getConvertFunc, (sdepth, ddepth),
+        CV_CPU_DISPATCH_MODES_ALL);
+}
+
+static BinaryFunc get_cvt32f16f()
+{
+    CV_INSTRUMENT_REGION();
+    CV_CPU_DISPATCH(get_cvt32f16f, (),
+        CV_CPU_DISPATCH_MODES_ALL);
+}
+static BinaryFunc get_cvt16f32f()
+{
+    CV_INSTRUMENT_REGION();
+    CV_CPU_DISPATCH(get_cvt16f32f, (),
+        CV_CPU_DISPATCH_MODES_ALL);
 }
 
 #ifdef HAVE_OPENCL
@@ -410,9 +155,7 @@ static bool ocl_convertFp16( InputArray _src, OutputArray _dst, int sdepth, int
 }
 #endif
 
-} // cv::
-
-void cv::Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
+void Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
 {
     CV_INSTRUMENT_REGION();
 
@@ -467,7 +210,7 @@ void cv::Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta)
 
 //==================================================================================================
 
-void cv::convertFp16( InputArray _src, OutputArray _dst )
+void convertFp16(InputArray _src, OutputArray _dst)
 {
     CV_INSTRUMENT_REGION();
 
@@ -485,12 +228,12 @@ void cv::convertFp16( InputArray _src, OutputArray _dst )
         }
         else
             ddepth = CV_16S;
-        func = (BinaryFunc)cvt32f16f;
+        func = (BinaryFunc)get_cvt32f16f();
         break;
     case CV_16S:
     //case CV_16F:
        ddepth = CV_32F;
-        func = (BinaryFunc)cvt16f32f;
+        func = (BinaryFunc)get_cvt16f32f();
        break;
    default:
        CV_Error(Error::StsUnsupportedFormat, "Unsupported input depth");
@@ -525,3 +268,5 @@ void cv::convertFp16( InputArray _src, OutputArray _dst )
             func(ptrs[0], 0, 0, 0, ptrs[1], 0, sz, 0);
     }
 }
+
+} // namespace cv
diff --git a/modules/core/src/convert.simd.hpp b/modules/core/src/convert.simd.hpp
index 42d0c7d468..d9b3469e4c 100644
--- a/modules/core/src/convert.simd.hpp
+++ b/modules/core/src/convert.simd.hpp
@@ -3,10 +3,16 @@
 // of this distribution and at http://opencv.org/license.html
 
 #include "precomp.hpp"
-#include "opencl_kernels_core.hpp"
 #include "convert.hpp"
 
 namespace cv {
+CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
+
+BinaryFunc getConvertFunc(int sdepth, int ddepth);
+BinaryFunc get_cvt32f16f();
+BinaryFunc get_cvt16f32f();
+
+#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
 
 /*namespace hal {
@@ -66,7 +72,7 @@ void addRNGBias64f( double* arr, const double* scaleBiasPairs, int len )
 
 }*/
 
-template<typename _Ts, typename _Td, typename _Twvec> inline void
+template<typename _Ts, typename _Td, typename _Twvec> static inline void
 cvt_( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size )
 {
     sstep /= sizeof(src[0]);
@@ -97,7 +103,7 @@ cvt_( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size )
 
 // in order to reduce the code size, for (16f <-> ...) conversions
 // we add a conversion function without loop unrolling
-template<typename _Ts, typename _Td, typename _Twvec> inline void
+template<typename _Ts, typename _Td, typename _Twvec> static inline void
 cvt1_( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size )
 {
     sstep /= sizeof(src[0]);
@@ -140,7 +146,10 @@ static void cvtCopy( const uchar* src, size_t sstep,
 
 #define DEF_CVT_FUNC(suffix, cvtfunc, _Ts, _Td, _Twvec) \
 static void cvt##suffix(const _Ts* src, size_t sstep, uchar*, size_t, \
                         _Td* dst, size_t dstep, Size size, void*) \
-{ cvtfunc<_Ts, _Td, _Twvec>(src, sstep, dst, dstep, size); }
+{ \
+    CV_INSTRUMENT_REGION(); \
+    cvtfunc<_Ts, _Td, _Twvec>(src, sstep, dst, dstep, size); \
+}
 
 ////////////////////// 8u -> ... ////////////////////////
@@ -225,16 +234,16 @@ DEF_CVT_FUNC(16f32f, cvt1_, float16_t, float, v_float32)
 ///////////// "conversion" w/o conversion ///////////////
 
 static void cvt8u(const uchar* src, size_t sstep, uchar*, size_t, uchar* dst, size_t dstep, Size size, void*)
-{ cvtCopy(src, sstep, dst, dstep, size, 1); }
+{ CV_INSTRUMENT_REGION(); cvtCopy(src, sstep, dst, dstep, size, 1); }
 
 static void cvt16u(const ushort* src, size_t sstep, uchar*, size_t, ushort* dst, size_t dstep, Size size, void*)
-{ cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 2); }
+{ CV_INSTRUMENT_REGION(); cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 2); }
 
 static void cvt32s(const int* src, size_t sstep, uchar*, size_t, int* dst, size_t dstep, Size size, void*)
-{ cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 4); }
+{ CV_INSTRUMENT_REGION(); cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 4); }
 
 static void cvt64s(const int64* src, size_t sstep, uchar*, size_t, int64* dst, size_t dstep, Size size, void*)
-{ cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 8); }
+{ CV_INSTRUMENT_REGION(); cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 8); }
 
 
 /* [TODO] Recover IPP calls
@@ -380,148 +389,17 @@ BinaryFunc getConvertFunc(int sdepth, int ddepth)
     return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
 }
 
-#ifdef HAVE_OPENCL
-static bool ocl_convertFp16( InputArray _src, OutputArray _dst, int sdepth, int ddepth )
+BinaryFunc get_cvt32f16f()
 {
-    int type = _src.type(), cn = CV_MAT_CN(type);
-
-    _dst.createSameSize( _src, CV_MAKETYPE(ddepth, cn) );
-    int kercn = 1;
-    int rowsPerWI = 1;
-    String build_opt = format("-D HALF_SUPPORT -D srcT=%s -D dstT=%s -D rowsPerWI=%d%s",
-                              sdepth == CV_32F ? "float" : "half",
-                              sdepth == CV_32F ? "half" : "float",
-                              rowsPerWI,
-                              sdepth == CV_32F ? " -D FLOAT_TO_HALF " : "");
-    ocl::Kernel k("convertFp16", ocl::core::halfconvert_oclsrc, build_opt);
-    if (k.empty())
-        return false;
-
-    UMat src = _src.getUMat();
-    UMat dst = _dst.getUMat();
-
-    ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
-                   dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);
-
-    k.args(srcarg, dstarg);
-
-    size_t globalsize[2] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
-    return k.run(2, globalsize, NULL, false);
+    return (BinaryFunc)cvt32f16f;
 }
-#endif
-
-} // cv::
 
-void cv::Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
+BinaryFunc get_cvt16f32f()
 {
-    CV_INSTRUMENT_REGION();
-
-    if( empty() )
-    {
-        _dst.release();
-        return;
-    }
-
-    bool noScale = fabs(alpha-1) < DBL_EPSILON && fabs(beta) < DBL_EPSILON;
-
-    if( _type < 0 )
-        _type = _dst.fixedType() ? _dst.type() : type();
-    else
-        _type = CV_MAKETYPE(CV_MAT_DEPTH(_type), channels());
-
-    int sdepth = depth(), ddepth = CV_MAT_DEPTH(_type);
-    if( sdepth == ddepth && noScale )
-    {
-        copyTo(_dst);
-        return;
-    }
-
-    Mat src = *this;
-    if( dims <= 2 )
-        _dst.create( size(), _type );
-    else
-        _dst.create( dims, size, _type );
-    Mat dst = _dst.getMat();
-
-    BinaryFunc func = noScale ? getConvertFunc(sdepth, ddepth) : getConvertScaleFunc(sdepth, ddepth);
-    double scale[] = {alpha, beta};
-    int cn = channels();
-    CV_Assert( func != 0 );
-
-    if( dims <= 2 )
-    {
-        Size sz = getContinuousSize2D(src, dst, cn);
-        func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
-    }
-    else
-    {
-        const Mat* arrays[] = {&src, &dst, 0};
-        uchar* ptrs[2] = {};
-        NAryMatIterator it(arrays, ptrs);
-        Size sz((int)(it.size*cn), 1);
-
-        for( size_t i = 0; i < it.nplanes; i++, ++it )
-            func(ptrs[0], 1, 0, 0, ptrs[1], 1, sz, scale);
-    }
+    return (BinaryFunc)cvt16f32f;
 }
 
-//==================================================================================================
-
-void cv::convertFp16( InputArray _src, OutputArray _dst )
-{
-    CV_INSTRUMENT_REGION();
-
-    int sdepth = _src.depth(), ddepth = 0;
-    BinaryFunc func = 0;
-
-    switch( sdepth )
-    {
-    case CV_32F:
-        if(_dst.fixedType())
-        {
-            ddepth = _dst.depth();
-            CV_Assert(ddepth == CV_16S /*|| ddepth == CV_16F*/);
-            CV_Assert(_dst.channels() == _src.channels());
-        }
-        else
-            ddepth = CV_16S;
-        func = (BinaryFunc)cvt32f16f;
-        break;
-    case CV_16S:
-    //case CV_16F:
-        ddepth = CV_32F;
-        func = (BinaryFunc)cvt16f32f;
-        break;
-    default:
-        CV_Error(Error::StsUnsupportedFormat, "Unsupported input depth");
-        return;
-    }
-
-    CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
-               ocl_convertFp16(_src, _dst, sdepth, ddepth))
-
-    Mat src = _src.getMat();
-
-    int type = CV_MAKETYPE(ddepth, src.channels());
-    _dst.create( src.dims, src.size, type );
-    Mat dst = _dst.getMat();
-    int cn = src.channels();
-
-    CV_Assert( func != 0 );
-
-    if( src.dims <= 2 )
-    {
-        Size sz = getContinuousSize2D(src, dst, cn);
-        func( src.data, src.step, 0, 0, dst.data, dst.step, sz, 0);
-    }
-    else
-    {
-        const Mat* arrays[] = {&src, &dst, 0};
-        uchar* ptrs[2] = {};
-        NAryMatIterator it(arrays, ptrs);
-        Size sz((int)(it.size*cn), 1);
+#endif
 
-        for( size_t i = 0; i < it.nplanes; i++, ++it )
-            func(ptrs[0], 0, 0, 0, ptrs[1], 0, sz, 0);
-    }
-}
+CV_CPU_OPTIMIZATION_NAMESPACE_END
+} // namespace