diff --git a/doc/js_tutorials/js_setup/js_setup/js_setup.markdown b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
index 72a5683fe1..2f6b429c26 100644
--- a/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
+++ b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
@@ -107,6 +107,90 @@ Building OpenCV.js from Source
     @note
     It requires `node` installed in your development environment.
 
+-#  [optional] To build `opencv.js` with threads optimization, append the `--threads` option.
+
+    For example:
+    @code{.bash}
+    python ./platforms/js/build_js.py build_js --build_wasm --threads
+    @endcode
+
+    By default, the number of threads is set to the number of logical cores of your device. You can use `cv.parallel_pthreads_set_threads_num(number)` to set the number of threads manually and `cv.parallel_pthreads_get_threads_num()` to query the current number of threads, as in the example below.
+
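+    A minimal sketch of the API (assuming `opencv.js` has already finished loading):
+    @code{.js}
+    // query the current number of worker threads
+    let n = cv.parallel_pthreads_get_threads_num();
+    // limit OpenCV.js to two worker threads
+    cv.parallel_pthreads_set_threads_num(2);
+    @endcode
+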
+    @note
+    You should build the wasm version of `opencv.js` if you want to enable this optimization. The threads optimization only works in the browser, not in Node.js. You also need to enable the `WebAssembly threads support` feature in your browser first. For example, if you use Chrome, please enable this flag in chrome://flags.
+
+-#  [optional] To build `opencv.js` with wasm SIMD optimization, append the `--simd` option.
+
+    For example:
+    @code{.bash}
+    python ./platforms/js/build_js.py build_js --build_wasm --simd
+    @endcode
+
+    The SIMD optimization is experimental, as wasm SIMD is still in development.
+
+    @note
+    Currently, only the Emscripten LLVM upstream backend supports wasm SIMD (see https://emscripten.org/docs/porting/simd.html), so you need to set up the upstream backend environment with the following commands first:
+    @code{.bash}
+    ./emsdk update
+    ./emsdk install latest-upstream
+    ./emsdk activate latest-upstream
+    source ./emsdk_env.sh
+    @endcode
+
+    @note
+    You should build the wasm version of `opencv.js` if you want to enable this optimization. In the browser, you need to enable the `WebAssembly SIMD support` feature first. For example, if you use Chrome, please enable this flag in chrome://flags. In Node.js, you need to run the script with the `--experimental-wasm-simd` flag.
+
+    @note
+    The SIMD version of `opencv.js` built by the latest LLVM upstream may not work with stable browsers or older versions of Node.js. Please use the latest version of an unstable browser or Node.js, such as `Chrome Dev`, to get the new features.
+
+-#  [optional] To build the wasm intrinsics tests, append the `--build_wasm_intrin_test` option.
+
+    For example:
+    @code{.bash}
+    python ./platforms/js/build_js.py build_js --build_wasm --simd --build_wasm_intrin_test
+    @endcode
+
+    For the wasm intrinsics tests, you can use the following function to run all the test cases:
+    @code{.js}
+    cv.test_hal_intrin_all()
+    @endcode
+
+    Failed cases will be logged in the JavaScript debug console.
+
+    If you only want to test a single data type of the wasm intrinsics, you can use the following functions:
+    @code{.js}
+    cv.test_hal_intrin_uint8()
+    cv.test_hal_intrin_int8()
+    cv.test_hal_intrin_uint16()
+    cv.test_hal_intrin_int16()
+    cv.test_hal_intrin_uint32()
+    cv.test_hal_intrin_int32()
+    cv.test_hal_intrin_uint64()
+    cv.test_hal_intrin_int64()
+    cv.test_hal_intrin_float32()
+    cv.test_hal_intrin_float64()
+    @endcode
+
+-#  [optional] To build the performance tests, append the `--build_perf` option.
+
+    For example:
+    @code{.bash}
+    python ./platforms/js/build_js.py build_js --build_perf
+    @endcode
+
+    To run the performance tests, launch a local web server in the \<build_dir\>/bin folder. For example, you can use node `http-server`, which serves on `localhost:8080`, as sketched below.
+
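+    A minimal sketch, assuming the `http-server` npm package is available:
+    @code{.sh}
+    cd <build_dir>/bin
+    npx http-server -p 8080
+    @endcode
+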
+    The performance tests currently cover several kernels, such as `cvtColor`, `resize` and `threshold`. For example, to test `threshold`, navigate your web browser to `http://localhost:8080/perf/perf_imgproc/perf_threshold.html`. Enter a test parameter such as `(1920x1080, CV_8UC1, THRESH_BINARY)` and click the `Run` button to run that case. If you don't enter a parameter, all the cases of this kernel will be run.
+
+    You can also run tests using Node.js.
+
+    For example, run `threshold` with parameter `(1920x1080, CV_8UC1, THRESH_BINARY)`:
+    @code{.sh}
+    cd bin/perf
+    npm install
+    node perf_threshold.js --test_param_filter="(1920x1080, CV_8UC1, THRESH_BINARY)"
+    @endcode
+
 Building OpenCV.js with Docker
 ---------------------------------------
 
diff --git a/modules/core/include/opencv2/core/cv_cpu_dispatch.h b/modules/core/include/opencv2/core/cv_cpu_dispatch.h
index 818087f755..0248d5f98f 100644
--- a/modules/core/include/opencv2/core/cv_cpu_dispatch.h
+++ b/modules/core/include/opencv2/core/cv_cpu_dispatch.h
@@ -157,6 +157,11 @@
 #  define CV_MSA 1
 #endif
 
+#ifdef __EMSCRIPTEN__
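+// Emscripten builds use the wasm SIMD128 intrinsics (implemented in intrin_wasm.hpp)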
+#  define CV_WASM_SIMD 1
+#  include <wasm_simd128.h>
+#endif
+
 #endif // CV_ENABLE_INTRINSICS && !CV_DISABLE_OPTIMIZATION && !__CUDACC__
 
 #if defined CV_CPU_COMPILE_AVX && !defined CV_CPU_BASELINE_COMPILE_AVX
@@ -328,3 +333,7 @@ struct VZeroUpperGuard {
 #ifndef CV_MSA
 #  define CV_MSA 0
 #endif
+
+#ifndef CV_WASM_SIMD
+#  define CV_WASM_SIMD 0
+#endif
\ No newline at end of file
diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h
index c39df9d3df..4a9dae86bb 100644
--- a/modules/core/include/opencv2/core/cvdef.h
+++ b/modules/core/include/opencv2/core/cvdef.h
@@ -695,6 +695,7 @@ __CV_ENUM_FLAGS_BITWISE_XOR_EQ   (EnumType, EnumType)
 #endif
 
 #define CV_CXX_MOVE_SEMANTICS 1
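+// convenience alias for std::move()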
+#define CV_CXX_MOVE(x) std::move(x)
 #define CV_CXX_STD_ARRAY 1
 #include <array>
 #ifndef CV_OVERRIDE
diff --git a/modules/core/include/opencv2/core/hal/intrin.hpp b/modules/core/include/opencv2/core/hal/intrin.hpp
index 615887c924..3bdbf05f2f 100644
--- a/modules/core/include/opencv2/core/hal/intrin.hpp
+++ b/modules/core/include/opencv2/core/hal/intrin.hpp
@@ -168,7 +168,7 @@ using namespace CV_CPU_OPTIMIZATION_HAL_NAMESPACE;
 #   undef CV_MSA
 #endif
 
-#if CV_SSE2 || CV_NEON || CV_VSX || CV_MSA
+#if CV_SSE2 || CV_NEON || CV_VSX || CV_MSA || CV_WASM_SIMD
 #define CV__SIMD_FORWARD 128
 #include "opencv2/core/hal/intrin_forward.hpp"
 #endif
@@ -190,6 +190,9 @@ using namespace CV_CPU_OPTIMIZATION_HAL_NAMESPACE;
 
 #include "opencv2/core/hal/intrin_msa.hpp"
 
+#elif CV_WASM_SIMD
+#include "opencv2/core/hal/intrin_wasm.hpp"
+
 #else
 
 #define CV_SIMD128_CPP 1
diff --git a/modules/core/include/opencv2/core/hal/intrin_avx.hpp b/modules/core/include/opencv2/core/hal/intrin_avx.hpp
index 74db46b470..c821dc3991 100644
--- a/modules/core/include/opencv2/core/hal/intrin_avx.hpp
+++ b/modules/core/include/opencv2/core/hal/intrin_avx.hpp
@@ -1241,6 +1241,11 @@ inline int v_signmask(const v_int32x8& a)
 inline int v_signmask(const v_uint32x8& a)
 { return v_signmask(v_reinterpret_as_f32(a)); }
 
+inline int v_signmask(const v_int64x4& a)
+{ return v_signmask(v_reinterpret_as_f64(a)); }
+inline int v_signmask(const v_uint64x4& a)
+{ return v_signmask(v_reinterpret_as_f64(a)); }
+
 inline int v_scan_forward(const v_int8x32& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
 inline int v_scan_forward(const v_uint8x32& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
 inline int v_scan_forward(const v_int16x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 2; }
@@ -1253,40 +1258,23 @@ inline int v_scan_forward(const v_uint64x4& a) { return trailingZeros32(v_signma
 inline int v_scan_forward(const v_float64x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; }
 
 /** Checks **/
-#define OPENCV_HAL_IMPL_AVX_CHECK(_Tpvec, and_op, allmask)  \
-    inline bool v_check_all(const _Tpvec& a)                \
-    {                                                       \
-        int mask = v_signmask(v_reinterpret_as_s8(a));      \
-        return and_op(mask, allmask) == allmask;            \
-    }                                                       \
-    inline bool v_check_any(const _Tpvec& a)                \
-    {                                                       \
-        int mask = v_signmask(v_reinterpret_as_s8(a));      \
-        return and_op(mask, allmask) != 0;                  \
-    }
-
-OPENCV_HAL_IMPL_AVX_CHECK(v_uint8x32,  OPENCV_HAL_1ST, -1)
-OPENCV_HAL_IMPL_AVX_CHECK(v_int8x32,   OPENCV_HAL_1ST, -1)
-OPENCV_HAL_IMPL_AVX_CHECK(v_uint16x16, OPENCV_HAL_AND, (int)0xaaaaaaaa)
-OPENCV_HAL_IMPL_AVX_CHECK(v_int16x16,  OPENCV_HAL_AND, (int)0xaaaaaaaa)
-OPENCV_HAL_IMPL_AVX_CHECK(v_uint32x8,  OPENCV_HAL_AND, (int)0x88888888)
-OPENCV_HAL_IMPL_AVX_CHECK(v_int32x8,   OPENCV_HAL_AND, (int)0x88888888)
-
-#define OPENCV_HAL_IMPL_AVX_CHECK_FLT(_Tpvec, allmask) \
-    inline bool v_check_all(const _Tpvec& a)           \
-    {                                                  \
-        int mask = v_signmask(a);                      \
-        return mask == allmask;                        \
-    }                                                  \
-    inline bool v_check_any(const _Tpvec& a)           \
-    {                                                  \
-        int mask = v_signmask(a);                      \
-        return mask != 0;                              \
-    }
-
-OPENCV_HAL_IMPL_AVX_CHECK_FLT(v_float32x8, 255)
-OPENCV_HAL_IMPL_AVX_CHECK_FLT(v_float64x4, 15)
-
+#define OPENCV_HAL_IMPL_AVX_CHECK(_Tpvec, allmask) \
+    inline bool v_check_all(const _Tpvec& a) { return v_signmask(a) == allmask; } \
+    inline bool v_check_any(const _Tpvec& a) { return v_signmask(a) != 0; }
+OPENCV_HAL_IMPL_AVX_CHECK(v_uint8x32, -1)
+OPENCV_HAL_IMPL_AVX_CHECK(v_int8x32, -1)
+OPENCV_HAL_IMPL_AVX_CHECK(v_uint32x8, 255)
+OPENCV_HAL_IMPL_AVX_CHECK(v_int32x8, 255)
+OPENCV_HAL_IMPL_AVX_CHECK(v_uint64x4, 15)
+OPENCV_HAL_IMPL_AVX_CHECK(v_int64x4, 15)
+OPENCV_HAL_IMPL_AVX_CHECK(v_float32x8, 255)
+OPENCV_HAL_IMPL_AVX_CHECK(v_float64x4, 15)
+
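+// 16-bit lanes: _mm256_movemask_epi8 yields one bit per byte, so the lane sign bits
+// sit at the odd bit positions; mask 0xaaaaaaaa keeps exactly those bits.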
+#define OPENCV_HAL_IMPL_AVX_CHECK_SHORT(_Tpvec)  \
+    inline bool v_check_all(const _Tpvec& a) { return (v_signmask(v_reinterpret_as_s8(a)) & 0xaaaaaaaa) == 0xaaaaaaaa; } \
+    inline bool v_check_any(const _Tpvec& a) { return (v_signmask(v_reinterpret_as_s8(a)) & 0xaaaaaaaa) != 0; }
+OPENCV_HAL_IMPL_AVX_CHECK_SHORT(v_uint16x16)
+OPENCV_HAL_IMPL_AVX_CHECK_SHORT(v_int16x16)
 
 ////////// Other math /////////
 
diff --git a/modules/core/include/opencv2/core/hal/intrin_cpp.hpp b/modules/core/include/opencv2/core/hal/intrin_cpp.hpp
index 7dd948db6a..2acc7b633f 100644
--- a/modules/core/include/opencv2/core/hal/intrin_cpp.hpp
+++ b/modules/core/include/opencv2/core/hal/intrin_cpp.hpp
@@ -1080,7 +1080,7 @@ Example:
 v_int32x4 r; // set to {-1, -1, 1, 1}
 int mask = v_signmask(r); // mask = 3 <== 00000000 00000000 00000000 00000011
 @endcode
-For all types except 64-bit. */
+*/
 template<typename _Tp, int n> inline int v_signmask(const v_reg<_Tp, n>& a)
 {
     int mask = 0;
@@ -1109,7 +1109,7 @@ template <typename _Tp, int n> inline int v_scan_forward(const v_reg<_Tp, n>& a)
 /** @brief Check if all packed values are less than zero
 
 Unsigned values will be casted to signed: `uchar 254 => char -2`.
-For all types except 64-bit. */
+*/
 template<typename _Tp, int n> inline bool v_check_all(const v_reg<_Tp, n>& a)
 {
     for( int i = 0; i < n; i++ )
@@ -1121,7 +1121,7 @@ template<typename _Tp, int n> inline bool v_check_all(const v_reg<_Tp, n>& a)
 /** @brief Check if any of packed values is less than zero
 
 Unsigned values will be casted to signed: `uchar 254 => char -2`.
-For all types except 64-bit. */
+*/
 template<typename _Tp, int n> inline bool v_check_any(const v_reg<_Tp, n>& a)
 {
     for( int i = 0; i < n; i++ )
diff --git a/modules/core/include/opencv2/core/hal/intrin_neon.hpp b/modules/core/include/opencv2/core/hal/intrin_neon.hpp
index 5617bc24e6..e5f707ca57 100644
--- a/modules/core/include/opencv2/core/hal/intrin_neon.hpp
+++ b/modules/core/include/opencv2/core/hal/intrin_neon.hpp
@@ -1139,9 +1139,17 @@ inline bool v_check_any(const v_##_Tpvec& a) \
 OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint8x16, u8, 7)
 OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint16x8, u16, 15)
 OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint32x4, u32, 31)
-#if CV_SIMD128_64F
-OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint64x2, u64, 63)
-#endif
+
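+// 64-bit lanes: shift the sign bit of each lane down to bit 0, then combine the two lanes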
+inline bool v_check_all(const v_uint64x2& a)
+{
+    uint64x2_t v0 = vshrq_n_u64(a.val, 63);
+    return (vgetq_lane_u64(v0, 0) & vgetq_lane_u64(v0, 1)) == 1;
+}
+inline bool v_check_any(const v_uint64x2& a)
+{
+    uint64x2_t v0 = vshrq_n_u64(a.val, 63);
+    return (vgetq_lane_u64(v0, 0) | vgetq_lane_u64(v0, 1)) != 0;
+}
 
 inline bool v_check_all(const v_int8x16& a)
 { return v_check_all(v_reinterpret_as_u8(a)); }
@@ -1161,13 +1169,13 @@ inline bool v_check_any(const v_int32x4& a)
 inline bool v_check_any(const v_float32x4& a)
 { return v_check_any(v_reinterpret_as_u32(a)); }
 
-#if CV_SIMD128_64F
 inline bool v_check_all(const v_int64x2& a)
 { return v_check_all(v_reinterpret_as_u64(a)); }
-inline bool v_check_all(const v_float64x2& a)
-{ return v_check_all(v_reinterpret_as_u64(a)); }
 inline bool v_check_any(const v_int64x2& a)
 { return v_check_any(v_reinterpret_as_u64(a)); }
+#if CV_SIMD128_64F
+inline bool v_check_all(const v_float64x2& a)
+{ return v_check_all(v_reinterpret_as_u64(a)); }
 inline bool v_check_any(const v_float64x2& a)
 { return v_check_any(v_reinterpret_as_u64(a)); }
 #endif
diff --git a/modules/core/include/opencv2/core/hal/intrin_sse.hpp b/modules/core/include/opencv2/core/hal/intrin_sse.hpp
index e172d45a9f..f661c58010 100644
--- a/modules/core/include/opencv2/core/hal/intrin_sse.hpp
+++ b/modules/core/include/opencv2/core/hal/intrin_sse.hpp
@@ -1591,31 +1591,25 @@ inline v_uint32x4 v_popcount(const v_int32x4& a)
 inline v_uint64x2 v_popcount(const v_int64x2& a)
 { return v_popcount(v_reinterpret_as_u64(a)); }
 
-#define OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(_Tpvec, suffix, pack_op, and_op, signmask, allmask) \
-inline int v_signmask(const _Tpvec& a) \
-{ \
-    return and_op(_mm_movemask_##suffix(pack_op(a.val)), signmask); \
-} \
-inline bool v_check_all(const _Tpvec& a) \
-{ return and_op(_mm_movemask_##suffix(a.val), allmask) == allmask; } \
-inline bool v_check_any(const _Tpvec& a) \
-{ return and_op(_mm_movemask_##suffix(a.val), allmask) != 0; }
-
-#define OPENCV_HAL_PACKS(a) _mm_packs_epi16(a, a)
-inline __m128i v_packq_epi32(__m128i a)
-{
-    __m128i b = _mm_packs_epi32(a, a);
-    return _mm_packs_epi16(b, b);
-}
-
-OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint8x16, epi8, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 65535, 65535)
-OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int8x16, epi8, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 65535, 65535)
-OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint16x8, epi8, OPENCV_HAL_PACKS, OPENCV_HAL_AND, 255, (int)0xaaaa)
-OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int16x8, epi8, OPENCV_HAL_PACKS, OPENCV_HAL_AND, 255, (int)0xaaaa)
-OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint32x4, epi8, v_packq_epi32, OPENCV_HAL_AND, 15, (int)0x8888)
-OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int32x4, epi8, v_packq_epi32, OPENCV_HAL_AND, 15, (int)0x8888)
-OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float32x4, ps, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 15, 15)
-OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float64x2, pd, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 3, 3)
+#define OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(_Tpvec, suffix, cast_op, allmask) \
+inline int v_signmask(const _Tpvec& a)   { return _mm_movemask_##suffix(cast_op(a.val)); } \
+inline bool v_check_all(const _Tpvec& a) { return _mm_movemask_##suffix(cast_op(a.val)) == allmask; } \
+inline bool v_check_any(const _Tpvec& a) { return _mm_movemask_##suffix(cast_op(a.val)) != 0; }
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint8x16, epi8, OPENCV_HAL_NOP, 65535)
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int8x16, epi8, OPENCV_HAL_NOP, 65535)
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint32x4, ps, _mm_castsi128_ps, 15)
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int32x4, ps, _mm_castsi128_ps, 15)
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint64x2, pd, _mm_castsi128_pd, 3)
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int64x2, pd, _mm_castsi128_pd, 3)
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float32x4, ps, OPENCV_HAL_NOP, 15)
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float64x2, pd, OPENCV_HAL_NOP, 3)
+
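+// 16-bit lanes: _mm_movemask_epi8 reports one bit per byte, so the lane sign bits sit at
+// the odd bit positions (mask 0xaaaa); v_signmask packs to 8-bit lanes first instead.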
+#define OPENCV_HAL_IMPL_SSE_CHECK_SIGNS_SHORT(_Tpvec) \
+inline int v_signmask(const _Tpvec& a) { return _mm_movemask_epi8(_mm_packs_epi16(a.val, a.val)) & 255; } \
+inline bool v_check_all(const _Tpvec& a) { return (_mm_movemask_epi8(a.val) & 0xaaaa) == 0xaaaa; } \
+inline bool v_check_any(const _Tpvec& a) { return (_mm_movemask_epi8(a.val) & 0xaaaa) != 0; }
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS_SHORT(v_uint16x8)
+OPENCV_HAL_IMPL_SSE_CHECK_SIGNS_SHORT(v_int16x8)
 
 inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
 inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
diff --git a/modules/core/include/opencv2/core/hal/intrin_vsx.hpp b/modules/core/include/opencv2/core/hal/intrin_vsx.hpp
index 5e417a00bf..4f41021875 100644
--- a/modules/core/include/opencv2/core/hal/intrin_vsx.hpp
+++ b/modules/core/include/opencv2/core/hal/intrin_vsx.hpp
@@ -899,6 +899,8 @@ inline bool v_check_all(const v_uint16x8& a)
 { return v_check_all(v_reinterpret_as_s16(a)); }
 inline bool v_check_all(const v_uint32x4& a)
 { return v_check_all(v_reinterpret_as_s32(a)); }
+inline bool v_check_all(const v_uint64x2& a)
+{ return v_check_all(v_reinterpret_as_s64(a)); }
 inline bool v_check_all(const v_float32x4& a)
 { return v_check_all(v_reinterpret_as_s32(a)); }
 inline bool v_check_all(const v_float64x2& a)
@@ -913,6 +915,8 @@ inline bool v_check_any(const v_uint16x8& a)
 { return v_check_any(v_reinterpret_as_s16(a)); }
 inline bool v_check_any(const v_uint32x4& a)
 { return v_check_any(v_reinterpret_as_s32(a)); }
+inline bool v_check_any(const v_uint64x2& a)
+{ return v_check_any(v_reinterpret_as_s64(a)); }
 inline bool v_check_any(const v_float32x4& a)
 { return v_check_any(v_reinterpret_as_s32(a)); }
 inline bool v_check_any(const v_float64x2& a)
diff --git a/modules/core/include/opencv2/core/hal/intrin_wasm.hpp b/modules/core/include/opencv2/core/hal/intrin_wasm.hpp
new file mode 100644
index 0000000000..302d834147
--- /dev/null
+++ b/modules/core/include/opencv2/core/hal/intrin_wasm.hpp
@@ -0,0 +1,4025 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_HAL_INTRIN_WASM_HPP
+#define OPENCV_HAL_INTRIN_WASM_HPP
+
+#include <limits>
+#include <cstring>
+#include <algorithm>
+#include "opencv2/core/saturate.hpp"
+
+#define CV_SIMD128 1
+#define CV_SIMD128_64F 0 // All f64 implementations currently use the scalar fallback, so this is disabled.
+#define CV_SIMD128_FP16 0
+
+namespace cv
+{
+
+//! @cond IGNORED
+
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+
+///////// Types ///////////
+
+struct v_uint8x16
+{
+    typedef uchar lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 16 };
+
+    v_uint8x16() : val(wasm_i8x16_splat(0)) {}
+    explicit v_uint8x16(v128_t v) : val(v) {}
+    v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,
+            uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)
+    {
+        uchar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15};
+        val = wasm_v128_load(v);
+    }
+    uchar get0() const
+    {
+        return (uchar)wasm_i8x16_extract_lane(val, 0);
+    }
+
+    v128_t val;
+};
+
+struct v_int8x16
+{
+    typedef schar lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 16 };
+
+    v_int8x16() : val(wasm_i8x16_splat(0)) {}
+    explicit v_int8x16(v128_t v) : val(v) {}
+    v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,
+            schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15)
+    {
+        schar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15};
+        val = wasm_v128_load(v);
+    }
+    schar get0() const
+    {
+        return wasm_i8x16_extract_lane(val, 0);
+    }
+
+    v128_t val;
+};
+
+struct v_uint16x8
+{
+    typedef ushort lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 8 };
+
+    v_uint16x8() : val(wasm_i16x8_splat(0)) {}
+    explicit v_uint16x8(v128_t v) : val(v) {}
+    v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7)
+    {
+        ushort v[] = {v0, v1, v2, v3, v4, v5, v6, v7};
+        val = wasm_v128_load(v);
+    }
+    ushort get0() const
+    {
+        return (ushort)wasm_i16x8_extract_lane(val, 0);    // wasm_u16x8_extract_lane() is not implemented yet
+    }
+
+    v128_t val;
+};
+
+struct v_int16x8
+{
+    typedef short lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 8 };
+
+    v_int16x8() : val(wasm_i16x8_splat(0)) {}
+    explicit v_int16x8(v128_t v) : val(v) {}
+    v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)
+    {
+        short v[] = {v0, v1, v2, v3, v4, v5, v6, v7};
+        val = wasm_v128_load(v);
+    }
+    short get0() const
+    {
+        return wasm_i16x8_extract_lane(val, 0);
+    }
+
+    v128_t val;
+};
+
+struct v_uint32x4
+{
+    typedef unsigned lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 4 };
+
+    v_uint32x4() : val(wasm_i32x4_splat(0)) {}
+    explicit v_uint32x4(v128_t v) : val(v) {}
+    v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3)
+    {
+        unsigned v[] = {v0, v1, v2, v3};
+        val = wasm_v128_load(v);
+    }
+    unsigned get0() const
+    {
+        return (unsigned)wasm_i32x4_extract_lane(val, 0);
+    }
+
+    v128_t val;
+};
+
+struct v_int32x4
+{
+    typedef int lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 4 };
+
+    v_int32x4() : val(wasm_i32x4_splat(0)) {}
+    explicit v_int32x4(v128_t v) : val(v) {}
+    v_int32x4(int v0, int v1, int v2, int v3)
+    {
+        int v[] = {v0, v1, v2, v3};
+        val = wasm_v128_load(v);
+    }
+    int get0() const
+    {
+        return wasm_i32x4_extract_lane(val, 0);
+    }
+
+    v128_t val;
+};
+
+struct v_float32x4
+{
+    typedef float lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 4 };
+
+    v_float32x4() : val(wasm_f32x4_splat(0)) {}
+    explicit v_float32x4(v128_t v) : val(v) {}
+    v_float32x4(float v0, float v1, float v2, float v3)
+    {
+        float v[] = {v0, v1, v2, v3};
+        val = wasm_v128_load(v);
+    }
+    float get0() const
+    {
+        return wasm_f32x4_extract_lane(val, 0);
+    }
+
+    v128_t val;
+};
+
+struct v_uint64x2
+{
+    typedef uint64 lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 2 };
+
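+    // 64-bit lane intrinsics require the experimental __wasm_unimplemented_simd128__
+    // extension; an i32x4 zero splat yields the same all-zero bit pattern when it is unavailable.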
+#ifdef __wasm_unimplemented_simd128__
+    v_uint64x2() : val(wasm_i64x2_splat(0)) {}
+#else
+    v_uint64x2() : val(wasm_i32x4_splat(0)) {}
+#endif
+    explicit v_uint64x2(v128_t v) : val(v) {}
+    v_uint64x2(uint64 v0, uint64 v1)
+    {
+        uint64 v[] = {v0, v1};
+        val = wasm_v128_load(v);
+    }
+    uint64 get0() const
+    {
+#ifdef __wasm_unimplemented_simd128__
+        return (uint64)wasm_i64x2_extract_lane(val, 0);
+#else
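+        // no i64x2 lane extraction available: spill the vector to memory and read lane 0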
+        uint64 des[2];
+        wasm_v128_store(des, val);
+        return des[0];
+#endif
+    }
+
+    v128_t val;
+};
+
+struct v_int64x2
+{
+    typedef int64 lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 2 };
+
+#ifdef __wasm_unimplemented_simd128__
+    v_int64x2() : val(wasm_i64x2_splat(0)) {}
+#else
+    v_int64x2() : val(wasm_i32x4_splat(0)) {}
+#endif
+    explicit v_int64x2(v128_t v) : val(v) {}
+    v_int64x2(int64 v0, int64 v1)
+    {
+        int64 v[] = {v0, v1};
+        val = wasm_v128_load(v);
+    }
+    int64 get0() const
+    {
+#ifdef __wasm_unimplemented_simd128__
+        return wasm_i64x2_extract_lane(val, 0);
+#else
+        int64 des[2];
+        wasm_v128_store(des, val);
+        return des[0];
+#endif
+    }
+
+    v128_t val;
+};
+
+struct v_float64x2
+{
+    typedef double lane_type;
+    typedef v128_t vector_type;
+    enum { nlanes = 2 };
+
+#ifdef __wasm_unimplemented_simd128__
+    v_float64x2() : val(wasm_f64x2_splat(0)) {}
+#else
+    v_float64x2() : val(wasm_f32x4_splat(0)) {}
+#endif
+    explicit v_float64x2(v128_t v) : val(v) {}
+    v_float64x2(double v0, double v1)
+    {
+        double v[] = {v0, v1};
+        val = wasm_v128_load(v);
+    }
+    double get0() const
+    {
+#ifdef __wasm_unimplemented_simd128__
+        return wasm_f64x2_extract_lane(val, 0);
+#else
+        double des[2];
+        wasm_v128_store(des, val);
+        return des[0];
+#endif
+    }
+
+    v128_t val;
+};
+
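+// Scalar fallback implementations (modeled on intrin_cpp's v_reg), used for operations
+// that the wasm SIMD backend does not accelerate yet, e.g. all f64 arithmetic.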
+namespace fallback
+{
+
+template<typename _Tp, int n> struct v_reg
+{
+    typedef _Tp lane_type;
+    enum { nlanes = n };
+
+    explicit v_reg(const _Tp* ptr) { for( int i = 0; i < n; i++ ) s[i] = ptr[i]; }
+
+    v_reg(_Tp s0, _Tp s1) { s[0] = s0; s[1] = s1; }
+
+    v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3) { s[0] = s0; s[1] = s1; s[2] = s2; s[3] = s3; }
+
+    v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3,
+           _Tp s4, _Tp s5, _Tp s6, _Tp s7)
+    {
+        s[0] = s0; s[1] = s1; s[2] = s2; s[3] = s3;
+        s[4] = s4; s[5] = s5; s[6] = s6; s[7] = s7;
+    }
+
+    v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3,
+           _Tp s4, _Tp s5, _Tp s6, _Tp s7,
+           _Tp s8, _Tp s9, _Tp s10, _Tp s11,
+           _Tp s12, _Tp s13, _Tp s14, _Tp s15)
+    {
+        s[0] = s0; s[1] = s1; s[2] = s2; s[3] = s3;
+        s[4] = s4; s[5] = s5; s[6] = s6; s[7] = s7;
+        s[8] = s8; s[9] = s9; s[10] = s10; s[11] = s11;
+        s[12] = s12; s[13] = s13; s[14] = s14; s[15] = s15;
+    }
+
+    v_reg() {}
+
+    v_reg(const v_reg<_Tp, n> & r)
+    {
+        for( int i = 0; i < n; i++ )
+            s[i] = r.s[i];
+    }
+
+    _Tp get0() const { return s[0]; }
+
+    _Tp get(const int i) const { return s[i]; }
+    v_reg<_Tp, n> high() const
+    {
+        v_reg<_Tp, n> c;
+        int i;
+        for( i = 0; i < n/2; i++ )
+        {
+            c.s[i] = s[i+(n/2)];
+            c.s[i+(n/2)] = 0;
+        }
+        return c;
+    }
+
+    static v_reg<_Tp, n> zero()
+    {
+        v_reg<_Tp, n> c;
+        for( int i = 0; i < n; i++ )
+            c.s[i] = (_Tp)0;
+        return c;
+    }
+
+    static v_reg<_Tp, n> all(_Tp s)
+    {
+        v_reg<_Tp, n> c;
+        for( int i = 0; i < n; i++ )
+            c.s[i] = s;
+        return c;
+    }
+
+    template<typename _Tp2, int n2> v_reg<_Tp2, n2> reinterpret_as() const
+    {
+        size_t bytes = std::min(sizeof(_Tp2)*n2, sizeof(_Tp)*n);
+        v_reg<_Tp2, n2> c;
+        std::memcpy(&c.s[0], &s[0], bytes);
+        return c;
+    }
+
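+    // conversions to/from the native wasm vector types go through a 128-bit memory round-trip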
+    v_reg(const cv::v_uint8x16& v) { wasm_v128_store(&s, v.val); }
+    v_reg(const cv::v_int8x16& v) { wasm_v128_store(&s, v.val); }
+    v_reg(const cv::v_uint16x8& v) { wasm_v128_store(&s, v.val); }
+    v_reg(const cv::v_int16x8& v) { wasm_v128_store(&s, v.val); }
+    v_reg(const cv::v_uint32x4& v) { wasm_v128_store(&s, v.val); }
+    v_reg(const cv::v_int32x4& v) { wasm_v128_store(&s, v.val); }
+    v_reg(const cv::v_float32x4& v) { wasm_v128_store(&s, v.val); }
+    v_reg(const cv::v_float64x2& v) { wasm_v128_store(&s, v.val); }
+    v_reg(const cv::v_uint64x2& v) { wasm_v128_store(&s, v.val); }
+    v_reg(const cv::v_int64x2& v) { wasm_v128_store(&s, v.val); }
+
+    operator cv::v_uint8x16() const { return cv::v_uint8x16(wasm_v128_load(&s)); }
+    operator cv::v_int8x16() const { return cv::v_int8x16(wasm_v128_load(&s)); }
+    operator cv::v_uint16x8() const { return cv::v_uint16x8(wasm_v128_load(&s)); }
+    operator cv::v_int16x8() const { return cv::v_int16x8(wasm_v128_load(&s)); }
+    operator cv::v_uint32x4() const { return cv::v_uint32x4(wasm_v128_load(&s)); }
+    operator cv::v_int32x4() const { return cv::v_int32x4(wasm_v128_load(&s)); }
+    operator cv::v_float32x4() const { return cv::v_float32x4(wasm_v128_load(&s)); }
+    operator cv::v_float64x2() const { return cv::v_float64x2(wasm_v128_load(&s)); }
+    operator cv::v_uint64x2() const { return cv::v_uint64x2(wasm_v128_load(&s)); }
+    operator cv::v_int64x2() const { return cv::v_int64x2(wasm_v128_load(&s)); }
+
+    _Tp s[n];
+};
+
+typedef v_reg<uchar, 16> v_uint8x16;
+typedef v_reg<schar, 16> v_int8x16;
+typedef v_reg<ushort, 8> v_uint16x8;
+typedef v_reg<short, 8> v_int16x8;
+typedef v_reg<unsigned, 4> v_uint32x4;
+typedef v_reg<int, 4> v_int32x4;
+typedef v_reg<float, 4> v_float32x4;
+typedef v_reg<double, 2> v_float64x2;
+typedef v_reg<uint64, 2> v_uint64x2;
+typedef v_reg<int64, 2> v_int64x2;
+
+#define OPENCV_HAL_IMPL_BIN_OP(bin_op) \
+template<typename _Tp, int n> inline v_reg<_Tp, n> \
+    operator bin_op (const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \
+{ \
+    v_reg<_Tp, n> c; \
+    for( int i = 0; i < n; i++ ) \
+        c.s[i] = saturate_cast<_Tp>(a.s[i] bin_op b.s[i]); \
+    return c; \
+} \
+template<typename _Tp, int n> inline v_reg<_Tp, n>& \
+    operator bin_op##= (v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \
+{ \
+    for( int i = 0; i < n; i++ ) \
+        a.s[i] = saturate_cast<_Tp>(a.s[i] bin_op b.s[i]); \
+    return a; \
+}
+
+OPENCV_HAL_IMPL_BIN_OP(+)
+OPENCV_HAL_IMPL_BIN_OP(-)
+OPENCV_HAL_IMPL_BIN_OP(*)
+OPENCV_HAL_IMPL_BIN_OP(/)
+
+#define OPENCV_HAL_IMPL_BIT_OP(bit_op) \
+template<typename _Tp, int n> inline v_reg<_Tp, n> operator bit_op \
+    (const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \
+{ \
+    v_reg<_Tp, n> c; \
+    typedef typename V_TypeTraits<_Tp>::int_type itype; \
+    for( int i = 0; i < n; i++ ) \
+        c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int((itype)(V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) bit_op \
+                                                        V_TypeTraits<_Tp>::reinterpret_int(b.s[i]))); \
+    return c; \
+} \
+template<typename _Tp, int n> inline v_reg<_Tp, n>& operator \
+    bit_op##= (v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \
+{ \
+    typedef typename V_TypeTraits<_Tp>::int_type itype; \
+    for( int i = 0; i < n; i++ ) \
+        a.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int((itype)(V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) bit_op \
+                                                        V_TypeTraits<_Tp>::reinterpret_int(b.s[i]))); \
+    return a; \
+}
+
+OPENCV_HAL_IMPL_BIT_OP(&)
+OPENCV_HAL_IMPL_BIT_OP(|)
+OPENCV_HAL_IMPL_BIT_OP(^)
+
+template<typename _Tp, int n> inline v_reg<_Tp, n> operator ~ (const v_reg<_Tp, n>& a)
+{
+    v_reg<_Tp, n> c;
+    for( int i = 0; i < n; i++ )
+    {
+        c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int(~V_TypeTraits<_Tp>::reinterpret_int(a.s[i]));
+    }
+    return c;
+}
+
+#define OPENCV_HAL_IMPL_MATH_FUNC(func, cfunc, _Tp2) \
+template<typename _Tp, int n> inline v_reg<_Tp2, n> func(const v_reg<_Tp, n>& a) \
+{ \
+    v_reg<_Tp2, n> c; \
+    for( int i = 0; i < n; i++ ) \
+        c.s[i] = cfunc(a.s[i]); \
+    return c; \
+}
+
+OPENCV_HAL_IMPL_MATH_FUNC(v_sqrt, std::sqrt, _Tp)
+OPENCV_HAL_IMPL_MATH_FUNC(v_sin, std::sin, _Tp)
+OPENCV_HAL_IMPL_MATH_FUNC(v_cos, std::cos, _Tp)
+OPENCV_HAL_IMPL_MATH_FUNC(v_exp, std::exp, _Tp)
+OPENCV_HAL_IMPL_MATH_FUNC(v_log, std::log, _Tp)
+OPENCV_HAL_IMPL_MATH_FUNC(v_abs, (typename V_TypeTraits<_Tp>::abs_type)std::abs,
+                          typename V_TypeTraits<_Tp>::abs_type)
+OPENCV_HAL_IMPL_MATH_FUNC(v_round, cvRound, int)
+OPENCV_HAL_IMPL_MATH_FUNC(v_floor, cvFloor, int)
+OPENCV_HAL_IMPL_MATH_FUNC(v_ceil, cvCeil, int)
+OPENCV_HAL_IMPL_MATH_FUNC(v_trunc, int, int)
+
+#define OPENCV_HAL_IMPL_MINMAX_FUNC(func, cfunc) \
+template<typename _Tp, int n> inline v_reg<_Tp, n> func(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \
+{ \
+    v_reg<_Tp, n> c; \
+    for( int i = 0; i < n; i++ ) \
+        c.s[i] = cfunc(a.s[i], b.s[i]); \
+    return c; \
+}
+
+#define OPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(func, cfunc) \
+template<typename _Tp, int n> inline _Tp func(const v_reg<_Tp, n>& a) \
+{ \
+    _Tp c = a.s[0]; \
+    for( int i = 1; i < n; i++ ) \
+        c = cfunc(c, a.s[i]); \
+    return c; \
+}
+
+OPENCV_HAL_IMPL_MINMAX_FUNC(v_min, std::min)
+OPENCV_HAL_IMPL_MINMAX_FUNC(v_max, std::max)
+OPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(v_reduce_min, std::min)
+OPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(v_reduce_max, std::max)
+
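+// popCountTable[b] = number of set bits in the byte value b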
+static const unsigned char popCountTable[] =
+{
+    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
+
+template<typename _Tp, int n>
+inline v_reg<typename V_TypeTraits<_Tp>::abs_type, n> v_popcount(const v_reg<_Tp, n>& a)
+{
+    v_reg<typename V_TypeTraits<_Tp>::abs_type, n> b = v_reg<typename V_TypeTraits<_Tp>::abs_type, n>::zero();
+    for (int i = 0; i < (int)(n*sizeof(_Tp)); i++)
+        b.s[i/sizeof(_Tp)] += popCountTable[v_reinterpret_as_u8(a).s[i]];
+    return b;
+}
+
+template<typename _Tp, int n>
+inline void v_minmax( const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,
+                      v_reg<_Tp, n>& minval, v_reg<_Tp, n>& maxval )
+{
+    for( int i = 0; i < n; i++ )
+    {
+        minval.s[i] = std::min(a.s[i], b.s[i]);
+        maxval.s[i] = std::max(a.s[i], b.s[i]);
+    }
+}
+
+#define OPENCV_HAL_IMPL_CMP_OP(cmp_op) \
+template<typename _Tp, int n> \
+inline v_reg<_Tp, n> operator cmp_op(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \
+{ \
+    typedef typename V_TypeTraits<_Tp>::int_type itype; \
+    v_reg<_Tp, n> c; \
+    for( int i = 0; i < n; i++ ) \
+        c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int((itype)-(int)(a.s[i] cmp_op b.s[i])); \
+    return c; \
+}
+
+OPENCV_HAL_IMPL_CMP_OP(<)
+OPENCV_HAL_IMPL_CMP_OP(>)
+OPENCV_HAL_IMPL_CMP_OP(<=)
+OPENCV_HAL_IMPL_CMP_OP(>=)
+OPENCV_HAL_IMPL_CMP_OP(==)
+OPENCV_HAL_IMPL_CMP_OP(!=)
+
+template<int n>
+inline v_reg<float, n> v_not_nan(const v_reg<float, n>& a)
+{
+    typedef typename V_TypeTraits<float>::int_type itype;
+    v_reg<float, n> c;
+    for (int i = 0; i < n; i++)
+        c.s[i] = V_TypeTraits<float>::reinterpret_from_int((itype)-(int)(a.s[i] == a.s[i]));
+    return c;
+}
+template<int n>
+inline v_reg<double, n> v_not_nan(const v_reg<double, n>& a)
+{
+    typedef typename V_TypeTraits<double>::int_type itype;
+    v_reg<double, n> c;
+    for (int i = 0; i < n; i++)
+        c.s[i] = V_TypeTraits<double>::reinterpret_from_int((itype)-(int)(a.s[i] == a.s[i]));
+    return c;
+}
+
+#define OPENCV_HAL_IMPL_ARITHM_OP(func, bin_op, cast_op, _Tp2) \
+template<typename _Tp, int n> \
+inline v_reg<_Tp2, n> func(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \
+{ \
+    typedef _Tp2 rtype; \
+    v_reg<rtype, n> c; \
+    for( int i = 0; i < n; i++ ) \
+        c.s[i] = cast_op(a.s[i] bin_op b.s[i]); \
+    return c; \
+}
+
+OPENCV_HAL_IMPL_ARITHM_OP(v_add_wrap, +, (_Tp), _Tp)
+OPENCV_HAL_IMPL_ARITHM_OP(v_sub_wrap, -, (_Tp), _Tp)
+OPENCV_HAL_IMPL_ARITHM_OP(v_mul_wrap, *, (_Tp), _Tp)
+
+template<typename T> inline T _absdiff(T a, T b)
+{
+    return a > b ? a - b : b - a;
+}
+
+template<typename _Tp, int n>
+inline v_reg<typename V_TypeTraits<_Tp>::abs_type, n> v_absdiff(const v_reg<_Tp, n>& a, const v_reg<_Tp, n> & b)
+{
+    typedef typename V_TypeTraits<_Tp>::abs_type rtype;
+    v_reg<rtype, n> c;
+    const rtype mask = (rtype)(std::numeric_limits<_Tp>::is_signed ? (1 << (sizeof(rtype)*8 - 1)) : 0);
+    for( int i = 0; i < n; i++ )
+    {
+        rtype ua = a.s[i] ^ mask;
+        rtype ub = b.s[i] ^ mask;
+        c.s[i] = _absdiff(ua, ub);
+    }
+    return c;
+}
+
+inline v_float32x4 v_absdiff(const v_float32x4& a, const v_float32x4& b)
+{
+    v_float32x4 c;
+    for( int i = 0; i < c.nlanes; i++ )
+        c.s[i] = _absdiff(a.s[i], b.s[i]);
+    return c;
+}
+
+inline v_float64x2 v_absdiff(const v_float64x2& a, const v_float64x2& b)
+{
+    v_float64x2 c;
+    for( int i = 0; i < c.nlanes; i++ )
+        c.s[i] = _absdiff(a.s[i], b.s[i]);
+    return c;
+}
+
+template<typename _Tp, int n>
+inline v_reg<_Tp, n> v_absdiffs(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    v_reg<_Tp, n> c;
+    for( int i = 0; i < n; i++)
+        c.s[i] = saturate_cast<_Tp>(std::abs(a.s[i] - b.s[i]));
+    return c;
+}
+
+template<typename _Tp, int n>
+inline v_reg<_Tp, n> v_invsqrt(const v_reg<_Tp, n>& a)
+{
+    v_reg<_Tp, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = 1.f/std::sqrt(a.s[i]);
+    return c;
+}
+
+template<typename _Tp, int n>
+inline v_reg<_Tp, n> v_magnitude(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    v_reg<_Tp, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = std::sqrt(a.s[i]*a.s[i] + b.s[i]*b.s[i]);
+    return c;
+}
+
+template<typename _Tp, int n>
+inline v_reg<_Tp, n> v_sqr_magnitude(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    v_reg<_Tp, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = a.s[i]*a.s[i] + b.s[i]*b.s[i];
+    return c;
+}
+
+template<typename _Tp, int n>
+inline v_reg<_Tp, n> v_fma(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,
+                           const v_reg<_Tp, n>& c)
+{
+    v_reg<_Tp, n> d;
+    for( int i = 0; i < n; i++ )
+        d.s[i] = a.s[i]*b.s[i] + c.s[i];
+    return d;
+}
+
+template<typename _Tp, int n>
+inline v_reg<_Tp, n> v_muladd(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,
+                              const v_reg<_Tp, n>& c)
+{
+    return v_fma(a, b, c);
+}
+
+template<typename _Tp, int n> inline v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>
+    v_dotprod(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    typedef typename V_TypeTraits<_Tp>::w_type w_type;
+    v_reg<w_type, n/2> c;
+    for( int i = 0; i < (n/2); i++ )
+        c.s[i] = (w_type)a.s[i*2]*b.s[i*2] + (w_type)a.s[i*2+1]*b.s[i*2+1];
+    return c;
+}
+
+template<typename _Tp, int n> inline v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>
+    v_dotprod(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, const v_reg<typename V_TypeTraits<_Tp>::w_type, n / 2>& c)
+{
+    typedef typename V_TypeTraits<_Tp>::w_type w_type;
+    v_reg<w_type, n/2> s;
+    for( int i = 0; i < (n/2); i++ )
+        s.s[i] = (w_type)a.s[i*2]*b.s[i*2] + (w_type)a.s[i*2+1]*b.s[i*2+1] + c.s[i];
+    return s;
+}
+
+template<typename _Tp, int n> inline void v_mul_expand(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,
+                                                       v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& c,
+                                                       v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& d)
+{
+    typedef typename V_TypeTraits<_Tp>::w_type w_type;
+    for( int i = 0; i < (n/2); i++ )
+    {
+        c.s[i] = (w_type)a.s[i]*b.s[i];
+        d.s[i] = (w_type)a.s[i+(n/2)]*b.s[i+(n/2)];
+    }
+}
+
+template<typename _Tp, int n> inline v_reg<_Tp, n> v_mul_hi(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    typedef typename V_TypeTraits<_Tp>::w_type w_type;
+    v_reg<_Tp, n> c;
+    for (int i = 0; i < n; i++)
+        c.s[i] = (_Tp)(((w_type)a.s[i] * b.s[i]) >> sizeof(_Tp)*8);
+    return c;
+}
+
+template<typename _Tp, int n> inline void v_hsum(const v_reg<_Tp, n>& a,
+                                                 v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& c)
+{
+    typedef typename V_TypeTraits<_Tp>::w_type w_type;
+    for( int i = 0; i < (n/2); i++ )
+    {
+        c.s[i] = (w_type)a.s[i*2] + a.s[i*2+1];
+    }
+}
+
+#define OPENCV_HAL_IMPL_SHIFT_OP(shift_op) \
+template<typename _Tp, int n> inline v_reg<_Tp, n> operator shift_op(const v_reg<_Tp, n>& a, int imm) \
+{ \
+    v_reg<_Tp, n> c; \
+    for( int i = 0; i < n; i++ ) \
+        c.s[i] = (_Tp)(a.s[i] shift_op imm); \
+    return c; \
+}
+
+OPENCV_HAL_IMPL_SHIFT_OP(<< )
+OPENCV_HAL_IMPL_SHIFT_OP(>> )
+
+#define OPENCV_HAL_IMPL_ROTATE_SHIFT_OP(suffix,opA,opB) \
+template<int imm, typename _Tp, int n> inline v_reg<_Tp, n> v_rotate_##suffix(const v_reg<_Tp, n>& a) \
+{ \
+    v_reg<_Tp, n> b; \
+    for (int i = 0; i < n; i++) \
+    { \
+        int sIndex = i opA imm; \
+        if (0 <= sIndex && sIndex < n) \
+        { \
+            b.s[i] = a.s[sIndex]; \
+        } \
+        else \
+        { \
+            b.s[i] = 0; \
+        } \
+    } \
+    return b; \
+} \
+template<int imm, typename _Tp, int n> inline v_reg<_Tp, n> v_rotate_##suffix(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \
+{ \
+    v_reg<_Tp, n> c; \
+    for (int i = 0; i < n; i++) \
+    { \
+        int aIndex = i opA imm; \
+        int bIndex = i opA imm opB n; \
+        if (0 <= bIndex && bIndex < n) \
+        { \
+            c.s[i] = b.s[bIndex]; \
+        } \
+        else if (0 <= aIndex && aIndex < n) \
+        { \
+            c.s[i] = a.s[aIndex]; \
+        } \
+        else \
+        { \
+            c.s[i] = 0; \
+        } \
+    } \
+    return c; \
+}
+
+OPENCV_HAL_IMPL_ROTATE_SHIFT_OP(left,  -, +)
+OPENCV_HAL_IMPL_ROTATE_SHIFT_OP(right, +, -)
+
+template<typename _Tp, int n> inline typename V_TypeTraits<_Tp>::sum_type v_reduce_sum(const v_reg<_Tp, n>& a)
+{
+    typename V_TypeTraits<_Tp>::sum_type c = a.s[0];
+    for( int i = 1; i < n; i++ )
+        c += a.s[i];
+    return c;
+}
+
+inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
+                                 const v_float32x4& c, const v_float32x4& d)
+{
+    v_float32x4 r;
+    r.s[0] = a.s[0] + a.s[1] + a.s[2] + a.s[3];
+    r.s[1] = b.s[0] + b.s[1] + b.s[2] + b.s[3];
+    r.s[2] = c.s[0] + c.s[1] + c.s[2] + c.s[3];
+    r.s[3] = d.s[0] + d.s[1] + d.s[2] + d.s[3];
+    return r;
+}
+
+template<typename _Tp, int n> inline typename V_TypeTraits< typename V_TypeTraits<_Tp>::abs_type >::sum_type v_reduce_sad(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    typename V_TypeTraits< typename V_TypeTraits<_Tp>::abs_type >::sum_type c = _absdiff(a.s[0], b.s[0]);
+    for (int i = 1; i < n; i++)
+        c += _absdiff(a.s[i], b.s[i]);
+    return c;
+}
+
+template<typename _Tp, int n> inline int v_signmask(const v_reg<_Tp, n>& a)
+{
+    int mask = 0;
+    for( int i = 0; i < n; i++ )
+        mask |= (V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) < 0) << i;
+    return mask;
+}
+
+template<typename _Tp, int n> inline bool v_check_all(const v_reg<_Tp, n>& a)
+{
+    for( int i = 0; i < n; i++ )
+        if( V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) >= 0 )
+            return false;
+    return true;
+}
+
+template<typename _Tp, int n> inline bool v_check_any(const v_reg<_Tp, n>& a)
+{
+    for( int i = 0; i < n; i++ )
+        if( V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) < 0 )
+            return true;
+    return false;
+}
+
+template<typename _Tp, int n> inline v_reg<_Tp, n> v_select(const v_reg<_Tp, n>& mask,
+                                                           const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    typedef V_TypeTraits<_Tp> Traits;
+    typedef typename Traits::int_type int_type;
+    v_reg<_Tp, n> c;
+    for( int i = 0; i < n; i++ )
+    {
+        int_type m = Traits::reinterpret_int(mask.s[i]);
+        CV_DbgAssert(m == 0 || m == (~(int_type)0));  // restrict mask values: 0 or 0xff/0xffff/etc
+        c.s[i] = m ? a.s[i] : b.s[i];
+    }
+    return c;
+}
+
+template<typename _Tp, int n> inline void v_expand(const v_reg<_Tp, n>& a,
+                            v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& b0,
+                            v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& b1)
+{
+    for( int i = 0; i < (n/2); i++ )
+    {
+        b0.s[i] = a.s[i];
+        b1.s[i] = a.s[i+(n/2)];
+    }
+}
+
+template<typename _Tp, int n>
+inline v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>
+v_expand_low(const v_reg<_Tp, n>& a)
+{
+    v_reg<typename V_TypeTraits<_Tp>::w_type, n/2> b;
+    for( int i = 0; i < (n/2); i++ )
+        b.s[i] = a.s[i];
+    return b;
+}
+
+template<typename _Tp, int n>
+inline v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>
+v_expand_high(const v_reg<_Tp, n>& a)
+{
+    v_reg<typename V_TypeTraits<_Tp>::w_type, n/2> b;
+    for( int i = 0; i < (n/2); i++ )
+        b.s[i] = a.s[i+(n/2)];
+    return b;
+}
+
+template<typename _Tp, int n> inline v_reg<typename V_TypeTraits<_Tp>::int_type, n>
+    v_reinterpret_as_int(const v_reg<_Tp, n>& a)
+{
+    v_reg<typename V_TypeTraits<_Tp>::int_type, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = V_TypeTraits<_Tp>::reinterpret_int(a.s[i]);
+    return c;
+}
+
+template<typename _Tp, int n> inline v_reg<typename V_TypeTraits<_Tp>::uint_type, n>
+    v_reinterpret_as_uint(const v_reg<_Tp, n>& a)
+{
+    v_reg<typename V_TypeTraits<_Tp>::uint_type, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = V_TypeTraits<_Tp>::reinterpret_uint(a.s[i]);
+    return c;
+}
+
+template<typename _Tp, int n> inline void v_zip( const v_reg<_Tp, n>& a0, const v_reg<_Tp, n>& a1,
+                                               v_reg<_Tp, n>& b0, v_reg<_Tp, n>& b1 )
+{
+    int i;
+    for( i = 0; i < n/2; i++ )
+    {
+        b0.s[i*2] = a0.s[i];
+        b0.s[i*2+1] = a1.s[i];
+    }
+    for( ; i < n; i++ )
+    {
+        b1.s[i*2-n] = a0.s[i];
+        b1.s[i*2-n+1] = a1.s[i];
+    }
+}
+
+template<typename _Tp>
+inline v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> v_load(const _Tp* ptr)
+{
+    return v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128>(ptr);
+}
+
+template<typename _Tp>
+inline v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> v_load_aligned(const _Tp* ptr)
+{
+    return v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128>(ptr);
+}
+
+template<typename _Tp>
+inline v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> v_load_low(const _Tp* ptr)
+{
+    v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> c;
+    for( int i = 0; i < c.nlanes/2; i++ )
+    {
+        c.s[i] = ptr[i];
+    }
+    return c;
+}
+
+template<typename _Tp>
+inline v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> v_load_halves(const _Tp* loptr, const _Tp* hiptr)
+{
+    v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> c;
+    for( int i = 0; i < c.nlanes/2; i++ )
+    {
+        c.s[i] = loptr[i];
+        c.s[i+c.nlanes/2] = hiptr[i];
+    }
+    return c;
+}
+
+template<typename _Tp>
+inline v_reg<typename V_TypeTraits<_Tp>::w_type, V_TypeTraits<_Tp>::nlanes128 / 2>
+v_load_expand(const _Tp* ptr)
+{
+    typedef typename V_TypeTraits<_Tp>::w_type w_type;
+    v_reg<w_type, V_TypeTraits<w_type>::nlanes128> c;
+    for( int i = 0; i < c.nlanes; i++ )
+    {
+        c.s[i] = ptr[i];
+    }
+    return c;
+}
+
+template<typename _Tp>
+inline v_reg<typename V_TypeTraits<_Tp>::q_type, V_TypeTraits<_Tp>::nlanes128 / 4>
+v_load_expand_q(const _Tp* ptr)
+{
+    typedef typename V_TypeTraits<_Tp>::q_type q_type;
+    v_reg<q_type, V_TypeTraits<q_type>::nlanes128> c;
+    for( int i = 0; i < c.nlanes; i++ )
+    {
+        c.s[i] = ptr[i];
+    }
+    return c;
+}
+
+template<typename _Tp, int n> inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a,
+                                                            v_reg<_Tp, n>& b)
+{
+    int i, i2;
+    for( i = i2 = 0; i < n; i++, i2 += 2 )
+    {
+        a.s[i] = ptr[i2];
+        b.s[i] = ptr[i2+1];
+    }
+}
+
+template<typename _Tp, int n> inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a,
+                                                            v_reg<_Tp, n>& b, v_reg<_Tp, n>& c)
+{
+    int i, i3;
+    for( i = i3 = 0; i < n; i++, i3 += 3 )
+    {
+        a.s[i] = ptr[i3];
+        b.s[i] = ptr[i3+1];
+        c.s[i] = ptr[i3+2];
+    }
+}
+
+template<typename _Tp, int n>
+inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a,
+                                v_reg<_Tp, n>& b, v_reg<_Tp, n>& c,
+                                v_reg<_Tp, n>& d)
+{
+    int i, i4;
+    for( i = i4 = 0; i < n; i++, i4 += 4 )
+    {
+        a.s[i] = ptr[i4];
+        b.s[i] = ptr[i4+1];
+        c.s[i] = ptr[i4+2];
+        d.s[i] = ptr[i4+3];
+    }
+}
+
+template<typename _Tp, int n>
+inline void v_store_interleave( _Tp* ptr, const v_reg<_Tp, n>& a,
+                               const v_reg<_Tp, n>& b,
+                               hal::StoreMode /*mode*/=hal::STORE_UNALIGNED)
+{
+    int i, i2;
+    for( i = i2 = 0; i < n; i++, i2 += 2 )
+    {
+        ptr[i2] = a.s[i];
+        ptr[i2+1] = b.s[i];
+    }
+}
+
+template<typename _Tp, int n>
+inline void v_store_interleave( _Tp* ptr, const v_reg<_Tp, n>& a,
+                                const v_reg<_Tp, n>& b, const v_reg<_Tp, n>& c,
+                                hal::StoreMode /*mode*/=hal::STORE_UNALIGNED)
+{
+    int i, i3;
+    for( i = i3 = 0; i < n; i++, i3 += 3 )
+    {
+        ptr[i3] = a.s[i];
+        ptr[i3+1] = b.s[i];
+        ptr[i3+2] = c.s[i];
+    }
+}
+
+template<typename _Tp, int n> inline void v_store_interleave( _Tp* ptr, const v_reg<_Tp, n>& a,
+                                                            const v_reg<_Tp, n>& b, const v_reg<_Tp, n>& c,
+                                                            const v_reg<_Tp, n>& d,
+                                                            hal::StoreMode /*mode*/=hal::STORE_UNALIGNED)
+{
+    int i, i4;
+    for( i = i4 = 0; i < n; i++, i4 += 4 )
+    {
+        ptr[i4] = a.s[i];
+        ptr[i4+1] = b.s[i];
+        ptr[i4+2] = c.s[i];
+        ptr[i4+3] = d.s[i];
+    }
+}
+
+template<typename _Tp, int n>
+inline void v_store(_Tp* ptr, const v_reg<_Tp, n>& a, hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    for( int i = 0; i < n; i++ )
+        ptr[i] = a.s[i];
+}
+
+template<typename _Tp, int n>
+inline void v_store_low(_Tp* ptr, const v_reg<_Tp, n>& a)
+{
+    for( int i = 0; i < (n/2); i++ )
+        ptr[i] = a.s[i];
+}
+
+template<typename _Tp, int n>
+inline void v_store_high(_Tp* ptr, const v_reg<_Tp, n>& a)
+{
+    for( int i = 0; i < (n/2); i++ )
+        ptr[i] = a.s[i+(n/2)];
+}
+
+template<typename _Tp, int n>
+inline void v_store_aligned(_Tp* ptr, const v_reg<_Tp, n>& a)
+{
+    for( int i = 0; i < n; i++ )
+        ptr[i] = a.s[i];
+}
+
+template<typename _Tp, int n>
+inline void v_store_aligned_nocache(_Tp* ptr, const v_reg<_Tp, n>& a)
+{
+    for( int i = 0; i < n; i++ )
+        ptr[i] = a.s[i];
+}
+
+template<typename _Tp, int n>
+inline void v_store_aligned(_Tp* ptr, const v_reg<_Tp, n>& a, hal::StoreMode /*mode*/)
+{
+    for( int i = 0; i < n; i++ )
+        ptr[i] = a.s[i];
+}
+
+template<typename _Tp, int n>
+inline v_reg<_Tp, n> v_combine_low(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    v_reg<_Tp, n> c;
+    for( int i = 0; i < (n/2); i++ )
+    {
+        c.s[i] = a.s[i];
+        c.s[i+(n/2)] = b.s[i];
+    }
+    return c;
+}
+
+template<typename _Tp, int n>
+inline v_reg<_Tp, n> v_combine_high(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    v_reg<_Tp, n> c;
+    for( int i = 0; i < (n/2); i++ )
+    {
+        c.s[i] = a.s[i+(n/2)];
+        c.s[i+(n/2)] = b.s[i+(n/2)];
+    }
+    return c;
+}
+
+template<typename _Tp, int n>
+inline void v_recombine(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,
+                        v_reg<_Tp, n>& low, v_reg<_Tp, n>& high)
+{
+    for( int i = 0; i < (n/2); i++ )
+    {
+        low.s[i] = a.s[i];
+        low.s[i+(n/2)] = b.s[i];
+        high.s[i] = a.s[i+(n/2)];
+        high.s[i+(n/2)] = b.s[i+(n/2)];
+    }
+}
+
+template<int s, typename _Tp, int n>
+inline v_reg<_Tp, n> v_extract(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    v_reg<_Tp, n> r;
+    const int shift = n - s;
+    int i = 0;
+    for (; i < shift; ++i)
+        r.s[i] = a.s[i+s];
+    for (; i < n; ++i)
+        r.s[i] = b.s[i-shift];
+    return r;
+}
+
+template<int n> inline v_reg<int, n> v_round(const v_reg<float, n>& a)
+{
+    v_reg<int, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = cvRound(a.s[i]);
+    return c;
+}
+
+template<int n> inline v_reg<int, n*2> v_round(const v_reg<double, n>& a, const v_reg<double, n>& b)
+{
+    v_reg<int, n*2> c;
+    for( int i = 0; i < n; i++ )
+    {
+        c.s[i] = cvRound(a.s[i]);
+        c.s[i+n] = cvRound(b.s[i]);
+    }
+    return c;
+}
+
+template<int n> inline v_reg<int, n> v_floor(const v_reg<float, n>& a)
+{
+    v_reg<int, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = cvFloor(a.s[i]);
+    return c;
+}
+
+template<int n> inline v_reg<int, n> v_ceil(const v_reg<float, n>& a)
+{
+    v_reg<int, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = cvCeil(a.s[i]);
+    return c;
+}
+
+template<int n> inline v_reg<int, n> v_trunc(const v_reg<float, n>& a)
+{
+    v_reg<int, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = (int)(a.s[i]);
+    return c;
+}
+
+template<int n> inline v_reg<int, n*2> v_round(const v_reg<double, n>& a)
+{
+    v_reg<int, n*2> c;
+    for( int i = 0; i < n; i++ )
+    {
+        c.s[i] = cvRound(a.s[i]);
+        c.s[i+n] = 0;
+    }
+    return c;
+}
+
+template<int n> inline v_reg<int, n*2> v_floor(const v_reg<double, n>& a)
+{
+    v_reg<int, n*2> c;
+    for( int i = 0; i < n; i++ )
+    {
+        c.s[i] = cvFloor(a.s[i]);
+        c.s[i+n] = 0;
+    }
+    return c;
+}
+
+template<int n> inline v_reg<int, n*2> v_ceil(const v_reg<double, n>& a)
+{
+    v_reg<int, n*2> c;
+    for( int i = 0; i < n; i++ )
+    {
+        c.s[i] = cvCeil(a.s[i]);
+        c.s[i+n] = 0;
+    }
+    return c;
+}
+
+template<int n> inline v_reg<int, n*2> v_trunc(const v_reg<double, n>& a)
+{
+    v_reg<int, n*2> c;
+    for( int i = 0; i < n; i++ )
+    {
+        c.s[i] = (int)(a.s[i]);
+        c.s[i+n] = 0;
+    }
+    return c;
+}
+
+template<int n> inline v_reg<float, n> v_cvt_f32(const v_reg<int, n>& a)
+{
+    v_reg<float, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = (float)a.s[i];
+    return c;
+}
+
+template<int n> inline v_reg<float, n*2> v_cvt_f32(const v_reg<double, n>& a)
+{
+    v_reg<float, n*2> c;
+    for( int i = 0; i < n; i++ )
+    {
+        c.s[i] = (float)a.s[i];
+        c.s[i+n] = 0;
+    }
+    return c;
+}
+
+template<int n> inline v_reg<float, n*2> v_cvt_f32(const v_reg<double, n>& a, const v_reg<double, n>& b)
+{
+    v_reg<float, n*2> c;
+    for( int i = 0; i < n; i++ )
+    {
+        c.s[i] = (float)a.s[i];
+        c.s[i+n] = (float)b.s[i];
+    }
+    return c;
+}
+
+inline v_float64x2 v_cvt_f64(const v_int32x4& a)
+{
+    v_float64x2 c;
+    for( int i = 0; i < 2; i++ )
+        c.s[i] = (double)a.s[i];
+    return c;
+}
+
+inline v_float64x2 v_cvt_f64_high(const v_int32x4& a)
+{
+    v_float64x2 c;
+    for( int i = 0; i < 2; i++ )
+        c.s[i] = (double)a.s[i+2];
+    return c;
+}
+
+inline v_float64x2 v_cvt_f64(const v_float32x4& a)
+{
+    v_float64x2 c;
+    for( int i = 0; i < 2; i++ )
+        c.s[i] = (double)a.s[i];
+    return c;
+}
+
+inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
+{
+    v_float64x2 c;
+    for( int i = 0; i < 2; i++ )
+        c.s[i] = (double)a.s[i+2];
+    return c;
+}
+
+template<typename _Tp> inline v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> v_lut(const _Tp* tab, const int* idx)
+{
+    v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> c;
+    for (int i = 0; i < V_TypeTraits<_Tp>::nlanes128; i++)
+        c.s[i] = tab[idx[i]];
+    return c;
+}
+template<typename _Tp> inline v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> v_lut_pairs(const _Tp* tab, const int* idx)
+{
+    v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> c;
+    for (int i = 0; i < V_TypeTraits<_Tp>::nlanes128; i++)
+        c.s[i] = tab[idx[i / 2] + i % 2];
+    return c;
+}
+template<typename _Tp> inline v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> v_lut_quads(const _Tp* tab, const int* idx)
+{
+    v_reg<_Tp, V_TypeTraits<_Tp>::nlanes128> c;
+    for (int i = 0; i < V_TypeTraits<_Tp>::nlanes128; i++)
+        c.s[i] = tab[idx[i / 4] + i % 4];
+    return c;
+}
+
+template<int n> inline v_reg<int, n> v_lut(const int* tab, const v_reg<int, n>& idx)
+{
+    v_reg<int, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = tab[idx.s[i]];
+    return c;
+}
+
+template<int n> inline v_reg<unsigned, n> v_lut(const unsigned* tab, const v_reg<int, n>& idx)
+{
+    v_reg<unsigned, n> c;
+    for (int i = 0; i < n; i++)
+        c.s[i] = tab[idx.s[i]];
+    return c;
+}
+
+template<int n> inline v_reg<float, n> v_lut(const float* tab, const v_reg<int, n>& idx)
+{
+    v_reg<float, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = tab[idx.s[i]];
+    return c;
+}
+
+template<int n> inline v_reg<double, n> v_lut(const double* tab, const v_reg<int, n*2>& idx)
+{
+    v_reg<double, n> c;
+    for( int i = 0; i < n; i++ )
+        c.s[i] = tab[idx.s[i]];
+    return c;
+}
+
+template<int n> inline void v_lut_deinterleave(const float* tab, const v_reg<int, n>& idx,
+                                               v_reg<float, n>& x, v_reg<float, n>& y)
+{
+    for( int i = 0; i < n; i++ )
+    {
+        int j = idx.s[i];
+        x.s[i] = tab[j];
+        y.s[i] = tab[j+1];
+    }
+}
+
+template<int n> inline void v_lut_deinterleave(const double* tab, const v_reg<int, n*2>& idx,
+                                               v_reg<double, n>& x, v_reg<double, n>& y)
+{
+    for( int i = 0; i < n; i++ )
+    {
+        int j = idx.s[i];
+        x.s[i] = tab[j];
+        y.s[i] = tab[j+1];
+    }
+}
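+// Illustrative example: v_lut_deinterleave gathers (x, y) pairs from an
+// interleaved table, so with tab = {x0,y0, x1,y1, x2,y2, x3,y3} and
+// idx = {0, 2, 4, 6} it fills x = {x0,x1,x2,x3} and y = {y0,y1,y2,y3}.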
+
+template<typename _Tp, int n> inline v_reg<_Tp, n> v_interleave_pairs(const v_reg<_Tp, n>& vec)
+{
+    v_reg<_Tp, n> c;
+    for (int i = 0; i < n/4; i++)
+    {
+        c.s[4*i  ] = vec.s[4*i  ];
+        c.s[4*i+1] = vec.s[4*i+2];
+        c.s[4*i+2] = vec.s[4*i+1];
+        c.s[4*i+3] = vec.s[4*i+3];
+    }
+    return c;
+}
+
+template<typename _Tp, int n> inline v_reg<_Tp, n> v_interleave_quads(const v_reg<_Tp, n>& vec)
+{
+    v_reg<_Tp, n> c;
+    for (int i = 0; i < n/8; i++)
+    {
+        c.s[8*i  ] = vec.s[8*i  ];
+        c.s[8*i+1] = vec.s[8*i+4];
+        c.s[8*i+2] = vec.s[8*i+1];
+        c.s[8*i+3] = vec.s[8*i+5];
+        c.s[8*i+4] = vec.s[8*i+2];
+        c.s[8*i+5] = vec.s[8*i+6];
+        c.s[8*i+6] = vec.s[8*i+3];
+        c.s[8*i+7] = vec.s[8*i+7];
+    }
+    return c;
+}
+
+template<typename _Tp, int n> inline v_reg<_Tp, n> v_pack_triplets(const v_reg<_Tp, n>& vec)
+{
+    v_reg<_Tp, n> c;
+    for (int i = 0; i < n/4; i++)
+    {
+        c.s[3*i  ] = vec.s[4*i  ];
+        c.s[3*i+1] = vec.s[4*i+1];
+        c.s[3*i+2] = vec.s[4*i+2];
+    }
+    return c;
+}
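+// v_pack_triplets compacts groups of four lanes to three by dropping every
+// fourth lane, e.g. {x0,y0,z0,_, x1,y1,z1,_, ...} -> {x0,y0,z0, x1,y1,z1, ...};
+// the trailing lanes of the result are left unspecified.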
+
+template<typename _Tp>
+inline void v_transpose4x4( const v_reg<_Tp, 4>& a0, const v_reg<_Tp, 4>& a1,
+                            const v_reg<_Tp, 4>& a2, const v_reg<_Tp, 4>& a3,
+                            v_reg<_Tp, 4>& b0, v_reg<_Tp, 4>& b1,
+                            v_reg<_Tp, 4>& b2, v_reg<_Tp, 4>& b3 )
+{
+    b0 = v_reg<_Tp, 4>(a0.s[0], a1.s[0], a2.s[0], a3.s[0]);
+    b1 = v_reg<_Tp, 4>(a0.s[1], a1.s[1], a2.s[1], a3.s[1]);
+    b2 = v_reg<_Tp, 4>(a0.s[2], a1.s[2], a2.s[2], a3.s[2]);
+    b3 = v_reg<_Tp, 4>(a0.s[3], a1.s[3], a2.s[3], a3.s[3]);
+}
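+// Illustrative example: with the rows a0 = {1,2,3,4}, a1 = {5,6,7,8},
+// a2 = {9,10,11,12}, a3 = {13,14,15,16}, v_transpose4x4 produces the columns
+// b0 = {1,5,9,13}, b1 = {2,6,10,14}, b2 = {3,7,11,15}, b3 = {4,8,12,16}.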
+
+#define OPENCV_HAL_IMPL_C_INIT_ZERO(_Tpvec, _Tp, suffix) \
+inline _Tpvec v_setzero_##suffix() { return _Tpvec::zero(); }
+
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint8x16, uchar, u8)
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_int8x16, schar, s8)
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint16x8, ushort, u16)
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_int16x8, short, s16)
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint32x4, unsigned, u32)
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_int32x4, int, s32)
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_float32x4, float, f32)
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_float64x2, double, f64)
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint64x2, uint64, u64)
+OPENCV_HAL_IMPL_C_INIT_ZERO(v_int64x2, int64, s64)
+
+#define OPENCV_HAL_IMPL_C_INIT_VAL(_Tpvec, _Tp, suffix) \
+inline _Tpvec v_setall_##suffix(_Tp val) { return _Tpvec::all(val); }
+
+OPENCV_HAL_IMPL_C_INIT_VAL(v_uint8x16, uchar, u8)
+OPENCV_HAL_IMPL_C_INIT_VAL(v_int8x16, schar, s8)
+OPENCV_HAL_IMPL_C_INIT_VAL(v_uint16x8, ushort, u16)
+OPENCV_HAL_IMPL_C_INIT_VAL(v_int16x8, short, s16)
+OPENCV_HAL_IMPL_C_INIT_VAL(v_uint32x4, unsigned, u32)
+OPENCV_HAL_IMPL_C_INIT_VAL(v_int32x4, int, s32)
+OPENCV_HAL_IMPL_C_INIT_VAL(v_float32x4, float, f32)
+OPENCV_HAL_IMPL_C_INIT_VAL(v_float64x2, double, f64)
+OPENCV_HAL_IMPL_C_INIT_VAL(v_uint64x2, uint64, u64)
+OPENCV_HAL_IMPL_C_INIT_VAL(v_int64x2, int64, s64)
+
+#define OPENCV_HAL_IMPL_C_REINTERPRET(_Tpvec, _Tp, suffix) \
+template<typename _Tp0, int n0> inline _Tpvec \
+    v_reinterpret_as_##suffix(const v_reg<_Tp0, n0>& a) \
+{ return a.template reinterpret_as<_Tp, _Tpvec::nlanes>(); }
+
+OPENCV_HAL_IMPL_C_REINTERPRET(v_uint8x16, uchar, u8)
+OPENCV_HAL_IMPL_C_REINTERPRET(v_int8x16, schar, s8)
+OPENCV_HAL_IMPL_C_REINTERPRET(v_uint16x8, ushort, u16)
+OPENCV_HAL_IMPL_C_REINTERPRET(v_int16x8, short, s16)
+OPENCV_HAL_IMPL_C_REINTERPRET(v_uint32x4, unsigned, u32)
+OPENCV_HAL_IMPL_C_REINTERPRET(v_int32x4, int, s32)
+OPENCV_HAL_IMPL_C_REINTERPRET(v_float32x4, float, f32)
+OPENCV_HAL_IMPL_C_REINTERPRET(v_float64x2, double, f64)
+OPENCV_HAL_IMPL_C_REINTERPRET(v_uint64x2, uint64, u64)
+OPENCV_HAL_IMPL_C_REINTERPRET(v_int64x2, int64, s64)
+
+#define OPENCV_HAL_IMPL_C_SHIFTL(_Tpvec, _Tp) \
+template<int n> inline _Tpvec v_shl(const _Tpvec& a) \
+{ return a << n; }
+
+OPENCV_HAL_IMPL_C_SHIFTL(v_uint16x8, ushort)
+OPENCV_HAL_IMPL_C_SHIFTL(v_int16x8, short)
+OPENCV_HAL_IMPL_C_SHIFTL(v_uint32x4, unsigned)
+OPENCV_HAL_IMPL_C_SHIFTL(v_int32x4, int)
+OPENCV_HAL_IMPL_C_SHIFTL(v_uint64x2, uint64)
+OPENCV_HAL_IMPL_C_SHIFTL(v_int64x2, int64)
+
+#define OPENCV_HAL_IMPL_C_SHIFTR(_Tpvec, _Tp) \
+template<int n> inline _Tpvec v_shr(const _Tpvec& a) \
+{ return a >> n; }
+
+OPENCV_HAL_IMPL_C_SHIFTR(v_uint16x8, ushort)
+OPENCV_HAL_IMPL_C_SHIFTR(v_int16x8, short)
+OPENCV_HAL_IMPL_C_SHIFTR(v_uint32x4, unsigned)
+OPENCV_HAL_IMPL_C_SHIFTR(v_int32x4, int)
+OPENCV_HAL_IMPL_C_SHIFTR(v_uint64x2, uint64)
+OPENCV_HAL_IMPL_C_SHIFTR(v_int64x2, int64)
+
+#define OPENCV_HAL_IMPL_C_RSHIFTR(_Tpvec, _Tp) \
+template<int n> inline _Tpvec v_rshr(const _Tpvec& a) \
+{ \
+    _Tpvec c; \
+    for( int i = 0; i < _Tpvec::nlanes; i++ ) \
+        c.s[i] = (_Tp)((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \
+    return c; \
+}
+
+OPENCV_HAL_IMPL_C_RSHIFTR(v_uint16x8, ushort)
+OPENCV_HAL_IMPL_C_RSHIFTR(v_int16x8, short)
+OPENCV_HAL_IMPL_C_RSHIFTR(v_uint32x4, unsigned)
+OPENCV_HAL_IMPL_C_RSHIFTR(v_int32x4, int)
+OPENCV_HAL_IMPL_C_RSHIFTR(v_uint64x2, uint64)
+OPENCV_HAL_IMPL_C_RSHIFTR(v_int64x2, int64)
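+// v_rshr<n> is a rounding right shift: (a + 2^(n-1)) >> n, i.e. division by
+// 2^n with halves rounded towards +infinity. Illustrative example with n = 2:
+// 5 -> 1, 6 -> 2, 7 -> 2, -6 -> -1.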
+
+#define OPENCV_HAL_IMPL_C_PACK(_Tpvec, _Tpnvec, _Tpn, pack_suffix, cast) \
+inline _Tpnvec v_##pack_suffix(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    _Tpnvec c; \
+    for( int i = 0; i < _Tpvec::nlanes; i++ ) \
+    { \
+        c.s[i] = cast<_Tpn>(a.s[i]); \
+        c.s[i+_Tpvec::nlanes] = cast<_Tpn>(b.s[i]); \
+    } \
+    return c; \
+}
+
+OPENCV_HAL_IMPL_C_PACK(v_uint16x8, v_uint8x16, uchar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int16x8, v_int8x16, schar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_uint32x4, v_uint16x8, ushort, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int32x4, v_int16x8, short, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_uint64x2, v_uint32x4, unsigned, pack, static_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int64x2, v_int32x4, int, pack, static_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int16x8, v_uint8x16, uchar, pack_u, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int32x4, v_uint16x8, ushort, pack_u, saturate_cast)
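+// v_pack narrows two vectors into one with half-width lanes. The 16- and
+// 32-bit variants saturate (e.g. packing int16 lanes 300 and -300 into int8
+// yields 127 and -128), while the 64-to-32 bit variants truncate (static_cast).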
+
+#define OPENCV_HAL_IMPL_C_RSHR_PACK(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix, cast) \
+template<int n> inline _Tpnvec v_rshr_##pack_suffix(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    _Tpnvec c; \
+    for( int i = 0; i < _Tpvec::nlanes; i++ ) \
+    { \
+        c.s[i] = cast<_Tpn>((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \
+        c.s[i+_Tpvec::nlanes] = cast<_Tpn>((b.s[i] + ((_Tp)1 << (n - 1))) >> n); \
+    } \
+    return c; \
+}
+
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_uint16x8, ushort, v_uint8x16, uchar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int16x8, short, v_int8x16, schar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_uint32x4, unsigned, v_uint16x8, ushort, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int32x4, int, v_int16x8, short, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_uint64x2, uint64, v_uint32x4, unsigned, pack, static_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int64x2, int64, v_int32x4, int, pack, static_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int16x8, short, v_uint8x16, uchar, pack_u, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int32x4, int, v_uint16x8, ushort, pack_u, saturate_cast)
+
+#define OPENCV_HAL_IMPL_C_PACK_STORE(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix, cast) \
+inline void v_##pack_suffix##_store(_Tpn* ptr, const _Tpvec& a) \
+{ \
+    for( int i = 0; i < _Tpvec::nlanes; i++ ) \
+        ptr[i] = cast<_Tpn>(a.s[i]); \
+}
+
+OPENCV_HAL_IMPL_C_PACK_STORE(v_uint16x8, ushort, v_uint8x16, uchar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int16x8, short, v_int8x16, schar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_uint32x4, unsigned, v_uint16x8, ushort, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int32x4, int, v_int16x8, short, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_uint64x2, uint64, v_uint32x4, unsigned, pack, static_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int64x2, int64, v_int32x4, int, pack, static_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int16x8, short, v_uint8x16, uchar, pack_u, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int32x4, int, v_uint16x8, ushort, pack_u, saturate_cast)
+
+#define OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix, cast) \
+template<int n> inline void v_rshr_##pack_suffix##_store(_Tpn* ptr, const _Tpvec& a) \
+{ \
+    for( int i = 0; i < _Tpvec::nlanes; i++ ) \
+        ptr[i] = cast<_Tpn>((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \
+}
+
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint16x8, ushort, v_uint8x16, uchar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int16x8, short, v_int8x16, schar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint32x4, unsigned, v_uint16x8, ushort, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int32x4, int, v_int16x8, short, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint64x2, uint64, v_uint32x4, unsigned, pack, static_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int64x2, int64, v_int32x4, int, pack, static_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int16x8, short, v_uint8x16, uchar, pack_u, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int32x4, int, v_uint16x8, ushort, pack_u, saturate_cast)
+
+template<typename _Tpm, typename _Tp, int n>
+inline void _pack_b(_Tpm* mptr, const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
+{
+    for (int i = 0; i < n; ++i)
+    {
+        mptr[i] = (_Tpm)a.s[i];
+        mptr[i + n] = (_Tpm)b.s[i];
+    }
+}
+
+inline v_uint8x16 v_pack_b(const v_uint16x8& a, const v_uint16x8& b)
+{
+    v_uint8x16 mask;
+    _pack_b(mask.s, a, b);
+    return mask;
+}
+
+inline v_uint8x16 v_pack_b(const v_uint32x4& a, const v_uint32x4& b,
+                           const v_uint32x4& c, const v_uint32x4& d)
+{
+    v_uint8x16 mask;
+    _pack_b(mask.s, a, b);
+    _pack_b(mask.s + 8, c, d);
+    return mask;
+}
+
+inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uint64x2& c,
+                           const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f,
+                           const v_uint64x2& g, const v_uint64x2& h)
+{
+    v_uint8x16 mask;
+    _pack_b(mask.s, a, b);
+    _pack_b(mask.s + 4, c, d);
+    _pack_b(mask.s + 8, e, f);
+    _pack_b(mask.s + 12, g, h);
+    return mask;
+}
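+// v_pack_b is meant for boolean masks: each source lane is truncated to one
+// byte, which preserves the 0 / all-ones encoding of comparison results.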
+
+inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
+                            const v_float32x4& m1, const v_float32x4& m2,
+                            const v_float32x4& m3)
+{
+    return v_float32x4(v.s[0]*m0.s[0] + v.s[1]*m1.s[0] + v.s[2]*m2.s[0] + v.s[3]*m3.s[0],
+                       v.s[0]*m0.s[1] + v.s[1]*m1.s[1] + v.s[2]*m2.s[1] + v.s[3]*m3.s[1],
+                       v.s[0]*m0.s[2] + v.s[1]*m1.s[2] + v.s[2]*m2.s[2] + v.s[3]*m3.s[2],
+                       v.s[0]*m0.s[3] + v.s[1]*m1.s[3] + v.s[2]*m2.s[3] + v.s[3]*m3.s[3]);
+}
+
+inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
+                               const v_float32x4& m1, const v_float32x4& m2,
+                               const v_float32x4& m3)
+{
+    return v_float32x4(v.s[0]*m0.s[0] + v.s[1]*m1.s[0] + v.s[2]*m2.s[0] + m3.s[0],
+                       v.s[0]*m0.s[1] + v.s[1]*m1.s[1] + v.s[2]*m2.s[1] + m3.s[1],
+                       v.s[0]*m0.s[2] + v.s[1]*m1.s[2] + v.s[2]*m2.s[2] + m3.s[2],
+                       v.s[0]*m0.s[3] + v.s[1]*m1.s[3] + v.s[2]*m2.s[3] + m3.s[3]);
+}
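+// v_matmul computes v.s[0]*m0 + v.s[1]*m1 + v.s[2]*m2 + v.s[3]*m3, i.e. the
+// product of the 4x4 matrix with columns m0..m3 and the vector v;
+// v_matmuladd replaces the v.s[3]*m3 term with an additive bias m3.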
+
+inline v_reg<float, V_TypeTraits<float>::nlanes128>
+v_load_expand(const float16_t* ptr)
+{
+    v_reg<float, V_TypeTraits<float>::nlanes128> v;
+    for( int i = 0; i < v.nlanes; i++ )
+    {
+        v.s[i] = ptr[i];
+    }
+    return v;
+}
+
+inline void
+v_pack_store(float16_t* ptr, const v_reg<float, V_TypeTraits<float>::nlanes128>& v)
+{
+    for( int i = 0; i < v.nlanes; i++ )
+    {
+        ptr[i] = float16_t(v.s[i]);
+    }
+}
+
+inline void v_cleanup() {}
+}  // namespace fallback
+
+static v128_t wasm_unpacklo_i8x16(v128_t a, v128_t b) {
+    return wasm_v8x16_shuffle(a, b, 0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23);
+}
+
+static v128_t wasm_unpacklo_i16x8(v128_t a, v128_t b) {
+    return wasm_v8x16_shuffle(a, b, 0,1,16,17,2,3,18,19,4,5,20,21,6,7,22,23);
+}
+
+static v128_t wasm_unpacklo_i32x4(v128_t a, v128_t b) {
+    return wasm_v8x16_shuffle(a, b, 0,1,2,3,16,17,18,19,4,5,6,7,20,21,22,23);
+}
+
+static v128_t wasm_unpacklo_i64x2(v128_t a, v128_t b) {
+    return wasm_v8x16_shuffle(a, b, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23);
+}
+
+static v128_t wasm_unpackhi_i8x16(v128_t a, v128_t b) {
+    return wasm_v8x16_shuffle(a, b, 8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31);
+}
+
+static v128_t wasm_unpackhi_i16x8(v128_t a, v128_t b) {
+    return wasm_v8x16_shuffle(a, b, 8,9,24,25,10,11,26,27,12,13,28,29,14,15,30,31);
+}
+
+static v128_t wasm_unpackhi_i32x4(v128_t a, v128_t b) {
+    return wasm_v8x16_shuffle(a, b, 8,9,10,11,24,25,26,27,12,13,14,15,28,29,30,31);
+}
+
+static v128_t wasm_unpackhi_i64x2(v128_t a, v128_t b) {
+    return wasm_v8x16_shuffle(a, b, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31);
+}
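+// The helpers above emulate SSE-style unpacklo/unpackhi interleaves:
+// wasm_v8x16_shuffle selects bytes from the concatenation of its operands,
+// with indices 0-15 addressing `a` and 16-31 addressing `b`.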
+
+/** Convert **/
+// 8 >> 16
+inline v128_t v128_cvtu8x16_i16x8(const v128_t& a)
+{
+    const v128_t z = wasm_i8x16_splat(0);
+    return wasm_unpacklo_i8x16(a, z);
+}
+inline v128_t v128_cvti8x16_i16x8(const v128_t& a)
+{ return wasm_i16x8_shr(wasm_unpacklo_i8x16(a, a), 8); }
+// 8 >> 32
+inline v128_t v128_cvtu8x16_i32x4(const v128_t& a)
+{
+    const v128_t z = wasm_i8x16_splat(0);
+    return wasm_unpacklo_i16x8(wasm_unpacklo_i8x16(a, z), z);
+}
+inline v128_t v128_cvti8x16_i32x4(const v128_t& a)
+{
+    v128_t r = wasm_unpacklo_i8x16(a, a);
+    r = wasm_unpacklo_i8x16(r, r);
+    return wasm_i32x4_shr(r, 24);
+}
+// 16 >> 32
+inline v128_t v128_cvtu16x8_i32x4(const v128_t& a)
+{
+    const v128_t z = wasm_i8x16_splat(0);
+    return wasm_unpacklo_i16x8(a, z);
+}
+inline v128_t v128_cvti16x8_i32x4(const v128_t& a)
+{ return wasm_i32x4_shr(wasm_unpacklo_i16x8(a, a), 16); }
+// 32 >> 64
+inline v128_t v128_cvtu32x4_i64x2(const v128_t& a)
+{
+    const v128_t z = wasm_i8x16_splat(0);
+    return wasm_unpacklo_i32x4(a, z);
+}
+inline v128_t v128_cvti32x4_i64x2(const v128_t& a)
+{ return wasm_unpacklo_i32x4(a, wasm_i32x4_shr(a, 31)); }
+
+// 16 << 8
+inline v128_t v128_cvtu8x16_i16x8_high(const v128_t& a)
+{
+    const v128_t z = wasm_i8x16_splat(0);
+    return wasm_unpackhi_i8x16(a, z);
+}
+inline v128_t v128_cvti8x16_i16x8_high(const v128_t& a)
+{ return wasm_i16x8_shr(wasm_unpackhi_i8x16(a, a), 8); }
+// 32 << 16
+inline v128_t v128_cvtu16x8_i32x4_high(const v128_t& a)
+{
+    const v128_t z = wasm_i8x16_splat(0);
+    return wasm_unpackhi_i16x8(a, z);
+}
+inline v128_t v128_cvti16x8_i32x4_high(const v128_t& a)
+{ return wasm_i32x4_shr(wasm_unpackhi_i16x8(a, a), 16); }
+// 64 << 32
+inline v128_t v128_cvtu32x4_i64x2_high(const v128_t& a)
+{
+    const v128_t z = wasm_i8x16_splat(0);
+    return wasm_unpackhi_i32x4(a, z);
+}
+inline v128_t v128_cvti32x4_i64x2_high(const v128_t& a)
+{ return wasm_unpackhi_i32x4(a, wasm_i32x4_shr(a, 31)); }
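+// The signed widening helpers get by without a dedicated sign-extension
+// instruction: interleaving a vector with itself puts a copy of each element
+// into the high half of a double-width lane, and an arithmetic right shift by
+// the element width then sign-extends it, e.g.
+//   v128_cvti16x8_i32x4(a) == unpacklo(a, a) >> 16 (arithmetic).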
+
+#define OPENCV_HAL_IMPL_WASM_INITVEC(_Tpvec, _Tp, suffix, zsuffix, _Tps) \
+inline _Tpvec v_setzero_##suffix() { return _Tpvec(wasm_##zsuffix##_splat((_Tps)0)); } \
+inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(wasm_##zsuffix##_splat((_Tps)v)); } \
+template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \
+{ return _Tpvec(a.val); }
+
+OPENCV_HAL_IMPL_WASM_INITVEC(v_uint8x16, uchar, u8, i8x16, schar)
+OPENCV_HAL_IMPL_WASM_INITVEC(v_int8x16, schar, s8, i8x16, schar)
+OPENCV_HAL_IMPL_WASM_INITVEC(v_uint16x8, ushort, u16, i16x8, short)
+OPENCV_HAL_IMPL_WASM_INITVEC(v_int16x8, short, s16, i16x8, short)
+OPENCV_HAL_IMPL_WASM_INITVEC(v_uint32x4, unsigned, u32, i32x4, int)
+OPENCV_HAL_IMPL_WASM_INITVEC(v_int32x4, int, s32, i32x4, int)
+OPENCV_HAL_IMPL_WASM_INITVEC(v_float32x4, float, f32, f32x4, float)
+
+#ifdef __wasm_unimplemented_simd128__
+OPENCV_HAL_IMPL_WASM_INITVEC(v_uint64x2, uint64, u64, i64x2, int64)
+OPENCV_HAL_IMPL_WASM_INITVEC(v_int64x2, int64, s64, i64x2, int64)
+OPENCV_HAL_IMPL_WASM_INITVEC(v_float64x2, double, f64, f64x2, double)
+#else
+#define OPENCV_HAL_IMPL_FALLBACK_INITVEC(_Tpvec, _Tp, suffix, _Tps) \
+inline _Tpvec v_setzero_##suffix() { return _Tpvec((_Tps)0, (_Tps)0); } \
+inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec((_Tps)v, (_Tps)v); } \
+template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \
+{ return _Tpvec(a.val); }
+
+OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_uint64x2, uint64, u64, int64)
+OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_int64x2, int64, s64, int64)
+OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_float64x2, double, f64, double)
+#endif
+
+//////////////// PACK ///////////////
+inline v_uint8x16 v_pack(const v_uint16x8& a, const v_uint16x8& b)
+{
+    v128_t maxval = wasm_i16x8_splat(255);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u16x8_gt(a.val, maxval));
+    v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_u16x8_gt(b.val, maxval));
+    return v_uint8x16(wasm_v8x16_shuffle(a1, b1, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30));
+}
+inline v_int8x16 v_pack(const v_int16x8& a, const v_int16x8& b)
+{
+    v128_t maxval = wasm_i16x8_splat(127);
+    v128_t minval = wasm_i16x8_splat(-128);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i16x8_gt(a.val, maxval));
+    v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_i16x8_gt(b.val, maxval));
+    v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i16x8_lt(a1, minval));
+    v128_t b2 = wasm_v128_bitselect(minval, b1, wasm_i16x8_lt(b1, minval));
+    return v_int8x16(wasm_v8x16_shuffle(a2, b2, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30));
+}
+inline v_uint16x8 v_pack(const v_uint32x4& a, const v_uint32x4& b)
+{
+    v128_t maxval = wasm_i32x4_splat(65535);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u32x4_gt(a.val, maxval));
+    v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_u32x4_gt(b.val, maxval));
+    return v_uint16x8(wasm_v8x16_shuffle(a1, b1, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29));
+}
+inline v_int16x8 v_pack(const v_int32x4& a, const v_int32x4& b)
+{
+    v128_t maxval = wasm_i32x4_splat(32767);
+    v128_t minval = wasm_i32x4_splat(-32768);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i32x4_gt(a.val, maxval));
+    v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_i32x4_gt(b.val, maxval));
+    v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i32x4_lt(a1, minval));
+    v128_t b2 = wasm_v128_bitselect(minval, b1, wasm_i32x4_lt(b1, minval));
+    return v_int16x8(wasm_v8x16_shuffle(a2, b2, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29));
+}
+inline v_uint32x4 v_pack(const v_uint64x2& a, const v_uint64x2& b)
+{
+    return v_uint32x4(wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27));
+}
+inline v_int32x4 v_pack(const v_int64x2& a, const v_int64x2& b)
+{
+    return v_int32x4(wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27));
+}
+inline v_uint8x16 v_pack_u(const v_int16x8& a, const v_int16x8& b)
+{
+    v128_t maxval = wasm_i16x8_splat(255);
+    v128_t minval = wasm_i16x8_splat(0);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i16x8_gt(a.val, maxval));
+    v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_i16x8_gt(b.val, maxval));
+    v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i16x8_lt(a1, minval));
+    v128_t b2 = wasm_v128_bitselect(minval, b1, wasm_i16x8_lt(b1, minval));
+    return v_uint8x16(wasm_v8x16_shuffle(a2, b2, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30));
+}
+inline v_uint16x8 v_pack_u(const v_int32x4& a, const v_int32x4& b)
+{
+    v128_t maxval = wasm_i32x4_splat(65535);
+    v128_t minval = wasm_i32x4_splat(0);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i32x4_gt(a.val, maxval));
+    v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_i32x4_gt(b.val, maxval));
+    v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i32x4_lt(a1, minval));
+    v128_t b2 = wasm_v128_bitselect(minval, b1, wasm_i32x4_lt(b1, minval));
+    return v_uint16x8(wasm_v8x16_shuffle(a2, b2, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29));
+}
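+// WASM SIMD (at this stage) has no narrowing-with-saturation instructions, so
+// the packs above clamp manually: wasm_v128_bitselect substitutes maxval (or
+// minval) wherever the comparison mask is set, and a byte shuffle then gathers
+// the low halves of the clamped lanes.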
+
+template<int n>
+inline v_uint8x16 v_rshr_pack(const v_uint16x8& a, const v_uint16x8& b)
+{
+    v128_t delta = wasm_i16x8_splat(((short)1 << (n-1)));
+    v128_t a1 = wasm_u16x8_shr(wasm_i16x8_add(a.val, delta), n);
+    v128_t b1 = wasm_u16x8_shr(wasm_i16x8_add(b.val, delta), n);
+    v128_t maxval = wasm_i16x8_splat(255);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_u16x8_gt(a1, maxval));
+    v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_u16x8_gt(b1, maxval));
+    return v_uint8x16(wasm_v8x16_shuffle(a2, b2, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30));
+}
+template<int n>
+inline v_int8x16 v_rshr_pack(const v_int16x8& a, const v_int16x8& b)
+{
+    v128_t delta = wasm_i16x8_splat(((short)1 << (n-1)));
+    v128_t a1 = wasm_i16x8_shr(wasm_i16x8_add(a.val, delta), n);
+    v128_t b1 = wasm_i16x8_shr(wasm_i16x8_add(b.val, delta), n);
+    v128_t maxval = wasm_i16x8_splat(127);
+    v128_t minval = wasm_i16x8_splat(-128);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i16x8_gt(a1, maxval));
+    v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_i16x8_gt(b1, maxval));
+    v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i16x8_lt(a1, minval));
+    v128_t b3 = wasm_v128_bitselect(minval, b2, wasm_i16x8_lt(b1, minval));
+    return v_int8x16(wasm_v8x16_shuffle(a3, b3, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30));
+}
+template<int n>
+inline v_uint16x8 v_rshr_pack(const v_uint32x4& a, const v_uint32x4& b)
+{
+    v128_t delta = wasm_i32x4_splat(((int)1 << (n-1)));
+    v128_t a1 = wasm_u32x4_shr(wasm_i32x4_add(a.val, delta), n);
+    v128_t b1 = wasm_u32x4_shr(wasm_i32x4_add(b.val, delta), n);
+    v128_t maxval = wasm_i32x4_splat(65535);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_u32x4_gt(a1, maxval));
+    v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_u32x4_gt(b1, maxval));
+    return v_uint16x8(wasm_v8x16_shuffle(a2, b2, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29));
+}
+template<int n>
+inline v_int16x8 v_rshr_pack(const v_int32x4& a, const v_int32x4& b)
+{
+    v128_t delta = wasm_i32x4_splat(((int)1 << (n-1)));
+    v128_t a1 = wasm_i32x4_shr(wasm_i32x4_add(a.val, delta), n);
+    v128_t b1 = wasm_i32x4_shr(wasm_i32x4_add(b.val, delta), n);
+    v128_t maxval = wasm_i32x4_splat(32767);
+    v128_t minval = wasm_i32x4_splat(-32768);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i32x4_gt(a1, maxval));
+    v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_i32x4_gt(b1, maxval));
+    v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i32x4_lt(a1, minval));
+    v128_t b3 = wasm_v128_bitselect(minval, b2, wasm_i32x4_lt(b1, minval));
+    return v_int16x8(wasm_v8x16_shuffle(a3, b3, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29));
+}
+template<int n>
+inline v_uint32x4 v_rshr_pack(const v_uint64x2& a, const v_uint64x2& b)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
+    v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n);
+    v128_t b1 = wasm_u64x2_shr(wasm_i64x2_add(b.val, delta), n);
+    return v_uint32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27));
+#else
+    fallback::v_uint64x2 a_(a), b_(b);
+    return fallback::v_rshr_pack<n>(a_, b_);
+#endif
+}
+template<int n>
+inline v_int32x4 v_rshr_pack(const v_int64x2& a, const v_int64x2& b)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
+    v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n);
+    v128_t b1 = wasm_i64x2_shr(wasm_i64x2_add(b.val, delta), n);
+    return v_int32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27));
+#else
+    fallback::v_int64x2 a_(a), b_(b);
+    return fallback::v_rshr_pack<n>(a_, b_);
+#endif
+}
+template<int n>
+inline v_uint8x16 v_rshr_pack_u(const v_int16x8& a, const v_int16x8& b)
+{
+    v128_t delta = wasm_i16x8_splat(((short)1 << (n-1)));
+    v128_t a1 = wasm_i16x8_shr(wasm_i16x8_add(a.val, delta), n);
+    v128_t b1 = wasm_i16x8_shr(wasm_i16x8_add(b.val, delta), n);
+    v128_t maxval = wasm_i16x8_splat(255);
+    v128_t minval = wasm_i16x8_splat(0);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i16x8_gt(a1, maxval));
+    v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_i16x8_gt(b1, maxval));
+    v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i16x8_lt(a1, minval));
+    v128_t b3 = wasm_v128_bitselect(minval, b2, wasm_i16x8_lt(b1, minval));
+    return v_uint8x16(wasm_v8x16_shuffle(a3, b3, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30));
+}
+template<int n>
+inline v_uint16x8 v_rshr_pack_u(const v_int32x4& a, const v_int32x4& b)
+{
+    v128_t delta = wasm_i32x4_splat(((int)1 << (n-1)));
+    v128_t a1 = wasm_i32x4_shr(wasm_i32x4_add(a.val, delta), n);
+    v128_t b1 = wasm_i32x4_shr(wasm_i32x4_add(b.val, delta), n);
+    v128_t maxval = wasm_i32x4_splat(65535);
+    v128_t minval = wasm_i32x4_splat(0);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i32x4_gt(a1, maxval));
+    v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_i32x4_gt(b1, maxval));
+    v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i32x4_lt(a1, minval));
+    v128_t b3 = wasm_v128_bitselect(minval, b2, wasm_i32x4_lt(b1, minval));
+    return v_uint16x8(wasm_v8x16_shuffle(a3, b3, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29));
+}
+
+inline void v_pack_store(uchar* ptr, const v_uint16x8& a)
+{
+    v128_t maxval = wasm_i16x8_splat(255);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u16x8_gt(a.val, maxval));
+    v128_t r = wasm_v8x16_shuffle(a1, a1, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14);
+    uchar t_ptr[16];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<8; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+inline void v_pack_store(schar* ptr, const v_int16x8& a)
+{
+    v128_t maxval = wasm_i16x8_splat(127);
+    v128_t minval = wasm_i16x8_splat(-128);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i16x8_gt(a.val, maxval));
+    v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i16x8_lt(a1, minval));
+    v128_t r = wasm_v8x16_shuffle(a2, a2, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14);
+    schar t_ptr[16];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<8; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+inline void v_pack_store(ushort* ptr, const v_uint32x4& a)
+{
+    v128_t maxval = wasm_i32x4_splat(65535);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u32x4_gt(a.val, maxval));
+    v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13);
+    ushort t_ptr[8];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<4; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+inline void v_pack_store(short* ptr, const v_int32x4& a)
+{
+    v128_t maxval = wasm_i32x4_splat(32767);
+    v128_t minval = wasm_i32x4_splat(-32768);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i32x4_gt(a.val, maxval));
+    v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i32x4_lt(a1, minval));
+    v128_t r = wasm_v8x16_shuffle(a2, a2, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13);
+    short t_ptr[8];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<4; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+inline void v_pack_store(unsigned* ptr, const v_uint64x2& a)
+{
+    v128_t r = wasm_v8x16_shuffle(a.val, a.val, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11);
+    unsigned t_ptr[4];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<2; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+inline void v_pack_store(int* ptr, const v_int64x2& a)
+{
+    v128_t r = wasm_v8x16_shuffle(a.val, a.val, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11);
+    int t_ptr[4];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<2; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+inline void v_pack_u_store(uchar* ptr, const v_int16x8& a)
+{
+    v128_t maxval = wasm_i16x8_splat(255);
+    v128_t minval = wasm_i16x8_splat(0);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i16x8_gt(a.val, maxval));
+    v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i16x8_lt(a1, minval));
+    v128_t r = wasm_v8x16_shuffle(a2, a2, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14);
+    uchar t_ptr[16];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<8; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+inline void v_pack_u_store(ushort* ptr, const v_int32x4& a)
+{
+    v128_t maxval = wasm_i32x4_splat(65535);
+    v128_t minval = wasm_i32x4_splat(0);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i32x4_gt(a.val, maxval));
+    v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i32x4_lt(a1, minval));
+    v128_t r = wasm_v8x16_shuffle(a2, a2, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13);
+    ushort t_ptr[8];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<4; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+
+template<int n>
+inline void v_rshr_pack_store(uchar* ptr, const v_uint16x8& a)
+{
+    v128_t delta = wasm_i16x8_splat((short)(1 << (n-1)));
+    v128_t a1 = wasm_u16x8_shr(wasm_i16x8_add(a.val, delta), n);
+    v128_t maxval = wasm_i16x8_splat(255);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_u16x8_gt(a1, maxval));
+    v128_t r = wasm_v8x16_shuffle(a2, a2, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14);
+    uchar t_ptr[16];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<8; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+template<int n>
+inline void v_rshr_pack_store(schar* ptr, const v_int16x8& a)
+{
+    v128_t delta = wasm_i16x8_splat(((short)1 << (n-1)));
+    v128_t a1 = wasm_i16x8_shr(wasm_i16x8_add(a.val, delta), n);
+    v128_t maxval = wasm_i16x8_splat(127);
+    v128_t minval = wasm_i16x8_splat(-128);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i16x8_gt(a1, maxval));
+    v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i16x8_lt(a1, minval));
+    v128_t r = wasm_v8x16_shuffle(a3, a3, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14);
+    schar t_ptr[16];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<8; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+template<int n>
+inline void v_rshr_pack_store(ushort* ptr, const v_uint32x4& a)
+{
+    v128_t delta = wasm_i32x4_splat(((int)1 << (n-1)));
+    v128_t a1 = wasm_u32x4_shr(wasm_i32x4_add(a.val, delta), n);
+    v128_t maxval = wasm_i32x4_splat(65535);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_u32x4_gt(a1, maxval));
+    v128_t r = wasm_v8x16_shuffle(a2, a2, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13);
+    ushort t_ptr[8];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<4; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+template<int n>
+inline void v_rshr_pack_store(short* ptr, const v_int32x4& a)
+{
+    v128_t delta = wasm_i32x4_splat(((int)1 << (n-1)));
+    v128_t a1 = wasm_i32x4_shr(wasm_i32x4_add(a.val, delta), n);
+    v128_t maxval = wasm_i32x4_splat(32767);
+    v128_t minval = wasm_i32x4_splat(-32768);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i32x4_gt(a1, maxval));
+    v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i32x4_lt(a1, minval));
+    v128_t r = wasm_v8x16_shuffle(a3, a3, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13);
+    short t_ptr[8];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<4; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+template<int n>
+inline void v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
+    v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n);
+    v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11);
+    unsigned t_ptr[4];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<2; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+#else
+    fallback::v_uint64x2 _a(a);
+    fallback::v_rshr_pack_store<n>(ptr, _a);
+#endif
+}
+template<int n>
+inline void v_rshr_pack_store(int* ptr, const v_int64x2& a)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
+    v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n);
+    v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11);
+    int t_ptr[4];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<2; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+#else
+    fallback::v_int64x2 _a(a);
+    fallback::v_rshr_pack_store<n>(ptr, _a);
+#endif
+}
+template<int n>
+inline void v_rshr_pack_u_store(uchar* ptr, const v_int16x8& a)
+{
+    v128_t delta = wasm_i16x8_splat(((short)1 << (n-1)));
+    v128_t a1 = wasm_i16x8_shr(wasm_i16x8_add(a.val, delta), n);
+    v128_t maxval = wasm_i16x8_splat(255);
+    v128_t minval = wasm_i16x8_splat(0);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i16x8_gt(a1, maxval));
+    v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i16x8_lt(a1, minval));
+    v128_t r = wasm_v8x16_shuffle(a3, a3, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14);
+    uchar t_ptr[16];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<8; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+template<int n>
+inline void v_rshr_pack_u_store(ushort* ptr, const v_int32x4& a)
+{
+    v128_t delta = wasm_i32x4_splat(((int)1 << (n-1)));
+    v128_t a1 = wasm_i32x4_shr(wasm_i32x4_add(a.val, delta), n);
+    v128_t maxval = wasm_i32x4_splat(65535);
+    v128_t minval = wasm_i32x4_splat(0);
+    v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i32x4_gt(a1, maxval));
+    v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i32x4_lt(a1, minval));
+    v128_t r = wasm_v8x16_shuffle(a3, a3, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13);
+    ushort t_ptr[8];
+    wasm_v128_store(t_ptr, r);
+    for (int i=0; i<4; ++i) {
+        ptr[i] = t_ptr[i];
+    }
+}
+
+inline v_uint8x16 v_pack_b(const v_uint16x8& a, const v_uint16x8& b)
+{
+    v128_t maxval = wasm_i16x8_splat(255);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u16x8_gt(a.val, maxval));
+    v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_u16x8_gt(b.val, maxval));
+    return v_uint8x16(wasm_v8x16_shuffle(a1, b1, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30));
+}
+
+inline v_uint8x16 v_pack_b(const v_uint32x4& a, const v_uint32x4& b,
+                           const v_uint32x4& c, const v_uint32x4& d)
+{
+    v128_t maxval = wasm_i32x4_splat(255);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u32x4_gt(a.val, maxval));
+    v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_u32x4_gt(b.val, maxval));
+    v128_t c1 = wasm_v128_bitselect(maxval, c.val, wasm_u32x4_gt(c.val, maxval));
+    v128_t d1 = wasm_v128_bitselect(maxval, d.val, wasm_u32x4_gt(d.val, maxval));
+    v128_t ab = wasm_v8x16_shuffle(a1, b1, 0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28);
+    v128_t cd = wasm_v8x16_shuffle(c1, d1, 0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28);
+    return v_uint8x16(wasm_v8x16_shuffle(ab, cd, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23));
+}
+
+inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uint64x2& c,
+                           const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f,
+                           const v_uint64x2& g, const v_uint64x2& h)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v128_t maxval = wasm_i32x4_splat(255);
+    v128_t a1 = wasm_v128_bitselect(maxval, a.val, ((__u64x2)(a.val) > (__u64x2)maxval));
+    v128_t b1 = wasm_v128_bitselect(maxval, b.val, ((__u64x2)(b.val) > (__u64x2)maxval));
+    v128_t c1 = wasm_v128_bitselect(maxval, c.val, ((__u64x2)(c.val) > (__u64x2)maxval));
+    v128_t d1 = wasm_v128_bitselect(maxval, d.val, ((__u64x2)(d.val) > (__u64x2)maxval));
+    v128_t e1 = wasm_v128_bitselect(maxval, e.val, ((__u64x2)(e.val) > (__u64x2)maxval));
+    v128_t f1 = wasm_v128_bitselect(maxval, f.val, ((__u64x2)(f.val) > (__u64x2)maxval));
+    v128_t g1 = wasm_v128_bitselect(maxval, g.val, ((__u64x2)(g.val) > (__u64x2)maxval));
+    v128_t h1 = wasm_v128_bitselect(maxval, h.val, ((__u64x2)(h.val) > (__u64x2)maxval));
+    v128_t ab = wasm_v8x16_shuffle(a1, b1, 0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24);
+    v128_t cd = wasm_v8x16_shuffle(c1, d1, 0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24);
+    v128_t ef = wasm_v8x16_shuffle(e1, f1, 0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24);
+    v128_t gh = wasm_v8x16_shuffle(g1, h1, 0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24);
+    v128_t abcd = wasm_v8x16_shuffle(ab, cd, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19);
+    v128_t efgh = wasm_v8x16_shuffle(ef, gh, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19);
+    return v_uint8x16(wasm_v8x16_shuffle(abcd, efgh, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23));
+#else
+    fallback::v_uint64x2 a_(a), b_(b), c_(c), d_(d), e_(e), f_(f), g_(g), h_(h);
+    return fallback::v_pack_b(a_, b_, c_, d_, e_, f_, g_, h_);
+#endif
+}
+
+inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
+                            const v_float32x4& m1, const v_float32x4& m2,
+                            const v_float32x4& m3)
+{
+    v128_t v0 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 0));
+    v128_t v1 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 1));
+    v128_t v2 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 2));
+    v128_t v3 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 3));
+    v0 = wasm_f32x4_mul(v0, m0.val);
+    v1 = wasm_f32x4_mul(v1, m1.val);
+    v2 = wasm_f32x4_mul(v2, m2.val);
+    v3 = wasm_f32x4_mul(v3, m3.val);
+
+    return v_float32x4(wasm_f32x4_add(wasm_f32x4_add(v0, v1), wasm_f32x4_add(v2, v3)));
+}
+
+inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
+                               const v_float32x4& m1, const v_float32x4& m2,
+                               const v_float32x4& a)
+{
+    v128_t v0 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 0));
+    v128_t v1 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 1));
+    v128_t v2 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 2));
+    v0 = wasm_f32x4_mul(v0, m0.val);
+    v1 = wasm_f32x4_mul(v1, m1.val);
+    v2 = wasm_f32x4_mul(v2, m2.val);
+
+    return v_float32x4(wasm_f32x4_add(wasm_f32x4_add(v0, v1), wasm_f32x4_add(v2, a.val)));
+}
+
+#define OPENCV_HAL_IMPL_WASM_BIN_OP(bin_op, _Tpvec, intrin) \
+inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(intrin(a.val, b.val)); \
+} \
+inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \
+{ \
+    a.val = intrin(a.val, b.val); \
+    return a; \
+}
+
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint8x16, wasm_u8x16_add_saturate)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint8x16, wasm_u8x16_sub_saturate)
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int8x16, wasm_i8x16_add_saturate)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_int8x16, wasm_i8x16_sub_saturate)
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint16x8, wasm_u16x8_add_saturate)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint16x8, wasm_u16x8_sub_saturate)
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int16x8, wasm_i16x8_add_saturate)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_int16x8, wasm_i16x8_sub_saturate)
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint32x4, wasm_i32x4_add)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint32x4, wasm_i32x4_sub)
+OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_uint32x4, wasm_i32x4_mul)
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int32x4, wasm_i32x4_add)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_int32x4, wasm_i32x4_sub)
+OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_int32x4, wasm_i32x4_mul)
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float32x4, wasm_f32x4_add)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float32x4, wasm_f32x4_sub)
+OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float32x4, wasm_f32x4_mul)
+OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float32x4, wasm_f32x4_div)
+
+#ifdef __wasm_unimplemented_simd128__
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint64x2, wasm_i64x2_add)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint64x2, wasm_i64x2_sub)
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int64x2, wasm_i64x2_add)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_int64x2, wasm_i64x2_sub)
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float64x2, wasm_f64x2_add)
+OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float64x2, wasm_f64x2_sub)
+OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float64x2, wasm_f64x2_mul)
+OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float64x2, wasm_f64x2_div)
+#else
+#define OPENCV_HAL_IMPL_FALLBACK_BIN_OP(bin_op, _Tpvec) \
+inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
+{ \
+    fallback::_Tpvec a_(a), b_(b); \
+    return _Tpvec((a_) bin_op (b_)); \
+} \
+inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \
+{ \
+    fallback::_Tpvec a_(a), b_(b); \
+    a_ bin_op##= b_; \
+    a = _Tpvec(a_); \
+    return a; \
+}
+
+OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_uint64x2)
+OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_uint64x2)
+OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_int64x2)
+OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_int64x2)
+OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_float64x2)
+OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_float64x2)
+OPENCV_HAL_IMPL_FALLBACK_BIN_OP(*, v_float64x2)
+OPENCV_HAL_IMPL_FALLBACK_BIN_OP(/, v_float64x2)
+#endif
+
+// saturating multiply 8-bit, 16-bit
+#define OPENCV_HAL_IMPL_WASM_MUL_SAT(_Tpvec, _Tpwvec)        \
+inline _Tpvec operator * (const _Tpvec& a, const _Tpvec& b)  \
+{                                                            \
+    _Tpwvec c, d;                                            \
+    v_mul_expand(a, b, c, d);                                \
+    return v_pack(c, d);                                     \
+}                                                            \
+inline _Tpvec& operator *= (_Tpvec& a, const _Tpvec& b)      \
+{ a = a * b; return a; }
+
+OPENCV_HAL_IMPL_WASM_MUL_SAT(v_uint8x16, v_uint16x8)
+OPENCV_HAL_IMPL_WASM_MUL_SAT(v_int8x16,  v_int16x8)
+OPENCV_HAL_IMPL_WASM_MUL_SAT(v_uint16x8, v_uint32x4)
+OPENCV_HAL_IMPL_WASM_MUL_SAT(v_int16x8,  v_int32x4)
+
+//  Multiply and expand
+inline void v_mul_expand(const v_uint8x16& a, const v_uint8x16& b,
+                         v_uint16x8& c, v_uint16x8& d)
+{
+    v_uint16x8 a0, a1, b0, b1;
+    v_expand(a, a0, a1);
+    v_expand(b, b0, b1);
+    c = v_mul_wrap(a0, b0);
+    d = v_mul_wrap(a1, b1);
+}
+
+inline void v_mul_expand(const v_int8x16& a, const v_int8x16& b,
+                         v_int16x8& c, v_int16x8& d)
+{
+    v_int16x8 a0, a1, b0, b1;
+    v_expand(a, a0, a1);
+    v_expand(b, b0, b1);
+    c = v_mul_wrap(a0, b0);
+    d = v_mul_wrap(a1, b1);
+}
+
+inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b,
+                         v_int32x4& c, v_int32x4& d)
+{
+    v_int32x4 a0, a1, b0, b1;
+    v_expand(a, a0, a1);
+    v_expand(b, b0, b1);
+    c.val = wasm_i32x4_mul(a0.val, b0.val);
+    d.val = wasm_i32x4_mul(a1.val, b1.val);
+}
+
+inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b,
+                         v_uint32x4& c, v_uint32x4& d)
+{
+    v_uint32x4 a0, a1, b0, b1;
+    v_expand(a, a0, a1);
+    v_expand(b, b0, b1);
+    c.val = wasm_i32x4_mul(a0.val, b0.val);
+    d.val = wasm_i32x4_mul(a1.val, b1.val);
+}
+
+inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b,
+                         v_uint64x2& c, v_uint64x2& d)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v_uint64x2 a0, a1, b0, b1;
+    v_expand(a, a0, a1);
+    v_expand(b, b0, b1);
+    c.val = ((__u64x2)(a0.val) * (__u64x2)(b0.val));
+    d.val = ((__u64x2)(a1.val) * (__u64x2)(b1.val));
+#else
+    fallback::v_uint32x4 a_(a), b_(b);
+    fallback::v_uint64x2 c_, d_;
+    fallback::v_mul_expand(a_, b_, c_, d_);
+    c = v_uint64x2(c_);
+    d = v_uint64x2(d_);
+#endif
+}
+
+inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b)
+{
+    v_int32x4 a0, a1, b0, b1;
+    v_expand(a, a0, a1);
+    v_expand(b, b0, b1);
+    v128_t c = wasm_i32x4_mul(a0.val, b0.val);
+    v128_t d = wasm_i32x4_mul(a1.val, b1.val);
+    return v_int16x8(wasm_v8x16_shuffle(c, d, 2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31));
+}
+inline v_uint16x8 v_mul_hi(const v_uint16x8& a, const v_uint16x8& b)
+{
+    v_uint32x4 a0, a1, b0, b1;
+    v_expand(a, a0, a1);
+    v_expand(b, b0, b1);
+    v128_t c = wasm_i32x4_mul(a0.val, b0.val);
+    v128_t d = wasm_i32x4_mul(a1.val, b1.val);
+    return v_uint16x8(wasm_v8x16_shuffle(c, d, 2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31));
+}
+
+inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b)
+{
+    v128_t a0 = wasm_i32x4_shr(wasm_i32x4_shl(a.val, 16), 16);
+    v128_t a1 = wasm_i32x4_shr(a.val, 16);
+    v128_t b0 = wasm_i32x4_shr(wasm_i32x4_shl(b.val, 16), 16);
+    v128_t b1 = wasm_i32x4_shr(b.val, 16);
+    v128_t c = wasm_i32x4_mul(a0, b0);
+    v128_t d = wasm_i32x4_mul(a1, b1);
+    return v_int32x4(wasm_i32x4_add(c, d));
+}
+
+inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c)
+{
+    v128_t a0 = wasm_i32x4_shr(wasm_i32x4_shl(a.val, 16), 16);
+    v128_t a1 = wasm_i32x4_shr(a.val, 16);
+    v128_t b0 = wasm_i32x4_shr(wasm_i32x4_shl(b.val, 16), 16);
+    v128_t b1 = wasm_i32x4_shr(b.val, 16);
+    v128_t d = wasm_i32x4_mul(a0, b0);
+    v128_t e = wasm_i32x4_mul(a1, b1);
+    return v_int32x4(wasm_i32x4_add(wasm_i32x4_add(d, e), c.val));
+}
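+// v_dotprod extracts the even 16-bit elements sign-extended via the arithmetic
+// shift pair (a << 16) >> 16 and the odd elements via a >> 16, so every 32-bit
+// result lane accumulates the pairwise sum a_even*b_even + a_odd*b_odd.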
+
+#define OPENCV_HAL_IMPL_WASM_LOGIC_OP(_Tpvec) \
+OPENCV_HAL_IMPL_WASM_BIN_OP(&, _Tpvec, wasm_v128_and) \
+OPENCV_HAL_IMPL_WASM_BIN_OP(|, _Tpvec, wasm_v128_or) \
+OPENCV_HAL_IMPL_WASM_BIN_OP(^, _Tpvec, wasm_v128_xor) \
+inline _Tpvec operator ~ (const _Tpvec& a) \
+{ \
+    return _Tpvec(wasm_v128_not(a.val)); \
+}
+
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_uint8x16)
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_int8x16)
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_uint16x8)
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_int16x8)
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_uint32x4)
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_int32x4)
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_uint64x2)
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_int64x2)
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_float32x4)
+OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_float64x2)
+
+inline v_float32x4 v_sqrt(const v_float32x4& x)
+{
+#ifdef __wasm_unimplemented_simd128__
+    return v_float32x4(wasm_f32x4_sqrt(x.val));
+#else
+    fallback::v_float32x4 x_(x);
+    return fallback::v_sqrt(x_);
+#endif
+}
+
+inline v_float32x4 v_invsqrt(const v_float32x4& x)
+{
+#ifdef __wasm_unimplemented_simd128__
+    const v128_t _1_0 = wasm_f32x4_splat(1.0);
+    return v_float32x4(wasm_f32x4_div(_1_0, wasm_f32x4_sqrt(x.val)));
+#else
+    fallback::v_float32x4 x_(x);
+    return fallback::v_invsqrt(x_);
+#endif
+}
+
+inline v_float64x2 v_sqrt(const v_float64x2& x)
+{
+#ifdef __wasm_unimplemented_simd128__
+    return v_float64x2(wasm_f64x2_sqrt(x.val));
+#else
+    fallback::v_float64x2 x_(x);
+    return fallback::v_sqrt(x_);
+#endif
+}
+
+inline v_float64x2 v_invsqrt(const v_float64x2& x)
+{
+#ifdef __wasm_unimplemented_simd128__
+    const v128_t _1_0 = wasm_f64x2_splat(1.0);
+    return v_float64x2(wasm_f64x2_div(_1_0, wasm_f64x2_sqrt(x.val)));
+#else
+    fallback::v_float64x2 x_(x);
+    return fallback::v_invsqrt(x_);
+#endif
+}
+
+#define OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(_Tpuvec, _Tpsvec, suffix, zsuffix, shiftWidth) \
+inline _Tpuvec v_abs(const _Tpsvec& x) \
+{ \
+    v128_t s = wasm_##suffix##_shr(x.val, shiftWidth); \
+    v128_t f = wasm_##zsuffix##_shr(x.val, shiftWidth); \
+    return _Tpuvec(wasm_##zsuffix##_add(wasm_v128_xor(x.val, f), s)); \
+}
+
+OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(v_uint8x16, v_int8x16, u8x16, i8x16, 7)
+OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(v_uint16x8, v_int16x8, u16x8, i16x8, 15)
+OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(v_uint32x4, v_int32x4, u32x4, i32x4, 31)
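+// Branchless absolute value: f is all-ones in negative lanes (arithmetic shift
+// of the sign bit) and s is 1 there (logical shift), so (x ^ f) + s evaluates
+// to ~x + 1 = -x in negative lanes and leaves non-negative lanes unchanged.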
+
+inline v_float32x4 v_abs(const v_float32x4& x)
+{ return v_float32x4(wasm_f32x4_abs(x.val)); }
+inline v_float64x2 v_abs(const v_float64x2& x)
+{
+#ifdef __wasm_unimplemented_simd128__
+    return v_float64x2(wasm_f64x2_abs(x.val));
+#else
+    fallback::v_float64x2 x_(x);
+    return fallback::v_abs(x_);
+#endif
+}
+
+// TODO: exp, log, sin, cos
+
+#define OPENCV_HAL_IMPL_WASM_BIN_FUNC(_Tpvec, func, intrin) \
+inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(intrin(a.val, b.val)); \
+}
+
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_min, wasm_f32x4_min)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_max, wasm_f32x4_max)
+
+#ifdef __wasm_unimplemented_simd128__
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_min, wasm_f64x2_min)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_max, wasm_f64x2_max)
+#else
+#define OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(func) \
+inline v_float64x2 func(const v_float64x2& a, const v_float64x2& b) \
+{ \
+    fallback::v_float64x2 a_(a), b_(b); \
+    return fallback::func(a_, b_); \
+}
+
+OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(v_min)
+OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(v_max)
+#endif
+
+#define OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(_Tpvec, suffix) \
+inline _Tpvec v_min(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(wasm_v128_bitselect(b.val, a.val, wasm_##suffix##_gt(a.val, b.val))); \
+} \
+inline _Tpvec v_max(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(wasm_v128_bitselect(a.val, b.val, wasm_##suffix##_gt(a.val, b.val))); \
+}
+
+OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(v_int8x16, i8x16)
+OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(v_int16x8, i16x8)
+OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(v_int32x4, i32x4)
+
+#define OPENCV_HAL_IMPL_WASM_MINMAX_U_INIT_FUNC(_Tpvec, suffix, deltaNum) \
+inline _Tpvec v_min(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    v128_t delta = wasm_##suffix##_splat(deltaNum); \
+    v128_t mask = wasm_##suffix##_gt(wasm_v128_xor(a.val, delta), wasm_v128_xor(b.val, delta)); \
+    return _Tpvec(wasm_v128_bitselect(b.val, a.val, mask)); \
+} \
+inline _Tpvec v_max(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    v128_t delta = wasm_##suffix##_splat(deltaNum); \
+    v128_t mask = wasm_##suffix##_gt(wasm_v128_xor(a.val, delta), wasm_v128_xor(b.val, delta)); \
+    return _Tpvec(wasm_v128_bitselect(a.val, b.val, mask)); \
+}
+
+OPENCV_HAL_IMPL_WASM_MINMAX_U_INIT_FUNC(v_uint8x16, i8x16, (schar)0x80)
+OPENCV_HAL_IMPL_WASM_MINMAX_U_INIT_FUNC(v_uint16x8, i16x8, (short)0x8000)
+OPENCV_HAL_IMPL_WASM_MINMAX_U_INIT_FUNC(v_uint32x4, i32x4, (int)0x80000000)
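+// Unsigned min/max via a signed comparison: XOR-ing both operands with the
+// sign bit (deltaNum) maps the unsigned order onto the signed order, so the
+// signed greater-than on the biased values drives the bitselect correctly.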
+
+#define OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(_Tpvec, suffix, esuffix) \
+inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(wasm_##esuffix##_eq(a.val, b.val)); } \
+inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(wasm_##esuffix##_ne(a.val, b.val)); } \
+inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(wasm_##suffix##_lt(a.val, b.val)); } \
+inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(wasm_##suffix##_gt(a.val, b.val)); } \
+inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(wasm_##suffix##_le(a.val, b.val)); } \
+inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(wasm_##suffix##_ge(a.val, b.val)); }
+
+OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_uint8x16, u8x16, i8x16)
+OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int8x16, i8x16, i8x16)
+OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_uint16x8, u16x8, i16x8)
+OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int16x8, i16x8, i16x8)
+OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_uint32x4, u32x4, i32x4)
+OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int32x4, i32x4, i32x4)
+OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float32x4, f32x4, f32x4)
+
+#ifdef __wasm_unimplemented_simd128__
+OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float64x2, f64x2, f64x2)
+#else
+#define OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(_Tpvec, bin_op) \
+inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
+{ \
+    fallback::_Tpvec a_(a), b_(b); \
+    return _Tpvec((a_) bin_op (b_));\
+}
+
+OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, ==)
+OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, !=)
+OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, <)
+OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, >)
+OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, <=)
+OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, >=)
+#endif
+
+#define OPENCV_HAL_IMPL_WASM_64BIT_CMP_OP(_Tpvec, cast) \
+inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
+{ return cast(v_reinterpret_as_f64(a) == v_reinterpret_as_f64(b)); } \
+inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \
+{ return cast(v_reinterpret_as_f64(a) != v_reinterpret_as_f64(b)); }
+
+OPENCV_HAL_IMPL_WASM_64BIT_CMP_OP(v_uint64x2, v_reinterpret_as_u64)
+OPENCV_HAL_IMPL_WASM_64BIT_CMP_OP(v_int64x2, v_reinterpret_as_s64)
+
+inline v_float32x4 v_not_nan(const v_float32x4& a)
+{
+    v128_t z = wasm_i32x4_splat(0x7fffffff);
+    v128_t t = wasm_i32x4_splat(0x7f800000);
+    return v_float32x4(wasm_u32x4_lt(wasm_v128_and(a.val, z), t));
+}
+inline v_float64x2 v_not_nan(const v_float64x2& a)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v128_t z = wasm_i64x2_splat(0x7fffffffffffffff);
+    v128_t t = wasm_i64x2_splat(0x7ff0000000000000);
+    return v_float64x2((__u64x2)(wasm_v128_and(a.val, z)) < (__u64x2)t);
+#else
+    fallback::v_float64x2 a_(a);
+    return fallback::v_not_nan(a_);
+#endif
+}
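+// v_not_nan masks off the sign bit and keeps lanes whose magnitude bits fall
+// below the all-ones exponent pattern (0x7f800000 for f32); NaN payloads
+// always compare at or above that threshold, so NaN lanes come out all-zero.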
+
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_add_wrap, wasm_i8x16_add)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int8x16, v_add_wrap, wasm_i8x16_add)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint16x8, v_add_wrap, wasm_i16x8_add)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int16x8, v_add_wrap, wasm_i16x8_add)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_sub_wrap, wasm_i8x16_sub)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int8x16, v_sub_wrap, wasm_i8x16_sub)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint16x8, v_sub_wrap, wasm_i16x8_sub)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int16x8, v_sub_wrap, wasm_i16x8_sub)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_mul_wrap, wasm_i8x16_mul)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int8x16, v_mul_wrap, wasm_i8x16_mul)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint16x8, v_mul_wrap, wasm_i16x8_mul)
+OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int16x8, v_mul_wrap, wasm_i16x8_mul)
+
+
+/** Absolute difference **/
+
+inline v_uint8x16 v_absdiff(const v_uint8x16& a, const v_uint8x16& b)
+{ return v_add_wrap(a - b,  b - a); }
+inline v_uint16x8 v_absdiff(const v_uint16x8& a, const v_uint16x8& b)
+{ return v_add_wrap(a - b,  b - a); }
+inline v_uint32x4 v_absdiff(const v_uint32x4& a, const v_uint32x4& b)
+{ return v_max(a, b) - v_min(a, b); }
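+// For u8/u16 the subtraction saturates at zero (see the operators above), so
+// one of (a - b), (b - a) is 0 and the other is |a - b|; their wrapping sum is
+// the absolute difference. 32-bit lanes have no saturating subtract, hence the
+// max - min form.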
+
+inline v_uint8x16 v_absdiff(const v_int8x16& a, const v_int8x16& b)
+{
+    v_int8x16 d = v_sub_wrap(a, b);
+    v_int8x16 m = a < b;
+    return v_reinterpret_as_u8(v_sub_wrap(d ^ m, m));
+}
+inline v_uint16x8 v_absdiff(const v_int16x8& a, const v_int16x8& b)
+{
+    return v_reinterpret_as_u16(v_sub_wrap(v_max(a, b), v_min(a, b)));
+}
+inline v_uint32x4 v_absdiff(const v_int32x4& a, const v_int32x4& b)
+{
+    v_int32x4 d = a - b;
+    v_int32x4 m = a < b;
+    return v_reinterpret_as_u32((d ^ m) - m);
+}
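+// The (d ^ m) - m idiom above conditionally negates d: where the mask m is
+// all-ones, d ^ m == ~d and ~d - (-1) == -d; where m is zero, d is unchanged.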
+
+/** Saturating absolute difference **/
+inline v_int8x16 v_absdiffs(const v_int8x16& a, const v_int8x16& b)
+{
+    v_int8x16 d = a - b;
+    v_int8x16 m = a < b;
+    return (d ^ m) - m;
+}
+inline v_int16x8 v_absdiffs(const v_int16x8& a, const v_int16x8& b)
+{ return v_max(a, b) - v_min(a, b); }
+
+
+inline v_int32x4 v_fma(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c)
+{
+    return a * b + c;
+}
+
+inline v_int32x4 v_muladd(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c)
+{
+    return v_fma(a, b, c);
+}
+
+inline v_float32x4 v_fma(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c)
+{
+    return a * b + c;
+}
+
+inline v_float64x2 v_fma(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c)
+{
+    return a * b + c;
+}
+
+inline v_float32x4 v_absdiff(const v_float32x4& a, const v_float32x4& b)
+{
+    v128_t absmask_vec = wasm_i32x4_splat(0x7fffffff);
+    return v_float32x4(wasm_v128_and(wasm_f32x4_sub(a.val, b.val), absmask_vec));
+}
+inline v_float64x2 v_absdiff(const v_float64x2& a, const v_float64x2& b)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v128_t absmask_vec = wasm_u64x2_shr(wasm_i32x4_splat(-1), 1);
+    return v_float64x2(wasm_v128_and(wasm_f64x2_sub(a.val, b.val), absmask_vec));
+#else
+    fallback::v_float64x2 a_(a), b_(b);
+    return fallback::v_absdiff(a_, b_);
+#endif
+}
+
+#define OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(_Tpvec) \
+inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    fallback::_Tpvec a_(a), b_(b); \
+    return fallback::v_magnitude(a_, b_); \
+} \
+inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return v_fma(a, a, b*b); \
+} \
+inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \
+{ \
+    return v_fma(a, b, c); \
+}
+
+OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float32x4)
+OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float64x2)
+
+#define OPENCV_HAL_IMPL_WASM_SHIFT_OP(_Tpuvec, _Tpsvec, suffix, ssuffix) \
+inline _Tpuvec operator << (const _Tpuvec& a, int imm) \
+{ \
+    return _Tpuvec(wasm_##suffix##_shl(a.val, imm)); \
+} \
+inline _Tpsvec operator << (const _Tpsvec& a, int imm) \
+{ \
+    return _Tpsvec(wasm_##suffix##_shl(a.val, imm)); \
+} \
+inline _Tpuvec operator >> (const _Tpuvec& a, int imm) \
+{ \
+    return _Tpuvec(wasm_##ssuffix##_shr(a.val, imm)); \
+} \
+inline _Tpsvec operator >> (const _Tpsvec& a, int imm) \
+{ \
+    return _Tpsvec(wasm_##suffix##_shr(a.val, imm)); \
+} \
+template<int imm> \
+inline _Tpuvec v_shl(const _Tpuvec& a) \
+{ \
+    return _Tpuvec(wasm_##suffix##_shl(a.val, imm)); \
+} \
+template<int imm> \
+inline _Tpsvec v_shl(const _Tpsvec& a) \
+{ \
+    return _Tpsvec(wasm_##suffix##_shl(a.val, imm)); \
+} \
+template<int imm> \
+inline _Tpuvec v_shr(const _Tpuvec& a) \
+{ \
+    return _Tpuvec(wasm_##ssuffix##_shr(a.val, imm)); \
+} \
+template<int imm> \
+inline _Tpsvec v_shr(const _Tpsvec& a) \
+{ \
+    return _Tpsvec(wasm_##suffix##_shr(a.val, imm)); \
+}
+
+OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint8x16, v_int8x16, i8x16, u8x16)
+OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint16x8, v_int16x8, i16x8, u16x8)
+OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint32x4, v_int32x4, i32x4, u32x4)
+
+#ifdef __wasm_unimplemented_simd128__
+OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint64x2, v_int64x2, i64x2, u64x2)
+#else
+#define OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(_Tpvec) \
+inline _Tpvec operator << (const _Tpvec& a, int imm) \
+{ \
+    fallback::_Tpvec a_(a); \
+    return a_ << imm; \
+} \
+inline _Tpvec operator >> (const _Tpvec& a, int imm) \
+{ \
+    fallback::_Tpvec a_(a); \
+    return a_ >> imm; \
+} \
+template<int imm> \
+inline _Tpvec v_shl(const _Tpvec& a) \
+{ \
+    fallback::_Tpvec a_(a); \
+    return fallback::v_shl<imm>(a_); \
+} \
+template<int imm> \
+inline _Tpvec v_shr(const _Tpvec& a) \
+{ \
+    fallback::_Tpvec a_(a); \
+    return fallback::v_shr<imm>(a_); \
+}
+
+OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(v_uint64x2)
+OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(v_int64x2)
+#endif
+
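+// Byte-align helper in the spirit of x86 PALIGNR: the partial specializations
+// dispatch on imm at compile time (0 returns a, 16 returns b, anything in
+// between selects bytes imm..imm+15 of the concatenated pair via a shuffle),
+// because wasm_v8x16_shuffle requires constant lane indices.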
+namespace hal_wasm_internal
+{
+    template <int imm,
+        bool is_invalid = ((imm < 0) || (imm > 16)),
+        bool is_first = (imm == 0),
+        bool is_second = (imm == 16),
+        bool is_other = (((imm > 0) && (imm < 16)))>
+    class v_wasm_palignr_u8_class;
+
+    template <int imm>
+    class v_wasm_palignr_u8_class<imm, true, false, false, false>;
+
+    template <int imm>
+    class v_wasm_palignr_u8_class<imm, false, true, false, false>
+    {
+    public:
+        inline v128_t operator()(const v128_t& a, const v128_t&) const
+        {
+            return a;
+        }
+    };
+
+    template <int imm>
+    class v_wasm_palignr_u8_class<imm, false, false, true, false>
+    {
+    public:
+        inline v128_t operator()(const v128_t&, const v128_t& b) const
+        {
+            return b;
+        }
+    };
+
+    template <int imm>
+    class v_wasm_palignr_u8_class<imm, false, false, false, true>
+    {
+    public:
+        inline v128_t operator()(const v128_t& a, const v128_t& b) const
+        {
+            return wasm_v8x16_shuffle(a, b,
+                                      imm, imm+1, imm+2, imm+3,
+                                      imm+4, imm+5, imm+6, imm+7,
+                                      imm+8, imm+9, imm+10, imm+11,
+                                      imm+12, imm+13, imm+14, imm+15);
+        }
+    };
+
+    template <int imm>
+    inline v128_t v_wasm_palignr_u8(const v128_t& a, const v128_t& b)
+    {
+        CV_StaticAssert((imm >= 0) && (imm <= 16), "Invalid imm for v_wasm_palignr_u8.");
+        return v_wasm_palignr_u8_class<imm>()(a, b);
+    }
+}
+
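+// Whole-vector rotates are expressed as byte aligns: rotate right shifts in
+// zeros from a zero vector, rotate left aligns from the opposite side; the
+// two-operand forms shift in lanes from the second vector instead.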
+template<int imm, typename _Tpvec>
+inline _Tpvec v_rotate_right(const _Tpvec &a)
+{
+    using namespace hal_wasm_internal;
+    enum { imm2 = (imm * sizeof(typename _Tpvec::lane_type)) };
+    v128_t z = wasm_i8x16_splat(0);
+    return _Tpvec(v_wasm_palignr_u8<imm2>(a.val, z));
+}
+
+template<int imm, typename _Tpvec>
+inline _Tpvec v_rotate_left(const _Tpvec &a)
+{
+    using namespace hal_wasm_internal;
+    enum { imm2 = ((_Tpvec::nlanes - imm) * sizeof(typename _Tpvec::lane_type)) };
+    v128_t z = wasm_i8x16_splat(0);
+    return _Tpvec(v_wasm_palignr_u8<imm2>(z, a.val));
+}
+
+template<int imm, typename _Tpvec>
+inline _Tpvec v_rotate_right(const _Tpvec &a, const _Tpvec &b)
+{
+    using namespace hal_wasm_internal;
+    enum { imm2 = (imm * sizeof(typename _Tpvec::lane_type)) };
+    return _Tpvec(v_wasm_palignr_u8<imm2>(a.val, b.val));
+}
+
+template<int imm, typename _Tpvec>
+inline _Tpvec v_rotate_left(const _Tpvec &a, const _Tpvec &b)
+{
+    using namespace hal_wasm_internal;
+    enum { imm2 = ((_Tpvec::nlanes - imm) * sizeof(typename _Tpvec::lane_type)) };
+    return _Tpvec(v_wasm_palignr_u8<imm2>(b.val, a.val));
+}
+
+#define OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(_Tpvec, _Tp) \
+inline _Tpvec v_load(const _Tp* ptr) \
+{ return _Tpvec(wasm_v128_load(ptr)); } \
+inline _Tpvec v_load_aligned(const _Tp* ptr) \
+{ return _Tpvec(wasm_v128_load(ptr)); } \
+inline _Tpvec v_load_low(const _Tp* ptr) \
+{ \
+    _Tp tmp[_Tpvec::nlanes] = {0}; \
+    for (int i=0; i<_Tpvec::nlanes/2; ++i) { \
+        tmp[i] = ptr[i]; \
+    } \
+    return _Tpvec(wasm_v128_load(tmp)); \
+} \
+inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
+{ \
+    _Tp tmp[_Tpvec::nlanes]; \
+    for (int i=0; i<_Tpvec::nlanes/2; ++i) { \
+        tmp[i] = ptr0[i]; \
+        tmp[i+_Tpvec::nlanes/2] = ptr1[i]; \
+    } \
+    return _Tpvec(wasm_v128_load(tmp)); \
+} \
+inline void v_store(_Tp* ptr, const _Tpvec& a) \
+{ wasm_v128_store(ptr, a.val); } \
+inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \
+{ wasm_v128_store(ptr, a.val); } \
+inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \
+{ wasm_v128_store(ptr, a.val); } \
+inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode /*mode*/) \
+{ \
+    wasm_v128_store(ptr, a.val); \
+} \
+inline void v_store_low(_Tp* ptr, const _Tpvec& a) \
+{ \
+    fallback::_Tpvec a_(a); \
+    fallback::v_store_low(ptr, a_); \
+} \
+inline void v_store_high(_Tp* ptr, const _Tpvec& a) \
+{ \
+    fallback::_Tpvec a_(a); \
+    fallback::v_store_high(ptr, a_); \
+}
+
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_uint8x16, uchar)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_int8x16, schar)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_uint16x8, ushort)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_int16x8, short)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_uint32x4, unsigned)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_int32x4, int)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_uint64x2, uint64)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_int64x2, int64)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_float32x4, float)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_float64x2, double)
+
+
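+// 4-lane horizontal sum: two shuffle-and-add steps fold the upper 64-bit half
+// onto the lower one and then the remaining pair of lanes, so lane 0 holds the
+// total sum.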
+#define OPENCV_HAL_IMPL_WASM_REDUCE_OP_4_SUM(_Tpvec, scalartype, regtype, suffix, esuffix) \
+inline scalartype v_reduce_sum(const _Tpvec& a) \
+{ \
+    regtype val = a.val; \
+    val = wasm_##suffix##_add(val, wasm_v8x16_shuffle(val, val, 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7)); \
+    val = wasm_##suffix##_add(val, wasm_v8x16_shuffle(val, val, 4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3)); \
+    return (scalartype)wasm_##esuffix##_extract_lane(val, 0); \
+}
+
+OPENCV_HAL_IMPL_WASM_REDUCE_OP_4_SUM(v_uint32x4, unsigned, v128_t, i32x4, i32x4)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP_4_SUM(v_int32x4, int, v128_t, i32x4, i32x4)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP_4_SUM(v_float32x4, float, v128_t, f32x4, f32x4)
+
+// TODO: Optimize v_reduce_sum for the remaining types with wasm intrinsics.
+//       The scalar fallback is used for now, as wasm intrinsics lack widening ops.
+
+#define OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(_Tpvec, scalartype) \
+inline scalartype v_reduce_sum(const _Tpvec& a) \
+{ \
+    fallback::_Tpvec a_(a); \
+    return fallback::v_reduce_sum(a_); \
+}
+
+OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint8x16, unsigned)
+OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int8x16, int)
+OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint16x8, unsigned)
+OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int16x8, int)
+OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint64x2, uint64)
+OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int64x2, int64)
+OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_float64x2, double)
+
+inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
+                                 const v_float32x4& c, const v_float32x4& d)
+{
+    v128_t ac = wasm_f32x4_add(wasm_unpacklo_i32x4(a.val, c.val), wasm_unpackhi_i32x4(a.val, c.val));
+    v128_t bd = wasm_f32x4_add(wasm_unpacklo_i32x4(b.val, d.val), wasm_unpackhi_i32x4(b.val, d.val));
+    return v_float32x4(wasm_f32x4_add(wasm_unpacklo_i32x4(ac, bd), wasm_unpackhi_i32x4(ac, bd)));
+}
+
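+// min/max reductions are done the portable way: spill the vector to a stack
+// buffer and fold it with the scalar functor.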
+#define OPENCV_HAL_IMPL_WASM_REDUCE_OP(_Tpvec, scalartype, func, scalar_func) \
+inline scalartype v_reduce_##func(const _Tpvec& a) \
+{ \
+    scalartype buf[_Tpvec::nlanes]; \
+    v_store(buf, a); \
+    scalartype tmp = buf[0]; \
+    for (int i=1; i<_Tpvec::nlanes; ++i) { \
+        tmp = scalar_func(tmp, buf[i]); \
+    } \
+    return tmp; \
+}
+
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint8x16, uchar, max, std::max)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint8x16, uchar, min, std::min)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int8x16, schar, max, std::max)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int8x16, schar, min, std::min)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint16x8, ushort, max, std::max)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint16x8, ushort, min, std::min)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int16x8, short, max, std::max)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int16x8, short, min, std::min)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint32x4, unsigned, max, std::max)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint32x4, unsigned, min, std::min)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int32x4, int, max, std::max)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int32x4, int, min, std::min)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_float32x4, float, max, std::max)
+OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_float32x4, float, min, std::min)
+
+inline unsigned v_reduce_sad(const v_uint8x16& a, const v_uint8x16& b)
+{
+    v_uint16x8 l16, h16;
+    v_uint32x4 l16_l32, l16_h32, h16_l32, h16_h32;
+    v_expand(v_absdiff(a, b), l16, h16);
+    v_expand(l16, l16_l32, l16_h32);
+    v_expand(h16, h16_l32, h16_h32);
+    return v_reduce_sum(l16_l32+l16_h32+h16_l32+h16_h32);
+}
+inline unsigned v_reduce_sad(const v_int8x16& a, const v_int8x16& b)
+{
+    v_uint16x8 l16, h16;
+    v_uint32x4 l16_l32, l16_h32, h16_l32, h16_h32;
+    v_expand(v_absdiff(a, b), l16, h16);
+    v_expand(l16, l16_l32, l16_h32);
+    v_expand(h16, h16_l32, h16_h32);
+    return v_reduce_sum(l16_l32+l16_h32+h16_l32+h16_h32);
+}
+inline unsigned v_reduce_sad(const v_uint16x8& a, const v_uint16x8& b)
+{
+    v_uint32x4 l, h;
+    v_expand(v_absdiff(a, b), l, h);
+    return v_reduce_sum(l + h);
+}
+inline unsigned v_reduce_sad(const v_int16x8& a, const v_int16x8& b)
+{
+    v_uint32x4 l, h;
+    v_expand(v_absdiff(a, b), l, h);
+    return v_reduce_sum(l + h);
+}
+inline unsigned v_reduce_sad(const v_uint32x4& a, const v_uint32x4& b)
+{
+    return v_reduce_sum(v_absdiff(a, b));
+}
+inline unsigned v_reduce_sad(const v_int32x4& a, const v_int32x4& b)
+{
+    return v_reduce_sum(v_absdiff(a, b));
+}
+inline float v_reduce_sad(const v_float32x4& a, const v_float32x4& b)
+{
+    return v_reduce_sum(v_absdiff(a, b));
+}
+
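+// SWAR popcount: sum adjacent bit pairs, then nibbles, then bytes using the
+// classic 0x55/0x33/0x0f masks; afterwards every byte holds its own bit count.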
+inline v_uint8x16 v_popcount(const v_uint8x16& a)
+{
+    v128_t m1 = wasm_i32x4_splat(0x55555555);
+    v128_t m2 = wasm_i32x4_splat(0x33333333);
+    v128_t m4 = wasm_i32x4_splat(0x0f0f0f0f);
+    v128_t p = a.val;
+    p = wasm_i32x4_add(wasm_v128_and(wasm_u32x4_shr(p, 1), m1), wasm_v128_and(p, m1));
+    p = wasm_i32x4_add(wasm_v128_and(wasm_u32x4_shr(p, 2), m2), wasm_v128_and(p, m2));
+    p = wasm_i32x4_add(wasm_v128_and(wasm_u32x4_shr(p, 4), m4), wasm_v128_and(p, m4));
+    return v_uint8x16(p);
+}
+inline v_uint16x8 v_popcount(const v_uint16x8& a)
+{
+    v_uint8x16 p = v_popcount(v_reinterpret_as_u8(a));
+    p += v_rotate_right<1>(p);
+    return v_reinterpret_as_u16(p) & v_setall_u16(0x00ff);
+}
+inline v_uint32x4 v_popcount(const v_uint32x4& a)
+{
+    v_uint8x16 p = v_popcount(v_reinterpret_as_u8(a));
+    p += v_rotate_right<1>(p);
+    p += v_rotate_right<2>(p);
+    return v_reinterpret_as_u32(p) & v_setall_u32(0x000000ff);
+}
+inline v_uint64x2 v_popcount(const v_uint64x2& a)
+{
+    fallback::v_uint64x2 a_(a);
+    return fallback::v_popcount(a_);
+}
+inline v_uint8x16 v_popcount(const v_int8x16& a)
+{ return v_popcount(v_reinterpret_as_u8(a)); }
+inline v_uint16x8 v_popcount(const v_int16x8& a)
+{ return v_popcount(v_reinterpret_as_u16(a)); }
+inline v_uint32x4 v_popcount(const v_int32x4& a)
+{ return v_popcount(v_reinterpret_as_u32(a)); }
+inline v_uint64x2 v_popcount(const v_int64x2& a)
+{ return v_popcount(v_reinterpret_as_u64(a)); }
+
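+// v_check_all/v_check_any test the per-lane sign bit with a signed compare
+// against zero; v_signmask itself still goes through the scalar fallback.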
+#define OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(_Tpvec, suffix, scalarType) \
+inline int v_signmask(const _Tpvec& a) \
+{ \
+    fallback::_Tpvec a_(a); \
+    return fallback::v_signmask(a_); \
+} \
+inline bool v_check_all(const _Tpvec& a) \
+{ return wasm_i8x16_all_true(wasm_##suffix##_lt(a.val, wasm_##suffix##_splat(0))); } \
+inline bool v_check_any(const _Tpvec& a) \
+{ return wasm_i8x16_any_true(wasm_##suffix##_lt(a.val, wasm_##suffix##_splat(0))); }
+
+OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_uint8x16, i8x16, schar)
+OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int8x16, i8x16, schar)
+OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_uint16x8, i16x8, short)
+OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int16x8, i16x8, short)
+OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_uint32x4, i32x4, int)
+OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int32x4, i32x4, int)
+OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_float32x4, i32x4, float)
+
+inline int v_signmask(const v_float64x2& a)
+{
+    fallback::v_float64x2 a_(a);
+    return fallback::v_signmask(a_);
+}
+inline bool v_check_all(const v_float64x2& a)
+{
+#ifdef __wasm_unimplemented_simd128__
+    return wasm_i8x16_all_true((__i64x2)(a.val) < (__i64x2)(wasm_i64x2_splat(0)));
+#else
+    fallback::v_float64x2 a_(a);
+    return fallback::v_check_all(a_);
+#endif
+}
+inline bool v_check_any(const v_float64x2& a)
+{
+#ifdef __wasm_unimplemented_simd128__
+    return wasm_i8x16_any_true((__i64x2)(a.val) < (__i64x2)(wasm_i64x2_splat(0)));
+#else
+    fallback::v_float64x2 a_(a);
+    return fallback::v_check_any(a_);
+#endif
+}
+
+inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
+inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
+inline int v_scan_forward(const v_int16x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 2; }
+inline int v_scan_forward(const v_uint16x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 2; }
+inline int v_scan_forward(const v_int32x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; }
+inline int v_scan_forward(const v_uint32x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; }
+inline int v_scan_forward(const v_float32x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; }
+inline int v_scan_forward(const v_int64x2& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; }
+inline int v_scan_forward(const v_uint64x2& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; }
+inline int v_scan_forward(const v_float64x2& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; }
+
+#define OPENCV_HAL_IMPL_WASM_SELECT(_Tpvec) \
+inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(wasm_v128_bitselect(a.val, b.val, mask.val)); \
+}
+
+OPENCV_HAL_IMPL_WASM_SELECT(v_uint8x16)
+OPENCV_HAL_IMPL_WASM_SELECT(v_int8x16)
+OPENCV_HAL_IMPL_WASM_SELECT(v_uint16x8)
+OPENCV_HAL_IMPL_WASM_SELECT(v_int16x8)
+OPENCV_HAL_IMPL_WASM_SELECT(v_uint32x4)
+OPENCV_HAL_IMPL_WASM_SELECT(v_int32x4)
+// OPENCV_HAL_IMPL_WASM_SELECT(v_uint64x2)
+// OPENCV_HAL_IMPL_WASM_SELECT(v_int64x2)
+OPENCV_HAL_IMPL_WASM_SELECT(v_float32x4)
+OPENCV_HAL_IMPL_WASM_SELECT(v_float64x2)
+
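+// v_expand widens the low/high halves of a vector through the v128_cvt*
+// conversion helpers; the *_high variants of those helpers convert the upper
+// half of the input.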
+#define OPENCV_HAL_IMPL_WASM_EXPAND(_Tpvec, _Tpwvec, _Tp, intrin)    \
+inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1)      \
+{                                                                    \
+    b0.val = intrin(a.val);                                          \
+    b1.val = __CV_CAT(intrin, _high)(a.val);                         \
+}                                                                    \
+inline _Tpwvec v_expand_low(const _Tpvec& a)                         \
+{ return _Tpwvec(intrin(a.val)); }                                   \
+inline _Tpwvec v_expand_high(const _Tpvec& a)                        \
+{ return _Tpwvec(__CV_CAT(intrin, _high)(a.val)); }                  \
+inline _Tpwvec v_load_expand(const _Tp* ptr)                         \
+{                                                                    \
+    v128_t a = wasm_v128_load(ptr);                                  \
+    return _Tpwvec(intrin(a));                                       \
+}
+
+OPENCV_HAL_IMPL_WASM_EXPAND(v_uint8x16, v_uint16x8, uchar, v128_cvtu8x16_i16x8)
+OPENCV_HAL_IMPL_WASM_EXPAND(v_int8x16,  v_int16x8,  schar, v128_cvti8x16_i16x8)
+OPENCV_HAL_IMPL_WASM_EXPAND(v_uint16x8, v_uint32x4, ushort, v128_cvtu16x8_i32x4)
+OPENCV_HAL_IMPL_WASM_EXPAND(v_int16x8,  v_int32x4,  short, v128_cvti16x8_i32x4)
+OPENCV_HAL_IMPL_WASM_EXPAND(v_uint32x4, v_uint64x2, unsigned, v128_cvtu32x4_i64x2)
+OPENCV_HAL_IMPL_WASM_EXPAND(v_int32x4,  v_int64x2,  int, v128_cvti32x4_i64x2)
+
+#define OPENCV_HAL_IMPL_WASM_EXPAND_Q(_Tpvec, _Tp, intrin)  \
+inline _Tpvec v_load_expand_q(const _Tp* ptr)               \
+{                                                           \
+    v128_t a = wasm_v128_load(ptr);                         \
+    return _Tpvec(intrin(a));                               \
+}
+
+OPENCV_HAL_IMPL_WASM_EXPAND_Q(v_uint32x4, uchar, v128_cvtu8x16_i32x4)
+OPENCV_HAL_IMPL_WASM_EXPAND_Q(v_int32x4, schar, v128_cvti8x16_i32x4)
+
+#define OPENCV_HAL_IMPL_WASM_UNPACKS(_Tpvec, suffix) \
+inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1) \
+{ \
+    b0.val = wasm_unpacklo_##suffix(a0.val, a1.val); \
+    b1.val = wasm_unpackhi_##suffix(a0.val, a1.val); \
+} \
+inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(wasm_unpacklo_i64x2(a.val, b.val)); \
+} \
+inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(wasm_unpackhi_i64x2(a.val, b.val)); \
+} \
+inline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d) \
+{ \
+    c.val = wasm_unpacklo_i64x2(a.val, b.val); \
+    d.val = wasm_unpackhi_i64x2(a.val, b.val); \
+}
+
+OPENCV_HAL_IMPL_WASM_UNPACKS(v_uint8x16, i8x16)
+OPENCV_HAL_IMPL_WASM_UNPACKS(v_int8x16, i8x16)
+OPENCV_HAL_IMPL_WASM_UNPACKS(v_uint16x8, i16x8)
+OPENCV_HAL_IMPL_WASM_UNPACKS(v_int16x8, i16x8)
+OPENCV_HAL_IMPL_WASM_UNPACKS(v_uint32x4, i32x4)
+OPENCV_HAL_IMPL_WASM_UNPACKS(v_int32x4, i32x4)
+OPENCV_HAL_IMPL_WASM_UNPACKS(v_float32x4, i32x4)
+OPENCV_HAL_IMPL_WASM_UNPACKS(v_float64x2, i64x2)
+
+template<int s, typename _Tpvec>
+inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b)
+{
+    return v_rotate_right<s>(a, b);
+}
+
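+// Round to nearest by adding 0.5 and truncating (round-half-up); halfway
+// cases can differ from the round-half-to-even behavior of cvRound.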
+inline v_int32x4 v_round(const v_float32x4& a)
+{
+    v128_t h = wasm_f32x4_splat(0.5);
+    return v_int32x4(wasm_trunc_saturate_i32x4_f32x4(wasm_f32x4_add(a.val, h)));
+}
+
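+// floor/ceil via truncate-and-correct: the comparison mask is all-ones (-1)
+// exactly in the lanes where truncation moved the value the wrong way, so
+// adding (resp. subtracting) the mask adjusts those lanes by one.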
+inline v_int32x4 v_floor(const v_float32x4& a)
+{
+    v128_t a1 = wasm_trunc_saturate_i32x4_f32x4(a.val);
+    v128_t mask = wasm_f32x4_lt(a.val, wasm_convert_f32x4_i32x4(a1));
+    return v_int32x4(wasm_i32x4_add(a1, mask));
+}
+
+inline v_int32x4 v_ceil(const v_float32x4& a)
+{
+    v128_t a1 = wasm_trunc_saturate_i32x4_f32x4(a.val);
+    v128_t mask = wasm_f32x4_gt(a.val, wasm_convert_f32x4_i32x4(a1));
+    return v_int32x4(wasm_i32x4_sub(a1, mask));
+}
+
+inline v_int32x4 v_trunc(const v_float32x4& a)
+{ return v_int32x4(wasm_trunc_saturate_i32x4_f32x4(a.val)); }
+
+#define OPENCV_HAL_IMPL_WASM_MATH_FUNC(func, cfunc, _Tpvec, _Tpnvec, _Tp, _Tpn) \
+inline _Tpnvec func(const _Tpvec& a) \
+{ \
+    fallback::_Tpvec a_(a); \
+    return fallback::func(a_); \
+}
+
+OPENCV_HAL_IMPL_WASM_MATH_FUNC(v_round, cvRound, v_float64x2, v_int32x4, double, int)
+OPENCV_HAL_IMPL_WASM_MATH_FUNC(v_floor, cvFloor, v_float64x2, v_int32x4, double, int)
+OPENCV_HAL_IMPL_WASM_MATH_FUNC(v_ceil, cvCeil, v_float64x2, v_int32x4, double, int)
+OPENCV_HAL_IMPL_WASM_MATH_FUNC(v_trunc, int, v_float64x2, v_int32x4, double, int)
+
+inline v_int32x4 v_round(const v_float64x2& a, const v_float64x2& b)
+{
+    fallback::v_float64x2 a_(a), b_(b);
+    return fallback::v_round(a_, b_);
+}
+
+#define OPENCV_HAL_IMPL_WASM_TRANSPOSE4x4(_Tpvec, suffix) \
+inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \
+                           const _Tpvec& a2, const _Tpvec& a3, \
+                           _Tpvec& b0, _Tpvec& b1, \
+                           _Tpvec& b2, _Tpvec& b3) \
+{ \
+    v128_t t0 = wasm_unpacklo_##suffix(a0.val, a1.val); \
+    v128_t t1 = wasm_unpacklo_##suffix(a2.val, a3.val); \
+    v128_t t2 = wasm_unpackhi_##suffix(a0.val, a1.val); \
+    v128_t t3 = wasm_unpackhi_##suffix(a2.val, a3.val); \
+\
+    b0.val = wasm_unpacklo_i64x2(t0, t1); \
+    b1.val = wasm_unpackhi_i64x2(t0, t1); \
+    b2.val = wasm_unpacklo_i64x2(t2, t3); \
+    b3.val = wasm_unpackhi_i64x2(t2, t3); \
+}
+
+OPENCV_HAL_IMPL_WASM_TRANSPOSE4x4(v_uint32x4, i32x4)
+OPENCV_HAL_IMPL_WASM_TRANSPOSE4x4(v_int32x4, i32x4)
+OPENCV_HAL_IMPL_WASM_TRANSPOSE4x4(v_float32x4, i32x4)
+
+// load deinterleave
+inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b)
+{
+    v128_t t00 = wasm_v128_load(ptr);
+    v128_t t01 = wasm_v128_load(ptr + 16);
+
+    a.val = wasm_v8x16_shuffle(t00, t01, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30);
+    b.val = wasm_v8x16_shuffle(t00, t01, 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31);
+}
+
+inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c)
+{
+    v128_t t00 = wasm_v128_load(ptr);
+    v128_t t01 = wasm_v128_load(ptr + 16);
+    v128_t t02 = wasm_v128_load(ptr + 32);
+
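+    // two-stage gather: pick every 3rd byte out of the first two loads, then
+    // complete each channel from the third load with a second shuffle.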
+    v128_t t10 = wasm_v8x16_shuffle(t00, t01, 0,3,6,9,12,15,18,21,24,27,30,1,2,4,5,7);
+    v128_t t11 = wasm_v8x16_shuffle(t00, t01, 1,4,7,10,13,16,19,22,25,28,31,0,2,3,5,6);
+    v128_t t12 = wasm_v8x16_shuffle(t00, t01, 2,5,8,11,14,17,20,23,26,29,0,1,3,4,6,7);
+
+    a.val = wasm_v8x16_shuffle(t10, t02, 0,1,2,3,4,5,6,7,8,9,10,17,20,23,26,29);
+    b.val = wasm_v8x16_shuffle(t11, t02, 0,1,2,3,4,5,6,7,8,9,10,18,21,24,27,30);
+    c.val = wasm_v8x16_shuffle(t12, t02, 0,1,2,3,4,5,6,7,8,9,16,19,22,25,28,31);
+}
+
+inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c, v_uint8x16& d)
+{
+    v128_t u0 = wasm_v128_load(ptr); // a0 b0 c0 d0 a1 b1 c1 d1 ...
+    v128_t u1 = wasm_v128_load(ptr + 16); // a4 b4 c4 d4 ...
+    v128_t u2 = wasm_v128_load(ptr + 32); // a8 b8 c8 d8 ...
+    v128_t u3 = wasm_v128_load(ptr + 48); // a12 b12 c12 d12 ...
+
+    v128_t v0 = wasm_v8x16_shuffle(u0, u1, 0,4,8,12,16,20,24,28,1,5,9,13,17,21,25,29);
+    v128_t v1 = wasm_v8x16_shuffle(u2, u3, 0,4,8,12,16,20,24,28,1,5,9,13,17,21,25,29);
+    v128_t v2 = wasm_v8x16_shuffle(u0, u1, 2,6,10,14,18,22,26,30,3,7,11,15,19,23,27,31);
+    v128_t v3 = wasm_v8x16_shuffle(u2, u3, 2,6,10,14,18,22,26,30,3,7,11,15,19,23,27,31);
+
+    a.val = wasm_v8x16_shuffle(v0, v1, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23);
+    b.val = wasm_v8x16_shuffle(v0, v1, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31);
+    c.val = wasm_v8x16_shuffle(v2, v3, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23);
+    d.val = wasm_v8x16_shuffle(v2, v3, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31);
+}
+
+inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b)
+{
+    v128_t v0 = wasm_v128_load(ptr);     // a0 b0 a1 b1 a2 b2 a3 b3
+    v128_t v1 = wasm_v128_load(ptr + 8); // a4 b4 a5 b5 a6 b6 a7 b7
+
+    a.val = wasm_v8x16_shuffle(v0, v1, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29); // a0 a1 a2 a3 a4 a5 a6 a7
+    b.val = wasm_v8x16_shuffle(v0, v1, 2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31); // b0 b1 b2 b3 b4 b5 b6 b7
+}
+
+inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b, v_uint16x8& c)
+{
+    v128_t t00 = wasm_v128_load(ptr);        // a0 b0 c0 a1 b1 c1 a2 b2
+    v128_t t01 = wasm_v128_load(ptr + 8);    // c2 a3 b3 c3 a4 b4 c4 a5
+    v128_t t02 = wasm_v128_load(ptr + 16);  // b5 c5 a6 b6 c6 a7 b7 c7
+
+    v128_t t10 = wasm_v8x16_shuffle(t00, t01, 0,1,6,7,12,13,18,19,24,25,30,31,2,3,4,5);
+    v128_t t11 = wasm_v8x16_shuffle(t00, t01, 2,3,8,9,14,15,20,21,26,27,0,1,4,5,6,7);
+    v128_t t12 = wasm_v8x16_shuffle(t00, t01, 4,5,10,11,16,17,22,23,28,29,0,1,2,3,6,7);
+
+    a.val = wasm_v8x16_shuffle(t10, t02, 0,1,2,3,4,5,6,7,8,9,10,11,20,21,26,27);
+    b.val = wasm_v8x16_shuffle(t11, t02, 0,1,2,3,4,5,6,7,8,9,16,17,22,23,28,29);
+    c.val = wasm_v8x16_shuffle(t12, t02, 0,1,2,3,4,5,6,7,8,9,18,19,24,25,30,31);
+}
+
+inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b, v_uint16x8& c, v_uint16x8& d)
+{
+    v128_t u0 = wasm_v128_load(ptr); // a0 b0 c0 d0 a1 b1 c1 d1
+    v128_t u1 = wasm_v128_load(ptr + 8); // a2 b2 c2 d2 ...
+    v128_t u2 = wasm_v128_load(ptr + 16); // a4 b4 c4 d4 ...
+    v128_t u3 = wasm_v128_load(ptr + 24); // a6 b6 c6 d6 ...
+
+    v128_t v0 = wasm_v8x16_shuffle(u0, u1, 0,1,8,9,16,17,24,25,2,3,10,11,18,19,26,27); // a0 a1 a2 a3 b0 b1 b2 b3
+    v128_t v1 = wasm_v8x16_shuffle(u2, u3, 0,1,8,9,16,17,24,25,2,3,10,11,18,19,26,27); // a4 a5 a6 a7 b4 b5 b6 b7
+    v128_t v2 = wasm_v8x16_shuffle(u0, u1, 4,5,12,13,20,21,28,29,6,7,14,15,22,23,30,31); // c0 c1 c2 c3 d0 d1 d2 d3
+    v128_t v3 = wasm_v8x16_shuffle(u2, u3, 4,5,12,13,20,21,28,29,6,7,14,15,22,23,30,31); // c4 c5 c6 c7 d4 d5 d6 d7
+
+    a.val = wasm_v8x16_shuffle(v0, v1, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23);
+    b.val = wasm_v8x16_shuffle(v0, v1, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31);
+    c.val = wasm_v8x16_shuffle(v2, v3, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23);
+    d.val = wasm_v8x16_shuffle(v2, v3, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31);
+}
+
+inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b)
+{
+    v128_t v0 = wasm_v128_load(ptr);     // a0 b0 a1 b1
+    v128_t v1 = wasm_v128_load(ptr + 4); // a2 b2 a3 b3
+
+    a.val = wasm_v8x16_shuffle(v0, v1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27); // a0 a1 a2 a3
+    b.val = wasm_v8x16_shuffle(v0, v1, 4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31); // b0 b1 b2 b3
+}
+
+inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b, v_uint32x4& c)
+{
+    v128_t t00 = wasm_v128_load(ptr);      // a0 b0 c0 a1
+    v128_t t01 = wasm_v128_load(ptr + 4);  // b1 c1 a2 b2
+    v128_t t02 = wasm_v128_load(ptr + 8);  // c2 a3 b3 c3
+
+    v128_t t10 = wasm_v8x16_shuffle(t00, t01, 0,1,2,3,12,13,14,15,24,25,26,27,4,5,6,7);
+    v128_t t11 = wasm_v8x16_shuffle(t00, t01, 4,5,6,7,16,17,18,19,28,29,30,31,0,1,2,3);
+    v128_t t12 = wasm_v8x16_shuffle(t00, t01, 8,9,10,11,20,21,22,23,0,1,2,3,4,5,6,7);
+
+    a.val = wasm_v8x16_shuffle(t10, t02, 0,1,2,3,4,5,6,7,8,9,10,11,20,21,22,23);
+    b.val = wasm_v8x16_shuffle(t11, t02, 0,1,2,3,4,5,6,7,8,9,10,11,24,25,26,27);
+    c.val = wasm_v8x16_shuffle(t12, t02, 0,1,2,3,4,5,6,7,16,17,18,19,28,29,30,31);
+}
+
+inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b, v_uint32x4& c, v_uint32x4& d)
+{
+    v_uint32x4 s0(wasm_v128_load(ptr));      // a0 b0 c0 d0
+    v_uint32x4 s1(wasm_v128_load(ptr + 4));  // a1 b1 c1 d1
+    v_uint32x4 s2(wasm_v128_load(ptr + 8));  // a2 b2 c2 d2
+    v_uint32x4 s3(wasm_v128_load(ptr + 12)); // a3 b3 c3 d3
+
+    v_transpose4x4(s0, s1, s2, s3, a, b, c, d);
+}
+
+inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b)
+{
+    v128_t v0 = wasm_v128_load(ptr);       // a0 b0 a1 b1
+    v128_t v1 = wasm_v128_load((ptr + 4)); // a2 b2 a3 b3
+
+    a.val = wasm_v8x16_shuffle(v0, v1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27); // a0 a1 a2 a3
+    b.val = wasm_v8x16_shuffle(v0, v1, 4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31); // b0 b1 b2 b3
+}
+
+inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b, v_float32x4& c)
+{
+    v128_t t00 = wasm_v128_load(ptr);      // a0 b0 c0 a1
+    v128_t t01 = wasm_v128_load(ptr + 4);  // b1 c1 a2 b2
+    v128_t t02 = wasm_v128_load(ptr + 8);  // c2 a3 b3 c3
+
+    v128_t t10 = wasm_v8x16_shuffle(t00, t01, 0,1,2,3,12,13,14,15,24,25,26,27,4,5,6,7);
+    v128_t t11 = wasm_v8x16_shuffle(t00, t01, 4,5,6,7,16,17,18,19,28,29,30,31,0,1,2,3);
+    v128_t t12 = wasm_v8x16_shuffle(t00, t01, 8,9,10,11,20,21,22,23,0,1,2,3,4,5,6,7);
+
+    a.val = wasm_v8x16_shuffle(t10, t02, 0,1,2,3,4,5,6,7,8,9,10,11,20,21,22,23);
+    b.val = wasm_v8x16_shuffle(t11, t02, 0,1,2,3,4,5,6,7,8,9,10,11,24,25,26,27);
+    c.val = wasm_v8x16_shuffle(t12, t02, 0,1,2,3,4,5,6,7,16,17,18,19,28,29,30,31);
+}
+
+inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b, v_float32x4& c, v_float32x4& d)
+{
+    v_float32x4 s0(wasm_v128_load(ptr));      // a0 b0 c0 d0
+    v_float32x4 s1(wasm_v128_load(ptr + 4));  // a1 b1 c1 d1
+    v_float32x4 s2(wasm_v128_load(ptr + 8));  // a2 b2 c2 d2
+    v_float32x4 s3(wasm_v128_load(ptr + 12)); // a3 b3 c3 d3
+
+    v_transpose4x4(s0, s1, s2, s3, a, b, c, d);
+}
+
+inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a, v_uint64x2& b)
+{
+    v128_t t0 = wasm_v128_load(ptr);      // a0 b0
+    v128_t t1 = wasm_v128_load(ptr + 2);  // a1 b1
+
+    a.val = wasm_unpacklo_i64x2(t0, t1);
+    b.val = wasm_unpackhi_i64x2(t0, t1);
+}
+
+inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a, v_uint64x2& b, v_uint64x2& c)
+{
+    v128_t t0 = wasm_v128_load(ptr);     // a0, b0
+    v128_t t1 = wasm_v128_load(ptr + 2); // c0, a1
+    v128_t t2 = wasm_v128_load(ptr + 4); // b1, c1
+
+    a.val = wasm_v8x16_shuffle(t0, t1, 0,1,2,3,4,5,6,7,24,25,26,27,28,29,30,31);
+    b.val = wasm_v8x16_shuffle(t0, t2, 8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23);
+    c.val = wasm_v8x16_shuffle(t1, t2, 0,1,2,3,4,5,6,7,24,25,26,27,28,29,30,31);
+}
+
+inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a,
+                                v_uint64x2& b, v_uint64x2& c, v_uint64x2& d)
+{
+    v128_t t0 = wasm_v128_load(ptr);     // a0 b0
+    v128_t t1 = wasm_v128_load(ptr + 2); // c0 d0
+    v128_t t2 = wasm_v128_load(ptr + 4); // a1 b1
+    v128_t t3 = wasm_v128_load(ptr + 6); // c1 d1
+
+    a.val = wasm_unpacklo_i64x2(t0, t2);
+    b.val = wasm_unpackhi_i64x2(t0, t2);
+    c.val = wasm_unpacklo_i64x2(t1, t3);
+    d.val = wasm_unpackhi_i64x2(t1, t3);
+}
+
+// store interleave
+
+inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b,
+                                hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t v0 = wasm_unpacklo_i8x16(a.val, b.val);
+    v128_t v1 = wasm_unpackhi_i8x16(a.val, b.val);
+
+    wasm_v128_store(ptr, v0);
+    wasm_v128_store(ptr + 16, v1);
+}
+
+inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b,
+                                const v_uint8x16& c, hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
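+    // two-stage scatter: interleave a and b leaving gap lanes (the 0s below
+    // are placeholders), then patch channel c into the gaps with a second
+    // shuffle.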
+    v128_t t00 = wasm_v8x16_shuffle(a.val, b.val, 0,16,0,1,17,0,2,18,0,3,19,0,4,20,0,5);
+    v128_t t01 = wasm_v8x16_shuffle(a.val, b.val, 21,0,6,22,0,7,23,0,8,24,0,9,25,0,10,26);
+    v128_t t02 = wasm_v8x16_shuffle(a.val, b.val, 0,11,27,0,12,28,0,13,29,0,14,30,0,15,31,0);
+
+    v128_t t10 = wasm_v8x16_shuffle(t00, c.val, 0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15);
+    v128_t t11 = wasm_v8x16_shuffle(t01, c.val, 0,21,2,3,22,5,6,23,8,9,24,11,12,25,14,15);
+    v128_t t12 = wasm_v8x16_shuffle(t02, c.val, 26,1,2,27,4,5,28,7,8,29,10,11,30,13,14,31);
+
+    wasm_v128_store(ptr, t10);
+    wasm_v128_store(ptr + 16, t11);
+    wasm_v128_store(ptr + 32, t12);
+}
+
+inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b,
+                                const v_uint8x16& c, const v_uint8x16& d,
+                                hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    // a0 a1 a2 a3 ....
+    // b0 b1 b2 b3 ....
+    // c0 c1 c2 c3 ....
+    // d0 d1 d2 d3 ....
+    v128_t u0 = wasm_unpacklo_i8x16(a.val, c.val); // a0 c0 a1 c1 ...
+    v128_t u1 = wasm_unpackhi_i8x16(a.val, c.val); // a8 c8 a9 c9 ...
+    v128_t u2 = wasm_unpacklo_i8x16(b.val, d.val); // b0 d0 b1 d1 ...
+    v128_t u3 = wasm_unpackhi_i8x16(b.val, d.val); // b8 d8 b9 d9 ...
+
+    v128_t v0 = wasm_unpacklo_i8x16(u0, u2); // a0 b0 c0 d0 ...
+    v128_t v1 = wasm_unpackhi_i8x16(u0, u2); // a4 b4 c4 d4 ...
+    v128_t v2 = wasm_unpacklo_i8x16(u1, u3); // a8 b8 c8 d8 ...
+    v128_t v3 = wasm_unpackhi_i8x16(u1, u3); // a12 b12 c12 d12 ...
+
+    wasm_v128_store(ptr, v0);
+    wasm_v128_store(ptr + 16, v1);
+    wasm_v128_store(ptr + 32, v2);
+    wasm_v128_store(ptr + 48, v3);
+}
+
+inline void v_store_interleave( ushort* ptr, const v_uint16x8& a, const v_uint16x8& b,
+                                hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t v0 = wasm_unpacklo_i16x8(a.val, b.val);
+    v128_t v1 = wasm_unpackhi_i16x8(a.val, b.val);
+
+    wasm_v128_store(ptr, v0);
+    wasm_v128_store(ptr + 8, v1);
+}
+
+inline void v_store_interleave( ushort* ptr, const v_uint16x8& a,
+                                const v_uint16x8& b, const v_uint16x8& c,
+                                hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t t00 = wasm_v8x16_shuffle(a.val, b.val, 0,1,16,17,0,0,2,3,18,19,0,0,4,5,20,21);
+    v128_t t01 = wasm_v8x16_shuffle(a.val, b.val, 0,0,6,7,22,23,0,0,8,9,24,25,0,0,10,11);
+    v128_t t02 = wasm_v8x16_shuffle(a.val, b.val, 26,27,0,0,12,13,28,29,0,0,14,15,30,31,0,0);
+
+    v128_t t10 = wasm_v8x16_shuffle(t00, c.val, 0,1,2,3,16,17,6,7,8,9,18,19,12,13,14,15);
+    v128_t t11 = wasm_v8x16_shuffle(t01, c.val, 20,21,2,3,4,5,22,23,8,9,10,11,24,25,14,15);
+    v128_t t12 = wasm_v8x16_shuffle(t02, c.val, 0,1,26,27,4,5,6,7,28,29,10,11,12,13,30,31);
+
+    wasm_v128_store(ptr, t10);
+    wasm_v128_store(ptr + 8, t11);
+    wasm_v128_store(ptr + 16, t12);
+}
+
+inline void v_store_interleave( ushort* ptr, const v_uint16x8& a, const v_uint16x8& b,
+                                const v_uint16x8& c, const v_uint16x8& d,
+                                hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    // a0 a1 a2 a3 ....
+    // b0 b1 b2 b3 ....
+    // c0 c1 c2 c3 ....
+    // d0 d1 d2 d3 ....
+    v128_t u0 = wasm_unpacklo_i16x8(a.val, c.val); // a0 c0 a1 c1 ...
+    v128_t u1 = wasm_unpackhi_i16x8(a.val, c.val); // a4 c4 a5 c5 ...
+    v128_t u2 = wasm_unpacklo_i16x8(b.val, d.val); // b0 d0 b1 d1 ...
+    v128_t u3 = wasm_unpackhi_i16x8(b.val, d.val); // b4 d4 b5 d5 ...
+
+    v128_t v0 = wasm_unpacklo_i16x8(u0, u2); // a0 b0 c0 d0 ...
+    v128_t v1 = wasm_unpackhi_i16x8(u0, u2); // a2 b2 c2 d2 ...
+    v128_t v2 = wasm_unpacklo_i16x8(u1, u3); // a4 b4 c4 d4 ...
+    v128_t v3 = wasm_unpackhi_i16x8(u1, u3); // a6 b6 c6 d6 ...
+
+    wasm_v128_store(ptr, v0);
+    wasm_v128_store(ptr + 8, v1);
+    wasm_v128_store(ptr + 16, v2);
+    wasm_v128_store(ptr + 24, v3);
+}
+
+inline void v_store_interleave( unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b,
+                                hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t v0 = wasm_unpacklo_i32x4(a.val, b.val);
+    v128_t v1 = wasm_unpackhi_i32x4(a.val, b.val);
+
+    wasm_v128_store(ptr, v0);
+    wasm_v128_store(ptr + 4, v1);
+}
+
+inline void v_store_interleave( unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b,
+                                const v_uint32x4& c, hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t t00 = wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,16,17,18,19,0,0,0,0,4,5,6,7);
+    v128_t t01 = wasm_v8x16_shuffle(a.val, b.val, 20,21,22,23,0,0,0,0,8,9,10,11,24,25,26,27);
+    v128_t t02 = wasm_v8x16_shuffle(a.val, b.val, 0,0,0,0,12,13,14,15,28,29,30,31,0,0,0,0);
+
+    v128_t t10 = wasm_v8x16_shuffle(t00, c.val, 0,1,2,3,4,5,6,7,16,17,18,19,12,13,14,15);
+    v128_t t11 = wasm_v8x16_shuffle(t01, c.val, 0,1,2,3,20,21,22,23,8,9,10,11,12,13,14,15);
+    v128_t t12 = wasm_v8x16_shuffle(t02, c.val, 24,25,26,27,4,5,6,7,8,9,10,11,28,29,30,31);
+
+    wasm_v128_store(ptr, t10);
+    wasm_v128_store(ptr + 4, t11);
+    wasm_v128_store(ptr + 8, t12);
+}
+
+inline void v_store_interleave(unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b,
+                               const v_uint32x4& c, const v_uint32x4& d,
+                               hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v_uint32x4 v0, v1, v2, v3;
+    v_transpose4x4(a, b, c, d, v0, v1, v2, v3);
+
+    wasm_v128_store(ptr, v0.val);
+    wasm_v128_store(ptr + 4, v1.val);
+    wasm_v128_store(ptr + 8, v2.val);
+    wasm_v128_store(ptr + 12, v3.val);
+}
+
+// 2-channel, float only
+inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b,
+                               hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t v0 = wasm_unpacklo_i32x4(a.val, b.val);
+    v128_t v1 = wasm_unpackhi_i32x4(a.val, b.val);
+
+    wasm_v128_store(ptr, v0);
+    wasm_v128_store(ptr + 4, v1);
+}
+
+inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b,
+                               const v_float32x4& c, hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t t00 = wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,16,17,18,19,0,0,0,0,4,5,6,7);
+    v128_t t01 = wasm_v8x16_shuffle(a.val, b.val, 20,21,22,23,0,0,0,0,8,9,10,11,24,25,26,27);
+    v128_t t02 = wasm_v8x16_shuffle(a.val, b.val, 0,0,0,0,12,13,14,15,28,29,30,31,0,0,0,0);
+
+    v128_t t10 = wasm_v8x16_shuffle(t00, c.val, 0,1,2,3,4,5,6,7,16,17,18,19,12,13,14,15);
+    v128_t t11 = wasm_v8x16_shuffle(t01, c.val, 0,1,2,3,20,21,22,23,8,9,10,11,12,13,14,15);
+    v128_t t12 = wasm_v8x16_shuffle(t02, c.val, 24,25,26,27,4,5,6,7,8,9,10,11,28,29,30,31);
+
+    wasm_v128_store(ptr, t10);
+    wasm_v128_store(ptr + 4, t11);
+    wasm_v128_store(ptr + 8, t12);
+}
+
+inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b,
+                               const v_float32x4& c, const v_float32x4& d,
+                               hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v_float32x4 v0, v1, v2, v3;
+    v_transpose4x4(a, b, c, d, v0, v1, v2, v3);
+
+    wasm_v128_store(ptr, v0.val);
+    wasm_v128_store(ptr + 4, v1.val);
+    wasm_v128_store(ptr + 8, v2.val);
+    wasm_v128_store(ptr + 12, v3.val);
+}
+
+inline void v_store_interleave(uint64 *ptr, const v_uint64x2& a, const v_uint64x2& b,
+                               hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t v0 = wasm_unpacklo_i64x2(a.val, b.val);
+    v128_t v1 = wasm_unpackhi_i64x2(a.val, b.val);
+
+    wasm_v128_store(ptr, v0);
+    wasm_v128_store(ptr + 2, v1);
+}
+
+inline void v_store_interleave(uint64 *ptr, const v_uint64x2& a, const v_uint64x2& b,
+                               const v_uint64x2& c, hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t v0 = wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23);
+    v128_t v1 = wasm_v8x16_shuffle(a.val, c.val, 16,17,18,19,20,21,22,23,8,9,10,11,12,13,14,15);
+    v128_t v2 = wasm_v8x16_shuffle(b.val, c.val, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31);
+
+    wasm_v128_store(ptr, v0);
+    wasm_v128_store(ptr + 2, v1);
+    wasm_v128_store(ptr + 4, v2);
+}
+
+inline void v_store_interleave(uint64 *ptr, const v_uint64x2& a, const v_uint64x2& b,
+                               const v_uint64x2& c, const v_uint64x2& d,
+                               hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED)
+{
+    v128_t v0 = wasm_unpacklo_i64x2(a.val, b.val);
+    v128_t v1 = wasm_unpacklo_i64x2(c.val, d.val);
+    v128_t v2 = wasm_unpackhi_i64x2(a.val, b.val);
+    v128_t v3 = wasm_unpackhi_i64x2(c.val, d.val);
+
+    wasm_v128_store(ptr, v0);
+    wasm_v128_store(ptr + 2, v1);
+    wasm_v128_store(ptr + 4, v2);
+    wasm_v128_store(ptr + 6, v3);
+}
+
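+// The signed and double variants reuse the unsigned implementations above
+// through bit-exact reinterpret casts.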
+#define OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(_Tpvec0, _Tp0, suffix0, _Tpvec1, _Tp1, suffix1) \
+inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0 ) \
+{ \
+    _Tpvec1 a1, b1; \
+    v_load_deinterleave((const _Tp1*)ptr, a1, b1); \
+    a0 = v_reinterpret_as_##suffix0(a1); \
+    b0 = v_reinterpret_as_##suffix0(b1); \
+} \
+inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0 ) \
+{ \
+    _Tpvec1 a1, b1, c1; \
+    v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1); \
+    a0 = v_reinterpret_as_##suffix0(a1); \
+    b0 = v_reinterpret_as_##suffix0(b1); \
+    c0 = v_reinterpret_as_##suffix0(c1); \
+} \
+inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0, _Tpvec0& d0 ) \
+{ \
+    _Tpvec1 a1, b1, c1, d1; \
+    v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1, d1); \
+    a0 = v_reinterpret_as_##suffix0(a1); \
+    b0 = v_reinterpret_as_##suffix0(b1); \
+    c0 = v_reinterpret_as_##suffix0(c1); \
+    d0 = v_reinterpret_as_##suffix0(d1); \
+} \
+inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \
+                                hal::StoreMode mode = hal::STORE_UNALIGNED ) \
+{ \
+    _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \
+    _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \
+    v_store_interleave((_Tp1*)ptr, a1, b1, mode);      \
+} \
+inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \
+                                const _Tpvec0& c0, hal::StoreMode mode = hal::STORE_UNALIGNED ) \
+{ \
+    _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \
+    _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \
+    _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \
+    v_store_interleave((_Tp1*)ptr, a1, b1, c1, mode);  \
+} \
+inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \
+                                const _Tpvec0& c0, const _Tpvec0& d0, \
+                                hal::StoreMode mode = hal::STORE_UNALIGNED ) \
+{ \
+    _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \
+    _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \
+    _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \
+    _Tpvec1 d1 = v_reinterpret_as_##suffix1(d0); \
+    v_store_interleave((_Tp1*)ptr, a1, b1, c1, d1, mode); \
+}
+
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_int8x16, schar, s8, v_uint8x16, uchar, u8)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_int16x8, short, s16, v_uint16x8, ushort, u16)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_int32x4, int, s32, v_uint32x4, unsigned, u32)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_int64x2, int64, s64, v_uint64x2, uint64, u64)
+OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_float64x2, double, f64, v_uint64x2, uint64, u64)
+
+inline v_float32x4 v_cvt_f32(const v_int32x4& a)
+{
+    return v_float32x4(wasm_convert_f32x4_i32x4(a.val));
+}
+
+inline v_float32x4 v_cvt_f32(const v_float64x2& a)
+{
+    fallback::v_float64x2 a_(a);
+    return fallback::v_cvt_f32(a_);
+}
+
+inline v_float32x4 v_cvt_f32(const v_float64x2& a, const v_float64x2& b)
+{
+    fallback::v_float64x2 a_(a), b_(b);
+    return fallback::v_cvt_f32(a_, b_);
+}
+
+inline v_float64x2 v_cvt_f64(const v_int32x4& a)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v128_t p = v128_cvti32x4_i64x2(a.val);
+    return v_float64x2(wasm_convert_f64x2_i64x2(p));
+#else
+    fallback::v_int32x4 a_(a);
+    return fallback::v_cvt_f64(a_);
+#endif
+}
+
+inline v_float64x2 v_cvt_f64_high(const v_int32x4& a)
+{
+#ifdef __wasm_unimplemented_simd128__
+    v128_t p = v128_cvti32x4_i64x2_high(a.val);
+    return v_float64x2(wasm_convert_f64x2_i64x2(p));
+#else
+    fallback::v_int32x4 a_(a);
+    return fallback::v_cvt_f64_high(a_);
+#endif
+}
+
+inline v_float64x2 v_cvt_f64(const v_float32x4& a)
+{
+    fallback::v_float32x4 a_(a);
+    return fallback::v_cvt_f64(a_);
+}
+
+inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
+{
+    fallback::v_float32x4 a_(a);
+    return fallback::v_cvt_f64_high(a_);
+}
+
+////////////// Lookup table access ////////////////////
+
+inline v_int8x16 v_lut(const schar* tab, const int* idx)
+{
+    return v_int8x16(tab[idx[0]], tab[idx[1]], tab[idx[ 2]], tab[idx[ 3]], tab[idx[ 4]], tab[idx[ 5]], tab[idx[ 6]], tab[idx[ 7]],
+                     tab[idx[8]], tab[idx[9]], tab[idx[10]], tab[idx[11]], tab[idx[12]], tab[idx[13]], tab[idx[14]], tab[idx[15]]);
+}
+inline v_int8x16 v_lut_pairs(const schar* tab, const int* idx)
+{
+    return v_int8x16(tab[idx[0]], tab[idx[0]+1], tab[idx[1]], tab[idx[1]+1], tab[idx[2]], tab[idx[2]+1], tab[idx[3]], tab[idx[3]+1],
+                     tab[idx[4]], tab[idx[4]+1], tab[idx[5]], tab[idx[5]+1], tab[idx[6]], tab[idx[6]+1], tab[idx[7]], tab[idx[7]+1]);
+}
+inline v_int8x16 v_lut_quads(const schar* tab, const int* idx)
+{
+    return v_int8x16(tab[idx[0]], tab[idx[0]+1], tab[idx[0]+2], tab[idx[0]+3], tab[idx[1]], tab[idx[1]+1], tab[idx[1]+2], tab[idx[1]+3],
+                     tab[idx[2]], tab[idx[2]+1], tab[idx[2]+2], tab[idx[2]+3], tab[idx[3]], tab[idx[3]+1], tab[idx[3]+2], tab[idx[3]+3]);
+}
+inline v_uint8x16 v_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut((const schar *)tab, idx)); }
+inline v_uint8x16 v_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_pairs((const schar *)tab, idx)); }
+inline v_uint8x16 v_lut_quads(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_quads((const schar *)tab, idx)); }
+
+inline v_int16x8 v_lut(const short* tab, const int* idx)
+{
+    return v_int16x8(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]],
+                     tab[idx[4]], tab[idx[5]], tab[idx[6]], tab[idx[7]]);
+}
+inline v_int16x8 v_lut_pairs(const short* tab, const int* idx)
+{
+    return v_int16x8(tab[idx[0]], tab[idx[0]+1], tab[idx[1]], tab[idx[1]+1],
+                     tab[idx[2]], tab[idx[2]+1], tab[idx[3]], tab[idx[3]+1]);
+}
+inline v_int16x8 v_lut_quads(const short* tab, const int* idx)
+{
+    return v_int16x8(tab[idx[0]], tab[idx[0]+1], tab[idx[0]+2], tab[idx[0]+3],
+                     tab[idx[1]], tab[idx[1]+1], tab[idx[1]+2], tab[idx[1]+3]);
+}
+inline v_uint16x8 v_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut((const short *)tab, idx)); }
+inline v_uint16x8 v_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_pairs((const short *)tab, idx)); }
+inline v_uint16x8 v_lut_quads(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_quads((const short *)tab, idx)); }
+
+inline v_int32x4 v_lut(const int* tab, const int* idx)
+{
+    return v_int32x4(tab[idx[0]], tab[idx[1]],
+                     tab[idx[2]], tab[idx[3]]);
+}
+inline v_int32x4 v_lut_pairs(const int* tab, const int* idx)
+{
+    return v_int32x4(tab[idx[0]], tab[idx[0]+1],
+                     tab[idx[1]], tab[idx[1]+1]);
+}
+inline v_int32x4 v_lut_quads(const int* tab, const int* idx)
+{
+    return v_int32x4(wasm_v128_load(tab + idx[0]));
+}
+inline v_uint32x4 v_lut(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut((const int *)tab, idx)); }
+inline v_uint32x4 v_lut_pairs(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_pairs((const int *)tab, idx)); }
+inline v_uint32x4 v_lut_quads(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_quads((const int *)tab, idx)); }
+
+inline v_int64x2 v_lut(const int64_t* tab, const int* idx)
+{
+    return v_int64x2(tab[idx[0]], tab[idx[1]]);
+}
+inline v_int64x2 v_lut_pairs(const int64_t* tab, const int* idx)
+{
+    return v_int64x2(wasm_v128_load(tab + idx[0]));
+}
+inline v_uint64x2 v_lut(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut((const int64_t *)tab, idx)); }
+inline v_uint64x2 v_lut_pairs(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut_pairs((const int64_t *)tab, idx)); }
+
+inline v_float32x4 v_lut(const float* tab, const int* idx)
+{
+    return v_float32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]);
+}
+inline v_float32x4 v_lut_pairs(const float* tab, const int* idx) { return v_reinterpret_as_f32(v_lut_pairs((const int *)tab, idx)); }
+inline v_float32x4 v_lut_quads(const float* tab, const int* idx) { return v_reinterpret_as_f32(v_lut_quads((const int *)tab, idx)); }
+
+inline v_float64x2 v_lut(const double* tab, const int* idx)
+{
+    return v_float64x2(tab[idx[0]], tab[idx[1]]);
+}
+inline v_float64x2 v_lut_pairs(const double* tab, const int* idx)
+{
+    return v_float64x2(wasm_v128_load(tab + idx[0]));
+}
+
+inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec)
+{
+    return v_int32x4(tab[wasm_i32x4_extract_lane(idxvec.val, 0)],
+                     tab[wasm_i32x4_extract_lane(idxvec.val, 1)],
+                     tab[wasm_i32x4_extract_lane(idxvec.val, 2)],
+                     tab[wasm_i32x4_extract_lane(idxvec.val, 3)]);
+}
+
+inline v_uint32x4 v_lut(const unsigned* tab, const v_int32x4& idxvec)
+{
+    return v_reinterpret_as_u32(v_lut((const int *)tab, idxvec));
+}
+
+inline v_float32x4 v_lut(const float* tab, const v_int32x4& idxvec)
+{
+    return v_float32x4(tab[wasm_i32x4_extract_lane(idxvec.val, 0)],
+                       tab[wasm_i32x4_extract_lane(idxvec.val, 1)],
+                       tab[wasm_i32x4_extract_lane(idxvec.val, 2)],
+                       tab[wasm_i32x4_extract_lane(idxvec.val, 3)]);
+}
+
+inline v_float64x2 v_lut(const double* tab, const v_int32x4& idxvec)
+{
+    return v_float64x2(tab[wasm_i32x4_extract_lane(idxvec.val, 0)],
+                       tab[wasm_i32x4_extract_lane(idxvec.val, 1)]);
+}
+
+// loads pairs from the table and deinterleaves them, e.g. returns:
+//   x = (tab[idxvec[0]], tab[idxvec[1]], tab[idxvec[2]], tab[idxvec[3]]),
+//   y = (tab[idxvec[0]+1], tab[idxvec[1]+1], tab[idxvec[2]+1], tab[idxvec[3]+1])
+// note that the indices are float indices, not float-pair indices.
+// in theory, this function can be used to implement bilinear interpolation,
+// when idxvec contains the offsets within the image.
+inline void v_lut_deinterleave(const float* tab, const v_int32x4& idxvec, v_float32x4& x, v_float32x4& y)
+{
+    x = v_float32x4(tab[wasm_i32x4_extract_lane(idxvec.val, 0)],
+                    tab[wasm_i32x4_extract_lane(idxvec.val, 1)],
+                    tab[wasm_i32x4_extract_lane(idxvec.val, 2)],
+                    tab[wasm_i32x4_extract_lane(idxvec.val, 3)]);
+    y = v_float32x4(tab[wasm_i32x4_extract_lane(idxvec.val, 0)+1],
+                    tab[wasm_i32x4_extract_lane(idxvec.val, 1)+1],
+                    tab[wasm_i32x4_extract_lane(idxvec.val, 2)+1],
+                    tab[wasm_i32x4_extract_lane(idxvec.val, 3)+1]);
+}
+
+inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_float64x2& x, v_float64x2& y)
+{
+    v128_t xy0 = wasm_v128_load(tab + wasm_i32x4_extract_lane(idxvec.val, 0));
+    v128_t xy1 = wasm_v128_load(tab + wasm_i32x4_extract_lane(idxvec.val, 1));
+    x.val = wasm_unpacklo_i64x2(xy0, xy1);
+    y.val = wasm_unpackhi_i64x2(xy0, xy1);
+}
+
+inline v_int8x16 v_interleave_pairs(const v_int8x16& vec)
+{
+    return v_int8x16(wasm_v8x16_shuffle(vec.val, vec.val, 0,2,1,3,4,6,5,7,8,10,9,11,12,14,13,15));
+}
+inline v_uint8x16 v_interleave_pairs(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_interleave_pairs(v_reinterpret_as_s8(vec))); }
+inline v_int8x16 v_interleave_quads(const v_int8x16& vec)
+{
+    return v_int8x16(wasm_v8x16_shuffle(vec.val, vec.val, 0,4,1,5,2,6,3,7,8,12,9,13,10,14,11,15));
+}
+inline v_uint8x16 v_interleave_quads(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_interleave_quads(v_reinterpret_as_s8(vec))); }
+
+inline v_int16x8 v_interleave_pairs(const v_int16x8& vec)
+{
+    return v_int16x8(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,4,5,2,3,6,7,8,9,12,13,10,11,14,15));
+}
+inline v_uint16x8 v_interleave_pairs(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_pairs(v_reinterpret_as_s16(vec))); }
+inline v_int16x8 v_interleave_quads(const v_int16x8& vec)
+{
+    return v_int16x8(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,8,9,2,3,10,11,4,5,12,13,6,7,14,15));
+}
+inline v_uint16x8 v_interleave_quads(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_quads(v_reinterpret_as_s16(vec))); }
+
+inline v_int32x4 v_interleave_pairs(const v_int32x4& vec)
+{
+    return v_int32x4(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,2,3,8,9,10,11,4,5,6,7,12,13,14,15));
+}
+inline v_uint32x4 v_interleave_pairs(const v_uint32x4& vec) { return v_reinterpret_as_u32(v_interleave_pairs(v_reinterpret_as_s32(vec))); }
+inline v_float32x4 v_interleave_pairs(const v_float32x4& vec)
+{
+    return v_float32x4(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,2,3,8,9,10,11,4,5,6,7,12,13,14,15));
+}
+
+inline v_int8x16 v_pack_triplets(const v_int8x16& vec)
+{
+    return v_int8x16(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,2,4,5,6,8,9,10,12,13,14,16,16,16,16));
+}
+inline v_uint8x16 v_pack_triplets(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_pack_triplets(v_reinterpret_as_s8(vec))); }
+
+inline v_int16x8 v_pack_triplets(const v_int16x8& vec)
+{
+    return v_int16x8(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,2,3,4,5,8,9,10,11,12,13,14,15,6,7));
+}
+inline v_uint16x8 v_pack_triplets(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); }
+
+inline v_int32x4 v_pack_triplets(const v_int32x4& vec) { return vec; }
+inline v_uint32x4 v_pack_triplets(const v_uint32x4& vec) { return vec; }
+inline v_float32x4 v_pack_triplets(const v_float32x4& vec) { return vec; }
+
+////////////// FP16 support ///////////////////////////
+
+inline v_float32x4 v_load_expand(const float16_t* ptr)
+{
+    return fallback::v_load_expand(ptr);
+}
+
+inline void v_pack_store(float16_t* ptr, const v_float32x4& v)
+{
+    fallback::v_float32x4 v_(v);
+    fallback::v_pack_store(ptr, v_);
+}
+
+inline void v_cleanup() {}
+
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+
+//! @endcond
+
+}
+
+#endif
diff --git a/modules/core/src/mathfuncs_core.simd.hpp b/modules/core/src/mathfuncs_core.simd.hpp
index 2959822d7f..ba8a5477de 100644
--- a/modules/core/src/mathfuncs_core.simd.hpp
+++ b/modules/core/src/mathfuncs_core.simd.hpp
@@ -31,6 +31,11 @@ using namespace cv;
 
 namespace {
 
+static const float atan2_p1 = 0.9997878412794807f*(float)(180/CV_PI);
+static const float atan2_p3 = -0.3258083974640975f*(float)(180/CV_PI);
+static const float atan2_p5 = 0.1555786518463281f*(float)(180/CV_PI);
+static const float atan2_p7 = -0.04432655554792128f*(float)(180/CV_PI);
+
 #ifdef __EMSCRIPTEN__
 static inline float atan_f32(float y, float x)
 {
@@ -42,11 +47,6 @@ static inline float atan_f32(float y, float x)
     return a; // range [0; 360)
 }
 #else
-static const float atan2_p1 = 0.9997878412794807f*(float)(180/CV_PI);
-static const float atan2_p3 = -0.3258083974640975f*(float)(180/CV_PI);
-static const float atan2_p5 = 0.1555786518463281f*(float)(180/CV_PI);
-static const float atan2_p7 = -0.04432655554792128f*(float)(180/CV_PI);
-
 static inline float atan_f32(float y, float x)
 {
     float ax = std::abs(x), ay = std::abs(y);
diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp
index 0d846b0342..d1332a4c3d 100644
--- a/modules/core/src/ocl.cpp
+++ b/modules/core/src/ocl.cpp
@@ -267,6 +267,9 @@ static const String getBuildExtraOptions()
     return param_buildExtraOptions;
 }
 
+static const bool CV_OPENCL_ENABLE_MEM_USE_HOST_PTR = utils::getConfigurationParameterBool("OPENCV_OPENCL_ENABLE_MEM_USE_HOST_PTR", true);
+static const size_t CV_OPENCL_ALIGNMENT_MEM_USE_HOST_PTR = utils::getConfigurationParameterSizeT("OPENCV_OPENCL_ALIGNMENT_MEM_USE_HOST_PTR", 4);
+
 #endif // HAVE_OPENCL
 
 struct UMat2D
@@ -4671,6 +4674,9 @@ public:
 
     bool allocate(UMatData* u, AccessFlag accessFlags, UMatUsageFlags usageFlags) const CV_OVERRIDE
     {
+#ifndef HAVE_OPENCL
+        return false;
+#else
         if(!u)
             return false;
 
@@ -4746,8 +4752,12 @@ public:
 #endif
             {
                 tempUMatFlags = UMatData::TEMP_UMAT;
-                if (u->origdata == cv::alignPtr(u->origdata, 4)  // There are OpenCL runtime issues for less aligned data
-                    && !(u->originalUMatData && u->originalUMatData->handle)  // Avoid sharing of host memory between OpenCL buffers
+                if (CV_OPENCL_ENABLE_MEM_USE_HOST_PTR
+                    // There are OpenCL runtime issues for less aligned data
+                    && (CV_OPENCL_ALIGNMENT_MEM_USE_HOST_PTR != 0
+                        && u->origdata == cv::alignPtr(u->origdata, (int)CV_OPENCL_ALIGNMENT_MEM_USE_HOST_PTR))
+                    // Avoid sharing of host memory between OpenCL buffers
+                    && !(u->originalUMatData && u->originalUMatData->handle)
                 )
                 {
                     handle = clCreateBuffer(ctx_handle, CL_MEM_USE_HOST_PTR|createFlags,
@@ -4777,6 +4787,7 @@ public:
             u->markHostCopyObsolete(true);
         opencl_allocator_stats.onAllocate(u->size);
         return true;
+#endif  // HAVE_OPENCL
     }
 
     /*void sync(UMatData* u) const
@@ -4905,7 +4916,7 @@ public:
                                 (CL_MAP_READ | CL_MAP_WRITE),
                                 0, u->size, 0, 0, 0, &retval);
                             CV_OCL_CHECK_RESULT(retval, cv::format("clEnqueueMapBuffer(handle=%p, sz=%lld) => %p", (void*)u->handle, (long long int)u->size, data).c_str());
-                            CV_Assert(u->origdata == data);
+                            CV_Assert(u->origdata == data && "Details: https://github.com/opencv/opencv/issues/6293");
                             if (u->originalUMatData)
                             {
                                 CV_Assert(u->originalUMatData->data == data);
diff --git a/modules/core/src/parallel.cpp b/modules/core/src/parallel.cpp
index cd43410e58..5b1c917e7f 100644
--- a/modules/core/src/parallel.cpp
+++ b/modules/core/src/parallel.cpp
@@ -54,7 +54,7 @@
 #endif
 
 #if defined __linux__ || defined __APPLE__ || defined __GLIBC__ \
-    || defined __HAIKU__
+    || defined __HAIKU__ || defined __EMSCRIPTEN__
     #include <unistd.h>
     #include <stdio.h>
     #include <sys/types.h>
@@ -808,7 +808,7 @@ int cv::getNumberOfCPUs(void)
 #elif defined __ANDROID__
     static int ncpus = getNumberOfCPUsImpl();
     return ncpus;
-#elif defined __linux__ || defined __GLIBC__ || defined __HAIKU__
+#elif defined __linux__ || defined __GLIBC__ || defined __HAIKU__ || defined __EMSCRIPTEN__
     return (int)sysconf( _SC_NPROCESSORS_ONLN );
 #elif defined __APPLE__
     int numCPU=0;
diff --git a/modules/core/test/test_io.cpp b/modules/core/test/test_io.cpp
index b0940961e1..1e1496bfc7 100644
--- a/modules/core/test/test_io.cpp
+++ b/modules/core/test/test_io.cpp
@@ -707,10 +707,10 @@ static void test_filestorage_basic(int write_flags, const char* suffix_name, boo
         EXPECT_EQ(_em_in.depth(), _em_out.depth());
         EXPECT_TRUE(_em_in.empty());
 
-        EXPECT_EQ(_2d_in.rows   , _2d_out.rows);
-        EXPECT_EQ(_2d_in.cols   , _2d_out.cols);
-        EXPECT_EQ(_2d_in.dims   , _2d_out.dims);
-        EXPECT_EQ(_2d_in.depth(), _2d_out.depth());
+        ASSERT_EQ(_2d_in.rows   , _2d_out.rows);
+        ASSERT_EQ(_2d_in.cols   , _2d_out.cols);
+        ASSERT_EQ(_2d_in.dims   , _2d_out.dims);
+        ASSERT_EQ(_2d_in.depth(), _2d_out.depth());
 
         errors = 0;
         for(int i = 0; i < _2d_out.rows; ++i)
@@ -731,16 +731,16 @@ static void test_filestorage_basic(int write_flags, const char* suffix_name, boo
             }
         }
 
-        EXPECT_EQ(_nd_in.rows   , _nd_out.rows);
-        EXPECT_EQ(_nd_in.cols   , _nd_out.cols);
-        EXPECT_EQ(_nd_in.dims   , _nd_out.dims);
-        EXPECT_EQ(_nd_in.depth(), _nd_out.depth());
+        ASSERT_EQ(_nd_in.rows   , _nd_out.rows);
+        ASSERT_EQ(_nd_in.cols   , _nd_out.cols);
+        ASSERT_EQ(_nd_in.dims   , _nd_out.dims);
+        ASSERT_EQ(_nd_in.depth(), _nd_out.depth());
         EXPECT_EQ(0, cv::norm(_nd_in, _nd_out, NORM_INF));
 
-        EXPECT_EQ(_rd_in.rows   , _rd_out.rows);
-        EXPECT_EQ(_rd_in.cols   , _rd_out.cols);
-        EXPECT_EQ(_rd_in.dims   , _rd_out.dims);
-        EXPECT_EQ(_rd_in.depth(), _rd_out.depth());
+        ASSERT_EQ(_rd_in.rows   , _rd_out.rows);
+        ASSERT_EQ(_rd_in.cols   , _rd_out.cols);
+        ASSERT_EQ(_rd_in.dims   , _rd_out.dims);
+        ASSERT_EQ(_rd_in.depth(), _rd_out.depth());
         EXPECT_EQ(0, cv::norm(_rd_in, _rd_out, NORM_INF));
     }
 }
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index dcf997d8a8..9a04fffa7f 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2540,6 +2540,12 @@ struct Net::Impl
         int requiredOutputs = layers[id].requiredOutputs.size();
         inOutShapes[id].supportInPlace =
                 layers[id].getLayerInstance()->getMemoryShapes(is, requiredOutputs, os, ints);
+
+        for (int i = 0; i < ints.size(); i++)
+            CV_Assert(total(ints[i]) > 0);
+
+        for (int i = 0; i < os.size(); i++)
+            CV_Assert(total(os[i]) > 0);
     }
 
     void getLayersShapes(const ShapesVec& netInputShapes,
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 42a2597af6..c8744fadac 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -241,10 +241,14 @@ public:
 
     MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
     {
-        Size out(outShape[3], outShape[2]);
+        int dims = inpShape.size();
+        int inpD = dims == 5 ? inpShape[2] : 1;
+        int inpH = inpShape[dims - 2];
+        int inpW = inpShape.back();
         int inpGroupCn = blobs[0].size[1];
-        int ksize = inpGroupCn * kernel.height * kernel.width;
-        return shape(out.area(), ksize);
+        int ksize = inpGroupCn * std::accumulate(kernel_size.begin(), kernel_size.end(),
+                                                 1, std::multiplies<size_t>());
+        return shape(inpD * inpH * inpW, ksize);
     }
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
@@ -1304,14 +1308,17 @@ public:
 
     MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
     {
+        int dims = inpShape.size();
         int inpCn = inpShape[1];
-        int inpH = inpShape[2];
-        int inpW = inpShape[3];
+        int inpD = dims == 5 ? inpShape[2] : 1;
+        int inpH = inpShape[dims - 2];
+        int inpW = inpShape.back();
         int outCn = outShape[1];
         int ngroups = inpCn / blobs[0].size[0];
         int outGroupCn = outCn / ngroups;
-        int ksize = outGroupCn * kernel.height * kernel.width;
-        return shape(ksize, inpH * inpW);
+        int ksize = outGroupCn * std::accumulate(kernel_size.begin(), kernel_size.end(),
+                                                 1, std::multiplies<size_t>());
+        return shape(ksize, inpD * inpH * inpW);
     }
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
diff --git a/modules/dnn/src/layers/recurrent_layers.cpp b/modules/dnn/src/layers/recurrent_layers.cpp
index f5ba861c47..cf005c65f3 100644
--- a/modules/dnn/src/layers/recurrent_layers.cpp
+++ b/modules/dnn/src/layers/recurrent_layers.cpp
@@ -92,6 +92,7 @@ class LSTMLayerImpl CV_FINAL : public LSTMLayer
     bool produceCellOutput;
     float forgetBias, cellClip;
     bool useCellClip, usePeephole;
+    bool reverse;   // If true, go in negative direction along the time axis
 
 public:
 
@@ -133,6 +134,7 @@ public:
         cellClip = params.get<float>("cell_clip", 0.0f);
         useCellClip = params.get<bool>("use_cell_clip", false);
         usePeephole = params.get<bool>("use_peephole", false);
+        reverse = params.get<bool>("reverse", false);
 
         allocated = false;
         outTailShape.clear();
@@ -288,7 +290,18 @@ public:
         Mat hOutTs = output[0].reshape(1, numSamplesTotal);
         Mat cOutTs = produceCellOutput ? output[1].reshape(1, numSamplesTotal) : Mat();
 
-        for (int ts = 0; ts < numTimeStamps; ts++)
+        int tsStart, tsEnd, tsInc;
+        if (reverse) {
+            tsStart = numTimeStamps - 1;
+            tsEnd = -1;
+            tsInc = -1;
+        }
+        else {
+            tsStart = 0;
+            tsEnd = numTimeStamps;
+            tsInc = 1;
+        }
+        for (int ts = tsStart; ts != tsEnd; ts += tsInc)
         {
             Range curRowRange(ts*numSamples, (ts + 1)*numSamples);
             Mat xCurr = xTs.rowRange(curRowRange);
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 6b71b9b80b..445c524639 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -461,6 +461,55 @@ TEST(Layer_RNN_Test_Accuracy_with_, CaffeRecurrent)
     normAssert(h_ref, output[0]);
 }
 
+TEST(Layer_LSTM_Test_Accuracy_, Reverse)
+{
+    // This handcrafted setup calculates (approximately) the prefix sum of the
+    // input, assuming the inputs are suitably small.
+    cv::Mat input(2, 1, CV_32FC1);
+    input.at<float>(0, 0) = 1e-5f;
+    input.at<float>(1, 0) = 2e-5f;
+
+    cv::Mat Wx(4, 1, CV_32FC1);
+    Wx.at<float>(0, 0) = 0.f;  // Input gate
+    Wx.at<float>(1, 0) = 0.f;  // Forget gate
+    Wx.at<float>(2, 0) = 0.f;  // Output gate
+    Wx.at<float>(3, 0) = 1.f;  // Update signal
+
+    cv::Mat Wh(4, 1, CV_32FC1);
+    Wh.at<float>(0, 0) = 0.f;  // Input gate
+    Wh.at<float>(1, 0) = 0.f;  // Forget gate
+    Wh.at<float>(2, 0) = 0.f;  // Output gate
+    Wh.at<float>(3, 0) = 0.f;  // Update signal
+
+    cv::Mat bias(4, 1, CV_32FC1);
+    bias.at<float>(0, 0) = 1e10f;  // Input gate - always allows input to c
+    bias.at<float>(1, 0) = 1e10f;  // Forget gate - never forget anything on c
+    bias.at<float>(2, 0) = 1e10f;  // Output gate - always output everything
+    bias.at<float>(3, 0) = 0.f;  // Update signal
+
+    LayerParams lp;
+    lp.set("reverse", true);
+    lp.set("use_timestamp_dim", true);
+    lp.blobs.clear();
+    lp.blobs.push_back(Wh);
+    lp.blobs.push_back(Wx);
+    lp.blobs.push_back(bias);
+
+    cv::Ptr<cv::dnn::LSTMLayer> layer = LSTMLayer::create(lp);
+    std::vector<cv::Mat> outputs;
+    std::vector<cv::Mat> inputs;
+    inputs.push_back(input);
+    runLayer(layer, inputs, outputs);
+
+    ASSERT_EQ(1, outputs.size());
+    cv::Mat out = outputs[0];
+    ASSERT_EQ(3, out.dims);
+    ASSERT_EQ(shape(2, 1, 1), shape(out));
+    float* data = reinterpret_cast<float*>(out.data);
+    EXPECT_NEAR(std::tanh(1e-5f) + std::tanh(2e-5f), data[0], 1e-10);
+    EXPECT_NEAR(std::tanh(2e-5f), data[1], 1e-10);
+}
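Why these expected values hold (a short derivation, not part of the patch): the huge gate biases saturate the input, forget, and output gates to 1, and the update signal reduces to $\tanh(x_t)$ since only the update row of `Wx` is nonzero, so with `reverse` set the cell state accumulates from the last timestep backwards:

$$c_t = c_{t+1} + \tanh(x_t), \qquad h_t = \tanh(c_t) \approx c_t \;\; (|c_t| \ll 1),$$
$$\text{hence } h_t \approx \sum_{s \ge t} \tanh(x_s): \quad h_0 \approx \tanh(10^{-5}) + \tanh(2\cdot 10^{-5}), \quad h_1 \approx \tanh(2\cdot 10^{-5}),$$

which is exactly what the two `EXPECT_NEAR` checks assert.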
+
 
 class Layer_RNN_Test : public ::testing::Test
 {
diff --git a/modules/dnn/test/test_model.cpp b/modules/dnn/test/test_model.cpp
index 5bc5bd31fe..b62f1ac82f 100644
--- a/modules/dnn/test/test_model.cpp
+++ b/modules/dnn/test/test_model.cpp
@@ -105,7 +105,8 @@ TEST_P(Test_Model, Classify)
 }
 
 
-TEST_P(Test_Model, DetectRegion)
+// disabled: https://github.com/opencv/opencv/pull/15593
+TEST_P(Test_Model, DISABLED_DetectRegion)
 {
     applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_MEMORY_1GB);
 
diff --git a/modules/imgcodecs/src/grfmt_tiff.cpp b/modules/imgcodecs/src/grfmt_tiff.cpp
index 5d54a4ac6d..6bbb9ebd05 100644
--- a/modules/imgcodecs/src/grfmt_tiff.cpp
+++ b/modules/imgcodecs/src/grfmt_tiff.cpp
@@ -824,6 +824,7 @@ bool TiffEncoder::writeLibTiff( const std::vector<Mat>& img_vec, const std::vect
     for (size_t page = 0; page < img_vec.size(); page++)
     {
         const Mat& img = img_vec[page];
+        CV_Assert(!img.empty());
         int channels = img.channels();
         int width = img.cols, height = img.rows;
         int type = img.type();
@@ -883,6 +884,7 @@ bool TiffEncoder::writeLibTiff( const std::vector<Mat>& img_vec, const std::vect
 
         const int bitsPerByte = 8;
         size_t fileStep = (width * channels * bitsPerChannel) / bitsPerByte;
+        CV_Assert(fileStep > 0);
 
         int rowsPerStrip = (int)((1 << 13) / fileStep);
         readParam(params, TIFFTAG_ROWSPERSTRIP, rowsPerStrip);
diff --git a/modules/imgcodecs/src/loadsave.cpp b/modules/imgcodecs/src/loadsave.cpp
index 847dda144a..8b56f0cac5 100644
--- a/modules/imgcodecs/src/loadsave.cpp
+++ b/modules/imgcodecs/src/loadsave.cpp
@@ -664,6 +664,8 @@ static bool imwrite_( const String& filename, const std::vector<Mat>& img_vec,
     for (size_t page = 0; page < img_vec.size(); page++)
     {
         Mat image = img_vec[page];
+        CV_Assert(!image.empty());
+
         CV_Assert( image.channels() == 1 || image.channels() == 3 || image.channels() == 4 );
 
         Mat temp;
@@ -710,6 +712,9 @@ bool imwrite( const String& filename, InputArray _img,
               const std::vector<int>& params )
 {
     CV_TRACE_FUNCTION();
+
+    CV_Assert(!_img.empty());
+
     std::vector<Mat> img_vec;
     if (_img.isMatVector() || _img.isUMatVector())
         _img.getMatVector(img_vec);
@@ -870,6 +875,7 @@ bool imencode( const String& ext, InputArray _image,
     CV_TRACE_FUNCTION();
 
     Mat image = _image.getMat();
+    CV_Assert(!image.empty());
 
     int channels = image.channels();
     CV_Assert( channels == 1 || channels == 3 || channels == 4 );
diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp
index ecee8b57ce..b8bfabf9f3 100644
--- a/modules/imgproc/src/pyramids.cpp
+++ b/modules/imgproc/src/pyramids.cpp
@@ -87,13 +87,13 @@ template<typename T1, typename T2> int PyrUpVecV(T1**, T2**, int) { return 0; }
 template<> int PyrDownVecH<uchar, int, 1>(const uchar* src, int* row, int width)
 {
     int x = 0;
-    const uchar *src0 = src, *src2 = src + 2, *src4 = src + 3;
+    const uchar *src01 = src, *src23 = src + 2, *src4 = src + 3;
 
     v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001));
     v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006));
-    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
-        v_store(row, v_dotprod(v_reinterpret_as_s16(vx_load_expand(src0)), v_1_4) +
-                     v_dotprod(v_reinterpret_as_s16(vx_load_expand(src2)), v_6_4) +
+    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src01 += v_int16::nlanes, src23 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
+        v_store(row, v_dotprod(v_reinterpret_as_s16(vx_load_expand(src01)), v_1_4) +
+                     v_dotprod(v_reinterpret_as_s16(vx_load_expand(src23)), v_6_4) +
                      (v_reinterpret_as_s32(vx_load_expand(src4)) >> 16));
     vx_cleanup();
 
@@ -102,13 +102,13 @@ template<> int PyrDownVecH<uchar, int, 1>(const uchar* src, int* row, int width)
 template<> int PyrDownVecH<uchar, int, 2>(const uchar* src, int* row, int width)
 {
     int x = 0;
-    const uchar *src0 = src, *src2 = src + 4, *src4 = src + 6;
+    const uchar *src01 = src, *src23 = src + 4, *src4 = src + 6;
 
     v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001));
     v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006));
-    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
-        v_store(row, v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(vx_load_expand(src0))), v_1_4) +
-                     v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(vx_load_expand(src2))), v_6_4) +
+    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src01 += v_int16::nlanes, src23 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
+        v_store(row, v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(vx_load_expand(src01))), v_1_4) +
+                     v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(vx_load_expand(src23))), v_6_4) +
                      (v_reinterpret_as_s32(v_interleave_pairs(vx_load_expand(src4))) >> 16));
     vx_cleanup();
 
@@ -150,13 +150,13 @@ template<> int PyrDownVecH<uchar, int, 3>(const uchar* src, int* row, int width)
 template<> int PyrDownVecH<uchar, int, 4>(const uchar* src, int* row, int width)
 {
     int x = 0;
-    const uchar *src0 = src, *src2 = src + 8, *src4 = src + 12;
+    const uchar *src01 = src, *src23 = src + 8, *src4 = src + 12;
 
     v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001));
     v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006));
-    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
-        v_store(row, v_dotprod(v_interleave_quads(v_reinterpret_as_s16(vx_load_expand(src0))), v_1_4) +
-                     v_dotprod(v_interleave_quads(v_reinterpret_as_s16(vx_load_expand(src2))), v_6_4) +
+    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src01 += v_int16::nlanes, src23 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
+        v_store(row, v_dotprod(v_interleave_quads(v_reinterpret_as_s16(vx_load_expand(src01))), v_1_4) +
+                     v_dotprod(v_interleave_quads(v_reinterpret_as_s16(vx_load_expand(src23))), v_6_4) +
                      (v_reinterpret_as_s32(v_interleave_quads(vx_load_expand(src4))) >> 16));
     vx_cleanup();
 
@@ -166,13 +166,13 @@ template<> int PyrDownVecH<uchar, int, 4>(const uchar* src, int* row, int width)
 template<> int PyrDownVecH<short, int, 1>(const short* src, int* row, int width)
 {
     int x = 0;
-    const short *src0 = src, *src2 = src + 2, *src4 = src + 3;
+    const short *src01 = src, *src23 = src + 2, *src4 = src + 3;
 
     v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001));
     v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006));
-    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
-        v_store(row, v_dotprod(vx_load(src0), v_1_4) +
-                     v_dotprod(vx_load(src2), v_6_4) +
+    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src01 += v_int16::nlanes, src23 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
+        v_store(row, v_dotprod(vx_load(src01), v_1_4) +
+                     v_dotprod(vx_load(src23), v_6_4) +
                      (v_reinterpret_as_s32(vx_load(src4)) >> 16));
     vx_cleanup();
 
@@ -181,13 +181,13 @@ template<> int PyrDownVecH<short, int, 1>(const short* src, int* row, int width)
 template<> int PyrDownVecH<short, int, 2>(const short* src, int* row, int width)
 {
     int x = 0;
-    const short *src0 = src, *src2 = src + 4, *src4 = src + 6;
+    const short *src01 = src, *src23 = src + 4, *src4 = src + 6;
 
     v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001));
     v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006));
-    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
-        v_store(row, v_dotprod(v_interleave_pairs(vx_load(src0)), v_1_4) +
-                     v_dotprod(v_interleave_pairs(vx_load(src2)), v_6_4) +
+    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src01 += v_int16::nlanes, src23 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
+        v_store(row, v_dotprod(v_interleave_pairs(vx_load(src01)), v_1_4) +
+                     v_dotprod(v_interleave_pairs(vx_load(src23)), v_6_4) +
                      (v_reinterpret_as_s32(v_interleave_pairs(vx_load(src4))) >> 16));
     vx_cleanup();
 
@@ -247,15 +247,15 @@ template<> int PyrDownVecH<short, int, 4>(const short* src, int* row, int width)
 template<> int PyrDownVecH<ushort, int, 1>(const ushort* src, int* row, int width)
 {
     int x = 0;
-    const ushort *src0 = src, *src2 = src + 2, *src4 = src + 3;
+    const ushort *src01 = src, *src23 = src + 2, *src4 = src + 3;
 
     v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001));
     v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006));
     v_uint16 v_half = vx_setall_u16(0x8000);
     v_int32 v_half15 = vx_setall_s32(0x00078000);
-    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
-        v_store(row, v_dotprod(v_reinterpret_as_s16(v_sub_wrap(vx_load(src0), v_half)), v_1_4) +
-                     v_dotprod(v_reinterpret_as_s16(v_sub_wrap(vx_load(src2), v_half)), v_6_4) +
+    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src01 += v_int16::nlanes, src23 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
+        v_store(row, v_dotprod(v_reinterpret_as_s16(v_sub_wrap(vx_load(src01), v_half)), v_1_4) +
+                     v_dotprod(v_reinterpret_as_s16(v_sub_wrap(vx_load(src23), v_half)), v_6_4) +
                      v_reinterpret_as_s32(v_reinterpret_as_u32(vx_load(src4)) >> 16) + v_half15);
     vx_cleanup();
 
@@ -264,15 +264,15 @@ template<> int PyrDownVecH<ushort, int, 1>(const ushort* src, int* row, int widt
 template<> int PyrDownVecH<ushort, int, 2>(const ushort* src, int* row, int width)
 {
     int x = 0;
-    const ushort *src0 = src, *src2 = src + 4, *src4 = src + 6;
+    const ushort *src01 = src, *src23 = src + 4, *src4 = src + 6;
 
     v_int16 v_1_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040001));
     v_int16 v_6_4 = v_reinterpret_as_s16(vx_setall_u32(0x00040006));
     v_uint16 v_half = vx_setall_u16(0x8000);
     v_int32 v_half15 = vx_setall_s32(0x00078000);
-    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src0 += v_int16::nlanes, src2 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
-        v_store(row, v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(v_sub_wrap(vx_load(src0), v_half))), v_1_4) +
-                     v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(v_sub_wrap(vx_load(src2), v_half))), v_6_4) +
+    for (; x <= width - v_int32::nlanes; x += v_int32::nlanes, src01 += v_int16::nlanes, src23 += v_int16::nlanes, src4 += v_int16::nlanes, row += v_int32::nlanes)
+        v_store(row, v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(v_sub_wrap(vx_load(src01), v_half))), v_1_4) +
+                     v_dotprod(v_interleave_pairs(v_reinterpret_as_s16(v_sub_wrap(vx_load(src23), v_half))), v_6_4) +
                      v_reinterpret_as_s32(v_reinterpret_as_u32(v_interleave_pairs(vx_load(src4))) >> 16) + v_half15);
     vx_cleanup();
 
@@ -344,15 +344,15 @@ template<> int PyrDownVecH<ushort, int, 4>(const ushort* src, int* row, int widt
 template<> int PyrDownVecH<float, float, 1>(const float* src, float* row, int width)
 {
     int x = 0;
-    const float *src0 = src, *src2 = src + 2, *src4 = src + 4;
+    const float *src01 = src, *src23 = src + 2, *src4 = src + 3;
 
     v_float32 _4 = vx_setall_f32(4.f), _6 = vx_setall_f32(6.f);
-    for (; x <= width - v_float32::nlanes; x += v_float32::nlanes, src0 += 2*v_float32::nlanes, src2 += 2*v_float32::nlanes, src4 += 2*v_float32::nlanes, row+=v_float32::nlanes)
+    for (; x <= width - v_float32::nlanes; x += v_float32::nlanes, src01 += 2*v_float32::nlanes, src23 += 2*v_float32::nlanes, src4 += 2*v_float32::nlanes, row+=v_float32::nlanes)
     {
         v_float32 r0, r1, r2, r3, r4, rtmp;
-        v_load_deinterleave(src0, r0, r1);
-        v_load_deinterleave(src2, r2, r3);
-        v_load_deinterleave(src4, r4, rtmp);
+        v_load_deinterleave(src01, r0, r1);
+        v_load_deinterleave(src23, r2, r3);
+        v_load_deinterleave(src4, rtmp, r4);
         v_store(row, v_muladd(r2, _6, v_muladd(r1 + r3, _4, r0 + r4)));
     }
     vx_cleanup();
@@ -362,14 +362,14 @@ template<> int PyrDownVecH<float, float, 1>(const float* src, float* row, int wi
 template<> int PyrDownVecH<float, float, 2>(const float* src, float* row, int width)
 {
     int x = 0;
-    const float *src0 = src, *src2 = src + 4, *src4 = src + 6;
+    const float *src01 = src, *src23 = src + 4, *src4 = src + 6;
 
     v_float32 _4 = vx_setall_f32(4.f), _6 = vx_setall_f32(6.f);
-    for (; x <= width - 2*v_float32::nlanes; x += 2*v_float32::nlanes, src0 += 4*v_float32::nlanes, src2 += 4*v_float32::nlanes, src4 += 4*v_float32::nlanes, row += 2*v_float32::nlanes)
+    for (; x <= width - 2*v_float32::nlanes; x += 2*v_float32::nlanes, src01 += 4*v_float32::nlanes, src23 += 4*v_float32::nlanes, src4 += 4*v_float32::nlanes, row += 2*v_float32::nlanes)
     {
         v_float32 r0a, r0b, r1a, r1b, r2a, r2b, r3a, r3b, r4a, r4b, rtmpa, rtmpb;
-        v_load_deinterleave(src0, r0a, r0b, r1a, r1b);
-        v_load_deinterleave(src2, r2a, r2b, r3a, r3b);
+        v_load_deinterleave(src01, r0a, r0b, r1a, r1b);
+        v_load_deinterleave(src23, r2a, r2b, r3a, r3b);
         v_load_deinterleave(src4, rtmpa, rtmpb, r4a, r4b);
         v_store_interleave(row, v_muladd(r2a, _6, v_muladd(r1a + r3a, _4, r0a + r4a)), v_muladd(r2b, _6, v_muladd(r1b + r3b, _4, r0b + r4b)));
     }
@@ -430,15 +430,15 @@ template<> int PyrDownVecH<float, float, 4>(const float* src, float* row, int wi
 template<> int PyrDownVecH<double, double, 1>(const double* src, double* row, int width)
 {
     int x = 0;
-    const double *src0 = src, *src2 = src + 2, *src4 = src + 4;
+    const double *src01 = src, *src23 = src + 2, *src4 = src + 3;
 
     v_float64 _4 = vx_setall_f64(4.f), _6 = vx_setall_f64(6.f);
-    for (; x <= width - v_float64::nlanes; x += v_float64::nlanes, src0 += 2*v_float64::nlanes, src2 += 2*v_float64::nlanes, src4 += 2*v_float64::nlanes, row += v_float64::nlanes)
+    for (; x <= width - v_float64::nlanes; x += v_float64::nlanes, src01 += 2*v_float64::nlanes, src23 += 2*v_float64::nlanes, src4 += 2*v_float64::nlanes, row += v_float64::nlanes)
     {
         v_float64 r0, r1, r2, r3, r4, rtmp;
-        v_load_deinterleave(src0, r0, r1);
-        v_load_deinterleave(src2, r2, r3);
-        v_load_deinterleave(src4, r4, rtmp);
+        v_load_deinterleave(src01, r0, r1);
+        v_load_deinterleave(src23, r2, r3);
+        v_load_deinterleave(src4, rtmp, r4);
         v_store(row, v_muladd(r2, _6, v_muladd(r1 + r3, _4, r0 + r4)));
     }
     vx_cleanup();
diff --git a/modules/js/CMakeLists.txt b/modules/js/CMakeLists.txt
index e97042aab1..cefc1bd9fa 100644
--- a/modules/js/CMakeLists.txt
+++ b/modules/js/CMakeLists.txt
@@ -82,7 +82,17 @@ add_definitions("-std=c++11")
 
 link_libraries(${OPENCV_MODULE_${the_module}_DEPS})
 
-ocv_add_executable(${the_module} ${bindings_cpp})
+OCV_OPTION(BUILD_WASM_INTRIN_TESTS "Build WASM intrin tests" OFF)
+if(BUILD_WASM_INTRIN_TESTS)
+  add_definitions(-DTEST_WASM_INTRIN)
+  ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../ts/include")
+  ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../imgcodecs/include")
+  ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../videoio/include")
+  ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../highgui/include")
+  ocv_add_executable(${the_module} ${bindings_cpp} "${CMAKE_CURRENT_SOURCE_DIR}/../ts/src/ts_gtest.cpp")
+else()
+  ocv_add_executable(${the_module} ${bindings_cpp})
+endif()
 
 set(COMPILE_FLAGS "")
 if(NOT CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
@@ -91,7 +101,7 @@ endif()
 if(COMPILE_FLAGS)
     set_target_properties(${the_module} PROPERTIES COMPILE_FLAGS ${COMPILE_FLAGS})
 endif()
-set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} --memory-init-file 0 -s TOTAL_MEMORY=134217728 -s ALLOW_MEMORY_GROWTH=1")
+set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} --memory-init-file 0 -s TOTAL_MEMORY=128MB -s WASM_MEM_MAX=1GB -s ALLOW_MEMORY_GROWTH=1")
 set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} -s MODULARIZE=1 -s SINGLE_FILE=1")
 set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} -s EXPORT_NAME=\"'cv'\" -s DEMANGLE_SUPPORT=1")
 set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} -s FORCE_FILESYSTEM=1 --use-preload-plugins --bind --post-js ${JS_HELPER} ${COMPILE_FLAGS}")
@@ -147,3 +157,26 @@ list(APPEND opencv_test_js_file_deps "${test_data_path}" "${opencv_test_js_bin_d
 
 add_custom_target(${PROJECT_NAME}_test ALL
                   DEPENDS ${OCV_JS_PATH} ${opencv_test_js_file_deps})
+
+# perf
+set(opencv_perf_js_bin_dir "${EXECUTABLE_OUTPUT_PATH}/perf")
+set(perf_dir ${CMAKE_CURRENT_SOURCE_DIR}/perf)
+
+set(opencv_perf_js_file_deps "")
+
+# make sure the build directory exists
+file(MAKE_DIRECTORY "${opencv_perf_js_bin_dir}")
+
+# gather and copy specific files for js perf
+file(GLOB_RECURSE perf_files RELATIVE "${perf_dir}" "${perf_dir}/*")
+foreach(f ${perf_files})
+  add_custom_command(OUTPUT "${opencv_perf_js_bin_dir}/${f}"
+                     COMMAND ${CMAKE_COMMAND} -E copy_if_different "${perf_dir}/${f}" "${opencv_perf_js_bin_dir}/${f}"
+                     DEPENDS "${perf_dir}/${f}"
+                     COMMENT "Copying ${f}"
+                    )
+  list(APPEND opencv_perf_js_file_deps "${perf_dir}/${f}" "${opencv_perf_js_bin_dir}/${f}")
+endforeach()
+
+add_custom_target(${PROJECT_NAME}_perf ALL
+                  DEPENDS ${OCV_JS_PATH} ${opencv_perf_js_file_deps})
diff --git a/modules/js/perf/README.md b/modules/js/perf/README.md
new file mode 100644
index 0000000000..c851bd51dd
--- /dev/null
+++ b/modules/js/perf/README.md
@@ -0,0 +1,35 @@
+# OpenCV.js Performance Test
+
+## Node.js Version
+
+### Prerequisites
+
+1. Node.js and npm: make sure both are installed beforehand, e.g. via your system package manager.
+
+2. Benchmark.js: install it via npm before use by running `npm install` in the directory `<build_dir>/bin/perf`.
+
+### How to Use
+
+For example, to test the performance of cvtColor, run `perf_cvtcolor.js` with Node.js in a terminal:
+
+```sh
+node perf_cvtcolor.js
+```
+
+The command above runs all cvtColor test cases.
+
+To run only one specific case, pass the `--test_param_filter="()"` flag with that case's parameters, for example:
+
+```sh
+node perf_cvtcolor.js --test_param_filter="(1920x1080, COLOR_BGR2GRAY)"
+```
+
+## Browser Version
+
+### How to Use
+
+To run the performance tests, launch a local web server in the `<build_dir>/bin` folder, e.g. the `http-server` npm package serving on `localhost:8080`.
+
+Navigate your web browser to the kernel page you want to test, e.g. http://localhost:8080/perf/imgproc/cvtcolor.html.
+
+Enter a parameter filter and click the `Run` button to run that specific case; leave the filter empty to run all cases.
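If you do not have a static file server handy, a minimal Node.js one is sketched below (an illustration only, not part of this patch; the `http-server` npm package mentioned above works just as well):

```js
// serve.js -- minimal static server for <build_dir>/bin (sketch, not part of the patch)
const http = require('http');
const fs = require('fs');
const path = require('path');

const root = process.argv[2] || '.';  // e.g. node serve.js <build_dir>/bin
const types = { '.html': 'text/html', '.js': 'application/javascript', '.wasm': 'application/wasm' };

http.createServer((req, res) => {
  const file = path.join(root, decodeURIComponent(req.url.split('?')[0]));
  fs.readFile(file, (err, data) => {
    if (err) { res.writeHead(404); res.end('Not found'); return; }
    res.writeHead(200, { 'Content-Type': types[path.extname(file)] || 'application/octet-stream' });
    res.end(data);
  });
}).listen(8080, () => console.log(`Serving ${root} on http://localhost:8080`));
```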
diff --git a/modules/js/perf/base.js b/modules/js/perf/base.js
new file mode 100644
index 0000000000..6c2e772e30
--- /dev/null
+++ b/modules/js/perf/base.js
@@ -0,0 +1,18 @@
+if (typeof window === 'undefined') {
+  var cv = require("../opencv");
+}
+
+const cvSize = {
+  szODD: new cv.Size(127, 61),
+  szQVGA: new cv.Size(320, 240),
+  szVGA: new cv.Size(640, 480),
+  szqHD: new cv.Size(960, 540),
+  sz720p: new cv.Size(1280, 720),
+  sz1080p: new cv.Size(1920, 1080),
+  sz130x60: new cv.Size(130, 60),
+  sz213x120: new cv.Size(Math.floor(120 * 1280 / 720), 120),  // integer division, as in the native perf tests
+}
+
+if (typeof window === 'undefined') {
+  exports.cvSize = cvSize;
+}
\ No newline at end of file
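A quick sketch (not part of the patch) of how the perf cases consume these constants; it assumes opencv.js has finished loading, i.e. it runs inside `cv.onRuntimeInitialized`:

```js
// cv.Size stores (width, height); cv.Mat takes (rows, cols), hence the swap below.
const size = cvSize.sz720p;                                   // cv.Size(1280, 720)
const mat = new cv.Mat(size.height, size.width, cv.CV_8UC4);
console.log(mat.rows, mat.cols);                              // 720 1280
mat.delete();                                                 // opencv.js Mats are freed manually
```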
diff --git a/modules/js/perf/package.json b/modules/js/perf/package.json
new file mode 100644
index 0000000000..cbeedb7ac8
--- /dev/null
+++ b/modules/js/perf/package.json
@@ -0,0 +1,19 @@
+{
+    "name": "opencv_js_perf",
+    "description": "Perfermance tests for opencv js bindings",
+    "version": "1.0.0",
+    "dependencies" : {
+      "benchmark" : "latest"
+    },
+    "repository": {
+      "type": "git",
+      "url": "https://github.com/opencv/opencv.git"
+    },
+    "keywords": [],
+    "author": "",
+    "license": "BSD-3-Clause",
+    "bugs": {
+      "url": "https://github.com/opencv/opencv/issues"
+    },
+    "homepage": "https://github.com/opencv/opencv"
+  }
\ No newline at end of file
diff --git a/modules/js/perf/perf_helpfunc.js b/modules/js/perf/perf_helpfunc.js
new file mode 100644
index 0000000000..e07e3a297e
--- /dev/null
+++ b/modules/js/perf/perf_helpfunc.js
@@ -0,0 +1,59 @@
+var fillGradient = function(cv, img, delta=5) {
+  let ch = img.channels();
+  console.assert(!img.empty() && img.depth() == cv.CV_8U && ch <= 4);
+
+  let n = 255 / delta;
+  for(let r = 0; r < img.rows; ++r) {
+    let kR = r % (2*n);
+    let valR = (kR<=n) ? delta*kR : delta*(2*n-kR);
+    for(let c = 0; c < img.cols; ++c) {
+        let kC = c % (2*n);
+        let valC = (kC<=n) ? delta*kC : delta*(2*n-kC);
+        let vals = [valR, valC, 200*r/img.rows, 255];
+        let p = img.ptr(r, c);
+        for(let i = 0; i < ch; ++i) p[i] = vals[i];
+    }
+  }
+}
+
+var cvtStr2cvSize = function(strSize) {
+  let size;
+  switch(strSize) {
+    case "127,61": size = cvSize.szODD;break;
+    case '320,240': size = cvSize.szQVGA;break;
+    case '640,480': size = cvSize.szVGA;break;
+    case '960,540': size = cvSize.szqHD;break;
+    case '1280,720': size = cvSize.sz720p;break;
+    case '1920,1080': size = cvSize.sz1080p;break;
+    case "130,60": size = cvSize.sz130x60;break;
+    case '213,120': size = cvSize.sz213x120;break;
+    default: console.error("unsupported size for this case");
+  }
+  return size;
+}
+
+var combine = function() {
+  let result = [[]];
+  for (let i = 0; i < arguments.length; ++i) {
+    result = permute(result, arguments[i]);
+  }
+  return result;
+}
+
+function permute (source, target) {
+  let result = [];
+  for (let i = 0; i < source.length; ++i) {
+    for (let j = 0; j < target.length; ++j) {
+      let tmp = source[i].slice();
+      tmp.push(target[j]);
+      result.push(tmp);
+    }
+  }
+  return result;
+}
+
+if (typeof window === 'undefined') {
+  exports.fillGradient = fillGradient;
+  exports.cvtStr2cvSize = cvtStr2cvSize;
+  exports.combine = combine;
+}
\ No newline at end of file
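`combine` builds the Cartesian product of its argument arrays, one element per array in each result row; a quick behavioral sketch (not part of the patch):

```js
// Node.js usage; in the browser these helpers are plain globals.
const { combine } = require('./perf_helpfunc');
const sizes = ['640x480', '1920x1080'];
const modes = ['COLOR_BGR2GRAY', 'COLOR_BGR2RGB'];
console.log(combine(sizes, modes));
// [ [ '640x480', 'COLOR_BGR2GRAY' ], [ '640x480', 'COLOR_BGR2RGB' ],
//   [ '1920x1080', 'COLOR_BGR2GRAY' ], [ '1920x1080', 'COLOR_BGR2RGB' ] ]
```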
diff --git a/modules/js/perf/perf_imgproc/perf_cvtcolor.html b/modules/js/perf/perf_imgproc/perf_cvtcolor.html
new file mode 100644
index 0000000000..1897e35c48
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_cvtcolor.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h6>Image Processing</h6>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h6>CvtColor</h6>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the case"/>  for example: (640x480,COLOR_RGBA2GRAY)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_cvtcolor.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_cvtcolor.js b/modules/js/perf/perf_imgproc/perf_cvtcolor.js
new file mode 100644
index 0000000000..752691ef77
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_cvtcolor.js
@@ -0,0 +1,572 @@
+const isNodeJs = (typeof window) === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+cv.onRuntimeInitialized = () => {
+  console.log('opencv.js loaded');
+  if (isNodeJs) {
+    global.cv = cv;
+    global.combine = HelpFunc.combine;
+    global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+    global.cvSize = Base.cvSize;
+  } else {
+    runButton.removeAttribute('disabled');
+    runButton.setAttribute('class', 'btn btn-primary');
+    runButton.innerHTML = 'Run';
+  }
+  let totalCaseNum, currentCaseId;
+
+  //extra color conversions supported implicitly
+  {
+    cv.CX_BGRA2HLS      = cv.COLOR_COLORCVT_MAX + cv.COLOR_BGR2HLS,
+    cv.CX_BGRA2HLS_FULL = cv.COLOR_COLORCVT_MAX + cv.COLOR_BGR2HLS_FULL,
+    cv.CX_BGRA2HSV      = cv.COLOR_COLORCVT_MAX + cv.COLOR_BGR2HSV,
+    cv.CX_BGRA2HSV_FULL = cv.COLOR_COLORCVT_MAX + cv.COLOR_BGR2HSV_FULL,
+    cv.CX_BGRA2Lab      = cv.COLOR_COLORCVT_MAX + cv.COLOR_BGR2Lab,
+    cv.CX_BGRA2Luv      = cv.COLOR_COLORCVT_MAX + cv.COLOR_BGR2Luv,
+    cv.CX_BGRA2XYZ      = cv.COLOR_COLORCVT_MAX + cv.COLOR_BGR2XYZ,
+    cv.CX_BGRA2YCrCb    = cv.COLOR_COLORCVT_MAX + cv.COLOR_BGR2YCrCb,
+    cv.CX_BGRA2YUV      = cv.COLOR_COLORCVT_MAX + cv.COLOR_BGR2YUV,
+    cv.CX_HLS2BGRA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_HLS2BGR,
+    cv.CX_HLS2BGRA_FULL = cv.COLOR_COLORCVT_MAX + cv.COLOR_HLS2BGR_FULL,
+    cv.CX_HLS2RGBA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_HLS2RGB,
+    cv.CX_HLS2RGBA_FULL = cv.COLOR_COLORCVT_MAX + cv.COLOR_HLS2RGB_FULL,
+    cv.CX_HSV2BGRA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_HSV2BGR,
+    cv.CX_HSV2BGRA_FULL = cv.COLOR_COLORCVT_MAX + cv.COLOR_HSV2BGR_FULL,
+    cv.CX_HSV2RGBA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_HSV2RGB,
+    cv.CX_HSV2RGBA_FULL = cv.COLOR_COLORCVT_MAX + cv.COLOR_HSV2RGB_FULL,
+    cv.CX_Lab2BGRA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_Lab2BGR,
+    cv.CX_Lab2LBGRA     = cv.COLOR_COLORCVT_MAX + cv.COLOR_Lab2LBGR,
+    cv.CX_Lab2LRGBA     = cv.COLOR_COLORCVT_MAX + cv.COLOR_Lab2LRGB,
+    cv.CX_Lab2RGBA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_Lab2RGB,
+    cv.CX_LBGRA2Lab     = cv.COLOR_COLORCVT_MAX + cv.COLOR_LBGR2Lab,
+    cv.CX_LBGRA2Luv     = cv.COLOR_COLORCVT_MAX + cv.COLOR_LBGR2Luv,
+    cv.CX_LRGBA2Lab     = cv.COLOR_COLORCVT_MAX + cv.COLOR_LRGB2Lab,
+    cv.CX_LRGBA2Luv     = cv.COLOR_COLORCVT_MAX + cv.COLOR_LRGB2Luv,
+    cv.CX_Luv2BGRA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_Luv2BGR,
+    cv.CX_Luv2LBGRA     = cv.COLOR_COLORCVT_MAX + cv.COLOR_Luv2LBGR,
+    cv.CX_Luv2LRGBA     = cv.COLOR_COLORCVT_MAX + cv.COLOR_Luv2LRGB,
+    cv.CX_Luv2RGBA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_Luv2RGB,
+    cv.CX_RGBA2HLS      = cv.COLOR_COLORCVT_MAX + cv.COLOR_RGB2HLS,
+    cv.CX_RGBA2HLS_FULL = cv.COLOR_COLORCVT_MAX + cv.COLOR_RGB2HLS_FULL,
+    cv.CX_RGBA2HSV      = cv.COLOR_COLORCVT_MAX + cv.COLOR_RGB2HSV,
+    cv.CX_RGBA2HSV_FULL = cv.COLOR_COLORCVT_MAX + cv.COLOR_RGB2HSV_FULL,
+    cv.CX_RGBA2Lab      = cv.COLOR_COLORCVT_MAX + cv.COLOR_RGB2Lab,
+    cv.CX_RGBA2Luv      = cv.COLOR_COLORCVT_MAX + cv.COLOR_RGB2Luv,
+    cv.CX_RGBA2XYZ      = cv.COLOR_COLORCVT_MAX + cv.COLOR_RGB2XYZ,
+    cv.CX_RGBA2YCrCb    = cv.COLOR_COLORCVT_MAX + cv.COLOR_RGB2YCrCb,
+    cv.CX_RGBA2YUV      = cv.COLOR_COLORCVT_MAX + cv.COLOR_RGB2YUV,
+    cv.CX_XYZ2BGRA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_XYZ2BGR,
+    cv.CX_XYZ2RGBA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_XYZ2RGB,
+    cv.CX_YCrCb2BGRA    = cv.COLOR_COLORCVT_MAX + cv.COLOR_YCrCb2BGR,
+    cv.CX_YCrCb2RGBA    = cv.COLOR_COLORCVT_MAX + cv.COLOR_YCrCb2RGB,
+    cv.CX_YUV2BGRA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_YUV2BGR,
+    cv.CX_YUV2RGBA      = cv.COLOR_COLORCVT_MAX + cv.COLOR_YUV2RGB
+  };
+
+  const CvtMode = [
+    "COLOR_BGR2BGR555", "COLOR_BGR2BGR565", "COLOR_BGR2BGRA", "COLOR_BGR2GRAY",
+    "COLOR_BGR2HLS", "COLOR_BGR2HLS_FULL", "COLOR_BGR2HSV", "COLOR_BGR2HSV_FULL",
+    "COLOR_BGR2Lab", "COLOR_BGR2Luv", "COLOR_BGR2RGB", "COLOR_BGR2RGBA", "COLOR_BGR2XYZ",
+    "COLOR_BGR2YCrCb", "COLOR_BGR2YUV", "COLOR_BGR5552BGR", "COLOR_BGR5552BGRA",
+
+    "COLOR_BGR5552GRAY", "COLOR_BGR5552RGB", "COLOR_BGR5552RGBA", "COLOR_BGR5652BGR",
+    "COLOR_BGR5652BGRA", "COLOR_BGR5652GRAY", "COLOR_BGR5652RGB", "COLOR_BGR5652RGBA",
+
+    "COLOR_BGRA2BGR", "COLOR_BGRA2BGR555", "COLOR_BGRA2BGR565", "COLOR_BGRA2GRAY", "COLOR_BGRA2RGBA",
+    "CX_BGRA2HLS", "CX_BGRA2HLS_FULL", "CX_BGRA2HSV", "CX_BGRA2HSV_FULL",
+    "CX_BGRA2Lab", "CX_BGRA2Luv", "CX_BGRA2XYZ",
+    "CX_BGRA2YCrCb", "CX_BGRA2YUV",
+
+    "COLOR_GRAY2BGR", "COLOR_GRAY2BGR555", "COLOR_GRAY2BGR565", "COLOR_GRAY2BGRA",
+
+    "COLOR_HLS2BGR", "COLOR_HLS2BGR_FULL", "COLOR_HLS2RGB", "COLOR_HLS2RGB_FULL",
+    "CX_HLS2BGRA", "CX_HLS2BGRA_FULL", "CX_HLS2RGBA", "CX_HLS2RGBA_FULL",
+
+    "COLOR_HSV2BGR", "COLOR_HSV2BGR_FULL", "COLOR_HSV2RGB", "COLOR_HSV2RGB_FULL",
+    "CX_HSV2BGRA", "CX_HSV2BGRA_FULL", "CX_HSV2RGBA", "CX_HSV2RGBA_FULL",
+
+    "COLOR_Lab2BGR", "COLOR_Lab2LBGR", "COLOR_Lab2LRGB", "COLOR_Lab2RGB",
+    "CX_Lab2BGRA", "CX_Lab2LBGRA", "CX_Lab2LRGBA", "CX_Lab2RGBA",
+
+    "COLOR_LBGR2Lab", "COLOR_LBGR2Luv", "COLOR_LRGB2Lab", "COLOR_LRGB2Luv",
+    "CX_LBGRA2Lab", "CX_LBGRA2Luv", "CX_LRGBA2Lab", "CX_LRGBA2Luv",
+
+    "COLOR_Luv2BGR", "COLOR_Luv2LBGR", "COLOR_Luv2LRGB", "COLOR_Luv2RGB",
+    "CX_Luv2BGRA", "CX_Luv2LBGRA", "CX_Luv2LRGBA", "CX_Luv2RGBA",
+
+    "COLOR_RGB2BGR555", "COLOR_RGB2BGR565", "COLOR_RGB2GRAY",
+    "COLOR_RGB2HLS", "COLOR_RGB2HLS_FULL", "COLOR_RGB2HSV", "COLOR_RGB2HSV_FULL",
+    "COLOR_RGB2Lab", "COLOR_RGB2Luv", "COLOR_RGB2XYZ", "COLOR_RGB2YCrCb", "COLOR_RGB2YUV",
+
+    "COLOR_RGBA2BGR", "COLOR_RGBA2BGR555", "COLOR_RGBA2BGR565", "COLOR_RGBA2GRAY",
+    "CX_RGBA2HLS", "CX_RGBA2HLS_FULL", "CX_RGBA2HSV", "CX_RGBA2HSV_FULL",
+    "CX_RGBA2Lab", "CX_RGBA2Luv", "CX_RGBA2XYZ",
+    "CX_RGBA2YCrCb", "CX_RGBA2YUV",
+
+    "COLOR_XYZ2BGR", "COLOR_XYZ2RGB", "CX_XYZ2BGRA", "CX_XYZ2RGBA",
+
+    "COLOR_YCrCb2BGR", "COLOR_YCrCb2RGB", "CX_YCrCb2BGRA", "CX_YCrCb2RGBA",
+    "COLOR_YUV2BGR", "COLOR_YUV2RGB", "CX_YUV2BGRA", "CX_YUV2RGBA"
+  ];
+  const CvtModeSize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p];
+  const combiCvtMode = combine(CvtModeSize, CvtMode);
+
+  // Not every conversion supports 16U and 32F perf tests; the reduced mode lists
+  // below follow https://github.com/opencv/opencv/commit/4e679e1cc5b075ec006b29a58b4fe117523fba1d
+  const CvtMode16U = [
+    "COLOR_BGR2BGRA", "COLOR_BGR2GRAY",
+    "COLOR_BGR2RGB", "COLOR_BGR2RGBA", "COLOR_BGR2XYZ",
+    "COLOR_BGR2YCrCb", "COLOR_BGR2YUV",
+
+    "COLOR_BGRA2BGR", "COLOR_BGRA2GRAY", "COLOR_BGRA2RGBA",
+    "CX_BGRA2XYZ",
+    "CX_BGRA2YCrCb", "CX_BGRA2YUV",
+
+    "COLOR_GRAY2BGR", "COLOR_GRAY2BGRA",
+
+    "COLOR_RGB2GRAY",
+    "COLOR_RGB2XYZ", "COLOR_RGB2YCrCb", "COLOR_RGB2YUV",
+
+    "COLOR_RGBA2BGR", "COLOR_RGBA2GRAY",
+    "CX_RGBA2XYZ",
+    "CX_RGBA2YCrCb", "CX_RGBA2YUV",
+
+    "COLOR_XYZ2BGR", "COLOR_XYZ2RGB", "CX_XYZ2BGRA", "CX_XYZ2RGBA",
+
+    "COLOR_YCrCb2BGR", "COLOR_YCrCb2RGB", "CX_YCrCb2BGRA", "CX_YCrCb2RGBA",
+    "COLOR_YUV2BGR", "COLOR_YUV2RGB", "CX_YUV2BGRA", "CX_YUV2RGBA"
+  ];
+  const CvtMode16USize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p];
+  const combiCvtMode16U = combine(CvtMode16USize, CvtMode16U);
+
+  const CvtMode32F = [
+    "COLOR_BGR2BGRA", "COLOR_BGR2GRAY",
+    "COLOR_BGR2HLS", "COLOR_BGR2HLS_FULL", "COLOR_BGR2HSV", "COLOR_BGR2HSV_FULL",
+    "COLOR_BGR2Lab", "COLOR_BGR2Luv", "COLOR_BGR2RGB", "COLOR_BGR2RGBA", "COLOR_BGR2XYZ",
+    "COLOR_BGR2YCrCb", "COLOR_BGR2YUV",
+
+    "COLOR_BGRA2BGR", "COLOR_BGRA2GRAY", "COLOR_BGRA2RGBA",
+    "CX_BGRA2HLS", "CX_BGRA2HLS_FULL", "CX_BGRA2HSV", "CX_BGRA2HSV_FULL",
+    "CX_BGRA2Lab", "CX_BGRA2Luv", "CX_BGRA2XYZ",
+    "CX_BGRA2YCrCb", "CX_BGRA2YUV",
+
+    "COLOR_GRAY2BGR", "COLOR_GRAY2BGRA",
+
+    "COLOR_HLS2BGR", "COLOR_HLS2BGR_FULL", "COLOR_HLS2RGB", "COLOR_HLS2RGB_FULL",
+    "CX_HLS2BGRA", "CX_HLS2BGRA_FULL", "CX_HLS2RGBA", "CX_HLS2RGBA_FULL",
+
+    "COLOR_HSV2BGR", "COLOR_HSV2BGR_FULL", "COLOR_HSV2RGB", "COLOR_HSV2RGB_FULL",
+    "CX_HSV2BGRA", "CX_HSV2BGRA_FULL", "CX_HSV2RGBA", "CX_HSV2RGBA_FULL",
+
+    "COLOR_Lab2BGR", "COLOR_Lab2LBGR", "COLOR_Lab2LRGB", "COLOR_Lab2RGB",
+    "CX_Lab2BGRA", "CX_Lab2LBGRA", "CX_Lab2LRGBA", "CX_Lab2RGBA",
+
+    "COLOR_LBGR2Lab", "COLOR_LBGR2Luv", "COLOR_LRGB2Lab", "COLOR_LRGB2Luv",
+    "CX_LBGRA2Lab", "CX_LBGRA2Luv", "CX_LRGBA2Lab", "CX_LRGBA2Luv",
+
+    "COLOR_Luv2BGR", "COLOR_Luv2LBGR", "COLOR_Luv2LRGB", "COLOR_Luv2RGB",
+    "CX_Luv2BGRA", "CX_Luv2LBGRA", "CX_Luv2LRGBA", "CX_Luv2RGBA",
+
+    "COLOR_RGB2GRAY",
+    "COLOR_RGB2HLS", "COLOR_RGB2HLS_FULL", "COLOR_RGB2HSV", "COLOR_RGB2HSV_FULL",
+    "COLOR_RGB2Lab", "COLOR_RGB2Luv", "COLOR_RGB2XYZ", "COLOR_RGB2YCrCb", "COLOR_RGB2YUV",
+
+    "COLOR_RGBA2BGR", "COLOR_RGBA2GRAY",
+    "CX_RGBA2HLS", "CX_RGBA2HLS_FULL", "CX_RGBA2HSV", "CX_RGBA2HSV_FULL",
+    "CX_RGBA2Lab", "CX_RGBA2Luv", "CX_RGBA2XYZ",
+    "CX_RGBA2YCrCb", "CX_RGBA2YUV",
+
+    "COLOR_XYZ2BGR", "COLOR_XYZ2RGB", "CX_XYZ2BGRA", "CX_XYZ2RGBA",
+
+    "COLOR_YCrCb2BGR", "COLOR_YCrCb2RGB", "CX_YCrCb2BGRA", "CX_YCrCb2RGBA",
+    "COLOR_YUV2BGR", "COLOR_YUV2RGB", "CX_YUV2BGRA", "CX_YUV2RGBA"
+  ];
+  const CvtMode32FSize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p];
+  const combiCvtMode32F = combine(CvtMode32FSize, CvtMode32F);
+
+  const CvtModeBayer = [
+    "COLOR_BayerBG2BGR", "COLOR_BayerBG2BGRA", "COLOR_BayerBG2BGR_VNG", "COLOR_BayerBG2GRAY",
+    "COLOR_BayerGB2BGR", "COLOR_BayerGB2BGRA", "COLOR_BayerGB2BGR_VNG", "COLOR_BayerGB2GRAY",
+    "COLOR_BayerGR2BGR", "COLOR_BayerGR2BGRA", "COLOR_BayerGR2BGR_VNG", "COLOR_BayerGR2GRAY",
+    "COLOR_BayerRG2BGR", "COLOR_BayerRG2BGRA", "COLOR_BayerRG2BGR_VNG", "COLOR_BayerRG2GRAY"
+  ];
+  const CvtModeBayerSize = [cvSize.szODD, cvSize.szVGA];
+  const combiCvtModeBayer = combine(CvtModeBayerSize, CvtModeBayer);
+
+
+  const CvtMode2 = [
+    "COLOR_YUV2BGR_NV12", "COLOR_YUV2BGRA_NV12", "COLOR_YUV2RGB_NV12", "COLOR_YUV2RGBA_NV12", "COLOR_YUV2BGR_NV21", "COLOR_YUV2BGRA_NV21", "COLOR_YUV2RGB_NV21", "COLOR_YUV2RGBA_NV21",
+    "COLOR_YUV2BGR_YV12", "COLOR_YUV2BGRA_YV12", "COLOR_YUV2RGB_YV12", "COLOR_YUV2RGBA_YV12", "COLOR_YUV2BGR_IYUV", "COLOR_YUV2BGRA_IYUV", "COLOR_YUV2RGB_IYUV", "COLOR_YUV2RGBA_IYUV",
+    "COLOR_YUV2GRAY_420", "COLOR_YUV2RGB_UYVY", "COLOR_YUV2BGR_UYVY", "COLOR_YUV2RGBA_UYVY", "COLOR_YUV2BGRA_UYVY", "COLOR_YUV2RGB_YUY2", "COLOR_YUV2BGR_YUY2", "COLOR_YUV2RGB_YVYU",
+    "COLOR_YUV2BGR_YVYU", "COLOR_YUV2RGBA_YUY2", "COLOR_YUV2BGRA_YUY2", "COLOR_YUV2RGBA_YVYU", "COLOR_YUV2BGRA_YVYU"
+  ];
+  const CvtMode2Size = [cvSize.szVGA, cvSize.sz1080p, cvSize.sz130x60];
+  const combiCvtMode2 = combine(CvtMode2Size, CvtMode2);
+
+  const CvtMode3 = [
+    "COLOR_RGB2YUV_IYUV", "COLOR_BGR2YUV_IYUV", "COLOR_RGBA2YUV_IYUV", "COLOR_BGRA2YUV_IYUV",
+    "COLOR_RGB2YUV_YV12", "COLOR_BGR2YUV_YV12", "COLOR_RGBA2YUV_YV12", "COLOR_BGRA2YUV_YV12"
+  ];
+  const CvtMode3Size = [cvSize.szVGA, cvSize.sz720p, cvSize.sz1080p, cvSize.sz130x60];
+  const combiCvtMode3 = combine(CvtMode3Size, CvtMode3);
+
+  const EdgeAwareBayerMode = [
+    "COLOR_BayerBG2BGR_EA", "COLOR_BayerGB2BGR_EA", "COLOR_BayerRG2BGR_EA", "COLOR_BayerGR2BGR_EA"
+  ];
+  const EdgeAwareBayerModeSize = [cvSize.szVGA, cvSize.sz720p, cvSize.sz1080p, cvSize.sz130x60];
+  const combiEdgeAwareBayer = combine(EdgeAwareBayerModeSize, EdgeAwareBayerMode);
+
+  // This function returns an array. The 1st element is the channel number of
+  // source mat and 2nd element is the channel number of destination mat.
+  function getConversionInfo(cvtMode) {
+    switch(cvtMode) {
+      case "COLOR_BayerBG2GRAY": case "COLOR_BayerGB2GRAY":
+      case "COLOR_BayerGR2GRAY": case "COLOR_BayerRG2GRAY":
+      case "COLOR_YUV2GRAY_420":
+        return [1, 1];
+      case "COLOR_GRAY2BGR555": case "COLOR_GRAY2BGR565":
+        return [1, 2];
+      case "COLOR_BayerBG2BGR": case "COLOR_BayerBG2BGR_VNG":
+      case "COLOR_BayerGB2BGR": case "COLOR_BayerGB2BGR_VNG":
+      case "COLOR_BayerGR2BGR": case "COLOR_BayerGR2BGR_VNG":
+      case "COLOR_BayerRG2BGR": case "COLOR_BayerRG2BGR_VNG":
+      case "COLOR_GRAY2BGR":
+      case "COLOR_YUV2BGR_NV12": case "COLOR_YUV2RGB_NV12":
+      case "COLOR_YUV2BGR_NV21": case "COLOR_YUV2RGB_NV21":
+      case "COLOR_YUV2BGR_YV12": case "COLOR_YUV2RGB_YV12":
+      case "COLOR_YUV2BGR_IYUV": case "COLOR_YUV2RGB_IYUV":
+        return [1, 3];
+      case "COLOR_GRAY2BGRA":
+      case "COLOR_YUV2BGRA_NV12": case "COLOR_YUV2RGBA_NV12":
+      case "COLOR_YUV2BGRA_NV21": case "COLOR_YUV2RGBA_NV21":
+      case "COLOR_YUV2BGRA_YV12": case "COLOR_YUV2RGBA_YV12":
+      case "COLOR_YUV2BGRA_IYUV": case "COLOR_YUV2RGBA_IYUV":
+      case "COLOR_BayerBG2BGRA": case "COLOR_BayerGB2BGRA":
+      case "COLOR_BayerGR2BGRA": case "COLOR_BayerRG2BGRA":
+        return [1, 4];
+      case "COLOR_BGR5552GRAY": case "COLOR_BGR5652GRAY":
+        return [2, 1];
+      case "COLOR_BGR5552BGR": case "COLOR_BGR5552RGB":
+      case "COLOR_BGR5652BGR": case "COLOR_BGR5652RGB":
+      case "COLOR_YUV2RGB_UYVY": case "COLOR_YUV2BGR_UYVY":
+      case "COLOR_YUV2RGB_YUY2": case "COLOR_YUV2BGR_YUY2":
+      case "COLOR_YUV2RGB_YVYU": case "COLOR_YUV2BGR_YVYU":
+        return [2, 3];
+      case "COLOR_BGR5552BGRA": case "COLOR_BGR5552RGBA":
+      case "COLOR_BGR5652BGRA": case "COLOR_BGR5652RGBA":
+      case "COLOR_YUV2RGBA_UYVY": case "COLOR_YUV2BGRA_UYVY":
+      case "COLOR_YUV2RGBA_YUY2": case "COLOR_YUV2BGRA_YUY2":
+      case "COLOR_YUV2RGBA_YVYU": case "COLOR_YUV2BGRA_YVYU":
+        return [2, 4];
+      case "COLOR_BGR2GRAY": case "COLOR_RGB2GRAY":
+      case "COLOR_RGB2YUV_IYUV": case "COLOR_RGB2YUV_YV12":
+      case "COLOR_BGR2YUV_IYUV": case "COLOR_BGR2YUV_YV12":
+        return [3, 1];
+      case "COLOR_BGR2BGR555": case "COLOR_BGR2BGR565":
+      case "COLOR_RGB2BGR555": case "COLOR_RGB2BGR565":
+        return [3, 2];
+      case "COLOR_BGR2HLS": case "COLOR_BGR2HLS_FULL":
+      case "COLOR_BGR2HSV": case "COLOR_BGR2HSV_FULL":
+      case "COLOR_BGR2Lab": case "COLOR_BGR2Luv":
+      case "COLOR_BGR2RGB": case "COLOR_BGR2XYZ":
+      case "COLOR_BGR2YCrCb": case "COLOR_BGR2YUV":
+      case "COLOR_HLS2BGR": case "COLOR_HLS2BGR_FULL":
+      case "COLOR_HLS2RGB": case "COLOR_HLS2RGB_FULL":
+      case "COLOR_HSV2BGR": case "COLOR_HSV2BGR_FULL":
+      case "COLOR_HSV2RGB": case "COLOR_HSV2RGB_FULL":
+      case "COLOR_Lab2BGR": case "COLOR_Lab2LBGR":
+      case "COLOR_Lab2LRGB": case "COLOR_Lab2RGB":
+      case "COLOR_LBGR2Lab": case "COLOR_LBGR2Luv":
+      case "COLOR_LRGB2Lab": case "COLOR_LRGB2Luv":
+      case "COLOR_Luv2BGR": case "COLOR_Luv2LBGR":
+      case "COLOR_Luv2LRGB": case "COLOR_Luv2RGB":
+      case "COLOR_RGB2HLS": case "COLOR_RGB2HLS_FULL":
+      case "COLOR_RGB2HSV": case "COLOR_RGB2HSV_FULL":
+      case "COLOR_RGB2Lab": case "COLOR_RGB2Luv":
+      case "COLOR_RGB2XYZ": case "COLOR_RGB2YCrCb":
+      case "COLOR_RGB2YUV": case "COLOR_XYZ2BGR":
+      case "COLOR_XYZ2RGB": case "COLOR_YCrCb2BGR":
+      case "COLOR_YCrCb2RGB": case "COLOR_YUV2BGR":
+      case "COLOR_YUV2RGB":
+        return [3, 3];
+      case "COLOR_BGR2BGRA": case "COLOR_BGR2RGBA":
+      case "CX_HLS2BGRA": case "CX_HLS2BGRA_FULL":
+      case "CX_HLS2RGBA": case "CX_HLS2RGBA_FULL":
+      case "CX_HSV2BGRA": case "CX_HSV2BGRA_FULL":
+      case "CX_HSV2RGBA": case "CX_HSV2RGBA_FULL":
+      case "CX_Lab2BGRA": case "CX_Lab2LBGRA":
+      case "CX_Lab2LRGBA": case "CX_Lab2RGBA":
+      case "CX_Luv2BGRA": case "CX_Luv2LBGRA":
+      case "CX_Luv2LRGBA": case "CX_Luv2RGBA":
+      case "CX_XYZ2BGRA": case "CX_XYZ2RGBA":
+      case "CX_YCrCb2BGRA": case "CX_YCrCb2RGBA":
+      case "CX_YUV2BGRA": case "CX_YUV2RGBA":
+        return [3, 4];
+      case "COLOR_BGRA2GRAY": case "COLOR_RGBA2GRAY":
+      case "COLOR_RGBA2YUV_IYUV": case "COLOR_RGBA2YUV_YV12":
+      case "COLOR_BGRA2YUV_IYUV": case "COLOR_BGRA2YUV_YV12":
+        return [4, 1];
+      case "COLOR_BGRA2BGR555": case "COLOR_BGRA2BGR565":
+      case "COLOR_RGBA2BGR555": case "COLOR_RGBA2BGR565":
+        return [4, 2];
+      case "COLOR_BGRA2BGR": case "CX_BGRA2HLS":
+      case "CX_BGRA2HLS_FULL": case "CX_BGRA2HSV":
+      case "CX_BGRA2HSV_FULL": case "CX_BGRA2Lab":
+      case "CX_BGRA2Luv": case "CX_BGRA2XYZ":
+      case "CX_BGRA2YCrCb": case "CX_BGRA2YUV":
+      case "CX_LBGRA2Lab": case "CX_LBGRA2Luv":
+      case "CX_LRGBA2Lab": case "CX_LRGBA2Luv":
+      case "COLOR_RGBA2BGR": case "CX_RGBA2HLS":
+      case "CX_RGBA2HLS_FULL": case "CX_RGBA2HSV":
+      case "CX_RGBA2HSV_FULL": case "CX_RGBA2Lab":
+      case "CX_RGBA2Luv": case "CX_RGBA2XYZ":
+      case "CX_RGBA2YCrCb": case "CX_RGBA2YUV":
+        return [4, 3];
+      case "COLOR_BGRA2RGBA":
+        return [4, 4];
+      default:
+        console.error("Unknown conversion type");
+        break;
+      };
+      return [0, 0];
+  }
+
+  function getMatType(chPair) {
+    let dataType = "8U";  // now just support "8U" data type, we can set it as a param to extend the data type later.
+    let mat1Type, mat2Type;
+    if (chPair[0] === 0) {
+      mat1Type = `CV_${dataType}C`;
+    } else {
+      mat1Type = `CV_${dataType}C${chPair[0].toString()}`;
+    }
+    if (chPair[1] === 0) {
+      mat2Type = `CV_${dataType}C`;
+    } else {
+      mat2Type = `CV_${dataType}C${chPair[1].toString()}`;
+    }
+    return [mat1Type, mat2Type];
+  }
+
+  function addCvtColorCase(suite) {
+    suite.add('cvtColor', function() {
+      cv.cvtColor(mat1, mat2, mode, 0);
+      }, {
+        'setup': function() {
+          let size = this.params.size;
+          let matType = this.params.matType;
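+          // CX_* codes were defined above as COLOR_COLORCVT_MAX + <base code>, so the
+          // modulo below recovers the base conversion code that cv.cvtColor expects.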
+          let mode = cv[this.params.mode]%cv.COLOR_COLORCVT_MAX;
+          let mat1 = new cv.Mat(size[1], size[0], cv[matType[0]]);
+          let mat2 = new cv.Mat(size[1], size[0], cv[matType[1]]);
+        },
+        'teardown': function() {
+          mat1.delete();
+          mat2.delete();
+        }
+    });
+  }
+
+  function addCvtModeCase(suite, combination) {
+    totalCaseNum += combination.length;
+    for(let i = 0; i < combination.length; ++i) {
+      let size = combination[i][0];
+      let mode = combination[i][1];
+      let chPair = getConversionInfo(mode);
+      let matType = getMatType(chPair);
+      let sizeArray = [size.width, size.height];
+
+      addCvtColorCase(suite);
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        size: sizeArray,
+        matType: matType,
+        mode: mode
+      };
+    };
+  }
+
+  function addCvtModeBayerCase(suite, combination) {
+    totalCaseNum += combination.length;
+    for(let i = 0; i < combination.length; ++i) {
+      let size = combination[i][0];
+      let mode = combination[i][1];
+      let chPair = getConversionInfo(mode);
+      let matType = getMatType(chPair);
+      let sizeArray = [size.width, size.height];
+
+      addCvtColorCase(suite);
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        size: sizeArray,
+        matType: matType,
+        mode: mode
+      };
+    };
+  }
+
+  function addCvtMode2Case(suite, combination) {
+    totalCaseNum += combination.length;
+    for(let i = 0; i < combination.length; ++i) {
+      let size = combination[i][0];
+      let mode = combination[i][1];
+      let chPair = getConversionInfo(mode);
+      let matType = getMatType(chPair);
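+      // The 4:2:0 YUV modes in this set store chroma in height/2 extra rows below
+      // the luma plane, hence the 3/2 * height source size.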
+      let sizeArray = [size.width, size.height+size.height/2];
+
+      addCvtColorCase(suite);
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        size: sizeArray,
+        matType: matType,
+        mode: mode
+      };
+    }
+  }
+
+  function addCvtMode3Case(suite, combination) {
+    totalCaseNum += combination.length;
+    for(let i = 0; i < combination.length; ++i) {
+      let size = combination[i][0];
+      let mode = combination[i][1];
+      let chPair = getConversionInfo(mode);
+      let matType = getMatType(chPair);
+      let sizeArray = [size.width, size.height+size.height/2];
+
+      addCvtColorCase(suite);
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        size: sizeArray,
+        matType: matType,
+        mode: mode
+      };
+    }
+  }
+
+  function addEdgeAwareBayerModeCase(suite, combination) {
+    totalCaseNum += combination.length;
+    for(let i = 0; i < combination.length; ++i) {
+      let size = combination[i][0];
+      let mode = combination[i][1];
+      let chPair = getConversionInfo(mode);
+      let matType = getMatType(chPair);
+      let sizeArray = [size.width, size.height];
+
+      addCvtColorCase(suite);
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        size: sizeArray,
+        matType: matType,
+        mode: mode
+      };
+    }
+  }
+
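+  // Parse a '(<width>x<height>, <mode>)' filter and enqueue only the matching
+  // entries from the registered combinations.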
+  function decodeParams2Case(suite, params) {
+    let sizeStr = (params.match(/[0-9]+/g) || []).slice(0, 2).toString();
+    let mode = (params.match(/CX_[A-Za-z_]+2[A-Za-z_]+/) || params.match(/COLOR_[A-Za-z_]+2[A-Za-z_]+/) || []).toString();
+    let size = cvtStr2cvSize(sizeStr);
+
+    // check if the params match and add case
+    for (let i = 0; i < combinations.length; ++i) {
+      let combination = combinations[i];
+      for (let j = 0; j < combination.length; ++j) {
+        if (size === combination[j][0] && mode === combination[j][1]) {
+          cvtFunc[i](suite, [combination[j]]);
+        }
+      }
+    }
+  }
+
+  function log(message) {
+    console.log(message);
+    if (!isNodeJs) {
+      logElement.innerHTML += `\n\t${message}`;
+    }
+  }
+
+  function setBenchmarkSuite(suite) {
+    suite
+    // add listeners
+    .on('cycle', function(event) {
+      ++currentCaseId;
+      let params = event.target.params;
+      let mode = params.mode;
+      let size = params.size;
+      log(`=== ${event.target.name} ${currentCaseId} ===`);
+      log(`params: (${parseInt(size[0])}x${parseInt(size[1])}, ${mode})`);
+      log('elapsed time: ' + String(event.target.times.elapsed*1000) + ' ms');
+      log('mean time: ' + String(event.target.stats.mean*1000) + ' ms');
+      log('stddev time: ' + String(event.target.stats.deviation*1000) + ' ms');
+      log(String(event.target));
+    })
+    .on('error', function(event) { log(`test case ${event.target.name} failed`); })
+    .on('complete', function(event) {
+      log(`\n ###################################`);
+      log(`Finished testing ${event.currentTarget.length} cases \n`);
+      if (!isNodeJs) {
+        runButton.removeAttribute('disabled');
+        runButton.setAttribute('class', 'btn btn-primary');
+        runButton.innerHTML = 'Run';
+      }
+    });
+  }
+
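+  // Build the suite from the filter string (or all cases when no valid filter
+  // is given), attach the listeners and run it; 'async': true makes the
+  // cycles run asynchronously so the page stays responsive.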
+  function genBenchmarkCase(paramsContent) {
+    let suite = new Benchmark.Suite;
+    totalCaseNum = 0;
+    currentCaseId = 0;
+    if (/\([0-9]+x[0-9]+,[\ ]*\w+\)/g.test(paramsContent.toString())) {
+      let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+\)/g)[0];
+      decodeParams2Case(suite, params);
+    } else {
+      log("no filter or getting invalid params, run all the cases");
+      addCvtModeCase(suite, combiCvtMode);
+      addCvtModeBayerCase(suite, combiCvtModeBayer);
+      addCvtMode2Case(suite, combiCvtMode2);
+      addCvtMode3Case(suite, combiCvtMode3);
+    }
+    setBenchmarkSuite(suite);
+    log(`Running ${totalCaseNum} tests from CvtColor`);
+    suite.run({ 'async': true }); // run the benchmark
+  }
+
+  // init
+  let cvtFunc = [addCvtModeCase, addCvtModeBayerCase, addCvtMode2Case, addCvtMode3Case];//, addEdgeAwareBayerModeCase];
+  let combinations = [combiCvtMode, combiCvtModeBayer, combiCvtMode2, combiCvtMode3];//, combiEdgeAwareBayer];
+
+  // set test filter params
+  if (isNodeJs) {
+    const args = process.argv.slice(2);
+    let paramsContent = '';
+    if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+\)/g.test(args.toString())) {
+      paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+\)/g)[0];
+    }
+    genBenchmarkCase(paramsContent);
+  } else {
+    runButton.onclick = function() {
+      let paramsContent = paramsElement.value;
+      genBenchmarkCase(paramsContent);
+      if (totalCaseNum !== 0) {
+        runButton.setAttribute("disabled", "disabled");
+        runButton.setAttribute('class', 'btn btn-primary disabled');
+        runButton.innerHTML = "Running";
+      }
+    }
+  }
+};
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_resize.html b/modules/js/perf/perf_imgproc/perf_resize.html
new file mode 100644
index 0000000000..66bd220387
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_resize.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h6>Image Processing</h6>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h6>Resize</h6>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the case"/>  for example: (CV_8UC1,640x480,960x540)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_resize.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_resize.js b/modules/js/perf/perf_imgproc/perf_resize.js
new file mode 100644
index 0000000000..4e71db3806
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_resize.js
@@ -0,0 +1,262 @@
+const isNodeJs = typeof window === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+cv.onRuntimeInitialized = () => {
+  console.log('opencv.js loaded');
+  if (isNodeJs) {
+    global.cv = cv;
+    global.combine = HelpFunc.combine;
+    global.fillGradient = HelpFunc.fillGradient;
+    global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+    global.cvSize = Base.cvSize;
+  } else {
+    runButton.removeAttribute('disabled');
+    runButton.setAttribute('class', 'btn btn-primary');
+    runButton.innerHTML = 'Run';
+  }
+  let totalCaseNum, currentCaseId;
+
+  const matTypesUpLinear = ['CV_8UC1', 'CV_8UC2', 'CV_8UC3', 'CV_8UC4'];
+  const size1UpLinear = [cvSize.szVGA];
+  const size2UpLinear = [cvSize.szqHD, cvSize.sz720p];
+  const combiUpLinear = combine(matTypesUpLinear, size1UpLinear, size2UpLinear);
+
+  const combiDownLinear = [
+    ['CV_8UC1', cvSize.szVGA, cvSize.szQVGA],
+    ['CV_8UC2', cvSize.szVGA, cvSize.szQVGA],
+    ['CV_8UC3', cvSize.szVGA, cvSize.szQVGA],
+    ['CV_8UC4', cvSize.szVGA, cvSize.szQVGA],
+    ['CV_8UC1', cvSize.szqHD, cvSize.szVGA],
+    ['CV_8UC2', cvSize.szqHD, cvSize.szVGA],
+    ['CV_8UC3', cvSize.szqHD, cvSize.szVGA],
+    ['CV_8UC4', cvSize.szqHD, cvSize.szVGA],
+    ['CV_8UC1', cvSize.sz720p, cvSize.sz213x120],// face detection min_face_size = 20%
+    ['CV_8UC2', cvSize.sz720p, cvSize.sz213x120],// face detection min_face_size = 20%
+    ['CV_8UC3', cvSize.sz720p, cvSize.sz213x120],// face detection min_face_size = 20%
+    ['CV_8UC4', cvSize.sz720p, cvSize.sz213x120],// face detection min_face_size = 20%
+    ['CV_8UC1', cvSize.sz720p, cvSize.szVGA],
+    ['CV_8UC2', cvSize.sz720p, cvSize.szVGA],
+    ['CV_8UC3', cvSize.sz720p, cvSize.szVGA],
+    ['CV_8UC4', cvSize.sz720p, cvSize.szVGA],
+    ['CV_8UC1', cvSize.sz720p, cvSize.szQVGA],
+    ['CV_8UC2', cvSize.sz720p, cvSize.szQVGA],
+    ['CV_8UC3', cvSize.sz720p, cvSize.szQVGA],
+    ['CV_8UC4', cvSize.sz720p, cvSize.szQVGA]
+  ];
+
+  const matTypesAreaFast = ['CV_8UC1', 'CV_8UC3', 'CV_8UC4', 'CV_16UC1', 'CV_16UC3', 'CV_16UC4'];
+  const sizesAreaFast = [cvSize.szVGA, cvSize.szqHD, cvSize.sz720p, cvSize.sz1080p];
+  const scalesAreaFast = [2];
+  const combiAreaFast = combine(matTypesAreaFast, sizesAreaFast, scalesAreaFast);
+
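+  // Each combination entry is [matType, fromSize, toSize]; the cases below
+  // resize a gradient-filled source with INTER_LINEAR_EXACT.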
+  function addResizeUpLinearCase(suite, combination) {
+    totalCaseNum += combination.length;
+    for (let i = 0; i < combination.length; ++i) {
+      let matType = combination[i][0];
+      let from = combination[i][1];
+      let to = combination[i][2];
+
+      suite.add('resize', function() {
+        cv.resize(src, dst, to, 0, 0, cv.INTER_LINEAR_EXACT);
+        }, {
+          'setup': function() {
+            let from = this.params.from;
+            let to = this.params.to;
+            let matType = cv[this.params.matType];
+            let src = new cv.Mat(from, matType);
+            let dst = new cv.Mat(to, matType);
+            fillGradient(cv, src);
+          },
+          'teardown': function() {
+            src.delete();
+            dst.delete();
+          }
+      });
+
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        from: from,
+        to: to,
+        matType: matType
+      };
+    }
+  }
+
+  function addResizeDownLinearCase(suite, combination) {
+    totalCaseNum += combination.length;
+    for (let i = 0; i < combination.length; ++i) {
+      let matType = combination[i][0];
+      let from = combination[i][1];
+      let to = combination[i][2];
+
+      suite.add('resize', function() {
+        cv.resize(src, dst, to, 0, 0, cv.INTER_LINEAR_EXACT);
+        }, {
+          'setup': function() {
+            let from = this.params.from;
+            let to = this.params.to;
+            let matType = cv[this.params.matType];
+            let src = new cv.Mat(from, matType);
+            let dst = new cv.Mat(to, matType);
+            fillGradient(cv, src);
+          },
+          'teardown': function() {
+            src.delete();
+            dst.delete();
+          }
+      });
+
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        from: from,
+        to: to,
+        matType: matType
+      };
+    }
+  }
+
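+  // INTER_AREA downscaling by an integer factor: the source is rounded down
+  // to a multiple of the scale so the destination size divides evenly.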
+  function addResizeAreaFastCase(suite, combination) {
+    totalCaseNum += combination.length;
+    for (let i = 0; i < combination.length; ++i) {
+      let matType = combination[i][0];
+      // copy the size so the shared cvSize entry is not mutated across cases
+      let from = {width: combination[i][1].width, height: combination[i][1].height};
+      let scale = combination[i][2];
+      from.width = Math.floor(from.width/scale)*scale;
+      from.height = Math.floor(from.height/scale)*scale;
+      let to = {
+        width: from.width/scale,
+        height: from.height/scale
+      };  // for params print
+
+      suite.add('resize', function() {
+        cv.resize(src, dst, dst.size(), 0, 0, cv.INTER_AREA);
+        }, {
+          'setup': function() {
+            let from = this.params.from;
+            let scale = this.params.scale;
+            let matType = cv[this.params.matType];
+            let src = new cv.Mat(from, matType);
+            let dst = new cv.Mat(from.height/scale, from.width/scale, matType);
+          },
+          'teardown': function() {
+            src.delete();
+            dst.delete();
+          }
+      });
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        from: from,
+        scale: scale,
+        matType: matType
+      };
+    }
+  }
+
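+  // Note: the strict equality below assumes cvtStr2cvSize returns the shared
+  // cvSize instances, so object identity (not structural equality) is what
+  // makes a filter match.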
+  function decodeParams2Case(suite, params) {
+    let sizeString = (params.match(/[0-9]+x[0-9]+/g) || []).slice(0, 2).toString();
+    let sizes = (sizeString.match(/[0-9]+/g) || []);
+    let size1Str = sizes.slice(0, 2).toString();
+    let size2Str = sizes.slice(2, 4).toString();
+    let matType = (params.match(/CV_[0-9]+[A-Za-z][A-Za-z][0-9]/) || []).toString();
+    let size1 = cvtStr2cvSize(size1Str);
+    let size2 = cvtStr2cvSize(size2Str);
+    // check if the params match and add case
+    for (let i = 0; i < combinations.length; ++i) {
+      let combination = combinations[i];
+      for (let j = 0; j < combination.length; ++j) {
+        if (matType === combination[j][0] && size1 === combination[j][1] && size2 === combination[j][2]) {
+          resizeFunc[i](suite, [combination[j]]);
+        }
+      }
+    }
+  }
+
+  function log(message) {
+    console.log(message);
+    if (!isNodeJs) {
+      logElement.innerHTML += `\n\t${message}`;
+    }
+  }
+
+  function setBenchmarkSuite(suite) {
+    suite
+    // add listeners
+    .on('cycle', function(event) {
+      ++currentCaseId;
+      let params = event.target.params;
+      let matType = params.matType;
+      let size1 = params.from;
+      let size2 = params.to;
+      log(`=== ${event.target.name} ${currentCaseId} ===`);
+      log(`params: (${matType},${parseInt(size1.width)}x${parseInt(size1.height)},`+
+          `${parseInt(size2.width)}x${parseInt(size2.height)})`);
+      log('elapsed time: ' + String(event.target.times.elapsed*1000) + ' ms');
+      log('mean time: ' + String(event.target.stats.mean*1000) + ' ms');
+      log('stddev time: ' + String(event.target.stats.deviation*1000) + ' ms');
+      log(String(event.target));
+    })
+    .on('error', function(event) { log(`test case ${event.target.name} failed`); })
+    .on('complete', function(event) {
+      log(`\n ###################################`);
+      log(`Finished testing ${event.currentTarget.length} cases \n`);
+      if (!isNodeJs) {
+        runButton.removeAttribute('disabled');
+        runButton.setAttribute('class', 'btn btn-primary');
+        runButton.innerHTML = 'Run';
+      }
+    });
+  }
+
+  function genBenchmarkCase(paramsContent) {
+    let suite = new Benchmark.Suite;
+    totalCaseNum = 0;
+    currentCaseId = 0;
+    if (/\(\w+,[\ ]*[0-9]+x[0-9]+,[\ ]*[0-9]+x[0-9]+\)/g.test(paramsContent.toString())) {
+      let params = paramsContent.toString().match(/\(\w+,[\ ]*[0-9]+x[0-9]+,[\ ]*[0-9]+x[0-9]+\)/g)[0];
+      decodeParams2Case(suite, params);
+    } else {
+      log("no filter or getting invalid params, run all the cases");
+      addResizeUpLinearCase(suite, combiUpLinear);
+      addResizeDownLinearCase(suite, combiDownLinear);
+    }
+    setBenchmarkSuite(suite);
+    log(`Running ${totalCaseNum} tests from Resize`);
+    suite.run({ 'async': true }); // run the benchmark
+  }
+
+  // init
+  let resizeFunc = [addResizeUpLinearCase, addResizeDownLinearCase];//, addResizeAreaFastCase];
+  let combinations = [combiUpLinear, combiDownLinear];//, combiAreaFast];
+
+  // set test filter params
+  if (isNodeJs) {
+    const args = process.argv.slice(2);
+    let paramsContent = '';
+    if (/--test_param_filter=\(\w+,[\ ]*[0-9]+x[0-9]+,[\ ]*[0-9]+x[0-9]+\)/g.test(args.toString())) {
+      paramsContent = args.toString().match(/\(\w+,[\ ]*[0-9]+x[0-9]+,[\ ]*[0-9]+x[0-9]+\)/g)[0];
+    }
+    genBenchmarkCase(paramsContent);
+  } else {
+    runButton.onclick = function() {
+      let paramsContent = paramsElement.value;
+      genBenchmarkCase(paramsContent);
+      if (totalCaseNum !== 0) {
+        runButton.setAttribute("disabled", "disabled");
+        runButton.setAttribute('class', 'btn btn-primary disabled');
+        runButton.innerHTML = "Running";
+      }
+    }
+  }
+};
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_threshold.html b/modules/js/perf/perf_imgproc/perf_threshold.html
new file mode 100644
index 0000000000..2f091f028e
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_threshold.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>OpenCV.js Performance Test</title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
+    <style>
+      body {
+        font-size: 13px;
+      }
+      .top-margin {
+        margin-top:10px;
+      }
+      h1, h4 {
+        margin: 24px 0 0;
+      }
+      h1 {
+        font-size: 2.0em;
+      }
+      h4 {
+        font-size: 1.2em;
+      }
+      pre {
+        font-family: 'Consolas', 'Monaco', monospace, serif;
+        font-size: 12px;
+        tab-size: 2;
+      }
+      input[type=checkbox] {
+        vertical-align: middle;
+      }
+    </style>
+  </head>
+  <body>
+    <div class="container" id="container">
+      <div class="row">
+        <div class="col-12">
+          <h1>OpenCV.js Performance Test</h1>
+          <div>
+            <h4>Modules</h4>
+              <h6>Image Processing</h6>
+          </div>
+          <div>
+            <h4>Kernels</h4>
+              <h6>Threshold</h6>
+          </div>
+          <div>
+            <h4>Parameters Filter</h4>
+            <input type="text" id="params" min="1" size="40" placeholder="default: run all the case"/>  for example: (1920x1080, CV_8UC1, THRESH_BINARY)
+          </div>
+          <div class='row labels-wrapper' id='labelitem'></div>
+          <div class="row top-margin">
+          </div>
+          <div>
+          <button type="button" id="runButton" class="btn btn-primary disabled" disabled="disabled">Loading</button>
+            (It will take several minutes)</div>
+          <div class="row top-margin">
+          </div>
+          <div>
+            <pre id="log"></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.11/lodash.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/platform/1.3.5/platform.js"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/benchmark/2.1.4/benchmark.js"></script>
+    <script src="../../opencv.js" type="text/javascript"></script>
+    <script src="../base.js"></script>
+    <script src="../perf_helpfunc.js"></script>
+    <script src="./perf_threshold.js"></script>
+  </body>
+</html>
\ No newline at end of file
diff --git a/modules/js/perf/perf_imgproc/perf_threshold.js b/modules/js/perf/perf_imgproc/perf_threshold.js
new file mode 100644
index 0000000000..2616a2feaa
--- /dev/null
+++ b/modules/js/perf/perf_imgproc/perf_threshold.js
@@ -0,0 +1,217 @@
+const isNodeJs = typeof window === 'undefined';
+
+if (isNodeJs) {
+  var Benchmark = require('benchmark');
+  var cv = require('../../opencv');
+  var HelpFunc = require('../perf_helpfunc');
+  var Base = require('../base');
+} else {
+  var paramsElement = document.getElementById('params');
+  var runButton = document.getElementById('runButton');
+  var logElement = document.getElementById('log');
+}
+
+cv.onRuntimeInitialized = () => {
+  console.log('opencv.js loaded');
+  if (isNodeJs) {
+    global.cv = cv;
+    global.combine = HelpFunc.combine;
+    global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize;
+    global.cvSize = Base.cvSize;
+  } else {
+    runButton.removeAttribute('disabled');
+    runButton.setAttribute('class', 'btn btn-primary');
+    runButton.innerHTML = 'Run';
+  }
+  let totalCaseNum, currentCaseId;
+
+  const typicalMatSizes = [cvSize.szVGA, cvSize.sz720p, cvSize.sz1080p, cvSize.szODD];
+  const matTypes = ['CV_8UC1', 'CV_16SC1', 'CV_32FC1', 'CV_64FC1'];
+  const threshTypes = ['THRESH_BINARY', 'THRESH_BINARY_INV', 'THRESH_TRUNC', 'THRESH_TOZERO', 'THRESH_TOZERO_INV'];
+
+  const combiSizeMatTypeThreshType = combine(typicalMatSizes, matTypes, threshTypes);
+  const combiSizeOnly = combine(typicalMatSizes, ['CV_8UC1'], ['THRESH_BINARY|THRESH_OTSU']);
+
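+  // Each case thresholds a source whose first three bytes are seeded with
+  // 0, 100 and 200, so values fall on both sides of the 127 threshold.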
+  function addSizeMatTypeThreshTypeCase(suite, combination) {
+    totalCaseNum += combination.length;
+    for (let i = 0; i < combination.length; ++i) {
+      let matSize = combination[i][0];
+      let matType = combination[i][1];
+      let threshType = combination[i][2];
+
+      suite.add('threshold', function() {
+        cv.threshold(src, dst, threshold, thresholdMax, threshType);
+        }, {
+          'setup': function() {
+            let matSize = this.params.matSize;
+            let matType = cv[this.params.matType];
+            let threshType = cv[this.params.threshType];
+            let threshold = 127.0;
+            let thresholdMax = 210.0;
+            let src = new cv.Mat(matSize, matType);
+            let dst = new cv.Mat(matSize, matType);
+            let srcView = src.data;
+            srcView[0] = 0;
+            srcView[1] = 100;
+            srcView[2] = 200;
+          },
+          'teardown': function() {
+            src.delete();
+            dst.delete();
+          }
+      });
+
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        matSize: matSize,
+        matType: matType,
+        threshType: threshType
+      };
+    }
+  }
+
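+  // With THRESH_OTSU the threshold is computed from the image histogram and
+  // the fixed 127 value is ignored; Otsu in OpenCV expects 8-bit
+  // single-channel input, so only CV_8UC1 is benchmarked here.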
+  function addSizeOnlyCase(suite, combination) {
+    totalCaseNum += combination.length;
+    for (let i = 0; i < combination.length; ++i) {
+      let matSize = combination[i][0];
+
+      suite.add('threshold', function() {
+        cv.threshold(src, dst, threshold, thresholdMax, cv.THRESH_BINARY|cv.THRESH_OTSU);
+        }, {
+          'setup': function() {
+            let matSize = this.params.matSize;
+            let threshold = 127.0;
+            let thresholdMax = 210.0;
+            let src = new cv.Mat(matSize, cv.CV_8UC1);
+            let dst = new cv.Mat(matSize, cv.CV_8UC1);
+            let srcView = src.data;
+            srcView[0] = 0;
+            srcView[1] = 100;
+            srcView[2] = 200;
+          },
+          'teardown': function() {
+            src.delete();
+            dst.delete();
+          }
+      });
+
+      // set init params
+      let index = suite.length - 1;
+      suite[index].params = {
+        matSize: matSize,
+        matType: 'CV_8UC1',
+        threshType: 'THRESH_BINARY|THRESH_OTSU'
+      };
+    }
+  }
+
+  function decodeParams2Case(suite, params, isSizeOnly) {
+    let sizeString = (params.match(/[0-9]+x[0-9]+/g) || []).toString();
+    let sizes = sizeString.match(/[0-9]+/g) || [];
+    let size1Str = sizes.slice(0, 2).toString();
+    let matSize = cvtStr2cvSize(size1Str);
+    let matType, threshType;
+    if (isSizeOnly) {
+      matType = 'CV_8UC1';
+      threshType = 'THRESH_BINARY|THRESH_OTSU';
+    } else {
+      matType = (params.match(/CV_[0-9]+[A-Za-z][A-Za-z][0-9]/) || []).toString();
+      threshType = (params.match(/THRESH_[A-Za-z]+_?[A-Za-z]*/) || []).toString();
+    }
+    // check if the params match and add case
+    for (let i = 0; i < combinations.length; ++i) {
+      let combination = combinations[i];
+      for (let j = 0; j < combination.length; ++j) {
+        if (matSize === combination[j][0] && matType === combination[j][1] && threshType === combination[j][2]) {
+          thresholdFunc[i](suite, [combination[j]]);
+        }
+      }
+    }
+  }
+
+  function log(message) {
+    console.log(message);
+    if (!isNodeJs) {
+      logElement.innerHTML += `\n\t${message}`;
+    }
+  }
+
+  function setBenchmarkSuite(suite) {
+    suite
+    // add listeners
+    .on('cycle', function(event) {
+      ++currentCaseId;
+      let params = event.target.params;
+      let matSize = params.matSize;
+      let matType = params.matType;
+      let threshType = params.threshType;
+      log(`=== ${event.target.name} ${currentCaseId} ===`);
+      log(`params: (${parseInt(matSize.width)}x${parseInt(matSize.height)},`+
+          `${matType},${threshType})`);
+      log('elapsed time: ' + String(event.target.times.elapsed*1000) + ' ms');
+      log('mean time: ' + String(event.target.stats.mean*1000) + ' ms');
+      log('stddev time: ' + String(event.target.stats.deviation*1000) + ' ms');
+      log(String(event.target));
+    })
+    .on('error', function(event) { log(`test case ${event.target.name} failed`); })
+    .on('complete', function(event) {
+      log(`\n ###################################`);
+      log(`Finished testing ${event.currentTarget.length} cases \n`);
+      if (!isNodeJs) {
+        runButton.removeAttribute('disabled');
+        runButton.setAttribute('class', 'btn btn-primary');
+        runButton.innerHTML = 'Run';
+      }
+    });
+  }
+
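+  // Two filter shapes are accepted: '(WxH, matType, threshType)' selects from
+  // the full grid, while a bare 'WxH' selects the CV_8UC1 Otsu-only case.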
+  function genBenchmarkCase(paramsContent) {
+    let suite = new Benchmark.Suite;
+    totalCaseNum = 0;
+    currentCaseId = 0;
+    if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g.test(paramsContent.toString())) {
+      let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g)[0];
+      let isSizeOnly = false;
+      decodeParams2Case(suite, params, isSizeOnly);
+    } else if (/[\ ]*[0-9]+x[0-9]+[\ ]*/g.test(paramsContent.toString())) {
+      let params = paramsContent.toString().match(/[\ ]*[0-9]+x[0-9]+[\ ]*/g)[0];
+      let isSizeOnly = true;
+      decodeParams2Case(suite, params, isSizeOnly);
+    } else {
+      log("no filter or getting invalid params, run all the cases");
+      addSizeMatTypeThreshTypeCase(suite, combiSizeMatTypeThreshType);
+      addSizeOnlyCase(suite, combiSizeOnly);
+    }
+    setBenchmarkSuite(suite);
+    log(`Running ${totalCaseNum} tests from Threshold`);
+    suite.run({ 'async': true }); // run the benchmark
+  }
+
+  // init
+  let thresholdFunc = [addSizeMatTypeThreshTypeCase, addSizeOnlyCase];
+  let combinations = [combiSizeMatTypeThreshType, combiSizeOnly];
+
+  // set test filter params
+  if (isNodeJs) {
+    const args = process.argv.slice(2);
+    let paramsContent = '';
+    if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g.test(args.toString())) {
+      paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g)[0];
+    } else if (/--test_param_filter=[\ ]*[0-9]+x[0-9]+[\ ]*/g.test(args.toString())) {
+      paramsContent = args.toString().match(/[\ ]*[0-9]+x[0-9]+[\ ]*/g)[0];
+    }
+    genBenchmarkCase(paramsContent);
+  } else {
+    runButton.onclick = function() {
+      let paramsContent = paramsElement.value;
+      genBenchmarkCase(paramsContent);
+      if (totalCaseNum !== 0) {
+        runButton.setAttribute("disabled", "disabled");
+        runButton.setAttribute('class', 'btn btn-primary disabled');
+        runButton.innerHTML = "Running";
+      }
+    }
+  }
+};
\ No newline at end of file
diff --git a/modules/js/src/core_bindings.cpp b/modules/js/src/core_bindings.cpp
index 0a42786171..279ee78bb1 100644
--- a/modules/js/src/core_bindings.cpp
+++ b/modules/js/src/core_bindings.cpp
@@ -71,6 +71,18 @@
 #include <emscripten/bind.h>
 
 @INCLUDES@
+#include "../../../modules/core/src/parallel_impl.hpp"
+
+#ifdef TEST_WASM_INTRIN
+#include "../../../modules/core/include/opencv2/core/hal/intrin.hpp"
+#include "../../../modules/core/include/opencv2/core/utils/trace.hpp"
+#include "../../../modules/ts/include/opencv2/ts/ts_gtest.h"
+namespace cv {
+namespace hal {
+#include "../../../modules/core/test/test_intrin_utils.hpp"
+}
+}
+#endif
 
 using namespace emscripten;
 using namespace cv;
@@ -368,6 +380,51 @@ namespace binding_utils
     std::string getBuildInformation() {
         return cv::getBuildInformation();
     }
+
+#ifdef TEST_WASM_INTRIN
+    void test_hal_intrin_uint8() {
+        cv::hal::test_hal_intrin_uint8();
+    }
+    void test_hal_intrin_int8() {
+        cv::hal::test_hal_intrin_int8();
+    }
+    void test_hal_intrin_uint16() {
+        cv::hal::test_hal_intrin_uint16();
+    }
+    void test_hal_intrin_int16() {
+        cv::hal::test_hal_intrin_int16();
+    }
+    void test_hal_intrin_uint32() {
+        cv::hal::test_hal_intrin_uint32();
+    }
+    void test_hal_intrin_int32() {
+        cv::hal::test_hal_intrin_int32();
+    }
+    void test_hal_intrin_uint64() {
+        cv::hal::test_hal_intrin_uint64();
+    }
+    void test_hal_intrin_int64() {
+        cv::hal::test_hal_intrin_int64();
+    }
+    void test_hal_intrin_float32() {
+        cv::hal::test_hal_intrin_float32();
+    }
+    void test_hal_intrin_float64() {
+        cv::hal::test_hal_intrin_float64();
+    }
+    void test_hal_intrin_all() {
+        cv::hal::test_hal_intrin_uint8();
+        cv::hal::test_hal_intrin_int8();
+        cv::hal::test_hal_intrin_uint16();
+        cv::hal::test_hal_intrin_int16();
+        cv::hal::test_hal_intrin_uint32();
+        cv::hal::test_hal_intrin_int32();
+        cv::hal::test_hal_intrin_uint64();
+        cv::hal::test_hal_intrin_int64();
+        cv::hal::test_hal_intrin_float32();
+        cv::hal::test_hal_intrin_float64();
+    }
+#endif
 }
 
 EMSCRIPTEN_BINDINGS(binding_utils)
@@ -542,10 +599,10 @@ EMSCRIPTEN_BINDINGS(binding_utils)
         .field("distance", &cv::DMatch::distance);
 
     emscripten::value_array<cv::Scalar_<double>> ("Scalar")
-        .element(index<0>())
-        .element(index<1>())
-        .element(index<2>())
-        .element(index<3>());
+        .element(emscripten::index<0>())
+        .element(emscripten::index<1>())
+        .element(emscripten::index<2>())
+        .element(emscripten::index<3>());
 
     emscripten::value_object<binding_utils::MinMaxLoc>("MinMaxLoc")
         .field("minVal", &binding_utils::MinMaxLoc::minVal)
@@ -621,6 +678,25 @@ EMSCRIPTEN_BINDINGS(binding_utils)
 
     function("getBuildInformation", &binding_utils::getBuildInformation);
 
+#ifdef HAVE_PTHREADS_PF
+    function("parallel_pthreads_set_threads_num", &cv::parallel_pthreads_set_threads_num);
+    function("parallel_pthreads_get_threads_num", &cv::parallel_pthreads_get_threads_num);
+#endif
+
+#ifdef TEST_WASM_INTRIN
+    function("test_hal_intrin_uint8", &binding_utils::test_hal_intrin_uint8);
+    function("test_hal_intrin_int8", &binding_utils::test_hal_intrin_int8);
+    function("test_hal_intrin_uint16", &binding_utils::test_hal_intrin_uint16);
+    function("test_hal_intrin_int16", &binding_utils::test_hal_intrin_int16);
+    function("test_hal_intrin_uint32", &binding_utils::test_hal_intrin_uint32);
+    function("test_hal_intrin_int32", &binding_utils::test_hal_intrin_int32);
+    function("test_hal_intrin_uint64", &binding_utils::test_hal_intrin_uint64);
+    function("test_hal_intrin_int64", &binding_utils::test_hal_intrin_int64);
+    function("test_hal_intrin_float32", &binding_utils::test_hal_intrin_float32);
+    function("test_hal_intrin_float64", &binding_utils::test_hal_intrin_float64);
+    function("test_hal_intrin_all", &binding_utils::test_hal_intrin_all);
+#endif
+
     constant("CV_8UC1", CV_8UC1);
     constant("CV_8UC2", CV_8UC2);
     constant("CV_8UC3", CV_8UC3);
diff --git a/modules/js/src/make_umd.py b/modules/js/src/make_umd.py
index 7f7dffc880..8e50da585d 100644
--- a/modules/js/src/make_umd.py
+++ b/modules/js/src/make_umd.py
@@ -87,9 +87,15 @@ def make_umd(opencvjs, cvjs):
     // only CommonJS-like environments that support module.exports,
     // like Node.
     module.exports = factory();
-  } else {
+  } else if (typeof window === 'object') {
     // Browser globals
     root.cv = factory();
+  } else if (typeof importScripts === 'function') {
+    // Web worker
+    root.cv = factory;
+  } else {
+    // Other shells, e.g. d8
+    root.cv = factory();
   }
 }(this, function () {
   %s
diff --git a/modules/ml/src/ann_mlp.cpp b/modules/ml/src/ann_mlp.cpp
index 3155c018ce..3843564e15 100644
--- a/modules/ml/src/ann_mlp.cpp
+++ b/modules/ml/src/ann_mlp.cpp
@@ -848,6 +848,7 @@ public:
 
     bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
     {
+        CV_Assert(!trainData.empty());
         const int MAX_ITER = 1000;
         const double DEFAULT_EPSILON = FLT_EPSILON;
 
@@ -883,6 +884,7 @@ public:
     }
     int train_anneal(const Ptr<TrainData>& trainData)
     {
+        CV_Assert(!trainData.empty());
         SimulatedAnnealingANN_MLP s(*this, trainData);
         trained = true; // Enable call to CalcError
         int iter = simulatedAnnealingSolver(s, params.initialT, params.finalT, params.coolingRatio, params.itePerStep, NULL, params.rEnergy);
diff --git a/modules/ml/src/boost.cpp b/modules/ml/src/boost.cpp
index b3e8c2724a..4b94410eeb 100644
--- a/modules/ml/src/boost.cpp
+++ b/modules/ml/src/boost.cpp
@@ -88,6 +88,7 @@ public:
 
     void startTraining( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
     {
+        CV_Assert(!trainData.empty());
         DTreesImpl::startTraining(trainData, flags);
         sumResult.assign(w->sidx.size(), 0.);
 
@@ -184,6 +185,7 @@ public:
 
     bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
     {
+        CV_Assert(!trainData.empty());
         startTraining(trainData, flags);
         int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000;
         vector<int> sidx = w->sidx;
@@ -482,6 +484,7 @@ public:
 
     bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
     {
+        CV_Assert(!trainData.empty());
         return impl.train(trainData, flags);
     }
 
diff --git a/modules/ml/src/em.cpp b/modules/ml/src/em.cpp
index c2dfc9c523..ec73bfd1b5 100644
--- a/modules/ml/src/em.cpp
+++ b/modules/ml/src/em.cpp
@@ -112,6 +112,7 @@ public:
 
     bool train(const Ptr<TrainData>& data, int) CV_OVERRIDE
     {
+        CV_Assert(!data.empty());
         Mat samples = data->getTrainSamples(), labels;
         return trainEM(samples, labels, noArray(), noArray());
     }
diff --git a/modules/ml/src/inner_functions.cpp b/modules/ml/src/inner_functions.cpp
index 6f8b222d19..b823c5ba22 100644
--- a/modules/ml/src/inner_functions.cpp
+++ b/modules/ml/src/inner_functions.cpp
@@ -59,9 +59,10 @@ bool StatModel::empty() const { return !isTrained(); }
 
 int StatModel::getVarCount() const { return 0; }
 
-bool StatModel::train( const Ptr<TrainData>&, int )
+bool StatModel::train(const Ptr<TrainData>& trainData, int )
 {
     CV_TRACE_FUNCTION();
+    CV_Assert(!trainData.empty());
     CV_Error(CV_StsNotImplemented, "");
     return false;
 }
@@ -69,6 +70,7 @@ bool StatModel::train( const Ptr<TrainData>&, int )
 bool StatModel::train( InputArray samples, int layout, InputArray responses )
 {
     CV_TRACE_FUNCTION();
+    CV_Assert(!samples.empty());
     return train(TrainData::create(samples, layout, responses));
 }
 
@@ -134,6 +136,7 @@ public:
 float StatModel::calcError(const Ptr<TrainData>& data, bool testerr, OutputArray _resp) const
 {
     CV_TRACE_FUNCTION_SKIP_NESTED();
+    CV_Assert(!data.empty());
     Mat samples = data->getSamples();
     Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
     Mat weights = testerr ? data->getTestSampleWeights() : data->getTrainSampleWeights();
diff --git a/modules/ml/src/knearest.cpp b/modules/ml/src/knearest.cpp
index dcc201158d..ca23d0f4d6 100644
--- a/modules/ml/src/knearest.cpp
+++ b/modules/ml/src/knearest.cpp
@@ -73,6 +73,7 @@ public:
 
     bool train( const Ptr<TrainData>& data, int flags )
     {
+        CV_Assert(!data.empty());
         Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
         Mat new_responses;
         data->getTrainResponses().convertTo(new_responses, CV_32F);
@@ -494,6 +495,7 @@ public:
 
     bool train( const Ptr<TrainData>& data, int flags ) CV_OVERRIDE
     {
+        CV_Assert(!data.empty());
         return impl->train(data, flags);
     }
 
diff --git a/modules/ml/src/lr.cpp b/modules/ml/src/lr.cpp
index 166b6a39d8..ad7b8079a2 100644
--- a/modules/ml/src/lr.cpp
+++ b/modules/ml/src/lr.cpp
@@ -142,12 +142,10 @@ Ptr<LogisticRegression> LogisticRegression::load(const String& filepath, const S
 bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
 {
     CV_TRACE_FUNCTION_SKIP_NESTED();
+    CV_Assert(!trainData.empty());
+
     // return value
     bool ok = false;
-
-    if (trainData.empty()) {
-        return false;
-    }
     clear();
     Mat _data_i = trainData->getSamples();
     Mat _labels_i = trainData->getResponses();
diff --git a/modules/ml/src/nbayes.cpp b/modules/ml/src/nbayes.cpp
index baa46d8f0a..60dda0c7d4 100644
--- a/modules/ml/src/nbayes.cpp
+++ b/modules/ml/src/nbayes.cpp
@@ -54,6 +54,7 @@ public:
 
     bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
     {
+        CV_Assert(!trainData.empty());
         const float min_variation = FLT_EPSILON;
         Mat responses = trainData->getNormCatResponses();
         Mat __cls_labels = trainData->getClassLabels();
diff --git a/modules/ml/src/rtrees.cpp b/modules/ml/src/rtrees.cpp
index b7e32b92b0..d3b0d67993 100644
--- a/modules/ml/src/rtrees.cpp
+++ b/modules/ml/src/rtrees.cpp
@@ -111,6 +111,7 @@ public:
     void startTraining( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
     {
         CV_TRACE_FUNCTION();
+        CV_Assert(!trainData.empty());
         DTreesImpl::startTraining(trainData, flags);
         int nvars = w->data->getNVars();
         int i, m = rparams.nactiveVars > 0 ? rparams.nactiveVars : cvRound(std::sqrt((double)nvars));
@@ -133,6 +134,7 @@ public:
     bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
     {
         CV_TRACE_FUNCTION();
+        CV_Assert(!trainData.empty());
         startTraining(trainData, flags);
         int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ?
             rparams.termCrit.maxCount : 10000;
@@ -464,6 +466,7 @@ public:
     bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
     {
         CV_TRACE_FUNCTION();
+        CV_Assert(!trainData.empty());
         if (impl.getCVFolds() != 0)
             CV_Error(Error::StsBadArg, "Cross validation for RTrees is not implemented");
         return impl.train(trainData, flags);
diff --git a/modules/ml/src/svm.cpp b/modules/ml/src/svm.cpp
index 08c133feaf..d6ac0827c3 100644
--- a/modules/ml/src/svm.cpp
+++ b/modules/ml/src/svm.cpp
@@ -1613,6 +1613,7 @@ public:
 
     bool train( const Ptr<TrainData>& data, int ) CV_OVERRIDE
     {
+        CV_Assert(!data.empty());
         clear();
 
         checkParams();
@@ -1739,6 +1740,7 @@ public:
                     ParamGrid nu_grid, ParamGrid coef_grid, ParamGrid degree_grid,
                     bool balanced ) CV_OVERRIDE
     {
+        CV_Assert(!data.empty());
         checkParams();
 
         int svmType = params.svmType;
diff --git a/modules/ml/src/svmsgd.cpp b/modules/ml/src/svmsgd.cpp
index ac778f4da8..266c7cf300 100644
--- a/modules/ml/src/svmsgd.cpp
+++ b/modules/ml/src/svmsgd.cpp
@@ -230,6 +230,7 @@ float SVMSGDImpl::calcShift(InputArray _samples, InputArray _responses) const
 
 bool SVMSGDImpl::train(const Ptr<TrainData>& data, int)
 {
+    CV_Assert(!data.empty());
     clear();
     CV_Assert( isClassifier() );   //toDo: consider
 
diff --git a/modules/ml/src/tree.cpp b/modules/ml/src/tree.cpp
index 2f9dc049e1..87181b156c 100644
--- a/modules/ml/src/tree.cpp
+++ b/modules/ml/src/tree.cpp
@@ -98,6 +98,7 @@ DTrees::Split::Split()
 
 DTreesImpl::WorkData::WorkData(const Ptr<TrainData>& _data)
 {
+    CV_Assert(!_data.empty());
     data = _data;
     vector<int> subsampleIdx;
     Mat sidx0 = _data->getTrainSampleIdx();
@@ -136,6 +137,7 @@ void DTreesImpl::clear()
 
 void DTreesImpl::startTraining( const Ptr<TrainData>& data, int )
 {
+    CV_Assert(!data.empty());
     clear();
     w = makePtr<WorkData>(data);
 
@@ -223,6 +225,7 @@ void DTreesImpl::endTraining()
 
 bool DTreesImpl::train( const Ptr<TrainData>& trainData, int flags )
 {
+    CV_Assert(!trainData.empty());
     startTraining(trainData, flags);
     bool ok = addTree( w->sidx ) >= 0;
     w.release();
diff --git a/modules/ml/test/test_lr.cpp b/modules/ml/test/test_lr.cpp
index 15d59d77fa..d57825152c 100644
--- a/modules/ml/test/test_lr.cpp
+++ b/modules/ml/test/test_lr.cpp
@@ -94,11 +94,7 @@ void CV_LRTest::run( int /*start_from*/ )
     // initialize variables from the popular Iris Dataset
     string dataFileName = ts->get_data_path() + "iris.data";
     Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
-
-    if (tdata.empty()) {
-        ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
-        return;
-    }
+    ASSERT_FALSE(tdata.empty()) << "Could not find test data file: " << dataFileName;
 
     // run LR classifier train classifier
     Ptr<LogisticRegression> p = LogisticRegression::create();
@@ -156,6 +152,7 @@ void CV_LRTest_SaveLoad::run( int /*start_from*/ )
     // initialize variables from the popular Iris Dataset
     string dataFileName = ts->get_data_path() + "iris.data";
     Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
+    ASSERT_FALSE(tdata.empty()) << "Could not find test data file: " << dataFileName;
 
     Mat responses1, responses2;
     Mat learnt_mat1, learnt_mat2;
diff --git a/modules/ml/test/test_mltests2.cpp b/modules/ml/test/test_mltests2.cpp
index 8ca6177584..8eb1c1135d 100644
--- a/modules/ml/test/test_mltests2.cpp
+++ b/modules/ml/test/test_mltests2.cpp
@@ -105,6 +105,7 @@ int str_to_ann_activation_function(String& str)
 void ann_check_data( Ptr<TrainData> _data )
 {
     CV_TRACE_FUNCTION();
+    CV_Assert(!_data.empty());
     Mat values = _data->getSamples();
     Mat var_idx = _data->getVarIdx();
     int nvars = (int)var_idx.total();
@@ -118,6 +119,7 @@ void ann_check_data( Ptr<TrainData> _data )
 Mat ann_get_new_responses( Ptr<TrainData> _data, map<int, int>& cls_map )
 {
     CV_TRACE_FUNCTION();
+    CV_Assert(!_data.empty());
     Mat train_sidx = _data->getTrainSampleIdx();
     int* train_sidx_ptr = train_sidx.ptr<int>();
     Mat responses = _data->getResponses();
@@ -150,6 +152,8 @@ Mat ann_get_new_responses( Ptr<TrainData> _data, map<int, int>& cls_map )
 float ann_calc_error( Ptr<StatModel> ann, Ptr<TrainData> _data, map<int, int>& cls_map, int type, vector<float> *resp_labels )
 {
     CV_TRACE_FUNCTION();
+    CV_Assert(!ann.empty());
+    CV_Assert(!_data.empty());
     float err = 0;
     Mat samples = _data->getSamples();
     Mat responses = _data->getResponses();
@@ -264,13 +268,15 @@ TEST_P(ML_ANN_METHOD, Test)
     String dataname = folder + "waveform" + '_' + methodName;
 
     Ptr<TrainData> tdata2 = TrainData::loadFromCSV(original_path, 0);
+    ASSERT_FALSE(tdata2.empty()) << "Could not find test data file: " << original_path;
+
     Mat samples = tdata2->getSamples()(Range(0, N), Range::all());
     Mat responses(N, 3, CV_32FC1, Scalar(0));
     for (int i = 0; i < N; i++)
         responses.at<float>(i, static_cast<int>(tdata2->getResponses().at<float>(i, 0))) = 1;
     Ptr<TrainData> tdata = TrainData::create(samples, ml::ROW_SAMPLE, responses);
+    ASSERT_FALSE(tdata.empty());
 
-    ASSERT_FALSE(tdata.empty()) << "Could not find test data file : " << original_path;
     RNG& rng = theRNG();
     rng.state = 0;
     tdata->setTrainTestSplitRatio(0.8);
diff --git a/platforms/ios/build_framework.py b/platforms/ios/build_framework.py
index f246dc98f3..9135ecf7b2 100755
--- a/platforms/ios/build_framework.py
+++ b/platforms/ios/build_framework.py
@@ -49,7 +49,7 @@ def getXCodeMajor():
         raise Exception("Failed to parse Xcode version")
 
 class Builder:
-    def __init__(self, opencv, contrib, dynamic, bitcodedisabled, exclude, enablenonfree, targets, debug, debug_info):
+    def __init__(self, opencv, contrib, dynamic, bitcodedisabled, exclude, disable, enablenonfree, targets, debug, debug_info):
         self.opencv = os.path.abspath(opencv)
         self.contrib = None
         if contrib:
@@ -61,6 +61,7 @@ class Builder:
         self.dynamic = dynamic
         self.bitcodedisabled = bitcodedisabled
         self.exclude = exclude
+        self.disable = disable
         self.enablenonfree = enablenonfree
         self.targets = targets
         self.debug = debug
@@ -154,6 +155,9 @@ class Builder:
             args += ["-DBUILD_opencv_world=OFF"] if not self.dynamic else []
             args += ["-DBUILD_opencv_%s=OFF" % m for m in self.exclude]
 
+        if len(self.disable) > 0:
+            args += ["-DWITH_%s=OFF" % f for f in self.disable]
+
         return args
 
     def getBuildCommand(self, archs, target):
@@ -291,6 +295,7 @@ if __name__ == "__main__":
     parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)')
     parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)')
     parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework')
+    parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF)')
     parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)')
     parser.add_argument('--disable-bitcode', default=False, dest='bitcodedisabled', action='store_true', help='disable bitcode (enabled by default)')
     parser.add_argument('--iphoneos_deployment_target', default=os.environ.get('IPHONEOS_DEPLOYMENT_TARGET', IPHONEOS_DEPLOYMENT_TARGET), help='specify IPHONEOS_DEPLOYMENT_TARGET')
@@ -308,7 +313,7 @@ if __name__ == "__main__":
     iphonesimulator_archs = args.iphonesimulator_archs.split(',')
     print('Using iPhoneSimulator ARCHS=' + str(iphonesimulator_archs))
 
-    b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.bitcodedisabled, args.without, args.enablenonfree,
+    b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.bitcodedisabled, args.without, args.disable, args.enablenonfree,
         [
             (iphoneos_archs, "iPhoneOS"),
         ] if os.environ.get('BUILD_PRECOMMIT', None) else
diff --git a/platforms/js/build_js.py b/platforms/js/build_js.py
index a3a9698bc5..393c96a1b1 100644
--- a/platforms/js/build_js.py
+++ b/platforms/js/build_js.py
@@ -78,6 +78,7 @@ class Builder:
 
     def get_cmake_cmd(self):
         cmd = ["cmake",
+               "-DENABLE_PIC=FALSE", # To workaround emscripten upstream backend issue https://github.com/emscripten-core/emscripten/issues/8761
                "-DCMAKE_BUILD_TYPE=Release",
                "-DCMAKE_TOOLCHAIN_FILE='%s'" % self.get_toolchain_file(),
                "-DCPU_BASELINE=''",
@@ -103,7 +104,6 @@ class Builder:
                "-DWITH_OPENNI2=OFF",
                "-DWITH_PNG=OFF",
                "-DWITH_TBB=OFF",
-               "-DWITH_PTHREADS_PF=OFF",
                "-DWITH_TIFF=OFF",
                "-DWITH_V4L=OFF",
                "-DWITH_OPENCL=OFF",
@@ -145,6 +145,21 @@ class Builder:
         else:
             cmd.append("-DBUILD_DOCS=OFF")
 
+        if self.options.threads:
+            cmd.append("-DWITH_PTHREADS_PF=ON")
+        else:
+            cmd.append("-DWITH_PTHREADS_PF=OFF")
+
+        if self.options.simd:
+            cmd.append("-DCV_ENABLE_INTRINSICS=ON")
+        else:
+            cmd.append("-DCV_ENABLE_INTRINSICS=OFF")
+
+        if self.options.build_wasm_intrin_test:
+            cmd.append("-DBUILD_WASM_INTRIN_TESTS=ON")
+        else:
+            cmd.append("-DBUILD_WASM_INTRIN_TESTS=OFF")
+
         flags = self.get_build_flags()
         if flags:
             cmd += ["-DCMAKE_C_FLAGS='%s'" % flags,
@@ -157,8 +172,14 @@ class Builder:
             flags += "-s WASM=1 "
         elif self.options.disable_wasm:
             flags += "-s WASM=0 "
+        if self.options.threads:
+            flags += "-s USE_PTHREADS=1 -s PTHREAD_POOL_SIZE=4 "
+        else:
+            flags += "-s USE_PTHREADS=0 "
         if self.options.enable_exception:
             flags += "-s DISABLE_EXCEPTION_CATCHING=0 "
+        if self.options.simd:
+            flags += "-msimd128 "
         return flags
 
     def config(self):
@@ -172,6 +193,9 @@ class Builder:
     def build_test(self):
         execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv_js_test"])
 
+    def build_perf(self):
+        execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv_js_perf"])
+
     def build_doc(self):
         execute(["make", "-j", str(multiprocessing.cpu_count()), "doxygen"])
 
@@ -190,12 +214,16 @@ if __name__ == "__main__":
     parser.add_argument('--emscripten_dir', default=emscripten_dir, help="Path to Emscripten to use for build")
     parser.add_argument('--build_wasm', action="store_true", help="Build OpenCV.js in WebAssembly format")
     parser.add_argument('--disable_wasm', action="store_true", help="Build OpenCV.js in Asm.js format")
+    parser.add_argument('--threads', action="store_true", help="Build OpenCV.js with threads optimization")
+    parser.add_argument('--simd', action="store_true", help="Build OpenCV.js with SIMD optimization")
     parser.add_argument('--build_test', action="store_true", help="Build tests")
+    parser.add_argument('--build_perf', action="store_true", help="Build performance tests")
     parser.add_argument('--build_doc', action="store_true", help="Build tutorials")
     parser.add_argument('--clean_build_dir', action="store_true", help="Clean build dir")
     parser.add_argument('--skip_config', action="store_true", help="Skip cmake config")
     parser.add_argument('--config_only', action="store_true", help="Only do cmake config")
     parser.add_argument('--enable_exception', action="store_true", help="Enable exception handling")
+    parser.add_argument('--build_wasm_intrin_test', default=False, action="store_true", help="Build WASM intrin tests")
     args = parser.parse_args()
 
     log.basicConfig(format='%(message)s', level=log.DEBUG)
@@ -240,6 +268,12 @@ if __name__ == "__main__":
         log.info("=====")
         builder.build_test()
 
+    if args.build_perf:
+        log.info("=====")
+        log.info("===== Building OpenCV.js performance tests")
+        log.info("=====")
+        builder.build_perf()
+
     if args.build_doc:
         log.info("=====")
         log.info("===== Building OpenCV.js tutorials")
@@ -260,6 +294,12 @@ if __name__ == "__main__":
         if check_file(opencvjs_test_path):
             log.info("OpenCV.js tests location: %s", opencvjs_test_path)
 
+    if args.build_perf:
+        opencvjs_perf_path = os.path.join(builder.build_dir, "bin", "perf")
+        opencvjs_perf_base_path = os.path.join(builder.build_dir, "bin", "perf", "base.js")
+        if check_file(opencvjs_perf_base_path):
+            log.info("OpenCV.js performance tests location: %s", opencvjs_perf_path)
+
     if args.build_doc:
         opencvjs_tutorial_path = find_file("tutorial_js_root.html", os.path.join(builder.build_dir, "doc", "doxygen", "html"))
         if check_file(opencvjs_tutorial_path):