Merge remote-tracking branch 'upstream/master' into merge-4.x

pull/18951/head
Alexander Alekhin 4 years ago
commit 9d2eabaaa2
  1. 4
      3rdparty/carotene/CMakeLists.txt
  2. 4
      3rdparty/cpufeatures/CMakeLists.txt
  3. 4
      3rdparty/ippicv/CMakeLists.txt
  4. 4
      3rdparty/ittnotify/CMakeLists.txt
  5. 4
      3rdparty/libjasper/CMakeLists.txt
  6. 8
      3rdparty/libjpeg-turbo/CMakeLists.txt
  7. 2
      3rdparty/libjpeg-turbo/LICENSE.md
  8. 22
      3rdparty/libjpeg-turbo/README.ijg
  9. 21
      3rdparty/libjpeg-turbo/README.md
  10. 4
      3rdparty/libjpeg-turbo/src/jchuff.c
  11. 5
      3rdparty/libjpeg-turbo/src/jcinit.c
  12. 4
      3rdparty/libjpeg-turbo/src/jcphuff.c
  13. 5
      3rdparty/libjpeg-turbo/src/jctrans.c
  14. 45
      3rdparty/libjpeg-turbo/src/jdapistd.c
  15. 8
      3rdparty/libjpeg-turbo/src/jdcoefct.c
  16. 9
      3rdparty/libjpeg-turbo/src/jdcolor.c
  17. 55
      3rdparty/libjpeg-turbo/src/jdmerge.c
  18. 47
      3rdparty/libjpeg-turbo/src/jdmerge.h
  19. 10
      3rdparty/libjpeg-turbo/src/jdmrg565.c
  20. 6
      3rdparty/libjpeg-turbo/src/jdmrgext.c
  21. 5
      3rdparty/libjpeg-turbo/src/jdtrans.c
  22. 4
      3rdparty/libjpeg-turbo/src/jfdctint.c
  23. 4
      3rdparty/libjpeg-turbo/src/jidctint.c
  24. 8
      3rdparty/libjpeg-turbo/src/jmorecfg.h
  25. 3
      3rdparty/libjpeg-turbo/src/jpegcomp.h
  26. 8
      3rdparty/libjpeg-turbo/src/jpeglib.h
  27. 6
      3rdparty/libjpeg-turbo/src/jquant2.c
  28. 14
      3rdparty/libjpeg-turbo/src/jversion.h
  29. 4
      3rdparty/libjpeg/CMakeLists.txt
  30. 4
      3rdparty/libpng/CMakeLists.txt
  31. 4
      3rdparty/libtiff/CMakeLists.txt
  32. 4
      3rdparty/libwebp/CMakeLists.txt
  33. 5
      3rdparty/openexr/CMakeLists.txt
  34. 4
      3rdparty/openjpeg/CMakeLists.txt
  35. 5
      3rdparty/protobuf/CMakeLists.txt
  36. 4
      3rdparty/quirc/CMakeLists.txt
  37. 3
      3rdparty/tbb/CMakeLists.txt
  38. 2
      3rdparty/zlib/CMakeLists.txt
  39. 24
      CMakeLists.txt
  40. 2
      apps/interactive-calibration/parametersController.cpp
  41. 36
      cmake/FindONNX.cmake
  42. 2
      cmake/OpenCVCompilerOptions.cmake
  43. 2
      cmake/OpenCVDetectCUDA.cmake
  44. 4
      cmake/OpenCVDetectInferenceEngine.cmake
  45. 36
      cmake/OpenCVFindLibsGrfmt.cmake
  46. 10
      cmake/OpenCVFindLibsPerf.cmake
  47. 2
      cmake/OpenCVFindOpenBLAS.cmake
  48. 6
      cmake/OpenCVGenInfoPlist.cmake
  49. 15
      cmake/OpenCVUtils.cmake
  50. 1
      cmake/templates/opencv_abi.xml.in
  51. 16
      doc/CMakeLists.txt
  52. 34
      doc/js_tutorials/js_setup/js_setup/js_setup.markdown
  53. 24
      doc/pattern_tools/gen_pattern.py
  54. 2
      doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown
  55. 4
      doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown
  56. BIN
      doc/py_tutorials/py_imgproc/py_thresholding/images/threshold.jpg
  57. 2
      doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown
  58. 2
      doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown
  59. 216
      doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.markdown
  60. 11
      doc/tutorials/introduction/config_reference/config_reference.markdown
  61. 3
      doc/tutorials/video/background_subtraction/background_subtraction.markdown
  62. 2
      doc/tutorials/videoio/intelperc.markdown
  63. 2
      doc/tutorials/videoio/kinect_openni.markdown
  64. BIN
      doc/tutorials/videoio/orbbec-astra/images/astra_color.jpg
  65. BIN
      doc/tutorials/videoio/orbbec-astra/images/astra_depth.png
  66. 150
      doc/tutorials/videoio/orbbec-astra/orbbec_astra.markdown
  67. 6
      doc/tutorials/videoio/table_of_content_videoio.markdown
  68. 15
      doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown
  69. 2
      doc/tutorials/videoio/video-write/video_write.markdown
  70. 8
      modules/calib3d/doc/calib3d.bib
  71. 36
      modules/calib3d/include/opencv2/calib3d.hpp
  72. 30
      modules/calib3d/src/solvepnp.cpp
  73. 775
      modules/calib3d/src/sqpnp.cpp
  74. 194
      modules/calib3d/src/sqpnp.hpp
  75. 2
      modules/calib3d/src/usac.hpp
  76. 4
      modules/calib3d/src/usac/ransac_solvers.cpp
  77. 4
      modules/calib3d/test/test_solvepnp_ransac.cpp
  78. 9
      modules/core/include/opencv2/core.hpp
  79. 2
      modules/core/include/opencv2/core/affine.hpp
  80. 5
      modules/core/include/opencv2/core/cv_cpu_dispatch.h
  81. 6
      modules/core/include/opencv2/core/cvdef.h
  82. 21
      modules/core/include/opencv2/core/hal/intrin_avx.hpp
  83. 313
      modules/core/include/opencv2/core/hal/intrin_wasm.hpp
  84. 683
      modules/core/include/opencv2/core/mat.inl.hpp
  85. 9
      modules/core/include/opencv2/core/persistence.hpp
  86. 1194
      modules/core/include/opencv2/core/quaternion.hpp
  87. 849
      modules/core/include/opencv2/core/quaternion.inl.hpp
  88. 2
      modules/core/misc/objc/common/Converters.h
  89. 2
      modules/core/misc/objc/common/CvType.h
  90. 2
      modules/core/misc/objc/common/DMatch.h
  91. 2
      modules/core/misc/objc/common/Double2.h
  92. 2
      modules/core/misc/objc/common/Double3.h
  93. 2
      modules/core/misc/objc/common/Float4.h
  94. 2
      modules/core/misc/objc/common/Float6.h
  95. 2
      modules/core/misc/objc/common/Int4.h
  96. 2
      modules/core/misc/objc/common/KeyPoint.h
  97. 3
      modules/core/misc/objc/common/Mat.h
  98. 4
      modules/core/misc/objc/common/Mat.mm
  99. 2
      modules/core/misc/objc/common/MinMaxLocResult.h
  100. 2
      modules/core/misc/objc/common/Point2d.h
  101. Some files were not shown because too many files have changed in this diff Show More

@ -27,7 +27,7 @@ if(CMAKE_COMPILER_IS_GNUCC)
endif()
endif()
add_library(carotene_objs OBJECT
add_library(carotene_objs OBJECT EXCLUDE_FROM_ALL
${carotene_headers}
${carotene_sources}
)
@ -41,4 +41,4 @@ if(WITH_NEON)
endif()
# we add dummy file to fix XCode build
add_library(carotene STATIC EXCLUDE_FROM_ALL "$<TARGET_OBJECTS:carotene_objs>" "${CAROTENE_SOURCE_DIR}/dummy.cpp")
add_library(carotene STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} "$<TARGET_OBJECTS:carotene_objs>" "${CAROTENE_SOURCE_DIR}/dummy.cpp")

@ -14,7 +14,7 @@ if(NOT DEFINED CPUFEATURES_SOURCES)
endif()
include_directories(${CPUFEATURES_INCLUDE_DIRS})
add_library(${OPENCV_CPUFEATURES_TARGET_NAME} STATIC ${CPUFEATURES_SOURCES})
add_library(${OPENCV_CPUFEATURES_TARGET_NAME} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${CPUFEATURES_SOURCES})
set_target_properties(${OPENCV_CPUFEATURES_TARGET_NAME}
PROPERTIES OUTPUT_NAME cpufeatures
@ -29,7 +29,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${OPENCV_CPUFEATURES_TARGET_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${OPENCV_CPUFEATURES_TARGET_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(cpufeatures LICENSE README.md)

@ -17,7 +17,7 @@ file(GLOB lib_hdrs ${IPP_IW_PATH}/include/*.h ${IPP_IW_PATH}/include/iw/*.h ${IP
# Define the library target:
# ----------------------------------------------------------------------------------
add_library(${IPP_IW_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
add_library(${IPP_IW_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
if(UNIX)
if(CV_GCC OR CV_CLANG OR CV_ICC)
@ -41,5 +41,5 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${IPP_IW_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${IPP_IW_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()

@ -37,7 +37,7 @@ set(ITT_SRCS
src/ittnotify/jitprofiling.c
)
add_library(${ITT_LIBRARY} STATIC ${ITT_SRCS} ${ITT_PUBLIC_HDRS} ${ITT_PRIVATE_HDRS})
add_library(${ITT_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${ITT_SRCS} ${ITT_PUBLIC_HDRS} ${ITT_PRIVATE_HDRS})
if(NOT WIN32)
if(HAVE_DL_LIBRARY)
@ -60,7 +60,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${ITT_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${ITT_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(ittnotify src/ittnotify/LICENSE.BSD src/ittnotify/LICENSE.GPL)

@ -17,7 +17,7 @@ file(GLOB lib_ext_hdrs jasper/*.h)
# Define the library target:
# ----------------------------------------------------------------------------------
add_library(${JASPER_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs} ${lib_ext_hdrs})
add_library(${JASPER_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs} ${lib_ext_hdrs})
if(WIN32 AND NOT MINGW)
add_definitions(-DJAS_WIN_MSVC_BUILD)
@ -46,7 +46,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${JASPER_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${JASPER_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(jasper LICENSE README copyright)

@ -4,9 +4,9 @@ ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-parameter -Wsign-compare -Wshorten-6
set(VERSION_MAJOR 2)
set(VERSION_MINOR 0)
set(VERSION_REVISION 5)
set(VERSION_REVISION 6)
set(VERSION ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_REVISION})
set(LIBJPEG_TURBO_VERSION_NUMBER 2000005)
set(LIBJPEG_TURBO_VERSION_NUMBER 2000006)
string(TIMESTAMP BUILD "opencv-${OPENCV_VERSION}-libjpeg-turbo")
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
@ -106,7 +106,7 @@ set(JPEG_SOURCES ${JPEG_SOURCES} jsimd_none.c)
ocv_list_add_prefix(JPEG_SOURCES src/)
add_library(${JPEG_LIBRARY} STATIC ${JPEG_SOURCES} ${SIMD_OBJS})
add_library(${JPEG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${JPEG_SOURCES} ${SIMD_OBJS})
set_target_properties(${JPEG_LIBRARY}
PROPERTIES OUTPUT_NAME ${JPEG_LIBRARY}
@ -121,7 +121,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(libjpeg-turbo README.md LICENSE.md README.ijg)

@ -91,7 +91,7 @@ best of our understanding.
The Modified (3-clause) BSD License
===================================
Copyright (C)2009-2019 D. R. Commander. All Rights Reserved.
Copyright (C)2009-2020 D. R. Commander. All Rights Reserved.
Copyright (C)2015 Viktor Szathmáry. All Rights Reserved.
Redistribution and use in source and binary forms, with or without

@ -223,12 +223,12 @@ https://www.iso.org/standard/54989.html and http://www.itu.int/rec/T-REC-T.871.
A PDF file of the older JFIF 1.02 specification is available at
http://www.w3.org/Graphics/JPEG/jfif3.pdf.
The TIFF 6.0 file format specification can be obtained by FTP from
ftp://ftp.sgi.com/graphics/tiff/TIFF6.ps.gz. The JPEG incorporation scheme
found in the TIFF 6.0 spec of 3-June-92 has a number of serious problems.
IJG does not recommend use of the TIFF 6.0 design (TIFF Compression tag 6).
Instead, we recommend the JPEG design proposed by TIFF Technical Note #2
(Compression tag 7). Copies of this Note can be obtained from
The TIFF 6.0 file format specification can be obtained from
http://mirrors.ctan.org/graphics/tiff/TIFF6.ps.gz. The JPEG incorporation
scheme found in the TIFF 6.0 spec of 3-June-92 has a number of serious
problems. IJG does not recommend use of the TIFF 6.0 design (TIFF Compression
tag 6). Instead, we recommend the JPEG design proposed by TIFF Technical Note
#2 (Compression tag 7). Copies of this Note can be obtained from
http://www.ijg.org/files/. It is expected that the next revision
of the TIFF spec will replace the 6.0 JPEG design with the Note's design.
Although IJG's own code does not support TIFF/JPEG, the free libtiff library
@ -243,14 +243,8 @@ The most recent released version can always be found there in
directory "files".
The JPEG FAQ (Frequently Asked Questions) article is a source of some
general information about JPEG.
It is available on the World Wide Web at http://www.faqs.org/faqs/jpeg-faq/
and other news.answers archive sites, including the official news.answers
archive at rtfm.mit.edu: ftp://rtfm.mit.edu/pub/usenet/news.answers/jpeg-faq/.
If you don't have Web or FTP access, send e-mail to mail-server@rtfm.mit.edu
with body
send usenet/news.answers/jpeg-faq/part1
send usenet/news.answers/jpeg-faq/part2
general information about JPEG. It is available at
http://www.faqs.org/faqs/jpeg-faq.
FILE FORMAT COMPATIBILITY

@ -2,7 +2,7 @@ Background
==========
libjpeg-turbo is a JPEG image codec that uses SIMD instructions to accelerate
baseline JPEG compression and decompression on x86, x86-64, ARM, PowerPC, and
baseline JPEG compression and decompression on x86, x86-64, Arm, PowerPC, and
MIPS systems, as well as progressive JPEG compression on x86 and x86-64
systems. On such systems, libjpeg-turbo is generally 2-6x as fast as libjpeg,
all else being equal. On other types of systems, libjpeg-turbo can still
@ -179,8 +179,8 @@ supported and which aren't.
NOTE: As of this writing, extensive research has been conducted into the
usefulness of DCT scaling as a means of data reduction and SmartScale as a
means of quality improvement. The reader is invited to peruse the research at
<http://www.libjpeg-turbo.org/About/SmartScale> and draw his/her own conclusions,
means of quality improvement. Readers are invited to peruse the research at
<http://www.libjpeg-turbo.org/About/SmartScale> and draw their own conclusions,
but it is the general belief of our project that these features have not
demonstrated sufficient usefulness to justify inclusion in libjpeg-turbo.
@ -287,12 +287,13 @@ following reasons:
(and slightly faster) floating point IDCT algorithm introduced in libjpeg
v8a as opposed to the algorithm used in libjpeg v6b. It should be noted,
however, that this algorithm basically brings the accuracy of the floating
point IDCT in line with the accuracy of the slow integer IDCT. The floating
point DCT/IDCT algorithms are mainly a legacy feature, and they do not
produce significantly more accuracy than the slow integer algorithms (to put
numbers on this, the typical difference in PNSR between the two algorithms
is less than 0.10 dB, whereas changing the quality level by 1 in the upper
range of the quality scale is typically more like a 1.0 dB difference.)
point IDCT in line with the accuracy of the accurate integer IDCT. The
floating point DCT/IDCT algorithms are mainly a legacy feature, and they do
not produce significantly more accuracy than the accurate integer algorithms
(to put numbers on this, the typical difference in PNSR between the two
algorithms is less than 0.10 dB, whereas changing the quality level by 1 in
the upper range of the quality scale is typically more like a 1.0 dB
difference.)
- If the floating point algorithms in libjpeg-turbo are not implemented using
SIMD instructions on a particular platform, then the accuracy of the
@ -340,7 +341,7 @@ The algorithm used by the SIMD-accelerated quantization function cannot produce
correct results whenever the fast integer forward DCT is used along with a JPEG
quality of 98-100. Thus, libjpeg-turbo must use the non-SIMD quantization
function in those cases. This causes performance to drop by as much as 40%.
It is therefore strongly advised that you use the slow integer forward DCT
It is therefore strongly advised that you use the accurate integer forward DCT
whenever encoding images with a JPEG quality of 98 or higher.

@ -34,10 +34,10 @@
* memory footprint by 64k, which is important for some mobile applications
* that create many isolated instances of libjpeg-turbo (web browsers, for
* instance.) This may improve performance on some mobile platforms as well.
* This feature is enabled by default only on ARM processors, because some x86
* This feature is enabled by default only on Arm processors, because some x86
* chips have a slow implementation of bsr, and the use of clz/bsr cannot be
* shown to have a significant performance impact even on the x86 chips that
* have a fast implementation of it. When building for ARMv6, you can
* have a fast implementation of it. When building for Armv6, you can
* explicitly disable the use of clz/bsr by adding -mthumb to the compiler
* flags (this defines __thumb__).
*/

@ -1,8 +1,10 @@
/*
* jcinit.c
*
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1991-1997, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
* libjpeg-turbo Modifications:
* Copyright (C) 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
@ -19,6 +21,7 @@
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jpegcomp.h"
/*

@ -43,10 +43,10 @@
* memory footprint by 64k, which is important for some mobile applications
* that create many isolated instances of libjpeg-turbo (web browsers, for
* instance.) This may improve performance on some mobile platforms as well.
* This feature is enabled by default only on ARM processors, because some x86
* This feature is enabled by default only on Arm processors, because some x86
* chips have a slow implementation of bsr, and the use of clz/bsr cannot be
* shown to have a significant performance impact even on the x86 chips that
* have a fast implementation of it. When building for ARMv6, you can
* have a fast implementation of it. When building for Armv6, you can
* explicitly disable the use of clz/bsr by adding -mthumb to the compiler
* flags (this defines __thumb__).
*/

@ -4,8 +4,8 @@
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1995-1998, Thomas G. Lane.
* Modified 2000-2009 by Guido Vollbeding.
* It was modified by The libjpeg-turbo Project to include only code relevant
* to libjpeg-turbo.
* libjpeg-turbo Modifications:
* Copyright (C) 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
@ -17,6 +17,7 @@
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jpegcomp.h"
/* Forward declarations */

@ -4,7 +4,7 @@
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1994-1996, Thomas G. Lane.
* libjpeg-turbo Modifications:
* Copyright (C) 2010, 2015-2018, D. R. Commander.
* Copyright (C) 2010, 2015-2018, 2020, D. R. Commander.
* Copyright (C) 2015, Google, Inc.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
@ -21,6 +21,8 @@
#include "jinclude.h"
#include "jdmainct.h"
#include "jdcoefct.h"
#include "jdmaster.h"
#include "jdmerge.h"
#include "jdsample.h"
#include "jmemsys.h"
@ -316,6 +318,8 @@ LOCAL(void)
read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
{
JDIMENSION n;
my_master_ptr master = (my_master_ptr)cinfo->master;
JSAMPARRAY scanlines = NULL;
void (*color_convert) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION input_row, JSAMPARRAY output_buf,
int num_rows) = NULL;
@ -332,8 +336,13 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
cinfo->cquantize->color_quantize = noop_quantize;
}
if (master->using_merged_upsample && cinfo->max_v_samp_factor == 2) {
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
scanlines = &upsample->spare_row;
}
for (n = 0; n < num_lines; n++)
jpeg_read_scanlines(cinfo, NULL, 1);
jpeg_read_scanlines(cinfo, scanlines, 1);
if (color_convert)
cinfo->cconvert->color_convert = color_convert;
@ -353,6 +362,12 @@ increment_simple_rowgroup_ctr(j_decompress_ptr cinfo, JDIMENSION rows)
{
JDIMENSION rows_left;
my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
my_master_ptr master = (my_master_ptr)cinfo->master;
if (master->using_merged_upsample && cinfo->max_v_samp_factor == 2) {
read_and_discard_scanlines(cinfo, rows);
return;
}
/* Increment the counter to the next row group after the skipped rows. */
main_ptr->rowgroup_ctr += rows / cinfo->max_v_samp_factor;
@ -382,21 +397,27 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
{
my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
my_coef_ptr coef = (my_coef_ptr)cinfo->coef;
my_master_ptr master = (my_master_ptr)cinfo->master;
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
JDIMENSION i, x;
int y;
JDIMENSION lines_per_iMCU_row, lines_left_in_iMCU_row, lines_after_iMCU_row;
JDIMENSION lines_to_skip, lines_to_read;
/* Two-pass color quantization is not supported. */
if (cinfo->quantize_colors && cinfo->two_pass_quantize)
ERREXIT(cinfo, JERR_NOTIMPL);
if (cinfo->global_state != DSTATE_SCANNING)
ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);
/* Do not skip past the bottom of the image. */
if (cinfo->output_scanline + num_lines >= cinfo->output_height) {
num_lines = cinfo->output_height - cinfo->output_scanline;
cinfo->output_scanline = cinfo->output_height;
(*cinfo->inputctl->finish_input_pass) (cinfo);
cinfo->inputctl->eoi_reached = TRUE;
return cinfo->output_height - cinfo->output_scanline;
return num_lines;
}
if (num_lines == 0)
@ -445,8 +466,10 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
main_ptr->buffer_full = FALSE;
main_ptr->rowgroup_ctr = 0;
main_ptr->context_state = CTX_PREPARE_FOR_IMCU;
upsample->next_row_out = cinfo->max_v_samp_factor;
upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
if (!master->using_merged_upsample) {
upsample->next_row_out = cinfo->max_v_samp_factor;
upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
}
}
/* Skipping is much simpler when context rows are not required. */
@ -458,8 +481,10 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
cinfo->output_scanline += lines_left_in_iMCU_row;
main_ptr->buffer_full = FALSE;
main_ptr->rowgroup_ctr = 0;
upsample->next_row_out = cinfo->max_v_samp_factor;
upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
if (!master->using_merged_upsample) {
upsample->next_row_out = cinfo->max_v_samp_factor;
upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
}
}
}
@ -494,7 +519,8 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
cinfo->output_iMCU_row += lines_to_skip / lines_per_iMCU_row;
increment_simple_rowgroup_ctr(cinfo, lines_to_read);
}
upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
if (!master->using_merged_upsample)
upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
return num_lines;
}
@ -535,7 +561,8 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
* bit odd, since "rows_to_go" seems to be redundantly keeping track of
* output_scanline.
*/
upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
if (!master->using_merged_upsample)
upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
/* Always skip the requested number of lines. */
return num_lines;

@ -6,7 +6,7 @@
* libjpeg-turbo Modifications:
* Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
* Copyright (C) 2010, 2015-2016, D. R. Commander.
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, 2020, Google, Inc.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
@ -495,11 +495,13 @@ decompress_smooth_data(j_decompress_ptr cinfo, JSAMPIMAGE output_buf)
if (first_row && block_row == 0)
prev_block_row = buffer_ptr;
else
prev_block_row = buffer[block_row - 1];
prev_block_row = buffer[block_row - 1] +
cinfo->master->first_MCU_col[ci];
if (last_row && block_row == block_rows - 1)
next_block_row = buffer_ptr;
else
next_block_row = buffer[block_row + 1];
next_block_row = buffer[block_row + 1] +
cinfo->master->first_MCU_col[ci];
/* We fetch the surrounding DC values using a sliding-register approach.
* Initialize all nine here so as to do the right thing on narrow pics.
*/

@ -571,11 +571,10 @@ ycck_cmyk_convert(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
* RGB565 conversion
*/
#define PACK_SHORT_565_LE(r, g, b) ((((r) << 8) & 0xF800) | \
(((g) << 3) & 0x7E0) | ((b) >> 3))
#define PACK_SHORT_565_BE(r, g, b) (((r) & 0xF8) | ((g) >> 5) | \
(((g) << 11) & 0xE000) | \
(((b) << 5) & 0x1F00))
#define PACK_SHORT_565_LE(r, g, b) \
((((r) << 8) & 0xF800) | (((g) << 3) & 0x7E0) | ((b) >> 3))
#define PACK_SHORT_565_BE(r, g, b) \
(((r) & 0xF8) | ((g) >> 5) | (((g) << 11) & 0xE000) | (((b) << 5) & 0x1F00))
#define PACK_TWO_PIXELS_LE(l, r) ((r << 16) | l)
#define PACK_TWO_PIXELS_BE(l, r) ((l << 16) | r)

@ -5,7 +5,7 @@
* Copyright (C) 1994-1996, Thomas G. Lane.
* libjpeg-turbo Modifications:
* Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
* Copyright (C) 2009, 2011, 2014-2015, D. R. Commander.
* Copyright (C) 2009, 2011, 2014-2015, 2020, D. R. Commander.
* Copyright (C) 2013, Linaro Limited.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
@ -40,41 +40,13 @@
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jdmerge.h"
#include "jsimd.h"
#include "jconfigint.h"
#ifdef UPSAMPLE_MERGING_SUPPORTED
/* Private subobject */
typedef struct {
struct jpeg_upsampler pub; /* public fields */
/* Pointer to routine to do actual upsampling/conversion of one row group */
void (*upmethod) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf);
/* Private state for YCC->RGB conversion */
int *Cr_r_tab; /* => table for Cr to R conversion */
int *Cb_b_tab; /* => table for Cb to B conversion */
JLONG *Cr_g_tab; /* => table for Cr to G conversion */
JLONG *Cb_g_tab; /* => table for Cb to G conversion */
/* For 2:1 vertical sampling, we produce two output rows at a time.
* We need a "spare" row buffer to hold the second output row if the
* application provides just a one-row buffer; we also use the spare
* to discard the dummy last row if the image height is odd.
*/
JSAMPROW spare_row;
boolean spare_full; /* T if spare buffer is occupied */
JDIMENSION out_row_width; /* samples per output row */
JDIMENSION rows_to_go; /* counts rows remaining in image */
} my_upsampler;
typedef my_upsampler *my_upsample_ptr;
#define SCALEBITS 16 /* speediest right-shift on some machines */
#define ONE_HALF ((JLONG)1 << (SCALEBITS - 1))
#define FIX(x) ((JLONG)((x) * (1L << SCALEBITS) + 0.5))
@ -189,7 +161,7 @@ typedef my_upsampler *my_upsample_ptr;
LOCAL(void)
build_ycc_rgb_table(j_decompress_ptr cinfo)
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
int i;
JLONG x;
SHIFT_TEMPS
@ -232,7 +204,7 @@ build_ycc_rgb_table(j_decompress_ptr cinfo)
METHODDEF(void)
start_pass_merged_upsample(j_decompress_ptr cinfo)
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
/* Mark the spare buffer empty */
upsample->spare_full = FALSE;
@ -254,7 +226,7 @@ merged_2v_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
/* 2:1 vertical sampling case: may need a spare row. */
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
JSAMPROW work_ptrs[2];
JDIMENSION num_rows; /* number of rows returned to caller */
@ -305,7 +277,7 @@ merged_1v_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
/* 1:1 vertical sampling case: much easier, never need a spare row. */
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
/* Just do the upsampling. */
(*upsample->upmethod) (cinfo, input_buf, *in_row_group_ctr,
@ -420,11 +392,10 @@ h2v2_merged_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
* RGB565 conversion
*/
#define PACK_SHORT_565_LE(r, g, b) ((((r) << 8) & 0xF800) | \
(((g) << 3) & 0x7E0) | ((b) >> 3))
#define PACK_SHORT_565_BE(r, g, b) (((r) & 0xF8) | ((g) >> 5) | \
(((g) << 11) & 0xE000) | \
(((b) << 5) & 0x1F00))
#define PACK_SHORT_565_LE(r, g, b) \
((((r) << 8) & 0xF800) | (((g) << 3) & 0x7E0) | ((b) >> 3))
#define PACK_SHORT_565_BE(r, g, b) \
(((r) & 0xF8) | ((g) >> 5) | (((g) << 11) & 0xE000) | (((b) << 5) & 0x1F00))
#define PACK_TWO_PIXELS_LE(l, r) ((r << 16) | l)
#define PACK_TWO_PIXELS_BE(l, r) ((l << 16) | r)
@ -566,11 +537,11 @@ h2v2_merged_upsample_565D(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
GLOBAL(void)
jinit_merged_upsampler(j_decompress_ptr cinfo)
{
my_upsample_ptr upsample;
my_merged_upsample_ptr upsample;
upsample = (my_upsample_ptr)
upsample = (my_merged_upsample_ptr)
(*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
sizeof(my_upsampler));
sizeof(my_merged_upsampler));
cinfo->upsample = (struct jpeg_upsampler *)upsample;
upsample->pub.start_pass = start_pass_merged_upsample;
upsample->pub.need_context_rows = FALSE;

@ -0,0 +1,47 @@
/*
* jdmerge.h
*
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1994-1996, Thomas G. Lane.
* libjpeg-turbo Modifications:
* Copyright (C) 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*/
#define JPEG_INTERNALS
#include "jpeglib.h"
#ifdef UPSAMPLE_MERGING_SUPPORTED
/* Private subobject */
typedef struct {
struct jpeg_upsampler pub; /* public fields */
/* Pointer to routine to do actual upsampling/conversion of one row group */
void (*upmethod) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf);
/* Private state for YCC->RGB conversion */
int *Cr_r_tab; /* => table for Cr to R conversion */
int *Cb_b_tab; /* => table for Cb to B conversion */
JLONG *Cr_g_tab; /* => table for Cr to G conversion */
JLONG *Cb_g_tab; /* => table for Cb to G conversion */
/* For 2:1 vertical sampling, we produce two output rows at a time.
* We need a "spare" row buffer to hold the second output row if the
* application provides just a one-row buffer; we also use the spare
* to discard the dummy last row if the image height is odd.
*/
JSAMPROW spare_row;
boolean spare_full; /* T if spare buffer is occupied */
JDIMENSION out_row_width; /* samples per output row */
JDIMENSION rows_to_go; /* counts rows remaining in image */
} my_merged_upsampler;
typedef my_merged_upsampler *my_merged_upsample_ptr;
#endif /* UPSAMPLE_MERGING_SUPPORTED */

@ -5,7 +5,7 @@
* Copyright (C) 1994-1996, Thomas G. Lane.
* libjpeg-turbo Modifications:
* Copyright (C) 2013, Linaro Limited.
* Copyright (C) 2014-2015, 2018, D. R. Commander.
* Copyright (C) 2014-2015, 2018, 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
@ -19,7 +19,7 @@ h2v1_merged_upsample_565_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION in_row_group_ctr,
JSAMPARRAY output_buf)
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
register int y, cred, cgreen, cblue;
int cb, cr;
register JSAMPROW outptr;
@ -90,7 +90,7 @@ h2v1_merged_upsample_565D_internal(j_decompress_ptr cinfo,
JDIMENSION in_row_group_ctr,
JSAMPARRAY output_buf)
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
register int y, cred, cgreen, cblue;
int cb, cr;
register JSAMPROW outptr;
@ -163,7 +163,7 @@ h2v2_merged_upsample_565_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION in_row_group_ctr,
JSAMPARRAY output_buf)
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
register int y, cred, cgreen, cblue;
int cb, cr;
register JSAMPROW outptr0, outptr1;
@ -259,7 +259,7 @@ h2v2_merged_upsample_565D_internal(j_decompress_ptr cinfo,
JDIMENSION in_row_group_ctr,
JSAMPARRAY output_buf)
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
register int y, cred, cgreen, cblue;
int cb, cr;
register JSAMPROW outptr0, outptr1;

@ -4,7 +4,7 @@
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1994-1996, Thomas G. Lane.
* libjpeg-turbo Modifications:
* Copyright (C) 2011, 2015, D. R. Commander.
* Copyright (C) 2011, 2015, 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
@ -25,7 +25,7 @@ h2v1_merged_upsample_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION in_row_group_ctr,
JSAMPARRAY output_buf)
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
register int y, cred, cgreen, cblue;
int cb, cr;
register JSAMPROW outptr;
@ -97,7 +97,7 @@ h2v2_merged_upsample_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
JDIMENSION in_row_group_ctr,
JSAMPARRAY output_buf)
{
my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
register int y, cred, cgreen, cblue;
int cb, cr;
register JSAMPROW outptr0, outptr1;

@ -3,8 +3,8 @@
*
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1995-1997, Thomas G. Lane.
* It was modified by The libjpeg-turbo Project to include only code relevant
* to libjpeg-turbo.
* libjpeg-turbo Modifications:
* Copyright (C) 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
@ -16,6 +16,7 @@
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jpegcomp.h"
/* Forward declarations */

@ -4,11 +4,11 @@
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1991-1996, Thomas G. Lane.
* libjpeg-turbo Modifications:
* Copyright (C) 2015, D. R. Commander.
* Copyright (C) 2015, 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
* This file contains a slow-but-accurate integer implementation of the
* This file contains a slower but more accurate integer implementation of the
* forward DCT (Discrete Cosine Transform).
*
* A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT

@ -5,11 +5,11 @@
* Copyright (C) 1991-1998, Thomas G. Lane.
* Modification developed 2002-2009 by Guido Vollbeding.
* libjpeg-turbo Modifications:
* Copyright (C) 2015, D. R. Commander.
* Copyright (C) 2015, 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
* This file contains a slow-but-accurate integer implementation of the
* This file contains a slower but more accurate integer implementation of the
* inverse DCT (Discrete Cosine Transform). In the IJG code, this routine
* must also perform dequantization of the input coefficients.
*

@ -5,7 +5,7 @@
* Copyright (C) 1991-1997, Thomas G. Lane.
* Modified 1997-2009 by Guido Vollbeding.
* libjpeg-turbo Modifications:
* Copyright (C) 2009, 2011, 2014-2015, 2018, D. R. Commander.
* Copyright (C) 2009, 2011, 2014-2015, 2018, 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
@ -273,9 +273,9 @@ typedef int boolean;
/* Capability options common to encoder and decoder: */
#define DCT_ISLOW_SUPPORTED /* slow but accurate integer algorithm */
#define DCT_IFAST_SUPPORTED /* faster, less accurate integer method */
#define DCT_FLOAT_SUPPORTED /* floating-point: accurate, fast on fast HW */
#define DCT_ISLOW_SUPPORTED /* accurate integer method */
#define DCT_IFAST_SUPPORTED /* less accurate int method [legacy feature] */
#define DCT_FLOAT_SUPPORTED /* floating-point method [legacy feature] */
/* Encoder capability options: */

@ -1,7 +1,7 @@
/*
* jpegcomp.h
*
* Copyright (C) 2010, D. R. Commander.
* Copyright (C) 2010, 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
@ -19,6 +19,7 @@
#define _min_DCT_v_scaled_size min_DCT_v_scaled_size
#define _jpeg_width jpeg_width
#define _jpeg_height jpeg_height
#define JERR_ARITH_NOTIMPL JERR_NOT_COMPILED
#else
#define _DCT_scaled_size DCT_scaled_size
#define _DCT_h_scaled_size DCT_scaled_size

@ -5,7 +5,7 @@
* Copyright (C) 1991-1998, Thomas G. Lane.
* Modified 2002-2009 by Guido Vollbeding.
* libjpeg-turbo Modifications:
* Copyright (C) 2009-2011, 2013-2014, 2016-2017, D. R. Commander.
* Copyright (C) 2009-2011, 2013-2014, 2016-2017, 2020, D. R. Commander.
* Copyright (C) 2015, Google, Inc.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
@ -244,9 +244,9 @@ typedef enum {
/* DCT/IDCT algorithm options. */
typedef enum {
JDCT_ISLOW, /* slow but accurate integer algorithm */
JDCT_IFAST, /* faster, less accurate integer method */
JDCT_FLOAT /* floating-point: accurate, fast on fast HW */
JDCT_ISLOW, /* accurate integer method */
JDCT_IFAST, /* less accurate integer method [legacy feature] */
JDCT_FLOAT /* floating-point method [legacy feature] */
} J_DCT_METHOD;
#ifndef JDCT_DEFAULT /* may be overridden in jconfig.h */

@ -4,7 +4,7 @@
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1991-1996, Thomas G. Lane.
* libjpeg-turbo Modifications:
* Copyright (C) 2009, 2014-2015, D. R. Commander.
* Copyright (C) 2009, 2014-2015, 2020, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
@ -1145,7 +1145,7 @@ start_pass_2_quant(j_decompress_ptr cinfo, boolean is_pre_scan)
int i;
/* Only F-S dithering or no dithering is supported. */
/* If user asks for ordered dither, give him F-S. */
/* If user asks for ordered dither, give them F-S. */
if (cinfo->dither_mode != JDITHER_NONE)
cinfo->dither_mode = JDITHER_FS;
@ -1263,7 +1263,7 @@ jinit_2pass_quantizer(j_decompress_ptr cinfo)
cquantize->sv_colormap = NULL;
/* Only F-S dithering or no dithering is supported. */
/* If user asks for ordered dither, give him F-S. */
/* If user asks for ordered dither, give them F-S. */
if (cinfo->dither_mode != JDITHER_NONE)
cinfo->dither_mode = JDITHER_FS;

@ -30,23 +30,25 @@
* NOTE: It is our convention to place the authors in the following order:
* - libjpeg-turbo authors (2009-) in descending order of the date of their
* most recent contribution to the project, then in ascending order of the
* date of their first contribution to the project
* date of their first contribution to the project, then in alphabetical
* order
* - Upstream authors in descending order of the date of the first inclusion of
* their code
*/
#define JCOPYRIGHT \
"Copyright (C) 2009-2020 D. R. Commander\n" \
"Copyright (C) 2011-2016 Siarhei Siamashka\n" \
"Copyright (C) 2015, 2020 Google, Inc.\n" \
"Copyright (C) 2019 Arm Limited\n" \
"Copyright (C) 2015-2016, 2018 Matthieu Darbois\n" \
"Copyright (C) 2011-2016 Siarhei Siamashka\n" \
"Copyright (C) 2015 Intel Corporation\n" \
"Copyright (C) 2015 Google, Inc.\n" \
"Copyright (C) 2013-2014 Linaro Limited\n" \
"Copyright (C) 2013-2014 MIPS Technologies, Inc.\n" \
"Copyright (C) 2013 Linaro Limited\n" \
"Copyright (C) 2009, 2012 Pierre Ossman for Cendio AB\n" \
"Copyright (C) 2009-2011 Nokia Corporation and/or its subsidiary(-ies)\n" \
"Copyright (C) 2009 Pierre Ossman for Cendio AB\n" \
"Copyright (C) 1999-2006 MIYASAKA Masaru\n" \
"Copyright (C) 1991-2016 Thomas G. Lane, Guido Vollbeding"
"Copyright (C) 1991-2017 Thomas G. Lane, Guido Vollbeding"
#define JCOPYRIGHT_SHORT \
"Copyright (C) 1991-2020 The libjpeg-turbo Project and many others"

@ -19,7 +19,7 @@ endif()
# Define the library target:
# ----------------------------------------------------------------------------------
add_library(${JPEG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
add_library(${JPEG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
if(CV_GCC OR CV_CLANG)
set_source_files_properties(jcdctmgr.c PROPERTIES COMPILE_FLAGS "-O1")
@ -42,7 +42,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(libjpeg README)

@ -74,7 +74,7 @@ if(MSVC)
add_definitions(-D_CRT_SECURE_NO_DEPRECATE)
endif(MSVC)
add_library(${PNG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
add_library(${PNG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
target_link_libraries(${PNG_LIBRARY} ${ZLIB_LIBRARIES})
ocv_warnings_disable(CMAKE_C_FLAGS -Wundef -Wcast-align -Wimplicit-fallthrough -Wunused-parameter -Wsign-compare)
@ -92,7 +92,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${PNG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${PNG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(libpng LICENSE README)

@ -462,7 +462,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4456 /wd4457 /wd4312) # vs2015
ocv_warnings_disable(CMAKE_C_FLAGS /wd4267 /wd4244 /wd4018 /wd4311 /wd4312)
add_library(${TIFF_LIBRARY} STATIC ${lib_srcs})
add_library(${TIFF_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs})
target_link_libraries(${TIFF_LIBRARY} ${ZLIB_LIBRARIES})
set_target_properties(${TIFF_LIBRARY}
@ -479,7 +479,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${TIFF_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${TIFF_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(libtiff COPYRIGHT)

@ -34,7 +34,7 @@ endif()
add_definitions(-DWEBP_USE_THREAD)
add_library(${WEBP_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
add_library(${WEBP_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
if(ANDROID)
target_link_libraries(${WEBP_LIBRARY} ${CPUFEATURES_LIBRARIES})
endif()
@ -59,6 +59,6 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${WEBP_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${WEBP_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()

@ -109,6 +109,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow -Wunused -Wsign-compare -Wundef -W
-Wmissing-prototypes # gcc/clang
-Wreorder
-Wunused-result
-Wimplicit-const-int-float-conversion # clang
)
if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 8.0)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wclass-memaccess)
@ -125,7 +126,7 @@ if(MSVC AND CV_ICC)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Qrestrict")
endif()
add_library(IlmImf STATIC ${lib_hdrs} ${lib_srcs})
add_library(IlmImf STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_hdrs} ${lib_srcs})
target_link_libraries(IlmImf ${ZLIB_LIBRARIES})
set_target_properties(IlmImf
@ -142,7 +143,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(IlmImf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(IlmImf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(openexr LICENSE AUTHORS.ilmbase AUTHORS.openexr)

@ -11,6 +11,10 @@ set(OPENJPEG_LIBRARY_NAME libopenjp2)
project(openjpeg C)
ocv_warnings_disable(CMAKE_C_FLAGS
-Wimplicit-const-int-float-conversion # clang
)
#-----------------------------------------------------------------------------
# OPENJPEG version number, useful for packaging and doxygen doc:
set(OPENJPEG_VERSION_MAJOR 2)

@ -140,7 +140,8 @@ append_if_exist(Protobuf_SRCS
${PROTOBUF_ROOT}/src/google/protobuf/wrappers.pb.cc
)
add_library(libprotobuf STATIC ${Protobuf_SRCS})
include_directories(BEFORE "${PROTOBUF_ROOT}/src") # ensure using if own headers: https://github.com/opencv/opencv/issues/13328
add_library(libprotobuf STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${Protobuf_SRCS})
target_include_directories(libprotobuf SYSTEM PUBLIC $<BUILD_INTERFACE:${PROTOBUF_ROOT}/src>)
set_target_properties(libprotobuf
PROPERTIES
@ -156,7 +157,7 @@ get_protobuf_version(Protobuf_VERSION "${PROTOBUF_ROOT}/src")
set(Protobuf_VERSION ${Protobuf_VERSION} CACHE INTERNAL "" FORCE)
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(libprotobuf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(libprotobuf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(protobuf LICENSE README.md)

@ -8,7 +8,7 @@ ocv_include_directories(${CURR_INCLUDE_DIR})
file(GLOB_RECURSE quirc_headers RELATIVE "${CMAKE_CURRENT_LIST_DIR}" "include/*.h")
file(GLOB_RECURSE quirc_sources RELATIVE "${CMAKE_CURRENT_LIST_DIR}" "src/*.c")
add_library(${PROJECT_NAME} STATIC ${quirc_headers} ${quirc_sources})
add_library(${PROJECT_NAME} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${quirc_headers} ${quirc_sources})
ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-variable -Wshadow)
set_target_properties(${PROJECT_NAME}
@ -24,7 +24,7 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
if(NOT BUILD_SHARED_LIBS)
ocv_install_target(${PROJECT_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
ocv_install_target(${PROJECT_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
endif()
ocv_install_3rdparty_licenses(${PROJECT_NAME} LICENSE)

@ -108,7 +108,7 @@ set(tbb_version_file "version_string.ver")
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/${tbb_version_file}.cmakein" "${CMAKE_CURRENT_BINARY_DIR}/${tbb_version_file}" @ONLY)
list(APPEND TBB_SOURCE_FILES "${CMAKE_CURRENT_BINARY_DIR}/${tbb_version_file}")
add_library(tbb ${TBB_SOURCE_FILES})
add_library(tbb ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${TBB_SOURCE_FILES})
target_compile_definitions(tbb PUBLIC
TBB_USE_GCC_BUILTINS=1
__TBB_GCC_BUILTIN_ATOMICS_PRESENT=1
@ -165,6 +165,7 @@ ocv_install_target(tbb EXPORT OpenCVModules
RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs
LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT libs
ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev
OPTIONAL
)
ocv_install_3rdparty_licenses(tbb "${tbb_src_dir}/LICENSE" "${tbb_src_dir}/README")

@ -76,7 +76,7 @@ set(ZLIB_SRCS
zutil.c
)
add_library(${ZLIB_LIBRARY} STATIC ${ZLIB_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
add_library(${ZLIB_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${ZLIB_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
set_target_properties(${ZLIB_LIBRARY} PROPERTIES DEFINE_SYMBOL ZLIB_DLL)
ocv_warnings_disable(CMAKE_C_FLAGS -Wshorten-64-to-32 -Wattributes -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations -Wshift-negative-value

@ -368,6 +368,9 @@ OCV_OPTION(WITH_MSMF_DXVA "Enable hardware acceleration in Media Foundation back
OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF
VISIBLE_IF NOT ANDROID AND NOT WINRT
VERIFY HAVE_XIMEA)
OCV_OPTION(WITH_UEYE "Include UEYE camera support" OFF
VISIBLE_IF NOT ANDROID AND NOT APPLE AND NOT WINRT
VERIFY HAVE_UEYE)
OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF
VISIBLE_IF UNIX AND NOT APPLE AND NOT ANDROID
VERIFY HAVE_XINE)
@ -440,6 +443,9 @@ OCV_OPTION(WITH_ANDROID_MEDIANDK "Use Android Media NDK for Video I/O (Android)"
OCV_OPTION(WITH_TENGINE "Include Arm Inference Tengine support" OFF
VISIBLE_IF (ARM OR AARCH64) AND (UNIX OR ANDROID) AND NOT IOS
VERIFY HAVE_TENGINE)
OCV_OPTION(WITH_ONNX "Include Microsoft ONNX Runtime support" OFF
VISIBLE_IF TRUE
VERIFY HAVE_ONNX)
# OpenCV build components
# ===================================================
@ -782,6 +788,11 @@ if(WITH_QUIRC)
add_subdirectory(3rdparty/quirc)
set(HAVE_QUIRC TRUE)
endif()
if(WITH_ONNX)
include(cmake/FindONNX.cmake)
endif()
# ----------------------------------------------------------------------------
# OpenCV HAL
# ----------------------------------------------------------------------------
@ -1383,6 +1394,10 @@ if(WITH_XIMEA OR HAVE_XIMEA)
status(" XIMEA:" HAVE_XIMEA THEN YES ELSE NO)
endif()
if(WITH_UEYE OR HAVE_UEYE)
status(" uEye:" HAVE_UEYE THEN YES ELSE NO)
endif()
if(WITH_XINE OR HAVE_XINE)
status(" Xine:" HAVE_XINE THEN "YES (ver ${XINE_VERSION})" ELSE NO)
endif()
@ -1567,6 +1582,15 @@ if(WITH_OPENCL OR HAVE_OPENCL)
endif()
endif()
if(WITH_ONNX OR HAVE_ONNX)
status("")
status(" ONNX:" HAVE_ONNX THEN "YES" ELSE "NO")
if(HAVE_ONNX)
status(" Include path:" ONNX_INCLUDE_DIR THEN "${ONNX_INCLUDE_DIR}" ELSE "NO")
status(" Link libraries:" ONNX_LIBRARIES THEN "${ONNX_LIBRARIES}" ELSE "NO")
endif()
endif()
# ========================== python ==========================
if(BUILD_opencv_python2)
status("")

@ -32,7 +32,7 @@ bool calib::parametersController::loadFromFile(const std::string &inputFileName)
if(!reader.isOpened()) {
std::cerr << "Warning: Unable to open " << inputFileName <<
" Applicatioin stated with default advanced parameters" << std::endl;
" Application started with default advanced parameters" << std::endl;
return true;
}

@ -0,0 +1,36 @@
ocv_clear_vars(HAVE_ONNX)
set(ONNXRT_ROOT_DIR "" CACHE PATH "ONNX Runtime install directory")
# For now, check the old name ORT_INSTALL_DIR
if(ORT_INSTALL_DIR AND NOT ONNXRT_ROOT_DIR)
set(ONNXRT_ROOT_DIR ${ORT_INSTALL_DIR})
endif()
if(ONNXRT_ROOT_DIR)
find_library(ORT_LIB onnxruntime
${ONNXRT_ROOT_DIR}/lib
CMAKE_FIND_ROOT_PATH_BOTH)
find_path(ORT_INCLUDE onnxruntime_cxx_api.h
${ONNXRT_ROOT_DIR}/include/onnxruntime/core/session
CMAKE_FIND_ROOT_PATH_BOTH)
endif()
if(ORT_LIB AND ORT_INCLUDE)
set(HAVE_ONNX TRUE)
# For CMake output only
set(ONNX_LIBRARIES "${ORT_LIB}" CACHE STRING "ONNX Runtime libraries")
set(ONNX_INCLUDE_DIR "${ORT_INCLUDE}" CACHE STRING "ONNX Runtime include path")
# Link target with associated interface headers
set(ONNX_LIBRARY "onnxruntime" CACHE STRING "ONNX Link Target")
ocv_add_library(${ONNX_LIBRARY} SHARED IMPORTED)
set_target_properties(${ONNX_LIBRARY} PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES ${ORT_INCLUDE}
IMPORTED_LOCATION ${ORT_LIB}
IMPORTED_IMPLIB ${ORT_LIB})
endif()
if(NOT HAVE_ONNX)
ocv_clear_vars(HAVE_ONNX ORT_LIB ORT_INCLUDE_DIR)
endif()

@ -153,7 +153,7 @@ if(CV_GCC OR CV_CLANG)
if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
add_extra_compiler_option(-Wno-missing-field-initializers) # GCC 4.x emits warnings about {}, fixed in GCC 5+
endif()
if(CV_CLANG AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10.0)
if(CV_CLANG AND NOT CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10.0)
add_extra_compiler_option(-Wno-deprecated-enum-enum-conversion)
add_extra_compiler_option(-Wno-deprecated-anon-enum-enum-conversion)
endif()

@ -208,7 +208,7 @@ if(CUDA_FOUND)
if(${status} EQUAL 0)
# cache detected values
set(OPENCV_CACHE_CUDA_ACTIVE_CC ${${result_list}} CACHE INTERNAL "")
set(OPENCV_CACHE_CUDA_ACTIVE_CC ${${output}} CACHE INTERNAL "")
set(OPENCV_CACHE_CUDA_ACTIVE_CC_check "${__cache_key_check}" CACHE INTERNAL "")
endif()
endif()

@ -129,9 +129,9 @@ endif()
if(INF_ENGINE_TARGET)
if(NOT INF_ENGINE_RELEASE)
message(WARNING "InferenceEngine version has not been set, 2020.4 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
message(WARNING "InferenceEngine version has not been set, 2021.1 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
endif()
set(INF_ENGINE_RELEASE "2020040000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
set(INF_ENGINE_RELEASE "2021010000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
)

@ -15,11 +15,12 @@ else()
endif()
if(NOT ZLIB_FOUND)
ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIRS)
ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIR)
set(ZLIB_LIBRARY zlib)
set(ZLIB_LIBRARY zlib CACHE INTERNAL "")
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/zlib")
set(ZLIB_INCLUDE_DIRS "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}")
set(ZLIB_INCLUDE_DIR "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}" CACHE INTERNAL "")
set(ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIR})
set(ZLIB_LIBRARIES ${ZLIB_LIBRARY})
ocv_parse_header2(ZLIB "${${ZLIB_LIBRARY}_SOURCE_DIR}/zlib.h" ZLIB_VERSION)
@ -37,16 +38,17 @@ if(WITH_JPEG)
ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR)
if(NOT BUILD_JPEG_TURBO_DISABLE)
set(JPEG_LIBRARY libjpeg-turbo)
set(JPEG_LIBRARY libjpeg-turbo CACHE INTERNAL "")
set(JPEG_LIBRARIES ${JPEG_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg-turbo")
set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}/src")
set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}/src" CACHE INTERNAL "")
else()
set(JPEG_LIBRARY libjpeg)
set(JPEG_LIBRARY libjpeg CACHE INTERNAL "")
set(JPEG_LIBRARIES ${JPEG_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg")
set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}")
set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "")
endif()
set(JPEG_INCLUDE_DIRS "${JPEG_INCLUDE_DIR}")
endif()
macro(ocv_detect_jpeg_version header_file)
@ -83,10 +85,10 @@ if(WITH_TIFF)
if(NOT TIFF_FOUND)
ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR)
set(TIFF_LIBRARY libtiff)
set(TIFF_LIBRARY libtiff CACHE INTERNAL "")
set(TIFF_LIBRARIES ${TIFF_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff")
set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}")
set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}" CACHE INTERNAL "")
ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION)
endif()
@ -128,12 +130,12 @@ endif()
if(WITH_WEBP AND NOT WEBP_FOUND
AND (NOT ANDROID OR HAVE_CPUFEATURES)
)
set(WEBP_LIBRARY libwebp)
ocv_clear_vars(WEBP_LIBRARY WEBP_INCLUDE_DIR)
set(WEBP_LIBRARY libwebp CACHE INTERNAL "")
set(WEBP_LIBRARIES ${WEBP_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libwebp")
set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}/src")
set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}/src" CACHE INTERNAL "")
set(HAVE_WEBP 1)
endif()
@ -192,10 +194,10 @@ if(WITH_JASPER AND NOT HAVE_OPENJPEG)
if(NOT JASPER_FOUND)
ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR)
set(JASPER_LIBRARY libjasper)
set(JASPER_LIBRARY libjasper CACHE INTERNAL "")
set(JASPER_LIBRARIES ${JASPER_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper")
set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "")
endif()
set(HAVE_JASPER YES)
@ -225,10 +227,10 @@ if(WITH_PNG)
if(NOT PNG_FOUND)
ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_LIBPNG_PNG_H PNG_DEFINITIONS)
set(PNG_LIBRARY libpng)
set(PNG_LIBRARY libpng CACHE INTERNAL "")
set(PNG_LIBRARIES ${PNG_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng")
set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}")
set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "")
set(PNG_DEFINITIONS "")
ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE)
endif()
@ -270,7 +272,7 @@ if(WITH_GDAL)
endif()
endif()
if (WITH_GDCM)
if(WITH_GDCM)
find_package(GDCM QUIET)
if(NOT GDCM_FOUND)
set(HAVE_GDCM NO)

@ -51,7 +51,15 @@ endif(WITH_CUDA)
# --- Eigen ---
if(WITH_EIGEN AND NOT HAVE_EIGEN)
find_package(Eigen3 QUIET)
if((OPENCV_FORCE_EIGEN_FIND_PACKAGE_CONFIG
OR NOT (CMAKE_VERSION VERSION_LESS "3.0.0") # Eigen3Targets.cmake required CMake 3.0.0+
) AND NOT OPENCV_SKIP_EIGEN_FIND_PACKAGE_CONFIG
)
find_package(Eigen3 CONFIG QUIET) # Ceres 2.0.0 CMake scripts doesn't work with CMake's FindEigen3.cmake module (due to missing EIGEN3_VERSION_STRING)
endif()
if(NOT Eigen3_FOUND)
find_package(Eigen3 QUIET)
endif()
if(Eigen3_FOUND)
if(TARGET Eigen3::Eigen)

@ -57,7 +57,7 @@ SET(Open_BLAS_INCLUDE_SEARCH_PATHS
)
SET(Open_BLAS_LIB_SEARCH_PATHS
$ENV{OpenBLAS}cd
$ENV{OpenBLAS}
$ENV{OpenBLAS}/lib
$ENV{OpenBLAS_HOME}
$ENV{OpenBLAS_HOME}/lib

@ -2,7 +2,11 @@ set(OPENCV_APPLE_BUNDLE_NAME "OpenCV")
set(OPENCV_APPLE_BUNDLE_ID "org.opencv")
if(IOS)
if (APPLE_FRAMEWORK AND DYNAMIC_PLIST)
if(MAC_CATALYST)
# Copy the iOS plist over to the OSX directory if building iOS library for Catalyst
configure_file("${OpenCV_SOURCE_DIR}/platforms/ios/Info.plist.in"
"${CMAKE_BINARY_DIR}/osx/Info.plist")
elseif(APPLE_FRAMEWORK AND DYNAMIC_PLIST)
configure_file("${OpenCV_SOURCE_DIR}/platforms/ios/Info.Dynamic.plist.in"
"${CMAKE_BINARY_DIR}/ios/Info.plist")
else()

@ -1512,10 +1512,16 @@ function(ocv_add_library target)
set(CMAKE_SHARED_LIBRARY_RUNTIME_C_FLAG 1)
if(IOS AND NOT MAC_CATALYST)
set(OPENCV_APPLE_INFO_PLIST "${CMAKE_BINARY_DIR}/ios/Info.plist")
else()
set(OPENCV_APPLE_INFO_PLIST "${CMAKE_BINARY_DIR}/osx/Info.plist")
endif()
set_target_properties(${target} PROPERTIES
FRAMEWORK TRUE
MACOSX_FRAMEWORK_IDENTIFIER org.opencv
MACOSX_FRAMEWORK_INFO_PLIST ${CMAKE_BINARY_DIR}/ios/Info.plist
MACOSX_FRAMEWORK_INFO_PLIST ${OPENCV_APPLE_INFO_PLIST}
# "current version" in semantic format in Mach-O binary file
VERSION ${OPENCV_LIBVERSION}
# "compatibility version" in semantic format in Mach-O binary file
@ -1882,6 +1888,13 @@ function(ocv_update_file filepath content)
endif()
endfunction()
if(NOT BUILD_SHARED_LIBS AND (CMAKE_VERSION VERSION_LESS "3.14.0"))
ocv_update(OPENCV_3RDPARTY_EXCLUDE_FROM_ALL "") # avoid CMake warnings: https://gitlab.kitware.com/cmake/cmake/-/issues/18938
else()
ocv_update(OPENCV_3RDPARTY_EXCLUDE_FROM_ALL "EXCLUDE_FROM_ALL")
endif()
# adopted from https://gist.github.com/amir-saniyan/de99cee82fa9d8d615bb69f3f53b6004
function(ocv_blob2hdr blob_filename hdr_filename cpp_variable)
if(EXISTS "${hdr_filename}")

@ -32,6 +32,7 @@
opencv2/flann/hdf5.h
opencv2/imgcodecs/imgcodecs_c.h
opencv2/imgcodecs/ios.h
opencv2/imgcodecs/macosx.h
opencv2/videoio/videoio_c.h
opencv2/videoio/cap_ios.h
opencv2/xobjdetect/private.hpp

@ -145,9 +145,23 @@ if(DOXYGEN_FOUND)
set(tutorial_js_path "${CMAKE_CURRENT_SOURCE_DIR}/js_tutorials")
set(example_path "${CMAKE_SOURCE_DIR}/samples")
set(doxygen_image_path
${CMAKE_CURRENT_SOURCE_DIR}/images
${paths_doc}
${tutorial_path}
${tutorial_py_path}
${tutorial_js_path}
${paths_tutorial}
#${OpenCV_SOURCE_DIR}/samples/data # TODO: need to resolve ambiguous conflicts first
${OpenCV_SOURCE_DIR}
${OpenCV_SOURCE_DIR}/modules # <opencv>/modules
${OPENCV_EXTRA_MODULES_PATH} # <opencv_contrib>/modules
${OPENCV_DOCS_EXTRA_IMAGE_PATH} # custom variable for user modules
)
# set export variables
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${faqfile} ; ${paths_include} ; ${paths_hal_interface} ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial} ; ${tutorial_contrib_root}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/images ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${doxygen_image_path}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXCLUDE_LIST "${CMAKE_DOXYGEN_EXCLUDE_LIST}")
string(REPLACE ";" " " CMAKE_DOXYGEN_ENABLED_SECTIONS "${CMAKE_DOXYGEN_ENABLED_SECTIONS}")
# TODO: remove paths_doc from EXAMPLE_PATH after face module tutorials/samples moved to separate folders

@ -32,6 +32,15 @@ source ./emsdk_env.sh
echo ${EMSCRIPTEN}
@endcode
The version 1.39.16 of emscripten is verified for latest WebAssembly. Please check the version of emscripten to use the newest features of WebAssembly.
For example:
@code{.bash}
./emsdk update
./emsdk install 1.39.16
./emsdk activate 1.39.16
@endcode
Obtaining OpenCV Source Code
--------------------------
@ -76,6 +85,31 @@ Building OpenCV.js from Source
python ./platforms/js/build_js.py build_wasm --build_wasm
@endcode
-# [Optional] To build the OpenCV.js loader, append `--build_loader`.
For example:
@code{.bash}
python ./platforms/js/build_js.py build_js --build_loader
@endcode
@note
The loader is implemented as a js file in the path `<opencv_js_dir>/bin/loader.js`. The loader utilizes the [WebAssembly Feature Detection](https://github.com/GoogleChromeLabs/wasm-feature-detect) to detect the features of the broswer and load corresponding OpenCV.js automatically. To use it, you need to use the UMD version of [WebAssembly Feature Detection](https://github.com/GoogleChromeLabs/wasm-feature-detect) and introduce the `loader.js` in your Web application.
Example Code:
@code{.javascipt}
// Set paths configuration
let pathsConfig = {
wasm: "../../build_wasm/opencv.js",
threads: "../../build_mt/opencv.js",
simd: "../../build_simd/opencv.js",
threadsSimd: "../../build_mtSIMD/opencv.js",
}
// Load OpenCV.js and use the pathsConfiguration and main function as the params.
loadOpenCV(pathsConfig, main);
@endcode
-# [optional] To build documents, append `--build_doc` option.
For example:

@ -92,11 +92,11 @@ def main():
dest="square_size", type=float)
parser.add_argument("-R", "--radius_rate", help="circles_radius = square_size/radius_rate", default="5.0",
action="store", dest="radius_rate", type=float)
parser.add_argument("-w", "--page_width", help="page width in units", default="216", action="store",
parser.add_argument("-w", "--page_width", help="page width in units", default=argparse.SUPPRESS, action="store",
dest="page_width", type=float)
parser.add_argument("-h", "--page_height", help="page height in units", default="279", action="store",
dest="page_width", type=float)
parser.add_argument("-a", "--page_size", help="page size, supersedes -h -w arguments", default="A4", action="store",
parser.add_argument("-h", "--page_height", help="page height in units", default=argparse.SUPPRESS, action="store",
dest="page_height", type=float)
parser.add_argument("-a", "--page_size", help="page size, superseded if -h and -w are set", default="A4", action="store",
dest="page_size", choices=["A0", "A1", "A2", "A3", "A4", "A5"])
args = parser.parse_args()
@ -111,12 +111,16 @@ def main():
units = args.units
square_size = args.square_size
radius_rate = args.radius_rate
page_size = args.page_size
# page size dict (ISO standard, mm) for easy lookup. format - size: [width, height]
page_sizes = {"A0": [840, 1188], "A1": [594, 840], "A2": [420, 594], "A3": [297, 420], "A4": [210, 297],
"A5": [148, 210]}
page_width = page_sizes[page_size.upper()][0]
page_height = page_sizes[page_size.upper()][1]
if 'page_width' and 'page_height' in args:
page_width = args.page_width
page_height = args.page_height
else:
page_size = args.page_size
# page size dict (ISO standard, mm) for easy lookup. format - size: [width, height]
page_sizes = {"A0": [840, 1188], "A1": [594, 840], "A2": [420, 594], "A3": [297, 420], "A4": [210, 297],
"A5": [148, 210]}
page_width = page_sizes[page_size][0]
page_height = page_sizes[page_size][1]
pm = PatternMaker(columns, rows, output, units, square_size, radius_rate, page_width, page_height)
# dict for easy lookup of pattern type
mp = {"circles": pm.make_circles_pattern, "acircles": pm.make_acircles_pattern,

@ -209,7 +209,7 @@ find the average error, we calculate the arithmetical mean of the errors calcula
calibration images.
@code{.py}
mean_error = 0
for i in xrange(len(objpoints)):
for i in range(len(objpoints)):
imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2)/len(imgpoints2)
mean_error += error

@ -79,7 +79,7 @@ from matplotlib import pyplot as plt
img1 = cv.imread('myleft.jpg',0) #queryimage # left image
img2 = cv.imread('myright.jpg',0) #trainimage # right image
sift = cv.SIFT()
sift = cv.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
@ -93,14 +93,12 @@ search_params = dict(checks=50)
flann = cv.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)
good = []
pts1 = []
pts2 = []
# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
if m.distance < 0.8*n.distance:
good.append(m)
pts2.append(kp2[m.trainIdx].pt)
pts1.append(kp1[m.queryIdx].pt)
@endcode

Binary file not shown.

Before

Width:  |  Height:  |  Size: 15 KiB

After

Width:  |  Height:  |  Size: 18 KiB

@ -48,7 +48,7 @@ titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
for i in xrange(6):
plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
plt.subplot(2,3,i+1),plt.imshow(images[i],'gray',vmin=0,vmax=255)
plt.title(titles[i])
plt.xticks([]),plt.yticks([])

@ -32,7 +32,7 @@ automatically available with the platform (e.g. APPLE GCD) but chances are that
have access to a parallel framework either directly or by enabling the option in CMake and rebuild the library.
The second (weak) precondition is more related to the task you want to achieve as not all computations
are suitable / can be adatapted to be run in a parallel way. To remain simple, tasks that can be split
are suitable / can be adapted to be run in a parallel way. To remain simple, tasks that can be split
into multiple elementary operations with no memory dependency (no possible race condition) are easily
parallelizable. Computer vision processing are often easily parallelizable as most of the time the processing of
one pixel does not depend to the state of other pixels.

@ -84,57 +84,198 @@ This tutorial's code is shown below. You can also download it
Explanation
-----------
-# Most of the material shown here is trivial (if you have any doubt, please refer to the tutorials in
previous sections). Let's check the general structure of the C++ program:
@add_toggle_cpp
Most of the material shown here is trivial (if you have any doubt, please refer to the tutorials in
previous sections). Let's check the general structure of the C++ program:
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp main
-# Load an image (can be BGR or grayscale)
-# Create two windows (one for dilation output, the other for erosion)
-# Create a set of two Trackbars for each operation:
- The first trackbar "Element" returns either **erosion_elem** or **dilation_elem**
- The second trackbar "Kernel size" return **erosion_size** or **dilation_size** for the
corresponding operation.
-# Call once erosion and dilation to show the initial image.
Every time we move any slider, the user's function **Erosion** or **Dilation** will be
called and it will update the output image based on the current trackbar values.
Let's analyze these two functions:
#### The erosion function
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp erosion
The function that performs the *erosion* operation is @ref cv::erode . As we can see, it
receives three arguments:
- *src*: The source image
- *erosion_dst*: The output image
- *element*: This is the kernel we will use to perform the operation. If we do not
specify, the default is a simple `3x3` matrix. Otherwise, we can specify its
shape. For this, we need to use the function cv::getStructuringElement :
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp kernel
We can choose any of three shapes for our kernel:
- Rectangular box: MORPH_RECT
- Cross: MORPH_CROSS
- Ellipse: MORPH_ELLIPSE
Then, we just have to specify the size of our kernel and the *anchor point*. If not
specified, it is assumed to be in the center.
That is all. We are ready to perform the erosion of our image.
#### The dilation function
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
Here we also have the option of defining our kernel, its anchor point and the size of the operator
to be used.
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp dilation
@end_toggle
@add_toggle_java
Most of the material shown here is trivial (if you have any doubt, please refer to the tutorials in
previous sections). Let's check however the general structure of the java class. There are 4 main
parts in the java class:
- the class constructor which setups the window that will be filled with window components
- the `addComponentsToPane` method, which fills out the window
- the `update` method, which determines what happens when the user changes any value
- the `main` method, which is the entry point of the program
In this tutorial we will focus on the `addComponentsToPane` and `update` methods. However, for completion the
steps followed in the constructor are:
-# Load an image (can be BGR or grayscale)
-# Create a window
-# Add various control components with `addComponentsToPane`
-# show the window
The components were added by the following method:
@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java components
In short we
-# create a panel for the sliders
-# create a combo box for the element types
-# create a slider for the kernel size
-# create a combo box for the morphology function to use (erosion or dilation)
The action and state changed listeners added call at the end the `update` method which updates
the image based on the current slider values. So every time we move any slider, the `update` method is triggered.
- Load an image (can be BGR or grayscale)
- Create two windows (one for dilation output, the other for erosion)
- Create a set of two Trackbars for each operation:
- The first trackbar "Element" returns either **erosion_elem** or **dilation_elem**
- The second trackbar "Kernel size" return **erosion_size** or **dilation_size** for the
corresponding operation.
- Every time we move any slider, the user's function **Erosion** or **Dilation** will be
called and it will update the output image based on the current trackbar values.
#### Updating the image
Let's analyze these two functions:
To update the image we used the following implementation:
-# **erosion:**
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp erosion
@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java update
- The function that performs the *erosion* operation is @ref cv::erode . As we can see, it
receives three arguments:
- *src*: The source image
- *erosion_dst*: The output image
- *element*: This is the kernel we will use to perform the operation. If we do not
specify, the default is a simple `3x3` matrix. Otherwise, we can specify its
shape. For this, we need to use the function cv::getStructuringElement :
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp kernel
In other words we
We can choose any of three shapes for our kernel:
-# get the structuring element the user chose
-# execute the **erosion** or **dilation** function based on `doErosion`
-# reload the image with the morphology applied
-# repaint the frame
- Rectangular box: MORPH_RECT
- Cross: MORPH_CROSS
- Ellipse: MORPH_ELLIPSE
Let's analyze the `erode` and `dilate` methods:
Then, we just have to specify the size of our kernel and the *anchor point*. If not
specified, it is assumed to be in the center.
#### The erosion method
- That is all. We are ready to perform the erosion of our image.
@note Additionally, there is another parameter that allows you to perform multiple erosions
(iterations) at once. However, we haven't used it in this simple tutorial. You can check out the
reference for more details.
@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java erosion
-# **dilation:**
The function that performs the *erosion* operation is @ref cv::erode . As we can see, it
receives three arguments:
- *src*: The source image
- *erosion_dst*: The output image
- *element*: This is the kernel we will use to perform the operation. For specifying the shape, we need to use
the function cv::getStructuringElement :
@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java kernel
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
Here we also have the option of defining our kernel, its anchor point and the size of the operator
to be used.
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp dilation
We can choose any of three shapes for our kernel:
- Rectangular box: CV_SHAPE_RECT
- Cross: CV_SHAPE_CROSS
- Ellipse: CV_SHAPE_ELLIPSE
Together with the shape we specify the size of our kernel and the *anchor point*. If the anchor point is not
specified, it is assumed to be in the center.
That is all. We are ready to perform the erosion of our image.
#### The dilation function
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
Here we also have the option of defining our kernel, its anchor point and the size of the operator
to be used.
@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java dilation
@end_toggle
@add_toggle_python
Most of the material shown here is trivial (if you have any doubt, please refer to the tutorials in
previous sections). Let's check the general structure of the python script:
@snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py main
-# Load an image (can be BGR or grayscale)
-# Create two windows (one for erosion output, the other for dilation) with a set of trackbars each
- The first trackbar "Element" returns the value for the morphological type that will be mapped
(1 = rectangle, 2 = cross, 3 = ellipse)
- The second trackbar "Kernel size" returns the size of the element for the
corresponding operation
-# Call once erosion and dilation to show the initial image
Every time we move any slider, the user's function **erosion** or **dilation** will be
called and it will update the output image based on the current trackbar values.
Let's analyze these two functions:
#### The erosion function
@snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py erosion
The function that performs the *erosion* operation is @ref cv::erode . As we can see, it
receives two arguments and returns the processed image:
- *src*: The source image
- *element*: The kernel we will use to perform the operation. We can specify its
shape by using the function cv::getStructuringElement :
@snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py kernel
We can choose any of three shapes for our kernel:
- Rectangular box: MORPH_RECT
- Cross: MORPH_CROSS
- Ellipse: MORPH_ELLIPSE
Then, we just have to specify the size of our kernel and the *anchor point*. If the anchor point is not
specified, it is assumed to be in the center.
That is all. We are ready to perform the erosion of our image.
#### The dilation function
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
Here we also have the option of defining our kernel, its anchor point and the size of the operator
to be used.
@snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py dilation
@end_toggle
@note Additionally, there are further parameters that allow you to perform multiple erosions/dilations
(iterations) at once and also set the border type and value. However, we haven't used those
in this simple tutorial. You can check out the reference for more details.
Results
-------
Compile the code above and execute it with an image as argument. For instance, using this image:
Compile the code above and execute it (or run the script if using python) with an image as argument.
If you do not provide an image as argument the default sample image
([LinuxLogo.jpg](https://github.com/opencv/opencv/tree/master/samples/data/LinuxLogo.jpg)) will be used.
For instance, using this image:
![](images/Morphology_1_Tutorial_Original_Image.jpg)
@ -143,3 +284,4 @@ naturally. Try them out! You can even try to add a third Trackbar to control the
iterations.
![](images/Morphology_1_Result.jpg)
(depending on the programming language the output might vary a little or be only 1 window)

@ -247,15 +247,18 @@ When `WITH_` option is enabled:
`WITH_CUDA` (default: _OFF_)
Many algorithms have been implemented using CUDA acceleration, these functions are located in separate modules: @ref cuda. CUDA toolkit must be installed from the official NVIDIA site as a prerequisite. For cmake versions older than 3.9 OpenCV uses own `cmake/FindCUDA.cmake` script, for newer versions - the one packaged with CMake. Additional options can be used to control build process, e.g. `CUDA_GENERATION` or `CUDA_ARCH_BIN`. These parameters are not documented yet, please consult with the `cmake/OpenCVDetectCUDA.cmake` script for details.
Some tutorials can be found in the corresponding section: @ref tutorial_table_of_content_gpu
Many algorithms have been implemented using CUDA acceleration, these functions are located in separate modules. CUDA toolkit must be installed from the official NVIDIA site as a prerequisite. For cmake versions older than 3.9 OpenCV uses own `cmake/FindCUDA.cmake` script, for newer versions - the one packaged with CMake. Additional options can be used to control build process, e.g. `CUDA_GENERATION` or `CUDA_ARCH_BIN`. These parameters are not documented yet, please consult with the `cmake/OpenCVDetectCUDA.cmake` script for details.
@note Since OpenCV version 4.0 all CUDA-accelerated algorithm implementations have been moved to the _opencv_contrib_ repository. To build _opencv_ and _opencv_contrib_ together check @ref tutorial_config_reference_general_contrib.
@cond CUDA_MODULES
@note Some tutorials can be found in the corresponding section: @ref tutorial_table_of_content_gpu
@see @ref cuda
@endcond
@see https://en.wikipedia.org/wiki/CUDA
TODO: other options: `WITH_CUFFT`, `WITH_CUBLAS`, WITH_NVCUVID`?
TODO: other options: `WITH_CUFFT`, `WITH_CUBLAS`, `WITH_NVCUVID`?
### OpenCL support

@ -32,8 +32,7 @@ In this tutorial you will learn how to:
-# Create and update the background model by using @ref cv::BackgroundSubtractor class;
-# Get and show the foreground mask by using @ref cv::imshow ;
Code
----
### Code
In the following you can find the source code. We will let the user choose to process either a video
file or a sequence of images.

@ -1,7 +1,7 @@
Using Creative Senz3D and other Intel RealSense SDK compatible depth sensors {#tutorial_intelperc}
=======================================================================================
@prev_tutorial{tutorial_kinect_openni}
@prev_tutorial{tutorial_orbbec_astra}
**Note**: This tutorial is partially obsolete since PerC SDK has been replaced with RealSense SDK

@ -2,7 +2,7 @@ Using Kinect and other OpenNI compatible depth sensors {#tutorial_kinect_openni}
======================================================
@prev_tutorial{tutorial_video_write}
@next_tutorial{tutorial_intelperc}
@next_tutorial{tutorial_orbbec_astra}
Depth sensors compatible with OpenNI (Kinect, XtionPRO, ...) are supported through VideoCapture

Binary file not shown.

After

Width:  |  Height:  |  Size: 135 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

@ -0,0 +1,150 @@
Using Orbbec Astra 3D cameras {#tutorial_orbbec_astra}
======================================================
@prev_tutorial{tutorial_kinect_openni}
@next_tutorial{tutorial_intelperc}
### Introduction
This tutorial is devoted to the Astra Series of Orbbec 3D cameras (https://orbbec3d.com/product-astra-pro/).
These cameras have a depth sensor in addition to a common color sensor. The depth sensors can be read using
the OpenNI interface with @ref cv::VideoCapture class. The video stream is provided through the regular camera
interface.
### Installation Instructions
In order to use a depth sensor with OpenCV you should do the following steps:
-# Download the latest version of Orbbec OpenNI SDK (from here <https://orbbec3d.com/develop/>).
Unzip the archive, choose the build according to your operating system and follow installation
steps provided in the Readme file. For instance, if you use 64bit GNU/Linux run:
@code{.bash}
$ cd Linux/OpenNI-Linux-x64-2.3.0.63/
$ sudo ./install.sh
@endcode
When you are done with the installation, make sure to replug your device for udev rules to take
effect. The camera should now work as a general camera device. Note that your current user should
belong to group `video` to have access to the camera. Also, make sure to source `OpenNIDevEnvironment` file:
@code{.bash}
$ source OpenNIDevEnvironment
@endcode
-# Run the following commands to verify that OpenNI library and header files can be found. You should see
something similar in your terminal:
@code{.bash}
$ echo $OPENNI2_INCLUDE
/home/user/OpenNI_2.3.0.63/Linux/OpenNI-Linux-x64-2.3.0.63/Include
$ echo $OPENNI2_REDIST
/home/user/OpenNI_2.3.0.63/Linux/OpenNI-Linux-x64-2.3.0.63/Redist
@endcode
If the above two variables are empty, then you need to source `OpenNIDevEnvironment` again. Now you can
configure OpenCV with OpenNI support enabled by setting the `WITH_OPENNI2` flag in CMake.
You may also like to enable the `BUILD_EXAMPLES` flag to get a code sample working with your Astra camera.
Run the following commands in the directory containing OpenCV source code to enable OpenNI support:
@code{.bash}
$ mkdir build
$ cd build
$ cmake -DWITH_OPENNI2=ON ..
@endcode
If the OpenNI library is found, OpenCV will be built with OpenNI2 support. You can see the status of OpenNI2
support in the CMake log:
@code{.text}
-- Video I/O:
-- DC1394: YES (2.2.6)
-- FFMPEG: YES
-- avcodec: YES (58.91.100)
-- avformat: YES (58.45.100)
-- avutil: YES (56.51.100)
-- swscale: YES (5.7.100)
-- avresample: NO
-- GStreamer: YES (1.18.1)
-- OpenNI2: YES (2.3.0)
-- v4l/v4l2: YES (linux/videodev2.h)
@endcode
-# Build OpenCV:
@code{.bash}
$ make
@endcode
### Code
To get both depth and color frames, two @ref cv::VideoCapture objects should be created:
@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Open streams
The first object will use the regular Video4Linux2 interface to access the color sensor. The second one
is using OpenNI2 API to retrieve depth data.
Before using the created VideoCapture objects you may want to setup stream parameters by setting
objects' properties. The most important parameters are frame width, frame height and fps:
@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Setup streams
For setting and getting some property of sensor data generators use @ref cv::VideoCapture::set and
@ref cv::VideoCapture::get methods respectively, e.g. :
@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Get properties
The following properties of cameras available through OpenNI interfaces are supported for the depth
generator:
- @ref cv::CAP_PROP_FRAME_WIDTH -- Frame width in pixels.
- @ref cv::CAP_PROP_FRAME_HEIGHT -- Frame height in pixels.
- @ref cv::CAP_PROP_FPS -- Frame rate in FPS.
- @ref cv::CAP_PROP_OPENNI_REGISTRATION -- Flag that registers the remapping depth map to image map
by changing the depth generator's viewpoint (if the flag is "on") or sets this view point to
its normal one (if the flag is "off"). The registration process's resulting images are
pixel-aligned, which means that every pixel in the image is aligned to a pixel in the depth
image.
- @ref cv::CAP_PROP_OPENNI2_MIRROR -- Flag to enable or disable mirroring for this stream. Set to 0
to disable mirroring
Next properties are available for getting only:
- @ref cv::CAP_PROP_OPENNI_FRAME_MAX_DEPTH -- A maximum supported depth of the camera in mm.
- @ref cv::CAP_PROP_OPENNI_BASELINE -- Baseline value in mm.
After the VideoCapture objects are set up you can start reading frames from them.
@note
OpenCV's VideoCapture provides synchronous API, so you have to grab frames in a new thread
to avoid one stream blocking while another stream is being read. VideoCapture is not a
thread-safe class, so you need to be careful to avoid any possible deadlocks or data races.
Example implementation that gets frames from each sensor in a new thread and stores them
in a list along with their timestamps:
@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Read streams
VideoCapture can retrieve the following data:
-# data given from the depth generator:
- @ref cv::CAP_OPENNI_DEPTH_MAP - depth values in mm (CV_16UC1)
- @ref cv::CAP_OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3)
- @ref cv::CAP_OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1)
- @ref cv::CAP_OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1)
- @ref cv::CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not occluded, not shaded, etc.)
(CV_8UC1)
-# data given from the color sensor is a regular BGR image (CV_8UC3).
When new data is available a reading thread notifies the main thread. A frame is stored in the
ordered list -- the first frame is the latest one:
@snippetlineno samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp Show color frame
Depth frames can be picked the same way from the `depthFrames` list.
After that, you'll have two frames: one containing color information and another one -- depth
information. In the sample images below you can see the color frame and the depth frame showing
the same scene. Looking at the color frame it's hard to distinguish plant leaves from leaves painted
on a wall, but the depth data makes it easy.
![Color frame](images/astra_color.jpg)
![Depth frame](images/astra_depth.png)
The complete implementation can be found in
[orbbec_astra.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp)
in `samples/cpp/tutorial_code/videoio` directory.

@ -26,6 +26,10 @@ This section contains tutorials about how to read/save your video files.
*Languages:* C++
- @subpage tutorial_orbbec_astra
*Languages:* C++
- @subpage tutorial_intelperc
*Languages:* C++
*Languages:* C++

@ -126,13 +126,12 @@ captRefrnc.set(CAP_PROP_POS_FRAMES, 10); // go to the 10th frame of the video
For properties you can read and change look into the documentation of the @ref cv::VideoCapture::get and
@ref cv::VideoCapture::set functions.
Image similarity - PSNR and SSIM
--------------------------------
### Image similarity - PSNR and SSIM
We want to check just how imperceptible our video converting operation went, therefore we need a
system to check frame by frame the similarity or differences. The most common algorithm used for
this is the PSNR (aka **Peak signal-to-noise ratio**). The simplest definition of this starts out
from the *mean squad error*. Let there be two images: I1 and I2; with a two dimensional size i and
from the *mean squared error*. Let there be two images: I1 and I2; with a two dimensional size i and
j, composed of c number of channels.
\f[MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}\f]
@ -145,15 +144,15 @@ Here the \f$MAX_I\f$ is the maximum valid value for a pixel. In case of the simp
per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in
an invalid divide by zero operation in the PSNR formula. In this case the PSNR is undefined and as
we'll need to handle this case separately. The transition to a logarithmic scale is made because the
pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks
pixel values have a very wide dynamic range. All this translated to OpenCV and a function looks
like:
@add_toggle_cpp
@include cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-psnr
@snippet cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-psnr
@end_toggle
@add_toggle_python
@include samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-psnr
@snippet samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-psnr
@end_toggle
Typically result values are anywhere between 30 and 50 for video compression, where higher is
@ -172,11 +171,11 @@ implementation below.
Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004." article.
@add_toggle_cpp
@include cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-mssim
@snippet samples/cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-mssim
@end_toggle
@add_toggle_python
@include samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-mssim
@snippet samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-mssim
@end_toggle
This will return a similarity index for each channel of the image. This value is between zero and

@ -63,7 +63,7 @@ specialized video writing libraries such as *FFMpeg* or codecs as *HuffYUV*, *Co
an alternative, create the video track with OpenCV and expand it with sound tracks or convert it to
other formats by using video manipulation programs such as *VirtualDub* or *AviSynth*.
The *VideoWriter* class
The VideoWriter class
-----------------------
The content written here builds on the assumption you

@ -40,6 +40,14 @@
publisher={IEEE}
}
@inproceedings{Terzakis20,
author = {Terzakis, George and Lourakis, Manolis},
year = {2020},
month = {09},
pages = {},
title = {A Consistently Fast and Globally Optimal Solution to the Perspective-n-Point Problem}
}
@inproceedings{strobl2011iccv,
title={More accurate pinhole camera calibration with imperfect planar target},
author={Strobl, Klaus H. and Hirzinger, Gerd},

@ -471,6 +471,7 @@ enum SolvePnPMethod {
//!< - point 1: [ squareLength / 2, squareLength / 2, 0]
//!< - point 2: [ squareLength / 2, -squareLength / 2, 0]
//!< - point 3: [-squareLength / 2, -squareLength / 2, 0]
SOLVEPNP_SQPNP = 8, //!< SQPnP: A Consistently Fast and Globally Optimal Solution to the Perspective-n-Point Problem @cite Terzakis20
#ifndef CV_DOXYGEN
SOLVEPNP_MAX_COUNT //!< Used for count
#endif
@ -550,17 +551,18 @@ enum NeighborSearchMethod { NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS };
struct CV_EXPORTS_W_SIMPLE UsacParams
{ // in alphabetical order
double confidence = 0.99;
bool isParallel = false;
int loIterations = 5;
LocalOptimMethod loMethod = LocalOptimMethod::LOCAL_OPTIM_INNER_LO;
int loSampleSize = 14;
int maxIterations = 5000;
NeighborSearchMethod neighborsSearch = NeighborSearchMethod::NEIGH_GRID;
int randomGeneratorState = 0;
SamplingMethod sampler = SamplingMethod::SAMPLING_UNIFORM;
ScoreMethod score = ScoreMethod::SCORE_METHOD_MSAC;
double threshold = 1.5;
CV_WRAP UsacParams();
CV_PROP_RW double confidence;
CV_PROP_RW bool isParallel;
CV_PROP_RW int loIterations;
CV_PROP_RW LocalOptimMethod loMethod;
CV_PROP_RW int loSampleSize;
CV_PROP_RW int maxIterations;
CV_PROP_RW NeighborSearchMethod neighborsSearch;
CV_PROP_RW int randomGeneratorState;
CV_PROP_RW SamplingMethod sampler;
CV_PROP_RW ScoreMethod score;
CV_PROP_RW double threshold;
};
/** @brief Converts a rotation matrix to a rotation vector or vice versa.
@ -945,6 +947,9 @@ It requires 4 coplanar object points defined in the following order:
- point 1: [ squareLength / 2, squareLength / 2, 0]
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- **SOLVEPNP_SQPNP** Method is based on the paper "A Consistently Fast and Globally Optimal Solution to the
Perspective-n-Point Problem" by G. Terzakis and M.Lourakis (@cite Terzakis20). It requires 3 or more points.
The function estimates the object pose given a set of object points, their corresponding image
projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
@ -1068,6 +1073,7 @@ a 3D point expressed in the world frame into the camera frame:
- point 1: [ squareLength / 2, squareLength / 2, 0]
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- With **SOLVEPNP_SQPNP** input points must be >= 3
*/
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
@ -2597,10 +2603,10 @@ CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
floating-point (single or double precision).
@param points2 Array of the second image points of the same size and format as points1 .
@param method Method for computing a fundamental matrix.
- **CV_FM_7POINT** for a 7-point algorithm. \f$N = 7\f$
- **CV_FM_8POINT** for an 8-point algorithm. \f$N \ge 8\f$
- **CV_FM_RANSAC** for the RANSAC algorithm. \f$N \ge 8\f$
- **CV_FM_LMEDS** for the LMedS algorithm. \f$N \ge 8\f$
- @ref FM_7POINT for a 7-point algorithm. \f$N = 7\f$
- @ref FM_8POINT for an 8-point algorithm. \f$N \ge 8\f$
- @ref FM_RANSAC for the RANSAC algorithm. \f$N \ge 8\f$
- @ref FM_LMEDS for the LMedS algorithm. \f$N \ge 8\f$
@param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the

@ -47,6 +47,7 @@
#include "p3p.h"
#include "ap3p.h"
#include "ippe.hpp"
#include "sqpnp.hpp"
#include "calib3d_c_api.h"
#include "usac.hpp"
@ -197,6 +198,21 @@ public:
Mat tvec;
};
// Default USAC configuration: uniform sampling, MSAC scoring, inner local
// optimization, and a grid-based neighborhood search. Members are listed in
// declaration (alphabetical) order, matching the struct definition.
UsacParams::UsacParams()
    : confidence(0.99),
      isParallel(false),
      loIterations(5),
      loMethod(LocalOptimMethod::LOCAL_OPTIM_INNER_LO),
      loSampleSize(14),
      maxIterations(5000),
      neighborsSearch(NeighborSearchMethod::NEIGH_GRID),
      randomGeneratorState(0),
      sampler(SamplingMethod::SAMPLING_UNIFORM),
      score(ScoreMethod::SCORE_METHOD_MSAC),
      threshold(1.5)
{
}
bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
@ -781,7 +797,8 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess)
|| (npoints >= 3 && flags == SOLVEPNP_SQPNP) )
&& npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
opoints = opoints.reshape(3, npoints);
@ -966,6 +983,14 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
}
} catch (...) { }
}
else if (flags == SOLVEPNP_SQPNP)
{
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
sqpnp::PoseSolver solver;
solver.solve(opoints, undistortedPoints, vec_rvecs, vec_tvecs);
}
/*else if (flags == SOLVEPNP_DLS)
{
Mat undistortedPoints;
@ -993,7 +1018,8 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
vec_tvecs.push_back(tvec);
}*/
else
CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_EPNP or SOLVEPNP_DLS");
CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, "
"SOLVEPNP_EPNP, SOLVEPNP_DLS, SOLVEPNP_UPNP, SOLVEPNP_AP3P, SOLVEPNP_IPPE, SOLVEPNP_IPPE_SQUARE or SOLVEPNP_SQPNP");
CV_Assert(vec_rvecs.size() == vec_tvecs.size());

@ -0,0 +1,775 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// This file is based on file issued with the following license:
/*
BSD 3-Clause License
Copyright (c) 2020, George Terzakis
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "precomp.hpp"
#include "sqpnp.hpp"
#include <opencv2/calib3d.hpp>
namespace cv {
namespace sqpnp {
// Solver tuning constants (values follow the reference SQPnP implementation).
// Singular values of omega_ below this are treated as zero (rank deficiency).
const double PoseSolver::RANK_TOLERANCE = 1e-7;
// SQP iteration stops once the squared norm of the step falls below this.
const double PoseSolver::SQP_SQUARED_TOLERANCE = 1e-10;
// If det(r) exceeds this after SQP, r is snapped to the nearest rotation.
const double PoseSolver::SQP_DET_THRESHOLD = 1.001;
// Below this orthogonality error an eigenvector is used directly (no SQP).
const double PoseSolver::ORTHOGONALITY_SQUARED_ERROR_THRESHOLD = 1e-8;
// Rotation vectors closer than this (squared L2) are considered duplicates.
const double PoseSolver::EQUAL_VECTORS_SQUARED_DIFF = 1e-10;
// Squared errors within this of each other are treated as equal.
const double PoseSolver::EQUAL_SQUARED_ERRORS_DIFF = 1e-6;
// Minimum admissible variance of the projected points (degeneracy guard).
const double PoseSolver::POINT_VARIANCE_THRESHOLD = 1e-5;
// Frobenius norm of any rotation matrix; used to scale eigenvector candidates.
const double PoseSolver::SQRT3 = std::sqrt(3);
// Maximum number of SQP refinement steps per starting point.
const int PoseSolver::SQP_MAX_ITERATION = 15;
// Copies `source` into `dest` with its top-left corner at (row, col).
// Internal helper: all call sites are assumed to pass in-range offsets,
// so no bounds checking is performed here.
template <typename tp, int snrows, int sncols,
    int dnrows, int dncols>
void set(int row, int col, cv::Matx<tp, dnrows, dncols>& dest,
    const cv::Matx<tp, snrows, sncols>& source)
{
    for (int r = 0; r < snrows; ++r)
        for (int c = 0; c < sncols; ++c)
            dest(row + r, col + c) = source(r, c);
}
// Default-constructs a solver with empty per-call state: the null-vector
// count is marked invalid (-1) and no solutions are stored. Both fields are
// reset again at the start of each solve() call.
PoseSolver::PoseSolver()
: num_null_vectors_(-1),
num_solutions_(0)
{
}
// Entry point: validates the input point sets, converts them to double
// precision, builds the Omega matrix, runs the internal solver and writes
// every retained pose out as a (rvec, tvec) pair.
void PoseSolver::solve(InputArray objectPoints, InputArray imagePoints, OutputArrayOfArrays rvecs,
OutputArrayOfArrays tvecs)
{
//Input checking
int objType = objectPoints.getMat().type();
CV_CheckType(objType, objType == CV_32FC3 || objType == CV_64FC3,
"Type of objectPoints must be CV_32FC3 or CV_64FC3");
int imgType = imagePoints.getMat().type();
CV_CheckType(imgType, imgType == CV_32FC2 || imgType == CV_64FC2,
"Type of imagePoints must be CV_32FC2 or CV_64FC2");
CV_Assert(objectPoints.rows() == 1 || objectPoints.cols() == 1);
CV_Assert(objectPoints.rows() >= 3 || objectPoints.cols() >= 3);
CV_Assert(imagePoints.rows() == 1 || imagePoints.cols() == 1);
CV_Assert(imagePoints.rows() * imagePoints.cols() == objectPoints.rows() * objectPoints.cols());
// Work in double precision internally; convert float inputs once up front.
Mat _imagePoints;
if (imgType == CV_32FC2)
{
imagePoints.getMat().convertTo(_imagePoints, CV_64F);
}
else
{
_imagePoints = imagePoints.getMat();
}
Mat _objectPoints;
if (objType == CV_32FC3)
{
objectPoints.getMat().convertTo(_objectPoints, CV_64F);
}
else
{
_objectPoints = objectPoints.getMat();
}
// Reset per-call state so a solver instance can be reused.
num_null_vectors_ = -1;
num_solutions_ = 0;
computeOmega(_objectPoints, _imagePoints);
solveInternal();
// Honour the caller's requested depth/layout when the output arrays are
// fixed-type (e.g. std::vector<Vec3d>); default to CV_64F otherwise.
int depthRot = rvecs.fixedType() ? rvecs.depth() : CV_64F;
int depthTrans = tvecs.fixedType() ? tvecs.depth() : CV_64F;
rvecs.create(num_solutions_, 1, CV_MAKETYPE(depthRot, rvecs.fixedType() && rvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
tvecs.create(num_solutions_, 1, CV_MAKETYPE(depthTrans, tvecs.fixedType() && tvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
for (int i = 0; i < num_solutions_; i++)
{
// r_hat stores the rotation row-major as a 9x1 vector; reshape to 3x3
// before converting to an axis-angle vector with Rodrigues.
Mat rvec;
Mat rotation = Mat(solutions_[i].r_hat).reshape(1, 3);
Rodrigues(rotation, rvec);
rvecs.getMatRef(i) = rvec;
tvecs.getMatRef(i) = Mat(solutions_[i].t);
}
}
// Builds the data-dependent matrices used by the solver:
//  - omega_ : 9x9 PSD matrix of the quadratic cost in the row-major rotation r
//  - p_     : 3x9 matrix giving the optimal translation as t = p_ * r
//  - s_, u_ : singular values / vectors of omega_
//  - point_mean_ : centroid of the object points (used for the cheirality test)
// Both inputs are expected to already be CV_64F, as prepared by solve().
void PoseSolver::computeOmega(InputArray objectPoints, InputArray imagePoints)
{
omega_ = cv::Matx<double, 9, 9>::zeros();
cv::Matx<double, 3, 9> qa_sum = cv::Matx<double, 3, 9>::zeros();
cv::Point2d sum_img(0, 0);
cv::Point3d sum_obj(0, 0, 0);
double sq_norm_sum = 0;
Mat _imagePoints = imagePoints.getMat();
Mat _objectPoints = objectPoints.getMat();
int n = _objectPoints.cols * _objectPoints.rows;
// Accumulate the per-point contributions to omega_ and qa_sum.
for (int i = 0; i < n; i++)
{
const cv::Point2d& img_pt = _imagePoints.at<cv::Point2d>(i);
const cv::Point3d& obj_pt = _objectPoints.at<cv::Point3d>(i);
sum_img += img_pt;
sum_obj += obj_pt;
const double& x = img_pt.x, & y = img_pt.y;
const double& X = obj_pt.x, & Y = obj_pt.y, & Z = obj_pt.z;
double sq_norm = x * x + y * y;
sq_norm_sum += sq_norm;
// Pairwise products of the object-point coordinates, reused below.
double X2 = X * X,
XY = X * Y,
XZ = X * Z,
Y2 = Y * Y,
YZ = Y * Z,
Z2 = Z * Z;
omega_(0, 0) += X2;
omega_(0, 1) += XY;
omega_(0, 2) += XZ;
omega_(1, 1) += Y2;
omega_(1, 2) += YZ;
omega_(2, 2) += Z2;
//Populating this manually saves operations by only calculating upper triangle
omega_(0, 6) += -x * X2; omega_(0, 7) += -x * XY; omega_(0, 8) += -x * XZ;
omega_(1, 7) += -x * Y2; omega_(1, 8) += -x * YZ;
omega_(2, 8) += -x * Z2;
omega_(3, 6) += -y * X2; omega_(3, 7) += -y * XY; omega_(3, 8) += -y * XZ;
omega_(4, 7) += -y * Y2; omega_(4, 8) += -y * YZ;
omega_(5, 8) += -y * Z2;
omega_(6, 6) += sq_norm * X2; omega_(6, 7) += sq_norm * XY; omega_(6, 8) += sq_norm * XZ;
omega_(7, 7) += sq_norm * Y2; omega_(7, 8) += sq_norm * YZ;
omega_(8, 8) += sq_norm * Z2;
//Compute qa_sum
qa_sum(0, 0) += X; qa_sum(0, 1) += Y; qa_sum(0, 2) += Z;
qa_sum(1, 3) += X; qa_sum(1, 4) += Y; qa_sum(1, 5) += Z;
qa_sum(0, 6) += -x * X; qa_sum(0, 7) += -x * Y; qa_sum(0, 8) += -x * Z;
qa_sum(1, 6) += -y * X; qa_sum(1, 7) += -y * Y; qa_sum(1, 8) += -y * Z;
qa_sum(2, 0) += -x * X; qa_sum(2, 1) += -x * Y; qa_sum(2, 2) += -x * Z;
qa_sum(2, 3) += -y * X; qa_sum(2, 4) += -y * Y; qa_sum(2, 5) += -y * Z;
qa_sum(2, 6) += sq_norm * X; qa_sum(2, 7) += sq_norm * Y; qa_sum(2, 8) += sq_norm * Z;
}
// Fill the upper-triangle entries that equal already-accumulated ones.
omega_(1, 6) = omega_(0, 7); omega_(2, 6) = omega_(0, 8); omega_(2, 7) = omega_(1, 8);
omega_(4, 6) = omega_(3, 7); omega_(5, 6) = omega_(3, 8); omega_(5, 7) = omega_(4, 8);
omega_(7, 6) = omega_(6, 7); omega_(8, 6) = omega_(6, 8); omega_(8, 7) = omega_(7, 8);
omega_(3, 3) = omega_(0, 0); omega_(3, 4) = omega_(0, 1); omega_(3, 5) = omega_(0, 2);
omega_(4, 4) = omega_(1, 1); omega_(4, 5) = omega_(1, 2);
omega_(5, 5) = omega_(2, 2);
//Mirror upper triangle to lower triangle
for (int r = 0; r < 9; r++)
{
for (int c = 0; c < r; c++)
{
omega_(r, c) = omega_(c, r);
}
}
// 3x3 symmetric system matrix relating the translation to the rotation terms.
cv::Matx<double, 3, 3> q;
q(0, 0) = n; q(0, 1) = 0; q(0, 2) = -sum_img.x;
q(1, 0) = 0; q(1, 1) = n; q(1, 2) = -sum_img.y;
q(2, 0) = -sum_img.x; q(2, 1) = -sum_img.y; q(2, 2) = sq_norm_sum;
double inv_n = 1.0 / n;
// det(q)/n^3 measures the spread of the projected points; reject (near-)
// degenerate configurations where the projections nearly coincide.
double detQ = n * (n * sq_norm_sum - sum_img.y * sum_img.y - sum_img.x * sum_img.x);
double point_coordinate_variance = detQ * inv_n * inv_n * inv_n;
CV_Assert(point_coordinate_variance >= POINT_VARIANCE_THRESHOLD);
Matx<double, 3, 3> q_inv;
analyticalInverse3x3Symm(q, q_inv);
// p_ recovers the optimal translation for a given rotation: t = p_ * r.
p_ = -q_inv * qa_sum;
// Fold the eliminated translation back into the quadratic cost on r.
omega_ += qa_sum.t() * p_;
cv::SVD omega_svd(omega_, cv::SVD::FULL_UV);
s_ = omega_svd.w;
u_ = cv::Mat(omega_svd.vt.t());
// NOTE(review): the literal 1e-7 duplicates RANK_TOLERANCE — consider
// reusing the named constant here.
CV_Assert(s_(0) >= 1e-7);
// Count trailing near-zero singular values; num_null_vectors_ starts at -1,
// so s_(8) is examined first. At most 6 null vectors are tolerated.
while (s_(7 - num_null_vectors_) < RANK_TOLERANCE) num_null_vectors_++;
CV_Assert(++num_null_vectors_ <= 6);
point_mean_ = cv::Vec3d(sum_obj.x / n, sum_obj.y / n, sum_obj.z / n);
}
// Explores candidate rotations derived from the (approximate) null space of
// omega_: each null-space eigenvector (or the last one, when no null vectors
// were detected) is used directly if it is already near-orthogonal, otherwise
// it seeds SQP refinement with both signs. If the best error found is still
// large relative to the next singular values, neighbouring eigenvectors are
// tried as well. Feasible poses are collected via checkSolution().
void PoseSolver::solveInternal()
{
double min_sq_err = std::numeric_limits<double>::max();
int num_eigen_points = num_null_vectors_ > 0 ? num_null_vectors_ : 1;
// Pass 1: candidates from the trailing (smallest-singular-value) columns.
for (int i = 9 - num_eigen_points; i < 9; i++)
{
// Scale by sqrt(3) so the candidate has the Frobenius norm of a rotation.
const cv::Matx<double, 9, 1> e = SQRT3 * u_.col(i);
double orthogonality_sq_err = orthogonalityError(e);
SQPSolution solutions[2];
//If e is orthogonal, we can skip SQP
if (orthogonality_sq_err < ORTHOGONALITY_SQUARED_ERROR_THRESHOLD)
{
// Multiplying by det3x3(e) fixes the handedness of the candidate.
solutions[0].r_hat = det3x3(e) * e;
solutions[0].t = p_ * solutions[0].r_hat;
checkSolution(solutions[0], min_sq_err);
}
else
{
// Refine from the nearest rotations to both +e and -e.
Matx<double, 9, 1> r;
nearestRotationMatrix(e, r);
solutions[0] = runSQP(r);
solutions[0].t = p_ * solutions[0].r_hat;
checkSolution(solutions[0], min_sq_err);
nearestRotationMatrix(-e, r);
solutions[1] = runSQP(r);
solutions[1].t = p_ * solutions[1].r_hat;
checkSolution(solutions[1], min_sq_err);
}
}
// Pass 2: while the best error is still worse than 3x the next singular
// value, keep probing eigenvectors with larger singular values.
int c = 1;
while (min_sq_err > 3 * s_[9 - num_eigen_points - c] && 9 - num_eigen_points - c > 0)
{
int index = 9 - num_eigen_points - c;
const cv::Matx<double, 9, 1> e = u_.col(index);
SQPSolution solutions[2];
Matx<double, 9, 1> r;
nearestRotationMatrix(e, r);
solutions[0] = runSQP(r);
solutions[0].t = p_ * solutions[0].r_hat;
checkSolution(solutions[0], min_sq_err);
nearestRotationMatrix(-e, r);
solutions[1] = runSQP(r);
solutions[1].t = p_ * solutions[1].r_hat;
checkSolution(solutions[1], min_sq_err);
c++;
}
}
// Iteratively refines r0 by sequential quadratic programming until the step
// becomes negligible or the iteration budget is exhausted, then returns the
// result projected onto (or near) the rotation manifold. The translation
// field of the returned solution is left for the caller to fill in.
PoseSolver::SQPSolution PoseSolver::runSQP(const cv::Matx<double, 9, 1>& r0)
{
    cv::Matx<double, 9, 1> x = r0;
    cv::Matx<double, 9, 1> dx;
    double dx_sq_norm = std::numeric_limits<double>::max();

    // Take SQP steps until the squared step length drops below tolerance,
    // capped at SQP_MAX_ITERATION steps.
    for (int it = 0; dx_sq_norm > SQP_SQUARED_TOLERANCE && it < SQP_MAX_ITERATION; ++it)
    {
        solveSQPSystem(x, dx);
        x += dx;
        dx_sq_norm = cv::norm(dx, cv::NORM_L2SQR);
    }

    SQPSolution solution;
    double det_x = det3x3(x);
    if (det_x < 0)
    {
        // Negative determinant means a reflection: -x encodes the proper
        // (right-handed) counterpart with the same row space.
        x = -x;
        det_x = -det_x;
    }
    if (det_x > SQP_DET_THRESHOLD)
    {
        // Drifted too far from the rotation manifold: snap to the nearest
        // rotation matrix.
        nearestRotationMatrix(x, solution.r_hat);
    }
    else
    {
        solution.r_hat = x;
    }
    return solution;
}
// Computes one SQP step at r: first the minimum-norm correction `delta` that
// satisfies the linearized orthogonality constraints (via the row-space basis
// H and lower-triangular JH from computeRowAndNullspace), then an additional
// null-space component that minimizes the quadratic omega_ cost.
void PoseSolver::solveSQPSystem(const cv::Matx<double, 9, 1>& r, cv::Matx<double, 9, 1>& delta)
{
// Squared norms and pairwise dot products of the three rows of r.
double sqnorm_r1 = r(0) * r(0) + r(1) * r(1) + r(2) * r(2),
sqnorm_r2 = r(3) * r(3) + r(4) * r(4) + r(5) * r(5),
sqnorm_r3 = r(6) * r(6) + r(7) * r(7) + r(8) * r(8);
double dot_r1r2 = r(0) * r(3) + r(1) * r(4) + r(2) * r(5),
dot_r1r3 = r(0) * r(6) + r(1) * r(7) + r(2) * r(8),
dot_r2r3 = r(3) * r(6) + r(4) * r(7) + r(5) * r(8);
cv::Matx<double, 9, 3> N;
cv::Matx<double, 9, 6> H;
cv::Matx<double, 6, 6> JH;
computeRowAndNullspace(r, H, N, JH);
// Residuals of the six orthogonality constraints (unit rows, zero dots).
cv::Matx<double, 6, 1> g;
g(0) = 1 - sqnorm_r1; g(1) = 1 - sqnorm_r2; g(2) = 1 - sqnorm_r3; g(3) = -dot_r1r2; g(4) = -dot_r2r3; g(5) = -dot_r1r3;
// Forward substitution: JH is lower triangular (upper part is unused).
cv::Matx<double, 6, 1> x;
x(0) = g(0) / JH(0, 0);
x(1) = g(1) / JH(1, 1);
x(2) = g(2) / JH(2, 2);
x(3) = (g(3) - JH(3, 0) * x(0) - JH(3, 1) * x(1)) / JH(3, 3);
x(4) = (g(4) - JH(4, 1) * x(1) - JH(4, 2) * x(2) - JH(4, 3) * x(3)) / JH(4, 4);
x(5) = (g(5) - JH(5, 0) * x(0) - JH(5, 2) * x(2) - JH(5, 3) * x(3) - JH(5, 4) * x(4)) / JH(5, 5);
// Row-space component of the step.
delta = H * x;
// Add the null-space component minimizing the quadratic omega_ cost.
cv::Matx<double, 3, 9> nt_omega = N.t() * omega_;
cv::Matx<double, 3, 3> W = nt_omega * N, W_inv;
// NOTE(review): the boolean return (singularity flag) is ignored here; if W
// were singular, W_inv would be used uninitialized — confirm W is always
// well-conditioned at this point.
analyticalInverse3x3Symm(W, W_inv);
cv::Matx<double, 3, 1> y = -W_inv * nt_omega * (delta + r);
delta += N * y;
}
// Inverts a symmetric 3x3 matrix in closed form, reading only the lower
// triangle of Q. Returns false (leaving Qinv untouched) when |det(Q)| is
// below `threshold`, i.e. when Q is treated as singular.
bool PoseSolver::analyticalInverse3x3Symm(const cv::Matx<double, 3, 3>& Q,
    cv::Matx<double, 3, 3>& Qinv,
    const double& threshold)
{
    // Lower-triangle entries; by symmetry they determine the whole matrix:
    //     | a b c |
    // Q = | b d e |
    //     | c e f |
    const double a = Q(0, 0);
    const double b = Q(1, 0), d = Q(1, 1);
    const double c = Q(2, 0), e = Q(2, 1), f = Q(2, 2);

    // Reused products for the determinant and the cofactors.
    const double e2 = e * e;
    const double ad = a * d;
    const double b2 = b * b;
    const double bc = b * c;
    const double c2 = c * c;

    const double det = -ad * f + a * e2 + b2 * f - 2.0 * bc * e + c2 * d;
    if (fabs(det) < threshold) return false;

    const double inv_det = 1.0 / det;
    // Off-diagonal cofactors scaled by 1/det.
    const double k01 = (-b * f + c * e) * inv_det;
    const double k02 = (b * e - c * d) * inv_det;
    const double k12 = (a * e - bc) * inv_det;

    Qinv(0, 0) = (-d * f + e2) * inv_det;
    Qinv(0, 1) = Qinv(1, 0) = -k01;
    Qinv(0, 2) = Qinv(2, 0) = -k02;
    Qinv(1, 1) = -(a * f - c2) * inv_det;
    Qinv(1, 2) = Qinv(2, 1) = k12;
    Qinv(2, 2) = -(ad - b2) * inv_det;
    return true;
}
// Computes an orthonormal basis H of the 6D row space of the constraint
// Jacobian J at r, the matrix K = J*H (only the lower triangle is written),
// and a basis N of the 3D null space of J. The bases are built with a sparse
// Gram-Schmidt procedure that exploits the block structure of J.
//
// @param r 9D vector representing a (near) rank-3 3x3 matrix, row-major.
// @param H Output 9x6 orthonormal basis of the Jacobian row space.
// @param N Output 9x3 basis of the Jacobian null space.
// @param K Output 6x6 matrix J*H; upper triangle is left undefined.
// @param norm_threshold Columns of the null-space projector with norm below
//        this value are skipped when selecting null-space basis vectors.
void PoseSolver::computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
    cv::Matx<double, 9, 6>& H,
    cv::Matx<double, 9, 3>& N,
    cv::Matx<double, 6, 6>& K,
    const double& norm_threshold)
{
    H = cv::Matx<double, 9, 6>::zeros();
    // 1. q1: normalized first row of the matrix encoded in r.
    double norm_r1 = sqrt(r(0) * r(0) + r(1) * r(1) + r(2) * r(2));
    double inv_norm_r1 = norm_r1 > 1e-5 ? 1.0 / norm_r1 : 0.0;
    H(0, 0) = r(0) * inv_norm_r1;
    H(1, 0) = r(1) * inv_norm_r1;
    H(2, 0) = r(2) * inv_norm_r1;
    K(0, 0) = 2 * norm_r1;
    // 2. q2: normalized second row (disjoint support, hence orthogonal to q1).
    double norm_r2 = sqrt(r(3) * r(3) + r(4) * r(4) + r(5) * r(5));
    double inv_norm_r2 = 1.0 / norm_r2;
    H(3, 1) = r(3) * inv_norm_r2;
    H(4, 1) = r(4) * inv_norm_r2;
    H(5, 1) = r(5) * inv_norm_r2;
    K(1, 0) = 0;
    K(1, 1) = 2 * norm_r2;
    // 3. q3: normalized third row (again disjoint support).
    double norm_r3 = sqrt(r(6) * r(6) + r(7) * r(7) + r(8) * r(8));
    double inv_norm_r3 = 1.0 / norm_r3;
    H(6, 2) = r(6) * inv_norm_r3;
    H(7, 2) = r(7) * inv_norm_r3;
    H(8, 2) = r(8) * inv_norm_r3;
    K(2, 0) = K(2, 1) = 0;
    K(2, 2) = 2 * norm_r3;
    // 4. q4: fourth Jacobian row orthogonalized against q1 and q2, normalized.
    double dot_j4q1 = r(3) * H(0, 0) + r(4) * H(1, 0) + r(5) * H(2, 0),
        dot_j4q2 = r(0) * H(3, 1) + r(1) * H(4, 1) + r(2) * H(5, 1);
    H(0, 3) = r(3) - dot_j4q1 * H(0, 0);
    H(1, 3) = r(4) - dot_j4q1 * H(1, 0);
    H(2, 3) = r(5) - dot_j4q1 * H(2, 0);
    H(3, 3) = r(0) - dot_j4q2 * H(3, 1);
    H(4, 3) = r(1) - dot_j4q2 * H(4, 1);
    H(5, 3) = r(2) - dot_j4q2 * H(5, 1);
    double inv_norm_j4 = 1.0 / sqrt(H(0, 3) * H(0, 3) + H(1, 3) * H(1, 3) + H(2, 3) * H(2, 3) +
        H(3, 3) * H(3, 3) + H(4, 3) * H(4, 3) + H(5, 3) * H(5, 3));
    H(0, 3) *= inv_norm_j4;
    H(1, 3) *= inv_norm_j4;
    H(2, 3) *= inv_norm_j4;
    H(3, 3) *= inv_norm_j4;
    H(4, 3) *= inv_norm_j4;
    H(5, 3) *= inv_norm_j4;
    K(3, 0) = r(3) * H(0, 0) + r(4) * H(1, 0) + r(5) * H(2, 0);
    K(3, 1) = r(0) * H(3, 1) + r(1) * H(4, 1) + r(2) * H(5, 1);
    K(3, 2) = 0;
    K(3, 3) = r(3) * H(0, 3) + r(4) * H(1, 3) + r(5) * H(2, 3) + r(0) * H(3, 3) + r(1) * H(4, 3) + r(2) * H(5, 3);
    // 5. q5: fifth Jacobian row orthogonalized against q2, q3 and q4,
    // normalized through cv::norm on the assembled column.
    double dot_j5q2 = r(6) * H(3, 1) + r(7) * H(4, 1) + r(8) * H(5, 1);
    double dot_j5q3 = r(3) * H(6, 2) + r(4) * H(7, 2) + r(5) * H(8, 2);
    double dot_j5q4 = r(6) * H(3, 3) + r(7) * H(4, 3) + r(8) * H(5, 3);
    H(0, 4) = -dot_j5q4 * H(0, 3);
    H(1, 4) = -dot_j5q4 * H(1, 3);
    H(2, 4) = -dot_j5q4 * H(2, 3);
    H(3, 4) = r(6) - dot_j5q2 * H(3, 1) - dot_j5q4 * H(3, 3);
    H(4, 4) = r(7) - dot_j5q2 * H(4, 1) - dot_j5q4 * H(4, 3);
    H(5, 4) = r(8) - dot_j5q2 * H(5, 1) - dot_j5q4 * H(5, 3);
    H(6, 4) = r(3) - dot_j5q3 * H(6, 2); H(7, 4) = r(4) - dot_j5q3 * H(7, 2); H(8, 4) = r(5) - dot_j5q3 * H(8, 2);
    Matx<double, 9, 1> q4 = H.col(4);
    q4 /= cv::norm(q4);
    set<double, 9, 1, 9, 6>(0, 4, H, q4);
    K(4, 0) = 0;
    K(4, 1) = r(6) * H(3, 1) + r(7) * H(4, 1) + r(8) * H(5, 1);
    K(4, 2) = r(3) * H(6, 2) + r(4) * H(7, 2) + r(5) * H(8, 2);
    K(4, 3) = r(6) * H(3, 3) + r(7) * H(4, 3) + r(8) * H(5, 3);
    K(4, 4) = r(6) * H(3, 4) + r(7) * H(4, 4) + r(8) * H(5, 4) + r(3) * H(6, 4) + r(4) * H(7, 4) + r(5) * H(8, 4);
    // 6. q6: sixth Jacobian row orthogonalized against q1, q3, q4 and q5,
    // then normalized.
    double dot_j6q1 = r(6) * H(0, 0) + r(7) * H(1, 0) + r(8) * H(2, 0);
    double dot_j6q3 = r(0) * H(6, 2) + r(1) * H(7, 2) + r(2) * H(8, 2);
    double dot_j6q4 = r(6) * H(0, 3) + r(7) * H(1, 3) + r(8) * H(2, 3);
    double dot_j6q5 = r(0) * H(6, 4) + r(1) * H(7, 4) + r(2) * H(8, 4) + r(6) * H(0, 4) + r(7) * H(1, 4) + r(8) * H(2, 4);
    H(0, 5) = r(6) - dot_j6q1 * H(0, 0) - dot_j6q4 * H(0, 3) - dot_j6q5 * H(0, 4);
    H(1, 5) = r(7) - dot_j6q1 * H(1, 0) - dot_j6q4 * H(1, 3) - dot_j6q5 * H(1, 4);
    H(2, 5) = r(8) - dot_j6q1 * H(2, 0) - dot_j6q4 * H(2, 3) - dot_j6q5 * H(2, 4);
    H(3, 5) = -dot_j6q5 * H(3, 4) - dot_j6q4 * H(3, 3);
    H(4, 5) = -dot_j6q5 * H(4, 4) - dot_j6q4 * H(4, 3);
    H(5, 5) = -dot_j6q5 * H(5, 4) - dot_j6q4 * H(5, 3);
    H(6, 5) = r(0) - dot_j6q3 * H(6, 2) - dot_j6q5 * H(6, 4);
    H(7, 5) = r(1) - dot_j6q3 * H(7, 2) - dot_j6q5 * H(7, 4);
    H(8, 5) = r(2) - dot_j6q3 * H(8, 2) - dot_j6q5 * H(8, 4);
    Matx<double, 9, 1> q5 = H.col(5);
    q5 /= cv::norm(q5);
    set<double, 9, 1, 9, 6>(0, 5, H, q5);
    K(5, 0) = r(6) * H(0, 0) + r(7) * H(1, 0) + r(8) * H(2, 0);
    K(5, 1) = 0; K(5, 2) = r(0) * H(6, 2) + r(1) * H(7, 2) + r(2) * H(8, 2);
    K(5, 3) = r(6) * H(0, 3) + r(7) * H(1, 3) + r(8) * H(2, 3);
    K(5, 4) = r(6) * H(0, 4) + r(7) * H(1, 4) + r(8) * H(2, 4) + r(0) * H(6, 4) + r(1) * H(7, 4) + r(2) * H(8, 4);
    K(5, 5) = r(6) * H(0, 5) + r(7) * H(1, 5) + r(8) * H(2, 5) + r(0) * H(6, 5) + r(1) * H(7, 5) + r(2) * H(8, 5);
    // Great! Now H is an orthogonalized, sparse basis of the Jacobian row space and K is filled.
    //
    // Now get a projector onto the null space H:
    const cv::Matx<double, 9, 9> Pn = cv::Matx<double, 9, 9>::eye() - (H * H.t());
    // Now we need to pick 3 columns of P with non-zero norm (> 0.3) and some angle between them (> 0.3).
    //
    // Find the 3 columns of Pn with largest norms
    int index1 = 0,
        index2 = 0,
        index3 = 0;
    double max_norm1 = std::numeric_limits<double>::min();
    double min_dot12 = std::numeric_limits<double>::max();
    double min_dot1323 = std::numeric_limits<double>::max();
    double col_norms[9];
    // First basis vector: the largest-norm admissible column of Pn.
    for (int i = 0; i < 9; i++)
    {
        col_norms[i] = cv::norm(Pn.col(i));
        if (col_norms[i] >= norm_threshold)
        {
            if (max_norm1 < col_norms[i])
            {
                max_norm1 = col_norms[i];
                index1 = i;
            }
        }
    }
    Matx<double, 9, 1> v1 = Pn.col(index1);
    v1 /= max_norm1;
    set<double, 9, 1, 9, 3>(0, 0, N, v1);
    // Second basis vector: the admissible column most orthogonal to v1.
    for (int i = 0; i < 9; i++)
    {
        if (i == index1) continue;
        if (col_norms[i] >= norm_threshold)
        {
            double cos_v1_x_col = fabs(Pn.col(i).dot(v1) / col_norms[i]);
            if (cos_v1_x_col <= min_dot12)
            {
                index2 = i;
                min_dot12 = cos_v1_x_col;
            }
        }
    }
    Matx<double, 9, 1> v2 = Pn.col(index2);
    Matx<double, 9, 1> n0 = N.col(0);
    v2 -= v2.dot(n0) * n0;
    v2 /= cv::norm(v2);
    set<double, 9, 1, 9, 3>(0, 1, N, v2);
    // Third basis vector: the admissible column most orthogonal to both v1
    // and v2 (smallest sum of absolute cosines).
    for (int i = 0; i < 9; i++)
    {
        if (i == index2 || i == index1) continue;
        if (col_norms[i] >= norm_threshold)
        {
            double cos_v1_x_col = fabs(Pn.col(i).dot(v1) / col_norms[i]);
            double cos_v2_x_col = fabs(Pn.col(i).dot(v2) / col_norms[i]);
            if (cos_v1_x_col + cos_v2_x_col <= min_dot1323)
            {
                index3 = i;
                // Bug fix: record the quantity that is actually compared
                // above. The original stored cos_v2_x_col + cos_v2_x_col,
                // which corrupted the running minimum for later candidates.
                min_dot1323 = cos_v1_x_col + cos_v2_x_col;
            }
        }
    }
    Matx<double, 9, 1> v3 = Pn.col(index3);
    Matx<double, 9, 1> n1 = N.col(1);
    // NOTE(review): a textbook Gram-Schmidt step would subtract BOTH
    // projections (v3 -= (v3.n1)*n1 + (v3.n0)*n0); the minus sign below adds
    // the n0 component instead. N's columns still span the null space, so the
    // solver remains functional, but this looks unintended — confirm against
    // the reference implementation before changing.
    v3 -= (v3.dot(n1)) * n1 - (v3.dot(n0)) * n0;
    v3 /= cv::norm(v3);
    set<double, 9, 1, 9, 3>(0, 2, N, v3);
}
// faster nearest rotation computation based on FOAM (see: http://users.ics.forth.gr/~lourakis/publ/2018_iros.pdf )
/* Solve the nearest orthogonal approximation problem
* i.e., given e, find R minimizing ||R-e||_F
*
* The computation borrows from Markley's FOAM algorithm
* "Attitude Determination Using Vector Observations: A Fast Optimal Matrix Algorithm", J. Astronaut. Sci.
*
* See also M. Lourakis: "An Efficient Solution to Absolute Orientation", ICPR 2016
*
* Copyright (C) 2019 Manolis Lourakis (lourakis **at** ics forth gr)
* Institute of Computer Science, Foundation for Research & Technology - Hellas
* Heraklion, Crete, Greece.
*/
// @param e Input 3x3 matrix as a row-major 9-vector (need not be orthogonal).
// @param r Output: the rotation matrix nearest to e in Frobenius norm, also
//          stored row-major as a 9-vector.
void PoseSolver::nearestRotationMatrix(const cv::Matx<double, 9, 1>& e,
cv::Matx<double, 9, 1>& r)
{
int i;
double l, lprev, det_e, e_sq, adj_e_sq, adj_e[9];
// e's adjoint
adj_e[0] = e(4) * e(8) - e(5) * e(7); adj_e[1] = e(2) * e(7) - e(1) * e(8); adj_e[2] = e(1) * e(5) - e(2) * e(4);
adj_e[3] = e(5) * e(6) - e(3) * e(8); adj_e[4] = e(0) * e(8) - e(2) * e(6); adj_e[5] = e(2) * e(3) - e(0) * e(5);
adj_e[6] = e(3) * e(7) - e(4) * e(6); adj_e[7] = e(1) * e(6) - e(0) * e(7); adj_e[8] = e(0) * e(4) - e(1) * e(3);
// det(e), ||e||^2, ||adj(e)||^2
det_e = e(0) * e(4) * e(8) - e(0) * e(5) * e(7) - e(1) * e(3) * e(8) + e(2) * e(3) * e(7) + e(1) * e(6) * e(5) - e(2) * e(6) * e(4);
e_sq = e(0) * e(0) + e(1) * e(1) + e(2) * e(2) + e(3) * e(3) + e(4) * e(4) + e(5) * e(5) + e(6) * e(6) + e(7) * e(7) + e(8) * e(8);
adj_e_sq = adj_e[0] * adj_e[0] + adj_e[1] * adj_e[1] + adj_e[2] * adj_e[2] + adj_e[3] * adj_e[3] + adj_e[4] * adj_e[4] + adj_e[5] * adj_e[5] + adj_e[6] * adj_e[6] + adj_e[7] * adj_e[7] + adj_e[8] * adj_e[8];
// compute l_max with Newton-Raphson from FOAM's characteristic polynomial, i.e. eq.(23) - (26)
// (i caps the iteration count at 200 in case of non-convergence)
for (i = 200, l = 2.0, lprev = 0.0; fabs(l - lprev) > 1E-12 * fabs(lprev) && i > 0; --i) {
double tmp, p, pp;
tmp = (l * l - e_sq);
p = (tmp * tmp - 8.0 * l * det_e - 4.0 * adj_e_sq);
pp = 8.0 * (0.5 * tmp * l - det_e);
lprev = l;
l -= p / pp;
}
// the rotation matrix equals ((l^2 + e_sq)*e + 2*l*adj(e') - 2*e*e'*e) / (l*(l*l-e_sq) - 2*det(e)), i.e. eq.(14) using (18), (19)
{
// compute (l^2 + e_sq)*e
double tmp[9], e_et[9], denom;
const double a = l * l + e_sq;
// e_et=e*e'
e_et[0] = e(0) * e(0) + e(1) * e(1) + e(2) * e(2);
e_et[1] = e(0) * e(3) + e(1) * e(4) + e(2) * e(5);
e_et[2] = e(0) * e(6) + e(1) * e(7) + e(2) * e(8);
e_et[3] = e_et[1];
e_et[4] = e(3) * e(3) + e(4) * e(4) + e(5) * e(5);
e_et[5] = e(3) * e(6) + e(4) * e(7) + e(5) * e(8);
e_et[6] = e_et[2];
e_et[7] = e_et[5];
e_et[8] = e(6) * e(6) + e(7) * e(7) + e(8) * e(8);
// tmp=e_et*e
tmp[0] = e_et[0] * e(0) + e_et[1] * e(3) + e_et[2] * e(6);
tmp[1] = e_et[0] * e(1) + e_et[1] * e(4) + e_et[2] * e(7);
tmp[2] = e_et[0] * e(2) + e_et[1] * e(5) + e_et[2] * e(8);
tmp[3] = e_et[3] * e(0) + e_et[4] * e(3) + e_et[5] * e(6);
tmp[4] = e_et[3] * e(1) + e_et[4] * e(4) + e_et[5] * e(7);
tmp[5] = e_et[3] * e(2) + e_et[4] * e(5) + e_et[5] * e(8);
tmp[6] = e_et[6] * e(0) + e_et[7] * e(3) + e_et[8] * e(6);
tmp[7] = e_et[6] * e(1) + e_et[7] * e(4) + e_et[8] * e(7);
tmp[8] = e_et[6] * e(2) + e_et[7] * e(5) + e_et[8] * e(8);
// compute R as (a*e + 2*(l*adj(e)' - tmp))*denom; note that adj(e')=adj(e)'
// (adj_e is indexed transposed below, i.e. adj_e[3*c+r], to realize adj(e)')
denom = l * (l * l - e_sq) - 2.0 * det_e;
denom = 1.0 / denom;
r(0) = (a * e(0) + 2.0 * (l * adj_e[0] - tmp[0])) * denom;
r(1) = (a * e(1) + 2.0 * (l * adj_e[3] - tmp[1])) * denom;
r(2) = (a * e(2) + 2.0 * (l * adj_e[6] - tmp[2])) * denom;
r(3) = (a * e(3) + 2.0 * (l * adj_e[1] - tmp[3])) * denom;
r(4) = (a * e(4) + 2.0 * (l * adj_e[4] - tmp[4])) * denom;
r(5) = (a * e(5) + 2.0 * (l * adj_e[7] - tmp[5])) * denom;
r(6) = (a * e(6) + 2.0 * (l * adj_e[2] - tmp[6])) * denom;
r(7) = (a * e(7) + 2.0 * (l * adj_e[5] - tmp[7])) * denom;
r(8) = (a * e(8) + 2.0 * (l * adj_e[8] - tmp[8])) * denom;
}
}
// Determinant of the 3x3 matrix stored row-major in e (Sarrus expansion over
// [ e0 e1 e2; e3 e4 e5; e6 e7 e8 ]).
double PoseSolver::det3x3(const cv::Matx<double, 9, 1>& e)
{
return e(0) * e(4) * e(8) + e(1) * e(5) * e(6) + e(2) * e(3) * e(7)
- e(6) * e(4) * e(2) - e(7) * e(5) * e(0) - e(8) * e(3) * e(1);
}
inline bool PoseSolver::positiveDepth(const SQPSolution& solution) const
{
const cv::Matx<double, 9, 1>& r = solution.r_hat;
const cv::Matx<double, 3, 1>& t = solution.t;
const cv::Vec3d& mean = point_mean_;
return (r(6) * mean(0) + r(7) * mean(1) + r(8) * mean(2) + t(2) > 0);
}
// Evaluates a candidate solution and merges it into solutions_. Candidates
// failing the cheirality (positive depth) test are discarded. A candidate
// whose error differs from the current minimum by more than
// EQUAL_SQUARED_ERRORS_DIFF either replaces the whole list (if better) or is
// dropped (if worse); near-equal errors are kept side by side, deduplicated
// by rotation, so ambiguous configurations can yield multiple poses.
// @param solution Candidate to evaluate; its sq_error field is filled here.
// @param min_error Running minimum squared error; updated in place.
void PoseSolver::checkSolution(SQPSolution& solution, double& min_error)
{
if (positiveDepth(solution))
{
// Quadratic cost r' * Omega * r of this rotation.
solution.sq_error = (omega_ * solution.r_hat).ddot(solution.r_hat);
if (fabs(min_error - solution.sq_error) > EQUAL_SQUARED_ERRORS_DIFF)
{
// Errors differ significantly: keep only the better solution.
if (min_error > solution.sq_error)
{
min_error = solution.sq_error;
solutions_[0] = solution;
num_solutions_ = 1;
}
}
else
{
// Errors are essentially equal: append unless the rotation duplicates
// an existing one, in which case retain the lower-error copy.
bool found = false;
for (int i = 0; i < num_solutions_; i++)
{
if (cv::norm(solutions_[i].r_hat - solution.r_hat, cv::NORM_L2SQR) < EQUAL_VECTORS_SQUARED_DIFF)
{
if (solutions_[i].sq_error > solution.sq_error)
{
solutions_[i] = solution;
}
found = true;
break;
}
}
if (!found)
{
solutions_[num_solutions_++] = solution;
}
if (min_error > solution.sq_error) min_error = solution.sq_error;
}
}
}
// Measures how far the 3x3 matrix encoded row-major in e is from being
// orthogonal: the squared Frobenius distance of E*E' from the identity,
// expressed through the row norms and pairwise row dot products.
double PoseSolver::orthogonalityError(const cv::Matx<double, 9, 1>& e)
{
    // Squared norms of the three rows (should each be 1 for orthogonal E).
    const double n1 = e(0) * e(0) + e(1) * e(1) + e(2) * e(2);
    const double n2 = e(3) * e(3) + e(4) * e(4) + e(5) * e(5);
    const double n3 = e(6) * e(6) + e(7) * e(7) + e(8) * e(8);
    // Pairwise row dot products (should each be 0 for orthogonal E).
    const double d12 = e(0) * e(3) + e(1) * e(4) + e(2) * e(5);
    const double d13 = e(0) * e(6) + e(1) * e(7) + e(2) * e(8);
    const double d23 = e(3) * e(6) + e(4) * e(7) + e(5) * e(8);
    return (n1 - 1) * (n1 - 1) + (n2 - 1) * (n2 - 1) + (n3 - 1) * (n3 - 1) +
        2 * (d12 * d12 + d13 * d13 + d23 * d23);
}
}
}

@ -0,0 +1,194 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// This file is based on file issued with the following license:
/*
BSD 3-Clause License
Copyright (c) 2020, George Terzakis
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef OPENCV_CALIB3D_SQPNP_HPP
#define OPENCV_CALIB3D_SQPNP_HPP
#include <opencv2/core.hpp>
namespace cv {
namespace sqpnp {
// Implementation of the SQPnP perspective-n-point solver: minimizes a
// quadratic cost over row-major rotation vectors subject to orthogonality
// constraints, via sequential quadratic programming.
class PoseSolver {
public:
/**
* @brief PoseSolver constructor
*/
PoseSolver();
/**
* @brief Finds the possible poses of a camera given a set of 3D points
* and their corresponding 2D image projections. The poses are
* sorted by lowest squared error (which corresponds to lowest
* reprojection error).
* @param objectPoints Array or vector of 3 or more 3D points defined in object coordinates.
* 1xN/Nx1 3-channel (float or double) where N is the number of points.
* @param imagePoints Array or vector of corresponding 2D points, 1xN/Nx1 2-channel.
* @param rvec The output rotation solutions (up to 18 3x1 rotation vectors)
* @param tvec The output translation solutions (up to 18 3x1 vectors)
*/
void solve(InputArray objectPoints, InputArray imagePoints, OutputArrayOfArrays rvec,
OutputArrayOfArrays tvec);
private:
// One candidate pose together with its quadratic cost.
struct SQPSolution
{
// Row-major 3x3 rotation stored as a 9x1 vector.
cv::Matx<double, 9, 1> r_hat;
// Translation vector.
cv::Matx<double, 3, 1> t;
// Quadratic cost r_hat' * Omega * r_hat of this pose.
double sq_error;
};
/*
* @brief Computes the 9x9 PSD Omega matrix and supporting matrices.
* @param objectPoints Array or vector of 3 or more 3D points defined in object coordinates.
* 1xN/Nx1 3-channel (float or double) where N is the number of points.
* @param imagePoints Array or vector of corresponding 2D points, 1xN/Nx1 2-channel.
*/
void computeOmega(InputArray objectPoints, InputArray imagePoints);
/*
* @brief Explores the candidate rotations derived from the null space of
* the Omega matrix (as computed by computeOmega) and stores the
* feasible poses in solutions_.
*/
void solveInternal();
/*
* @brief Produces the distance from being orthogonal for a given 3x3 matrix
* in row-major form.
* @param e The vector to test representing a 3x3 matrix in row major form.
* @return The distance the matrix is from being orthogonal.
*/
static double orthogonalityError(const cv::Matx<double, 9, 1>& e);
/*
* @brief Processes a solution and sorts it by error.
* @param solution The solution to evaluate.
* @param min_error The current minimum error.
*/
void checkSolution(SQPSolution& solution, double& min_error);
/*
* @brief Computes the determinant of a matrix stored in row-major format.
* @param e Vector representing a 3x3 matrix stored in row-major format.
* @return The determinant of the matrix.
*/
static double det3x3(const cv::Matx<double, 9, 1>& e);
/*
* @brief Tests the cheirality for a given solution.
* @param solution The solution to evaluate.
*/
inline bool positiveDepth(const SQPSolution& solution) const;
/*
* @brief Determines the nearest rotation matrix to a given 3x3 matrix.
* Input and output are 9x1 vector representing a vector stored in row-major
* form.
* @param e The input 3x3 matrix stored in a vector in row-major form.
* @param r The nearest rotation matrix to the input e (again in row-major form).
*/
static void nearestRotationMatrix(const cv::Matx<double, 9, 1>& e,
cv::Matx<double, 9, 1>& r);
/*
* @brief Runs the sequential quadratic programming on orthogonal matrices.
* @param r0 The start point of the solver.
*/
SQPSolution runSQP(const cv::Matx<double, 9, 1>& r0);
/*
* @brief Steps down the gradient for the given matrix r to solve the SQP system.
* @param r The current matrix step.
* @param delta The next step down the gradient.
*/
void solveSQPSystem(const cv::Matx<double, 9, 1>& r, cv::Matx<double, 9, 1>& delta);
/*
* @brief Analytically computes the inverse of a symmetric 3x3 matrix using the
* lower triangle.
* @param Q The matrix to invert.
* @param Qinv The inverse of Q.
* @param threshold The threshold to determine if Q is singular and non-invertible.
*/
bool analyticalInverse3x3Symm(const cv::Matx<double, 3, 3>& Q,
cv::Matx<double, 3, 3>& Qinv,
const double& threshold = 1e-8);
/*
* @brief Computes the 3D null space and 6D normal space of the constraint Jacobian
* at a 9D vector r (representing a rank-3 matrix). Note that K is lower
* triangular so upper triangle is undefined.
* @param r 9D vector representing a rank-3 matrix.
* @param H 6D row space of the constraint Jacobian at r.
* @param N 3D null space of the constraint Jacobian at r.
* @param K The constraint Jacobian at r.
* @param norm_threshold Threshold for column vector norm of Pn (the projection onto the null space
* of the constraint Jacobian).
*/
void computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
cv::Matx<double, 9, 6>& H,
cv::Matx<double, 9, 3>& N,
cv::Matx<double, 6, 6>& K,
const double& norm_threshold = 0.1);
// Numeric thresholds and limits; see definitions in sqpnp.cpp for details.
static const double RANK_TOLERANCE;
static const double SQP_SQUARED_TOLERANCE;
static const double SQP_DET_THRESHOLD;
static const double ORTHOGONALITY_SQUARED_ERROR_THRESHOLD;
static const double EQUAL_VECTORS_SQUARED_DIFF;
static const double EQUAL_SQUARED_ERRORS_DIFF;
static const double POINT_VARIANCE_THRESHOLD;
static const int SQP_MAX_ITERATION;
static const double SQRT3;
// 9x9 PSD matrix of the quadratic cost on the row-major rotation vector.
cv::Matx<double, 9, 9> omega_;
// Singular values of omega_.
cv::Vec<double, 9> s_;
// Corresponding singular vectors of omega_ (as columns).
cv::Matx<double, 9, 9> u_;
// Maps a row-major rotation vector to its optimal translation: t = p_ * r.
cv::Matx<double, 3, 9> p_;
// Centroid of the object points; used for the cheirality test.
cv::Vec3d point_mean_;
// Number of near-zero singular values of omega_ (-1 until computed).
int num_null_vectors_;
// Retained candidate solutions; num_solutions_ entries are valid.
SQPSolution solutions_[18];
int num_solutions_;
};
}
}
#endif

@ -421,7 +421,7 @@ struct SPRT_history {
double epsilon, delta, A;
// number of samples processed by test
int tested_samples; // k
SPRT_history () {
SPRT_history () : epsilon(0), delta(0), A(0) {
tested_samples = 0;
}
};

@ -286,7 +286,7 @@ public:
current_score = quality->getScore(models[i]);
} else {
if (is_magsac && iters % repeat_magsac == 0) {
if (!local_optimization->refineModel
if (local_optimization && !local_optimization->refineModel
(models[i], best_score_thread, models[i], current_score))
continue;
} else if (model_verifier->isModelGood(models[i])) {
@ -1028,4 +1028,4 @@ bool run (const Ptr<const Model> &params, InputArray points1, InputArray points2
}
return false;
}
}}
}}

@ -190,6 +190,8 @@ static std::string printMethod(int method)
return "SOLVEPNP_IPPE";
case 7:
return "SOLVEPNP_IPPE_SQUARE";
case 8:
return "SOLVEPNP_SQPNP";
default:
return "Unknown value";
}
@ -206,6 +208,7 @@ public:
eps[SOLVEPNP_AP3P] = 1.0e-2;
eps[SOLVEPNP_DLS] = 1.0e-2;
eps[SOLVEPNP_UPNP] = 1.0e-2;
eps[SOLVEPNP_SQPNP] = 1.0e-2;
totalTestsCount = 10;
pointsCount = 500;
}
@ -436,6 +439,7 @@ public:
eps[SOLVEPNP_UPNP] = 1.0e-6; //UPnP is remapped to EPnP, so we use the same threshold
eps[SOLVEPNP_IPPE] = 1.0e-6;
eps[SOLVEPNP_IPPE_SQUARE] = 1.0e-6;
eps[SOLVEPNP_SQPNP] = 1.0e-6;
totalTestsCount = 1000;

@ -203,6 +203,9 @@ enum CovarFlags {
COVAR_COLS = 16
};
//! @addtogroup core_cluster
//! @{
//! k-Means flags
enum KmeansFlags {
/** Select random initial centers in each attempt.*/
@ -216,12 +219,18 @@ enum KmeansFlags {
KMEANS_USE_INITIAL_LABELS = 1
};
//! @} core_cluster
//! @addtogroup core_array
//! @{
enum ReduceTypes { REDUCE_SUM = 0, //!< the output is the sum of all rows/columns of the matrix.
REDUCE_AVG = 1, //!< the output is the mean vector of all rows/columns of the matrix.
REDUCE_MAX = 2, //!< the output is the maximum (column/row-wise) of all rows/columns of the matrix.
REDUCE_MIN = 3 //!< the output is the minimum (column/row-wise) of all rows/columns of the matrix.
};
//! @} core_array
/** @brief Swaps two matrices
*/

@ -499,7 +499,7 @@ typename cv::Affine3<T>::Vec3 cv::Affine3<T>::rvec() const
double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5;
c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c;
double theta = acos(c);
double theta = std::acos(c);
if( s < 1e-5 )
{

@ -220,6 +220,11 @@ struct VZeroUpperGuard {
# define CV_VSX 1
#endif
#ifdef __F16C__
# include <immintrin.h>
# define CV_FP16 1
#endif
#endif // !__OPENCV_BUILD && !__CUDACC (Compatibility code)

@ -90,7 +90,7 @@ namespace cv { namespace debug_build_guard { } using namespace debug_build_guard
// keep current value (through OpenCV port file)
#elif defined __GNUC__ || (defined (__cpluscplus) && (__cpluscplus >= 201103))
#define CV_Func __func__
#elif defined __clang__ && (__clang_minor__ * 100 + __clang_major >= 305)
#elif defined __clang__ && (__clang_minor__ * 100 + __clang_major__ >= 305)
#define CV_Func __func__
#elif defined(__STDC_VERSION__) && (__STDC_VERSION >= 199901)
#define CV_Func __func__
@ -844,7 +844,7 @@ protected:
float16_t() : w(0) {}
explicit float16_t(float x)
{
#if CV_AVX2
#if CV_FP16
__m128 v = _mm_load_ss(&x);
w = (ushort)_mm_cvtsi128_si32(_mm_cvtps_ph(v, 0));
#else
@ -875,7 +875,7 @@ protected:
operator float() const
{
#if CV_AVX2
#if CV_FP16
float f;
_mm_store_ss(&f, _mm_cvtph_ps(_mm_cvtsi32_si128(w)));
return f;

@ -3121,18 +3121,39 @@ OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float32x8, float, f32, v_uint32x8, un
OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_int64x4, int64, s64, v_uint64x4, uint64, u64)
OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float64x4, double, f64, v_uint64x4, uint64, u64)
//
// FP16
//
inline v_float32x8 v256_load_expand(const float16_t* ptr)
{
#if CV_FP16
return v_float32x8(_mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr)));
#else
float CV_DECL_ALIGNED(32) buf[8];
for (int i = 0; i < 8; i++)
buf[i] = (float)ptr[i];
return v256_load_aligned(buf);
#endif
}
inline void v_pack_store(float16_t* ptr, const v_float32x8& a)
{
#if CV_FP16
__m128i ah = _mm256_cvtps_ph(a.val, 0);
_mm_storeu_si128((__m128i*)ptr, ah);
#else
float CV_DECL_ALIGNED(32) buf[8];
v_store_aligned(buf, a);
for (int i = 0; i < 8; i++)
ptr[i] = float16_t(buf[i]);
#endif
}
//
// end of FP16
//
inline void v256_cleanup() { _mm256_zeroall(); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END

@ -207,13 +207,7 @@ struct v_uint64x2
uint64 get0() const
{
#ifdef __wasm_unimplemented_simd128__
return (uint64)wasm_i64x2_extract_lane(val, 0);
#else
uint64 des[2];
wasm_v128_store(des, val);
return des[0];
#endif
}
v128_t val;
@ -235,13 +229,7 @@ struct v_int64x2
int64 get0() const
{
#ifdef __wasm_unimplemented_simd128__
return wasm_i64x2_extract_lane(val, 0);
#else
int64 des[2];
wasm_v128_store(des, val);
return des[0];
#endif
}
v128_t val;
@ -263,13 +251,7 @@ struct v_float64x2
double get0() const
{
#ifdef __wasm_unimplemented_simd128__
return wasm_f64x2_extract_lane(val, 0);
#else
double des[2];
wasm_v128_store(des, val);
return des[0];
#endif
}
v128_t val;
@ -1797,22 +1779,9 @@ OPENCV_HAL_IMPL_WASM_INITVEC(v_int16x8, short, s16, i16x8, short)
OPENCV_HAL_IMPL_WASM_INITVEC(v_uint32x4, unsigned, u32, i32x4, int)
OPENCV_HAL_IMPL_WASM_INITVEC(v_int32x4, int, s32, i32x4, int)
OPENCV_HAL_IMPL_WASM_INITVEC(v_float32x4, float, f32, f32x4, float)
#ifdef __wasm_unimplemented_simd128__
OPENCV_HAL_IMPL_WASM_INITVEC(v_uint64x2, uint64, u64, i64x2, int64)
OPENCV_HAL_IMPL_WASM_INITVEC(v_int64x2, int64, s64, i64x2, int64)
OPENCV_HAL_IMPL_WASM_INITVEC(v_float64x2, double, f64, f64x2, double)
#else
#define OPENCV_HAL_IMPL_FALLBACK_INITVEC(_Tpvec, _Tp, suffix, _Tps) \
inline _Tpvec v_setzero_##suffix() { return _Tpvec((_Tps)0, (_Tps)0); } \
inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec((_Tps)v, (_Tps)v); } \
template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \
{ return _Tpvec(a.val); }
OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_uint64x2, uint64, u64, int64)
OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_int64x2, int64, s64, int64)
OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_float64x2, double, f64, double)
#endif
//////////////// PACK ///////////////
inline v_uint8x16 v_pack(const v_uint16x8& a, const v_uint16x8& b)
@ -1931,28 +1900,18 @@ inline v_int16x8 v_rshr_pack(const v_int32x4& a, const v_int32x4& b)
template<int n>
inline v_uint32x4 v_rshr_pack(const v_uint64x2& a, const v_uint64x2& b)
{
#ifdef __wasm_unimplemented_simd128__
v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n);
v128_t b1 = wasm_u64x2_shr(wasm_i64x2_add(b.val, delta), n);
return v_uint32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27));
#else
fallback::v_uint64x2 a_(a), b_(b);
return fallback::v_rshr_pack<n>(a_, b_);
#endif
}
template<int n>
inline v_int32x4 v_rshr_pack(const v_int64x2& a, const v_int64x2& b)
{
#ifdef __wasm_unimplemented_simd128__
v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n);
v128_t b1 = wasm_i64x2_shr(wasm_i64x2_add(b.val, delta), n);
return v_int32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27));
#else
fallback::v_int64x2 a_(a), b_(b);
return fallback::v_rshr_pack<n>(a_, b_);
#endif
}
template<int n>
inline v_uint8x16 v_rshr_pack_u(const v_int16x8& a, const v_int16x8& b)
@ -2139,7 +2098,6 @@ inline void v_rshr_pack_store(short* ptr, const v_int32x4& a)
template<int n>
inline void v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a)
{
#ifdef __wasm_unimplemented_simd128__
v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n);
v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11);
@ -2148,15 +2106,10 @@ inline void v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a)
for (int i=0; i<2; ++i) {
ptr[i] = t_ptr[i];
}
#else
fallback::v_uint64x2 _a(a);
fallback::v_rshr_pack_store<n>(ptr, _a);
#endif
}
template<int n>
inline void v_rshr_pack_store(int* ptr, const v_int64x2& a)
{
#ifdef __wasm_unimplemented_simd128__
v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1)));
v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n);
v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11);
@ -2165,10 +2118,6 @@ inline void v_rshr_pack_store(int* ptr, const v_int64x2& a)
for (int i=0; i<2; ++i) {
ptr[i] = t_ptr[i];
}
#else
fallback::v_int64x2 _a(a);
fallback::v_rshr_pack_store<n>(ptr, _a);
#endif
}
template<int n>
inline void v_rshr_pack_u_store(uchar* ptr, const v_int16x8& a)
@ -2228,7 +2177,6 @@ inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uin
const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f,
const v_uint64x2& g, const v_uint64x2& h)
{
#ifdef __wasm_unimplemented_simd128__
v128_t maxval = wasm_i32x4_splat(255);
v128_t a1 = wasm_v128_bitselect(maxval, a.val, ((__u64x2)(a.val) > (__u64x2)maxval));
v128_t b1 = wasm_v128_bitselect(maxval, b.val, ((__u64x2)(b.val) > (__u64x2)maxval));
@ -2245,10 +2193,6 @@ inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uin
v128_t abcd = wasm_v8x16_shuffle(ab, cd, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19);
v128_t efgh = wasm_v8x16_shuffle(ef, gh, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19);
return v_uint8x16(wasm_v8x16_shuffle(abcd, efgh, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23));
#else
fallback::v_uint64x2 a_(a), b_(b), c_(c), d_(d), e_(e), f_(f), g_(g), h_(h);
return fallback::v_pack_b(a_, b_, c_, d_, e_, f_, g_, h_);
#endif
}
inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
@ -2310,8 +2254,6 @@ OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float32x4, wasm_f32x4_add)
OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float32x4, wasm_f32x4_sub)
OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float32x4, wasm_f32x4_mul)
OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float32x4, wasm_f32x4_div)
#ifdef __wasm_unimplemented_simd128__
OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint64x2, wasm_i64x2_add)
OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint64x2, wasm_i64x2_sub)
OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int64x2, wasm_i64x2_add)
@ -2320,30 +2262,6 @@ OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float64x2, wasm_f64x2_add)
OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float64x2, wasm_f64x2_sub)
OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float64x2, wasm_f64x2_mul)
OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float64x2, wasm_f64x2_div)
#else
#define OPENCV_HAL_IMPL_FALLBACK_BIN_OP(bin_op, _Tpvec) \
inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
{ \
fallback::_Tpvec a_(a), b_(b); \
return _Tpvec((a_) bin_op (b_)); \
} \
inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \
{ \
fallback::_Tpvec a_(a), b_(b); \
a_ bin_op##= b_; \
a = _Tpvec(a_); \
return a; \
}
OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_uint64x2)
OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_uint64x2)
OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_int64x2)
OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_int64x2)
OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_float64x2)
OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_float64x2)
OPENCV_HAL_IMPL_FALLBACK_BIN_OP(*, v_float64x2)
OPENCV_HAL_IMPL_FALLBACK_BIN_OP(/, v_float64x2)
#endif
// saturating multiply 8-bit, 16-bit
#define OPENCV_HAL_IMPL_WASM_MUL_SAT(_Tpvec, _Tpwvec) \
@ -2405,19 +2323,11 @@ inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b,
inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b,
v_uint64x2& c, v_uint64x2& d)
{
#ifdef __wasm_unimplemented_simd128__
v_uint64x2 a0, a1, b0, b1;
v_expand(a, a0, a1);
v_expand(b, b0, b1);
c.val = ((__u64x2)(a0.val) * (__u64x2)(b0.val));
d.val = ((__u64x2)(a1.val) * (__u64x2)(b1.val));
#else
fallback::v_uint32x4 a_(a), b_(b);
fallback::v_uint64x2 c_, d_;
fallback::v_mul_expand(a_, b_, c_, d_);
c = v_uint64x2(c_);
d = v_uint64x2(d_);
#endif
}
inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b)
@ -2457,7 +2367,6 @@ inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32
inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b)
{
#ifdef __wasm_unimplemented_simd128__
v128_t a0 = wasm_i64x2_shr(wasm_i64x2_shl(a.val, 32), 32);
v128_t a1 = wasm_i64x2_shr(a.val, 32);
v128_t b0 = wasm_i64x2_shr(wasm_i64x2_shl(b.val, 32), 32);
@ -2465,22 +2374,10 @@ inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b)
v128_t c = (v128_t)((__i64x2)a0 * (__i64x2)b0);
v128_t d = (v128_t)((__i64x2)a1 * (__i64x2)b1);
return v_int64x2(wasm_i64x2_add(c, d));
#else
fallback::v_int32x4 a_(a);
fallback::v_int32x4 b_(b);
return fallback::v_dotprod(a_, b_);
#endif
}
inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c)
{
#ifdef __wasm_unimplemented_simd128__
return v_dotprod(a, b) + c;
#else
fallback::v_int32x4 a_(a);
fallback::v_int32x4 b_(b);
fallback::v_int64x2 c_(c);
return fallback::v_dotprod(a_, b_, c_);
#endif
}
// 8 >> 32
@ -2515,32 +2412,32 @@ inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b, const
// 16 >> 64
inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b)
{
fallback::v_uint16x8 a_(a);
fallback::v_uint16x8 b_(b);
return fallback::v_dotprod_expand(a_, b_);
v128_t a0 = wasm_u32x4_shr(wasm_i32x4_shl(a.val, 16), 16);
v128_t a1 = wasm_u32x4_shr(a.val, 16);
v128_t b0 = wasm_u32x4_shr(wasm_i32x4_shl(b.val, 16), 16);
v128_t b1 = wasm_u32x4_shr(b.val, 16);
return v_uint64x2((
v_dotprod(v_int32x4(a0), v_int32x4(b0)) +
v_dotprod(v_int32x4(a1), v_int32x4(b1))).val
);
}
inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c)
{
fallback::v_uint16x8 a_(a);
fallback::v_uint16x8 b_(b);
fallback::v_uint64x2 c_(c);
return fallback::v_dotprod_expand(a_, b_, c_);
}
{ return v_dotprod_expand(a, b) + c; }
inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b)
{
fallback::v_int16x8 a_(a);
fallback::v_int16x8 b_(b);
return fallback::v_dotprod_expand(a_, b_);
v128_t a0 = wasm_i32x4_shr(wasm_i32x4_shl(a.val, 16), 16);
v128_t a1 = wasm_i32x4_shr(a.val, 16);
v128_t b0 = wasm_i32x4_shr(wasm_i32x4_shl(b.val, 16), 16);
v128_t b1 = wasm_i32x4_shr(b.val, 16);
return v_int64x2((
v_dotprod(v_int32x4(a0), v_int32x4(b0)) +
v_dotprod(v_int32x4(a1), v_int32x4(b1)))
);
}
inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c)
{
fallback::v_int16x8 a_(a);
fallback::v_int16x8 b_(b);
fallback::v_int64x2 c_(c);
return fallback::v_dotprod_expand(a_, b_, c_);
}
{ return v_dotprod_expand(a, b) + c; }
// 32 >> 64f
inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b)
@ -2610,44 +2507,24 @@ OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_float64x2)
inline v_float32x4 v_sqrt(const v_float32x4& x)
{
#ifdef __wasm_unimplemented_simd128__
return v_float32x4(wasm_f32x4_sqrt(x.val));
#else
fallback::v_float32x4 x_(x);
return fallback::v_sqrt(x_);
#endif
}
inline v_float32x4 v_invsqrt(const v_float32x4& x)
{
#ifdef __wasm_unimplemented_simd128__
const v128_t _1_0 = wasm_f32x4_splat(1.0);
return v_float32x4(wasm_f32x4_div(_1_0, wasm_f32x4_sqrt(x.val)));
#else
fallback::v_float32x4 x_(x);
return fallback::v_invsqrt(x_);
#endif
}
inline v_float64x2 v_sqrt(const v_float64x2& x)
{
#ifdef __wasm_unimplemented_simd128__
return v_float64x2(wasm_f64x2_sqrt(x.val));
#else
fallback::v_float64x2 x_(x);
return fallback::v_sqrt(x_);
#endif
}
inline v_float64x2 v_invsqrt(const v_float64x2& x)
{
#ifdef __wasm_unimplemented_simd128__
const v128_t _1_0 = wasm_f64x2_splat(1.0);
return v_float64x2(wasm_f64x2_div(_1_0, wasm_f64x2_sqrt(x.val)));
#else
fallback::v_float64x2 x_(x);
return fallback::v_invsqrt(x_);
#endif
}
#define OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(_Tpuvec, _Tpsvec, suffix, zsuffix, shiftWidth) \
@ -2666,12 +2543,7 @@ inline v_float32x4 v_abs(const v_float32x4& x)
{ return v_float32x4(wasm_f32x4_abs(x.val)); }
inline v_float64x2 v_abs(const v_float64x2& x)
{
#ifdef __wasm_unimplemented_simd128__
return v_float64x2(wasm_f64x2_abs(x.val));
#else
fallback::v_float64x2 x_(x);
return fallback::v_abs(x_);
#endif
}
// TODO: exp, log, sin, cos
@ -2684,21 +2556,8 @@ inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_min, wasm_f32x4_min)
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_max, wasm_f32x4_max)
#ifdef __wasm_unimplemented_simd128__
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_min, wasm_f64x2_min)
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_max, wasm_f64x2_max)
#else
#define OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(func) \
inline v_float64x2 func(const v_float64x2& a, const v_float64x2& b) \
{ \
fallback::v_float64x2 a_(a), b_(b); \
return fallback::func(a_, b_); \
}
OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(v_min)
OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(v_max)
#endif
#define OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(_Tpvec, suffix) \
inline _Tpvec v_min(const _Tpvec& a, const _Tpvec& b) \
@ -2753,24 +2612,7 @@ OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int16x8, i16x8, i16x8)
OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_uint32x4, u32x4, i32x4)
OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int32x4, i32x4, i32x4)
OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float32x4, f32x4, f32x4)
#ifdef __wasm_unimplemented_simd128__
OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float64x2, f64x2, f64x2)
#else
#define OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(_Tpvec, bin_op) \
inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
{ \
fallback::_Tpvec a_(a), b_(b); \
return _Tpvec((a_) bin_op (b_));\
} \
OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, ==)
OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, !=)
OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, <)
OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, >)
OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, <=)
OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, >=)
#endif
#define OPENCV_HAL_IMPL_WASM_64BIT_CMP_OP(_Tpvec, cast) \
inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
@ -2789,14 +2631,9 @@ inline v_float32x4 v_not_nan(const v_float32x4& a)
}
inline v_float64x2 v_not_nan(const v_float64x2& a)
{
#ifdef __wasm_unimplemented_simd128__
v128_t z = wasm_i64x2_splat(0x7fffffffffffffff);
v128_t t = wasm_i64x2_splat(0x7ff0000000000000);
return v_float64x2((__u64x2)(wasm_v128_and(a.val, z)) < (__u64x2)t);
#else
fallback::v_float64x2 a_(a);
return fallback::v_not_nan(a_);
#endif
}
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_add_wrap, wasm_i8x16_add)
@ -2877,32 +2714,30 @@ inline v_float32x4 v_absdiff(const v_float32x4& a, const v_float32x4& b)
}
inline v_float64x2 v_absdiff(const v_float64x2& a, const v_float64x2& b)
{
#ifdef __wasm_unimplemented_simd128__
v128_t absmask_vec = wasm_u64x2_shr(wasm_i32x4_splat(-1), 1);
return v_float64x2(wasm_v128_and(wasm_f64x2_sub(a.val, b.val), absmask_vec));
#else
fallback::v_float64x2 a_(a), b_(b);
return fallback::v_absdiff(a_, b_);
#endif
}
#define OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(_Tpvec) \
#define OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(_Tpvec, suffix) \
inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \
{ \
fallback::_Tpvec a_(a), b_(b); \
return fallback::v_magnitude(a_, b_); \
v128_t a_Square = wasm_##suffix##_mul(a.val, a.val); \
v128_t b_Square = wasm_##suffix##_mul(b.val, b.val); \
return _Tpvec(wasm_##suffix##_sqrt(wasm_##suffix##_add(a_Square, b_Square))); \
} \
inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \
{ \
return v_fma(a, a, b*b); \
v128_t a_Square = wasm_##suffix##_mul(a.val, a.val); \
v128_t b_Square = wasm_##suffix##_mul(b.val, b.val); \
return _Tpvec(wasm_##suffix##_add(a_Square, b_Square)); \
} \
inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \
{ \
return v_fma(a, b, c); \
return _Tpvec(wasm_##suffix##_add(wasm_##suffix##_mul(a.val, b.val), c.val)); \
}
OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float32x4)
OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float64x2)
OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float32x4, f32x4)
OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float64x2, f64x2)
#define OPENCV_HAL_IMPL_WASM_SHIFT_OP(_Tpuvec, _Tpsvec, suffix, ssuffix) \
inline _Tpuvec operator << (const _Tpuvec& a, int imm) \
@ -2945,37 +2780,7 @@ inline _Tpsvec v_shr(const _Tpsvec& a) \
OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint8x16, v_int8x16, i8x16, u8x16)
OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint16x8, v_int16x8, i16x8, u16x8)
OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint32x4, v_int32x4, i32x4, u32x4)
#ifdef __wasm_unimplemented_simd128__
OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint64x2, v_int64x2, i64x2, u64x2)
#else
#define OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(_Tpvec) \
inline _Tpvec operator << (const _Tpvec& a, int imm) \
{ \
fallback::_Tpvec a_(a); \
return a_ << imm; \
} \
inline _Tpvec operator >> (const _Tpvec& a, int imm) \
{ \
fallback::_Tpvec a_(a); \
return a_ >> imm; \
} \
template<int imm> \
inline _Tpvec v_shl(const _Tpvec& a) \
{ \
fallback::_Tpvec a_(a); \
return fallback::v_shl<imm>(a_); \
} \
template<int imm> \
inline _Tpvec v_shr(const _Tpvec& a) \
{ \
fallback::_Tpvec a_(a); \
return fallback::v_shr<imm>(a_); \
} \
OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(v_uint64x2)
OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(v_int64x2)
#endif
namespace hal_wasm_internal
{
@ -3180,9 +2985,18 @@ OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint8x16, unsigned)
OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int8x16, int)
OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint16x8, unsigned)
OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int16x8, int)
OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint64x2, uint64)
OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int64x2, int64)
OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_float64x2, double)
#define OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(_Tpvec, scalartype, regtype, suffix, esuffix) \
inline scalartype v_reduce_sum(const _Tpvec& a) \
{ \
regtype val = a.val; \
val = wasm_##suffix##_add(val, wasm_v8x16_shuffle(val, val, 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7)); \
return (scalartype)wasm_##esuffix##_extract_lane(val, 0); \
}
OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_uint64x2, uint64, v128_t, i64x2, i64x2)
OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_int64x2, int64, v128_t, i64x2, i64x2)
OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_float64x2, double, v128_t, f64x2,f64x2)
inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
const v_float32x4& c, const v_float32x4& d)
@ -3318,30 +3132,27 @@ OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int16x8, i16x8, short)
OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_uint32x4, i32x4, int)
OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int32x4, i32x4, int)
OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_float32x4, i32x4, float)
OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_float64x2, f64x2, double)
#define OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(_Tpvec, suffix, esuffix) \
inline bool v_check_all(const _Tpvec& a) \
{ \
v128_t masked = v_reinterpret_as_##esuffix(a).val; \
masked = wasm_i32x4_replace_lane(masked, 0, 0xffffffff); \
masked = wasm_i32x4_replace_lane(masked, 2, 0xffffffff); \
return wasm_i8x16_all_true(wasm_##suffix##_lt(masked, wasm_##suffix##_splat(0))); \
} \
inline bool v_check_any(const _Tpvec& a) \
{ \
v128_t masked = v_reinterpret_as_##esuffix(a).val; \
masked = wasm_i32x4_replace_lane(masked, 0, 0x0); \
masked = wasm_i32x4_replace_lane(masked, 2, 0x0); \
return wasm_i8x16_any_true(wasm_##suffix##_lt(masked, wasm_##suffix##_splat(0))); \
} \
OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(v_int64x2, i32x4, s32)
OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(v_uint64x2, i32x4, u32)
inline int v_signmask(const v_float64x2& a)
{
fallback::v_float64x2 a_(a);
return fallback::v_signmask(a_);
}
inline bool v_check_all(const v_float64x2& a)
{
#ifdef __wasm_unimplemented_simd128__
return wasm_i8x16_all_true((__i64x2)(a.val) < (__i64x2)(wasm_i64x2_splat(0)));
#else
fallback::v_float64x2 a_(a);
return fallback::v_check_all(a_);
#endif
}
inline bool v_check_any(const v_float64x2& a)
{
#ifdef __wasm_unimplemented_simd128__
return wasm_i8x16_any_true((__i64x2)(a.val) < (__i64x2)(wasm_i64x2_splat(0)));;
#else
fallback::v_float64x2 a_(a);
return fallback::v_check_any(a_);
#endif
}
inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); }
@ -3366,8 +3177,8 @@ OPENCV_HAL_IMPL_WASM_SELECT(v_uint16x8)
OPENCV_HAL_IMPL_WASM_SELECT(v_int16x8)
OPENCV_HAL_IMPL_WASM_SELECT(v_uint32x4)
OPENCV_HAL_IMPL_WASM_SELECT(v_int32x4)
// OPENCV_HAL_IMPL_WASM_SELECT(v_uint64x2)
// OPENCV_HAL_IMPL_WASM_SELECT(v_int64x2)
OPENCV_HAL_IMPL_WASM_SELECT(v_uint64x2)
OPENCV_HAL_IMPL_WASM_SELECT(v_int64x2)
OPENCV_HAL_IMPL_WASM_SELECT(v_float32x4)
OPENCV_HAL_IMPL_WASM_SELECT(v_float64x2)

@ -458,158 +458,6 @@ CV__DEBUG_NS_END
//////////////////////////////////////////// Mat //////////////////////////////////////////
inline
Mat::Mat()
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
datalimit(0), allocator(0), u(0), size(&rows), step(0)
{}
inline
Mat::Mat(int _rows, int _cols, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
datalimit(0), allocator(0), u(0), size(&rows), step(0)
{
create(_rows, _cols, _type);
}
inline
Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
datalimit(0), allocator(0), u(0), size(&rows), step(0)
{
create(_rows, _cols, _type);
*this = _s;
}
inline
Mat::Mat(Size _sz, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
datalimit(0), allocator(0), u(0), size(&rows), step(0)
{
create( _sz.height, _sz.width, _type );
}
inline
Mat::Mat(Size _sz, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
datalimit(0), allocator(0), u(0), size(&rows), step(0)
{
create(_sz.height, _sz.width, _type);
*this = _s;
}
inline
Mat::Mat(int _dims, const int* _sz, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
datalimit(0), allocator(0), u(0), size(&rows), step(0)
{
create(_dims, _sz, _type);
}
inline
Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
datalimit(0), allocator(0), u(0), size(&rows), step(0)
{
create(_dims, _sz, _type);
*this = _s;
}
inline
Mat::Mat(const std::vector<int>& _sz, int _type)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
datalimit(0), allocator(0), u(0), size(&rows), step(0)
{
create(_sz, _type);
}
inline
Mat::Mat(const std::vector<int>& _sz, int _type, const Scalar& _s)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
datalimit(0), allocator(0), u(0), size(&rows), step(0)
{
create(_sz, _type);
*this = _s;
}
inline
Mat::Mat(const Mat& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),
u(m.u), size(&rows), step(0)
{
if( u )
CV_XADD(&u->refcount, 1);
if( m.dims <= 2 )
{
step[0] = m.step[0]; step[1] = m.step[1];
}
else
{
dims = 0;
copySize(m);
}
}
inline
Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step)
: flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols),
data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0),
allocator(0), u(0), size(&rows)
{
CV_Assert(total() == 0 || data != NULL);
size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type);
size_t minstep = cols * esz;
if( _step == AUTO_STEP )
{
_step = minstep;
}
else
{
CV_DbgAssert( _step >= minstep );
if (_step % esz1 != 0)
{
CV_Error(Error::BadStep, "Step must be a multiple of esz1");
}
}
step[0] = _step;
step[1] = esz;
datalimit = datastart + _step * rows;
dataend = datalimit - _step + minstep;
updateContinuityFlag();
}
inline
Mat::Mat(Size _sz, int _type, void* _data, size_t _step)
: flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width),
data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0),
allocator(0), u(0), size(&rows)
{
CV_Assert(total() == 0 || data != NULL);
size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type);
size_t minstep = cols*esz;
if( _step == AUTO_STEP )
{
_step = minstep;
}
else
{
CV_DbgAssert( _step >= minstep );
if (_step % esz1 != 0)
{
CV_Error(Error::BadStep, "Step must be a multiple of esz1");
}
}
step[0] = _step;
step[1] = esz;
datalimit = datastart + _step*rows;
dataend = datalimit - _step + minstep;
updateContinuityFlag();
}
template<typename _Tp> inline
Mat::Mat(const std::vector<_Tp>& vec, bool copyData)
: flags(MAGIC_VAL + traits::Type<_Tp>::value + CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()),
@ -743,43 +591,6 @@ Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer)
*this = commaInitializer.operator Mat_<_Tp>();
}
inline
Mat::~Mat()
{
release();
if( step.p != step.buf )
fastFree(step.p);
}
inline
Mat& Mat::operator = (const Mat& m)
{
if( this != &m )
{
if( m.u )
CV_XADD(&m.u->refcount, 1);
release();
flags = m.flags;
if( dims <= 2 && m.dims <= 2 )
{
dims = m.dims;
rows = m.rows;
cols = m.cols;
step[0] = m.step[0];
step[1] = m.step[1];
}
else
copySize(m);
data = m.data;
datastart = m.datastart;
dataend = m.dataend;
datalimit = m.datalimit;
allocator = m.allocator;
u = m.u;
}
return *this;
}
inline
Mat Mat::row(int y) const
{
@ -816,67 +627,6 @@ Mat Mat::colRange(const Range& r) const
return Mat(*this, Range::all(), r);
}
inline
Mat Mat::clone() const
{
Mat m;
copyTo(m);
return m;
}
inline
void Mat::assignTo( Mat& m, int _type ) const
{
if( _type < 0 )
m = *this;
else
convertTo(m, _type);
}
inline
void Mat::create(int _rows, int _cols, int _type)
{
_type &= TYPE_MASK;
if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data )
return;
int sz[] = {_rows, _cols};
create(2, sz, _type);
}
inline
void Mat::create(Size _sz, int _type)
{
create(_sz.height, _sz.width, _type);
}
inline
void Mat::addref()
{
if( u )
CV_XADD(&u->refcount, 1);
}
inline
void Mat::release()
{
if( u && CV_XADD(&u->refcount, -1) == 1 )
deallocate();
u = NULL;
datastart = dataend = datalimit = data = 0;
for(int i = 0; i < dims; i++)
size.p[i] = 0;
#ifdef _DEBUG
flags = MAGIC_VAL;
dims = rows = cols = 0;
if(step.p != step.buf)
{
fastFree(step.p);
step.p = step.buf;
size.p = &rows;
}
#endif
}
inline
Mat Mat::operator()( Range _rowRange, Range _colRange ) const
{
@ -945,40 +695,6 @@ int Mat::channels() const
return CV_MAT_CN(flags);
}
inline
size_t Mat::step1(int i) const
{
return step.p[i] / elemSize1();
}
inline
bool Mat::empty() const
{
return data == 0 || total() == 0 || dims == 0;
}
inline
size_t Mat::total() const
{
if( dims <= 2 )
return (size_t)rows * cols;
size_t p = 1;
for( int i = 0; i < dims; i++ )
p *= size[i];
return p;
}
inline
size_t Mat::total(int startDim, int endDim) const
{
CV_Assert( 0 <= startDim && startDim <= endDim);
size_t p = 1;
int endDim_ = endDim <= dims ? endDim : dims;
for( int i = startDim; i < endDim_; i++ )
p *= size[i];
return p;
}
inline
uchar* Mat::ptr(int y)
{
@ -1396,67 +1112,6 @@ void Mat::push_back(const std::vector<_Tp>& v)
push_back(Mat(v));
}
inline
Mat::Mat(Mat&& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),
u(m.u), size(&rows)
{
if (m.dims <= 2) // move new step/size info
{
step[0] = m.step[0];
step[1] = m.step[1];
}
else
{
CV_DbgAssert(m.step.p != m.step.buf);
step.p = m.step.p;
size.p = m.size.p;
m.step.p = m.step.buf;
m.size.p = &m.rows;
}
m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL;
m.allocator = NULL;
m.u = NULL;
}
inline
Mat& Mat::operator = (Mat&& m)
{
if (this == &m)
return *this;
release();
flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; data = m.data;
datastart = m.datastart; dataend = m.dataend; datalimit = m.datalimit; allocator = m.allocator;
u = m.u;
if (step.p != step.buf) // release self step/size
{
fastFree(step.p);
step.p = step.buf;
size.p = &rows;
}
if (m.dims <= 2) // move new step/size info
{
step[0] = m.step[0];
step[1] = m.step[1];
}
else
{
CV_DbgAssert(m.step.p != m.step.buf);
step.p = m.step.p;
size.p = m.size.p;
m.step.p = m.step.buf;
m.size.p = &m.rows;
}
m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL;
m.allocator = NULL;
m.u = NULL;
return *this;
}
///////////////////////////// MatSize ////////////////////////////
@ -1503,22 +1158,6 @@ MatSize::operator const int*() const
return p;
}
inline
bool MatSize::operator == (const MatSize& sz) const
{
int d = dims();
int dsz = sz.dims();
if( d != dsz )
return false;
if( d == 2 )
return p[0] == sz.p[0] && p[1] == sz.p[1];
for( int i = 0; i < d; i++ )
if( p[i] != sz.p[i] )
return false;
return true;
}
inline
bool MatSize::operator != (const MatSize& sz) const
{
@ -1775,9 +1414,7 @@ template<typename _Tp> inline
void Mat_<_Tp>::release()
{
Mat::release();
#ifdef _DEBUG
flags = (flags & ~CV_MAT_TYPE_MASK) + traits::Type<_Tp>::value;
#endif
}
template<typename _Tp> inline
@ -2132,51 +1769,6 @@ Mat_<_Tp>::Mat_(MatExpr&& e)
///////////////////////////// SparseMat /////////////////////////////
inline
SparseMat::SparseMat()
: flags(MAGIC_VAL), hdr(0)
{}
inline
SparseMat::SparseMat(int _dims, const int* _sizes, int _type)
: flags(MAGIC_VAL), hdr(0)
{
create(_dims, _sizes, _type);
}
inline
SparseMat::SparseMat(const SparseMat& m)
: flags(m.flags), hdr(m.hdr)
{
addref();
}
inline
SparseMat::~SparseMat()
{
release();
}
inline
SparseMat& SparseMat::operator = (const SparseMat& m)
{
if( this != &m )
{
if( m.hdr )
CV_XADD(&m.hdr->refcount, 1);
release();
flags = m.flags;
hdr = m.hdr;
}
return *this;
}
inline
SparseMat& SparseMat::operator = (const Mat& m)
{
return (*this = SparseMat(m));
}
inline
SparseMat SparseMat::clone() const
{
@ -2185,30 +1777,6 @@ SparseMat SparseMat::clone() const
return temp;
}
//! Copies *this into m, converting the element type unless _type < 0
//! (negative type means "keep the current type").
inline
void SparseMat::assignTo( SparseMat& m, int _type ) const
{
if( _type < 0 )
m = *this;
else
convertTo(m, _type);
}
//! Atomically increments the shared header's reference count; no-op when empty.
inline
void SparseMat::addref()
{
if( hdr )
CV_XADD(&hdr->refcount, 1);
}
//! Atomically decrements the reference count, deleting the header when this
//! was the last owner. Always leaves *this pointing at no header.
inline
void SparseMat::release()
{
if( hdr && CV_XADD(&hdr->refcount, -1) == 1 )
delete hdr;
hdr = 0;
}
inline
size_t SparseMat::elemSize() const
{
@ -2268,36 +1836,6 @@ size_t SparseMat::nzcount() const
return hdr ? hdr->nodeCount : 0;
}
//! 1-D element hash: the index itself.
inline
size_t SparseMat::hash(int i0) const
{
return (size_t)i0;
}
//! 2-D element hash: positional mix of the two indices in base HASH_SCALE.
inline
size_t SparseMat::hash(int i0, int i1) const
{
return (size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1;
}
//! 3-D element hash, same scheme as the 2-D overload.
inline
size_t SparseMat::hash(int i0, int i1, int i2) const
{
return ((size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1) * HASH_SCALE + (unsigned)i2;
}
//! n-D element hash over hdr->dims indices; returns 0 for an empty matrix.
//! NOTE(review): idx[0] is read before the hdr null-check — harmless only if
//! callers always pass a valid index array; confirm against call sites.
inline
size_t SparseMat::hash(const int* idx) const
{
size_t h = (unsigned)idx[0];
if( !hdr )
return 0;
int d = hdr->dims;
for(int i = 1; i < d; i++ )
h = h * HASH_SCALE + (unsigned)idx[i];
return h;
}
template<typename _Tp> inline
_Tp& SparseMat::ref(int i0, size_t* hashval)
{
@ -3617,74 +3155,6 @@ const Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b)
//////////////////////////////// UMat ////////////////////////////////
//! Default constructor: an empty UMat with no data allocated.
inline
UMat::UMat(UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{}
//! Allocates a _rows x _cols matrix of the given type.
inline
UMat::UMat(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
create(_rows, _cols, _type);
}
//! Allocates a _rows x _cols matrix and fills every element with _s.
inline
UMat::UMat(int _rows, int _cols, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
create(_rows, _cols, _type);
*this = _s;
}
//! Size-based 2-D constructor (note: Size is width x height, hence the swap).
inline
UMat::UMat(Size _sz, int _type, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
create( _sz.height, _sz.width, _type );
}
//! Size-based 2-D constructor with scalar initialization.
inline
UMat::UMat(Size _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
create(_sz.height, _sz.width, _type);
*this = _s;
}
//! n-dimensional constructor: _sz holds _dims extents.
inline
UMat::UMat(int _dims, const int* _sz, int _type, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
create(_dims, _sz, _type);
}
//! n-dimensional constructor with scalar initialization.
inline
UMat::UMat(int _dims, const int* _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
create(_dims, _sz, _type);
*this = _s;
}
//! Copy constructor: O(1) shallow copy sharing the device buffer (u) with m.
//! For >2 dims the step/size arrays are heap-allocated, so copySize() is used.
inline
UMat::UMat(const UMat& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)
{
addref();
if( m.dims <= 2 )
{
step[0] = m.step[0]; step[1] = m.step[1];
}
else
{
dims = 0;
copySize(m);
}
}
template<typename _Tp> inline
UMat::UMat(const std::vector<_Tp>& vec, bool copyData)
: flags(MAGIC_VAL + traits::Type<_Tp>::value + CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()),
@ -3701,33 +3171,6 @@ cols(1), allocator(0), usageFlags(USAGE_DEFAULT), u(0), offset(0), size(&rows)
Mat((int)vec.size(), 1, traits::Type<_Tp>::value, (uchar*)&vec[0]).copyTo(*this);
}
//! Reference-counted shallow assignment. m is addref'ed before release() so
//! aliased sources survive; step/size are copied inline for <=2 dims and via
//! copySize() otherwise. usageFlags is only inherited if ours is the default.
inline
UMat& UMat::operator = (const UMat& m)
{
if( this != &m )
{
const_cast<UMat&>(m).addref();
release();
flags = m.flags;
if( dims <= 2 && m.dims <= 2 )
{
dims = m.dims;
rows = m.rows;
cols = m.cols;
step[0] = m.step[0];
step[1] = m.step[1];
}
else
copySize(m);
allocator = m.allocator;
if (usageFlags == USAGE_DEFAULT)
usageFlags = m.usageFlags;
u = m.u;
offset = m.offset;
}
return *this;
}
inline
UMat UMat::row(int y) const
{
@ -3764,55 +3207,6 @@ UMat UMat::colRange(const Range& r) const
return UMat(*this, Range::all(), r);
}
//! Deep copy: allocates fresh storage and copies the data.
inline
UMat UMat::clone() const
{
UMat m;
copyTo(m);
return m;
}
//! Copies *this into m, converting type unless _type < 0 (keep type).
inline
void UMat::assignTo( UMat& m, int _type ) const
{
if( _type < 0 )
m = *this;
else
convertTo(m, _type);
}
//! (Re)allocates a 2-D matrix; no-op when the current shape/type already match
//! and a buffer exists.
inline
void UMat::create(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)
{
_type &= TYPE_MASK;
if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && u )
return;
int sz[] = {_rows, _cols};
create(2, sz, _type, _usageFlags);
}
//! Size-based overload of create() (Size is width x height, hence the swap).
inline
void UMat::create(Size _sz, int _type, UMatUsageFlags _usageFlags)
{
create(_sz.height, _sz.width, _type, _usageFlags);
}
//! Atomically increments the shared buffer's reference count; no-op when empty.
inline
void UMat::addref()
{
if( u )
CV_XADD(&(u->urefcount), 1);
}
//! Drops this reference (deallocating on last owner) and zeroes the extents
//! so the UMat reads as empty afterwards.
inline void UMat::release()
{
if( u && CV_XADD(&(u->urefcount), -1) == 1 )
deallocate();
for(int i = 0; i < dims; i++)
size.p[i] = 0;
u = 0;
}
inline
UMat UMat::operator()( Range _rowRange, Range _colRange ) const
{
@ -3887,83 +3281,6 @@ size_t UMat::step1(int i) const
return step.p[i] / elemSize1();
}
//! True when there is no buffer, no elements, or no dimensions.
inline
bool UMat::empty() const
{
return u == 0 || total() == 0 || dims == 0;
}
//! Total number of elements: rows*cols for 2-D, product of all extents otherwise.
inline
size_t UMat::total() const
{
if( dims <= 2 )
return (size_t)rows * cols;
size_t p = 1;
for( int i = 0; i < dims; i++ )
p *= size[i];
return p;
}
//! Move constructor: steals m's buffer without touching reference counts.
//! For >2 dims the heap-allocated step/size arrays are taken over and m is
//! re-pointed at its inline buffers; m is left as a valid empty UMat.
inline
UMat::UMat(UMat&& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)
{
if (m.dims <= 2) // move new step/size info
{
step[0] = m.step[0];
step[1] = m.step[1];
}
else
{
CV_DbgAssert(m.step.p != m.step.buf);
step.p = m.step.p;
size.p = m.size.p;
m.step.p = m.step.buf;
m.size.p = &m.rows;
}
m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
m.allocator = NULL;
m.u = NULL;
m.offset = 0;
}
//! Move assignment: releases the current buffer, then steals m's state.
//! Any heap-allocated step/size arrays of *this are freed first; m's (if any)
//! are taken over, leaving m as a valid empty UMat.
inline
UMat& UMat::operator = (UMat&& m)
{
if (this == &m)
return *this;
release();
flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols;
allocator = m.allocator; usageFlags = m.usageFlags;
u = m.u;
offset = m.offset;
if (step.p != step.buf) // release self step/size
{
fastFree(step.p);
step.p = step.buf;
size.p = &rows;
}
if (m.dims <= 2) // move new step/size info
{
step[0] = m.step[0];
step[1] = m.step[1];
}
else
{
CV_DbgAssert(m.step.p != m.step.buf);
step.p = m.step.p;
size.p = m.size.p;
m.step.p = m.step.buf;
m.size.p = &m.rows;
}
m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
m.allocator = NULL;
m.u = NULL;
m.offset = 0;
return *this;
}
//! True when the host-side copy is stale relative to the device copy.
inline bool UMatData::hostCopyObsolete() const { return (flags & HOST_COPY_OBSOLETE) != 0; }
//! True when the device-side copy is stale relative to the host copy.
inline bool UMatData::deviceCopyObsolete() const { return (flags & DEVICE_COPY_OBSOLETE) != 0; }

@ -403,8 +403,8 @@ public:
/**
* @brief Simplified writing API to use with bindings.
* @param name Name of the written object
* @param val Value of the written object
* @param name Name of the written object. When writing to sequences (a.k.a. "arrays"), pass an empty string.
* @param val Value of the written object.
*/
CV_WRAP void write(const String& name, int val);
/// @overload
@ -437,9 +437,10 @@ public:
CV_WRAP void writeComment(const String& comment, bool append = false);
/** @brief Starts to write a nested structure (sequence or a mapping).
@param name name of the structure (if it's a member of parent mapping, otherwise it should be empty
@param name name of the structure. When writing to sequences (a.k.a. "arrays"), pass an empty string.
@param flags type of the structure (FileNode::MAP or FileNode::SEQ (both with optional FileNode::FLOW)).
@param typeName usually an empty string
@param typeName optional name of the type you store. The effect of setting this depends on the storage format.
I.e. if the format has a specification for storing type information, this parameter is used.
*/
CV_WRAP void startWriteStruct(const String& name, int flags, const String& typeName=String());

File diff suppressed because it is too large Load Diff

@ -0,0 +1,849 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2020, Huawei Technologies Co., Ltd. All rights reserved.
// Third party copyrights are property of their respective owners.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Liangqian Kong <chargerKong@126.com>
// Longbu Wang <riskiest@gmail.com>
#ifndef OPENCV_CORE_QUATERNION_INL_HPP
#define OPENCV_CORE_QUATERNION_INL_HPP
#ifndef OPENCV_CORE_QUATERNION_HPP
#erorr This is not a standalone header. Include quaternion.hpp instead.
#endif
//@cond IGNORE
///////////////////////////////////////////////////////////////////////////////////////
//Implementation
namespace cv {
//! Default constructor: the zero quaternion (0, 0, 0, 0).
template <typename T>
Quat<T>::Quat() : w(0), x(0), y(0), z(0) {}
//! Constructs from a 4-vector laid out as [w, x, y, z].
template <typename T>
Quat<T>::Quat(const Vec<T, 4> &coeff):w(coeff[0]), x(coeff[1]), y(coeff[2]), z(coeff[3]){}
//! Constructs from the four scalar components.
template <typename T>
Quat<T>::Quat(const T qw, const T qx, const T qy, const T qz):w(qw), x(qx), y(qy), z(qz){}
//! Builds the unit quaternion [cos(a/2), sin(a/2)*axis/|axis|] for a rotation
//! of `angle` radians about `axis`. Raises StsBadArg when |axis| is (near) zero.
template <typename T>
Quat<T> Quat<T>::createFromAngleAxis(const T angle, const Vec<T, 3> &axis)
{
T w, x, y, z;
T vNorm = std::sqrt(axis.dot(axis));
if (vNorm < CV_QUAT_EPS)
{
CV_Error(Error::StsBadArg, "this quaternion does not represent a rotation");
}
const T angle_half = angle * 0.5;
w = std::cos(angle_half);
const T sin_v = std::sin(angle_half);
// sin(a/2)/|axis| scales the axis to a unit vector and applies sin in one step.
const T sin_norm = sin_v / vNorm;
x = sin_norm * axis[0];
y = sin_norm * axis[1];
z = sin_norm * axis[2];
return Quat<T>(w, x, y, z);
}
//! Converts a 3x3 rotation matrix to a quaternion. The branch is chosen from
//! the trace / largest diagonal entry so that the divisor S stays well away
//! from zero for numerical stability.
//! NOTE(review): each branch appears to produce the negation of the textbook
//! component formulas; -q and q encode the same rotation, but confirm the
//! sign convention against callers and toRotMat3x3 before relying on it.
template <typename T>
Quat<T> Quat<T>::createFromRotMat(InputArray _R)
{
CV_CheckTypeEQ(_R.type(), cv::traits::Type<T>::value, "");
if (_R.rows() != 3 || _R.cols() != 3)
{
CV_Error(Error::StsBadArg, "Cannot convert matrix to quaternion: rotation matrix should be a 3x3 matrix");
}
Matx<T, 3, 3> R;
_R.copyTo(R);
T S, w, x, y, z;
T trace = R(0, 0) + R(1, 1) + R(2, 2);
if (trace > 0)
{
S = std::sqrt(trace + 1) * 2;
x = (R(1, 2) - R(2, 1)) / S;
y = (R(2, 0) - R(0, 2)) / S;
z = (R(0, 1) - R(1, 0)) / S;
w = -0.25 * S;
}
else if (R(0, 0) > R(1, 1) && R(0, 0) > R(2, 2))
{
S = std::sqrt(1.0 + R(0, 0) - R(1, 1) - R(2, 2)) * 2;
x = -0.25 * S;
y = -(R(1, 0) + R(0, 1)) / S;
z = -(R(0, 2) + R(2, 0)) / S;
w = (R(1, 2) - R(2, 1)) / S;
}
else if (R(1, 1) > R(2, 2))
{
S = std::sqrt(1.0 - R(0, 0) + R(1, 1) - R(2, 2)) * 2;
x = (R(0, 1) + R(1, 0)) / S;
y = 0.25 * S;
z = (R(1, 2) + R(2, 1)) / S;
w = (R(0, 2) - R(2, 0)) / S;
}
else
{
S = std::sqrt(1.0 - R(0, 0) - R(1, 1) + R(2, 2)) * 2;
x = (R(0, 2) + R(2, 0)) / S;
y = (R(1, 2) + R(2, 1)) / S;
z = 0.25 * S;
w = -(R(0, 1) - R(1, 0)) / S;
}
return Quat<T> (w, x, y, z);
}
//! Converts a Rodrigues rotation vector (direction = axis, magnitude = angle
//! in radians) to a quaternion. A near-zero vector maps to the identity.
template <typename T>
Quat<T> Quat<T>::createFromRvec(InputArray _rvec)
{
if (!((_rvec.cols() == 1 && _rvec.rows() == 3) || (_rvec.cols() == 3 && _rvec.rows() == 1))) {
CV_Error(Error::StsBadArg, "Cannot convert rotation vector to quaternion: The length of rotation vector should be 3");
}
Vec<T, 3> rvec;
_rvec.copyTo(rvec);
// psi = |rvec| is the rotation angle; it is non-negative by construction.
// NOTE(review): `abs` is unqualified — relies on an abs overload for T being
// in scope (cv:: pulls in std::abs); redundant anyway since psi >= 0. Confirm.
T psi = std::sqrt(rvec.dot(rvec));
if (abs(psi) < CV_QUAT_EPS) {
return Quat<T> (1, 0, 0, 0);
}
Vec<T, 3> axis = rvec / psi;
return createFromAngleAxis(psi, axis);
}
//! Unary minus: negates all four components.
template <typename T>
inline Quat<T> Quat<T>::operator-() const
{
return Quat<T>(-w, -x, -y, -z);
}
//! Component-wise equality within CV_QUAT_EPS tolerance (not exact equality).
template <typename T>
inline bool Quat<T>::operator==(const Quat<T> &q) const
{
return (abs(w - q.w) < CV_QUAT_EPS && abs(x - q.x) < CV_QUAT_EPS && abs(y - q.y) < CV_QUAT_EPS && abs(z - q.z) < CV_QUAT_EPS);
}
//! Component-wise sum.
template <typename T>
inline Quat<T> Quat<T>::operator+(const Quat<T> &q1) const
{
return Quat<T>(w + q1.w, x + q1.x, y + q1.y, z + q1.z);
}
//! Component-wise difference.
template <typename T>
inline Quat<T> Quat<T>::operator-(const Quat<T> &q1) const
{
return Quat<T>(w - q1.w, x - q1.x, y - q1.y, z - q1.z);
}
//! In-place component-wise sum.
template <typename T>
inline Quat<T>& Quat<T>::operator+=(const Quat<T> &q1)
{
w += q1.w;
x += q1.x;
y += q1.y;
z += q1.z;
return *this;
}
//! In-place component-wise difference.
template <typename T>
inline Quat<T>& Quat<T>::operator-=(const Quat<T> &q1)
{
w -= q1.w;
x -= q1.x;
y -= q1.y;
z -= q1.z;
return *this;
}
//! Quaternion (Hamilton) product, delegated to the Vec<T, 4> multiplication
//! overload (non-commutative: q * q2 != q2 * q in general).
template <typename T>
inline Quat<T> Quat<T>::operator*(const Quat<T> &q1) const
{
Vec<T, 4> q{w, x, y, z};
Vec<T, 4> q2{q1.w, q1.x, q1.y, q1.z};
return Quat<T>(q * q2);
}
//! Scalar multiplication (quaternion * scalar).
template <typename T, typename S>
Quat<T> operator*(const Quat<T> &q1, const S a)
{
return Quat<T>(a * q1.w, a * q1.x, a * q1.y, a * q1.z);
}
//! Scalar multiplication (scalar * quaternion).
template <typename T, typename S>
Quat<T> operator*(const S a, const Quat<T> &q1)
{
return Quat<T>(a * q1.w, a * q1.x, a * q1.y, a * q1.z);
}
//! In-place Hamilton product, written out component by component. All four
//! new components are computed into temporaries before any member is updated.
template <typename T>
inline Quat<T>& Quat<T>::operator*=(const Quat<T> &q1)
{
T qw, qx, qy, qz;
qw = w * q1.w - x * q1.x - y * q1.y - z * q1.z;
qx = x * q1.w + w * q1.x + y * q1.z - z * q1.y;
qy = y * q1.w + w * q1.y + z * q1.x - x * q1.z;
qz = z * q1.w + w * q1.z + x * q1.y - y * q1.x;
w = qw;
x = qx;
y = qy;
z = qz;
return *this;
}
//! In-place quaternion division: right-multiplication by the inverse.
template <typename T>
inline Quat<T>& Quat<T>::operator/=(const Quat<T> &q1)
{
Quat<T> q(*this * q1.inv());
w = q.w;
x = q.x;
y = q.y;
z = q.z;
return *this;
}
//! In-place scalar multiplication.
template <typename T>
Quat<T>& Quat<T>::operator*=(const T &q1)
{
w *= q1;
x *= q1;
y *= q1;
z *= q1;
return *this;
}
//! In-place scalar division (implemented as one reciprocal + four multiplies).
template <typename T>
inline Quat<T>& Quat<T>::operator/=(const T &a)
{
const T a_inv = 1.0 / a;
w *= a_inv;
x *= a_inv;
y *= a_inv;
z *= a_inv;
return *this;
}
//! Scalar division (reciprocal multiply, same scheme as operator/=).
template <typename T>
inline Quat<T> Quat<T>::operator/(const T &a) const
{
const T a_inv = 1.0 / a;
return Quat<T>(w * a_inv, x * a_inv, y * a_inv, z * a_inv);
}
//! Quaternion division: right-multiplication by the inverse.
template <typename T>
inline Quat<T> Quat<T>::operator/(const Quat<T> &q) const
{
return *this * q.inv();
}
//! Read-only component access: 0 -> w, 1 -> x, 2 -> y, 3 -> z.
//! Raises StsOutOfRange for any other index.
template <typename T>
inline const T& Quat<T>::operator[](std::size_t n) const
{
switch (n) {
case 0:
return w;
case 1:
return x;
case 2:
return y;
case 3:
return z;
default:
CV_Error(Error::StsOutOfRange, "subscript exceeds the index range");
}
}
//! Mutable component access, same index mapping as the const overload.
template <typename T>
inline T& Quat<T>::operator[](std::size_t n)
{
switch (n) {
case 0:
return w;
case 1:
return x;
case 2:
return y;
case 3:
return z;
default:
CV_Error(Error::StsOutOfRange, "subscript exceeds the index range");
}
}
//! Stream output in the form: Quat [w, x, y, z].
template <typename T>
std::ostream & operator<<(std::ostream &os, const Quat<T> &q)
{
os << "Quat " << Vec<T, 4>{q.w, q.x, q.y, q.z};
return os;
}
//! Bounds-checked component access by value (forwards to operator[]).
template <typename T>
inline T Quat<T>::at(size_t index) const
{
return (*this)[index];
}
//! Conjugate: negates the vector part, keeps the scalar part.
template <typename T>
inline Quat<T> Quat<T>::conjugate() const
{
return Quat<T>(w, -x, -y, -z);
}
//! Euclidean norm sqrt(w^2 + x^2 + y^2 + z^2).
template <typename T>
inline T Quat<T>::norm() const
{
return std::sqrt(dot(*this));
}
//! Free-function form of the quaternion exponential.
template <typename T>
Quat<T> exp(const Quat<T> &q)
{
return q.exp();
}
//! Quaternion exponential: e^w * [cos|v|, sin(|v|)/|v| * v] with v = (x,y,z).
//! The sin(|v|)/|v| factor degrades to 1 for |v| -> 0 to avoid 0/0.
template <typename T>
Quat<T> Quat<T>::exp() const
{
Vec<T, 3> v{x, y, z};
T normV = std::sqrt(v.dot(v));
T k = normV < CV_QUAT_EPS ? 1 : std::sin(normV) / normV;
return std::exp(w) * Quat<T>(std::cos(normV), v[0] * k, v[1] * k, v[2] * k);
}
//! Free-function form of the quaternion logarithm.
template <typename T>
Quat<T> log(const Quat<T> &q, QuatAssumeType assumeUnit)
{
return q.log(assumeUnit);
}
//! Quaternion logarithm. With assumeUnit set, |q| is taken to be 1 and the
//! scalar part of the result is 0; otherwise the full form log|q| + ... is
//! used, rejecting (near-)zero quaternions.
//! NOTE(review): the acos argument is not clamped; w (or w/|q|) marginally
//! outside [-1, 1] from rounding would yield NaN — confirm inputs.
template <typename T>
Quat<T> Quat<T>::log(QuatAssumeType assumeUnit) const
{
Vec<T, 3> v{x, y, z};
T vNorm = std::sqrt(v.dot(v));
if (assumeUnit)
{
T k = vNorm < CV_QUAT_EPS ? 1 : std::acos(w) / vNorm;
return Quat<T>(0, v[0] * k, v[1] * k, v[2] * k);
}
T qNorm = norm();
if (qNorm < CV_QUAT_EPS)
{
CV_Error(Error::StsBadArg, "Cannot apply this quaternion to log function: undefined");
}
T k = vNorm < CV_QUAT_EPS ? 1 : std::acos(w / qNorm) / vNorm;
return Quat<T>(std::log(qNorm), v[0] * k, v[1] * k, v[2] *k);
}
//! Free-function form of the scalar power.
template <typename T, typename _T>
inline Quat<T> power(const Quat<T> &q1, _T alpha, QuatAssumeType assumeUnit)
{
return q1.power(alpha, assumeUnit);
}
//! Scalar power q^alpha via the angle-axis form: scale the rotation angle by
//! alpha (and the norm by |q|^alpha when not assumed unit). Quaternions with a
//! (near-)zero vector part fall through to plain norm scaling.
template <typename T>
template <typename _T>
inline Quat<T> Quat<T>::power(_T alpha, QuatAssumeType assumeUnit) const
{
if (x * x + y * y + z * z > CV_QUAT_EPS)
{
T angle = getAngle(assumeUnit);
Vec<T, 3> axis = getAxis(assumeUnit);
if (assumeUnit)
{
return createFromAngleAxis(alpha * angle, axis);
}
return std::pow(norm(), alpha) * createFromAngleAxis(alpha * angle, axis);
}
else
{
return std::pow(norm(), alpha) * Quat<T>(w, x, y, z);
}
}
//! Free-function form of the quaternion square root.
template <typename T>
inline Quat<T> sqrt(const Quat<T> &q, QuatAssumeType assumeUnit)
{
return q.sqrt(assumeUnit);
}
//! Square root as the 0.5 power.
template <typename T>
inline Quat<T> Quat<T>::sqrt(QuatAssumeType assumeUnit) const
{
return power(0.5, assumeUnit);
}
//! Free-function form of the quaternion power.
template <typename T>
inline Quat<T> power(const Quat<T> &p, const Quat<T> &q, QuatAssumeType assumeUnit)
{
return p.power(q, assumeUnit);
}
//! Quaternion power p^q = exp(q * log(p)).
template <typename T>
inline Quat<T> Quat<T>::power(const Quat<T> &q, QuatAssumeType assumeUnit) const
{
return cv::exp(q * log(assumeUnit));
}
//! 4-D dot product of the component vectors.
template <typename T>
inline T Quat<T>::dot(Quat<T> q1) const
{
return w * q1.w + x * q1.x + y * q1.y + z * q1.z;
}
//! Free-function form of the cross product.
template <typename T>
inline Quat<T> crossProduct(const Quat<T> &p, const Quat<T> &q)
{
return p.crossProduct(q);
}
//! 3-D cross product of the vector parts; the scalar part of the result is 0.
template <typename T>
inline Quat<T> Quat<T>::crossProduct(const Quat<T> &q) const
{
return Quat<T> (0, y * q.z - z * q.y, z * q.x - x * q.z, x * q.y - q.x * y);
}
//! Returns the unit quaternion q/|q|; rejects (near-)zero quaternions.
template <typename T>
inline Quat<T> Quat<T>::normalize() const
{
T normVal = norm();
if (normVal < CV_QUAT_EPS)
{
CV_Error(Error::StsBadArg, "Cannot normalize this quaternion: the norm is too small.");
}
return Quat<T>(w / normVal, x / normVal, y / normVal, z / normVal) ;
}
//! Free-function form of the inverse.
template <typename T>
inline Quat<T> inv(const Quat<T> &q, QuatAssumeType assumeUnit)
{
return q.inv(assumeUnit);
}
//! Multiplicative inverse: conjugate()/|q|^2. With assumeUnit set, |q| = 1 is
//! assumed and the conjugate is returned directly. Rejects (near-)zero norms.
template <typename T>
inline Quat<T> Quat<T>::inv(QuatAssumeType assumeUnit) const
{
if (assumeUnit)
{
return conjugate();
}
T norm2 = dot(*this);
if (norm2 < CV_QUAT_EPS)
{
CV_Error(Error::StsBadArg, "This quaternion do not have inverse quaternion");
}
return conjugate() / norm2;
}
// Quaternion hyperbolic/trigonometric functions. Each pair below is a free
// wrapper plus the member implementation. The member forms all follow the
// same scheme: split q into scalar w and vector v = (x, y, z), compute a
// vector scale factor k that degrades to 1 when |v| -> 0 (avoiding 0/0), and
// assemble the result component-wise.
template <typename T>
inline Quat<T> sinh(const Quat<T> &q)
{
return q.sinh();
}
//! sinh(q) = [sinh(w)cos|v|, cosh(w)sin(|v|)/|v| * v].
template <typename T>
inline Quat<T> Quat<T>::sinh() const
{
Vec<T, 3> v{x, y ,z};
T vNorm = std::sqrt(v.dot(v));
T k = vNorm < CV_QUAT_EPS ? 1 : std::cosh(w) * std::sin(vNorm) / vNorm;
return Quat<T>(std::sinh(w) * std::cos(vNorm), v[0] * k, v[1] * k, v[2] * k);
}
template <typename T>
inline Quat<T> cosh(const Quat<T> &q)
{
return q.cosh();
}
//! cosh(q) = [cosh(w)cos|v|, sinh(w)sin(|v|)/|v| * v].
template <typename T>
inline Quat<T> Quat<T>::cosh() const
{
Vec<T, 3> v{x, y ,z};
T vNorm = std::sqrt(v.dot(v));
T k = vNorm < CV_QUAT_EPS ? 1 : std::sinh(w) * std::sin(vNorm) / vNorm;
return Quat<T>(std::cosh(w) * std::cos(vNorm), v[0] * k, v[1] * k, v[2] * k);
}
template <typename T>
inline Quat<T> tanh(const Quat<T> &q)
{
return q.tanh();
}
//! tanh(q) = sinh(q) * cosh(q)^-1.
template <typename T>
inline Quat<T> Quat<T>::tanh() const
{
return sinh() * cosh().inv();
}
template <typename T>
inline Quat<T> sin(const Quat<T> &q)
{
return q.sin();
}
//! sin(q) = [sin(w)cosh|v|, cos(w)sinh(|v|)/|v| * v].
template <typename T>
inline Quat<T> Quat<T>::sin() const
{
Vec<T, 3> v{x, y ,z};
T vNorm = std::sqrt(v.dot(v));
T k = vNorm < CV_QUAT_EPS ? 1 : std::cos(w) * std::sinh(vNorm) / vNorm;
return Quat<T>(std::sin(w) * std::cosh(vNorm), v[0] * k, v[1] * k, v[2] * k);
}
template <typename T>
inline Quat<T> cos(const Quat<T> &q)
{
return q.cos();
}
//! cos(q) = [cos(w)cosh|v|, -sin(w)sinh(|v|)/|v| * v].
template <typename T>
inline Quat<T> Quat<T>::cos() const
{
Vec<T, 3> v{x, y ,z};
T vNorm = std::sqrt(v.dot(v));
T k = vNorm < CV_QUAT_EPS ? 1 : std::sin(w) * std::sinh(vNorm) / vNorm;
return Quat<T>(std::cos(w) * std::cosh(vNorm), -v[0] * k, -v[1] * k, -v[2] * k);
}
template <typename T>
inline Quat<T> tan(const Quat<T> &q)
{
return q.tan();
}
//! tan(q) = sin(q) * cos(q)^-1.
template <typename T>
inline Quat<T> Quat<T>::tan() const
{
return sin() * cos().inv();
}
// Inverse hyperbolic/trigonometric functions, each as a free wrapper plus the
// member implementation. The hyperbolic inverses use their logarithmic
// identities; the circular inverses reduce to the hyperbolic ones through the
// unit vector v/|v| (which plays the role of the imaginary unit), with |v|
// replaced by 1 when (near) zero to avoid division by zero.
template <typename T>
inline Quat<T> asinh(const Quat<T> &q)
{
return q.asinh();
}
//! asinh(q) = log(q + (q^2 + 1)^(1/2)).
template <typename T>
inline Quat<T> Quat<T>::asinh() const
{
return cv::log(*this + cv::power(*this * *this + Quat<T>(1, 0, 0, 0), 0.5));
}
template <typename T>
inline Quat<T> acosh(const Quat<T> &q)
{
return q.acosh();
}
//! acosh(q) = log(q + (q^2 - 1)^(1/2)).
template <typename T>
inline Quat<T> Quat<T>::acosh() const
{
return cv::log(*this + cv::power(*this * *this - Quat<T>(1,0,0,0), 0.5));
}
template <typename T>
inline Quat<T> atanh(const Quat<T> &q)
{
return q.atanh();
}
//! atanh(q) = (log(1 + q) - log(1 - q)) / 2.
template <typename T>
inline Quat<T> Quat<T>::atanh() const
{
Quat<T> ident(1, 0, 0, 0);
Quat<T> c1 = (ident + *this).log();
Quat<T> c2 = (ident - *this).log();
return 0.5 * (c1 - c2);
}
template <typename T>
inline Quat<T> asin(const Quat<T> &q)
{
return q.asin();
}
//! asin(q) = -u * asinh(q * u), with u = v/|v|.
template <typename T>
inline Quat<T> Quat<T>::asin() const
{
Quat<T> v(0, x, y, z);
T vNorm = v.norm();
T k = vNorm < CV_QUAT_EPS ? 1 : vNorm;
return -v / k * (*this * v / k).asinh();
}
template <typename T>
inline Quat<T> acos(const Quat<T> &q)
{
return q.acos();
}
//! acos(q) = -u * acosh(q), with u = v/|v|.
template <typename T>
inline Quat<T> Quat<T>::acos() const
{
Quat<T> v(0, x, y, z);
T vNorm = v.norm();
T k = vNorm < CV_QUAT_EPS ? 1 : vNorm;
return -v / k * acosh();
}
template <typename T>
inline Quat<T> atan(const Quat<T> &q)
{
return q.atan();
}
//! atan(q) = -u * atanh(q * u), with u = v/|v|.
template <typename T>
inline Quat<T> Quat<T>::atan() const
{
Quat<T> v(0, x, y, z);
T vNorm = v.norm();
T k = vNorm < CV_QUAT_EPS ? 1 : vNorm;
return -v / k * (*this * v / k).atanh();
}
//! Rotation angle 2*acos(w) (w normalized first unless assumeUnit). Rejects
//! (near-)zero quaternions in the non-unit path.
//! NOTE(review): with assumeUnit set, a w marginally outside [-1, 1] from
//! rounding makes acos return NaN — confirm callers keep inputs normalized.
template <typename T>
inline T Quat<T>::getAngle(QuatAssumeType assumeUnit) const
{
if (assumeUnit)
{
return 2 * std::acos(w);
}
if (norm() < CV_QUAT_EPS)
{
CV_Error(Error::StsBadArg, "This quaternion does not represent a rotation");
}
return 2 * std::acos(w / norm());
}
//! Rotation axis: the vector part scaled by 1/sin(angle/2) (and by 1/|q| in
//! the non-unit path). For the identity rotation (angle == 0) sin_v is 0, so
//! the division produces non-finite components — callers must handle that case.
template <typename T>
inline Vec<T, 3> Quat<T>::getAxis(QuatAssumeType assumeUnit) const
{
T angle = getAngle(assumeUnit);
const T sin_v = std::sin(angle * 0.5);
if (assumeUnit)
{
return Vec<T, 3>{x, y, z} / sin_v;
}
return Vec<T, 3> {x, y, z} / (norm() * sin_v);
}
//! Homogeneous 4x4 rotation matrix for this quaternion (last row/column is
//! the identity). The quaternion is normalized first unless assumeUnit.
template <typename T>
Matx<T, 4, 4> Quat<T>::toRotMat4x4(QuatAssumeType assumeUnit) const
{
T a = w, b = x, c = y, d = z;
if (!assumeUnit)
{
Quat<T> qTemp = normalize();
a = qTemp.w;
b = qTemp.x;
c = qTemp.y;
d = qTemp.z;
}
Matx<T, 4, 4> R{
1 - 2 * (c * c + d * d), 2 * (b * c - a * d) , 2 * (b * d + a * c) , 0,
2 * (b * c + a * d) , 1 - 2 * (b * b + d * d), 2 * (c * d - a * b) , 0,
2 * (b * d - a * c) , 2 * (c * d + a * b) , 1 - 2 * (b * b + c * c), 0,
0 , 0 , 0 , 1,
};
return R;
}
//! 3x3 rotation matrix: the upper-left block of toRotMat4x4, same convention.
template <typename T>
Matx<T, 3, 3> Quat<T>::toRotMat3x3(QuatAssumeType assumeUnit) const
{
T a = w, b = x, c = y, d = z;
if (!assumeUnit)
{
Quat<T> qTemp = normalize();
a = qTemp.w;
b = qTemp.x;
c = qTemp.y;
d = qTemp.z;
}
Matx<T, 3, 3> R{
1 - 2 * (c * c + d * d), 2 * (b * c - a * d) , 2 * (b * d + a * c),
2 * (b * c + a * d) , 1 - 2 * (b * b + d * d), 2 * (c * d - a * b),
2 * (b * d - a * c) , 2 * (c * d + a * b) , 1 - 2 * (b * b + c * c)
};
return R;
}
//! Rodrigues rotation vector: axis scaled by the rotation angle. Inherits the
//! getAxis() caveat for the identity rotation (angle == 0).
template <typename T>
Vec<T, 3> Quat<T>::toRotVec(QuatAssumeType assumeUnit) const
{
T angle = getAngle(assumeUnit);
Vec<T, 3> axis = getAxis(assumeUnit);
return angle * axis;
}
//! Components as a 4-vector [w, x, y, z].
template <typename T>
Vec<T, 4> Quat<T>::toVec() const
{
return Vec<T, 4>{w, x, y, z};
}
//! Linear interpolation (1-t)*q0 + t*q1; no normalization is applied.
template <typename T>
Quat<T> Quat<T>::lerp(const Quat<T> &q0, const Quat<T> &q1, const T t)
{
return (1 - t) * q0 + t * q1;
}
/**
 * Spherical linear interpolation between q0 and q1 at parameter t.
 * Inputs are normalized unless assumeUnit; when the quaternions are nearly
 * parallel (dot > 0.995) the cheaper nlerp is used instead; with directChange
 * set, q0 is flipped to take the shorter arc when the dot product is negative.
 *
 * Fix: the working copies were declared as Quatd (double) regardless of T,
 * which does not compile for Quat<float> and silently forced double precision
 * otherwise. They now use the template's own type Quat<T>.
 */
template <typename T>
Quat<T> Quat<T>::slerp(const Quat<T> &q0, const Quat<T> &q1, const T t, QuatAssumeType assumeUnit, bool directChange)
{
    Quat<T> v0(q0);
    Quat<T> v1(q1);
    if (!assumeUnit)
    {
        v0 = v0.normalize();
        v1 = v1.normalize();
    }
    T cosTheta = v0.dot(v1);
    constexpr T DOT_THRESHOLD = 0.995;
    if (cosTheta > DOT_THRESHOLD)
    {
        return nlerp(v0, v1, t, QUAT_ASSUME_UNIT);
    }
    if (directChange && cosTheta < 0)
    {
        v0 = -v0;
        cosTheta = -cosTheta;
    }
    T sinTheta = std::sqrt(1 - cosTheta * cosTheta);
    T angle = atan2(sinTheta, cosTheta);
    return (std::sin((1 - t) * angle) / (sinTheta) * v0 + std::sin(t * angle) / (sinTheta) * v1).normalize();
}
/**
 * Normalized linear interpolation between q0 and q1 at parameter t.
 * q0 is flipped first when the pair points into opposite hemispheres
 * (negative dot product) so the blend takes the shorter arc; the blended
 * result is always re-normalized before being returned.
 */
template <typename T>
inline Quat<T> Quat<T>::nlerp(const Quat<T> &q0, const Quat<T> &q1, const T t, QuatAssumeType assumeUnit)
{
    Quat<T> a(q0);
    Quat<T> b(q1);
    if (b.dot(a) < 0)
        a = -a;
    if (!assumeUnit)
    {
        a = a.normalize();
        b = b.normalize();
    }
    return ((1 - t) * a + t * b).normalize();
}
/**
 * Returns true when the norm lies inside the open interval (1-eps, 1+eps),
 * i.e. the quaternion is (approximately) a unit quaternion.
 */
template <typename T>
inline bool Quat<T>::isNormal(T eps) const
{
    const double n = norm();  // widened to double, as in the original check
    return (n > 1 - eps) && (n < 1 + eps);
}

/**
 * Raises StsBadArg unless the quaternion is normalized to within eps.
 */
template <typename T>
inline void Quat<T>::assertNormal(T eps) const
{
    if (!isNormal(eps))
        CV_Error(Error::StsBadArg, "Quaternion should be normalized");
}
//! Spherical quadrangle interpolation: two slerps between the end points
//! (v0->v3) and the control points (v1->v2), blended by a third slerp with
//! parameter 2t(1-t). Inputs are normalized unless assumeUnit.
template <typename T>
inline Quat<T> Quat<T>::squad(const Quat<T> &q0, const Quat<T> &q1,
const Quat<T> &q2, const Quat<T> &q3,
const T t, QuatAssumeType assumeUnit,
bool directChange)
{
Quat<T> v0(q0), v1(q1), v2(q2), v3(q3);
if (!assumeUnit)
{
v0 = v0.normalize();
v1 = v1.normalize();
v2 = v2.normalize();
v3 = v3.normalize();
}
Quat<T> c0 = slerp(v0, v3, t, assumeUnit, directChange);
Quat<T> c1 = slerp(v1, v2, t, assumeUnit, directChange);
return slerp(c0, c1, 2 * t * (1 - t), assumeUnit, directChange);
}
//! Computes the intermediate control quaternion for v1 from its neighbors v0
//! and v2: v1 * exp(-(log(v1* v0) + log(v1* v2)) / 4), used by spline().
//! Inputs are normalized unless assumeUnit.
template <typename T>
Quat<T> Quat<T>::interPoint(const Quat<T> &q0, const Quat<T> &q1,
const Quat<T> &q2, QuatAssumeType assumeUnit)
{
Quat<T> v0(q0), v1(q1), v2(q2);
if (!assumeUnit)
{
v0 = v0.normalize();
v1 = v1.normalize();
v2 = v2.normalize();
}
return v1 * cv::exp(-(cv::log(v1.conjugate() * v0, assumeUnit) + (cv::log(v1.conjugate() * v2, assumeUnit))) / 4);
}
/**
 * Squad-based spline through q1..q2 at parameter t, using q0 and q3 as outer
 * support points. Inputs are normalized unless assumeUnit; consecutive
 * quaternions with a negative dot product are flipped so the curve follows
 * the shorter arcs; interPoint() supplies the inner control quaternions.
 *
 * Fix: the working copies were declared as Quatd (double) regardless of T,
 * which does not compile for Quat<float> and silently forced double precision
 * otherwise. They now use the template's own type Quat<T>.
 */
template <typename T>
Quat<T> Quat<T>::spline(const Quat<T> &q0, const Quat<T> &q1, const Quat<T> &q2, const Quat<T> &q3, const T t, QuatAssumeType assumeUnit)
{
    Quat<T> v0(q0), v1(q1), v2(q2), v3(q3);
    if (!assumeUnit)
    {
        v0 = v0.normalize();
        v1 = v1.normalize();
        v2 = v2.normalize();
        v3 = v3.normalize();
    }
    T cosTheta;
    std::vector<Quat<T>> vec{v0, v1, v2, v3};
    // Flip successors with negative dot product so neighbors share a hemisphere.
    for (size_t i = 0; i < 3; ++i)
    {
        cosTheta = vec[i].dot(vec[i + 1]);
        if (cosTheta < 0)
        {
            vec[i + 1] = -vec[i + 1];
        }
    }
    Quat<T> s1 = interPoint(vec[0], vec[1], vec[2], QUAT_ASSUME_UNIT);
    Quat<T> s2 = interPoint(vec[1], vec[2], vec[3], QUAT_ASSUME_UNIT);
    return squad(vec[1], s1, s2, vec[2], t, assumeUnit, QUAT_ASSUME_NOT_UNIT);
}
} // namespace cv
//! @endcond
#endif /*OPENCV_CORE_QUATERNION_INL_HPP*/

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import <opencv2/opencv.hpp>
#import <opencv2/core.hpp>
#else
#define CV_EXPORTS
#endif

@ -5,7 +5,7 @@
//
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif
@ -97,6 +97,7 @@ CV_EXPORTS @interface Mat : NSObject
- (void)createEx:(NSArray<NSNumber*>*)sizes type:(int)type NS_SWIFT_NAME(create(sizes:type:));
- (void)copySize:(Mat*)mat;
- (Mat*)cross:(Mat*)mat;
- (unsigned char*)dataPtr NS_SWIFT_NAME(dataPointer());
- (int)depth;
- (Mat*)diag:(int)diagonal;
- (Mat*)diag;

@ -286,6 +286,10 @@ static bool updateIdx(cv::Mat* mat, std::vector<int>& indices, int inc) {
return [[Mat alloc] initWithNativeMat:new cv::Mat(_nativePtr->cross(*(cv::Mat*)mat.nativePtr))];
}
// Raw pointer to the wrapped cv::Mat's pixel buffer (cv::Mat::data).
// Valid only while the native Mat keeps its current allocation.
- (unsigned char*)dataPtr {
return _nativePtr->data;
}
// Returns cv::Mat::depth() of the wrapped matrix (per-channel element type).
- (int)depth {
return _nativePtr->depth();
}

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

@ -7,7 +7,7 @@
#pragma once
#ifdef __cplusplus
#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save