diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 81e18973f7..42e8650ad4 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -146,46 +146,76 @@ if(BUILD_DOCS AND HAVE_SPHINX)
endif()
# ========= Doxygen docs =========
+
+macro(make_reference result modules_list black_list)
+ set(_res)
+ foreach(m ${${modules_list}})
+ list(FIND ${black_list} ${m} _pos)
+ if(${_pos} EQUAL -1)
+ set(_res "${_res} @ref ${m} | ${m} \n")
+ endif()
+ endforeach()
+ set(${result} ${_res})
+endmacro()
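+
+# A hypothetical illustration of the macro above: with list(APPEND mods "core" "imgproc" "ts")
+# and list(APPEND skip "ts"), make_reference(out mods skip) would set ${out} to two markdown
+# table rows, " @ref core | core" and " @ref imgproc | imgproc", skipping the blacklisted "ts".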
+
if(BUILD_DOCS AND HAVE_DOXYGEN)
- # documented modules list
- set(candidates)
- list(APPEND candidates ${BASE_MODULES} ${EXTRA_MODULES})
- # blacklisted modules
- ocv_list_filterout(candidates "^ts$")
+ # not documented modules list
+ list(APPEND blacklist "ts" "java" "python2" "python3" "world")
# gathering headers
- set(all_headers) # files and dirs to process
- set(all_images) # image search paths
- set(reflist) # modules reference
- foreach(m ${candidates})
- set(reflist "${reflist} \n- @subpage ${m}")
- set(all_headers ${all_headers} "${OPENCV_MODULE_opencv_${m}_HEADERS}")
- set(docs_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/doc")
- if(EXISTS ${docs_dir})
- set(all_images ${all_images} ${docs_dir})
- set(all_headers ${all_headers} ${docs_dir})
+ set(paths_include)
+ set(paths_doc)
+ set(paths_bib)
+ set(deps)
+ foreach(m ${BASE_MODULES} ${EXTRA_MODULES})
+ list(FIND blacklist ${m} _pos)
+ if(${_pos} EQUAL -1)
+ # include folder
+ set(header_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/include")
+ if(EXISTS "${header_dir}")
+ list(APPEND paths_include "${header_dir}")
+ list(APPEND deps ${header_dir})
+ endif()
+ # doc folder
+ set(docs_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/doc")
+ if(EXISTS "${docs_dir}")
+ list(APPEND paths_doc "${docs_dir}")
+ list(APPEND deps ${docs_dir})
+ endif()
+ # BiBTeX file
+ set(bib_file "${docs_dir}/${m}.bib")
+ if(EXISTS "${bib_file}")
+ set(paths_bib "${paths_bib} ${bib_file}")
+ list(APPEND deps ${bib_file})
+ endif()
endif()
endforeach()
+
# additional config
set(doxyfile "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile")
set(rootfile "${CMAKE_CURRENT_BINARY_DIR}/root.markdown")
- set(all_headers ${all_headers} ${rootfile})
- string(REGEX REPLACE ";" " \\\\\\n" CMAKE_DOXYGEN_INPUT_LIST "${all_headers}")
- string(REGEX REPLACE ";" " \\\\\\n" CMAKE_DOXYGEN_IMAGE_PATH "${all_images}")
+ set(bibfile "${CMAKE_CURRENT_SOURCE_DIR}/opencv.bib")
+ string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${paths_include} ; ${paths_doc}")
+ string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${paths_doc}")
+ string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXAMPLE_PATH "${CMAKE_SOURCE_DIR}/samples/cpp ; ${paths_doc}")
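+  # For example, the REPLACE calls above turn a list such as "a;b;c" into the
+  # backslash-continued form expected by the Doxyfile template:
+  #   a \
+  #   b \
+  #   c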
set(CMAKE_DOXYGEN_LAYOUT "${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml")
set(CMAKE_DOXYGEN_OUTPUT_PATH "doxygen")
- set(CMAKE_DOXYGEN_MODULES_REFERENCE "${reflist}")
- set(CMAKE_DOXYGEN_EXAMPLE_PATH "${CMAKE_SOURCE_DIR}/samples/cpp")
+ set(CMAKE_EXTRA_BIB_FILES "${bibfile} ${paths_bib}")
+
+ # generate references
+ make_reference(CMAKE_DOXYGEN_MAIN_REFERENCE BASE_MODULES blacklist)
+ make_reference(CMAKE_DOXYGEN_EXTRA_REFERENCE EXTRA_MODULES blacklist)
# writing file
configure_file(Doxyfile.in ${doxyfile} @ONLY)
configure_file(root.markdown.in ${rootfile} @ONLY)
configure_file(mymath.sty "${CMAKE_DOXYGEN_OUTPUT_PATH}/html/mymath.sty" @ONLY)
+ configure_file(mymath.sty "${CMAKE_DOXYGEN_OUTPUT_PATH}/latex/mymath.sty" @ONLY)
add_custom_target(doxygen
COMMAND ${DOXYGEN_BUILD} ${doxyfile}
- DEPENDS ${doxyfile} ${all_headers} ${all_images})
+    DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps})
endif()
if(HAVE_DOC_GENERATOR)
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index c8222c77b6..624e83bae9 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -85,7 +85,7 @@ SHOW_FILES = YES
SHOW_NAMESPACES = YES
FILE_VERSION_FILTER =
LAYOUT_FILE = @CMAKE_DOXYGEN_LAYOUT@
-CITE_BIB_FILES = @CMAKE_CURRENT_SOURCE_DIR@/opencv.bib
+CITE_BIB_FILES = @CMAKE_EXTRA_BIB_FILES@
QUIET = YES
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
@@ -99,7 +99,7 @@ FILE_PATTERNS =
RECURSIVE = YES
EXCLUDE =
EXCLUDE_SYMLINKS = NO
-EXCLUDE_PATTERNS =
+EXCLUDE_PATTERNS = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp
EXCLUDE_SYMBOLS = cv::DataType<*> int
EXAMPLE_PATH = @CMAKE_DOXYGEN_EXAMPLE_PATH@
EXAMPLE_PATTERNS = *
@@ -119,7 +119,7 @@ REFERENCES_LINK_SOURCE = YES
SOURCE_TOOLTIPS = YES
USE_HTAGS = NO
VERBATIM_HEADERS = NO
-ALPHABETICAL_INDEX = NO
+ALPHABETICAL_INDEX = YES
COLS_IN_ALPHA_INDEX = 5
IGNORE_PREFIX =
GENERATE_HTML = YES
@@ -222,6 +222,7 @@ INCLUDE_FILE_PATTERNS =
PREDEFINED = __cplusplus=1 \
HAVE_IPP_A=1 \
CVAPI(x)=x \
+ CV_DOXYGEN= \
CV_EXPORTS= \
CV_EXPORTS_W= \
CV_EXPORTS_W_SIMPLE= \
@@ -241,7 +242,8 @@ PREDEFINED = __cplusplus=1 \
CV_INLINE= \
CV_NORETURN= \
CV_DEFAULT(x)=" = x" \
- CV_NEON=1
+ CV_NEON=1 \
+ FLANN_DEPRECATED=
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
TAGFILES =
diff --git a/doc/disabled_doc_warnings.txt b/doc/disabled_doc_warnings.txt
new file mode 100644
index 0000000000..8c81b8dd65
--- /dev/null
+++ b/doc/disabled_doc_warnings.txt
@@ -0,0 +1,2 @@
+# doxygen citelist build workaround
+citelist : .*Unexpected new line character.*
diff --git a/doc/mymath.js b/doc/mymath.js
index 13ee86a533..d9af0350fe 100644
--- a/doc/mymath.js
+++ b/doc/mymath.js
@@ -8,7 +8,8 @@ MathJax.Hub.Config(
forkthree: ["\\left\\{ \\begin{array}{l l} #1 & \\mbox{#2}\\\\ #3 & \\mbox{#4}\\\\ #5 & \\mbox{#6}\\\\ \\end{array} \\right.", 6],
vecthree: ["\\begin{bmatrix} #1\\\\ #2\\\\ #3 \\end{bmatrix}", 3],
vecthreethree: ["\\begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \\end{bmatrix}", 9],
- hdotsfor: ["\\dots", 1]
+ hdotsfor: ["\\dots", 1],
+ mathbbm: ["\\mathbb{#1}", 1]
}
}
}
diff --git a/doc/mymath.sty b/doc/mymath.sty
index 24dae263a5..08ab50d2b4 100644
--- a/doc/mymath.sty
+++ b/doc/mymath.sty
@@ -3,6 +3,7 @@
\usepackage{euler}
\usepackage{amssymb}
\usepackage{amsmath}
+\usepackage{bbm}
\newcommand{\matTT}[9]{
\[
diff --git a/doc/opencv.bib b/doc/opencv.bib
index ad993b07ab..09206587a2 100644
--- a/doc/opencv.bib
+++ b/doc/opencv.bib
@@ -1,427 +1,826 @@
-@inproceedings{Agrawal08,
- author = {Agrawal, M. and Konolige, K. and Blas, M.R.},
- title = {CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching},
- booktitle = {ECCV08},
- year = {2008},
- pages = {IV: 102-115},
- bibsource = {http://www.visionbib.com/bibliography/twod276.html#TT22337}
+@comment{Bib-it,
+ This file was created by Bib-it 1.4
+ 97 entries written
+}
+
+@INCOLLECTION{ABD12,
+ author = {Alcantarilla, Pablo Fern{\'a}ndez and Bartoli, Adrien and Davison, Andrew J},
+ title = {KAZE features},
+ booktitle = {Computer Vision--ECCV 2012},
+ year = {2012},
+ pages = {214--227},
+ publisher = {Springer}
+}
+@ARTICLE{ANB13,
+ author = {Alcantarilla, Pablo F and Nuevo, Jes{\'u}s and Bartoli, Adrien},
+ title = {Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces},
+ year = {2011},
+ pages = {1281--1298},
+  journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ volume = {34},
+ number = {7}
+}
+@ARTICLE{BA83,
+ author = {Burt, Peter J and Adelson, Edward H},
+ title = {A multiresolution spline with application to image mosaics},
+ year = {1983},
+ pages = {217--236},
+ journal = {ACM Transactions on Graphics (TOG)},
+ volume = {2},
+ number = {4},
+ publisher = {ACM}
+}
+@ARTICLE{BL07,
+ author = {Brown, Matthew and Lowe, David G},
+ title = {Automatic panoramic image stitching using invariant features},
+ year = {2007},
+ pages = {59--73},
+ journal = {International journal of computer vision},
+ volume = {74},
+ number = {1},
+ publisher = {Springer}
+}
+@ARTICLE{BT96,
+ author = {Birchfield, Stan and Tomasi, Carlo},
+ title = {Depth discontinuities by pixel-to-pixel stereo},
+ year = {1999},
+ pages = {269--293},
+ journal = {International Journal of Computer Vision},
+ volume = {35},
+ number = {3},
+ publisher = {Springer}
+}
+@ARTICLE{BT98,
+ author = {Birchfield, Stan and Tomasi, Carlo},
+ title = {A pixel dissimilarity measure that is insensitive to image sampling},
+ year = {1998},
+ pages = {401--406},
+ journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ volume = {20},
+ number = {4},
+ publisher = {IEEE}
+}
+@ARTICLE{Ballard1981,
+ author = {Ballard, Dana H},
+ title = {Generalizing the Hough transform to detect arbitrary shapes},
+ year = {1981},
+ pages = {111--122},
+ journal = {Pattern recognition},
+ volume = {13},
+ number = {2},
+ publisher = {Elsevier}
+}
+@ARTICLE{Borgefors86,
+ author = {Borgefors, Gunilla},
+ title = {Distance transformations in digital images},
+ year = {1986},
+ pages = {344--371},
+ journal = {Computer vision, graphics, and image processing},
+ volume = {34},
+ number = {3},
+ publisher = {Elsevier}
+}
+@ARTICLE{Bouguet00,
+ author = {Bouguet, Jean-Yves},
+  title = {Pyramidal implementation of the affine Lucas Kanade feature tracker: Description of the algorithm},
+ year = {2001},
+ journal = {Intel Corporation},
+ volume = {5}
+}
+@MISC{BouguetMCT,
+ author = {Bouguet, Jean-Yves},
+  title = {Camera Calibration Toolbox for Matlab},
+ year = {2004}
+}
+@INPROCEEDINGS{Bradski00,
+ author = {Bradski, GR and Davis, J},
+ title = {Motion segmentation and pose recognition with motion history gradients},
+ booktitle = {Applications of Computer Vision, 2000, Fifth IEEE Workshop on.},
+ year = {2000},
+ pages = {238--244},
+ organization = {IEEE}
+}
+@ARTICLE{Bradski98,
+ author = {Bradski, Gary R},
+ title = {Computer vision face tracking for use in a perceptual user interface},
+ year = {1998},
+ publisher = {Citeseer}
+}
+@ARTICLE{Breiman84,
+  author = {Breiman, Leo and Friedman, Jerome H and Olshen, Richard A and Stone, Charles J},
+ title = {Classification and regression trees},
+ year = {1984},
+ journal = {Wadsworth International Group}
+}
+@INCOLLECTION{Brox2004,
+ author = {Brox, Thomas and Bruhn, Andres and Papenberg, Nils and Weickert, Joachim},
+ title = {High accuracy optical flow estimation based on a theory for warping},
+ booktitle = {Computer Vision-ECCV 2004},
+ year = {2004},
+ pages = {25--36},
+ publisher = {Springer}
+}
+@ARTICLE{Burges98,
+ author = {Burges, Christopher JC},
+ title = {A tutorial on support vector machines for pattern recognition},
+ year = {1998},
+ pages = {121--167},
+ journal = {Data mining and knowledge discovery},
+ volume = {2},
+ number = {2},
+ publisher = {Springer}
+}
+@INPROCEEDINGS{CL12,
+ author = {Lu, Cewu and Xu, Li and Jia, Jiaya},
+ title = {Contrast preserving decolorization},
+ booktitle = {Computational Photography (ICCP), 2012 IEEE International Conference on},
+ year = {2012},
+ pages = {1--7},
+ organization = {IEEE}
+}
+@ARTICLE{Canny86,
+ author = {Canny, John},
+ title = {A computational approach to edge detection},
+ year = {1986},
+ pages = {679--698},
+ journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ number = {6},
+ publisher = {IEEE}
+}
+@ARTICLE{ChambolleEtAl,
+ author = {Chambolle, Antonin and Caselles, Vicent and Cremers, Daniel and Novaga, Matteo and Pock, Thomas},
+ title = {An introduction to total variation for image analysis},
+ year = {2010},
+ pages = {263--340},
+ journal = {Theoretical foundations and numerical methods for sparse recovery},
+ volume = {9},
+ publisher = {Walter de Gruyter}
+}
+@INPROCEEDINGS{DD02,
+ author = {Durand, Fr{\'e}do and Dorsey, Julie},
+ title = {Fast bilateral filtering for the display of high-dynamic-range images},
+ booktitle = {ACM Transactions on Graphics (TOG)},
+ year = {2002},
+ pages = {257--266},
+ volume = {21},
+ number = {3},
+ organization = {ACM}
+}
+@INPROCEEDINGS{DM03,
+ author = {Drago, Fr{\'e}d{\'e}ric and Myszkowski, Karol and Annen, Thomas and Chiba, Norishige},
+ title = {Adaptive logarithmic mapping for displaying high contrast scenes},
+ booktitle = {Computer Graphics Forum},
+ year = {2003},
+ pages = {419--426},
+ volume = {22},
+ number = {3},
+ organization = {Wiley Online Library}
+}
+@INPROCEEDINGS{DM97,
+ author = {Debevec, Paul E and Malik, Jitendra},
+ title = {Recovering high dynamic range radiance maps from photographs},
+ booktitle = {ACM SIGGRAPH 2008 classes},
+ year = {2008},
+ pages = {31},
+ organization = {ACM}
+}
+@INPROCEEDINGS{Dalal2005,
+ author = {Dalal, Navneet and Triggs, Bill},
+ title = {Histograms of oriented gradients for human detection},
+ booktitle = {Computer Vision and Pattern Recognition, 2005. CVPR 2005. IEEE Computer Society Conference on},
+ year = {2005},
+ pages = {886--893},
+ volume = {1},
+ organization = {IEEE}
+}
+@INPROCEEDINGS{Davis97,
+ author = {Davis, James W and Bobick, Aaron F},
+ title = {The representation and recognition of human movement using temporal templates},
+ booktitle = {Computer Vision and Pattern Recognition, 1997. Proceedings., 1997 IEEE Computer Society Conference on},
+ year = {1997},
+ pages = {928--934},
+ organization = {IEEE}
+}
+@INPROCEEDINGS{EM11,
+ author = {Gastal, Eduardo SL and Oliveira, Manuel M},
+ title = {Domain transform for edge-aware image and video processing},
+ booktitle = {ACM Transactions on Graphics (TOG)},
+ year = {2011},
+ pages = {69},
+ volume = {30},
+ number = {4},
+ organization = {ACM}
+}
+@ARTICLE{EP08,
+ author = {Evangelidis, Georgios D and Psarakis, Emmanouil Z},
+ title = {Parametric image alignment using enhanced correlation coefficient maximization},
+ year = {2008},
+ pages = {1858--1865},
+ journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ volume = {30},
+ number = {10},
+ publisher = {IEEE}
+}
+@INPROCEEDINGS{FGD2003,
+ author = {Li, Liyuan and Huang, Weimin and Gu, Irene YH and Tian, Qi},
+ title = {Foreground object detection from videos containing complex background},
+ booktitle = {Proceedings of the eleventh ACM international conference on Multimedia},
+ year = {2003},
+ pages = {2--10},
+ organization = {ACM}
+}
+@ARTICLE{FHT98,
+ author = {Friedman, Jerome and Hastie, Trevor and Tibshirani, Robert},
+ title = {Additive Logistic Regression: a Statistical View of Boosting},
+ year = {1998}
+}
+@INPROCEEDINGS{FL02,
+ author = {Fattal, Raanan and Lischinski, Dani and Werman, Michael},
+ title = {Gradient domain high dynamic range compression},
+ booktitle = {ACM Transactions on Graphics (TOG)},
+ year = {2002},
+ pages = {249--256},
+ volume = {21},
+ number = {3},
+ organization = {ACM}
+}
+@INCOLLECTION{Farneback2003,
+ author = {Farneb{\"a}ck, Gunnar},
+ title = {Two-frame motion estimation based on polynomial expansion},
+ booktitle = {Image Analysis},
+ year = {2003},
+ pages = {363--370},
+ publisher = {Springer}
+}
+@INPROCEEDINGS{Farsiu03,
+ author = {Farsiu, Sina and Robinson, Dirk and Elad, Michael and Milanfar, Peyman},
+ title = {Fast and robust super-resolution},
+ booktitle = {Image Processing, 2003. ICIP 2003. Proceedings. 2003 International Conference on},
+ year = {2003},
+ pages = {II--291},
+ volume = {2},
+ organization = {IEEE}
+}
+@TECHREPORT{Felzenszwalb04,
+ author = {Felzenszwalb, Pedro and Huttenlocher, Daniel},
+ title = {Distance transforms of sampled functions},
+ year = {2004},
+ institution = {Cornell University}
+}
+@ARTICLE{Felzenszwalb10,
+ author = {Felzenszwalb, Pedro F and Girshick, Ross B and McAllester, David and Ramanan, Deva},
+ title = {Object detection with discriminatively trained part-based models},
+ year = {2010},
+ pages = {1627--1645},
+ journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ volume = {32},
+ number = {9},
+ publisher = {IEEE}
+}
+@ARTICLE{Felzenszwalb2006,
+ author = {Felzenszwalb, Pedro F and Huttenlocher, Daniel P},
+ title = {Efficient belief propagation for early vision},
+ year = {2006},
+ pages = {41--54},
+ journal = {International journal of computer vision},
+ volume = {70},
+ number = {1},
+ publisher = {Springer}
+}
+@INPROCEEDINGS{Fitzgibbon95,
+ author = {Fitzgibbon, Andrew W and Fisher, Robert B},
+ title = {A buyer's guide to conic fitting},
+ booktitle = {Proceedings of the 6th British conference on Machine vision (Vol. 2)},
+ year = {1995},
+ pages = {513--522},
+ organization = {BMVA Press}
+}
+@INPROCEEDINGS{G11,
+ author = {Grundmann, Matthias and Kwatra, Vivek and Essa, Irfan},
+ title = {Auto-directed video stabilization with robust l1 optimal camera paths},
+ booktitle = {Computer Vision and Pattern Recognition (CVPR), 2011 IEEE Conference on},
+ year = {2011},
+ pages = {225--232},
+ organization = {IEEE}
+}
+@ARTICLE{GW03,
+ author = {Ward, Greg},
+ title = {Fast, robust image registration for compositing high dynamic range photographs from hand-held exposures},
+ year = {2003},
+ pages = {17--30},
+ journal = {Journal of graphics tools},
+ volume = {8},
+ number = {2},
+ publisher = {Taylor \& Francis}
+}
+@INPROCEEDINGS{Gold2012,
+ author = {Godbehere, Andrew B and Matsukawa, Akihiro and Goldberg, Ken},
+ title = {Visual tracking of human visitors under variable-lighting conditions for a responsive audio art installation},
+ booktitle = {American Control Conference (ACC), 2012},
+ year = {2012},
+ pages = {4305--4312},
+ organization = {IEEE}
+}
+@ARTICLE{Guil1999,
+ author = {Guil, N and Gonzalez-Linares, Jos{\'e} Mar{\'\i}a and Zapata, Emilio L},
+ title = {Bidimensional shape detection using an invariant approach},
+ year = {1999},
+ pages = {1025--1038},
+ journal = {Pattern Recognition},
+ volume = {32},
+ number = {6},
+ publisher = {Elsevier}
+}
+@ARTICLE{HH08,
+ author = {Hirschmuller, Heiko},
+ title = {Stereo processing by semiglobal matching and mutual information},
+ year = {2008},
+ pages = {328--341},
+ journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ volume = {30},
+ number = {2},
+ publisher = {IEEE}
+}
+@ARTICLE{HTF01,
+  author = {Hastie, Trevor and Tibshirani, Robert and Friedman, Jerome},
+ title = {The elements of statistical learning: data mining, inference and prediction},
+ year = {2001},
+ pages = {371--406},
+ journal = {New York: Springer-Verlag},
+ volume = {1},
+ number = {8}
+}
+@ARTICLE{Hartley99,
+ author = {Hartley, Richard I},
+ title = {Theory and practice of projective rectification},
+ year = {1999},
+ pages = {115--127},
+ journal = {International Journal of Computer Vision},
+ volume = {35},
+ number = {2},
+ publisher = {Springer}
+}
+@BOOK{HartleyZ00,
+ author = {Hartley, Richard and Zisserman, Andrew},
+ title = {Multiple view geometry in computer vision},
+ year = {2003},
+ publisher = {Cambridge university press}
+}
+@ARTICLE{Horn81,
+ author = {Horn, Berthold KP and Schunck, Brian G},
+ title = {Determining Optical Flow},
+ year = {1981},
+ pages = {185--203},
+ journal = {Artificial Intelligence},
+ volume = {17}
+}
+@ARTICLE{Hu62,
+ author = {Hu, Ming-Kuei},
+ title = {Visual pattern recognition by moment invariants},
+ year = {1962},
+ pages = {179--187},
+ journal = {Information Theory, IRE Transactions on},
+ volume = {8},
+ number = {2},
+ publisher = {IEEE}
+}
+@ARTICLE{Javier2012,
+ author = {S{\'a}nchez P{\'e}rez, Javier and Meinhardt-Llopis, Enric and Facciolo, Gabriele},
+ title = {TV-L1 optical flow estimation},
+ year = {2012}
+}
+@ARTICLE{KleeLaskowski85,
+ author = {Klee, Victor and Laskowski, Michael C},
+ title = {Finding the smallest triangles containing a given convex polygon},
+ year = {1985},
+ pages = {359--375},
+ journal = {Journal of Algorithms},
+ volume = {6},
+ number = {3},
+ publisher = {Elsevier}
+}
+@INPROCEEDINGS{Kolmogorov03,
+ author = {Kim, Junhwan and Kolmogorov, Vladimir and Zabih, Ramin},
+ title = {Visual correspondence using energy minimization and mutual information},
+ booktitle = {Computer Vision, 2003. Proceedings. Ninth IEEE International Conference on},
+ year = {2003},
+ pages = {1033--1040},
+ organization = {IEEE}
+}
+@INPROCEEDINGS{LCS11,
+ author = {Leutenegger, Stefan and Chli, Margarita and Siegwart, Roland Yves},
+ title = {BRISK: Binary robust invariant scalable keypoints},
+ booktitle = {Computer Vision (ICCV), 2011 IEEE International Conference on},
+ year = {2011},
+ pages = {2548--2555},
+ organization = {IEEE}
+}
+@ARTICLE{LibSVM,
+ author = {Chang, Chih-Chung and Lin, Chih-Jen},
+ title = {LIBSVM: a library for support vector machines},
+ year = {2011},
+ pages = {27},
+ journal = {ACM Transactions on Intelligent Systems and Technology (TIST)},
+ volume = {2},
+ number = {3},
+ publisher = {ACM}
+}
+@INPROCEEDINGS{Lienhart02,
+ author = {Lienhart, Rainer and Maydt, Jochen},
+ title = {An extended set of haar-like features for rapid object detection},
+ booktitle = {Image Processing. 2002. Proceedings. 2002 International Conference on},
+ year = {2002},
+ pages = {I--900},
+ volume = {1},
+ organization = {IEEE}
+}
+@INPROCEEDINGS{Lucas81,
+ author = {Lucas, Bruce D and Kanade, Takeo and others},
+ title = {An iterative image registration technique with an application to stereo vision.},
+ booktitle = {IJCAI},
+ year = {1981},
+ pages = {674--679},
+ volume = {81}
+}
+@MISC{MA13,
+ author = {Mordvintsev, Alexander},
+ title = {ROF and TV-L1 denoising with Primal-Dual algorithm},
+ url = {http://znah.net/rof-and-tv-l1-denoising-with-primal-dual-algorithm.html}
+}
+@ARTICLE{MHT2011,
+ author = {Getreuer, Pascal},
+ title = {Malvar-He-Cutler Linear Image Demosaicking},
+ year = {2011},
+ journal = {Image Processing on Line}
+}
+@INPROCEEDINGS{MK07,
+ author = {Mertens, Tom and Kautz, Jan and Van Reeth, Frank},
+ title = {Exposure fusion},
+ booktitle = {Computer Graphics and Applications, 2007. PG'07. 15th Pacific Conference on},
+ year = {2007},
+ pages = {382--390},
+ organization = {IEEE}
+}
+@ARTICLE{MM06,
+ author = {Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter},
+ title = {A perceptual framework for contrast processing of high dynamic range images},
+ year = {2006},
+ pages = {286--308},
+ journal = {ACM Transactions on Applied Perception (TAP)},
+ volume = {3},
+ number = {3},
+ publisher = {ACM}
+}
+@INCOLLECTION{MOG2001,
+ author = {KaewTraKulPong, Pakorn and Bowden, Richard},
+ title = {An improved adaptive background mixture model for real-time tracking with shadow detection},
+ booktitle = {Video-Based Surveillance Systems},
+ year = {2002},
+ pages = {135--144},
+ publisher = {Springer}
+}
+@ARTICLE{Malis,
+ author = {Malis, Ezio and Vargas, Manuel and others},
+ title = {Deeper understanding of the homography decomposition for vision-based control},
+ year = {2007}
+}
+@ARTICLE{Matas00,
+ author = {Matas, Jiri and Galambos, Charles and Kittler, Josef},
+ title = {Robust detection of lines using the progressive probabilistic hough transform},
+ year = {2000},
+ pages = {119--137},
+ journal = {Computer Vision and Image Understanding},
+ volume = {78},
+ number = {1},
+ publisher = {Elsevier}
+}
+@INPROCEEDINGS{Meyer92,
+ author = {Meyer, Fernand},
+ title = {Color image segmentation},
+ booktitle = {Image Processing and its Applications, 1992., International Conference on},
+ year = {1992},
+ pages = {303--306},
+ organization = {IET}
+}
+@INCOLLECTION{Mitzel09,
+ author = {Mitzel, Dennis and Pock, Thomas and Schoenemann, Thomas and Cremers, Daniel},
+  title = {Video super resolution using duality based TV-L1 optical flow},
+ booktitle = {Pattern Recognition},
+ year = {2009},
+ pages = {432--441},
+ publisher = {Springer}
+}
+@INPROCEEDINGS{Muja2009,
+ author = {Muja, Marius and Lowe, David G},
+ title = {Fast Approximate Nearest Neighbors with Automatic Algorithm Configuration},
+ booktitle = {VISAPP (1)},
+ year = {2009},
+ pages = {331--340}
+}
+@ARTICLE{Nister03,
+ author = {Nist{\'e}r, David},
+ title = {An efficient solution to the five-point relative pose problem},
+ year = {2004},
+ pages = {756--770},
+ journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ volume = {26},
+ number = {6},
+ publisher = {IEEE}
+}
+@ARTICLE{OF06,
+ author = {Matsushita, Yasuyuki and Ofek, Eyal and Ge, Weina and Tang, Xiaoou and Shum, Heung-Yeung},
+ title = {Full-frame video stabilization with motion inpainting},
+ year = {2006},
+ pages = {1150--1163},
+ journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ volume = {28},
+ number = {7},
+ publisher = {IEEE}
+}
+@ARTICLE{ORourke86,
+ author = {O'Rourke, Joseph and Aggarwal, Alok and Maddila, Sanjeev and Baldwin, Michael},
+ title = {An optimal algorithm for finding minimal enclosing triangles},
+ year = {1986},
+ pages = {258--269},
+ journal = {Journal of Algorithms},
+ volume = {7},
+ number = {2},
+ publisher = {Elsevier}
+}
+@INPROCEEDINGS{PM03,
+ author = {P{\'e}rez, Patrick and Gangnet, Michel and Blake, Andrew},
+ title = {Poisson image editing},
+ booktitle = {ACM Transactions on Graphics (TOG)},
+ year = {2003},
+ pages = {313--318},
+ volume = {22},
+ number = {3},
+ organization = {ACM}
+}
+@INPROCEEDINGS{Puzicha1997,
+ author = {Puzicha, Jan and Hofmann, Thomas and Buhmann, Joachim M},
+ title = {Non-parametric similarity measures for unsupervised texture segmentation and image retrieval},
+ booktitle = {Computer Vision and Pattern Recognition, 1997. Proceedings., 1997 IEEE Computer Society Conference on},
+ year = {1997},
+ pages = {267--272},
+ organization = {IEEE}
+}
+@INPROCEEDINGS{RB99,
+ author = {Robertson, Mark A and Borman, Sean and Stevenson, Robert L},
+ title = {Dynamic range improvement through multiple exposures},
+ booktitle = {Image Processing, 1999. ICIP 99. Proceedings. 1999 International Conference on},
+ year = {1999},
+ pages = {159--163},
+ volume = {3},
+ organization = {IEEE}
+}
+@ARTICLE{RD05,
+ author = {Reinhard, Erik and Devlin, Kate},
+ title = {Dynamic range reduction inspired by photoreceptor physiology},
+ year = {2005},
+ pages = {13--24},
+ journal = {Visualization and Computer Graphics, IEEE Transactions on},
+ volume = {11},
+ number = {1},
+ publisher = {IEEE}
+}
+@INPROCEEDINGS{RPROP93,
+ author = {Riedmiller, Martin and Braun, Heinrich},
+ title = {A direct adaptive method for faster backpropagation learning: The RPROP algorithm},
+ booktitle = {Neural Networks, 1993., IEEE International Conference on},
+ year = {1993},
+ pages = {586--591},
+ organization = {IEEE}
+}
+@INPROCEEDINGS{RRKB11,
+ author = {Rublee, Ethan and Rabaud, Vincent and Konolige, Kurt and Bradski, Gary},
+ title = {ORB: an efficient alternative to SIFT or SURF},
+ booktitle = {Computer Vision (ICCV), 2011 IEEE International Conference on},
+ year = {2011},
+ pages = {2564--2571},
+ organization = {IEEE}
+}
+@TECHREPORT{RS04,
+ author = {Szeliski, R},
+  title = {Image alignment and stitching: A tutorial},
+  year = {2004},
+  institution = {Microsoft Corporation, Redmond, WA},
+  number = {MSR-TR-2004-92}
+}
+@BOOK{RS10,
+ author = {Szeliski, Richard},
+ title = {Computer vision: algorithms and applications},
+ year = {2010},
+ publisher = {Springer}
+}
+@ARTICLE{Rafael12,
+ author = {von Gioi, Rafael Grompone and Jakubowicz, J{\'e}r{\'e}mie and Morel, Jean-Michel and Randall, Gregory},
+ title = {LSD: a line segment detector},
+ year = {2012}
+}
+@INCOLLECTION{Rosten06,
+ author = {Rosten, Edward and Drummond, Tom},
+ title = {Machine learning for high-speed corner detection},
+ booktitle = {Computer Vision--ECCV 2006},
+ year = {2006},
+ pages = {430--443},
+ publisher = {Springer}
+}
+@ARTICLE{Rubner2000,
+ author = {Rubner, Yossi and Tomasi, Carlo and Guibas, Leonidas J},
+ title = {The earth mover's distance as a metric for image retrieval},
+ year = {2000},
+ pages = {99--121},
+ journal = {International Journal of Computer Vision},
+ volume = {40},
+ number = {2},
+ publisher = {Springer}
+}
+@ARTICLE{RubnerSept98,
+ author = {Rubner, Yossi and Tomasi, Carlo and Guibas, Leonidas J},
+  title = {The Earth Mover's Distance as a Metric for Image Retrieval},
+ year = {1998},
+ publisher = {Stanford University}
+}
+@ARTICLE{SS00,
+ author = {Shum, Heung-Yeung and Szeliski, Richard},
+ title = {Systems and experiment paper: Construction of panoramic image mosaics with global and local alignment},
+ year = {2000},
+ pages = {101--130},
+ journal = {International Journal of Computer Vision},
+ volume = {36},
+ number = {2},
+ publisher = {Springer}
+}
+@INPROCEEDINGS{Shi94,
+ author = {Shi, Jianbo and Tomasi, Carlo},
+ title = {Good features to track},
+ booktitle = {Computer Vision and Pattern Recognition, 1994. Proceedings CVPR'94., 1994 IEEE Computer Society Conference on},
+ year = {1994},
+ pages = {593--600},
+ organization = {IEEE}
+}
+@ARTICLE{Sklansky82,
+ author = {Sklansky, Jack},
+ title = {Finding the convex hull of a simple polygon},
+ year = {1982},
+ pages = {79--83},
+ journal = {Pattern Recognition Letters},
+ volume = {1},
+ number = {2},
+ publisher = {Elsevier}
+}
+@ARTICLE{Slabaugh,
+ author = {Slabaugh, Gregory G},
+ title = {Computing Euler angles from a rotation matrix},
+ year = {1999},
+ pages = {2000},
+ journal = {Retrieved on August},
+ volume = {6}
+}
+@MISC{SteweniusCFS,
+ author = {Stewenius, Henrik},
+ title = {Calibrated Fivepoint solver},
+ url = {http://www.vis.uky.edu/~stewe/FIVEPOINT/}
+}
+@ARTICLE{Suzuki85,
+ author = {Suzuki, Satoshi and others},
+ title = {Topological structural analysis of digitized binary images by border following},
+ year = {1985},
+ pages = {32--46},
+ journal = {Computer Vision, Graphics, and Image Processing},
+ volume = {30},
+ number = {1},
+ publisher = {Elsevier}
+}
+@ARTICLE{TehChin89,
+ author = {Teh, C-H and Chin, Roland T.},
+ title = {On the detection of dominant points on digital curves},
+ year = {1989},
+ pages = {859--872},
+ journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ volume = {11},
+ number = {8},
+ publisher = {IEEE}
+}
+@ARTICLE{Telea04,
+ author = {Telea, Alexandru},
+ title = {An image inpainting technique based on the fast marching method},
+ year = {2004},
+ pages = {23--34},
+ journal = {Journal of graphics tools},
+ volume = {9},
+ number = {1},
+ publisher = {Taylor \& Francis}
+}
+@INPROCEEDINGS{UES01,
+  author = {Uyttendaele, Matthew and Eden, Ashley and Szeliski, R},
+ title = {Eliminating ghosting and exposure artifacts in image mosaics},
+ booktitle = {Computer Vision and Pattern Recognition, 2001. CVPR 2001. Proceedings of the 2001 IEEE Computer Society Conference on},
+ year = {2001},
+ pages = {II--509},
+ volume = {2},
+ organization = {IEEE}
+}
+@INPROCEEDINGS{V03,
+ author = {Kwatra, Vivek and Sch{\"o}dl, Arno and Essa, Irfan and Turk, Greg and Bobick, Aaron},
+ title = {Graphcut textures: image and video synthesis using graph cuts},
+ booktitle = {ACM Transactions on Graphics (ToG)},
+ year = {2003},
+ pages = {277--286},
+ volume = {22},
+ number = {3},
+ organization = {ACM}
+}
+@INPROCEEDINGS{Viola01,
+ author = {Viola, Paul and Jones, Michael},
+ title = {Rapid object detection using a boosted cascade of simple features},
+ booktitle = {Computer Vision and Pattern Recognition, 2001. CVPR 2001. Proceedings of the 2001 IEEE Computer Society Conference on},
+ year = {2001},
+ pages = {I--511},
+ volume = {1},
+ organization = {IEEE}
+}
+@INPROCEEDINGS{WJ10,
+ author = {Xu, Wei and Mulligan, Jane},
+ title = {Performance evaluation of color correction approaches for automatic multi-view image and video stitching},
+ booktitle = {Computer Vision and Pattern Recognition (CVPR), 2010 IEEE Conference on},
+ year = {2010},
+ pages = {263--270},
+ organization = {IEEE}
+}
+@MISC{Welch95,
+ author = {Welch, Greg and Bishop, Gary},
+ title = {An introduction to the Kalman filter},
+ year = {1995}
+}
+@INPROCEEDINGS{Yang2010,
+ author = {Yang, Qingxiong and Wang, Liang and Ahuja, Narendra},
+ title = {A constant-space belief propagation algorithm for stereo matching},
+ booktitle = {Computer Vision and Pattern Recognition (CVPR), 2010 IEEE Conference on},
+ year = {2010},
+ pages = {1458--1465},
+ organization = {IEEE}
+}
+@ARTICLE{Yuen90,
+ author = {Yuen, HK and Princen, John and Illingworth, John and Kittler, Josef},
+ title = {Comparative study of Hough transform methods for circle finding},
+ year = {1990},
+ pages = {71--77},
+ journal = {Image and Vision Computing},
+ volume = {8},
+ number = {1},
+ publisher = {Elsevier}
+}
+@INCOLLECTION{Zach2007,
+ author = {Zach, Christopher and Pock, Thomas and Bischof, Horst},
+  title = {A duality based approach for realtime TV-L1 optical flow},
+ booktitle = {Pattern Recognition},
+ year = {2007},
+ pages = {214--223},
+ publisher = {Springer}
+}
+@ARTICLE{Zhang2000,
+ author = {Zhang, Zhengyou},
+ title = {A flexible new technique for camera calibration},
+ year = {2000},
+ pages = {1330--1334},
+ journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},
+ volume = {22},
+ number = {11},
+ publisher = {IEEE}
+}
+@INPROCEEDINGS{Zivkovic2004,
+ author = {Zivkovic, Zoran},
+ title = {Improved adaptive Gaussian mixture model for background subtraction},
+ booktitle = {Pattern Recognition, 2004. ICPR 2004. Proceedings of the 17th International Conference on},
+ year = {2004},
+ pages = {28--31},
+ volume = {2},
+ organization = {IEEE}
+}
+@ARTICLE{Zivkovic2006,
+ author = {Zivkovic, Zoran and van der Heijden, Ferdinand},
+ title = {Efficient adaptive density estimation per image pixel for the task of background subtraction},
+ year = {2006},
+ pages = {773--780},
+ journal = {Pattern recognition letters},
+ volume = {27},
+ number = {7},
+ publisher = {Elsevier}
+}
+@INPROCEEDINGS{arthur_kmeanspp_2007,
+ author = {Arthur, David and Vassilvitskii, Sergei},
+ title = {k-means++: The advantages of careful seeding},
+ booktitle = {Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms},
+ year = {2007},
+ pages = {1027--1035},
+ organization = {Society for Industrial and Applied Mathematics}
+}
+@ARTICLE{mitchell2005logistic,
+ author = {Mitchell, Tom M},
+ title = {Logistic Regression},
+ year = {2005},
+ pages = {701},
+ journal = {Machine learning},
+ volume = {10}
}
-
-@inproceedings{Bay06,
- address = {Graz Austria},
- author = {Bay, H. and Tuytelaars, T. and Van Gool, L.},
- booktitle = {9th European Conference on Computer Vision},
- keywords = {local-feature, sift},
- month = {May},
- title = {SURF: Speeded Up Robust Features},
- year = {2006}
-}
-
-@inproceedings{BT96,
- author = {Tomasi, C. and Birchfield, S.T.},
- title = {Depth Discontinuities by Pixel-to-Pixel Stereo},
- booktitle = {STAN-CS},
- year = {1996},
- bibsource = {http://www.visionbib.com/bibliography/stereo413.html#TT35577}
-}
-
-@article{Borgefors86,
- author = {Borgefors, Gunilla},
- title = {Distance transformations in digital images},
- journal = {Comput. Vision Graph. Image Process.},
- volume = {34},
- number = {3},
- year = {1986},
- issn = {0734-189X},
- pages = {344--371},
- doi = {http://dx.doi.org/10.1016/S0734-189X(86)80047-0},
- publisher = {Academic Press Professional, Inc.},
- address = {San Diego, CA, USA},
-}
-
-@MISC{Bouguet00,
- author = {Jean-Yves Bouguet},
- title = {Pyramidal Implementation of the Lucas-Kanade Feature Tracker},
- year = {2000},
- abstract = {},
- keywords = {Optical Flow, Lucas Kanade, Pyramidal Method},
-}
-
-
-@inproceedings{Bradski00,
- author = {Davis, J.W. and Bradski, G.R.},
- title = {Motion Segmentation and Pose Recognition with Motion History Gradients},
- booktitle = {WACV00},
- year = {2000},
- pages = {238-244}
-}
-
-@inproceedings{Bradski98,
- author = {Bradski, G.R.},
- title = {Computer Vision Face Tracking for Use in a Perceptual User Interface},
- booktitle = {Intel},
- year = {1998},
- bibsource = {http://www.visionbib.com/bibliography/people911.html#TT90944}
-}
-
-@inproceedings{Davis97,
- author = {Davis, J.W. and Bobick, A.F.},
- title = {The Representation and Recognition of Action Using Temporal Templates},
- booktitle = {CVPR97},
- year = {1997},
- pages = {928-934}
-}
-
-@techreport{Felzenszwalb04,
- author = {Felzenszwalb, Pedro F. and Huttenlocher, Daniel P.},
- edition = {TR2004-1963},
- institution = {Cornell Computing and Information Science},
- keywords = {Distance Transform, Hausdorff},
- month = {September},
- title = {Distance Transforms of Sampled Functions},
- year = {2004}
-}
-
-@article{Felzenszwalb10,
- author = {Felzenszwalb, P.F. and Girshick, R.B. and McAllester, D. and Ramanan, D.},
- title = {Object Detection with Discriminatively Trained Part Based Models},
- journal = {PAMI},
- volume = {32},
- year = {2010},
- number = {9},
- month = {September},
- pages = {1627-1645},
- bibsource = {http://www.visionbib.com/bibliography/bib/457.html#BB45794}
-}
-
-@article{Hartley99,
- author = {Hartley, R.I.},
- title = {Theory and Practice of Projective Rectification},
- journal = {IJCV},
- volume = {35},
- year = {1999},
- number = {2},
- month = {November},
- pages = {115-127},
- bibsource = {http://www.visionbib.com/bibliography/image-proc118.html#TT9097}
-}
-
-@article{HH08,
- author = {Hirschmuller, H.},
- title = "Stereo Processing by Semiglobal Matching and Mutual Information",
- journal = {PAMI},
- volume = {30},
- year = {2008},
- number = {2},
- month = {February},
- pages = {328-341},
- bibsource = {http://www.visionbib.com/bibliography/stereo422.html#TT36174}
-}
-
-@article{Horn81,
- author = {Horn, B.K.P. and Schunck, B.G.},
- title = {Determining Optical Flow},
- journal = {AI},
- volume = {17},
- year = {1981},
- number = {1-3},
- month = {August},
- pages = {185-203},
- bibsource = {http://www.visionbib.com/bibliography/optic-f733.html#TT69126}
-}
-
-@inproceedings{Kolmogorov03,
- author = {Kim, Junhwan and Kolmogorov, Vladimir and Zabih, Ramin},
- title = {Visual Correspondence Using Energy Minimization and Mutual Information},
- booktitle = {ICCV '03: Proceedings of the Ninth IEEE International Conference on Computer Vision},
- year = {2003},
- isbn = {0-7695-1950-4},
- pages = {1033},
- publisher = {IEEE Computer Society},
- address = {Washington, DC, USA},
-}
-
-@inproceedings{Lucas81,
- author = {Lucas, B. D. and Kanade, T.},
- title = {An Iterative Image Registration Technique with an Application to Stereo Vision (IJCAI)},
- booktitle = {Proceedings of the 7th International Joint Conference on Artificial Intelligence (IJCAI '81)},
- pages = {674-679},
- month = {April},
- year = {1981},
- Notes = {A more complete version is available as Proceedings DARPA Image Understanding Workshop, April 1981, pp.121-130. When you refer to this work, please refer to the IJCAI paper.}
-}
-
-
-@article{Matas00,
- author = {Matas, J. and Galambos, C. and Kittler, J.V.},
- title = {Robust Detection of Lines Using the Progressive Probabilistic Hough Transform},
- journal = {CVIU},
- volume = {78},
- year = {2000},
- number = {1},
- month = {April},
- pages = {119-137},
- bibsource = {http://www.visionbib.com/bibliography/edge264.html#TT21167}
-}
-
-
-@inproceedings{Meyer92,
- author = {Meyer, F.},
- title = {Color image segmentation},
- booktitle = {ICIP92},
- year = {1992},
- pages = {303–306}
-}
-
-
-@inproceedings{Shi94,
- author = {Tomasi, C. and Shi, J.},
- title = {Good Features to Track},
- booktitle = {CVPR94},
- year = {1994},
- pages = {593-600},
- bibsource = {http://www.visionbib.com/bibliography/motion-f716.html#TT61248}
-}
-
-
-@article{Sklansky82,
- author = {Sklansky, J.},
- title = {Finding the Convex Hull of a Simple Polygon},
- journal = {PRL},
- volume = {1},
- year = {1982},
- pages = {79-83},
- bibsource = {http://www.visionbib.com/bibliography/twod283.html#TT22999}
-}
-
-
-@article{Suzuki85,
- author = {Suzuki, S. and Abe, K.},
- title = {Topological Structural Analysis of Digitized Binary Images by Border Following},
- journal = {CVGIP},
- volume = {30},
- year = {1985},
- number = {1},
- month = {April},
- pages = {32-46},
- bibsource = {http://www.visionbib.com/bibliography/twod289.html#TT23296}
-}
-
-
-@article{TehChin89,
- author = {Teh, C.H. and Chin, R.T.},
- title = {On the Detection of Dominant Points on Digital Curve},
- journal = {PAMI},
- volume = {11},
- year = {1989},
- number = {8},
- month = {August},
- pages = {859-872},
- bibsource = {http://www.visionbib.com/bibliography/edge257.html#TT20546}
-}
-
-@article{Telea04,
- author = {Alexandru Telea},
- title = {An Image Inpainting Technique Based on the Fast Marching Method},
- journal = {Journal of Graphics, GPU, and Game Tools},
- volume = {9},
- number = {1},
- pages = {23-34},
- year = {2004},
-}
-
-@misc{Welch95,
- author = {Greg Welch and Gary Bishop},
- title = {An Introduction to the Kalman Filter},
- year = {1995}
-}
-
-@article{Yuen90,
- author = {Yuen, H. K. and Princen, J. and Illingworth, J. and Kittler, J.},
- title = {Comparative study of Hough transform methods for circle finding},
- journal = {Image Vision Comput.},
- volume = {8},
- number = {1},
- year = {1990},
- issn = {0262-8856},
- pages = {71--77},
- doi = {http://dx.doi.org/10.1016/0262-8856(90)90059-E},
- publisher = {Butterworth-Heinemann},
- address = {Newton, MA, USA},
-}
-
-@inproceedings{arthur_kmeanspp_2007,
- title = {k-means++: the advantages of careful seeding},
- booktitle = {Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms},
- publisher = {Society for Industrial and Applied Mathematics Philadelphia, PA, USA},
- author = {D. Arthur and S. Vassilvitskii},
- year = {2007},
- pages = {1027--1035}
-}
-
-@inproceedings{muja_flann_2009,
- author = {Marius Muja and David G. Lowe},
- title = {Fast Approximate Nearest Neighbors with Automatic Algorithm Configuration},
- booktitle = {International Conference on Computer Vision Theory and Applications (VISSAPP'09)},
- year = {2009},
- pages = {331-340},
-}
-
-@inproceedings{qx_csbp,
- author = {Q. Yang and L. Wang and N. Ahuja},
- title = {A Constant-Space Belief Propagation Algorithm for Stereo Matching},
- booktitle = {CVPR},
- year = {2010}
-}
-
-@article{felzenszwalb_bp,
- author = {Pedro F. Felzenszwalb and Daniel P. Huttenlocher},
- title = {Efficient Belief Propagation for Early Vision},
- journal = {International Journal of Computer Vision},
- volume = {70},
- number = {1},
- year = {2006},
- month = {October}
-}
-
-@article{dalal_hog,
- author = {Navneet Dalal and Bill Triggs},
- title = {Histogram of Oriented Gradients for Human Detection},
- booktitle = {CVPR},
- year = {2005}
-}
-
-@inproceedings{Puzicha1997,
- author = {Puzicha, Jan and Hofmann, Thomas and Buhmann, Joachim M.},
- title = {Non-parametric Similarity Measures for Unsupervised Texture Segmentation and Image Retrieval},
- booktitle = {Proceedings of the 1997 Conference on Computer Vision and Pattern Recognition (CVPR '97)},
- series = {CVPR '97},
- year = {1997},
- isbn = {0-8186-7822-4},
- pages = {267--},
- url = {http://dl.acm.org/citation.cfm?id=794189.794386},
- acmid = {794386},
- publisher = {IEEE Computer Society},
- address = {Washington, DC, USA},
-}
-
-@techreport{RubnerSept98,
- author = {Rubner, Yossi and Tomasi, Carlo and Guibas, Leonidas J.},
- title = {The Earth Mover's Distance As a Metric for Image Retrieval},
- year = {1998},
- source = {http://www.ncstrl.org:8900/ncstrl/servlet/search?formname=detail\&id=oai%3Ancstrlh%3Astan%3ASTAN%2F%2FCS-TN-98-86},
- publisher = {Stanford University},
- address = {Stanford, CA, USA},
-}
-
-@article{Rubner2000,
- author = {Rubner, Yossi and Tomasi, Carlo and Guibas, Leonidas J.},
- title = {The Earth Mover's Distance As a Metric for Image Retrieval},
- journal = {Int. J. Comput. Vision},
- issue_date = {Nov. 2000},
- volume = {40},
- number = {2},
- month = nov,
- year = {2000},
- issn = {0920-5691},
- pages = {99--121},
- numpages = {23},
- url = {http://dx.doi.org/10.1023/A:1026543900054},
- doi = {10.1023/A:1026543900054},
- acmid = {365881},
- publisher = {Kluwer Academic Publishers},
- address = {Hingham, MA, USA},
-}
-
-@article{Hu62,
- author={Ming-Kuei Hu},
- journal={Information Theory, IRE Transactions on},
- title={Visual pattern recognition by moment invariants},
- year={1962},
- month={February},
- volume={8},
- number={2},
- pages={179-187},
- doi={10.1109/TIT.1962.1057692},
- ISSN={0096-1000},
-}
-
-@inproceedings{Fitzgibbon95,
- author = {Fitzgibbon, Andrew W. and Fisher, Robert B.},
- title = {A Buyer's Guide to Conic Fitting},
- booktitle = {Proceedings of the 6th British Conference on Machine Vision (Vol. 2)},
- series = {BMVC '95},
- year = {1995},
- isbn = {0-9521898-2-8},
- location = {Birmingham, United Kingdom},
- pages = {513--522},
- numpages = {10},
- url = {http://dl.acm.org/citation.cfm?id=243124.243148},
- acmid = {243148},
- publisher = {BMVA Press},
- address = {Surrey, UK, UK},
-}
-
-@article{KleeLaskowski85,
- author = {Klee, Victor and Laskowski, Michael C.},
- ee = {http://dx.doi.org/10.1016/0196-6774(85)90005-7},
- journal = {J. Algorithms},
- number = 3,
- pages = {359-375},
- title = {Finding the Smallest Triangles Containing a Given Convex Polygon.},
- url = {http://dblp.uni-trier.de/db/journals/jal/jal6.html#KleeL85},
- volume = 6,
- year = 1985
-}
-
-@article{Canny86,
- author = {Canny, J},
- title = {A Computational Approach to Edge Detection},
- journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
- issue_date = {June 1986},
- volume = {8},
- number = {6},
- month = jun,
- year = {1986},
- issn = {0162-8828},
- pages = {679--698},
- numpages = {20},
- url = {http://dx.doi.org/10.1109/TPAMI.1986.4767851},
- doi = {10.1109/TPAMI.1986.4767851},
- acmid = {11275},
- publisher = {IEEE Computer Society},
- address = {Washington, DC, USA}
-}
-
-# '''[Bradski98]''' G.R. Bradski. Computer vision face tracking as a component of a perceptual user interface. In Workshop on Applications of Computer Vision, pages 214-219, Princeton, NJ, Oct. 1998. Updated version can be found at http://www.intel.com/technology/itj/q21998/articles/art\_2.htm. Also, it is included into OpenCV distribution ([[attachment:camshift.pdf]])
-# '''[Burt81]''' P. J. Burt, T. H. Hong, A. Rosenfeld. Segmentation and Estimation of Image Region Properties Through Cooperative Hierarchical Computation. IEEE Tran. On SMC, Vol. 11, N.12, 1981, pp. 802-809.
-# '''[Canny86]''' J. Canny. A Computational Approach to Edge Detection, IEEE Trans. on Pattern Analysis and Machine Intelligence, 8(6), pp. 679-698 (1986).
-# '''[Davis97]''' J. Davis and Bobick. The Representation and Recognition of Action Using Temporal Templates. MIT Media Lab Technical Report 402, 1997.
-# '''[DeMenthon92]''' Daniel F. DeMenthon and Larry S. Davis. Model-Based Object Pose in 25 Lines of Code. In Proceedings of ECCV '92, pp. 335-343, 1992.
-# '''[Fitzgibbon95]''' Andrew W. Fitzgibbon, R.B.Fisher. A Buyer's Guide to Conic Fitting. Proc.5th British Machine Vision Conference, Birmingham, pp. 513-522, 1995.
-# '''[Ford98]''' Adrian Ford, Alan Roberts. Colour Space Conversions. http://www.poynton.com/PDFs/coloureq.pdf
-# '''[Horn81]''' Berthold K.P. Horn and Brian G. Schunck. Determining Optical Flow. Artificial Intelligence, 17, pp. 185-203, 1981.
-# '''[Hu62]''' M. Hu. Visual Pattern Recognition by Moment Invariants, IRE Transactions on Information Theory, 8:2, pp. 179-187, 1962.
-# '''[Iivarinen97]''' Jukka Iivarinen, Markus Peura, Jaakko Särelä, and Ari Visa. Comparison of Combined Shape Descriptors for Irregular Objects, 8th British Machine Vision Conference, BMVC'97. http://www.cis.hut.fi/research/IA/paper/publications/bmvc97/bmvc97.html
-# '''[Jahne97]''' B. Jahne. Digital Image Processing. Springer, New York, 1997.
-# '''[Lucas81]''' Lucas, B., and Kanade, T. An Iterative Image Registration Technique with an Application to Stereo Vision, Proc. of 7th International Joint Conference on Artificial Intelligence (IJCAI), pp. 674-679.
-# '''[Kass88]''' M. Kass, A. Witkin, and D. Terzopoulos. Snakes: Active Contour Models, International Journal of Computer Vision, pp. 321-331, 1988.
-# '''[Lienhart02]''' Rainer Lienhart and Jochen Maydt. An Extended Set of Haar-like Features for Rapid Object Detection. IEEE ICIP 2002, Vol. 1, pp. 900-903, Sep. 2002. This paper, as well as the extended technical report, can be retrieved at http://www.lienhart.de/Publications/publications.html
-# '''[Matas98]''' J.Matas, C.Galambos, J.Kittler. Progressive Probabilistic Hough Transform. British Machine Vision Conference, 1998.
-# '''[Rosenfeld73]''' A. Rosenfeld and E. Johnston. Angle Detection on Digital Curves. IEEE Trans. Computers, 22:875-878, 1973.
-# '''[RubnerJan98]''' Y. Rubner. C. Tomasi, L.J. Guibas. Metrics for Distributions with Applications to Image Databases. Proceedings of the 1998 IEEE International Conference on Computer Vision, Bombay, India, January 1998, pp. 59-66.
-# '''[RubnerSept98]''' Y. Rubner. C. Tomasi, L.J. Guibas. The Earth Mover's Distance as a Metric for Image Retrieval. Technical Report STAN-CS-TN-98-86, Department of Computer Science, Stanford University, September 1998.
-# '''[RubnerOct98]''' Y. Rubner. C. Tomasi. Texture Metrics. Proceeding of the IEEE International Conference on Systems, Man, and Cybernetics, San-Diego, CA, October 1998, pp. 4601-4607. http://robotics.stanford.edu/~rubner/publications.html
-# '''[Serra82]''' J. Serra. Image Analysis and Mathematical Morphology. Academic Press, 1982.
-# '''[Schiele00]''' Bernt Schiele and James L. Crowley. Recognition without Correspondence Using Multidimensional Receptive Field Histograms. In International Journal of Computer Vision 36 (1), pp. 31-50, January 2000.
-# '''[Suzuki85]''' S. Suzuki, K. Abe. Topological Structural Analysis of Digital Binary Images by Border Following. CVGIP, v.30, n.1. 1985, pp. 32-46.
-# '''[Teh89]''' C.H. Teh, R.T. Chin. On the Detection of Dominant Points on Digital Curves. - IEEE Tr. PAMI, 1989, v.11, No.8, p. 859-872.
-# '''[Trucco98]''' Emanuele Trucco, Alessandro Verri. Introductory Techniques for 3-D Computer Vision. Prentice Hall, Inc., 1998.
-# '''[Viola01]''' Paul Viola and Michael J. Jones. Rapid Object Detection using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at http://www.ai.mit.edu/people/viola/
-# '''[Welch95]''' Greg Welch, Gary Bishop. An Introduction To the Kalman Filter. Technical Report TR95-041, University of North Carolina at Chapel Hill, 1995. Online version is available at http://www.cs.unc.edu/~welch/kalman/kalmanIntro.html
-# '''[Williams92]''' D. J. Williams and M. Shah. A Fast Algorithm for Active Contours and Curvature Estimation. CVGIP: Image Understanding, Vol. 55, No. 1, pp. 14-26, Jan., 1992. http://www.cs.ucf.edu/~vision/papers/shah/92/WIS92A.pdf.
-# '''[Yuen03]''' H.K. Yuen, J. Princen, J. Illingworth and J. Kittler. Comparative study of Hough Transform methods for circle finding. http://www.sciencedirect.com/science/article/B6V09-48TCV4N-5Y/2/91f551d124777f7a4cf7b18325235673
-# '''[Yuille89]''' A.Y.Yuille, D.S.Cohen, and P.W.Hallinan. Feature Extraction from Faces Using Deformable Templates in CVPR, pp. 104-109, 1989.
-# '''[Zhang96]''' Z. Zhang. Parameter Estimation Techniques: A Tutorial with Application to Conic Fitting, Image and Vision Computing Journal, 1996.
-# '''[Zhang99]''' Z. Zhang. Flexible Camera Calibration By Viewing a Plane From Unknown Orientations. International Conference on Computer Vision (ICCV'99), Corfu, Greece, pages 666-673, September 1999.
-# '''[Zhang00]''' Z. Zhang. A Flexible New Technique for Camera Calibration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000.
diff --git a/doc/root.markdown.in b/doc/root.markdown.in
index c98bb35317..72095780cf 100644
--- a/doc/root.markdown.in
+++ b/doc/root.markdown.in
@@ -3,9 +3,14 @@ OpenCV modules {#mainpage}
@subpage intro
-Module name | Folder
-------------- | -------------
-@ref core | core
-@ref imgproc | imgproc
+### Main modules
-
+ Module name | Folder
+-------------- | -------------
+@CMAKE_DOXYGEN_MAIN_REFERENCE@
+
+### Extra modules
+
+ Module name | Folder
+-------------- | -------------
+@CMAKE_DOXYGEN_EXTRA_REFERENCE@
diff --git a/modules/androidcamera/include/camera_activity.hpp b/modules/androidcamera/include/camera_activity.hpp
index 2af7befe30..7e79aafd53 100644
--- a/modules/androidcamera/include/camera_activity.hpp
+++ b/modules/androidcamera/include/camera_activity.hpp
@@ -3,6 +3,12 @@
 #include <camera_properties.h>
+/** @defgroup androidcamera Android Camera Support
+*/
+
+//! @addtogroup androidcamera
+//! @{
+
class CameraActivity
{
public:
@@ -44,4 +50,6 @@ private:
int frameHeight;
};
+//! @}
+
#endif
diff --git a/modules/androidcamera/include/camera_properties.h b/modules/androidcamera/include/camera_properties.h
index 65499be2d6..5078401951 100644
--- a/modules/androidcamera/include/camera_properties.h
+++ b/modules/androidcamera/include/camera_properties.h
@@ -1,6 +1,9 @@
#ifndef CAMERA_PROPERTIES_H
#define CAMERA_PROPERTIES_H
+//! @addtogroup androidcamera
+//! @{
+
enum {
ANDROID_CAMERA_PROPERTY_FRAMEWIDTH = 0,
ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT = 1,
@@ -67,4 +70,6 @@ enum {
ANDROID_CAMERA_FOCUS_DISTANCE_FAR_INDEX
};
+//! @}
+
#endif // CAMERA_PROPERTIES_H
diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp
index 4f405afc66..396b666ee1 100644
--- a/modules/calib3d/include/opencv2/calib3d.hpp
+++ b/modules/calib3d/include/opencv2/calib3d.hpp
@@ -48,9 +48,140 @@
#include "opencv2/features2d.hpp"
#include "opencv2/core/affine.hpp"
+/**
+ @defgroup calib3d Camera Calibration and 3D Reconstruction
+
+The functions in this section use a so-called pinhole camera model. In this model, a scene view is
+formed by projecting 3D points into the image plane using a perspective transformation.
+
+\f[s \; m' = A [R|t] M'\f]
+
+or
+
+\f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}
+\begin{bmatrix}
+r_{11} & r_{12} & r_{13} & t_1 \\
+r_{21} & r_{22} & r_{23} & t_2 \\
+r_{31} & r_{32} & r_{33} & t_3
+\end{bmatrix}
+\begin{bmatrix}
+X \\
+Y \\
+Z \\
+1
+\end{bmatrix}\f]
+
+where:
+
+- \f$(X, Y, Z)\f$ are the coordinates of a 3D point in the world coordinate space
+- \f$(u, v)\f$ are the coordinates of the projection point in pixels
+- \f$A\f$ is a camera matrix, or a matrix of intrinsic parameters
+- \f$(c_x, c_y)\f$ is a principal point that is usually at the image center
+- \f$f_x, f_y\f$ are the focal lengths expressed in pixel units.
+
+Thus, if an image from the camera is scaled by a factor, all of these parameters should be scaled
+(multiplied/divided, respectively) by the same factor. The matrix of intrinsic parameters does not
+depend on the scene viewed. So, once estimated, it can be re-used as long as the focal length is
+fixed (in the case of a zoom lens). The joint rotation-translation matrix \f$[R|t]\f$ is called a matrix of
+extrinsic parameters. It is used to describe the camera motion around a static scene, or vice versa,
+rigid motion of an object in front of a still camera. That is, \f$[R|t]\f$ translates coordinates of a
+point \f$(X, Y, Z)\f$ to a coordinate system, fixed with respect to the camera. The transformation above
+is equivalent to the following (when \f$z \ne 0\f$):
+
+\f[\begin{array}{l}
+\vecthree{x}{y}{z} = R \vecthree{X}{Y}{Z} + t \\
+x' = x/z \\
+y' = y/z \\
+u = f_x*x' + c_x \\
+v = f_y*y' + c_y
+\end{array}\f]
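+
+For example, a point \f$(X, Y, Z) = (1, 2, 10)\f$ seen by a camera with \f$R = I\f$, \f$t = 0\f$,
+\f$f_x = f_y = 500\f$, \f$c_x = 320\f$, \f$c_y = 240\f$ (values chosen purely for illustration)
+projects as follows:
+
+@code{.cpp}
+    double X = 1, Y = 2, Z = 10;  // camera frame coincides with the world frame (R = I, t = 0)
+    double x = X / Z, y = Y / Z;  // x' = 0.1, y' = 0.2
+    double u = 500 * x + 320;     // u = f_x*x' + c_x = 370
+    double v = 500 * y + 240;     // v = f_y*y' + c_y = 340
+@endcode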
+
+Real lenses usually have some distortion, mostly radial distortion and slight tangential distortion.
+So, the above model is extended as:
+
+\f[\begin{array}{l} \vecthree{x}{y}{z} = R \vecthree{X}{Y}{Z} + t \\ x' = x/z \\ y' = y/z \\ x'' = x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4 \\ y'' = y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\ \text{where} \quad r^2 = x'^2 + y'^2 \\ u = f_x*x'' + c_x \\ v = f_y*y'' + c_y \end{array}\f]
+
+\f$k_1\f$, \f$k_2\f$, \f$k_3\f$, \f$k_4\f$, \f$k_5\f$, and \f$k_6\f$ are radial distortion coefficients. \f$p_1\f$ and \f$p_2\f$ are
+tangential distortion coefficients. \f$s_1\f$, \f$s_2\f$, \f$s_3\f$, and \f$s_4\f$, are the thin prism distortion
+coefficients. Higher-order coefficients are not considered in OpenCV. In the functions below the
+coefficients are passed or returned as
+
+\f[(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f]
+
+vector. That is, if the vector contains four elements, it means that \f$k_3=0\f$. The distortion
+coefficients do not depend on the scene viewed. Thus, they also belong to the intrinsic camera
+parameters, and they remain the same regardless of the captured image resolution. If, for example, a
+camera has been calibrated on images of 320 x 240 resolution, the same distortion
+coefficients can be used for 640 x 480 images from the same camera while \f$f_x\f$, \f$f_y\f$, \f$c_x\f$, and
+\f$c_y\f$ need to be scaled appropriately.
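+
+A minimal sketch of the distortion model above applied to a single normalized point, assuming
+\f$k_3 \dots k_6\f$ and \f$s_1 \dots s_4\f$ are zero (all coefficient values below are arbitrary
+illustrations):
+
+@code{.cpp}
+    double k1 = -0.2, k2 = 0.05, p1 = 0.001, p2 = -0.001;
+    double xp = 0.1, yp = 0.2;                    // ideal normalized coordinates x', y'
+    double r2 = xp*xp + yp*yp;
+    double radial = 1 + k1*r2 + k2*r2*r2;         // radial factor with k3..k6 = 0
+    double xpp = xp*radial + 2*p1*xp*yp + p2*(r2 + 2*xp*xp);  // x''
+    double ypp = yp*radial + p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;  // y''
+    // pixel coordinates: u = f_x*xpp + c_x,  v = f_y*ypp + c_y
+@endcode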
+
+The functions below use the above model to do the following:
+
+- Project 3D points to the image plane given intrinsic and extrinsic parameters.
+- Compute extrinsic parameters given intrinsic parameters, a few 3D points, and their
+projections.
+- Estimate intrinsic and extrinsic camera parameters from several views of a known calibration
+pattern (every view is described by several 3D-2D point correspondences).
+- Estimate the relative position and orientation of the stereo camera "heads" and compute the
+*rectification* transformation that makes the camera optical axes parallel.
+
+@note
+ - A calibration sample for 3 cameras in horizontal position can be found at
+ opencv_source_code/samples/cpp/3calibration.cpp
+ - A calibration sample based on a sequence of images can be found at
+ opencv_source_code/samples/cpp/calibration.cpp
+ - A calibration sample for 3D reconstruction can be found at
+ opencv_source_code/samples/cpp/build3dmodel.cpp
+ - A calibration sample of an artificially generated camera and chessboard patterns can be
+ found at opencv_source_code/samples/cpp/calibration_artificial.cpp
+ - An example of stereo calibration can be found at
+ opencv_source_code/samples/cpp/stereo_calib.cpp
+ - An example of stereo matching can be found at
+ opencv_source_code/samples/cpp/stereo_match.cpp
+ - (Python) A camera calibration sample can be found at
+ opencv_source_code/samples/python2/calibrate.py
+
+ @{
+ @defgroup calib3d_fisheye Fisheye camera model
+
+ Definitions: Let P be a point in 3D of coordinates X in the world reference frame (stored in the
+ matrix X). The coordinate vector of P in the camera reference frame is:
+
+ \f[Xc = R X + T\f]
+
+ where R is the rotation matrix corresponding to the rotation vector om: R = rodrigues(om); let x, y
+ and z be the three coordinates of Xc:
+
+ \f[x = Xc_1 \\ y = Xc_2 \\ z = Xc_3\f]
+
+ The pinhole projection coordinates of P are [a; b] where
+
+ \f[a = x / z \quad \text{and} \quad b = y / z \\ r^2 = a^2 + b^2 \\ \theta = \arctan(r)\f]
+
+ Fisheye distortion:
+
+ \f[\theta_d = \theta (1 + k_1 \theta^2 + k_2 \theta^4 + k_3 \theta^6 + k_4 \theta^8)\f]
+
+ The distorted point coordinates are [x'; y'] where
+
+ \f[x' = (\theta_d / r) a \\ y' = (\theta_d / r) b \f]
+
+ Finally, conversion into pixel coordinates: the final pixel coordinate vector [u; v] is given by:
+
+ \f[u = f_x (x' + \alpha y') + c_x \\
+ v = f_y y' + c_y\f]
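+
+ As a sketch, the model above can be evaluated directly; all variable names below are
+ illustrative (k1..k4, fx, fy, cx, cy, alpha are assumed calibrated values) and z is assumed
+ positive:
+ @code
+    // hypothetical direct evaluation of the fisheye model for one camera-frame point (x, y, z)
+    double a = x / z, b = y / z;
+    double r = std::sqrt(a*a + b*b);
+    double theta = std::atan(r);
+    double t2 = theta*theta;
+    double theta_d = theta * (1 + k1*t2 + k2*t2*t2 + k3*t2*t2*t2 + k4*t2*t2*t2*t2);
+    double xd = r > 0 ? (theta_d / r) * a : 0;   // x'
+    double yd = r > 0 ? (theta_d / r) * b : 0;   // y'
+    double u = fx * (xd + alpha * yd) + cx;
+    double v = fy * yd + cy;
+ @endcode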
+
+ @defgroup calib3d_c C API
+
+ @}
+ */
+
namespace cv
{
+//! @addtogroup calib3d
+//! @{
+
//! type of the robust estimation algorithm
enum { LMEDS = 4, //!< least-median algorithm
RANSAC = 8 //!< RANSAC algorithm
@@ -105,26 +236,143 @@ enum { FM_7POINT = 1, //!< 7-point algorithm
-//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
+/** @brief Converts a rotation matrix to a rotation vector or vice versa.
+
+@param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
+@param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
+@param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
+derivatives of the output array components with respect to the input array components.
+
+\f[\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos{\theta} I + (1- \cos{\theta} ) r r^T + \sin{\theta} \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\f]
+
+The inverse transformation can also be done easily, since
+
+\f[\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\f]
+
+A rotation vector is a convenient and compact representation of a rotation matrix (since any
+rotation has just 3 degrees of freedom). The representation is used in the global 3D geometry
+optimization procedures like calibrateCamera, stereoCalibrate, or solvePnP .
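+
+A minimal usage sketch (the rotation vector value is illustrative):
+@code
+    Mat rvec = (Mat_<double>(3,1) << 0.1, 0.2, 0.3);
+    Mat R, rvec2;
+    Rodrigues(rvec, R);      // rotation vector -> 3x3 rotation matrix
+    Rodrigues(R, rvec2);     // back again; rvec2 should match rvec
+@endcode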
+ */
CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() );
-//! computes the best-fit perspective transformation mapping srcPoints to dstPoints.
+/** @brief Finds a perspective transformation between two planes.
+
+@param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
+or vector\<Point2f\> .
+@param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
+a vector\<Point2f\> .
+@param method Method used to compute a homography matrix. The following methods are possible:
+- **0** - a regular method using all the points
+- **RANSAC** - RANSAC-based robust method
+- **LMEDS** - Least-Median robust method
+@param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
+(used in the RANSAC method only). That is, if
+\f[\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \| > \texttt{ransacReprojThreshold}\f]
+then the point \f$i\f$ is considered an outlier. If srcPoints and dstPoints are measured in pixels,
+it usually makes sense to set this parameter somewhere in the range of 1 to 10.
+@param mask Optional output mask set by a robust method ( RANSAC or LMEDS ). Note that the input
+mask values are ignored.
+@param maxIters The maximum number of RANSAC iterations (2000 at most).
+@param confidence Confidence level, between 0 and 1.
+
+The functions find and return the perspective transformation \f$H\f$ between the source and the
+destination planes:
+
+\f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f]
+
+so that the back-projection error
+
+\f[\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\f]
+
+is minimized. If the parameter method is set to the default value 0, the function uses all the point
+pairs to compute an initial homography estimate with a simple least-squares scheme.
+
+However, if not all of the point pairs ( \f$srcPoints_i\f$, \f$dstPoints_i\f$ ) fit the rigid perspective
+transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
+you can use one of the two robust methods. Both methods, RANSAC and LMeDS , try many different
+random subsets of the corresponding point pairs (of four pairs each), estimate the homography matrix
+using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
+computed homography (which is the number of inliers for RANSAC or the median re-projection error for
+LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
+the mask of inliers/outliers.
+
+Regardless of the method, robust or not, the computed homography matrix is refined further (using
+inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
+re-projection error even more.
+
+The method RANSAC can handle practically any ratio of outliers but it needs a threshold to
+distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
+correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
+noise is rather small, use the default method (method=0).
+
+The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
+determined up to a scale. Thus, it is normalized so that \f$h_{33}=1\f$. Note that whenever an H matrix
+cannot be estimated, an empty one will be returned.
+
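+A minimal usage sketch (the matched point sets are assumed to be filled elsewhere):
+@code
+    vector<Point2f> srcPts, dstPts;   // filled with corresponding points elsewhere
+    Mat inlierMask;
+    Mat H = findHomography(srcPts, dstPts, RANSAC, 3, inlierMask);
+    if( !H.empty() )
+    {
+        // use H, e.g. with warpPerspective or perspectiveTransform
+    }
+@endcode
+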
+@sa
+ getAffineTransform, getPerspectiveTransform, estimateRigidTransform, warpPerspective,
+ perspectiveTransform
+
+@note
+ - An example of calculating a homography for image matching can be found at
+ opencv_source_code/samples/cpp/video_homography.cpp
+
+ */
CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
int method = 0, double ransacReprojThreshold = 3,
OutputArray mask=noArray(), const int maxIters = 2000,
const double confidence = 0.995);
-//! variant of findHomography for backward compatibility
+/** @overload */
CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
OutputArray mask, int method = 0, double ransacReprojThreshold = 3 );
-//! Computes RQ decomposition of 3x3 matrix
+/** @brief Computes an RQ decomposition of 3x3 matrices.
+
+@param src 3x3 input matrix.
+@param mtxR Output 3x3 upper-triangular matrix.
+@param mtxQ Output 3x3 orthogonal matrix.
+@param Qx Optional output 3x3 rotation matrix around x-axis.
+@param Qy Optional output 3x3 rotation matrix around y-axis.
+@param Qz Optional output 3x3 rotation matrix around z-axis.
+
+The function computes an RQ decomposition using the given rotations. This function is used in
+decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
+and a rotation matrix.
+
+It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
+degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
+sequence of rotations about the three principal axes that results in the same orientation of an
+object, e.g. see @cite Slabaugh . The returned three rotation matrices and the corresponding three
+Euler angles are only one of the possible solutions.
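+
+A brief usage sketch (the input matrix value is illustrative):
+@code
+    Mat M = Mat::eye(3, 3, CV_64F);   // e.g. the left 3x3 submatrix of a projection matrix
+    Mat R, Q;
+    Vec3d eulerAnglesDeg = RQDecomp3x3(M, R, Q);   // M = R*Q
+@endcode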
+ */
CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
OutputArray Qx = noArray(),
OutputArray Qy = noArray(),
OutputArray Qz = noArray());
-//! Decomposes the projection matrix into camera matrix and the rotation martix and the translation vector
+/** @brief Decomposes a projection matrix into a rotation matrix and a camera matrix.
+
+@param projMatrix 3x4 input projection matrix P.
+@param cameraMatrix Output 3x3 camera matrix K.
+@param rotMatrix Output 3x3 external rotation matrix R.
+@param transVect Output 4x1 translation vector T.
+@param rotMatrixX Optional 3x3 rotation matrix around x-axis.
+@param rotMatrixY Optional 3x3 rotation matrix around y-axis.
+@param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
+@param eulerAngles Optional three-element vector containing three Euler angles of rotation in
+degrees.
+
+The function computes a decomposition of a projection matrix into a calibration and a rotation
+matrix and the position of a camera.
+
+It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
+be used in OpenGL. Note, there is always more than one sequence of rotations about the three
+principal axes that results in the same orientation of an object, e.g. see @cite Slabaugh . The
+returned three rotation matrices and the corresponding three Euler angles are only one of the
+possible solutions.
+
+The function is based on RQDecomp3x3 .
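+
+A brief usage sketch (the projection matrix value is illustrative):
+@code
+    Mat P = Mat::eye(3, 4, CV_64F);   // e.g. P1 or P2 returned by stereoRectify
+    Mat K, R, t;                      // camera matrix, rotation, 4x1 homogeneous translation
+    decomposeProjectionMatrix(P, K, R, t);
+@endcode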
+ */
CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
OutputArray rotMatrix, OutputArray transVect,
OutputArray rotMatrixX = noArray(),
@@ -132,10 +380,51 @@ CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray
OutputArray rotMatrixZ = noArray(),
OutputArray eulerAngles =noArray() );
-//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
+/** @brief Computes partial derivatives of the matrix product for each multiplied matrix.
+
+@param A First multiplied matrix.
+@param B Second multiplied matrix.
+@param dABdA First output derivative matrix d(A\*B)/dA of size
+\f$\texttt{A.rows*B.cols} \times \texttt{A.rows*A.cols}\f$ .
+@param dABdB Second output derivative matrix d(A\*B)/dB of size
+\f$\texttt{A.rows*B.cols} \times \texttt{B.rows*B.cols}\f$ .
+
+The function computes partial derivatives of the elements of the matrix product \f$A*B\f$ with regard to
+the elements of each of the two input matrices. The function is used to compute the Jacobian
+matrices in stereoCalibrate but can also be used in any other similar optimization function.
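+
+A brief usage sketch (the matrix sizes are illustrative):
+@code
+    Mat A = Mat::ones(3, 2, CV_64F), B = Mat::ones(2, 4, CV_64F);
+    Mat dABdA, dABdB;
+    matMulDeriv(A, B, dABdA, dABdB);   // dABdA is 12x6, dABdB is 12x8
+@endcode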
+ */
CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB );
-//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments
+/** @brief Combines two rotation-and-shift transformations.
+
+@param rvec1 First rotation vector.
+@param tvec1 First translation vector.
+@param rvec2 Second rotation vector.
+@param tvec2 Second translation vector.
+@param rvec3 Output rotation vector of the superposition.
+@param tvec3 Output translation vector of the superposition.
+@param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
+@param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
+@param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
+@param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
+@param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
+@param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
+@param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
+@param dt3dt2 Optional output derivative of tvec3 with regard to tvec2
+
+The functions compute:
+
+\f[\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\f]
+
+where \f$\mathrm{rodrigues}\f$ denotes a rotation vector to a rotation matrix transformation, and
+\f$\mathrm{rodrigues}^{-1}\f$ denotes the inverse transformation. See Rodrigues for details.
+
+Also, the functions can compute the derivatives of the output vectors with regard to the input
+vectors (see matMulDeriv ). The functions are used inside stereoCalibrate but can also be used in
+your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
+function that contains a matrix multiplication.
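+
+A brief usage sketch (the vector values are illustrative):
+@code
+    Mat rvec1 = (Mat_<double>(3,1) << 0.1, 0, 0), tvec1 = (Mat_<double>(3,1) << 1, 0, 0);
+    Mat rvec2 = (Mat_<double>(3,1) << 0, 0.2, 0), tvec2 = (Mat_<double>(3,1) << 0, 1, 0);
+    Mat rvec3, tvec3;
+    composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);   // [R2|t2] applied after [R1|t1]
+@endcode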
+ */
CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
InputArray rvec2, InputArray tvec2,
OutputArray rvec3, OutputArray tvec3,
@@ -144,7 +433,38 @@ CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(),
OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() );
-//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters
+/** @brief Projects 3D points to an image plane.
+
+@param objectPoints Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or
+vector\<Point3f\> ), where N is the number of points in the view.
+@param rvec Rotation vector. See Rodrigues for details.
+@param tvec Translation vector.
+@param cameraMatrix Camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements. If
+the vector is NULL/empty, the zero distortion coefficients are assumed.
+@param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
+vector\<Point2f\> .
+@param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
+points with respect to components of the rotation vector, translation vector, focal lengths,
+coordinates of the principal point and the distortion coefficients. In the old interface different
+components of the jacobian are returned via different output parameters.
+@param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
+function assumes that the aspect ratio (*fx/fy*) is fixed and correspondingly adjusts the jacobian
+matrix.
+
+The function computes projections of 3D points to the image plane given intrinsic and extrinsic
+camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of
+image points coordinates (as functions of all the input parameters) with respect to the particular
+parameters, intrinsic and/or extrinsic. The Jacobians are used during the global optimization in
+calibrateCamera, solvePnP, and stereoCalibrate . The function itself can also be used to compute a
+re-projection error given the current intrinsic and extrinsic parameters.
+
+@note By setting rvec=tvec=(0,0,0) or by setting cameraMatrix to a 3x3 identity matrix, or by
+passing zero distortion coefficients, you can get various useful partial cases of the function. This
+means that you can compute the distorted coordinates for a sparse set of points or apply a
+perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
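+
+A brief usage sketch (all values are illustrative):
+@code
+    vector<Point3f> objectPoints(1, Point3f(0.f, 0.f, 1.f));   // one 3D point on the optical axis
+    Mat rvec = Mat::zeros(3, 1, CV_64F), tvec = Mat::zeros(3, 1, CV_64F);
+    Mat cameraMatrix = (Mat_<double>(3,3) << 500, 0, 320, 0, 500, 240, 0, 0, 1);
+    vector<Point2f> imagePoints;
+    projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
+    // imagePoints[0] is now (c_x, c_y) = (320, 240)
+@endcode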
+ */
CV_EXPORTS_W void projectPoints( InputArray objectPoints,
InputArray rvec, InputArray tvec,
InputArray cameraMatrix, InputArray distCoeffs,
@@ -152,13 +472,86 @@ CV_EXPORTS_W void projectPoints( InputArray objectPoints,
OutputArray jacobian = noArray(),
double aspectRatio = 0 );
-//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
+/** @brief Finds an object pose from 3D-2D point correspondences.
+
+@param objectPoints Array of object points in the object coordinate space, 3xN/Nx3 1-channel or
+1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can also be passed here.
+@param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel,
+where N is the number of points. vector\<Point2f\> can also be passed here.
+@param cameraMatrix Input camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements. If
+the vector is NULL/empty, the zero distortion coefficients are assumed.
+@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from
+the model coordinate system to the camera coordinate system.
+@param tvec Output translation vector.
+@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses
+the provided rvec and tvec values as initial approximations of the rotation and translation
+vectors, respectively, and further optimizes them.
+@param flags Method for solving a PnP problem:
+- **SOLVEPNP_ITERATIVE** Iterative method is based on Levenberg-Marquardt optimization. In
+this case the function finds such a pose that minimizes reprojection error, that is the sum
+of squared distances between the observed projections imagePoints and the projected (using
+projectPoints ) objectPoints .
+- **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
+"Complete Solution Classification for the Perspective-Three-Point Problem". In this case the
+function requires exactly four object and image points.
+- **SOLVEPNP_EPNP** Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
+paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation".
+- **SOLVEPNP_DLS** Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
+"A Direct Least-Squares (DLS) Method for PnP".
+- **SOLVEPNP_UPNP** Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
+F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
+Estimation". In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
+assuming that both have the same value. Then the cameraMatrix is updated with the estimated
+focal length.
+
+The function estimates the object pose given a set of object points, their corresponding image
+projections, as well as the camera matrix and the distortion coefficients.
+
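+A brief usage sketch (the correspondences and intrinsics are assumed to come from elsewhere):
+@code
+    vector<Point3f> objectPoints;   // known 3D model points, filled elsewhere
+    vector<Point2f> imagePoints;    // their detected 2D projections
+    Mat cameraMatrix, distCoeffs;   // e.g. from calibrateCamera
+    Mat rvec, tvec;
+    bool ok = solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
+@endcode
+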
+@note
+ - An example of how to use solvePnP for planar augmented reality can be found at
+ opencv_source_code/samples/python2/plane_ar.py
+ */
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE );
-//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
+/** @brief Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
+
+@param objectPoints Array of object points in the object coordinate space, 3xN/Nx3 1-channel or
+1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can also be passed here.
+@param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel,
+where N is the number of points. vector\<Point2f\> can also be passed here.
+@param cameraMatrix Input camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements. If
+the vector is NULL/empty, the zero distortion coefficients are assumed.
+@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from
+the model coordinate system to the camera coordinate system.
+@param tvec Output translation vector.
+@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses
+the provided rvec and tvec values as initial approximations of the rotation and translation
+vectors, respectively, and further optimizes them.
+@param iterationsCount Number of iterations.
+@param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
+is the maximum allowed distance between the observed and computed point projections to consider it
+an inlier.
+@param confidence The probability that the algorithm produces a useful result.
+@param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
+@param flags Method for solving a PnP problem (see solvePnP ).
+
+The function estimates an object pose given a set of object points, their corresponding image
+projections, as well as the camera matrix and the distortion coefficients. This function finds such
+a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
+projections imagePoints and the projected (using projectPoints ) objectPoints. The use of RANSAC
+makes the function resistant to outliers.
+
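+A brief usage sketch (inputs as in solvePnP are assumed available; the threshold values are
+illustrative defaults):
+@code
+    Mat rvec, tvec;
+    vector<int> inliers;
+    solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
+                   rvec, tvec, false, 100, 8.0, 0.99, inliers);
+    // inliers holds the indices of the correspondences consistent with the pose
+@endcode
+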
+@note
+ - An example of how to use solvePnPRansac for object detection can be found at
+ opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
+ */
CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
@@ -166,28 +559,227 @@ CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoint
float reprojectionError = 8.0, double confidence = 0.99,
OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE );
-//! initializes camera matrix from a few 3D points and the corresponding projections.
+/** @brief Finds an initial camera matrix from 3D-2D point correspondences.
+
+@param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
+coordinate space. In the old interface all the per-view vectors are concatenated. See
+calibrateCamera for details.
+@param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
+old interface all the per-view vectors are concatenated.
+@param imageSize Image size in pixels used to initialize the principal point.
+@param aspectRatio If it is zero or negative, both \f$f_x\f$ and \f$f_y\f$ are estimated independently.
+Otherwise, \f$f_x = f_y * \texttt{aspectRatio}\f$ .
+
+The function estimates and returns an initial camera matrix for the camera calibration process.
+Currently, the function only supports planar calibration patterns, which are patterns where each
+object point has z-coordinate = 0.
+ */
CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize, double aspectRatio = 1.0 );
-//! finds checkerboard pattern of the specified size in the image
+/** @brief Finds the positions of internal corners of the chessboard.
+
+@param image Source chessboard view. It must be an 8-bit grayscale or color image.
+@param patternSize Number of inner corners per a chessboard row and column
+( patternSize = cvSize(points_per_row,points_per_column) = cvSize(columns,rows) ).
+@param corners Output array of detected corners.
+@param flags Various operation flags that can be zero or a combination of the following values:
+- **CV_CALIB_CB_ADAPTIVE_THRESH** Use adaptive thresholding to convert the image to black
+and white, rather than a fixed threshold level (computed from the average image brightness).
+- **CV_CALIB_CB_NORMALIZE_IMAGE** Normalize the image gamma with equalizeHist before
+applying fixed or adaptive thresholding.
+- **CV_CALIB_CB_FILTER_QUADS** Use additional criteria (like contour area, perimeter,
+square-like shape) to filter out false quads extracted at the contour retrieval stage.
+- **CALIB_CB_FAST_CHECK** Run a fast check on the image that looks for chessboard corners,
+and shortcut the call if none is found. This can drastically speed up the call in the
+degenerate condition when no chessboard is observed.
+
+The function attempts to determine whether the input image is a view of the chessboard pattern and
+locate the internal chessboard corners. The function returns a non-zero value if all of the corners
+are found and they are placed in a certain order (row by row, left to right in every row).
+Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
+a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
+squares touch each other. The detected coordinates are approximate, and to determine their positions
+more accurately, the function calls cornerSubPix. You also may use the function cornerSubPix with
+different parameters if returned coordinates are not accurate enough.
+
+Sample usage of detecting and drawing chessboard corners:
+@code
+ Size patternsize(8,6); //interior number of corners
+ Mat gray = ....; //source image
+ vector<Point2f> corners; //this will be filled by the detected corners
+
+ //CALIB_CB_FAST_CHECK saves a lot of time on images
+ //that do not contain any chessboard corners
+ bool patternfound = findChessboardCorners(gray, patternsize, corners,
+ CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
+ + CALIB_CB_FAST_CHECK);
+
+ if(patternfound)
+ cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
+ TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
+
+ drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
+@endcode
+@note The function requires white space (like a square-thick border, the wider the better) around
+the board to make the detection more robust in various environments. Otherwise, if there is no
+border and the background is dark, the outer black squares cannot be segmented properly and so the
+square grouping and ordering algorithm fails.
+ */
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,
int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );
//! finds subpixel-accurate positions of the chessboard corners
CV_EXPORTS bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size );
-//! draws the checkerboard pattern (found or partly found) in the image
+/** @brief Renders the detected chessboard corners.
+
+@param image Destination image. It must be an 8-bit color image.
+@param patternSize Number of inner corners per a chessboard row and column
+(patternSize = cv::Size(points_per_row,points_per_column)).
+@param corners Array of detected corners, the output of findChessboardCorners.
+@param patternWasFound Parameter indicating whether the complete board was found or not. The
+return value of findChessboardCorners should be passed here.
+
+The function draws individual chessboard corners detected either as red circles if the board was not
+found, or as colored corners connected with lines if the board was found.
+ */
CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
InputArray corners, bool patternWasFound );
-//! finds circles' grid pattern of the specified size in the image
+/** @brief Finds centers in the grid of circles.
+
+@param image grid view of input circles; it must be an 8-bit grayscale or color image.
+@param patternSize number of circles per row and column
+( patternSize = Size(points_per_row, points_per_column) ).
+@param centers output array of detected centers.
+@param flags various operation flags that can be one of the following values:
+- **CALIB_CB_SYMMETRIC_GRID** uses symmetric pattern of circles.
+- **CALIB_CB_ASYMMETRIC_GRID** uses asymmetric pattern of circles.
+- **CALIB_CB_CLUSTERING** uses a special algorithm for grid detection. It is more robust to
+perspective distortions but much more sensitive to background clutter.
+@param blobDetector feature detector that finds blobs like dark circles on light background.
+
+The function attempts to determine whether the input image contains a grid of circles. If it does,
+the function locates centers of the circles. The function returns a non-zero value if all of the
+centers have been found and they have been placed in a certain order (row by row, left to right in
+every row). Otherwise, if the function fails to find all the centers or reorder them, it returns 0.
+
+Sample usage of detecting and drawing the centers of circles:
+@code
+ Size patternsize(7,7); //number of centers
+ Mat gray = ....; //source image
+ vector<Point2f> centers; //this will be filled by the detected centers
+
+ bool patternfound = findCirclesGrid(gray, patternsize, centers);
+
+ drawChessboardCorners(img, patternsize, Mat(centers), patternfound);
+@endcode
+@note The function requires white space (like a square-thick border, the wider the better) around
+the board to make the detection more robust in various environments.
+ */
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = SimpleBlobDetector::create());
-//! finds intrinsic and extrinsic camera parameters from several fews of a known calibration pattern.
+/** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
+
+@param objectPoints In the new interface it is a vector of vectors of calibration pattern points
+in the calibration pattern coordinate space. The outer vector contains as many elements as the
+number of the pattern views. If the same calibration pattern is shown in each view and it is fully
+visible, all the vectors will be the same. However, it is also possible to use partially occluded
+patterns, or even different patterns in different views; then, the vectors will be different. The
+points are 3D, but since they are in a pattern coordinate system, if the rig is planar, it may make
+sense to put the model to the XY coordinate plane so that the Z-coordinate of each input object
+point is 0.
+In the old interface all the vectors of object points from different views are concatenated
+together.
+@param imagePoints In the new interface it is a vector of vectors of the projections of
+calibration pattern points. imagePoints.size() must be equal to objectPoints.size(), and
+imagePoints[i].size() must be equal to objectPoints[i].size() for each i.
+In the old interface all the vectors of object points from different views are concatenated
+together.
+@param imageSize Size of the image used only to initialize the intrinsic camera matrix.
+@param cameraMatrix Output 3x3 floating-point camera matrix
+\f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV_CALIB_USE_INTRINSIC_GUESS
+and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
+initialized before calling the function.
+@param distCoeffs Output vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements.
+@param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
+That is, each k-th rotation vector together with the corresponding k-th translation vector (see
+the next output parameter description) brings the calibration pattern from the model coordinate
+space (in which object points are specified) to the world coordinate space, that is, a real
+position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
+@param tvecs Output vector of translation vectors estimated for each pattern view.
+@param flags Different flags that may be zero or a combination of the following values:
+- **CV_CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
+fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
+center ( imageSize is used), and focal distances are computed in a least-squares fashion.
+Note, that if intrinsic parameters are known, there is no need to use this function just to
+estimate extrinsic parameters. Use solvePnP instead.
+- **CV_CALIB_FIX_PRINCIPAL_POINT** The principal point is not changed during the global
+optimization. It stays at the center or at a different location specified when
+CV_CALIB_USE_INTRINSIC_GUESS is set too.
+- **CV_CALIB_FIX_ASPECT_RATIO** The function considers only fy as a free parameter. The
+ratio fx/fy stays the same as in the input cameraMatrix . When
+CV_CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
+ignored, only their ratio is computed and used further.
+- **CV_CALIB_ZERO_TANGENT_DIST** Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
+to zeros and stay zero.
+- **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** The corresponding radial distortion
+coefficient is not changed during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is
+set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+- **CV_CALIB_RATIONAL_MODEL** Coefficients k4, k5, and k6 are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the rational model and return 8 coefficients. If the flag is not
+set, the function computes and returns only 5 distortion coefficients.
+- **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the thin prism model and return 12 coefficients. If the flag is not
+set, the function computes and returns only 5 distortion coefficients.
+- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
+the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+@param criteria Termination criteria for the iterative optimization algorithm.
+
+The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
+views. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT . The coordinates of 3D object
+points and their corresponding 2D projections in each view must be specified. That may be achieved
+by using an object with a known geometry and easily detectable feature points. Such an object is
+called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
+a calibration rig (see findChessboardCorners ). Currently, initialization of intrinsic parameters
+(when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
+patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
+be used as long as initial cameraMatrix is provided.
+
+The algorithm performs the following steps:
+
+- Compute the initial intrinsic parameters (the option only available for planar calibration
+ patterns) or read them from the input parameters. The distortion coefficients are all set to
+ zeros initially unless some of CV_CALIB_FIX_K? are specified.
+
+- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
+ done using solvePnP .
+
+- Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
+ that is, the total sum of squared distances between the observed feature points imagePoints and
+ the projected (using the current estimates for camera parameters and the poses) object points
+ objectPoints. See projectPoints for details.
+
+The function returns the final re-projection error.
+
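+A brief usage sketch (the per-view point lists are assumed to be collected with, for example,
+findChessboardCorners; the image size is illustrative):
+@code
+    vector<vector<Point3f> > objectPoints;   // per-view 3D pattern points
+    vector<vector<Point2f> > imagePoints;    // per-view detected 2D corners
+    Mat cameraMatrix, distCoeffs;
+    vector<Mat> rvecs, tvecs;
+    double rms = calibrateCamera(objectPoints, imagePoints, Size(640, 480),
+                                 cameraMatrix, distCoeffs, rvecs, tvecs);
+@endcode
+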
+@note
+ If you use a non-square (=non-NxN) grid and findChessboardCorners for calibration, and
+ calibrateCamera returns bad values (zero distortion coefficients, an image center very far from
+ (w/2-0.5,h/2-0.5), and/or large differences between \f$f_x\f$ and \f$f_y\f$ (ratios of 10:1 or more)),
+ then you have probably used patternSize=cvSize(rows,cols) instead of using
+ patternSize=cvSize(cols,rows) in findChessboardCorners .
+
+@sa
+ findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
+ */
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints, Size imageSize,
InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
@@ -195,14 +787,117 @@ CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
int flags = 0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
-//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
+/** @brief Computes useful camera characteristics from the camera matrix.
+
+@param cameraMatrix Input camera matrix that can be estimated by calibrateCamera or
+stereoCalibrate .
+@param imageSize Input image size in pixels.
+@param apertureWidth Physical width in mm of the sensor.
+@param apertureHeight Physical height in mm of the sensor.
+@param fovx Output field of view in degrees along the horizontal sensor axis.
+@param fovy Output field of view in degrees along the vertical sensor axis.
+@param focalLength Focal length of the lens in mm.
+@param principalPoint Principal point in mm.
+@param aspectRatio \f$f_y/f_x\f$
+
+The function computes various useful camera characteristics from the previously estimated camera
+matrix.
+
+@note
+ Do keep in mind that the unit of measure 'mm' stands for whatever unit one chooses for the
+ chessboard pitch (it can thus be any value).
+ */
CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize,
double apertureWidth, double apertureHeight,
CV_OUT double& fovx, CV_OUT double& fovy,
CV_OUT double& focalLength, CV_OUT Point2d& principalPoint,
CV_OUT double& aspectRatio );
-//! finds intrinsic and extrinsic parameters of a stereo camera
+/** @brief Calibrates the stereo camera.
+
+@param objectPoints Vector of vectors of the calibration pattern points.
+@param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
+observed by the first camera.
+@param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
+observed by the second camera.
+@param cameraMatrix1 Input/output first camera matrix:
+\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
+any of CV_CALIB_USE_INTRINSIC_GUESS , CV_CALIB_FIX_ASPECT_RATIO ,
+CV_CALIB_FIX_INTRINSIC , or CV_CALIB_FIX_FOCAL_LENGTH are specified, some or all of the
+matrix components must be initialized. See the flags description for details.
+@param distCoeffs1 Input/output vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements. The
+output vector length depends on the flags.
+@param cameraMatrix2 Input/output second camera matrix. The parameter is similar to cameraMatrix1 .
+@param distCoeffs2 Input/output lens distortion coefficients for the second camera. The parameter
+is similar to distCoeffs1 .
+@param imageSize Size of the image used only to initialize intrinsic camera matrix.
+@param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
+@param T Output translation vector between the coordinate systems of the cameras.
+@param E Output essential matrix.
+@param F Output fundamental matrix.
+@param flags Different flags that may be zero or a combination of the following values:
+- **CV_CALIB_FIX_INTRINSIC** Fix cameraMatrix? and distCoeffs? so that only R, T, E , and F
+matrices are estimated.
+- **CV_CALIB_USE_INTRINSIC_GUESS** Optimize some or all of the intrinsic parameters
+according to the specified flags. Initial values are provided by the user.
+- **CV_CALIB_FIX_PRINCIPAL_POINT** Fix the principal points during the optimization.
+- **CV_CALIB_FIX_FOCAL_LENGTH** Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ .
+- **CV_CALIB_FIX_ASPECT_RATIO** Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$ .
+- **CV_CALIB_SAME_FOCAL_LENGTH** Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ .
+- **CV_CALIB_ZERO_TANGENT_DIST** Set tangential distortion coefficients for each camera to
+zeros and fix them there.
+- **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** Do not change the corresponding radial
+distortion coefficient during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set,
+the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+- **CV_CALIB_RATIONAL_MODEL** Enable coefficients k4, k5, and k6. To provide the backward
+compatibility, this extra flag should be explicitly specified to make the calibration
+function use the rational model and return 8 coefficients. If the flag is not set, the
+function computes and returns only 5 distortion coefficients.
+- **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the thin prism model and return 12 coefficients. If the flag is not
+set, the function computes and returns only 5 distortion coefficients.
+- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
+the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+@param criteria Termination criteria for the iterative optimization algorithm.
+
+The function estimates the transformation between two cameras making a stereo pair. If you have a stereo
+camera where the relative position and orientation of two cameras is fixed, and if you computed
+poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2),
+respectively (this can be done with solvePnP ), then those poses definitely relate to each other.
+This means that, given ( \f$R_1\f$,\f$T_1\f$ ), it should be possible to compute ( \f$R_2\f$,\f$T_2\f$ ). You only
+need to know the position and orientation of the second camera relative to the first camera. This is
+what the described function does. It computes ( \f$R\f$,\f$T\f$ ) so that:
+
+\f[R_2=R*R_1 \\ T_2=R*T_1 + T,\f]
+
+Optionally, it computes the essential matrix E:
+
+\f[E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} *R\f]
+
+where \f$T_i\f$ are components of the translation vector \f$T\f$ : \f$T=[T_0, T_1, T_2]^T\f$ . And the function
+can also compute the fundamental matrix F:
+
+\f[F = cameraMatrix2^{-T} E cameraMatrix1^{-1}\f]
+
+Besides the stereo-related information, the function can also perform a full calibration of each of
+two cameras. However, due to the high dimensionality of the parameter space and noise in the input
+data, the function can diverge from the correct solution. If the intrinsic parameters can be
+estimated with high accuracy for each of the cameras individually (for example, using
+calibrateCamera ), you are recommended to do so and then pass CV_CALIB_FIX_INTRINSIC flag to the
+function along with the computed intrinsic parameters. Otherwise, if all the parameters are
+estimated at once, it makes sense to restrict some parameters, for example, pass
+CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST flags, which is usually a
+reasonable assumption.
+
+Similarly to calibrateCamera , the function minimizes the total re-projection error for all the
+points in all the available views from both cameras. The function returns the final value of the
+re-projection error.
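+
+A brief usage sketch (per-camera intrinsics and per-view points are assumed to come from a prior
+calibrateCamera run):
+@code
+    Mat R, T, E, F;
+    double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
+                                 cameraMatrix1, distCoeffs1,
+                                 cameraMatrix2, distCoeffs2,
+                                 imageSize, R, T, E, F, CALIB_FIX_INTRINSIC);
+@endcode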
+ */
CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
@@ -212,7 +907,85 @@ CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) );
-//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
+/** @brief Computes rectification transforms for each head of a calibrated stereo camera.
+
+@param cameraMatrix1 First camera matrix.
+@param cameraMatrix2 Second camera matrix.
+@param distCoeffs1 First camera distortion parameters.
+@param distCoeffs2 Second camera distortion parameters.
+@param imageSize Size of the image used for stereo calibration.
+@param R Rotation matrix between the coordinate systems of the first and the second cameras.
+@param T Translation vector between coordinate systems of the cameras.
+@param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
+@param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
+@param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
+camera.
+@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
+camera.
+@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
+@param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY . If the flag is set,
+the function makes the principal points of each camera have the same pixel coordinates in the
+rectified views. And if the flag is not set, the function may still shift the images in the
+horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
+useful image area.
+@param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
+scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
+images are zoomed and shifted so that only valid pixels are visible (no black areas after
+rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
+pixels from the original images from the cameras are retained in the rectified images (no source
+image pixels are lost). Obviously, any intermediate value yields an intermediate result between
+those two extreme cases.
+@param newImageSize New image resolution after rectification. The same size should be passed to
+initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
+is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
+preserve details in the original image, especially when there is a big radial distortion.
+@param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
+are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
+(see the picture below).
+@param validPixROI2 Optional output rectangles inside the rectified images where all the pixels
+are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
+(see the picture below).
+
+The function computes the rotation matrices for each camera that (virtually) make both camera image
+planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
+the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
+as input. As output, it provides two rotation matrices and also two projection matrices in the new
+coordinates. The function distinguishes the following two cases:
+
+- **Horizontal stereo**: the first and the second camera views are shifted relative to each other
+ mainly along the x axis (with possible small vertical shift). In the rectified images, the
+ corresponding epipolar lines in the left and right cameras are horizontal and have the same
+ y-coordinate. P1 and P2 look like:
+
+ \f[\texttt{P1} = \begin{bmatrix} f & 0 & cx_1 & 0 \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix}\f]
+
+ \f[\texttt{P2} = \begin{bmatrix} f & 0 & cx_2 & T_x*f \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\f]
+
+ where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if
+ CV_CALIB_ZERO_DISPARITY is set.
+
+- **Vertical stereo**: the first and the second camera views are shifted relative to each other
+ mainly in vertical direction (and probably a bit in the horizontal direction too). The epipolar
+ lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
+
+ \f[\texttt{P1} = \begin{bmatrix} f & 0 & cx & 0 \\ 0 & f & cy_1 & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix}\f]
+
+ \f[\texttt{P2} = \begin{bmatrix} f & 0 & cx & 0 \\ 0 & f & cy_2 & T_y*f \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\f]
+
+ where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if CALIB_ZERO_DISPARITY is
+ set.
+
+As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
+matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to
+initialize the rectification map for each camera.
+
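+A sketch of that typical pipeline (the variable names are illustrative):
+@code
+    Mat R1, R2, P1, P2, Q;
+    stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
+                  imageSize, R, T, R1, R2, P1, P2, Q);
+    Mat map1x, map1y, map2x, map2y;
+    initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imageSize, CV_16SC2, map1x, map1y);
+    initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, imageSize, CV_16SC2, map2x, map2y);
+    Mat rect1, rect2;
+    remap(img1, rect1, map1x, map1y, INTER_LINEAR);
+    remap(img2, rect2, map2x, map2y, INTER_LINEAR);
+@endcode
+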
+See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
+the corresponding image regions. This means that the images are well rectified, which is what most
+stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
+their interiors are all valid pixels.
+
+![image](pics/stereo_undistort.jpg)
+ */
CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
Size imageSize, InputArray R, InputArray T,
@@ -222,7 +995,35 @@ CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs
double alpha = -1, Size newImageSize = Size(),
CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );
-//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
+/** @brief Computes a rectification transform for an uncalibrated stereo camera.
+
+@param points1 Array of feature points in the first image.
+@param points2 The corresponding points in the second image. The same formats as in
+findFundamentalMat are supported.
+@param F Input fundamental matrix. It can be computed from the same set of point pairs using
+findFundamentalMat .
+@param imgSize Size of the image.
+@param H1 Output rectification homography matrix for the first image.
+@param H2 Output rectification homography matrix for the second image.
+@param threshold Optional threshold used to filter out the outliers. If the parameter is greater
+than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
+for which \f$|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}\f$ ) are
+rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
+
+The function computes the rectification transformations without knowing intrinsic parameters of the
+cameras and their relative position in space, which explains the suffix "uncalibrated". Another
+related difference from stereoRectify is that the function outputs not the rectification
+transformations in the object (3D) space, but the planar perspective transformations encoded by the
+homography matrices H1 and H2 . The function implements the algorithm @cite Hartley99 .
+
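+A brief usage sketch (the matched point arrays are assumed available):
+@code
+    Mat F = findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
+    Mat H1, H2;
+    stereoRectifyUncalibrated(points1, points2, F, imgSize, H1, H2);
+    // warpPerspective with H1 and H2 then rectifies the two images
+@endcode
+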
+@note
+ While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
+ depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
+ it would be better to correct it before computing the fundamental matrix and calling this
+ function. For example, distortion coefficients can be estimated for each head of stereo camera
+ separately by using calibrateCamera . Then, the images can be corrected using undistort , or
+ just the point coordinates can be corrected with undistortPoints .
+ */
CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
InputArray F, Size imgSize,
OutputArray H1, OutputArray H2,
@@ -240,60 +1041,311 @@ CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distC
OutputArray Q, double alpha, Size newImgSize,
CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
-//! returns the optimal new camera matrix
+/** @brief Returns the new camera matrix based on the free scaling parameter.
+
+@param cameraMatrix Input camera matrix.
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements. If
+the vector is NULL/empty, the zero distortion coefficients are assumed.
+@param imageSize Original image size.
+@param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
+valid) and 1 (when all the source image pixels are retained in the undistorted image). See
+stereoRectify for details.
+@param newImgSize Image size after rectification. By default, it is set to imageSize .
+@param validPixROI Optional output rectangle that outlines all-good-pixels region in the
+undistorted image. See roi1, roi2 description in stereoRectify .
+@param centerPrincipalPoint Optional flag that indicates whether in the new camera matrix the
+principal point should be at the image center or not. By default, the principal point is chosen to
+best fit a subset of the source image (determined by alpha) to the corrected image.
+@return New camera matrix based on the free scaling parameter.
+
+The function computes and returns the optimal new camera matrix based on the free scaling parameter.
+By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
+image pixels if there is valuable information in the corners (alpha=1), or get something in between.
+When alpha\>0 , the undistortion result is likely to have some black pixels corresponding to
+"virtual" pixels outside of the captured distorted image. The original camera matrix, distortion
+coefficients, the computed new camera matrix, and newImageSize should be passed to
+initUndistortRectifyMap to produce the maps for remap .
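+
+A sketch of the pipeline just described (the variable names are illustrative):
+@code
+    Rect roi;
+    Mat newK = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, &roi);
+    Mat map1, map2;
+    initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(), newK, imageSize, CV_16SC2, map1, map2);
+    Mat undistorted;
+    remap(distorted, undistorted, map1, map2, INTER_LINEAR);
+    undistorted = undistorted(roi);   // keep only the all-good-pixels region
+@endcode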
+ */
CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
Size imageSize, double alpha, Size newImgSize = Size(),
CV_OUT Rect* validPixROI = 0,
bool centerPrincipalPoint = false);
-//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
+/** @brief Converts points from Euclidean to homogeneous space.
+
+@param src Input vector of N-dimensional points.
+@param dst Output vector of N+1-dimensional points.
+
+The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
+point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
+ */
CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
-//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z))
+/** @brief Converts points from homogeneous to Euclidean space.
+
+@param src Input vector of N-dimensional points.
+@param dst Output vector of N-1-dimensional points.
+
+The function converts points from homogeneous to Euclidean space using perspective projection. That is,
+each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
+output point coordinates will be (0,0,0,...).
+ */
CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
-//! for backward compatibility
+/** @brief Converts points to/from homogeneous coordinates.
+
+@param src Input array or vector of 2D, 3D, or 4D points.
+@param dst Output vector of 2D, 3D, or 4D points.
+
+The function converts 2D or 3D points from/to homogeneous coordinates by calling either
+convertPointsToHomogeneous or convertPointsFromHomogeneous.
+
+@note The function is obsolete. Use one of the previous two functions instead.
+ */
CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
-//! finds fundamental matrix from a set of corresponding 2D points
+/** @brief Calculates a fundamental matrix from the corresponding points in two images.
+
+@param points1 Array of N points from the first image. The point coordinates should be
+floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param method Method for computing a fundamental matrix.
+- **CV_FM_7POINT** for a 7-point algorithm. \f$N = 7\f$
+- **CV_FM_8POINT** for an 8-point algorithm. \f$N \ge 8\f$
+- **CV_FM_RANSAC** for the RANSAC algorithm. \f$N \ge 8\f$
+- **CV_FM_LMEDS** for the LMedS algorithm. \f$N \ge 8\f$
+@param param1 Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
+line in pixels, beyond which the point is considered an outlier and is not used for computing the
+final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
+point localization, image resolution, and the image noise.
+@param param2 Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level
+of confidence (probability) that the estimated matrix is correct.
+@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
+for the other points. The array is computed only in the RANSAC and LMedS methods.
+
+The epipolar geometry is described by the following equation:
+
+\f[[p_2; 1]^T F [p_1; 1] = 0\f]
+
+where \f$F\f$ is a fundamental matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
+second images, respectively.
+
+The function calculates the fundamental matrix using one of four methods listed above and returns
+the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
+algorithm, the function may return up to 3 solutions ( \f$9 \times 3\f$ matrix that stores all 3
+matrices sequentially).
+
+The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
+epipolar lines corresponding to the specified points. It can also be passed to
+stereoRectifyUncalibrated to compute the rectification transformation:
+@code
+ // Example. Estimation of fundamental matrix using the RANSAC algorithm
+ int point_count = 100;
+    vector<Point2f> points1(point_count);
+    vector<Point2f> points2(point_count);
+
+ // initialize the points here ...
+ for( int i = 0; i < point_count; i++ )
+ {
+ points1[i] = ...;
+ points2[i] = ...;
+ }
+
+ Mat fundamental_matrix =
+ findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
+@endcode
+ */
CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
int method = FM_RANSAC,
double param1 = 3., double param2 = 0.99,
OutputArray mask = noArray() );
-//! variant of findFundamentalMat for backward compatibility
+/** @overload */
CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
OutputArray mask, int method = FM_RANSAC,
double param1 = 3., double param2 = 0.99 );
-//! finds essential matrix from a set of corresponding 2D points using five-point algorithm
+/** @brief Calculates an essential matrix from the corresponding points in two images.
+
+@param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
+be floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param focal Focal length of the camera. Note that this function assumes that points1 and points2
+are feature points from cameras with the same focal length and principal point.
+@param pp Principal point of the camera.
+@param method Method for computing an essential matrix.
+- **RANSAC** for the RANSAC algorithm.
+- **LMEDS** for the LMedS algorithm.
+@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
+line in pixels, beyond which the point is considered an outlier and is not used for computing the
+final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
+point localization, image resolution, and the image noise.
+@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
+confidence (probability) that the estimated matrix is correct.
+@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
+for the other points. The array is computed only in the RANSAC and LMedS methods.
+
+This function estimates the essential matrix based on the five-point algorithm solver in @cite Nister03 .
+@cite SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
+
+\f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f]
+
+\f[K =
+\begin{bmatrix}
+f & 0 & x_{pp} \\
+0 & f & y_{pp} \\
+0 & 0 & 1
+\end{bmatrix}\f]
+
+where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
+second images, respectively. The result of this function may be passed further to
+decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
+ */
CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
double focal = 1.0, Point2d pp = Point2d(0, 0),
int method = RANSAC, double prob = 0.999,
double threshold = 1.0, OutputArray mask = noArray() );
-//! decompose essential matrix to possible rotation matrix and one translation vector
+/** @brief Decomposes an essential matrix into possible rotations and a translation.
+
+@param E The input essential matrix.
+@param R1 One possible rotation matrix.
+@param R2 Another possible rotation matrix.
+@param t One possible translation.
+
+This function decomposes an essential matrix E using SVD @cite HartleyZ00 . Generally, 4
+possible poses exist for a given E. They are \f$[R_1, t]\f$, \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$. By
+decomposing E, you can only get the direction of the translation, so the function returns unit t.
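+
+A minimal sketch (E here is a hypothetical essential matrix corresponding to a unit translation
+along the z-axis and no rotation):
+@code
+    cv::Mat E = (cv::Mat_<double>(3, 3) << 0, -1, 0,
+                                           1,  0, 0,
+                                           0,  0, 0);
+    cv::Mat R1, R2, t;
+    cv::decomposeEssentialMat(E, R1, R2, t);
+    // the four pose hypotheses are [R1, t], [R1, -t], [R2, t], [R2, -t]
+@endcode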
+ */
CV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
-//! recover relative camera pose from a set of corresponding 2D points
+/** @brief Recovers relative camera rotation and translation from an estimated essential matrix and the
+corresponding points in two images, using the cheirality check. Returns the number of inliers that pass
+the check.
+
+@param E The input essential matrix.
+@param points1 Array of N 2D points from the first image. The point coordinates should be
+floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param R Recovered relative rotation.
+@param t Recovered relative translation.
+@param focal Focal length of the camera. Note that this function assumes that points1 and points2
+are feature points from cameras with the same focal length and principal point.
+@param pp Principal point of the camera.
+@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
+inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
+recover the pose. In the output mask, only the inliers that pass the cheirality check are retained.
+
+This function decomposes an essential matrix using decomposeEssentialMat and then verifies possible
+pose hypotheses by doing the cheirality check. The cheirality check basically means that the
+triangulated 3D points should have positive depth. Some details can be found in @cite Nister03 .
+
+This function can be used to process the output E and mask from findEssentialMat. In this scenario,
+points1 and points2 are the same input as for findEssentialMat:
+@code
+    // Example. Estimation of essential matrix using the RANSAC algorithm
+ int point_count = 100;
+    vector<Point2f> points1(point_count);
+    vector<Point2f> points2(point_count);
+
+ // initialize the points here ...
+ for( int i = 0; i < point_count; i++ )
+ {
+ points1[i] = ...;
+ points2[i] = ...;
+ }
+
+ double focal = 1.0;
+ cv::Point2d pp(0.0, 0.0);
+ Mat E, R, t, mask;
+
+ E = findEssentialMat(points1, points2, focal, pp, RANSAC, 0.999, 1.0, mask);
+ recoverPose(E, points1, points2, R, t, focal, pp, mask);
+@endcode
+ */
CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
OutputArray R, OutputArray t,
double focal = 1.0, Point2d pp = Point2d(0, 0),
InputOutputArray mask = noArray() );
-//! finds coordinates of epipolar lines corresponding the specified points
+/** @brief For points in an image of a stereo pair, computes the corresponding epilines in the other image.
+
+@param points Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV_32FC2 or
+vector\<Point2f\> .
+@param whichImage Index of the image (1 or 2) that contains the points .
+@param F Fundamental matrix that can be estimated using findFundamentalMat or stereoRectify .
+@param lines Output vector of the epipolar lines corresponding to the points in the other image.
+Each line \f$ax + by + c=0\f$ is encoded by 3 numbers \f$(a, b, c)\f$ .
+
+For every point in one of the two images of a stereo pair, the function finds the equation of the
+corresponding epipolar line in the other image.
+
+From the fundamental matrix definition (see findFundamentalMat ), line \f$l^{(2)}_i\f$ in the second
+image for the point \f$p^{(1)}_i\f$ in the first image (when whichImage=1 ) is computed as:
+
+\f[l^{(2)}_i = F p^{(1)}_i\f]
+
+And vice versa, when whichImage=2, \f$l^{(1)}_i\f$ is computed from \f$p^{(2)}_i\f$ as:
+
+\f[l^{(1)}_i = F^T p^{(2)}_i\f]
+
+Line coefficients are defined up to a scale. They are normalized so that \f$a_i^2+b_i^2=1\f$ .
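+
+A minimal sketch (F is a hypothetical placeholder; normally it comes from findFundamentalMat):
+@code
+    cv::Mat F = (cv::Mat_<double>(3, 3) << 0, -1, 0,
+                                           1,  0, 0,
+                                           0,  0, 0);
+    std::vector<cv::Point2f> pts1 = { {10.f, 20.f}, {30.f, 40.f} };
+    std::vector<cv::Vec3f> lines2;
+    cv::computeCorrespondEpilines(pts1, 1, F, lines2); // each element is (a, b, c) of ax+by+c=0
+@endcode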
+ */
CV_EXPORTS_W void computeCorrespondEpilines( InputArray points, int whichImage,
InputArray F, OutputArray lines );
+/** @brief Reconstructs points by triangulation.
+
+@param projMatr1 3x4 projection matrix of the first camera.
+@param projMatr2 3x4 projection matrix of the second camera.
+@param projPoints1 2xN array of feature points in the first image. In the case of the C++ version,
+it can be also a vector of feature points or a two-channel matrix of size 1xN or Nx1.
+@param projPoints2 2xN array of corresponding points in the second image. In the case of the C++
+version, it can be also a vector of feature points or a two-channel matrix of size 1xN or Nx1.
+@param points4D 4xN array of reconstructed points in homogeneous coordinates.
+
+The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their
+observations with a stereo camera. Projection matrices can be obtained from stereoRectify.
+
+@note
+ Keep in mind that all input data should be of float type in order for this function to work.
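+
+A minimal sketch (the projection matrices and image points are hypothetical; all data is float, as
+required):
+@code
+    cv::Mat P1 = cv::Mat::eye(3, 4, CV_32F);  // first camera at the origin
+    cv::Mat P2 = cv::Mat::eye(3, 4, CV_32F);
+    P2.at<float>(0, 3) = -1.f;                // second camera shifted along x
+    std::vector<cv::Point2f> x1 = { {0.10f, 0.20f} };
+    std::vector<cv::Point2f> x2 = { {0.05f, 0.20f} };
+    cv::Mat X;                                // 4xN array of homogeneous points
+    cv::triangulatePoints(P1, P2, x1, x2, X);
+@endcode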
+
+@sa
+ reprojectImageTo3D
+ */
CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
InputArray projPoints1, InputArray projPoints2,
OutputArray points4D );
+/** @brief Refines coordinates of corresponding points.
+
+@param F 3x3 fundamental matrix.
+@param points1 1xN array containing the first set of points.
+@param points2 1xN array containing the second set of points.
+@param newPoints1 The optimized points1.
+@param newPoints2 The optimized points2.
+
+The function implements the Optimal Triangulation Method (see Multiple View Geometry @cite HartleyZ00 for details).
+For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it
+computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric
+error \f$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\f$ (where \f$d(a,b)\f$ is the
+geometric distance between points \f$a\f$ and \f$b\f$ ) subject to the epipolar constraint
+\f$newPoints2^T * F * newPoints1 = 0\f$ .
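+
+A minimal sketch (F and the correspondences are hypothetical placeholders; the points are given as
+1xN two-channel, double-precision arrays):
+@code
+    cv::Mat F = (cv::Mat_<double>(3, 3) << 0, -1, 0,
+                                           1,  0, 0,
+                                           0,  0, 0);
+    cv::Mat p1(1, 1, CV_64FC2, cv::Scalar(1.0, 1.0));
+    cv::Mat p2(1, 1, CV_64FC2, cv::Scalar(1.2, 0.9));
+    cv::Mat np1, np2;
+    cv::correctMatches(F, p1, p2, np1, np2); // np1/np2 satisfy the epipolar constraint exactly
+@endcode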
+ */
CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
OutputArray newPoints1, OutputArray newPoints2 );
-//! filters off speckles (small regions of incorrectly computed disparity)
+/** @brief Filters off small noise blobs (speckles) in the disparity map.
+
+@param img The input 16-bit signed disparity image
+@param newVal The disparity value used to paint-off the speckles
+@param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not
+affected by the algorithm
+@param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
+blob. Note that since StereoBM, StereoSGBM, and possibly other algorithms return a fixed-point
+disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
+account when specifying this parameter value.
+@param buf The optional temporary buffer to avoid memory allocation within the function.
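+
+A minimal sketch (the disparity map is a placeholder; maxDiff accounts for the x16 fixed-point
+scale of StereoBM/StereoSGBM output):
+@code
+    cv::Mat disp(240, 320, CV_16S, cv::Scalar(0)); // e.g. produced by StereoBM::compute
+    cv::filterSpeckles(disp, 0, 100, 16); // paint blobs up to 100 px with 0; maxDiff = 1 disparity * 16
+@endcode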
+ */
CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
int maxSpeckleSize, double maxDiff,
InputOutputArray buf = noArray() );
@@ -308,23 +1360,77 @@ CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost
int minDisparity, int numberOfDisparities,
int disp12MaxDisp = 1 );
-//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
+/** @brief Reprojects a disparity image to 3D space.
+
+@param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
+floating-point disparity image.
+@param _3dImage Output 3-channel floating-point image of the same size as disparity . Each
+element of _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity
+map.
+@param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained with stereoRectify.
+@param handleMissingValues Indicates whether the function should handle missing values (i.e.
+points where the disparity was not computed). If handleMissingValues=true, then pixels with the
+minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
+to 3D points with a very large Z value (currently set to 10000).
+@param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
+depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
+
+The function transforms a single-channel disparity map to a 3-channel image representing a 3D
+surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
+computes:
+
+\f[\begin{array}{l} [X \; Y \; Z \; W]^T = \texttt{Q} *[x \; y \; \texttt{disparity} (x,y) \; 1]^T \\ \texttt{\_3dImage} (x,y) = (X/W, \; Y/W, \; Z/W) \end{array}\f]
+
+The matrix Q can be an arbitrary \f$4 \times 4\f$ matrix (for example, the one computed by
+stereoRectify). To reproject a sparse set of points {(x,y,d),...} to 3D space, use
+perspectiveTransform .
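+
+A minimal sketch (the disparity map and Q are placeholders; normally Q comes from stereoRectify):
+@code
+    cv::Mat disparity(240, 320, CV_32F, cv::Scalar(8.f));
+    cv::Mat Q = cv::Mat::eye(4, 4, CV_64F);
+    cv::Mat xyz;
+    cv::reprojectImageTo3D(disparity, xyz, Q, true);
+    cv::Vec3f p = xyz.at<cv::Vec3f>(120, 160); // 3D point computed for pixel (160, 120)
+@endcode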
+ */
CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
OutputArray _3dImage, InputArray Q,
bool handleMissingValues = false,
int ddepth = -1 );
+/** @brief Computes an optimal affine transformation between two 3D point sets.
+
+@param src First input 3D point set.
+@param dst Second input 3D point set.
+@param out Output 3D affine transformation matrix \f$3 \times 4\f$ .
+@param inliers Output vector indicating which points are inliers.
+@param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
+an inlier.
+@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
+between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
+significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
+
+The function estimates an optimal 3D affine transformation between two 3D point sets using the
+RANSAC algorithm.
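+
+A minimal sketch with synthetic data (a pure translation, so all points are inliers):
+@code
+    std::vector<cv::Point3f> src = { {0,0,0}, {1,0,0}, {0,1,0}, {0,0,1}, {1,1,1} };
+    std::vector<cv::Point3f> dst;
+    for (size_t i = 0; i < src.size(); i++)
+        dst.push_back(src[i] + cv::Point3f(1, 2, 3));
+    cv::Mat affine, inliers;
+    cv::estimateAffine3D(src, dst, affine, inliers); // affine is 3x4; its last column is ~(1, 2, 3)
+@endcode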
+ */
CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
OutputArray out, OutputArray inliers,
double ransacThreshold = 3, double confidence = 0.99);
+/** @brief Decomposes a homography matrix into rotation(s), translation(s) and plane normal(s).
+
+@param H The input homography matrix between two images.
+@param K The input intrinsic camera calibration matrix.
+@param rotations Array of rotation matrices.
+@param translations Array of translation vectors.
+@param normals Array of plane normal vectors.
+
+This function extracts relative camera motion between two views observing a planar object from the
+homography H induced by the plane. The intrinsic camera matrix K must also be provided. The function
+may return up to four mathematical solution sets. At least two of the solutions may further be
+invalidated if point correspondences are available by applying positive depth constraint (all points
+must be in front of the camera). The decomposition method is described in detail in @cite Malis .
+ */
CV_EXPORTS_W int decomposeHomographyMat(InputArray H,
InputArray K,
OutputArrayOfArrays rotations,
OutputArrayOfArrays translations,
OutputArrayOfArrays normals);
+/** @brief The base class for stereo correspondence algorithms.
+ */
class CV_EXPORTS_W StereoMatcher : public Algorithm
{
public:
@@ -332,6 +1438,14 @@ public:
DISP_SCALE = (1 << DISP_SHIFT)
};
+ /** @brief Computes disparity map for the specified stereo pair
+
+ @param left Left 8-bit single-channel image.
+ @param right Right image of the same size and the same type as the left one.
+ @param disparity Output disparity map. It has the same size as the input images. Some algorithms,
+ like StereoBM or StereoSGBM, compute a 16-bit fixed-point disparity map (where each disparity value
+ has 4 fractional bits), whereas other algorithms output a 32-bit floating-point disparity map.
+ */
CV_WRAP virtual void compute( InputArray left, InputArray right,
OutputArray disparity ) = 0;
@@ -355,7 +1469,9 @@ public:
};
-
+/** @brief Class for computing stereo correspondence using the block matching algorithm, introduced and
+contributed to OpenCV by K. Konolige.
+ */
class CV_EXPORTS_W StereoBM : public StereoMatcher
{
public:
@@ -387,10 +1503,40 @@ public:
CV_WRAP virtual Rect getROI2() const = 0;
CV_WRAP virtual void setROI2(Rect roi2) = 0;
+ /** @brief Creates StereoBM object
+
+ @param numDisparities the disparity search range. For each pixel, the algorithm will find the best
+ disparity from 0 (default minimum disparity) to numDisparities. The search range can then be
+ shifted by changing the minimum disparity.
+ @param blockSize the linear size of the blocks compared by the algorithm. The size should be odd
+ (as the block is centered at the current pixel). Larger block size implies smoother, though less
+ accurate disparity map. Smaller block size gives a more detailed disparity map, but there is a
+ higher chance for the algorithm to find a wrong correspondence.
+
+ The function creates a StereoBM object. You can then call StereoBM::compute() to compute disparity for
+ a specific stereo pair.
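+
+ A minimal sketch (the images are placeholders; real inputs must be a rectified 8-bit grayscale
+ pair):
+ @code
+     cv::Mat left(480, 640, CV_8U, cv::Scalar(0));
+     cv::Mat right(480, 640, CV_8U, cv::Scalar(0));
+     cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create(64, 21);
+     cv::Mat disp;
+     bm->compute(left, right, disp); // 16-bit fixed-point disparities, scaled by 16
+ @endcode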
+ */
CV_WRAP static Ptr<StereoBM> create(int numDisparities = 0, int blockSize = 21);
};
-
+/** @brief The class implements the modified H. Hirschmuller algorithm @cite HH08 that differs from the original
+one as follows:
+
+- By default, the algorithm is single-pass, which means that you consider only 5 directions
+instead of 8. Set mode=StereoSGBM::MODE_HH in StereoSGBM::create to run the full variant of the
+algorithm but beware that it may consume a lot of memory.
+- The algorithm matches blocks, not individual pixels. Though, setting blockSize=1 reduces the
+blocks to single pixels.
+- Mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi
+sub-pixel metric from @cite BT98 is used. Though, color images are supported as well.
+- Some pre- and post- processing steps from K. Konolige algorithm StereoBM are included, for
+example: pre-filtering (StereoBM::PREFILTER_XSOBEL type) and post-filtering (uniqueness
+check, quadratic interpolation and speckle filtering).
+
+@note
+ - (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found
+ at opencv_source_code/samples/python2/stereo_match.py
+ */
class CV_EXPORTS_W StereoSGBM : public StereoMatcher
{
public:
@@ -415,6 +1561,43 @@ public:
CV_WRAP virtual int getMode() const = 0;
CV_WRAP virtual void setMode(int mode) = 0;
+ /** @brief Creates StereoSGBM object
+
+ @param minDisparity Minimum possible disparity value. Normally, it is zero but sometimes
+ rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
+ @param numDisparities Maximum disparity minus minimum disparity. The value is always greater than
+ zero. In the current implementation, this parameter must be divisible by 16.
+ @param blockSize Matched block size. It must be an odd number \>=1 . Normally, it should be
+ somewhere in the 3..11 range.
+ @param P1 The first parameter controlling the disparity smoothness. See below.
+ @param P2 The second parameter controlling the disparity smoothness. The larger the values are,
+ the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1
+ between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor
+ pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good
+ P1 and P2 values are shown (like 8\*number_of_image_channels\*SADWindowSize\*SADWindowSize and
+ 32\*number_of_image_channels\*SADWindowSize\*SADWindowSize , respectively).
+ @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right
+ disparity check. Set it to a non-positive value to disable the check.
+ @param preFilterCap Truncation value for the prefiltered image pixels. The algorithm first
+ computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval.
+ The result values are passed to the Birchfield-Tomasi pixel cost function.
+ @param uniquenessRatio Margin in percentage by which the best (minimum) computed cost function
+ value should "win" the second best value to consider the found match correct. Normally, a value
+ within the 5-15 range is good enough.
+ @param speckleWindowSize Maximum size of smooth disparity regions to consider their noise speckles
+ and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the
+ 50-200 range.
+ @param speckleRange Maximum disparity variation within each connected component. If you do speckle
+ filtering, set the parameter to a positive value, it will be implicitly multiplied by 16.
+ Normally, 1 or 2 is good enough.
+ @param mode Set it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming
+ algorithm. It will consume O(W\*H\*numDisparities) bytes, which is large for 640x480 stereo and
+ huge for HD-size pictures. By default, it is set to StereoSGBM::MODE_SGBM (single-pass).
+
+ The method creates a StereoSGBM object. You only have to set a meaningful numDisparities value at
+ minimum; the remaining parameters default to values that work reasonably well in many cases and
+ can be tuned individually.
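+
+ A minimal sketch (illustrative parameter values following the P1/P2 rule of thumb above):
+ @code
+     int blockSize = 5, channels = 1;
+     cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(0, 64, blockSize,
+             8 * channels * blockSize * blockSize,    // P1
+             32 * channels * blockSize * blockSize);  // P2
+     cv::Mat left(480, 640, CV_8U, cv::Scalar(0));    // placeholder rectified pair
+     cv::Mat right(480, 640, CV_8U, cv::Scalar(0));
+     cv::Mat disp;
+     sgbm->compute(left, right, disp);
+ @endcode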
+ */
CV_WRAP static Ptr<StereoSGBM> create(int minDisparity, int numDisparities, int blockSize,
int P1 = 0, int P2 = 0, int disp12MaxDiff = 0,
int preFilterCap = 0, int uniquenessRatio = 0,
@@ -422,8 +1605,16 @@ public:
int mode = StereoSGBM::MODE_SGBM);
};
+//! @} calib3d
+
+/** @brief The methods in this namespace use a so-called fisheye camera model.
+ @ingroup calib3d_fisheye
+*/
namespace fisheye
{
+//! @addtogroup calib3d_fisheye
+//! @{
+
enum{
CALIB_USE_INTRINSIC_GUESS = 1,
CALIB_RECOMPUTE_EXTRINSIC = 2,
@@ -436,50 +1627,229 @@ namespace fisheye
CALIB_FIX_INTRINSIC = 256
};
- //! projects 3D points using fisheye model
+ /** @brief Projects points using fisheye model
+
+ @param objectPoints Array of object points, 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is
+ the number of points in the view.
+ @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
+ vector\<Point2f\>.
+ @param affine Affine transformation (rotation and translation combined) from the object coordinate
+ space to the camera coordinate space.
+ @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param alpha The skew coefficient.
+ @param jacobian Optional output 2Nx15 jacobian matrix of derivatives of image points with respect
+ to components of the focal lengths, coordinates of the principal point, distortion coefficients,
+ rotation vector, translation vector, and the skew. In the old interface different components of
+ the jacobian are returned via different output parameters.
+
+ The function computes projections of 3D points to the image plane given intrinsic and extrinsic
+ camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of
+ image points coordinates (as functions of all the input parameters) with respect to the particular
+ parameters, intrinsic and/or extrinsic.
+ */
CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
- //! projects points using fisheye model
+ /** @overload */
CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
- //! distorts 2D points using fisheye model
+ /** @brief Distorts 2D points using fisheye model.
+
+ @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is
+ the number of points in the view.
+ @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param alpha The skew coefficient.
+ @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
+ */
CV_EXPORTS void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);
- //! undistorts 2D points using fisheye model
+ /** @brief Undistorts 2D points using fisheye model
+
+ @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
+ number of points in the view.
+ @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
+ 1-channel or 1x1 3-channel
+ @param P New camera matrix (3x3) or new projection matrix (3x4)
+ @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
+ */
CV_EXPORTS void undistortPoints(InputArray distorted, OutputArray undistorted,
InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray());
- //! computing undistortion and rectification maps for image transform by cv::remap()
- //! If D is empty zero distortion is used, if R or P is empty identity matrixes are used
+ /** @brief Computes undistortion and rectification maps for image transform by cv::remap(). If D is empty, zero
+ distortion is used; if R or P is empty, identity matrices are used.
+
+ @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
+ 1-channel or 1x1 3-channel
+ @param P New camera matrix (3x3) or new projection matrix (3x4)
+ @param size Undistorted image size.
+ @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps()
+ for details.
+ @param map1 The first output map.
+ @param map2 The second output map.
+ */
CV_EXPORTS void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);
- //! undistorts image, optionally changes resolution and camera matrix. If Knew zero identity matrix is used
+ /** @brief Transforms an image to compensate for fisheye lens distortion.
+
+ @param distorted image with fisheye lens distortion.
+ @param undistorted Output image with compensated fisheye lens distortion.
+ @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param Knew Camera matrix of the distorted image. By default, it is the identity matrix but you
+ may additionally scale and shift the result by using a different matrix.
+ @param new_size The new size of the undistorted image. When zero (default), it is set to the size
+ of the input distorted image.
+
+ The function transforms an image to compensate for radial and tangential lens distortion.
+
+ The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap
+ (with bilinear interpolation). See the former function for details of the transformation being
+ performed.
+
+ See below the results of undistortImage.
+ - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
+ k_4, k_5, k_6) of distortion were optimized under calibration)
+ - b\) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
+ k_3, k_4) of fisheye distortion were optimized under calibration)
+ - c\) original image was captured with fisheye lens
+
+ Pictures a) and b) are almost the same. But if we consider points of the image located far from
+ the center of the image, we can notice that on image a) these points are distorted.
+
+ ![image](pics/fisheye_undistorted.jpg)
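+
+ A minimal sketch (the K and D values here are hypothetical):
+ @code
+     cv::Matx33d K(300, 0, 320,
+                   0, 300, 240,
+                   0,   0,   1);
+     cv::Vec4d D(0.1, -0.05, 0.01, -0.002);
+     cv::Mat distorted(480, 640, CV_8UC3, cv::Scalar::all(0)); // placeholder fisheye image
+     cv::Mat undistorted;
+     cv::fisheye::undistortImage(distorted, undistorted, K, D, K);
+ @endcode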
+ */
CV_EXPORTS void undistortImage(InputArray distorted, OutputArray undistorted,
InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());
- //! estimates new camera matrix for undistortion or rectification
+ /** @brief Estimates new camera matrix for undistortion or rectification.
+
+ @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param image_size Size of the image.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
+ 1-channel or 1x1 3-channel
+ @param P New camera matrix (3x3) or new projection matrix (3x4)
+ @param balance Sets the new focal length in range between the min focal length and the max focal
+ length. Balance is in range of [0, 1].
+ @param new_size The new image size. When zero (default), it is set to image_size.
+ @param fov_scale Divisor for new focal length.
+ */
CV_EXPORTS void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0);
- //! performs camera calibaration
+ /** @brief Performs camera calibration
+
+ @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
+ coordinate space.
+ @param imagePoints vector of vectors of the projections of calibration pattern points.
+ imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal
+ to objectPoints[i].size() for each i.
+ @param image_size Size of the image used only to initialize the intrinsic camera matrix.
+ @param K Output 3x3 floating-point camera matrix
+ \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If
+ fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
+ initialized before calling the function.
+ @param D Output vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
+ That is, each k-th rotation vector together with the corresponding k-th translation vector (see
+ the next output parameter description) brings the calibration pattern from the model coordinate
+ space (in which object points are specified) to the world coordinate space, that is, a real
+ position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
+ @param tvecs Output vector of translation vectors estimated for each pattern view.
+ @param flags Different flags that may be zero or a combination of the following values:
+ - **fisheye::CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
+ fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
+ center ( imageSize is used), and focal distances are computed in a least-squares fashion.
+ - **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsic will be recomputed after each iteration
+ of intrinsic optimization.
+ - **fisheye::CALIB_CHECK_COND** The functions will check validity of condition number.
+ - **fisheye::CALIB_FIX_SKEW** Skew coefficient (alpha) is set to zero and stays zero.
+ - **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zero and stay
+ zero.
+ @param criteria Termination criteria for the iterative optimization algorithm.
+ */
CV_EXPORTS double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
- //! stereo rectification estimation
+ /** @brief Stereo rectification for fisheye camera model
+
+ @param K1 First camera matrix.
+ @param D1 First camera distortion parameters.
+ @param K2 Second camera matrix.
+ @param D2 Second camera distortion parameters.
+ @param imageSize Size of the image used for stereo calibration.
+ @param R Rotation matrix between the coordinate systems of the first and the second
+ cameras.
+ @param tvec Translation vector between coordinate systems of the cameras.
+ @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
+ @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
+ @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
+ camera.
+ @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
+ camera.
+ @param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
+ @param flags Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
+ the function makes the principal points of each camera have the same pixel coordinates in the
+ rectified views. And if the flag is not set, the function may still shift the images in the
+ horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
+ useful image area.
+ @param newImageSize New image resolution after rectification. The same size should be passed to
+ initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
+ is passed (default), it is set to the original imageSize . Setting it to larger value can help you
+ preserve details in the original image, especially when there is a big radial distortion.
+ @param balance Sets the new focal length in range between the min focal length and the max focal
+ length. Balance is in range of [0, 1].
+ @param fov_scale Divisor for new focal length.
+ */
CV_EXPORTS void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,
OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(),
double balance = 0.0, double fov_scale = 1.0);
- //! performs stereo calibaration
+ /** @brief Performs stereo calibration
+
+ @param objectPoints Vector of vectors of the calibration pattern points.
+ @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
+ observed by the first camera.
+ @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
+ observed by the second camera.
+ @param K1 Input/output first camera matrix:
+ \f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
+ any of fisheye::CALIB_USE_INTRINSIC_GUESS , fisheye::CALIB_FIX_INTRINSIC are specified,
+ some or all of the matrix components must be initialized.
+ @param D1 Input/output vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$ of 4 elements.
+ @param K2 Input/output second camera matrix. The parameter is similar to K1 .
+ @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
+ similar to D1 .
+ @param imageSize Size of the image used only to initialize intrinsic camera matrix.
+ @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
+ @param T Output translation vector between the coordinate systems of the cameras.
+ @param flags Different flags that may be zero or a combination of the following values:
+ - **fisheye::CALIB_FIX_INTRINSIC** Fix K1, K2 and D1, D2 so that only the R, T matrices
+ are estimated.
+ - **fisheye::CALIB_USE_INTRINSIC_GUESS** K1, K2 contains valid initial values of
+ fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
+ center (imageSize is used), and focal distances are computed in a least-squares fashion.
+ - **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsic will be recomputed after each iteration
+ of intrinsic optimization.
+ - **fisheye::CALIB_CHECK_COND** The functions will check validity of condition number.
+ - **fisheye::CALIB_FIX_SKEW** Skew coefficient (alpha) is set to zero and stays zero.
+ - **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zero and stay
+ zero.
+ @param criteria Termination criteria for the iterative optimization algorithm.
+ */
CV_EXPORTS double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
OutputArray R, OutputArray T, int flags = CALIB_FIX_INTRINSIC,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
+//! @} calib3d_fisheye
}
} // cv
diff --git a/modules/calib3d/include/opencv2/calib3d/calib3d_c.h b/modules/calib3d/include/opencv2/calib3d/calib3d_c.h
index c99c25a4d9..2392692389 100644
--- a/modules/calib3d/include/opencv2/calib3d/calib3d_c.h
+++ b/modules/calib3d/include/opencv2/calib3d/calib3d_c.h
@@ -50,6 +50,10 @@
extern "C" {
#endif
+/** @addtogroup calib3d_c
+ @{
+ */
+
/****************************************************************************************\
* Camera Calibration, Pose Estimation and Stereo *
\****************************************************************************************/
@@ -371,6 +375,8 @@ CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* Q,
int handleMissingValues CV_DEFAULT(0) );
+/** @} calib3d_c */
+
#ifdef __cplusplus
} // extern "C"
diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp
index 2b5ad7ffe3..a9011d0b33 100644
--- a/modules/core/include/opencv2/core.hpp
+++ b/modules/core/include/opencv2/core.hpp
@@ -75,6 +75,9 @@
@defgroup core_opengl OpenGL interoperability
@defgroup core_ipp Intel IPP Asynchronous C/C++ Converters
@defgroup core_optim Optimization Algorithms
+ @defgroup core_directx DirectX interoperability
+ @defgroup core_eigen Eigen support
+ @defgroup core_opencl OpenCL support
@}
*/
diff --git a/modules/core/include/opencv2/core/cuda.hpp b/modules/core/include/opencv2/core/cuda.hpp
index 612b5dbd1d..15d526e802 100644
--- a/modules/core/include/opencv2/core/cuda.hpp
+++ b/modules/core/include/opencv2/core/cuda.hpp
@@ -51,13 +51,6 @@
#include "opencv2/core.hpp"
#include "opencv2/core/cuda_types.hpp"
-/**
-@defgroup cuda CUDA-accelerated Computer Vision
-@{
- @defgroup cuda_struct Data structures
-@}
- */
-
namespace cv { namespace cuda {
//! @addtogroup cuda_struct
@@ -65,8 +58,28 @@ namespace cv { namespace cuda {
//////////////////////////////// GpuMat ///////////////////////////////
-//! Smart pointer for GPU memory with reference counting.
-//! Its interface is mostly similar with cv::Mat.
+/** @brief Base storage class for GPU memory with reference counting.
+
+Its interface matches the Mat interface with the following limitations:
+
+- no arbitrary dimensions support (only 2D)
+- no functions that return references to their data (because references on GPU are not valid for
+ CPU)
+- no expression templates technique support
+
+Beware that the latter limitation may lead to overloaded matrix operators that cause memory
+allocations. The GpuMat class is convertible to cuda::PtrStepSz and cuda::PtrStep so it can be
+passed directly to the kernel.
+
+@note In contrast with Mat, in most cases GpuMat::isContinuous() == false . This means that rows are
+aligned to a size depending on the hardware. Single-row GpuMat is always a continuous matrix.
+
+@note You are not recommended to leave static or global GpuMat variables allocated, that is, to rely
+on their destructors. The destruction order of such variables and the CUDA context is undefined. The
+GPU memory release function returns an error if the CUDA context has been destroyed before.
+
+@sa Mat
+ */
class CV_EXPORTS GpuMat
{
public:
@@ -277,11 +290,28 @@ public:
Allocator* allocator;
};
-//! creates continuous matrix
+/** @brief Creates a continuous matrix.
+
+@param rows Row count.
+@param cols Column count.
+@param type Type of the matrix.
+@param arr Destination matrix. This parameter changes only if it has a proper type and area (
+\f$\texttt{rows} \times \texttt{cols}\f$ ).
+
+Matrix is called continuous if its elements are stored continuously, that is, without gaps at the
+end of each row.
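+
+A minimal usage sketch:
+@code
+    cv::cuda::GpuMat buf;
+    cv::cuda::createContinuous(480, 640, CV_8UC1, buf);
+    CV_Assert(buf.isContinuous()); // rows are stored without gaps
+@endcode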
+ */
CV_EXPORTS void createContinuous(int rows, int cols, int type, OutputArray arr);
-//! ensures that size of the given matrix is not less than (rows, cols) size
-//! and matrix type is match specified one too
+/** @brief Ensures that the size of a matrix is big enough and the matrix has a proper type.
+
+@param rows Minimum desired number of rows.
+@param cols Minimum desired number of columns.
+@param type Desired matrix type.
+@param arr Destination matrix.
+
+The function does not reallocate memory if the matrix has proper attributes already.
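+
+A minimal usage sketch:
+@code
+    cv::cuda::GpuMat buf;
+    cv::cuda::ensureSizeIsEnough(480, 640, CV_8UC1, buf); // allocates
+    cv::cuda::ensureSizeIsEnough(240, 320, CV_8UC1, buf); // the existing buffer is large enough
+@endcode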
+ */
CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr);
CV_EXPORTS GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat& mat);
@@ -292,10 +322,21 @@ CV_EXPORTS void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCou
//////////////////////////////// CudaMem ////////////////////////////////
-//! CudaMem is limited cv::Mat with page locked memory allocation.
-//! Page locked memory is only needed for async and faster coping to GPU.
-//! It is convertable to cv::Mat header without reference counting
-//! so you can use it with other opencv functions.
+/** @brief Class with reference counting wrapping special memory type allocation functions from CUDA.
+
+Its interface is also Mat-like but with additional memory type parameters.
+
+- **PAGE_LOCKED** sets a page locked memory type used commonly for fast and asynchronous
+ uploading/downloading data from/to GPU.
+- **SHARED** specifies a zero copy memory allocation that enables mapping the host memory to GPU
+ address space, if supported.
+- **WRITE_COMBINED** sets the write combined buffer that is not cached by CPU. Such buffers are
+ used to supply GPU with data when GPU only reads it. The advantage is a better CPU cache
+ utilization.
+
+@note Allocation size of such memory types is usually limited. For more details, see *CUDA 2.2
+Pinned Memory APIs* document or *CUDA C Programming Guide*.
+ */
class CV_EXPORTS CudaMem
{
public:
@@ -335,7 +376,13 @@ public:
//! returns matrix header with disabled reference counting for CudaMem data.
Mat createMatHeader() const;
- //! maps host memory into device address space and returns GpuMat header for it. Throws exception if not supported by hardware.
+ /** @brief Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting
+ for it.
+
+ This can be done only if memory was allocated with the SHARED flag and if it is supported by the
+ hardware. Laptops often share video and CPU memory, so address spaces can be mapped, which
+ eliminates an extra copy.
+ */
GpuMat createGpuMatHeader() const;
// Please see cv::Mat for descriptions
@@ -363,17 +410,28 @@ public:
AllocType alloc_type;
};
-//! page-locks the matrix m memory and maps it for the device(s)
+/** @brief Page-locks the memory of matrix and maps it for the device(s).
+
+@param m Input matrix.
+ */
CV_EXPORTS void registerPageLocked(Mat& m);
-//! unmaps the memory of matrix m, and makes it pageable again
+/** @brief Unmaps the memory of matrix and makes it pageable again.
+
+@param m Input matrix.
+ */
CV_EXPORTS void unregisterPageLocked(Mat& m);
///////////////////////////////// Stream //////////////////////////////////
-//! Encapculates Cuda Stream. Provides interface for async coping.
-//! Passed to each function that supports async kernel execution.
-//! Reference counting is enabled.
+/** @brief This class encapsulates a queue of asynchronous calls.
+
+@note Currently, you may face problems if an operation is enqueued twice with different data. Some
+functions use the constant GPU memory, and next call may update the memory before the previous one
+has been finished. But calling different operations asynchronously is safe because each operation
+has its own constant buffer. Memory copy/upload/download/set operations to the buffers you hold are
+also safe.
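+
+A minimal sketch of asynchronous use (the host data is a placeholder):
+@code
+    cv::cuda::Stream stream;
+    cv::Mat host(480, 640, CV_8UC1, cv::Scalar(0));
+    cv::cuda::GpuMat dev;
+    dev.upload(host, stream);    // enqueued; returns immediately
+    dev.download(host, stream);
+    stream.waitForCompletion();  // block until all enqueued operations finish
+@endcode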
+ */
class CV_EXPORTS Stream
{
typedef void (Stream::*bool_type)() const;
@@ -385,16 +443,26 @@ public:
//! creates a new asynchronous stream
Stream();
- //! queries an asynchronous stream for completion status
+ /** @brief Returns true if the current stream queue is finished. Otherwise, it returns false.
+ */
bool queryIfComplete() const;
- //! waits for stream tasks to complete
+ /** @brief Blocks the current CPU thread until all operations in the stream are complete.
+ */
void waitForCompletion();
- //! makes a compute stream wait on an event
+ /** @brief Makes a compute stream wait on an event.
+ */
void waitEvent(const Event& event);
- //! adds a callback to be called on the host after all currently enqueued items in the stream have completed
+ /** @brief Adds a callback to be called on the host after all currently enqueued items in the stream have
+ completed.
+
+ @note Callbacks must not make any CUDA API calls. Callbacks must not perform any synchronization
+ that may depend on outstanding device work or other callbacks that are not mandated to run earlier.
+ Callbacks without a mandated order (in independent streams) execute in undefined order and may be
+ serialized.
+ */
void enqueueHostCallback(StreamCallback callback, void* userData);
//! return Stream object for default CUDA stream
@@ -446,21 +514,41 @@ private:
friend struct EventAccessor;
};
+//! @} cuda_struct
+
//////////////////////////////// Initialization & Info ////////////////////////
-//! this is the only function that do not throw exceptions if the library is compiled without CUDA
+//! @addtogroup cuda_init
+//! @{
+
+/** @brief Returns the number of installed CUDA-enabled devices.
+
+Use this function before any other CUDA functions calls. If OpenCV is compiled without CUDA support,
+this function returns 0.
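+
+A minimal sketch of a runtime check:
+@code
+    if (cv::cuda::getCudaEnabledDeviceCount() > 0)
+        cv::cuda::setDevice(0); // use the first CUDA-enabled device
+@endcode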
+ */
CV_EXPORTS int getCudaEnabledDeviceCount();
-//! set device to be used for GPU executions for the calling host thread
+/** @brief Sets a device and initializes it for the current thread.
+
+@param device System index of a CUDA device starting with 0.
+
+If the call of this function is omitted, a default device is initialized at the first CUDA usage.
+ */
CV_EXPORTS void setDevice(int device);
-//! returns which device is currently being used for the calling host thread
+/** @brief Returns the current device index set by cuda::setDevice or initialized by default.
+ */
CV_EXPORTS int getDevice();
-//! explicitly destroys and cleans up all resources associated with the current device in the current process
-//! any subsequent API call to this device will reinitialize the device
+/** @brief Explicitly destroys and cleans up all resources associated with the current device in the current
+process.
+
+Any subsequent API call to this device will reinitialize the device.
+ */
CV_EXPORTS void resetDevice();
+/** @brief Enumeration providing CUDA computing features.
+ */
enum FeatureSet
{
FEATURE_SET_COMPUTE_10 = 10,
@@ -482,12 +570,27 @@ enum FeatureSet
//! checks whether current device supports the given feature
CV_EXPORTS bool deviceSupports(FeatureSet feature_set);
-//! information about what GPU archs this OpenCV CUDA module was compiled for
+/** @brief Class providing a set of static methods to check what NVIDIA\* card architecture the CUDA module was
+built for.
+
+According to the CUDA C Programming Guide Version 3.2: "PTX code produced for some specific compute
+capability can always be compiled to binary code of greater or equal compute capability".
+ */
class CV_EXPORTS TargetArchs
{
public:
+ /** @brief The following method checks whether the module was built with the support of the given feature:
+
+ @param feature_set Features to be checked. See cuda::FeatureSet.
+ */
static bool builtWith(FeatureSet feature_set);
+ /** @brief There is a set of methods to check whether the module contains intermediate (PTX) or binary CUDA
+ code for the given architecture(s):
+
+ @param major Major compute capability version.
+ @param minor Minor compute capability version.
+ */
static bool has(int major, int minor);
static bool hasPtx(int major, int minor);
static bool hasBin(int major, int minor);
@@ -498,17 +601,25 @@ public:
static bool hasEqualOrGreaterBin(int major, int minor);
};
-//! information about the given GPU.
+/** @brief Class providing functionality for querying the specified GPU properties.
+ */
class CV_EXPORTS DeviceInfo
{
public:
//! creates DeviceInfo object for the current GPU
DeviceInfo();
- //! creates DeviceInfo object for the given GPU
+ /** @brief The constructors.
+
+ @param device_id System index of the CUDA device starting with 0.
+
+ Constructs the DeviceInfo object for the specified device. If the device_id parameter is omitted, it
+ constructs an object for the current device.
+ */
DeviceInfo(int device_id);
- //! device number.
+ /** @brief Returns system index of the CUDA device starting with 0.
+ */
int deviceID() const;
//! ASCII string identifying device
@@ -680,10 +791,19 @@ public:
size_t freeMemory() const;
size_t totalMemory() const;
- //! checks whether device supports the given feature
+ /** @brief Provides information on CUDA feature support.
+
+ @param feature_set Features to be checked. See cuda::FeatureSet.
+
+ This function returns true if the device has the specified CUDA feature. Otherwise, it returns
+ false.
bool supports(FeatureSet feature_set) const;
- //! checks whether the CUDA module can be run on the given device
+ /** @brief Checks the CUDA module and device compatibility.
+
+ This function returns true if the CUDA module can be run on the specified device. Otherwise, it
+ returns false.
+ */
bool isCompatible() const;
private:
@@ -693,7 +813,7 @@ private:
CV_EXPORTS void printCudaDeviceInfo(int device);
CV_EXPORTS void printShortCudaDeviceInfo(int device);
-//! @}
+//! @} cuda_init
}} // namespace cv { namespace cuda {
diff --git a/modules/core/include/opencv2/core/cuda_stream_accessor.hpp b/modules/core/include/opencv2/core/cuda_stream_accessor.hpp
index 4eb4ba61ad..66aaf56c52 100644
--- a/modules/core/include/opencv2/core/cuda_stream_accessor.hpp
+++ b/modules/core/include/opencv2/core/cuda_stream_accessor.hpp
@@ -66,6 +66,11 @@ namespace cv
class Stream;
class Event;
+ /** @brief Class that enables getting cudaStream_t from cuda::Stream.
+
+ The class is declared in a separate header because it is the only public header that depends on
+ the CUDA Runtime API. Including it brings a dependency to your code.
+ */
struct StreamAccessor
{
CV_EXPORTS static cudaStream_t getStream(const Stream& stream);
diff --git a/modules/core/include/opencv2/core/cuda_types.hpp b/modules/core/include/opencv2/core/cuda_types.hpp
index ec67ae08ba..490086fb0a 100644
--- a/modules/core/include/opencv2/core/cuda_types.hpp
+++ b/modules/core/include/opencv2/core/cuda_types.hpp
@@ -89,6 +89,11 @@ namespace cv
size_t size;
};
+ /** @brief Structure similar to cuda::PtrStepSz but containing only a pointer and row step.
+
+ Width and height fields are excluded due to performance reasons. The structure is intended
+ for internal use or for users who write device code.
+ */
template <typename T> struct PtrStep : public DevPtr<T>
{
__CV_CUDA_HOST_DEVICE__ PtrStep() : step(0) {}
@@ -104,6 +109,12 @@ namespace cv
__CV_CUDA_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
};
+ /** @brief Lightweight class encapsulating pitched memory on a GPU and passed to nvcc-compiled code (CUDA
+ kernels).
+
+ Typically, it is used internally by OpenCV and by users who write device code. You can call
+ its members from both host and device code.
+ */
template <typename T> struct PtrStepSz : public PtrStep<T>
{
__CV_CUDA_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
diff --git a/modules/cuda/doc/introduction.markdown b/modules/cuda/doc/introduction.markdown
new file mode 100644
index 0000000000..ebe8c21af3
--- /dev/null
+++ b/modules/cuda/doc/introduction.markdown
@@ -0,0 +1,85 @@
+CUDA Module Introduction {#cuda_intro}
+========================
+
+General Information
+-------------------
+
+The OpenCV CUDA module is a set of classes and functions to utilize CUDA computational capabilities.
+It is implemented using NVIDIA\* CUDA\* Runtime API and supports only NVIDIA GPUs. The OpenCV CUDA
+module includes utility functions, low-level vision primitives, and high-level algorithms. The
+utility functions and low-level primitives provide a powerful infrastructure for developing fast
+vision algorithms taking advantage of CUDA whereas the high-level functionality includes some
+state-of-the-art algorithms (such as stereo correspondence, face and people detectors, and others)
+ready to be used by the application developers.
+
+The CUDA module is designed as a host-level API. This means that if you have pre-compiled OpenCV
+CUDA binaries, you are not required to have the CUDA Toolkit installed or write any extra code to
+make use of the CUDA.
+
+The OpenCV CUDA module is designed for ease of use and does not require any knowledge of CUDA.
+Though, such knowledge will certainly be useful to handle non-trivial cases or achieve the highest
+performance. It is helpful to understand the cost of various operations, what the GPU does, what the
+preferred data formats are, and so on. The CUDA module is an effective instrument for quick
+implementation of CUDA-accelerated computer vision algorithms. However, if your algorithm involves
+many simple operations, then, for the best possible performance, you may still need to write your
+own kernels to avoid extra write and read operations on the intermediate results.
+
+To enable CUDA support, configure OpenCV using CMake with WITH\_CUDA=ON . When the flag is set and
+if CUDA is installed, the full-featured OpenCV CUDA module is built. Otherwise, the module is still
+built but at runtime all functions from the module throw Exception with CV\_GpuNotSupported error
+code, except for cuda::getCudaEnabledDeviceCount(). The latter function returns zero GPU count in
+this case. Building OpenCV without CUDA support does not perform device code compilation, so it does
+not require the CUDA Toolkit installed. Therefore, using the cuda::getCudaEnabledDeviceCount()
+function, you can implement a high-level algorithm that will detect GPU presence at runtime and
+choose an appropriate implementation (CPU or GPU) accordingly.
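+
+A minimal sketch of such a dispatch (runGpuPipeline and runCpuPipeline are hypothetical
+application functions):
+
+```cpp
+if (cv::cuda::getCudaEnabledDeviceCount() > 0)
+    runGpuPipeline();
+else
+    runCpuPipeline();
+```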
+
+Compilation for Different NVIDIA\* Platforms
+--------------------------------------------
+
+NVIDIA\* compiler enables generating binary code (cubin and fatbin) and intermediate code (PTX).
+Binary code often implies a specific GPU architecture and generation, so the compatibility with
+other GPUs is not guaranteed. PTX is targeted for a virtual platform that is defined entirely by the
+set of capabilities or features. Depending on the selected virtual platform, some of the
+instructions are emulated or disabled, even if the real hardware supports all the features.
+
+At the first call, the PTX code is compiled to binary code for the particular GPU using a JIT
+compiler. When the target GPU has a compute capability (CC) lower than the PTX code, JIT fails. By
+default, the OpenCV CUDA module includes:
+
+- Binaries for compute capabilities 1.3 and 2.0 (controlled by CUDA\_ARCH\_BIN in CMake)
+- PTX code for compute capabilities 1.1 and 1.3 (controlled by CUDA\_ARCH\_PTX in CMake)
+
+This means that for devices with CC 1.3 and 2.0 binary images are ready to run. For all newer
+platforms, the PTX code for 1.3 is JIT'ed to a binary image. For devices with CC 1.1 and 1.2, the
+PTX for 1.1 is JIT'ed. For devices with CC 1.0, no code is available and the functions throw
+Exception. For platforms where JIT compilation is performed first, the run is slow.
+
+On a GPU with CC 1.0, you can still compile the CUDA module and most of the functions will run
+flawlessly. To achieve this, add "1.0" to the list of binaries, for example,
+CUDA\_ARCH\_BIN="1.0 1.3 2.0" . The functions that cannot be run on CC 1.0 GPUs throw an exception.
+
+You can always determine at runtime whether the OpenCV GPU-built binaries (or PTX code) are
+compatible with your GPU. The function cuda::DeviceInfo::isCompatible returns the compatibility
+status (true/false).
+
+Utilizing Multiple GPUs
+-----------------------
+
+In the current version, each of the OpenCV CUDA algorithms can use only a single GPU. So, to utilize
+multiple GPUs, you have to distribute the work between them manually. The active device can be
+switched with the cuda::setDevice() function. For more details, see the CUDA C Programming Guide.
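+
+A minimal sketch of manual work distribution (illustrative only; splitWork and processPart are
+hypothetical helpers, and production code would typically drive each device from its own host
+thread):
+@code{.cpp}
+    for (int dev = 0; dev < cv::cuda::getCudaEnabledDeviceCount(); ++dev)
+    {
+        cv::cuda::setDevice(dev);                     // make this GPU current
+        cv::cuda::GpuMat part(splitWork(input, dev)); // upload this device's share
+        processPart(part);                            // hypothetical per-device processing
+    }
+@endcode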
+
+While developing algorithms for multiple GPUs, keep the data passing overhead in mind. For primitive
+functions and small images, it can be significant and may eliminate all the advantages of having
+multiple GPUs. But for high-level algorithms, multi-GPU acceleration can be worthwhile. For example,
+the Stereo Block Matching algorithm has been successfully parallelized using the following scheme:
+
+1. Split each image of the stereo pair into two horizontal overlapping stripes.
+2. Process each pair of stripes (from the left and right images) on a separate Fermi\* GPU.
+3. Merge the results into a single disparity map.
+
+With this algorithm, a dual GPU gave a 180% performance increase compared to a single Fermi GPU.
+For a source code example, see .
diff --git a/modules/cuda/include/opencv2/cuda.hpp b/modules/cuda/include/opencv2/cuda.hpp
index a42bfb7d83..ac51b87dde 100644
--- a/modules/cuda/include/opencv2/cuda.hpp
+++ b/modules/cuda/include/opencv2/cuda.hpp
@@ -49,10 +49,25 @@
#include "opencv2/core/cuda.hpp"
+/**
+@defgroup cuda CUDA-accelerated Computer Vision
+ @ref cuda_intro "Introduction page"
+ @{
+ @defgroup cuda_init Initialization and Information
+ @defgroup cuda_struct Data Structures
+ @defgroup cuda_calib3d Camera Calibration and 3D Reconstruction
+ @defgroup cuda_objdetect Object Detection
+ @}
+
+ */
+
namespace cv { namespace cuda {
//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////
+//! @addtogroup cuda_objdetect
+//! @{
+
struct CV_EXPORTS HOGConfidence
{
double scale;
@@ -61,31 +76,92 @@ struct CV_EXPORTS HOGConfidence
std::vector<double> part_scores[4];
};
+/** @brief The class implements the Histogram of Oriented Gradients (@cite Dalal2005) object detector.
+
+Interfaces of all methods are kept similar to the CPU HOG descriptor and detector analogues as much
+as possible.
+
+@note
+ - An example applying the HOG descriptor for people detection can be found at
+ opencv_source_code/samples/cpp/peopledetect.cpp
+ - A CUDA example applying the HOG descriptor for people detection can be found at
+ opencv_source_code/samples/gpu/hog.cpp
+ - (Python) An example applying the HOG descriptor for people detection can be found at
+ opencv_source_code/samples/python2/peopledetect.py
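+
+A minimal usage sketch (illustrative only; img_cpu is an assumed input Mat of type CV_8UC1 or
+CV_8UC4):
+@code
+    cv::cuda::HOGDescriptor hog;
+    hog.setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());
+
+    cv::cuda::GpuMat d_img(img_cpu);
+    std::vector<cv::Rect> people;
+    hog.detectMultiScale(d_img, people);
+@endcode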
+ */
struct CV_EXPORTS HOGDescriptor
{
enum { DEFAULT_WIN_SIGMA = -1 };
enum { DEFAULT_NLEVELS = 64 };
enum { DESCR_FORMAT_ROW_BY_ROW, DESCR_FORMAT_COL_BY_COL };
+ /** @brief Creates the HOG descriptor and detector.
+
+ @param win_size Detection window size. Align to block size and block stride.
+ @param block_size Block size in pixels. Align to cell size. Only (16,16) is supported for now.
+ @param block_stride Block stride. It must be a multiple of cell size.
+ @param cell_size Cell size. Only (8, 8) is supported for now.
+ @param nbins Number of bins. Only 9 bins per cell are supported for now.
+ @param win_sigma Gaussian smoothing window parameter.
+ @param threshold_L2hys L2-Hys normalization method shrinkage.
+ @param gamma_correction Flag to specify whether the gamma correction preprocessing is required or
+ not.
+ @param nlevels Maximum number of detection window increases.
+ */
HOGDescriptor(Size win_size=Size(64, 128), Size block_size=Size(16, 16),
Size block_stride=Size(8, 8), Size cell_size=Size(8, 8),
int nbins=9, double win_sigma=DEFAULT_WIN_SIGMA,
double threshold_L2hys=0.2, bool gamma_correction=true,
int nlevels=DEFAULT_NLEVELS);
+ /** @brief Returns the number of coefficients required for the classification.
+ */
size_t getDescriptorSize() const;
+ /** @brief Returns the block histogram size.
+ */
size_t getBlockHistogramSize() const;
+ /** @brief Sets coefficients for the linear SVM classifier.
+ */
void setSVMDetector(const std::vector<float>& detector);
+ /** @brief Returns coefficients of the classifier trained for people detection (for default window size).
+ */
static std::vector<float> getDefaultPeopleDetector();
+ /** @brief Returns coefficients of the classifier trained for people detection (for 48x96 windows).
+ */
static std::vector<float> getPeopleDetector48x96();
+ /** @brief Returns coefficients of the classifier trained for people detection (for 64x128 windows).
+ */
static std::vector<float> getPeopleDetector64x128();
+ /** @brief Performs object detection without a multi-scale window.
+
+ @param img Source image. CV_8UC1 and CV_8UC4 types are supported for now.
+ @param found_locations Left-top corner points of detected objects boundaries.
+ @param hit_threshold Threshold for the distance between features and SVM classifying plane.
+ Usually it is 0 and should be specified in the detector coefficients (as the last free
+ coefficient). But if the free coefficient is omitted (which is allowed), you can specify it
+ manually here.
+ @param win_stride Window stride. It must be a multiple of block stride.
+ @param padding Mock parameter to keep the CPU interface compatibility. It must be (0,0).
+ */
void detect(const GpuMat& img, std::vector<Point>& found_locations,
double hit_threshold=0, Size win_stride=Size(),
Size padding=Size());
+ /** @brief Performs object detection with a multi-scale window.
+
+ @param img Source image. See cuda::HOGDescriptor::detect for type limitations.
+ @param found_locations Detected objects boundaries.
+ @param hit_threshold Threshold for the distance between features and SVM classifying plane. See
+ cuda::HOGDescriptor::detect for details.
+ @param win_stride Window stride. It must be a multiple of block stride.
+ @param padding Mock parameter to keep the CPU interface compatibility. It must be (0,0).
+ @param scale0 Coefficient of the detection window increase.
+ @param group_threshold Coefficient to regulate the similarity threshold. When detected, some
+ objects can be covered by many rectangles. 0 means not to perform grouping. See groupRectangles .
+ */
void detectMultiScale(const GpuMat& img, std::vector<Rect>& found_locations,
double hit_threshold=0, Size win_stride=Size(),
Size padding=Size(), double scale0=1.05,
@@ -98,6 +174,17 @@ struct CV_EXPORTS HOGDescriptor
double hit_threshold, Size win_stride, Size padding,
std::vector<HOGConfidence> &conf_out, int group_threshold);
+ /** @brief Returns block descriptors computed for the whole image.
+
+ @param img Source image. See cuda::HOGDescriptor::detect for type limitations.
+ @param win_stride Window stride. It must be a multiple of block stride.
+ @param descriptors 2D array of descriptors.
+ @param descr_format Descriptor storage format:
+ - **DESCR_FORMAT_ROW_BY_ROW** - Row-major order.
+ - **DESCR_FORMAT_COL_BY_COL** - Column-major order.
+
+ The function is mainly used to learn the classifier.
+ */
void getDescriptors(const GpuMat& img, Size win_stride,
GpuMat& descriptors,
int descr_format=DESCR_FORMAT_COL_BY_COL);
@@ -145,20 +232,82 @@ protected:
//////////////////////////// CascadeClassifier ////////////////////////////
-// The cascade classifier class for object detection: supports old haar and new lbp xlm formats and nvbin for haar cascades olny.
+/** @brief Cascade classifier class used for object detection. Supports HAAR and LBP cascades.
+
+@note
+ - A cascade classifier example can be found at
+ opencv_source_code/samples/gpu/cascadeclassifier.cpp
+ - An NVIDIA API specific cascade classifier example can be found at
+ opencv_source_code/samples/gpu/cascadeclassifier_nvidia_api.cpp
+ */
class CV_EXPORTS CascadeClassifier_CUDA
{
public:
CascadeClassifier_CUDA();
+ /** @brief Loads the classifier from a file. Cascade type is detected automatically by constructor parameter.
+
+ @param filename Name of the file from which the classifier is loaded. Only the old haar classifier
+ (trained by the haar training application) and NVIDIA's nvbin are supported for HAAR, and only the
+ new type of OpenCV XML cascade is supported for LBP.
+ */
CascadeClassifier_CUDA(const String& filename);
~CascadeClassifier_CUDA();
+ /** @brief Checks whether the classifier is loaded or not.
+ */
bool empty() const;
+ /** @brief Loads the classifier from a file. The previous content is destroyed.
+
+ @param filename Name of the file from which the classifier is loaded. Only the old haar classifier
+ (trained by the haar training application) and NVIDIA's nvbin are supported for HAAR, and only the
+ new type of OpenCV XML cascade is supported for LBP.
+ */
bool load(const String& filename);
+ /** @brief Destroys the loaded classifier.
+ */
void release();
- /* returns number of detected objects */
+ /** @overload */
int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor = 1.2, int minNeighbors = 4, Size minSize = Size());
+ /** @brief Detects objects of different sizes in the input image.
+
+ @param image Matrix of type CV_8U containing an image where objects should be detected.
+ @param objectsBuf Buffer to store detected objects (rectangles). If it is empty, it is allocated
+ with the default size. If not empty, the function searches not more than N objects, where
+ N = sizeof(objectsBuf's data)/sizeof(cv::Rect).
+ @param maxObjectSize Maximum possible object size. Objects larger than that are ignored. Used for
+ second signature and supported only for LBP cascades.
+ @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
+ @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
+ to retain it.
+ @param minSize Minimum possible object size. Objects smaller than that are ignored.
+
+ The detected objects are returned as a list of rectangles.
+
+ The function returns the number of detected objects, so you can retrieve them as in the following
+ example:
+ @code
+ cuda::CascadeClassifier_CUDA cascade_gpu(...);
+
+ Mat image_cpu = imread(...);
+ GpuMat image_gpu(image_cpu);
+
+ GpuMat objbuf;
+ int detections_number = cascade_gpu.detectMultiScale( image_gpu,
+ objbuf, 1.2, minNeighbors);
+
+ Mat obj_host;
+ // download only detected number of rectangles
+ objbuf.colRange(0, detections_number).download(obj_host);
+
+ Rect* faces = obj_host.ptr<Rect>();
+ for(int i = 0; i < detections_number; ++i)
+ cv::rectangle(image_cpu, faces[i], Scalar(255));
+
+ imshow("Faces", image_cpu);
+ @endcode
+ @sa CascadeClassifier::detectMultiScale
+ */
int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize = Size(), double scaleFactor = 1.1, int minNeighbors = 4);
bool findLargestObject;
@@ -174,8 +323,13 @@ private:
friend class CascadeClassifier_CUDA_LBP;
};
+//! @} cuda_objdetect
+
//////////////////////////// Labeling ////////////////////////////
+//! @addtogroup cuda
+//! @{
+
//!performs labeling via graph cuts of a 2D regular 4-connected graph.
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels,
GpuMat& buf, Stream& stream = Stream::Null());
@@ -192,8 +346,13 @@ CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Sc
//! performs connected componnents labeling.
CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null());
+//! @}
+
//////////////////////////// Calib3d ////////////////////////////
+//! @addtogroup cuda_calib3d
+//! @{
+
CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
GpuMat& dst, Stream& stream = Stream::Null());
@@ -201,13 +360,34 @@ CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tve
const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst,
Stream& stream = Stream::Null());
+/** @brief Finds the object pose from 3D-2D point correspondences.
+
+@param object Single-row matrix of object points.
+@param image Single-row matrix of image points.
+@param camera_mat 3x3 matrix of intrinsic camera parameters.
+@param dist_coef Distortion coefficients. See undistortPoints for details.
+@param rvec Output 3D rotation vector.
+@param tvec Output 3D translation vector.
+@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an
+initial transformation guess. It is not supported for now.
+@param num_iters Maximum number of RANSAC iterations.
+@param max_dist Euclidean distance threshold to detect whether point is inlier or not.
+@param min_inlier_count Flag to indicate that the function must stop if a greater or equal number
+of inliers is achieved. It is not supported for now.
+@param inliers Output vector of inlier indices.
+ */
CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false,
int num_iters=100, float max_dist=8.0, int min_inlier_count=100,
std::vector<int>* inliers=NULL);
+//! @}
+
//////////////////////////// VStab ////////////////////////////
+//! @addtogroup cuda
+//! @{
+
//! removes points (CV_32FC2, single row matrix) with zero mask value
CV_EXPORTS void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask);
@@ -215,6 +395,8 @@ CV_EXPORTS void calcWobbleSuppressionMaps(
int left, int idx, int right, Size size, const Mat &ml, const Mat &mr,
GpuMat &mapx, GpuMat &mapy);
+//! @}
+
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDA_HPP__ */
diff --git a/modules/cudaarithm/include/opencv2/cudaarithm.hpp b/modules/cudaarithm/include/opencv2/cudaarithm.hpp
index e493fd759c..8f3d352baf 100644
--- a/modules/cudaarithm/include/opencv2/cudaarithm.hpp
+++ b/modules/cudaarithm/include/opencv2/cudaarithm.hpp
@@ -49,18 +49,85 @@
#include "opencv2/core/cuda.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudaarithm Operations on Matrices
+ @{
+ @defgroup cudaarithm_core Core Operations on Matrices
+ @defgroup cudaarithm_elem Per-element Operations
+ @defgroup cudaarithm_reduce Matrix Reductions
+ @defgroup cudaarithm_arithm Arithm Operations on Matrices
+ @}
+ @}
+ */
+
namespace cv { namespace cuda {
-//! adds one matrix to another (dst = src1 + src2)
+//! @addtogroup cudaarithm
+//! @{
+
+//! @addtogroup cudaarithm_elem
+//! @{
+
+/** @brief Computes a matrix-matrix or matrix-scalar sum.
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar. Matrix should have the same size and type as src1 .
+@param dst Destination matrix that has the same size and number of channels as the input array(s).
+The depth is defined by dtype or src1 depth.
+@param mask Optional operation mask, 8-bit single channel array, that specifies elements of the
+destination array to be changed.
+@param dtype Optional depth of the output array.
+@param stream Stream for the asynchronous version.
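+
+A minimal usage sketch (illustrative; the matrix contents are assumptions):
+@code
+    cv::cuda::GpuMat a(1, 4, CV_32FC1, cv::Scalar::all(1));
+    cv::cuda::GpuMat b(1, 4, CV_32FC1, cv::Scalar::all(2));
+    cv::cuda::GpuMat c;
+    cv::cuda::add(a, b, c); // c = a + b, computed on the GPU
+@endcode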
+
+@sa add
+ */
CV_EXPORTS void add(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), int dtype = -1, Stream& stream = Stream::Null());
-//! subtracts one matrix from another (dst = src1 - src2)
+/** @brief Computes a matrix-matrix or matrix-scalar difference.
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar. Matrix should have the same size and type as src1 .
+@param dst Destination matrix that has the same size and number of channels as the input array(s).
+The depth is defined by dtype or src1 depth.
+@param mask Optional operation mask, 8-bit single channel array, that specifies elements of the
+destination array to be changed.
+@param dtype Optional depth of the output array.
+@param stream Stream for the asynchronous version.
+
+@sa subtract
+ */
CV_EXPORTS void subtract(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), int dtype = -1, Stream& stream = Stream::Null());
-//! computes element-wise weighted product of the two arrays (dst = scale * src1 * src2)
+/** @brief Computes a matrix-matrix or matrix-scalar per-element product.
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar.
+@param dst Destination matrix that has the same size and number of channels as the input array(s).
+The depth is defined by dtype or src1 depth.
+@param scale Optional scale factor.
+@param dtype Optional depth of the output array.
+@param stream Stream for the asynchronous version.
+
+@sa multiply
+ */
CV_EXPORTS void multiply(InputArray src1, InputArray src2, OutputArray dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
-//! computes element-wise weighted quotient of the two arrays (dst = scale * (src1 / src2))
+/** @brief Computes a matrix-matrix or matrix-scalar division.
+
+@param src1 First source matrix or a scalar.
+@param src2 Second source matrix or scalar.
+@param dst Destination matrix that has the same size and number of channels as the input array(s).
+The depth is defined by dtype or src1 depth.
+@param scale Optional scale factor.
+@param dtype Optional depth of the output array.
+@param stream Stream for the asynchronous version.
+
+This function, in contrast to divide, uses a round-down rounding mode.
+
+@sa divide
+ */
CV_EXPORTS void divide(InputArray src1, InputArray src2, OutputArray dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted reciprocal of an array (dst = scale/src2)
@@ -69,59 +136,199 @@ static inline void divide(double src1, InputArray src2, OutputArray dst, int dty
divide(src1, src2, dst, 1.0, dtype, stream);
}
-//! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2))
+/** @brief Computes per-element absolute difference of two matrices (or of a matrix and scalar).
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar.
+@param dst Destination matrix that has the same size and type as the input array(s).
+@param stream Stream for the asynchronous version.
+
+@sa absdiff
+ */
CV_EXPORTS void absdiff(InputArray src1, InputArray src2, OutputArray dst, Stream& stream = Stream::Null());
-//! computes absolute value of each matrix element
+/** @brief Computes an absolute value of each matrix element.
+
+@param src Source matrix.
+@param dst Destination matrix with the same size and type as src .
+@param stream Stream for the asynchronous version.
+
+@sa abs
+ */
CV_EXPORTS void abs(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
-//! computes square of each pixel in an image
+/** @brief Computes a square value of each matrix element.
+
+@param src Source matrix.
+@param dst Destination matrix with the same size and type as src .
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void sqr(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
-//! computes square root of each pixel in an image
+/** @brief Computes a square root of each matrix element.
+
+@param src Source matrix.
+@param dst Destination matrix with the same size and type as src .
+@param stream Stream for the asynchronous version.
+
+@sa sqrt
+ */
CV_EXPORTS void sqrt(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
-//! computes exponent of each matrix element
+/** @brief Computes an exponent of each matrix element.
+
+@param src Source matrix.
+@param dst Destination matrix with the same size and type as src .
+@param stream Stream for the asynchronous version.
+
+@sa exp
+ */
CV_EXPORTS void exp(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
-//! computes natural logarithm of absolute value of each matrix element
+/** @brief Computes a natural logarithm of absolute value of each matrix element.
+
+@param src Source matrix.
+@param dst Destination matrix with the same size and type as src .
+@param stream Stream for the asynchronous version.
+
+@sa log
+ */
CV_EXPORTS void log(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
-//! computes power of each matrix element:
-//! (dst(i,j) = pow( src(i,j) , power), if src.type() is integer
-//! (dst(i,j) = pow(fabs(src(i,j)), power), otherwise
+/** @brief Raises every matrix element to a power.
+
+@param src Source matrix.
+@param power Exponent of power.
+@param dst Destination matrix with the same size and type as src .
+@param stream Stream for the asynchronous version.
+
+The function pow raises every element of the input matrix to power :
+
+\f[\texttt{dst} (I) = \fork{\texttt{src}(I)^power}{if \texttt{power} is integer}{|\texttt{src}(I)|^power}{otherwise}\f]
+
+@sa pow
+ */
CV_EXPORTS void pow(InputArray src, double power, OutputArray dst, Stream& stream = Stream::Null());
-//! compares elements of two arrays (dst = src1 src2)
+/** @brief Compares elements of two matrices (or of a matrix and scalar).
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar.
+@param dst Destination matrix that has the same size and type as the input array(s).
+@param cmpop Flag specifying the relation between the elements to be checked:
+- **CMP_EQ:** a(.) == b(.)
+- **CMP_GT:** a(.) \> b(.)
+- **CMP_GE:** a(.) \>= b(.)
+- **CMP_LT:** a(.) \< b(.)
+- **CMP_LE:** a(.) \<= b(.)
+- **CMP_NE:** a(.) != b(.)
+@param stream Stream for the asynchronous version.
+
+@sa compare
+ */
CV_EXPORTS void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop, Stream& stream = Stream::Null());
-//! performs per-elements bit-wise inversion
+/** @brief Performs a per-element bitwise inversion.
+
+@param src Source matrix.
+@param dst Destination matrix with the same size and type as src .
+@param mask Optional operation mask. 8-bit single channel image.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void bitwise_not(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
-//! calculates per-element bit-wise disjunction of two arrays
+/** @brief Performs a per-element bitwise disjunction of two matrices (or of matrix and scalar).
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar.
+@param dst Destination matrix that has the same size and type as the input array(s).
+@param mask Optional operation mask. 8-bit single channel image.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void bitwise_or(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
-//! calculates per-element bit-wise conjunction of two arrays
+/** @brief Performs a per-element bitwise conjunction of two matrices (or of matrix and scalar).
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar.
+@param dst Destination matrix that has the same size and type as the input array(s).
+@param mask Optional operation mask. 8-bit single channel image.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void bitwise_and(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
-//! calculates per-element bit-wise "exclusive or" operation
+/** @brief Performs a per-element bitwise exclusive or operation of two matrices (or of matrix and scalar).
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar.
+@param dst Destination matrix that has the same size and type as the input array(s).
+@param mask Optional operation mask. 8-bit single channel image.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void bitwise_xor(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
-//! pixel by pixel right shift of an image by a constant value
-//! supports 1, 3 and 4 channels images with integers elements
+/** @brief Performs pixel by pixel right shift of an image by a constant value.
+
+@param src Source matrix. Supports 1, 3 and 4 channels images with integers elements.
+@param val Constant values, one per channel.
+@param dst Destination matrix with the same size and type as src .
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void rshift(InputArray src, Scalar_<int> val, OutputArray dst, Stream& stream = Stream::Null());
-//! pixel by pixel left shift of an image by a constant value
-//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth
+/** @brief Performs pixel by pixel left shift of an image by a constant value.
+
+@param src Source matrix. Supports 1, 3 and 4 channels images with CV_8U , CV_16U or CV_32S
+depth.
+@param val Constant values, one per channel.
+@param dst Destination matrix with the same size and type as src .
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void lshift(InputArray src, Scalar_<int> val, OutputArray dst, Stream& stream = Stream::Null());
-//! computes per-element minimum of two arrays (dst = min(src1, src2))
+/** @brief Computes the per-element minimum of two matrices (or a matrix and a scalar).
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar.
+@param dst Destination matrix that has the same size and type as the input array(s).
+@param stream Stream for the asynchronous version.
+
+@sa min
+ */
CV_EXPORTS void min(InputArray src1, InputArray src2, OutputArray dst, Stream& stream = Stream::Null());
-//! computes per-element maximum of two arrays (dst = max(src1, src2))
+/** @brief Computes the per-element maximum of two matrices (or a matrix and a scalar).
+
+@param src1 First source matrix or scalar.
+@param src2 Second source matrix or scalar.
+@param dst Destination matrix that has the same size and type as the input array(s).
+@param stream Stream for the asynchronous version.
+
+@sa max
+ */
CV_EXPORTS void max(InputArray src1, InputArray src2, OutputArray dst, Stream& stream = Stream::Null());
-//! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
+/** @brief Computes the weighted sum of two arrays.
+
+@param src1 First source array.
+@param alpha Weight for the first array elements.
+@param src2 Second source array of the same size and channel number as src1 .
+@param beta Weight for the second array elements.
+@param dst Destination array that has the same size and number of channels as the input arrays.
+@param gamma Scalar added to each sum.
+@param dtype Optional depth of the destination array. When both input arrays have the same depth,
+dtype can be set to -1, which will be equivalent to src1.depth().
+@param stream Stream for the asynchronous version.
+
+The function addWeighted calculates the weighted sum of two arrays as follows:
+
+\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{src1} (I)* \texttt{alpha} + \texttt{src2} (I)* \texttt{beta} + \texttt{gamma} )\f]
+
+where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each
+channel is processed independently.
+
+@sa addWeighted
+ */
CV_EXPORTS void addWeighted(InputArray src1, double alpha, InputArray src2, double beta, double gamma, OutputArray dst,
int dtype = -1, Stream& stream = Stream::Null());
@@ -131,142 +338,352 @@ static inline void scaleAdd(InputArray src1, double alpha, InputArray src2, Outp
addWeighted(src1, alpha, src2, 1.0, 0.0, dst, -1, stream);
}
-//! applies fixed threshold to the image
+/** @brief Applies a fixed-level threshold to each array element.
+
+@param src Source array (single-channel).
+@param dst Destination array with the same size and type as src .
+@param thresh Threshold value.
+@param maxval Maximum value to use with THRESH_BINARY and THRESH_BINARY_INV threshold types.
+@param type Threshold type. For details, see threshold . The THRESH_OTSU and THRESH_TRIANGLE
+threshold types are not supported.
+@param stream Stream for the asynchronous version.
+
+@sa threshold
+ */
CV_EXPORTS double threshold(InputArray src, OutputArray dst, double thresh, double maxval, int type, Stream& stream = Stream::Null());
-//! computes magnitude of complex (x(i).re, x(i).im) vector
-//! supports only CV_32FC2 type
+/** @brief Computes magnitudes of complex matrix elements.
+
+@param xy Source complex matrix in the interleaved format ( CV_32FC2 ).
+@param magnitude Destination matrix of float magnitudes ( CV_32FC1 ).
+@param stream Stream for the asynchronous version.
+
+@sa magnitude
+ */
CV_EXPORTS void magnitude(InputArray xy, OutputArray magnitude, Stream& stream = Stream::Null());
-//! computes squared magnitude of complex (x(i).re, x(i).im) vector
-//! supports only CV_32FC2 type
+/** @brief Computes squared magnitudes of complex matrix elements.
+
+@param xy Source complex matrix in the interleaved format ( CV_32FC2 ).
+@param magnitude Destination matrix of float magnitude squares ( CV_32FC1 ).
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void magnitudeSqr(InputArray xy, OutputArray magnitude, Stream& stream = Stream::Null());
-//! computes magnitude of each (x(i), y(i)) vector
-//! supports only floating-point source
+/** @overload
+Computes the magnitude of each (x(i), y(i)) vector. Supports only floating-point sources.
+@param x Source matrix containing real components ( CV_32FC1 ).
+@param y Source matrix containing imaginary components ( CV_32FC1 ).
+@param magnitude Destination matrix of float magnitudes ( CV_32FC1 ).
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void magnitude(InputArray x, InputArray y, OutputArray magnitude, Stream& stream = Stream::Null());
-//! computes squared magnitude of each (x(i), y(i)) vector
-//! supports only floating-point source
+/** @overload
+Computes the squared magnitude of each (x(i), y(i)) vector. Supports only floating-point sources.
+@param x Source matrix containing real components ( CV_32FC1 ).
+@param y Source matrix containing imaginary components ( CV_32FC1 ).
+@param magnitude Destination matrix of float magnitude squares ( CV_32FC1 ).
+@param stream Stream for the asynchronous version.
+*/
CV_EXPORTS void magnitudeSqr(InputArray x, InputArray y, OutputArray magnitude, Stream& stream = Stream::Null());
-//! computes angle of each (x(i), y(i)) vector
-//! supports only floating-point source
+/** @brief Computes polar angles of complex matrix elements.
+
+@param x Source matrix containing real components ( CV_32FC1 ).
+@param y Source matrix containing imaginary components ( CV_32FC1 ).
+@param angle Destination matrix of angles ( CV_32FC1 ).
+@param angleInDegrees Flag for angles that must be evaluated in degrees.
+@param stream Stream for the asynchronous version.
+
+@sa phase
+ */
CV_EXPORTS void phase(InputArray x, InputArray y, OutputArray angle, bool angleInDegrees = false, Stream& stream = Stream::Null());
-//! converts Cartesian coordinates to polar
-//! supports only floating-point source
+/** @brief Converts Cartesian coordinates into polar.
+
+@param x Source matrix containing real components ( CV_32FC1 ).
+@param y Source matrix containing imaginary components ( CV_32FC1 ).
+@param magnitude Destination matrix of float magnitudes ( CV_32FC1 ).
+@param angle Destination matrix of angles ( CV_32FC1 ).
+@param angleInDegrees Flag for angles that must be evaluated in degrees.
+@param stream Stream for the asynchronous version.
+
+@sa cartToPolar
+ */
CV_EXPORTS void cartToPolar(InputArray x, InputArray y, OutputArray magnitude, OutputArray angle, bool angleInDegrees = false, Stream& stream = Stream::Null());
-//! converts polar coordinates to Cartesian
-//! supports only floating-point source
+/** @brief Converts polar coordinates into Cartesian.
+
+@param magnitude Source matrix containing magnitudes ( CV_32FC1 ).
+@param angle Source matrix containing angles ( CV_32FC1 ).
+@param x Destination matrix of real components ( CV_32FC1 ).
+@param y Destination matrix of imaginary components ( CV_32FC1 ).
+@param angleInDegrees Flag that indicates angles in degrees.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void polarToCart(InputArray magnitude, InputArray angle, OutputArray x, OutputArray y, bool angleInDegrees = false, Stream& stream = Stream::Null());
-//! makes multi-channel array out of several single-channel arrays
+//! @} cudaarithm_elem
+
+//! @addtogroup cudaarithm_core
+//! @{
+
+/** @brief Makes a multi-channel matrix out of several single-channel matrices.
+
+@param src Array/vector of source matrices.
+@param n Number of source matrices.
+@param dst Destination matrix.
+@param stream Stream for the asynchronous version.
+
+@sa merge
+ */
CV_EXPORTS void merge(const GpuMat* src, size_t n, OutputArray dst, Stream& stream = Stream::Null());
+/** @overload */
CV_EXPORTS void merge(const std::vector<GpuMat>& src, OutputArray dst, Stream& stream = Stream::Null());
-//! copies each plane of a multi-channel array to a dedicated array
+/** @brief Copies each plane of a multi-channel matrix into an array.
+
+@param src Source matrix.
+@param dst Destination array/vector of single-channel matrices.
+@param stream Stream for the asynchronous version.
+
+@sa split
+ */
CV_EXPORTS void split(InputArray src, GpuMat* dst, Stream& stream = Stream::Null());
+/** @overload */
CV_EXPORTS void split(InputArray src, std::vector<GpuMat>& dst, Stream& stream = Stream::Null());
-//! transposes the matrix
-//! supports matrix with element size = 1, 4 and 8 bytes (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc)
+/** @brief Transposes a matrix.
+
+@param src1 Source matrix. 1-, 4-, 8-byte element sizes are supported for now.
+@param dst Destination matrix.
+@param stream Stream for the asynchronous version.
+
+@sa transpose
+ */
CV_EXPORTS void transpose(InputArray src1, OutputArray dst, Stream& stream = Stream::Null());
-//! reverses the order of the rows, columns or both in a matrix
-//! supports 1, 3 and 4 channels images with CV_8U, CV_16U, CV_32S or CV_32F depth
+/** @brief Flips a 2D matrix around vertical, horizontal, or both axes.
+
+@param src Source matrix. Supports 1, 3 and 4 channels images with CV_8U, CV_16U, CV_32S or
+CV_32F depth.
+@param dst Destination matrix.
+@param flipCode Flip mode for the source:
+- 0 Flips around x-axis.
+- \> 0 Flips around y-axis.
+- \< 0 Flips around both axes.
+@param stream Stream for the asynchronous version.
+
+@sa flip
+ */
CV_EXPORTS void flip(InputArray src, OutputArray dst, int flipCode, Stream& stream = Stream::Null());
-//! transforms 8-bit unsigned integers using lookup table: dst(i)=lut(src(i))
-//! destination array will have the depth type as lut and the same channels number as source
-//! supports CV_8UC1, CV_8UC3 types
+/** @brief Base class for transform using lookup table.
+ */
class CV_EXPORTS LookUpTable : public Algorithm
{
public:
+ /** @brief Transforms the source matrix into the destination matrix using the given look-up table:
+ dst(I) = lut(src(I)) .
+
+ @param src Source matrix. CV_8UC1 and CV_8UC3 matrices are supported for now.
+ @param dst Destination matrix.
+ @param stream Stream for the asynchronous version.
+ */
virtual void transform(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
};
+/** @brief Creates implementation for cuda::LookUpTable .
+
+@param lut Look-up table of 256 elements. It is a continuous CV_8U matrix.
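+
+A minimal usage sketch (illustrative; this particular table simply inverts intensities):
+@code
+    cv::Mat lutValues(1, 256, CV_8UC1);
+    for (int i = 0; i < 256; ++i)
+        lutValues.at<uchar>(0, i) = (uchar)(255 - i);
+    cv::Ptr<cv::cuda::LookUpTable> lut = cv::cuda::createLookUpTable(lutValues);
+
+    cv::cuda::GpuMat src(64, 64, CV_8UC1), dst;
+    lut->transform(src, dst); // dst(I) = lutValues(src(I))
+@endcode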
+ */
CV_EXPORTS Ptr<LookUpTable> createLookUpTable(InputArray lut);
-//! copies 2D array to a larger destination array and pads borders with user-specifiable constant
+/** @brief Forms a border around an image.
+
+@param src Source image. CV_8UC1 , CV_8UC4 , CV_32SC1 , and CV_32FC1 types are supported.
+@param dst Destination image with the same type as src. The size is
+Size(src.cols+left+right, src.rows+top+bottom) .
+@param top
+@param bottom
+@param left
+@param right Number of pixels in each direction from the source image rectangle to extrapolate.
+For example: top=1, bottom=1, left=1, right=1 mean that 1 pixel-wide border needs to be built.
+@param borderType Border type. See borderInterpolate for details. BORDER_REFLECT101 ,
+BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.
+@param value Border value.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void copyMakeBorder(InputArray src, OutputArray dst, int top, int bottom, int left, int right, int borderType,
Scalar value = Scalar(), Stream& stream = Stream::Null());
-//! computes norm of array
-//! supports NORM_INF, NORM_L1, NORM_L2
-//! supports all matrices except 64F
+//! @} cudaarithm_core
+
+//! @addtogroup cudaarithm_reduce
+//! @{
+
+/** @brief Returns the norm of a matrix (or difference of two matrices).
+
+@param src1 Source matrix. Any matrices except 64F are supported.
+@param normType Norm type. NORM_L1 , NORM_L2 , and NORM_INF are supported for now.
+@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
+@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+
+@sa norm
+ */
CV_EXPORTS double norm(InputArray src1, int normType, InputArray mask, GpuMat& buf);
+/** @overload
+uses new buffer, no mask
+*/
static inline double norm(InputArray src, int normType)
{
GpuMat buf;
return norm(src, normType, GpuMat(), buf);
}
+/** @overload
+no mask
+*/
static inline double norm(InputArray src, int normType, GpuMat& buf)
{
return norm(src, normType, GpuMat(), buf);
}
-//! computes norm of the difference between two arrays
-//! supports NORM_INF, NORM_L1, NORM_L2
-//! supports only CV_8UC1 type
+/** @brief Returns the norm of the difference of two matrices.
+
+@param src1 Source matrix. Any matrices except 64F are supported.
+@param src2 Second source matrix (if any) with the same size and type as src1.
+@param normType Norm type. NORM_L1 , NORM_L2 , and NORM_INF are supported for now.
+@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+
+@sa norm
+ */
CV_EXPORTS double norm(InputArray src1, InputArray src2, GpuMat& buf, int normType=NORM_L2);
+/** @overload
+uses new buffer
+*/
static inline double norm(InputArray src1, InputArray src2, int normType=NORM_L2)
{
GpuMat buf;
return norm(src1, src2, buf, normType);
}
-//! computes sum of array elements
-//! supports only single channel images
+/** @brief Returns the sum of matrix elements.
+
+@param src Source image of any depth except for CV_64F .
+@param mask optional operation mask; it must have the same size as src and CV_8UC1 type.
+@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+
+@sa sum
+ */
CV_EXPORTS Scalar sum(InputArray src, InputArray mask, GpuMat& buf);
+/** @overload
+uses new buffer, no mask
+*/
static inline Scalar sum(InputArray src)
{
GpuMat buf;
return sum(src, GpuMat(), buf);
}
+/** @overload
+no mask
+*/
static inline Scalar sum(InputArray src, GpuMat& buf)
{
return sum(src, GpuMat(), buf);
}
-//! computes sum of array elements absolute values
-//! supports only single channel images
+/** @brief Returns the sum of absolute values for matrix elements.
+
+@param src Source image of any depth except for CV_64F .
+@param mask optional operation mask; it must have the same size as src and CV_8UC1 type.
+@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+ */
CV_EXPORTS Scalar absSum(InputArray src, InputArray mask, GpuMat& buf);
+/** @overload
+uses new buffer, no mask
+*/
static inline Scalar absSum(InputArray src)
{
GpuMat buf;
return absSum(src, GpuMat(), buf);
}
+/** @overload
+no mask
+*/
static inline Scalar absSum(InputArray src, GpuMat& buf)
{
return absSum(src, GpuMat(), buf);
}
-//! computes squared sum of array elements
-//! supports only single channel images
+/** @brief Returns the squared sum of matrix elements.
+
+@param src Source image of any depth except for CV_64F .
+@param mask optional operation mask; it must have the same size as src and CV_8UC1 type.
+@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+ */
CV_EXPORTS Scalar sqrSum(InputArray src, InputArray mask, GpuMat& buf);
+/** @overload
+uses new buffer, no mask
+*/
static inline Scalar sqrSum(InputArray src)
{
GpuMat buf;
return sqrSum(src, GpuMat(), buf);
}
+/** @overload
+no mask
+*/
static inline Scalar sqrSum(InputArray src, GpuMat& buf)
{
return sqrSum(src, GpuMat(), buf);
}
-//! finds global minimum and maximum array elements and returns their values
+/** @brief Finds global minimum and maximum matrix elements and returns their values.
+
+@param src Single-channel source image.
+@param minVal Pointer to the returned minimum value. Use NULL if not required.
+@param maxVal Pointer to the returned maximum value. Use NULL if not required.
+@param mask Optional mask to select a sub-matrix.
+@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+
+The function does not work with CV_64F images on GPUs with the compute capability \< 1.3.
+
+@sa minMaxLoc
+ */
CV_EXPORTS void minMax(InputArray src, double* minVal, double* maxVal, InputArray mask, GpuMat& buf);
+/** @overload
+uses new buffer
+*/
static inline void minMax(InputArray src, double* minVal, double* maxVal=0, InputArray mask=noArray())
{
GpuMat buf;
minMax(src, minVal, maxVal, mask, buf);
}
-//! finds global minimum and maximum array elements and returns their values with locations
+/** @brief Finds global minimum and maximum matrix elements and returns their values with locations.
+
+@param src Single-channel source image.
+@param minVal Pointer to the returned minimum value. Use NULL if not required.
+@param maxVal Pointer to the returned maximum value. Use NULL if not required.
+@param minLoc Pointer to the returned minimum location. Use NULL if not required.
+@param maxLoc Pointer to the returned maximum location. Use NULL if not required.
+@param mask Optional mask to select a sub-matrix.
+@param valbuf Optional values buffer to avoid extra memory allocations. It is resized
+automatically.
+@param locbuf Optional locations buffer to avoid extra memory allocations. It is resized
+automatically.
+
+The function does not work with CV_64F images on GPUs with the compute capability \< 1.3.
+
+@sa minMaxLoc
+ */
CV_EXPORTS void minMaxLoc(InputArray src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
InputArray mask, GpuMat& valbuf, GpuMat& locbuf);
+/** @overload
+uses new buffer
+*/
static inline void minMaxLoc(InputArray src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0,
InputArray mask=noArray())
{
@@ -274,34 +691,104 @@ static inline void minMaxLoc(InputArray src, double* minVal, double* maxVal=0, P
minMaxLoc(src, minVal, maxVal, minLoc, maxLoc, mask, valBuf, locBuf);
}
-//! counts non-zero array elements
+/** @brief Counts non-zero matrix elements.
+
+@param src Single-channel source image.
+@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+
+The function does not work with CV_64F images on GPUs with the compute capability \< 1.3.
+
+@sa countNonZero
+ */
CV_EXPORTS int countNonZero(InputArray src, GpuMat& buf);
+/** @overload
+uses new buffer
+*/
static inline int countNonZero(const GpuMat& src)
{
GpuMat buf;
return countNonZero(src, buf);
}
-//! reduces a matrix to a vector
+/** @brief Reduces a matrix to a vector.
+
+@param mtx Source 2D matrix.
+@param vec Destination vector. Its size and type are defined by the dim and dtype parameters.
+@param dim Dimension index along which the matrix is reduced. 0 means that the matrix is reduced
+to a single row. 1 means that the matrix is reduced to a single column.
+@param reduceOp Reduction operation that could be one of the following:
+- **CV_REDUCE_SUM** The output is the sum of all rows/columns of the matrix.
+- **CV_REDUCE_AVG** The output is the mean vector of all rows/columns of the matrix.
+- **CV_REDUCE_MAX** The output is the maximum (column/row-wise) of all rows/columns of the
+matrix.
+- **CV_REDUCE_MIN** The output is the minimum (column/row-wise) of all rows/columns of the
+matrix.
+@param dtype When it is negative, the destination vector will have the same type as the source
+matrix. Otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), mtx.channels()) .
+@param stream Stream for the asynchronous version.
+
+The function reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of
+1D vectors and performing the specified operation on the vectors until a single row/column is
+obtained. For example, the function can be used to compute horizontal and vertical projections of a
+raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG , the output may have a larger element
+bit-depth to preserve accuracy. Multi-channel arrays are also supported in these two reduction
+modes.
+
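+A minimal usage sketch (illustrative; the input matrix is an assumption):
+@code
+    cv::cuda::GpuMat m(4, 3, CV_32FC1, cv::Scalar::all(1));
+    cv::cuda::GpuMat rowSums;
+    cv::cuda::reduce(m, rowSums, 1, CV_REDUCE_SUM); // 4x1 column of row sums
+@endcode
+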
+@sa reduce
+ */
CV_EXPORTS void reduce(InputArray mtx, OutputArray vec, int dim, int reduceOp, int dtype = -1, Stream& stream = Stream::Null());
-//! computes mean value and standard deviation of all or selected array elements
-//! supports only CV_8UC1 type
+/** @brief Computes a mean value and a standard deviation of matrix elements.
+
+@param mtx Source matrix. CV_8UC1 matrices are supported for now.
+@param mean Mean value.
+@param stddev Standard deviation value.
+@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+
+@sa meanStdDev
+ */
CV_EXPORTS void meanStdDev(InputArray mtx, Scalar& mean, Scalar& stddev, GpuMat& buf);
+/** @overload
+uses new buffer
+*/
static inline void meanStdDev(InputArray src, Scalar& mean, Scalar& stddev)
{
GpuMat buf;
meanStdDev(src, mean, stddev, buf);
}
-//! computes the standard deviation of integral images
-//! supports only CV_32SC1 source type and CV_32FC1 sqr type
-//! output will have CV_32FC1 type
+/** @brief Computes a standard deviation of integral images.
+
+@param src Source image. Only the CV_32SC1 type is supported.
+@param sqr Squared source image. Only the CV_32FC1 type is supported.
+@param dst Destination image with the same type and size as src .
+@param rect Rectangular window.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void rectStdDev(InputArray src, InputArray sqr, OutputArray dst, Rect rect, Stream& stream = Stream::Null());
-//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values
+/** @brief Normalizes the norm or value range of an array.
+
+@param src Input array.
+@param dst Output array of the same size as src .
+@param alpha Norm value to normalize to or the lower range boundary in case of the range
+normalization.
+@param beta Upper range boundary in case of the range normalization; it is not used for the norm
+normalization.
+@param norm_type Normalization type ( NORM_MINMAX , NORM_L2 , NORM_L1 or NORM_INF ).
+@param dtype When negative, the output array has the same type as src; otherwise, it has the same
+number of channels as src and the depth =CV_MAT_DEPTH(dtype).
+@param mask Optional operation mask.
+@param norm_buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+@param cvt_buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+
+@sa normalize
+ */
CV_EXPORTS void normalize(InputArray src, OutputArray dst, double alpha, double beta,
int norm_type, int dtype, InputArray mask, GpuMat& norm_buf, GpuMat& cvt_buf);
+/** @overload
+uses new buffers
+*/
static inline void normalize(InputArray src, OutputArray dst, double alpha = 1, double beta = 0,
int norm_type = NORM_L2, int dtype = -1, InputArray mask = noArray())
{
@@ -310,65 +797,179 @@ static inline void normalize(InputArray src, OutputArray dst, double alpha = 1,
normalize(src, dst, alpha, beta, norm_type, dtype, mask, norm_buf, cvt_buf);
}
-//! computes the integral image
-//! sum will have CV_32S type, but will contain unsigned int values
-//! supports only CV_8UC1 source type
+/** @brief Computes an integral image.
+
+@param src Source image. Only CV_8UC1 images are supported for now.
+@param sum Integral image containing 32-bit unsigned integer values packed into CV_32SC1 .
+@param buffer Optional buffer to avoid extra memory allocations. It is resized automatically.
+@param stream Stream for the asynchronous version.
+
+@sa integral
+ */
CV_EXPORTS void integral(InputArray src, OutputArray sum, GpuMat& buffer, Stream& stream = Stream::Null());
static inline void integralBuffered(InputArray src, OutputArray sum, GpuMat& buffer, Stream& stream = Stream::Null())
{
integral(src, sum, buffer, stream);
}
+/** @overload
+uses new buffer
+*/
static inline void integral(InputArray src, OutputArray sum, Stream& stream = Stream::Null())
{
GpuMat buffer;
integral(src, sum, buffer, stream);
}
-//! computes squared integral image
-//! result matrix will have 64F type, but will contain 64U values
-//! supports source images of 8UC1 type only
+/** @brief Computes a squared integral image.
+
+@param src Source image. Only CV_8UC1 images are supported for now.
+@param sqsum Squared integral image containing 64-bit unsigned integer values packed into
+CV_64FC1 .
+@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void sqrIntegral(InputArray src, OutputArray sqsum, GpuMat& buf, Stream& stream = Stream::Null());
+/** @overload
+uses new buffer
+*/
static inline void sqrIntegral(InputArray src, OutputArray sqsum, Stream& stream = Stream::Null())
{
GpuMat buffer;
sqrIntegral(src, sqsum, buffer, stream);
}
+//! @} cudaarithm_reduce
+
+//! @addtogroup cudaarithm_arithm
+//! @{
+
+/** @brief Performs generalized matrix multiplication.
+
+@param src1 First multiplied input matrix that should have CV_32FC1 , CV_64FC1 , CV_32FC2 , or
+CV_64FC2 type.
+@param src2 Second multiplied input matrix of the same type as src1 .
+@param alpha Weight of the matrix product.
+@param src3 Third optional delta matrix added to the matrix product. It should have the same type
+as src1 and src2 .
+@param beta Weight of src3 .
+@param dst Destination matrix. It has the proper size and the same type as input matrices.
+@param flags Operation flags:
+- **GEMM_1_T** transpose src1
+- **GEMM_2_T** transpose src2
+- **GEMM_3_T** transpose src3
+@param stream Stream for the asynchronous version.
+
+The function performs generalized matrix multiplication similar to the gemm functions in BLAS level
+3. For example, gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T) corresponds to
+
+\f[\texttt{dst} = \texttt{alpha} \cdot \texttt{src1} ^T \cdot \texttt{src2} + \texttt{beta} \cdot \texttt{src3} ^T\f]
+
+@note Transposition operation doesn't support CV_64FC2 input type.
+
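+A minimal usage sketch (illustrative; the matrix sizes and contents are assumptions, and no delta
+matrix is used):
+@code
+    cv::cuda::GpuMat A(2, 3, CV_32FC1, cv::Scalar::all(1));
+    cv::cuda::GpuMat B(3, 4, CV_32FC1, cv::Scalar::all(1));
+    cv::cuda::GpuMat C;
+    cv::cuda::gemm(A, B, 1.0, cv::noArray(), 0.0, C); // C = A * B, a 2x4 matrix
+@endcode
+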
+@sa gemm
+ */
CV_EXPORTS void gemm(InputArray src1, InputArray src2, double alpha,
InputArray src3, double beta, OutputArray dst, int flags = 0, Stream& stream = Stream::Null());
-//! performs per-element multiplication of two full (not packed) Fourier spectrums
-//! supports 32FC2 matrices only (interleaved format)
+/** @brief Performs a per-element multiplication of two Fourier spectrums.
+
+@param src1 First spectrum.
+@param src2 Second spectrum with the same size and type as src1 .
+@param dst Destination spectrum.
+@param flags Mock parameter used for CPU/CUDA interfaces similarity.
+@param conjB Optional flag to specify if the second spectrum needs to be conjugated before the
+multiplication.
+@param stream Stream for the asynchronous version.
+
+Only full (not packed) CV_32FC2 complex spectrums in the interleaved format are supported for now.
+
+@sa mulSpectrums
+ */
CV_EXPORTS void mulSpectrums(InputArray src1, InputArray src2, OutputArray dst, int flags, bool conjB=false, Stream& stream = Stream::Null());
-//! performs per-element multiplication of two full (not packed) Fourier spectrums
-//! supports 32FC2 matrices only (interleaved format)
+/** @brief Performs a per-element multiplication of two Fourier spectrums and scales the result.
+
+@param src1 First spectrum.
+@param src2 Second spectrum with the same size and type as src1 .
+@param dst Destination spectrum.
+@param flags Mock parameter used for CPU/CUDA interfaces similarity.
+@param scale Scale constant.
+@param conjB Optional flag to specify if the second spectrum needs to be conjugated before the
+multiplication.
+@param stream Stream for the asynchronous version.
+
+Only full (not packed) CV_32FC2 complex spectrums in the interleaved format are supported for now.
+
+@sa mulSpectrums
+ */
CV_EXPORTS void mulAndScaleSpectrums(InputArray src1, InputArray src2, OutputArray dst, int flags, float scale, bool conjB=false, Stream& stream = Stream::Null());
-//! Performs a forward or inverse discrete Fourier transform (1D or 2D) of floating point matrix.
-//! Param dft_size is the size of DFT transform.
-//!
-//! If the source matrix is not continous, then additional copy will be done,
-//! so to avoid copying ensure the source matrix is continous one. If you want to use
-//! preallocated output ensure it is continuous too, otherwise it will be reallocated.
-//!
-//! Being implemented via CUFFT real-to-complex transform result contains only non-redundant values
-//! in CUFFT's format. Result as full complex matrix for such kind of transform cannot be retrieved.
-//!
-//! For complex-to-real transform it is assumed that the source matrix is packed in CUFFT's format.
+/** @brief Performs a forward or inverse discrete Fourier transform (1D or 2D) of the floating point matrix.
+
+@param src Source matrix (real or complex).
+@param dst Destination matrix (real or complex).
+@param dft_size Size of a discrete Fourier transform.
+@param flags Optional flags:
+- **DFT_ROWS** transforms each individual row of the source matrix.
+- **DFT_SCALE** scales the result: divide it by the number of elements in the transform
+(obtained from dft_size ).
+- **DFT_INVERSE** inverts DFT. Use for complex-complex cases (real-complex and complex-real
+cases are always forward and inverse, respectively).
+- **DFT_REAL_OUTPUT** specifies the output as real. The source matrix is the result of
+real-complex transform, so the destination matrix must be real.
+@param stream Stream for the asynchronous version.
+
+The function can handle real matrices ( CV_32FC1 ) and complex matrices in the interleaved format ( CV_32FC2 ).
+
+The source matrix should be continuous, otherwise reallocation and data copying is performed. The
+function chooses an operation mode depending on the flags, size, and channel count of the source
+matrix:
+
+- If the source matrix is complex and the output is not specified as real, the destination
+matrix is complex and has the dft_size size and CV_32FC2 type. The destination matrix
+contains a full result of the DFT (forward or inverse).
+- If the source matrix is complex and the output is specified as real, the function assumes that
+its input is the result of the forward transform (see the next item). The destination matrix
+has the dft_size size and CV_32FC1 type. It contains the result of the inverse DFT.
+- If the source matrix is real (its type is CV_32FC1 ), forward DFT is performed. The result of
+the DFT is packed into complex ( CV_32FC2 ) matrix. So, the width of the destination matrix
+is dft_size.width / 2 + 1 . But if the source is a single column, the height is reduced
+instead of the width.
+
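+A minimal forward-transform sketch for a real input (illustrative; the size is an assumption):
+@code
+    cv::cuda::GpuMat src(64, 64, CV_32FC1, cv::Scalar::all(0));
+    cv::cuda::GpuMat dst; // packed spectrum: 64 x 33, CV_32FC2
+    cv::cuda::dft(src, dst, src.size());
+@endcode
+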
+@sa dft
+ */
CV_EXPORTS void dft(InputArray src, OutputArray dst, Size dft_size, int flags=0, Stream& stream = Stream::Null());
-//! computes convolution (or cross-correlation) of two images using discrete Fourier transform
-//! supports source images of 32FC1 type only
-//! result matrix will have 32FC1 type
+/** @brief Base class for convolution (or cross-correlation) operator.
+ */
class CV_EXPORTS Convolution : public Algorithm
{
public:
+ /** @brief Computes a convolution (or cross-correlation) of two images.
+
+ @param image Source image. Only CV_32FC1 images are supported for now.
+ @param templ Template image. The size is not greater than the image size. The type is the same as
+ image .
+ @param result Result image. If image is *W x H* and templ is *w x h*, then result must be *W-w+1 x
+ H-h+1*.
+ @param ccorr Flags to evaluate cross-correlation instead of convolution.
+ @param stream Stream for the asynchronous version.
+ */
virtual void convolve(InputArray image, InputArray templ, OutputArray result, bool ccorr = false, Stream& stream = Stream::Null()) = 0;
};
+/** @brief Creates implementation for cuda::Convolution .
+
+@param user_block_size Block size. If you leave default value Size(0,0) then automatic
+estimation of block size will be used (which is optimized for speed). By varying user_block_size
+you can reduce memory requirements at the cost of speed.
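+
+A minimal usage sketch (illustrative; the image and template sizes and contents are assumptions):
+@code
+    cv::Ptr<cv::cuda::Convolution> conv = cv::cuda::createConvolution();
+    cv::cuda::GpuMat image(256, 256, CV_32FC1, cv::Scalar::all(1));
+    cv::cuda::GpuMat templ(16, 16, CV_32FC1, cv::Scalar::all(1));
+    cv::cuda::GpuMat result;
+    conv->convolve(image, templ, result); // result is (256-16+1) x (256-16+1)
+@endcode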
+ */
CV_EXPORTS Ptr<Convolution> createConvolution(Size user_block_size = Size());
+//! @} cudaarithm_arithm
+
+//! @} cudaarithm
+
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDAARITHM_HPP__ */
diff --git a/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp b/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp
index a08ed64b1a..4b5e305d6a 100644
--- a/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp
+++ b/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp
@@ -50,11 +50,33 @@
#include "opencv2/core/cuda.hpp"
#include "opencv2/video/background_segm.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudabgsegm Background Segmentation
+ @}
+ */
+
namespace cv { namespace cuda {
+//! @addtogroup cudabgsegm
+//! @{
+
////////////////////////////////////////////////////
// MOG
+/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
+
+The class discriminates between foreground and background pixels by building and maintaining a model
+of the background. Any pixel which does not fit this model is then deemed to be foreground. The
+class implements the algorithm described in @cite MOG2001 .
+
+@sa BackgroundSubtractorMOG
+
+@note
+ - An example on Gaussian mixture based background/foreground segmentation can be found at
+ opencv_source_code/samples/gpu/bgfg_segm.cpp
+ */
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractor
{
public:
@@ -78,6 +100,14 @@ public:
virtual void setNoiseSigma(double noiseSigma) = 0;
};
+/** @brief Creates a mixture-of-Gaussians background subtractor.
+
+@param history Length of the history.
+@param nmixtures Number of Gaussian mixtures.
+@param backgroundRatio Background ratio.
+@param noiseSigma Noise strength (standard deviation of the brightness or of each color channel).
+0 means an automatic value.
+ */
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG>
createBackgroundSubtractorMOG(int history = 200, int nmixtures = 5,
double backgroundRatio = 0.7, double noiseSigma = 0);
@@ -85,6 +115,14 @@ CV_EXPORTS Ptr
////////////////////////////////////////////////////
// MOG2
+/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
+
+The class discriminates between foreground and background pixels by building and maintaining a model
+of the background. Any pixel which does not fit this model is then deemed to be foreground. The
+class implements the algorithm described in @cite Zivkovic2004 .
+
+@sa BackgroundSubtractorMOG2
+ */
class CV_EXPORTS BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
{
public:
@@ -96,6 +134,15 @@ public:
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
};
+/** @brief Creates a MOG2 Background Subtractor.
+
+@param history Length of the history.
+@param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model
+to decide whether a pixel is well described by the background model. This parameter does not
+affect the background update.
+@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
+speed a bit, so if you do not need this feature, set the parameter to false.
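+
+A minimal sketch of the typical update loop (frame acquisition and upload are omitted):
+@code
+    cv::Ptr<cv::cuda::BackgroundSubtractorMOG2> mog2 =
+        cv::cuda::createBackgroundSubtractorMOG2();
+    cv::cuda::GpuMat frame, fgmask; // frame holds the current uploaded image
+    mog2->apply(frame, fgmask);     // learning rate is chosen automatically
+@endcode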
+ */
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16,
bool detectShadows = true);
@@ -103,6 +150,12 @@ CV_EXPORTS Ptr
////////////////////////////////////////////////////
// GMG
+/** @brief Background/Foreground Segmentation Algorithm.
+
+The class discriminates between foreground and background pixels by building and maintaining a model
+of the background. Any pixel which does not fit this model is then deemed to be foreground. The
+class implements the algorithm described in @cite Gold2012 .
+ */
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
{
public:
@@ -140,54 +193,71 @@ public:
virtual void setMaxVal(double val) = 0;
};
+/** @brief Creates a GMG Background Subtractor.
+
+@param initializationFrames Number of frames of video to use to initialize histograms.
+@param decisionThreshold Value above which a pixel is determined to be foreground.
+ */
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
////////////////////////////////////////////////////
// FGD
-/**
- * Foreground Object Detection from Videos Containing Complex Background.
- * Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
- * ACM MM2003 9p
+/** @brief The class discriminates between foreground and background pixels by building and maintaining a model
+of the background.
+
+Any pixel which does not fit this model is then deemed to be foreground. The class implements
+the algorithm described in @cite FGD2003 .
+@sa BackgroundSubtractor
*/
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
+ /** @brief Returns the output foreground regions calculated by findContours.
+
+ @param foreground_regions Output array (CPU memory).
+ */
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
struct CV_EXPORTS FGDParams
{
- int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
- int N1c; // Number of color vectors used to model normal background color variation at a given pixel.
- int N2c; // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
- // Used to allow the first N1c vectors to adapt over time to changing background.
+ int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
+ int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel.
+ int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
+ //!< Used to allow the first N1c vectors to adapt over time to changing background.
- int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
- int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
- int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
- // Used to allow the first N1cc vectors to adapt over time to changing background.
+ int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
+ int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
+ int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
+ //!< Used to allow the first N1cc vectors to adapt over time to changing background.
- bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
- int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations.
- // These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
+ bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
+ int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations.
+ //!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
- float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
- float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
- float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
+ float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1.
+ float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
+ float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
- float delta; // Affects color and color co-occurrence quantization, typically set to 2.
- float T; // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
- float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
+ float delta; //!< Affects color and color co-occurrence quantization, typically set to 2.
+ float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
+ float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold.
- // default Params
+ //! default Params
FGDParams();
};
+/** @brief Creates an FGD Background Subtractor.
+
+@param params Algorithm's parameters. See @cite FGD2003 for explanation.
+ */
CV_EXPORTS Ptr<cuda::BackgroundSubtractorFGD>
createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
+//! @}
+
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDABGSEGM_HPP__ */
diff --git a/modules/cudacodec/include/opencv2/cudacodec.hpp b/modules/cudacodec/include/opencv2/cudacodec.hpp
index 747c044ee5..610ecf607f 100644
--- a/modules/cudacodec/include/opencv2/cudacodec.hpp
+++ b/modules/cudacodec/include/opencv2/cudacodec.hpp
@@ -50,8 +50,18 @@
#include "opencv2/core/cuda.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudacodec Video Encoding/Decoding
+ @}
+ */
+
namespace cv { namespace cudacodec {
+//! @addtogroup cudacodec
+//! @{
+
////////////////////////////////// Video Encoding //////////////////////////////////
// Works only under Windows.
@@ -68,35 +78,53 @@ enum SurfaceFormat
SF_GRAY = SF_BGR
};
+/** @brief Different parameters for the CUDA video encoder.
+ */
struct CV_EXPORTS EncoderParams
{
- int P_Interval; // NVVE_P_INTERVAL,
- int IDR_Period; // NVVE_IDR_PERIOD,
- int DynamicGOP; // NVVE_DYNAMIC_GOP,
- int RCType; // NVVE_RC_TYPE,
- int AvgBitrate; // NVVE_AVG_BITRATE,
- int PeakBitrate; // NVVE_PEAK_BITRATE,
- int QP_Level_Intra; // NVVE_QP_LEVEL_INTRA,
- int QP_Level_InterP; // NVVE_QP_LEVEL_INTER_P,
- int QP_Level_InterB; // NVVE_QP_LEVEL_INTER_B,
- int DeblockMode; // NVVE_DEBLOCK_MODE,
- int ProfileLevel; // NVVE_PROFILE_LEVEL,
- int ForceIntra; // NVVE_FORCE_INTRA,
- int ForceIDR; // NVVE_FORCE_IDR,
- int ClearStat; // NVVE_CLEAR_STAT,
- int DIMode; // NVVE_SET_DEINTERLACE,
- int Presets; // NVVE_PRESETS,
- int DisableCabac; // NVVE_DISABLE_CABAC,
- int NaluFramingType; // NVVE_CONFIGURE_NALU_FRAMING_TYPE
- int DisableSPSPPS; // NVVE_DISABLE_SPS_PPS
+ int P_Interval; //!< NVVE_P_INTERVAL,
+ int IDR_Period; //!< NVVE_IDR_PERIOD,
+ int DynamicGOP; //!< NVVE_DYNAMIC_GOP,
+ int RCType; //!< NVVE_RC_TYPE,
+ int AvgBitrate; //!< NVVE_AVG_BITRATE,
+ int PeakBitrate; //!< NVVE_PEAK_BITRATE,
+ int QP_Level_Intra; //!< NVVE_QP_LEVEL_INTRA,
+ int QP_Level_InterP; //!< NVVE_QP_LEVEL_INTER_P,
+ int QP_Level_InterB; //!< NVVE_QP_LEVEL_INTER_B,
+ int DeblockMode; //!< NVVE_DEBLOCK_MODE,
+ int ProfileLevel; //!< NVVE_PROFILE_LEVEL,
+ int ForceIntra; //!< NVVE_FORCE_INTRA,
+ int ForceIDR; //!< NVVE_FORCE_IDR,
+ int ClearStat; //!< NVVE_CLEAR_STAT,
+ int DIMode; //!< NVVE_SET_DEINTERLACE,
+ int Presets; //!< NVVE_PRESETS,
+ int DisableCabac; //!< NVVE_DISABLE_CABAC,
+ int NaluFramingType; //!< NVVE_CONFIGURE_NALU_FRAMING_TYPE
+ int DisableSPSPPS; //!< NVVE_DISABLE_SPS_PPS
EncoderParams();
+ /** @brief Constructors.
+
+ @param configFile Config file name.
+
+ Creates default parameters or reads parameters from a config file.
+ */
explicit EncoderParams(const String& configFile);
+ /** @brief Reads parameters from config file.
+
+ @param configFile Config file name.
+ */
void load(const String& configFile);
+ /** @brief Saves parameters to config file.
+
+ @param configFile Config file name.
+ */
void save(const String& configFile) const;
};
+/** @brief Callbacks for the CUDA video encoder.
+ */
class CV_EXPORTS EncoderCallBack
{
public:
@@ -109,41 +137,109 @@ public:
virtual ~EncoderCallBack() {}
- //! callback function to signal the start of bitstream that is to be encoded
- //! callback must allocate host buffer for CUDA encoder and return pointer to it and it's size
+ /** @brief Callback function to signal the start of bitstream that is to be encoded.
+
+ Callback must allocate a buffer for the CUDA encoder and return a pointer to it and its size.
+ */
virtual uchar* acquireBitStream(int* bufferSize) = 0;
- //! callback function to signal that the encoded bitstream is ready to be written to file
+ /** @brief Callback function to signal that the encoded bitstream is ready to be written to file.
+ */
virtual void releaseBitStream(unsigned char* data, int size) = 0;
- //! callback function to signal that the encoding operation on the frame has started
+ /** @brief Callback function to signal that the encoding operation on the frame has started.
+
+ @param frameNumber
+ @param picType Specifies the frame type (I-Frame, P-Frame or B-Frame).
+ */
virtual void onBeginFrame(int frameNumber, PicType picType) = 0;
- //! callback function signals that the encoding operation on the frame has finished
+ /** @brief Callback function to signal that the encoding operation on the frame has finished.
+
+ @param frameNumber
+ @param picType Specifies the frame type (I-Frame, P-Frame or B-Frame).
+ */
virtual void onEndFrame(int frameNumber, PicType picType) = 0;
};
+/** @brief Video writer interface.
+
+The implementation uses the H264 video codec.
+
+@note Currently only the Windows platform is supported.
+
+@note
+ - An example on how to use the videoWriter class can be found at
+ opencv_source_code/samples/gpu/video_writer.cpp
+ */
class CV_EXPORTS VideoWriter
{
public:
virtual ~VideoWriter() {}
- //! writes the next frame from GPU memory
+ /** @brief Writes the next video frame.
+
+ @param frame The frame to be written.
+ @param lastFrame Indicates that it is the end of stream. The parameter can be ignored.
+
+ The method writes the specified image to the video file. The image must have the same size and the
+ same surface format as was specified when opening the video writer.
+ */
virtual void write(InputArray frame, bool lastFrame = false) = 0;
virtual EncoderParams getEncoderParams() const = 0;
};
-//! create VideoWriter for specified output file (only AVI file format is supported)
+/** @brief Creates video writer.
+
+@param fileName Name of the output video file. Only AVI file format is supported.
+@param frameSize Size of the input video frames.
+@param fps Framerate of the created video stream.
+@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
+SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
+encoding; frames with other formats will be used as is.
+
+The function initializes the video writer. FFMPEG is used to write videos. Users can implement
+their own multiplexing with cudacodec::EncoderCallBack .
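+
+A bare-bones sketch (the file name, frame size, and frame rate are placeholders):
+@code
+    cv::Ptr<cv::cudacodec::VideoWriter> writer =
+        cv::cudacodec::createVideoWriter("output.avi", cv::Size(1920, 1080), 25.0);
+    cv::cuda::GpuMat frame; // BGR frame uploaded to GPU memory
+    // inside the capture loop:
+    writer->write(frame);
+@endcode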
+ */
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
+/** @overload
+@param fileName Name of the output video file. Only AVI file format is supported.
+@param frameSize Size of the input video frames.
+@param fps Framerate of the created video stream.
+@param params Encoder parameters. See cudacodec::EncoderParams .
+@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
+SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
+encoding; frames with other formats will be used as is.
+*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
-//! create VideoWriter for user-defined callbacks
+/** @overload
+@param encoderCallback Callbacks for video encoder. See cudacodec::EncoderCallBack . Use it if you
+want to work with raw video stream.
+@param frameSize Size of the input video frames.
+@param fps Framerate of the created video stream.
+@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
+SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
+encoding; frames with other formats will be used as is.
+*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
+/** @overload
+@param encoderCallback Callbacks for video encoder. See cudacodec::EncoderCallBack . Use it if you
+want to work with raw video stream.
+@param frameSize Size of the input video frames.
+@param fps Framerate of the created video stream.
+@param params Encoder parameters. See cudacodec::EncoderParams .
+@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
+SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
+encoding; frames with other formats will be used as is.
+*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
////////////////////////////////// Video Decoding //////////////////////////////////////////
+/** @brief Video codecs supported by cudacodec::VideoReader .
+ */
enum Codec
{
MPEG1 = 0,
@@ -155,13 +251,15 @@ enum Codec
H264_SVC,
H264_MVC,
- Uncompressed_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), // Y,U,V (4:2:0)
- Uncompressed_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,V,U (4:2:0)
- Uncompressed_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,UV (4:2:0)
- Uncompressed_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), // YUYV/YUY2 (4:2:2)
- Uncompressed_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) // UYVY (4:2:2)
+ Uncompressed_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), //!< Y,U,V (4:2:0)
+ Uncompressed_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), //!< Y,V,U (4:2:0)
+ Uncompressed_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), //!< Y,UV (4:2:0)
+ Uncompressed_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), //!< YUYV/YUY2 (4:2:2)
+ Uncompressed_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) //!< UYVY (4:2:2)
};
+/** @brief Chroma formats supported by cudacodec::VideoReader .
+ */
enum ChromaFormat
{
Monochrome = 0,
@@ -170,6 +268,8 @@ enum ChromaFormat
YUV444
};
+/** @brief Struct providing information about video file format.
+ */
struct FormatInfo
{
Codec codec;
@@ -178,29 +278,65 @@ struct FormatInfo
int height;
};
+/** @brief Video reader interface.
+
+@note
+ - An example on how to use the videoReader class can be found at
+ opencv_source_code/samples/gpu/video_reader.cpp
+ */
class CV_EXPORTS VideoReader
{
public:
virtual ~VideoReader() {}
+ /** @brief Grabs, decodes and returns the next video frame.
+
+ If no frame has been grabbed (there are no more frames in the video file), the method returns
+ false. The method throws an Exception if an error occurs.
+ */
virtual bool nextFrame(OutputArray frame) = 0;
+ /** @brief Returns information about video file format.
+ */
virtual FormatInfo format() const = 0;
};
+/** @brief Interface for video demultiplexing.
+
+Users can implement their own demultiplexing by implementing this interface.
+ */
class CV_EXPORTS RawVideoSource
{
public:
virtual ~RawVideoSource() {}
+ /** @brief Returns the next packet with a RAW video frame.
+
+ @param data Pointer to frame data.
+ @param size Size in bytes of the current frame.
+ @param endOfFile Indicates that it is the end of stream.
+ */
virtual bool getNextPacket(unsigned char** data, int* size, bool* endOfFile) = 0;
+ /** @brief Returns information about video file format.
+ */
virtual FormatInfo format() const = 0;
};
+/** @brief Creates video reader.
+
+@param filename Name of the input video file.
+
+FFMPEG is used to read videos. Users can implement their own demultiplexing with cudacodec::RawVideoSource .
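+
+A minimal decode-loop sketch (the file name is a placeholder):
+@code
+    cv::Ptr<cv::cudacodec::VideoReader> reader = cv::cudacodec::createVideoReader("input.avi");
+    cv::cuda::GpuMat frame;
+    while (reader->nextFrame(frame))
+    {
+        // process the decoded frame in GPU memory
+    }
+@endcode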
+ */
CV_EXPORTS Ptr<VideoReader> createVideoReader(const String& filename);
+/** @overload
+@param source RAW video source implemented by user.
+*/
CV_EXPORTS Ptr<VideoReader> createVideoReader(const Ptr<RawVideoSource>& source);
+//! @}
+
}} // namespace cv { namespace cudacodec {
#endif /* __OPENCV_CUDACODEC_HPP__ */
diff --git a/modules/cudafeatures2d/include/opencv2/cudafeatures2d.hpp b/modules/cudafeatures2d/include/opencv2/cudafeatures2d.hpp
index a89580e0ee..f61d2dfd00 100644
--- a/modules/cudafeatures2d/include/opencv2/cudafeatures2d.hpp
+++ b/modules/cudafeatures2d/include/opencv2/cudafeatures2d.hpp
@@ -50,150 +50,175 @@
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudafilters.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudafeatures2d Feature Detection and Description
+ @}
+ */
+
namespace cv { namespace cuda {
+//! @addtogroup cudafeatures2d
+//! @{
+
+/** @brief Brute-force descriptor matcher.
+
+For each descriptor in the first set, this matcher finds the closest descriptor in the second set
+by trying each one. This descriptor matcher supports masking permissible matches between descriptor
+sets.
+
+The class BFMatcher_CUDA has an interface similar to the class DescriptorMatcher. It has two groups
+of match methods: for matching descriptors of one image with another image or with an image set.
+Also, all functions have an alternative to save results either to the GPU memory or to the CPU
+memory.
+
+@sa DescriptorMatcher, BFMatcher
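+
+A short sketch of the blocking match path (the descriptor matrices are assumed to be already
+uploaded to GPU memory):
+@code
+    cv::cuda::BFMatcher_CUDA matcher(cv::NORM_HAMMING);
+    cv::cuda::GpuMat queryDescs, trainDescs; // filled elsewhere
+    std::vector<cv::DMatch> matches;
+    matcher.match(queryDescs, trainDescs, matches);
+@endcode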
+ */
class CV_EXPORTS BFMatcher_CUDA
{
public:
explicit BFMatcher_CUDA(int norm = cv::NORM_L2);
- // Add descriptors to train descriptor collection
+ //! Add descriptors to train descriptor collection
void add(const std::vector<GpuMat>& descCollection);
- // Get train descriptors collection
+ //! Get train descriptors collection
const std::vector<GpuMat>& getTrainDescriptors() const;
- // Clear train descriptors collection
+ //! Clear train descriptors collection
void clear();
- // Return true if there are not train descriptors in collection
+ //! Return true if there are no train descriptors in the collection
bool empty() const;
- // Return true if the matcher supports mask in match methods
+ //! Return true if the matcher supports mask in match methods
bool isMaskSupported() const;
- // Find one best match for each query descriptor
+ //! Find one best match for each query descriptor
void matchSingle(const GpuMat& query, const GpuMat& train,
GpuMat& trainIdx, GpuMat& distance,
const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
- // Download trainIdx and distance and convert it to CPU vector with DMatch
+ //! Download trainIdx and distance and convert it to CPU vector with DMatch
static void matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector<DMatch>& matches);
- // Convert trainIdx and distance to vector with DMatch
+ //! Convert trainIdx and distance to vector with DMatch
static void matchConvert(const Mat& trainIdx, const Mat& distance, std::vector<DMatch>& matches);
- // Find one best match for each query descriptor
+ //! Find one best match for each query descriptor
void match(const GpuMat& query, const GpuMat& train, std::vector<DMatch>& matches, const GpuMat& mask = GpuMat());
- // Make gpu collection of trains and masks in suitable format for matchCollection function
+ //! Make gpu collection of trains and masks in suitable format for matchCollection function
void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection, const std::vector<GpuMat>& masks = std::vector<GpuMat>());
- // Find one best match from train collection for each query descriptor
+ //! Find one best match from train collection for each query descriptor
void matchCollection(const GpuMat& query, const GpuMat& trainCollection,
GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
const GpuMat& masks = GpuMat(), Stream& stream = Stream::Null());
- // Download trainIdx, imgIdx and distance and convert it to vector with DMatch
+ //! Download trainIdx, imgIdx and distance and convert it to vector with DMatch
static void matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, std::vector<DMatch>& matches);
- // Convert trainIdx, imgIdx and distance to vector with DMatch
+ //! Convert trainIdx, imgIdx and distance to vector with DMatch
static void matchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, std::vector<DMatch>& matches);
- // Find one best match from train collection for each query descriptor.
+ //! Find one best match from train collection for each query descriptor.
void match(const GpuMat& query, std::vector<DMatch>& matches, const std::vector<GpuMat>& masks = std::vector<GpuMat>());
- // Find k best matches for each query descriptor (in increasing order of distances)
+ //! Find k best matches for each query descriptor (in increasing order of distances)
void knnMatchSingle(const GpuMat& query, const GpuMat& train,
GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,
const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
- // Download trainIdx and distance and convert it to vector with DMatch
- // compactResult is used when mask is not empty. If compactResult is false matches
- // vector will have the same size as queryDescriptors rows. If compactResult is true
- // matches vector will not contain matches for fully masked out query descriptors.
+ //! Download trainIdx and distance and convert it to vector with DMatch
+ //! compactResult is used when mask is not empty. If compactResult is false matches
+ //! vector will have the same size as queryDescriptors rows. If compactResult is true
+ //! matches vector will not contain matches for fully masked out query descriptors.
static void knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,
std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
- // Convert trainIdx and distance to vector with DMatch
+ //! Convert trainIdx and distance to vector with DMatch
static void knnMatchConvert(const Mat& trainIdx, const Mat& distance,
std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
- // Find k best matches for each query descriptor (in increasing order of distances).
- // compactResult is used when mask is not empty. If compactResult is false matches
- // vector will have the same size as queryDescriptors rows. If compactResult is true
- // matches vector will not contain matches for fully masked out query descriptors.
+ //! Find k best matches for each query descriptor (in increasing order of distances).
+ //! compactResult is used when mask is not empty. If compactResult is false matches
+ //! vector will have the same size as queryDescriptors rows. If compactResult is true
+ //! matches vector will not contain matches for fully masked out query descriptors.
void knnMatch(const GpuMat& query, const GpuMat& train,
std::vector< std::vector<DMatch> >& matches, int k, const GpuMat& mask = GpuMat(),
bool compactResult = false);
- // Find k best matches from train collection for each query descriptor (in increasing order of distances)
+ //! Find k best matches from train collection for each query descriptor (in increasing order of distances)
void knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,
GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
const GpuMat& maskCollection = GpuMat(), Stream& stream = Stream::Null());
- // Download trainIdx and distance and convert it to vector with DMatch
- // compactResult is used when mask is not empty. If compactResult is false matches
- // vector will have the same size as queryDescriptors rows. If compactResult is true
- // matches vector will not contain matches for fully masked out query descriptors.
+ //! Download trainIdx and distance and convert it to vector with DMatch
+ //! compactResult is used when mask is not empty. If compactResult is false matches
+ //! vector will have the same size as queryDescriptors rows. If compactResult is true
+ //! matches vector will not contain matches for fully masked out query descriptors.
+ //! @see BFMatcher_CUDA::knnMatchDownload
static void knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
- // Convert trainIdx and distance to vector with DMatch
+ //! Convert trainIdx and distance to vector with DMatch
+ //! @see BFMatcher_CUDA::knnMatchConvert
static void knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance,
std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
- // Find k best matches for each query descriptor (in increasing order of distances).
- // compactResult is used when mask is not empty. If compactResult is false matches
- // vector will have the same size as queryDescriptors rows. If compactResult is true
- // matches vector will not contain matches for fully masked out query descriptors.
+ //! Find k best matches for each query descriptor (in increasing order of distances).
+ //! compactResult is used when mask is not empty. If compactResult is false matches
+ //! vector will have the same size as queryDescriptors rows. If compactResult is true
+ //! matches vector will not contain matches for fully masked out query descriptors.
void knnMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, int k,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);
- // Find best matches for each query descriptor which have distance less than maxDistance.
- // nMatches.at(0, queryIdx) will contain matches count for queryIdx.
- // carefully nMatches can be greater than trainIdx.cols - it means that matcher didn't find all matches,
- // because it didn't have enough memory.
- // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nTrain / 100), 10),
- // otherwize user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches
- // Matches doesn't sorted.
+ //! Find best matches for each query descriptor which have distance less than maxDistance.
+ //! nMatches.at<int>(0, queryIdx) will contain the match count for queryIdx.
+ //! Beware: nMatches can be greater than trainIdx.cols - it means that the matcher didn't find all matches,
+ //! because it didn't have enough memory.
+ //! If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nTrain / 100), 10),
+ //! otherwise the user can pass their own allocated trainIdx and distance with size nQuery x nMaxMatches.
+ //! Matches are not sorted.
void radiusMatchSingle(const GpuMat& query, const GpuMat& train,
GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
- // Download trainIdx, nMatches and distance and convert it to vector with DMatch.
- // matches will be sorted in increasing order of distances.
- // compactResult is used when mask is not empty. If compactResult is false matches
- // vector will have the same size as queryDescriptors rows. If compactResult is true
- // matches vector will not contain matches for fully masked out query descriptors.
+ //! Download trainIdx, nMatches and distance and convert it to vector with DMatch.
+ //! matches will be sorted in increasing order of distances.
+ //! compactResult is used when mask is not empty. If compactResult is false matches
+ //! vector will have the same size as queryDescriptors rows. If compactResult is true
+ //! matches vector will not contain matches for fully masked out query descriptors.
static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, const GpuMat& nMatches,
std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
- // Convert trainIdx, nMatches and distance to vector with DMatch.
+ //! Convert trainIdx, nMatches and distance to vector with DMatch.
static void radiusMatchConvert(const Mat& trainIdx, const Mat& distance, const Mat& nMatches,
std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
- // Find best matches for each query descriptor which have distance less than maxDistance
- // in increasing order of distances).
+ //! Find best matches for each query descriptor which have distance less than maxDistance
+ //! (in increasing order of distances).
void radiusMatch(const GpuMat& query, const GpuMat& train,
std::vector< std::vector<DMatch> >& matches, float maxDistance,
const GpuMat& mask = GpuMat(), bool compactResult = false);
- // Find best matches for each query descriptor which have distance less than maxDistance.
- // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nQuery / 100), 10),
- // otherwize user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches
- // Matches doesn't sorted.
+ //! Find best matches for each query descriptor which have distance less than maxDistance.
+ //! If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nQuery / 100), 10),
+ //! otherwise the user can pass their own allocated trainIdx and distance with size nQuery x nMaxMatches.
+ //! Matches are not sorted.
void radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(), Stream& stream = Stream::Null());
- // Download trainIdx, imgIdx, nMatches and distance and convert it to vector with DMatch.
- // matches will be sorted in increasing order of distances.
- // compactResult is used when mask is not empty. If compactResult is false matches
- // vector will have the same size as queryDescriptors rows. If compactResult is true
- // matches vector will not contain matches for fully masked out query descriptors.
+ //! Download trainIdx, imgIdx, nMatches and distance and convert it to vector with DMatch.
+ //! matches will be sorted in increasing order of distances.
+ //! compactResult is used when mask is not empty. If compactResult is false matches
+ //! vector will have the same size as queryDescriptors rows. If compactResult is true
+ //! matches vector will not contain matches for fully masked out query descriptors.
static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, const GpuMat& nMatches,
std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
- // Convert trainIdx, nMatches and distance to vector with DMatch.
+ //! Convert trainIdx, nMatches and distance to vector with DMatch.
static void radiusMatchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, const Mat& nMatches,
std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
- // Find best matches from train collection for each query descriptor which have distance less than
- // maxDistance (in increasing order of distances).
+ //! Find best matches from train collection for each query descriptor which have distance less than
+ //! maxDistance (in increasing order of distances).
void radiusMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, float maxDistance,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);
@@ -203,6 +228,8 @@ private:
std::vector<GpuMat> trainDescCollection;
};
+/** @brief Class used for corner detection using the FAST algorithm.
+ */
class CV_EXPORTS FAST_CUDA
{
public:
@@ -213,23 +240,45 @@ public:
ROWS_COUNT
};
- // all features have same size
+ //! all features have same size
static const int FEATURE_SIZE = 7;
+ /** @brief Constructor.
+
+ @param threshold Threshold on difference between intensity of the central pixel and pixels on a
+ circle around this pixel.
+ @param nonmaxSuppression If it is true, non-maximum suppression is applied to detected corners
+ (keypoints).
+ @param keypointsRatio Inner buffer size for keypoints store is determined as (keypointsRatio \*
+ image_width \* image_height).
+ */
explicit FAST_CUDA(int threshold, bool nonmaxSuppression = true, double keypointsRatio = 0.05);
- //! finds the keypoints using FAST detector
- //! supports only CV_8UC1 images
+ /** @brief Finds the keypoints using FAST detector.
+
+ @param image Image where keypoints (corners) are detected. Only 8-bit grayscale images are
+ supported.
+ @param mask Optional input mask that marks the regions where we should detect features.
+ @param keypoints The output vector of keypoints. Can be stored both in CPU and GPU memory. For GPU
+ memory:
+ - keypoints.ptr\<short2\>(LOCATION_ROW)[i] will contain location of i'th point
+ - keypoints.ptr\<float\>(RESPONSE_ROW)[i] will contain response of i'th point (if non-maximum
+ suppression is applied)
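+
+ A minimal sketch (img is assumed to hold an uploaded CV_8UC1 image):
+ @code
+     cv::cuda::FAST_CUDA fast(20);
+     cv::cuda::GpuMat img, d_keypoints; // img: CV_8UC1
+     fast(img, cv::cuda::GpuMat(), d_keypoints);
+     std::vector<cv::KeyPoint> keypoints;
+     cv::cuda::FAST_CUDA::downloadKeypoints(d_keypoints, keypoints);
+ @endcode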
+ */
void operator ()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints);
+ /** @overload */
void operator ()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
- //! download keypoints from device to host memory
+ /** @brief Download keypoints from GPU to CPU memory.
+ */
static void downloadKeypoints(const GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints);
- //! convert keypoints to KeyPoint vector
+ /** @brief Converts keypoints from CUDA representation to vector of KeyPoint.
+ */
static void convertKeypoints(const Mat& h_keypoints, std::vector<KeyPoint>& keypoints);
- //! release temporary buffer's memory
+ /** @brief Releases inner buffer memory.
+ */
void release();
bool nonmaxSuppression;
@@ -239,13 +288,22 @@ public:
//! max keypoints = keypointsRatio * img.size().area()
double keypointsRatio;
- //! find keypoints and compute it's response if nonmaxSuppression is true
- //! return count of detected keypoints
+ /** @brief Finds keypoints and computes their responses if nonmaxSuppression is true.
+
+ @param image Image where keypoints (corners) are detected. Only 8-bit grayscale images are
+ supported.
+ @param mask Optional input mask that marks the regions where we should detect features.
+
+ The function returns the count of detected keypoints.
+ */
int calcKeyPointsLocation(const GpuMat& image, const GpuMat& mask);
- //! get final array of keypoints
- //! performs nonmax suppression if needed
- //! return final count of keypoints
+ /** @brief Gets the final array of keypoints.
+
+ @param keypoints The output vector of keypoints.
+
+ The function performs non-max suppression if needed and returns the final count of keypoints.
+ */
int getKeyPoints(GpuMat& keypoints);
private:
@@ -257,6 +315,8 @@ private:
GpuMat d_keypoints_;
};
+/** @brief Class for extracting ORB features and descriptors from an image.
+ */
class CV_EXPORTS ORB_CUDA
{
public:
@@ -276,28 +336,51 @@ public:
DEFAULT_FAST_THRESHOLD = 20
};
- //! Constructor
+ /** @brief Constructor.
+
+ @param nFeatures The number of desired features.
+ @param scaleFactor Coefficient by which we divide the dimensions from one scale pyramid level to
+ the next.
+ @param nLevels The number of levels in the scale pyramid.
+ @param edgeThreshold How far from the boundary the points should be.
+ @param firstLevel The level at which the image is given. If 1, that means we will also look at the
+ image scaleFactor times bigger.
+ @param WTA_K
+ @param scoreType
+ @param patchSize
+ */
explicit ORB_CUDA(int nFeatures = 500, float scaleFactor = 1.2f, int nLevels = 8, int edgeThreshold = 31,
int firstLevel = 0, int WTA_K = 2, int scoreType = 0, int patchSize = 31);
- //! Compute the ORB features on an image
- //! image - the image to compute the features (supports only CV_8UC1 images)
- //! mask - the mask to apply
- //! keypoints - the resulting keypoints
+ /** @overload */
void operator()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
+ /** @overload */
void operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints);
- //! Compute the ORB features and descriptors on an image
- //! image - the image to compute the features (supports only CV_8UC1 images)
- //! mask - the mask to apply
- //! keypoints - the resulting keypoints
- //! descriptors - descriptors array
+ /** @brief Detects keypoints and computes descriptors for them.
+
+ @param image Input 8-bit grayscale image.
+ @param mask Optional input mask that marks the regions where we should detect features.
+ @param keypoints The input/output vector of keypoints. Can be stored both in CPU and GPU memory.
+ For GPU memory:
+ - keypoints.ptr\<float\>(X_ROW)[i] contains x coordinate of the i'th feature.
+ - keypoints.ptr\<float\>(Y_ROW)[i] contains y coordinate of the i'th feature.
+ - keypoints.ptr\<float\>(RESPONSE_ROW)[i] contains the response of the i'th feature.
+ - keypoints.ptr\<float\>(ANGLE_ROW)[i] contains orientation of the i'th feature.
+ - keypoints.ptr\<float\>(OCTAVE_ROW)[i] contains the octave of the i'th feature.
+ - keypoints.ptr\<float\>(SIZE_ROW)[i] contains the size of the i'th feature.
+ @param descriptors Computed descriptors. If blurForDescriptor is true, the image is blurred
+ before descriptor calculation.
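+
+ A compact sketch of detection plus description (img is assumed to hold a CV_8UC1 image):
+ @code
+     cv::cuda::ORB_CUDA orb(500);
+     cv::cuda::GpuMat img, descriptors; // img: CV_8UC1
+     std::vector<cv::KeyPoint> keypoints;
+     orb(img, cv::cuda::GpuMat(), keypoints, descriptors);
+ @endcode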
+ */
void operator()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors);
+ /** @overload */
void operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors);
- //! download keypoints from device to host memory
+ /** @brief Download keypoints from GPU to CPU memory.
+ */
static void downloadKeyPoints(const GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints);
- //! convert keypoints to KeyPoint vector
+ /** @brief Converts keypoints from CUDA representation to vector of KeyPoint.
+ */
static void convertKeyPoints(const Mat& d_keypoints, std::vector<KeyPoint>& keypoints);
//! returns the descriptor size in bytes
@@ -309,7 +392,8 @@ public:
fastDetector_.nonmaxSuppression = nonmaxSuppression;
}
- //! release temporary buffer's memory
+ /** @brief Releases inner buffer memory.
+ */
void release();
//! if true, image will be blurred before descriptors calculation
@@ -335,10 +419,10 @@ private:
int scoreType_;
int patchSize_;
- // The number of desired features per scale
+ //! The number of desired features per scale
std::vector<int> n_features_per_level_;
- // Points to compute BRIEF descriptors from
+ //! Points to compute BRIEF descriptors from
GpuMat pattern_;
std::vector<GpuMat> imagePyr_;
@@ -356,6 +440,8 @@ private:
GpuMat d_keypoints_;
};
+//! @}
+
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDAFEATURES2D_HPP__ */
diff --git a/modules/cudafilters/include/opencv2/cudafilters.hpp b/modules/cudafilters/include/opencv2/cudafilters.hpp
index 2c06575b5d..9e86cc3a71 100644
--- a/modules/cudafilters/include/opencv2/cudafilters.hpp
+++ b/modules/cudafilters/include/opencv2/cudafilters.hpp
@@ -50,65 +50,189 @@
#include "opencv2/core/cuda.hpp"
#include "opencv2/imgproc.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudafilters Image Filtering
+
+Functions and classes described in this section are used to perform various linear or non-linear
+filtering operations on 2D images.
+
+@note
+ - An example containing all basic morphology operators like erode and dilate can be found at
+ opencv_source_code/samples/gpu/morphology.cpp
+
+ @}
+ */
+
namespace cv { namespace cuda {
+//! @addtogroup cudafilters
+//! @{
+
+/** @brief Common interface for all CUDA filters.
+ */
class CV_EXPORTS Filter : public Algorithm
{
public:
+ /** @brief Applies the specified filter to the image.
+
+ @param src Input image.
+ @param dst Output image.
+ @param stream Stream for the asynchronous version.
+ */
virtual void apply(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Box Filter
-//! creates a normalized 2D box filter
-//! supports CV_8UC1, CV_8UC4 types
+/** @brief Creates a normalized 2D box filter.
+
+@param srcType Input image type. Only CV_8UC1 and CV_8UC4 are supported for now.
+@param dstType Output image type. Only the same type as src is supported for now.
+@param ksize Kernel size.
+@param anchor Anchor point. The default value Point(-1, -1) means that the anchor is at the kernel
+center.
+@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
+@param borderVal Default border value.
+
+@sa boxFilter
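+
+A typical create-then-apply sketch (src is assumed to hold an uploaded CV_8UC1 image):
+@code
+    cv::Ptr<cv::cuda::Filter> box = cv::cuda::createBoxFilter(CV_8UC1, CV_8UC1, cv::Size(5, 5));
+    cv::cuda::GpuMat src, dst; // src: CV_8UC1
+    box->apply(src, dst);
+@endcode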
+ */
CV_EXPORTS Ptr<Filter> createBoxFilter(int srcType, int dstType, Size ksize, Point anchor = Point(-1,-1),
int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));
////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear Filter
-//! Creates a non-separable linear 2D filter
-//! supports 1 and 4 channel CV_8U, CV_16U and CV_32F input
+/** @brief Creates a non-separable linear 2D filter.
+
+@param srcType Input image type. Supports CV_8U , CV_16U and CV_32F one and four channel images.
+@param dstType Output image type. Only the same type as src is supported for now.
+@param kernel 2D array of filter coefficients.
+@param anchor Anchor point. The default value Point(-1, -1) means that the anchor is at the kernel
+center.
+@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
+@param borderVal Default border value.
+
+@sa filter2D
+ */
CV_EXPORTS Ptr<Filter> createLinearFilter(int srcType, int dstType, InputArray kernel, Point anchor = Point(-1,-1),
int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));
////////////////////////////////////////////////////////////////////////////////////////////////////
// Laplacian Filter
-//! creates a Laplacian operator
-//! supports only ksize = 1 and ksize = 3
+/** @brief Creates a Laplacian operator.
+
+@param srcType Input image type. Supports CV_8U , CV_16U and CV_32F one and four channel images.
+@param dstType Output image type. Only the same type as src is supported for now.
+@param ksize Aperture size used to compute the second-derivative filters (see getDerivKernels). It
+must be positive and odd. Only ksize = 1 and ksize = 3 are supported.
+@param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
+applied (see getDerivKernels ).
+@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
+@param borderVal Default border value.
+
+@sa Laplacian
+ */
CV_EXPORTS Ptr<Filter> createLaplacianFilter(int srcType, int dstType, int ksize = 1, double scale = 1,
int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));
////////////////////////////////////////////////////////////////////////////////////////////////////
// Separable Linear Filter
-//! creates a separable linear filter
+/** @brief Creates a separable linear filter.
+
+@param srcType Source array type.
+@param dstType Destination array type.
+@param rowKernel Horizontal filter coefficients. Supports kernels with size \<= 32 .
+@param columnKernel Vertical filter coefficients. Supports kernels with size \<= 32 .
+@param anchor Anchor position within the kernel. Negative values mean that anchor is positioned at
+the aperture center.
+@param rowBorderMode Pixel extrapolation method in the vertical direction. For details, see
+borderInterpolate.
+@param columnBorderMode Pixel extrapolation method in the horizontal direction.
+
+@sa sepFilter2D
+ */
CV_EXPORTS Ptr<Filter> createSeparableLinearFilter(int srcType, int dstType, InputArray rowKernel, InputArray columnKernel,
Point anchor = Point(-1,-1), int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);
////////////////////////////////////////////////////////////////////////////////////////////////////
// Deriv Filter
-//! creates a generalized Deriv operator
+/** @brief Creates a generalized Deriv operator.
+
+@param srcType Source image type.
+@param dstType Destination array type.
+@param dx Derivative order with respect to x.
+@param dy Derivative order with respect to y.
+@param ksize Aperture size. See getDerivKernels for details.
+@param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.
+See getDerivKernels for details.
+@param scale Optional scale factor for the computed derivative values. By default, no scaling is
+applied. For details, see getDerivKernels .
+@param rowBorderMode Pixel extrapolation method in the vertical direction. For details, see
+borderInterpolate.
+@param columnBorderMode Pixel extrapolation method in the horizontal direction.
+ */
CV_EXPORTS Ptr<Filter> createDerivFilter(int srcType, int dstType, int dx, int dy,
int ksize, bool normalize = false, double scale = 1,
int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);
-//! creates a Sobel operator
+/** @brief Creates a Sobel operator.
+
+@param srcType Source image type.
+@param dstType Destination array type.
+@param dx Derivative order with respect to x.
+@param dy Derivative order with respect to y.
+@param ksize Size of the extended Sobel kernel. Possible values are 1, 3, 5 or 7.
+@param scale Optional scale factor for the computed derivative values. By default, no scaling is
+applied. For details, see getDerivKernels .
+@param rowBorderMode Pixel extrapolation method in the vertical direction. For details, see
+borderInterpolate.
+@param columnBorderMode Pixel extrapolation method in the horizontal direction.
+
+@sa Sobel
+ */
CV_EXPORTS Ptr<Filter> createSobelFilter(int srcType, int dstType, int dx, int dy, int ksize = 3,
double scale = 1, int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);
-//! creates a vertical or horizontal Scharr operator
+/** @brief Creates a vertical or horizontal Scharr operator.
+
+@param srcType Source image type.
+@param dstType Destination array type.
+@param dx Order of the derivative in x.
+@param dy Order of the derivative in y.
+@param scale Optional scale factor for the computed derivative values. By default, no scaling is
+applied. See getDerivKernels for details.
+@param rowBorderMode Pixel extrapolation method in the vertical direction. For details, see
+borderInterpolate.
+@param columnBorderMode Pixel extrapolation method in the horizontal direction.
+
+@sa Scharr
+ */
CV_EXPORTS Ptr<Filter> createScharrFilter(int srcType, int dstType, int dx, int dy,
double scale = 1, int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);
////////////////////////////////////////////////////////////////////////////////////////////////////
// Gaussian Filter
-//! creates a Gaussian filter
+/** @brief Creates a Gaussian filter.
+
+@param srcType Source image type.
+@param dstType Destination array type.
+@param ksize Aperture size. See getGaussianKernel for details.
+@param sigma1 Gaussian sigma in the horizontal direction. See getGaussianKernel for details.
+@param sigma2 Gaussian sigma in the vertical direction. If 0, then
+\f$\texttt{sigma2}\leftarrow\texttt{sigma1}\f$ .
+@param rowBorderMode Pixel extrapolation method in the vertical direction. For details, see
+borderInterpolate.
+@param columnBorderMode Pixel extrapolation method in the horizontal direction.
+
+@sa GaussianBlur
+ */
CV_EXPORTS Ptr<Filter> createGaussianFilter(int srcType, int dstType, Size ksize,
double sigma1, double sigma2 = 0,
int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);
@@ -116,19 +240,49 @@ CV_EXPORTS Ptr createGaussianFilter(int srcType, int dstType, Size ksize
////////////////////////////////////////////////////////////////////////////////////////////////////
// Morphology Filter
-//! creates a 2D morphological filter
-//! supports CV_8UC1 and CV_8UC4 types
+/** @brief Creates a 2D morphological filter.
+
+@param op Type of morphological operation. The following types are possible:
+- **MORPH_ERODE** erode
+- **MORPH_DILATE** dilate
+- **MORPH_OPEN** opening
+- **MORPH_CLOSE** closing
+- **MORPH_GRADIENT** morphological gradient
+- **MORPH_TOPHAT** "top hat"
+- **MORPH_BLACKHAT** "black hat"
+@param srcType Input/output image type. Only CV_8UC1 and CV_8UC4 are supported.
+@param kernel 2D 8-bit structuring element for the morphological operation.
+@param anchor Anchor position within the structuring element. Negative values mean that the anchor
+is at the center.
+@param iterations Number of times erosion and dilation are applied.
+
+@sa morphologyEx
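+
+A sketch of a 3x3 erosion (the structuring element is illustrative):
+@code
+    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
+    cv::Ptr<cv::cuda::Filter> erodeFilter =
+        cv::cuda::createMorphologyFilter(cv::MORPH_ERODE, CV_8UC1, kernel);
+    cv::cuda::GpuMat src, dst; // src: CV_8UC1
+    erodeFilter->apply(src, dst);
+@endcode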
+ */
CV_EXPORTS Ptr<Filter> createMorphologyFilter(int op, int srcType, InputArray kernel, Point anchor = Point(-1, -1), int iterations = 1);
////////////////////////////////////////////////////////////////////////////////////////////////////
// Image Rank Filter
-//! result pixel value is the maximum of pixel values under the rectangular mask region
+/** @brief Creates the maximum filter.
+
+@param srcType Input/output image type. Only CV_8UC1 and CV_8UC4 are supported.
+@param ksize Kernel size.
+@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
+@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
+@param borderVal Default border value.
+ */
CV_EXPORTS Ptr<Filter> createBoxMaxFilter(int srcType, Size ksize,
Point anchor = Point(-1, -1),
int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));
-//! result pixel value is the maximum of pixel values under the rectangular mask region
+/** @brief Creates the minimum filter.
+
+@param srcType Input/output image type. Only CV_8UC1 and CV_8UC4 are supported.
+@param ksize Kernel size.
+@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
+@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
+@param borderVal Default border value.
+ */
CV_EXPORTS Ptr<Filter> createBoxMinFilter(int srcType, Size ksize,
Point anchor = Point(-1, -1),
int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));
@@ -136,14 +290,30 @@ CV_EXPORTS Ptr createBoxMinFilter(int srcType, Size ksize,
////////////////////////////////////////////////////////////////////////////////////////////////////
// 1D Sum Filter
-//! creates a horizontal 1D box filter
-//! supports only CV_8UC1 source type and CV_32FC1 sum type
+/** @brief Creates a horizontal 1D box filter.
+
+@param srcType Input image type. Only CV_8UC1 type is supported for now.
+@param dstType Output image type. Only CV_32FC1 type is supported for now.
+@param ksize Kernel size.
+@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
+@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
+@param borderVal Default border value.
+ */
CV_EXPORTS Ptr<Filter> createRowSumFilter(int srcType, int dstType, int ksize, int anchor = -1, int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));
-//! creates a vertical 1D box filter
-//! supports only CV_8UC1 sum type and CV_32FC1 dst type
+/** @brief Creates a vertical 1D box filter.
+
+@param srcType Input image type. Only CV_8UC1 type is supported for now.
+@param dstType Output image type. Only CV_32FC1 type is supported for now.
+@param ksize Kernel size.
+@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
+@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
+@param borderVal Default border value.
+ */
CV_EXPORTS Ptr<Filter> createColumnSumFilter(int srcType, int dstType, int ksize, int anchor = -1, int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));
+//! @}
+
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDAFILTERS_HPP__ */
diff --git a/modules/cudaimgproc/include/opencv2/cudaimgproc.hpp b/modules/cudaimgproc/include/opencv2/cudaimgproc.hpp
index d451b93b1e..1ec288fa9c 100644
--- a/modules/cudaimgproc/include/opencv2/cudaimgproc.hpp
+++ b/modules/cudaimgproc/include/opencv2/cudaimgproc.hpp
@@ -50,16 +50,48 @@
#include "opencv2/core/cuda.hpp"
#include "opencv2/imgproc.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudaimgproc Image Processing
+ @{
+ @defgroup cudaimgproc_color Color space processing
+ @defgroup cudaimgproc_hist Histogram Calculation
+ @defgroup cudaimgproc_hough Hough Transform
+ @defgroup cudaimgproc_feature Feature Detection
+ @}
+ @}
+*/
+
namespace cv { namespace cuda {
+//! @addtogroup cudaimgproc
+//! @{
+
/////////////////////////// Color Processing ///////////////////////////
-//! converts image from one color space to another
+//! @addtogroup cudaimgproc_color
+//! @{
+
+/** @brief Converts an image from one color space to another.
+
+@param src Source image with CV_8U , CV_16U , or CV_32F depth and 1, 3, or 4 channels.
+@param dst Destination image.
+@param code Color space conversion code. For details, see cvtColor .
+@param dcn Number of channels in the destination image. If the parameter is 0, the number of the
+channels is derived automatically from src and the code .
+@param stream Stream for the asynchronous version.
+
+3-channel color spaces (like HSV, XYZ, and so on) can be stored in a 4-channel image for better
+performance.
+
+@sa cvtColor
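+
+A one-line sketch (src is assumed to hold an uploaded CV_8UC3 BGR image):
+@code
+    cv::cuda::GpuMat src, gray; // src: CV_8UC3
+    cv::cuda::cvtColor(src, gray, cv::COLOR_BGR2GRAY);
+@endcode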
+ */
CV_EXPORTS void cvtColor(InputArray src, OutputArray dst, int code, int dcn = 0, Stream& stream = Stream::Null());
enum
{
- // Bayer Demosaicing (Malvar, He, and Cutler)
+ //! Bayer Demosaicing (Malvar, He, and Cutler)
COLOR_BayerBG2BGR_MHT = 256,
COLOR_BayerGB2BGR_MHT = 257,
COLOR_BayerRG2BGR_MHT = 258,
@@ -75,105 +107,228 @@ enum
COLOR_BayerRG2GRAY_MHT = 262,
COLOR_BayerGR2GRAY_MHT = 263
};
+
+/** @brief Converts an image from Bayer pattern to RGB or grayscale.
+
+@param src Source image (8-bit or 16-bit single channel).
+@param dst Destination image.
+@param code Color space conversion code (see the description below).
+@param dcn Number of channels in the destination image. If the parameter is 0, the number of the
+channels is derived automatically from src and the code .
+@param stream Stream for the asynchronous version.
+
+The function can do the following transformations:
+
+- Demosaicing using bilinear interpolation
+
+ > - COLOR_BayerBG2GRAY , COLOR_BayerGB2GRAY , COLOR_BayerRG2GRAY , COLOR_BayerGR2GRAY
+ > - COLOR_BayerBG2BGR , COLOR_BayerGB2BGR , COLOR_BayerRG2BGR , COLOR_BayerGR2BGR
+
+- Demosaicing using Malvar-He-Cutler algorithm (@cite MHT2011)
+
+ > - COLOR_BayerBG2GRAY_MHT , COLOR_BayerGB2GRAY_MHT , COLOR_BayerRG2GRAY_MHT ,
+ > COLOR_BayerGR2GRAY_MHT
+ > - COLOR_BayerBG2BGR_MHT , COLOR_BayerGB2BGR_MHT , COLOR_BayerRG2BGR_MHT ,
+ > COLOR_BayerGR2BGR_MHT
+
+@sa cvtColor
+ */
CV_EXPORTS void demosaicing(InputArray src, OutputArray dst, int code, int dcn = -1, Stream& stream = Stream::Null());
-//! swap channels
-//! dstOrder - Integer array describing how channel values are permutated. The n-th entry
-//! of the array contains the number of the channel that is stored in the n-th channel of
-//! the output image. E.g. Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR
-//! channel order.
+/** @brief Exchanges the color channels of an image in-place.
+
+@param image Source image. Supports only CV_8UC4 type.
+@param dstOrder Integer array describing how channel values are permutated. The n-th entry of the
+array contains the number of the channel that is stored in the n-th channel of the output image.
+E.g. given an RGBA image, dstOrder = [3,2,1,0] converts this to ABGR channel order.
+@param stream Stream for the asynchronous version.
+
+The methods support arbitrary permutations of the original channels, including replication.
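+
+A short sketch (assuming a CV_8UC4 image d_rgba that already resides on the device):
+@code{.cpp}
+    const int order[4] = {3, 2, 1, 0};           // RGBA -> ABGR
+    cv::cuda::swapChannels(d_rgba, order);       // permutes channels in-place
+@endcode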
+ */
CV_EXPORTS void swapChannels(InputOutputArray image, const int dstOrder[4], Stream& stream = Stream::Null());
-//! Routines for correcting image color gamma
+/** @brief Routines for correcting image color gamma.
+
+@param src Source image (3- or 4-channel 8 bit).
+@param dst Destination image.
+@param forward true for forward gamma correction or false for inverse gamma correction.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void gammaCorrection(InputArray src, OutputArray dst, bool forward = true, Stream& stream = Stream::Null());
enum { ALPHA_OVER, ALPHA_IN, ALPHA_OUT, ALPHA_ATOP, ALPHA_XOR, ALPHA_PLUS, ALPHA_OVER_PREMUL, ALPHA_IN_PREMUL, ALPHA_OUT_PREMUL,
ALPHA_ATOP_PREMUL, ALPHA_XOR_PREMUL, ALPHA_PLUS_PREMUL, ALPHA_PREMUL};
-//! Composite two images using alpha opacity values contained in each image
-//! Supports CV_8UC4, CV_16UC4, CV_32SC4 and CV_32FC4 types
+/** @brief Composites two images using alpha opacity values contained in each image.
+
+@param img1 First image. Supports CV_8UC4 , CV_16UC4 , CV_32SC4 and CV_32FC4 types.
+@param img2 Second image. Must have the same size and the same type as img1 .
+@param dst Destination image.
+@param alpha_op Flag specifying the alpha-blending operation:
+- **ALPHA_OVER**
+- **ALPHA_IN**
+- **ALPHA_OUT**
+- **ALPHA_ATOP**
+- **ALPHA_XOR**
+- **ALPHA_PLUS**
+- **ALPHA_OVER_PREMUL**
+- **ALPHA_IN_PREMUL**
+- **ALPHA_OUT_PREMUL**
+- **ALPHA_ATOP_PREMUL**
+- **ALPHA_XOR_PREMUL**
+- **ALPHA_PLUS_PREMUL**
+- **ALPHA_PREMUL**
+@param stream Stream for the asynchronous version.
+
+@note
+ - An example demonstrating the use of alphaComp can be found at
+ opencv_source_code/samples/gpu/alpha_comp.cpp
+ */
CV_EXPORTS void alphaComp(InputArray img1, InputArray img2, OutputArray dst, int alpha_op, Stream& stream = Stream::Null());
+//! @} cudaimgproc_color
+
////////////////////////////// Histogram ///////////////////////////////
-//! Calculates histogram for 8u one channel image
-//! Output hist will have one row, 256 cols and CV32SC1 type.
+//! @addtogroup cudaimgproc_hist
+//! @{
+
+/** @brief Calculates histogram for one channel 8-bit image.
+
+@param src Source image with CV_8UC1 type.
+@param hist Destination histogram with one row, 256 columns, and the CV_32SC1 type.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void calcHist(InputArray src, OutputArray hist, Stream& stream = Stream::Null());
-//! normalizes the grayscale image brightness and contrast by normalizing its histogram
+/** @brief Equalizes the histogram of a grayscale image.
+
+@param src Source image with CV_8UC1 type.
+@param dst Destination image.
+@param buf Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
+@param stream Stream for the asynchronous version.
+
+@sa equalizeHist
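+
+A minimal sketch of the buffered overload; reusing buf across calls with the same image size avoids
+repeated allocations (variable names are assumptions):
+@code{.cpp}
+    cv::cuda::GpuMat d_src, d_dst, buf;          // buf is kept alive between calls
+    // ... upload a CV_8UC1 frame into d_src ...
+    cv::cuda::equalizeHist(d_src, d_dst, buf);   // no reallocation after the first call
+@endcode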
+ */
CV_EXPORTS void equalizeHist(InputArray src, OutputArray dst, InputOutputArray buf, Stream& stream = Stream::Null());
+/** @overload */
static inline void equalizeHist(InputArray src, OutputArray dst, Stream& stream = Stream::Null())
{
GpuMat buf;
cuda::equalizeHist(src, dst, buf, stream);
}
+/** @brief Base class for Contrast Limited Adaptive Histogram Equalization.
+ */
class CV_EXPORTS CLAHE : public cv::CLAHE
{
public:
using cv::CLAHE::apply;
+ /** @brief Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
+
+ @param src Source image with CV_8UC1 type.
+ @param dst Destination image.
+ @param stream Stream for the asynchronous version.
+ */
virtual void apply(InputArray src, OutputArray dst, Stream& stream) = 0;
};
+
+/** @brief Creates implementation for cuda::CLAHE .
+
+@param clipLimit Threshold for contrast limiting.
+@param tileGridSize Size of grid for histogram equalization. Input image will be divided into
+equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
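+
+A usage sketch (the clip limit and grid size shown are illustrative values, not recommendations):
+@code{.cpp}
+    cv::Ptr<cv::cuda::CLAHE> clahe = cv::cuda::createCLAHE(4.0, cv::Size(8, 8));
+    clahe->apply(d_gray, d_dst);                 // d_gray is a CV_8UC1 GpuMat
+@endcode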
+ */
CV_EXPORTS Ptr<cuda::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
-//! Compute levels with even distribution. levels will have 1 row and nLevels cols and CV_32SC1 type.
+/** @brief Computes levels with even distribution.
+
+@param levels Destination array. levels has 1 row, nLevels columns, and the CV_32SC1 type.
+@param nLevels Number of computed levels. nLevels must be at least 2.
+@param lowerLevel Lower boundary value of the lowest level.
+@param upperLevel Upper boundary value of the greatest level.
+ */
CV_EXPORTS void evenLevels(OutputArray levels, int nLevels, int lowerLevel, int upperLevel);
-//! Calculates histogram with evenly distributed bins for signle channel source.
-//! Supports CV_8UC1, CV_16UC1 and CV_16SC1 source types.
-//! Output hist will have one row and histSize cols and CV_32SC1 type.
+/** @brief Calculates a histogram with evenly distributed bins.
+
+@param src Source image. CV_8U, CV_16U, or CV_16S depth and 1 or 4 channels are supported. For
+a four-channel image, all channels are processed separately.
+@param hist Destination histogram with one row, histSize columns, and the CV_32S type.
+@param histSize Size of the histogram.
+@param lowerLevel Lower boundary of lowest-level bin.
+@param upperLevel Upper boundary of highest-level bin.
+@param buf Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void histEven(InputArray src, OutputArray hist, InputOutputArray buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
+/** @overload */
static inline void histEven(InputArray src, OutputArray hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null())
{
GpuMat buf;
cuda::histEven(src, hist, buf, histSize, lowerLevel, upperLevel, stream);
}
-//! Calculates histogram with evenly distributed bins for four-channel source.
-//! All channels of source are processed separately.
-//! Supports CV_8UC4, CV_16UC4 and CV_16SC4 source types.
-//! Output hist[i] will have one row and histSize[i] cols and CV_32SC1 type.
+/** @overload */
CV_EXPORTS void histEven(InputArray src, GpuMat hist[4], InputOutputArray buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
+/** @overload */
static inline void histEven(InputArray src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null())
{
GpuMat buf;
cuda::histEven(src, hist, buf, histSize, lowerLevel, upperLevel, stream);
}
-//! Calculates histogram with bins determined by levels array.
-//! levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise.
-//! Supports CV_8UC1, CV_16UC1, CV_16SC1 and CV_32FC1 source types.
-//! Output hist will have one row and (levels.cols-1) cols and CV_32SC1 type.
+/** @brief Calculates a histogram with bins determined by the levels array.
+
+@param src Source image. CV_8U , CV_16U , or CV_16S depth and 1 or 4 channels are supported.
+For a four-channel image, all channels are processed separately.
+@param hist Destination histogram with one row, (levels.cols-1) columns, and the CV_32SC1 type.
+@param levels Array of level boundaries. It must have one row and the CV_32SC1 type if the
+source has an integer depth, or CV_32FC1 otherwise.
+@param buf Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void histRange(InputArray src, OutputArray hist, InputArray levels, InputOutputArray buf, Stream& stream = Stream::Null());
+/** @overload */
static inline void histRange(InputArray src, OutputArray hist, InputArray levels, Stream& stream = Stream::Null())
{
GpuMat buf;
cuda::histRange(src, hist, levels, buf, stream);
}
-//! Calculates histogram with bins determined by levels array.
-//! All levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise.
-//! All channels of source are processed separately.
-//! Supports CV_8UC4, CV_16UC4, CV_16SC4 and CV_32FC4 source types.
-//! Output hist[i] will have one row and (levels[i].cols-1) cols and CV_32SC1 type.
+/** @overload */
CV_EXPORTS void histRange(InputArray src, GpuMat hist[4], const GpuMat levels[4], InputOutputArray buf, Stream& stream = Stream::Null());
+/** @overload */
static inline void histRange(InputArray src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null())
{
GpuMat buf;
cuda::histRange(src, hist, levels, buf, stream);
}
+//! @} cudaimgproc_hist
+
//////////////////////////////// Canny ////////////////////////////////
+/** @brief Base class for Canny Edge Detector.
+ */
class CV_EXPORTS CannyEdgeDetector : public Algorithm
{
public:
+ /** @brief Finds edges in an image using the @cite Canny86 algorithm.
+
+ @param image Single-channel 8-bit input image.
+ @param edges Output edge map. It has the same size and type as image .
+ */
virtual void detect(InputArray image, OutputArray edges) = 0;
+ /** @overload
+ @param dx First derivative of image in the horizontal direction. Supports only CV_32S type.
+ @param dy First derivative of image in the vertical direction. Supports only CV_32S type.
+ @param edges Output edge map. It has the same size and type as image .
+ */
virtual void detect(InputArray dx, InputArray dy, OutputArray edges) = 0;
virtual void setLowThreshold(double low_thresh) = 0;
@@ -189,6 +344,16 @@ public:
virtual bool getL2Gradient() const = 0;
};
+/** @brief Creates implementation for cuda::CannyEdgeDetector .
+
+@param low_thresh First threshold for the hysteresis procedure.
+@param high_thresh Second threshold for the hysteresis procedure.
+@param apperture_size Aperture size for the Sobel operator.
+@param L2gradient Flag indicating whether a more accurate \f$L_2\f$ norm
+\f$=\sqrt{(dI/dx)^2 + (dI/dy)^2}\f$ should be used to compute the image gradient magnitude (
+L2gradient=true ), or a faster default \f$L_1\f$ norm \f$=|dI/dx|+|dI/dy|\f$ is enough ( L2gradient=false
+).
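+
+A short sketch (the thresholds and GpuMat names are illustrative):
+@code{.cpp}
+    cv::Ptr<cv::cuda::CannyEdgeDetector> canny =
+        cv::cuda::createCannyEdgeDetector(50.0, 100.0);
+    canny->detect(d_gray, d_edges);              // d_gray: single-channel 8-bit GpuMat
+@endcode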
+ */
CV_EXPORTS Ptr<CannyEdgeDetector> createCannyEdgeDetector(double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false);
/////////////////////////// Hough Transform ////////////////////////////
@@ -196,10 +361,32 @@ CV_EXPORTS Ptr createCannyEdgeDetector(double low_thresh, dou
//////////////////////////////////////
// HoughLines
+//! @addtogroup cudaimgproc_hough
+//! @{
+
+/** @brief Base class for lines detector algorithm.
+ */
class CV_EXPORTS HoughLinesDetector : public Algorithm
{
public:
+ /** @brief Finds lines in a binary image using the classical Hough transform.
+
+ @param src 8-bit, single-channel binary source image.
+ @param lines Output vector of lines. Each line is represented by a two-element vector
+ \f$(\rho, \theta)\f$ . \f$\rho\f$ is the distance from the coordinate origin \f$(0,0)\f$ (top-left corner of
+ the image). \f$\theta\f$ is the line rotation angle in radians (
+ \f$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\f$ ).
+
+ @sa HoughLines
+ */
virtual void detect(InputArray src, OutputArray lines) = 0;
+
+ /** @brief Downloads results from cuda::HoughLinesDetector::detect to host memory.
+
+ @param d_lines Result of cuda::HoughLinesDetector::detect .
+ @param h_lines Output host array.
+ @param h_votes Optional output array for line's votes.
+ */
virtual void downloadResults(InputArray d_lines, OutputArray h_lines, OutputArray h_votes = noArray()) = 0;
virtual void setRho(float rho) = 0;
@@ -218,16 +405,35 @@ public:
virtual int getMaxLines() const = 0;
};
+/** @brief Creates implementation for cuda::HoughLinesDetector .
+
+@param rho Distance resolution of the accumulator in pixels.
+@param theta Angle resolution of the accumulator in radians.
+@param threshold Accumulator threshold parameter. Only those lines are returned that get enough
+votes ( \f$>\texttt{threshold}\f$ ).
+@param doSort Performs sorting of the detected lines by votes.
+@param maxLines Maximum number of output lines.
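+
+A usage sketch (the accumulator resolution and threshold are illustrative):
+@code{.cpp}
+    cv::Ptr<cv::cuda::HoughLinesDetector> hough =
+        cv::cuda::createHoughLinesDetector(1.0f, (float)(CV_PI / 180.0), 100);
+    cv::cuda::GpuMat d_lines;
+    hough->detect(d_edges, d_lines);             // d_edges: binary CV_8UC1 GpuMat
+    std::vector<cv::Vec2f> h_lines;
+    hough->downloadResults(d_lines, h_lines);    // copy detected lines to the host
+@endcode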
+ */
CV_EXPORTS Ptr<HoughLinesDetector> createHoughLinesDetector(float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096);
//////////////////////////////////////
// HoughLinesP
-//! finds line segments in the black-n-white image using probabilistic Hough transform
+/** @brief Base class for line segments detector algorithm.
+ */
class CV_EXPORTS HoughSegmentDetector : public Algorithm
{
public:
+ /** @brief Finds line segments in a binary image using the probabilistic Hough transform.
+
+ @param src 8-bit, single-channel binary source image.
+ @param lines Output vector of lines. Each line is represented by a 4-element vector
+ \f$(x_1, y_1, x_2, y_2)\f$ , where \f$(x_1,y_1)\f$ and \f$(x_2, y_2)\f$ are the ending points of each detected
+ line segment.
+
+ @sa HoughLinesP
+ */
virtual void detect(InputArray src, OutputArray lines) = 0;
virtual void setRho(float rho) = 0;
@@ -246,14 +452,32 @@ public:
virtual int getMaxLines() const = 0;
};
+/** @brief Creates implementation for cuda::HoughSegmentDetector .
+
+@param rho Distance resolution of the accumulator in pixels.
+@param theta Angle resolution of the accumulator in radians.
+@param minLineLength Minimum line length. Line segments shorter than that are rejected.
+@param maxLineGap Maximum allowed gap between points on the same line to link them.
+@param maxLines Maximum number of output lines.
+ */
CV_EXPORTS Ptr<HoughSegmentDetector> createHoughSegmentDetector(float rho, float theta, int minLineLength, int maxLineGap, int maxLines = 4096);
//////////////////////////////////////
// HoughCircles
+/** @brief Base class for circles detector algorithm.
+ */
class CV_EXPORTS HoughCirclesDetector : public Algorithm
{
public:
+ /** @brief Finds circles in a grayscale image using the Hough transform.
+
+ @param src 8-bit, single-channel grayscale input image.
+ @param circles Output vector of found circles. Each vector is encoded as a 3-element
+ floating-point vector \f$(x, y, radius)\f$ .
+
+ @sa HoughCircles
+ */
virtual void detect(InputArray src, OutputArray circles) = 0;
virtual void setDp(float dp) = 0;
@@ -278,85 +502,257 @@ public:
virtual int getMaxCircles() const = 0;
};
+/** @brief Creates implementation for cuda::HoughCirclesDetector .
+
+@param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
+dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
+half the width and height.
+@param minDist Minimum distance between the centers of the detected circles. If the parameter is
+too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
+too large, some circles may be missed.
+@param cannyThreshold The higher threshold of the two passed to Canny edge detector (the lower one
+is half as large).
+@param votesThreshold The accumulator threshold for the circle centers at the detection stage. The
+smaller it is, the more false circles may be detected.
+@param minRadius Minimum circle radius.
+@param maxRadius Maximum circle radius.
+@param maxCircles Maximum number of output circles.
+ */
CV_EXPORTS Ptr<HoughCirclesDetector> createHoughCirclesDetector(float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096);
//////////////////////////////////////
// GeneralizedHough
-//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122.
-//! Detects position only without traslation and rotation
+/** @brief Creates implementation for generalized Hough transform from @cite Ballard1981 .
+ */
CV_EXPORTS Ptr<GeneralizedHoughBallard> createGeneralizedHoughBallard();
-//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038.
-//! Detects position, traslation and rotation
+/** @brief Creates implementation for generalized Hough transform from @cite Guil1999 .
+ */
CV_EXPORTS Ptr<GeneralizedHoughGuil> createGeneralizedHoughGuil();
+//! @} cudaimgproc_hough
+
////////////////////////// Corners Detection ///////////////////////////
+//! @addtogroup cudaimgproc_feature
+//! @{
+
+/** @brief Base class for Cornerness Criteria computation.
+ */
class CV_EXPORTS CornernessCriteria : public Algorithm
{
public:
+ /** @brief Computes the cornerness criteria at each image pixel.
+
+ @param src Source image.
+ @param dst Destination image containing cornerness values. It will have the same size as src and
+ CV_32FC1 type.
+ @param stream Stream for the asynchronous version.
+ */
virtual void compute(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
};
-//! computes Harris cornerness criteria at each image pixel
+/** @brief Creates implementation for Harris cornerness criteria.
+
+@param srcType Input source type. Only CV_8UC1 and CV_32FC1 are supported for now.
+@param blockSize Neighborhood size.
+@param ksize Aperture parameter for the Sobel operator.
+@param k Harris detector free parameter.
+@param borderType Pixel extrapolation method. Only BORDER_REFLECT101 and BORDER_REPLICATE are
+supported for now.
+
+@sa cornerHarris
+ */
CV_EXPORTS Ptr<CornernessCriteria> createHarrisCorner(int srcType, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);
-//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria
+/** @brief Creates implementation for the minimum eigen value of a 2x2 derivative covariation matrix (the
+cornerness criteria).
+
+@param srcType Input source type. Only CV_8UC1 and CV_32FC1 are supported for now.
+@param blockSize Neighborhood size.
+@param ksize Aperture parameter for the Sobel operator.
+@param borderType Pixel extrapolation method. Only BORDER_REFLECT101 and BORDER_REPLICATE are
+supported for now.
+
+@sa cornerMinEigenVal
+ */
CV_EXPORTS Ptr<CornernessCriteria> createMinEigenValCorner(int srcType, int blockSize, int ksize, int borderType = BORDER_REFLECT101);
////////////////////////// Corners Detection ///////////////////////////
+/** @brief Base class for Corners Detector.
+ */
class CV_EXPORTS CornersDetector : public Algorithm
{
public:
- //! return 1 rows matrix with CV_32FC2 type
+ /** @brief Determines strong corners on an image.
+
+ @param image Input 8-bit or floating-point 32-bit, single-channel image.
+ @param corners Output vector of detected corners (1-row matrix with CV_32FC2 type with corners
+ positions).
+ @param mask Optional region of interest. If the image is not empty (it needs to have the type
+ CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
+ */
virtual void detect(InputArray image, OutputArray corners, InputArray mask = noArray()) = 0;
};
+/** @brief Creates implementation for cuda::CornersDetector .
+
+@param srcType Input source type. Only CV_8UC1 and CV_32FC1 are supported for now.
+@param maxCorners Maximum number of corners to return. If there are more corners than maxCorners,
+the strongest of them are returned.
+@param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
+parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
+(see cornerMinEigenVal ) or the Harris function response (see cornerHarris ). The corners with the
+quality measure less than the product are rejected. For example, if the best corner has the
+quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
+less than 15 are rejected.
+@param minDistance Minimum possible Euclidean distance between the returned corners.
+@param blockSize Size of an average block for computing a derivative covariation matrix over each
+pixel neighborhood. See cornerEigenValsAndVecs .
+@param useHarrisDetector Parameter indicating whether to use a Harris detector (see cornerHarris)
+or cornerMinEigenVal.
+@param harrisK Free parameter of the Harris detector.
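+
+A brief sketch (the corner count is an illustrative value):
+@code{.cpp}
+    cv::Ptr<cv::cuda::CornersDetector> detector =
+        cv::cuda::createGoodFeaturesToTrackDetector(CV_8UC1, 500);
+    cv::cuda::GpuMat d_corners;                  // 1-row CV_32FC2 matrix on output
+    detector->detect(d_gray, d_corners);
+@endcode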
+ */
CV_EXPORTS Ptr<CornersDetector> createGoodFeaturesToTrackDetector(int srcType, int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,
int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04);
+//! @} cudaimgproc_feature
+
///////////////////////////// Mean Shift //////////////////////////////
-//! Does mean shift filtering on GPU.
+/** @brief Performs mean-shift filtering for each point of the source image.
+
+@param src Source image. Only CV_8UC4 images are supported for now.
+@param dst Destination image containing the color of mapped points. It has the same size and type
+as src .
+@param sp Spatial window radius.
+@param sr Color window radius.
+@param criteria Termination criteria. See TermCriteria.
+@param stream Stream for the asynchronous version.
+
+The function maps each point of the source image into another point. As a result, you have a new color and new
+position of each point.
+ */
CV_EXPORTS void meanShiftFiltering(InputArray src, OutputArray dst, int sp, int sr,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
Stream& stream = Stream::Null());
-//! Does mean shift procedure on GPU.
+/** @brief Performs a mean-shift procedure and stores information about processed points (their colors and
+positions) in two images.
+
+@param src Source image. Only CV_8UC4 images are supported for now.
+@param dstr Destination image containing the color of mapped points. The size and type is the same
+as src .
+@param dstsp Destination image containing the position of mapped points. The size is the same as
+src size. The type is CV_16SC2 .
+@param sp Spatial window radius.
+@param sr Color window radius.
+@param criteria Termination criteria. See TermCriteria.
+@param stream Stream for the asynchronous version.
+
+@sa cuda::meanShiftFiltering
+ */
CV_EXPORTS void meanShiftProc(InputArray src, OutputArray dstr, OutputArray dstsp, int sp, int sr,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
Stream& stream = Stream::Null());
-//! Does mean shift segmentation with elimination of small regions.
+/** @brief Performs a mean-shift segmentation of the source image and eliminates small segments.
+
+@param src Source image. Only CV_8UC4 images are supported for now.
+@param dst Segmented image with the same size and type as src (host memory).
+@param sp Spatial window radius.
+@param sr Color window radius.
+@param minsize Minimum segment size. Smaller segments are merged.
+@param criteria Termination criteria. See TermCriteria.
+ */
CV_EXPORTS void meanShiftSegmentation(InputArray src, OutputArray dst, int sp, int sr, int minsize,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
/////////////////////////// Match Template ////////////////////////////
-//! computes the proximity map for the raster template and the image where the template is searched for
+/** @brief Base class for Template Matching.
+ */
class CV_EXPORTS TemplateMatching : public Algorithm
{
public:
+ /** @brief Computes a proximity map for a raster template and an image where the template is searched for.
+
+ @param image Source image.
+ @param templ Template image with the size and type the same as image .
+ @param result Map containing comparison results ( CV_32FC1 ). If image is *W x H* and templ is *w
+ x h*, then result must be *W-w+1 x H-h+1*.
+ @param stream Stream for the asynchronous version.
+ */
virtual void match(InputArray image, InputArray templ, OutputArray result, Stream& stream = Stream::Null()) = 0;
};
+/** @brief Creates implementation for cuda::TemplateMatching .
+
+@param srcType Input source type. CV_32F and CV_8U depth images (1..4 channels) are supported
+for now.
+@param method Specifies the way to compare the template with the image.
+@param user_block_size You can use the user_block_size field to set a specific block size. If you
+leave its default value Size(0,0) then automatic estimation of block size will be used (which is
+optimized for speed). By varying user_block_size you can reduce memory requirements at the cost
+of speed.
+
+The following methods are supported for the CV_8U depth images for now:
+
+- CV_TM_SQDIFF
+- CV_TM_SQDIFF_NORMED
+- CV_TM_CCORR
+- CV_TM_CCORR_NORMED
+- CV_TM_CCOEFF
+- CV_TM_CCOEFF_NORMED
+
+The following methods are supported for the CV_32F images for now:
+
+- CV_TM_SQDIFF
+- CV_TM_CCORR
+
+@sa matchTemplate
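+
+A minimal sketch (cv::TM_CCORR is the C++ counterpart of the CV_TM_CCORR constant listed above;
+image names are assumptions):
+@code{.cpp}
+    cv::Ptr<cv::cuda::TemplateMatching> matcher =
+        cv::cuda::createTemplateMatching(CV_8U, cv::TM_CCORR);
+    matcher->match(d_image, d_templ, d_result);  // d_result: CV_32FC1 proximity map
+@endcode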
+ */
CV_EXPORTS Ptr<TemplateMatching> createTemplateMatching(int srcType, int method, Size user_block_size = Size());
////////////////////////// Bilateral Filter ///////////////////////////
-//! Performa bilateral filtering of passsed image
+/** @brief Performs bilateral filtering of the passed image.
+
+@param src Source image. Supports only (channels != 2 && depth() != CV_8S && depth() != CV_32S
+&& depth() != CV_64F).
+@param dst Destination image.
+@param kernel_size Kernel window size.
+@param sigma_color Filter sigma in the color space.
+@param sigma_spatial Filter sigma in the coordinate space.
+@param borderMode Border type. See borderInterpolate for details. BORDER_REFLECT101 ,
+BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.
+@param stream Stream for the asynchronous version.
+
+@sa bilateralFilter
+ */
CV_EXPORTS void bilateralFilter(InputArray src, OutputArray dst, int kernel_size, float sigma_color, float sigma_spatial,
int borderMode = BORDER_DEFAULT, Stream& stream = Stream::Null());
///////////////////////////// Blending ////////////////////////////////
-//! performs linear blending of two images
-//! to avoid accuracy errors sum of weigths shouldn't be very close to zero
+/** @brief Performs linear blending of two images.
+
+@param img1 First image. Supports only CV_8U and CV_32F depth.
+@param img2 Second image. Must have the same size and the same type as img1 .
+@param weights1 Weights for first image. Must have the same size as img1 . Supports only CV_32F
+type.
+@param weights2 Weights for second image. Must have the same size as img2 . Supports only CV_32F
+type.
+@param result Destination image.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void blendLinear(InputArray img1, InputArray img2, InputArray weights1, InputArray weights2,
OutputArray result, Stream& stream = Stream::Null());
+//! @}
+
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDAIMGPROC_HPP__ */
diff --git a/modules/cudalegacy/include/opencv2/cudalegacy.hpp b/modules/cudalegacy/include/opencv2/cudalegacy.hpp
index 5ae75cd316..a72ef09c75 100644
--- a/modules/cudalegacy/include/opencv2/cudalegacy.hpp
+++ b/modules/cudalegacy/include/opencv2/cudalegacy.hpp
@@ -49,4 +49,11 @@
#include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp"
#include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudalegacy Legacy support
+ @}
+*/
+
#endif /* __OPENCV_CUDALEGACY_HPP__ */
diff --git a/modules/cudalegacy/include/opencv2/cudalegacy/NCV.hpp b/modules/cudalegacy/include/opencv2/cudalegacy/NCV.hpp
index cb84c23ad5..9b9a6fe178 100644
--- a/modules/cudalegacy/include/opencv2/cudalegacy/NCV.hpp
+++ b/modules/cudalegacy/include/opencv2/cudalegacy/NCV.hpp
@@ -60,6 +60,8 @@
//
//==============================================================================
+//! @addtogroup cudalegacy
+//! @{
/**
* Compile-time assert namespace
@@ -203,6 +205,7 @@ struct NcvPoint2D32u
__host__ __device__ NcvPoint2D32u(Ncv32u x_, Ncv32u y_) : x(x_), y(y_) {}
};
+//! @cond IGNORED
NCV_CT_ASSERT(sizeof(NcvBool) <= 4);
NCV_CT_ASSERT(sizeof(Ncv64s) == 8);
@@ -221,6 +224,7 @@ NCV_CT_ASSERT(sizeof(NcvRect32u) == 4 * sizeof(Ncv32u));
NCV_CT_ASSERT(sizeof(NcvSize32u) == 2 * sizeof(Ncv32u));
NCV_CT_ASSERT(sizeof(NcvPoint2D32u) == 2 * sizeof(Ncv32u));
+//! @endcond
//==============================================================================
//
@@ -1023,6 +1027,6 @@ CV_EXPORTS NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst, Ncv32u dstStride, Nc
NCVMatrixAlloc<type> name(alloc, width, height); \
ncvAssertReturn(name.isMemAllocated(), err);
-
+//! @}
#endif // _ncv_hpp_
diff --git a/modules/cudalegacy/include/opencv2/cudalegacy/NCVBroxOpticalFlow.hpp b/modules/cudalegacy/include/opencv2/cudalegacy/NCVBroxOpticalFlow.hpp
index 777000cf7b..c14532b480 100644
--- a/modules/cudalegacy/include/opencv2/cudalegacy/NCVBroxOpticalFlow.hpp
+++ b/modules/cudalegacy/include/opencv2/cudalegacy/NCVBroxOpticalFlow.hpp
@@ -62,6 +62,9 @@
#include "opencv2/cudalegacy/NCV.hpp"
+//! @addtogroup cudalegacy
+//! @{
+
/// \brief Model and solver parameters
struct NCVBroxOpticalFlowDescriptor
{
@@ -89,6 +92,7 @@ struct NCVBroxOpticalFlowDescriptor
/// \param [in] frame1 frame to track
/// \param [out] u flow horizontal component (along \b x axis)
/// \param [out] v flow vertical component (along \b y axis)
+/// \param stream
/// \return computation status
/////////////////////////////////////////////////////////////////////////////////////////
@@ -101,4 +105,6 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
NCVMatrix<Ncv32f> &v,
cudaStream_t stream);
+//! @}
+
#endif
diff --git a/modules/cudalegacy/include/opencv2/cudalegacy/NCVHaarObjectDetection.hpp b/modules/cudalegacy/include/opencv2/cudalegacy/NCVHaarObjectDetection.hpp
index 6c69cbd5a1..6b84e8b255 100644
--- a/modules/cudalegacy/include/opencv2/cudalegacy/NCVHaarObjectDetection.hpp
+++ b/modules/cudalegacy/include/opencv2/cudalegacy/NCVHaarObjectDetection.hpp
@@ -61,6 +61,8 @@
#include "opencv2/cudalegacy/NCV.hpp"
+//! @addtogroup cudalegacy
+//! @{
//==============================================================================
//
@@ -456,6 +458,6 @@ CV_EXPORTS NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures);
-
+//! @}
#endif // _ncvhaarobjectdetection_hpp_
diff --git a/modules/cudalegacy/include/opencv2/cudalegacy/NCVPyramid.hpp b/modules/cudalegacy/include/opencv2/cudalegacy/NCVPyramid.hpp
index 7ec22a367f..9f4501a5aa 100644
--- a/modules/cudalegacy/include/opencv2/cudalegacy/NCVPyramid.hpp
+++ b/modules/cudalegacy/include/opencv2/cudalegacy/NCVPyramid.hpp
@@ -48,6 +48,8 @@
#include "opencv2/cudalegacy/NCV.hpp"
#include "opencv2/core/cuda/common.hpp"
+//! @cond IGNORED
+
namespace cv { namespace cuda { namespace device
{
namespace pyramid
@@ -106,4 +108,6 @@ private:
#endif //_WIN32
+//! @endcond
+
#endif //_ncvpyramid_hpp_
diff --git a/modules/cudalegacy/include/opencv2/cudalegacy/NPP_staging.hpp b/modules/cudalegacy/include/opencv2/cudalegacy/NPP_staging.hpp
index 979ceef41f..6cc50d7a47 100644
--- a/modules/cudalegacy/include/opencv2/cudalegacy/NPP_staging.hpp
+++ b/modules/cudalegacy/include/opencv2/cudalegacy/NPP_staging.hpp
@@ -45,19 +45,14 @@
#include "opencv2/cudalegacy/NCV.hpp"
-
-/**
-* \file NPP_staging.hpp
-* NPP Staging Library
-*/
-
+//! @addtogroup cudalegacy
+//! @{
/** \defgroup core_npp NPPST Core
* Basic functions for CUDA streams management.
* @{
*/
-
/**
* Gets an active CUDA stream used by NPPST
* NOT THREAD SAFE
@@ -168,6 +163,7 @@ NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState);
* \param nSrcStep [IN] Source image line step
* \param pDst [OUT] Destination image pointer (CUDA device memory)
* \param dstSize [OUT] Destination image size
* \param nDstStep [IN] Destination image line step
* \param oROI [IN] Region of interest in the source image
* \param borderType [IN] Type of border
* \param pKernel [IN] Pointer to row kernel values (CUDA device memory)
@@ -201,6 +197,7 @@ NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc,
* \param nSrcStep [IN] Source image line step
* \param pDst [OUT] Destination image pointer (CUDA device memory)
* \param dstSize [OUT] Destination image size
* \param nDstStep [IN] Destination image line step
* \param oROI [IN] Region of interest in the source image
* \param borderType [IN] Type of border
* \param pKernel [IN] Pointer to column kernel values (CUDA device memory)
@@ -228,7 +225,7 @@ NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc,
/** Size of buffer required for vector image warping.
*
* \param srcSize [IN] Source image size
- * \param nStep [IN] Source image line step
+ * \param nSrcStep [IN] Source image line step
* \param hpSize [OUT] Where to store computed size (host memory)
*
* \return NCV status code
@@ -285,6 +282,7 @@ NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc,
* \param pU [IN] Pointer to horizontal displacement field (CUDA device memory)
* \param pV [IN] Pointer to vertical displacement field (CUDA device memory)
* \param nVFStep [IN] Displacement field line step
+ * \param pBuffer
* \param timeScale [IN] Value by which displacement field will be scaled for warping
* \param pDst [OUT] Destination image pointer (CUDA device memory)
*
@@ -903,5 +901,6 @@ NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
/*@}*/
+//! @}
#endif // _npp_staging_hpp_
diff --git a/modules/cudalegacy/include/opencv2/cudalegacy/private.hpp b/modules/cudalegacy/include/opencv2/cudalegacy/private.hpp
index 41c23836d9..721748099c 100644
--- a/modules/cudalegacy/include/opencv2/cudalegacy/private.hpp
+++ b/modules/cudalegacy/include/opencv2/cudalegacy/private.hpp
@@ -56,6 +56,8 @@
#include "opencv2/cudalegacy.hpp"
+//! @cond IGNORED
+
namespace cv { namespace cuda
{
class NppStStreamHandler
@@ -89,4 +91,6 @@ namespace cv { namespace cuda
#define ncvSafeCall(expr) cv::cuda::checkNcvError(expr, __FILE__, __LINE__, CV_Func)
+//! @endcond
+
#endif // __OPENCV_CORE_CUDALEGACY_PRIVATE_HPP__
diff --git a/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp b/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp
index d07a834ef3..f65b1447b2 100644
--- a/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp
+++ b/modules/cudaoptflow/include/opencv2/cudaoptflow.hpp
@@ -49,8 +49,21 @@
#include "opencv2/core/cuda.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudaoptflow Optical Flow
+ @}
+ */
+
namespace cv { namespace cuda {
+//! @addtogroup cudaoptflow
+//! @{
+
+/** @brief Class computing the optical flow for two images using the Brox et al. optical flow algorithm
+(@cite Brox2004).
+ */
class CV_EXPORTS BroxOpticalFlow
{
public:
@@ -88,16 +101,58 @@ public:
GpuMat buf;
};
+/** @brief Class used for calculating an optical flow.
+
+The class can calculate an optical flow for a sparse feature set or dense optical flow using the
+iterative Lucas-Kanade method with pyramids.
+
+@sa calcOpticalFlowPyrLK
+
+@note
+ - An example of the Lucas Kanade optical flow algorithm can be found at
+ opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp
+ */
class CV_EXPORTS PyrLKOpticalFlow
{
public:
PyrLKOpticalFlow();
+ /** @brief Calculate an optical flow for a sparse feature set.
+
+ @param prevImg First 8-bit input image (supports both grayscale and color images).
+ @param nextImg Second input image of the same size and the same type as prevImg .
+ @param prevPts Vector of 2D points for which the flow needs to be found. It must be a one-row matrix
+ with CV_32FC2 type.
+ @param nextPts Output vector of 2D points (with single-precision floating-point coordinates)
+ containing the calculated new positions of input features in the second image. When useInitialFlow
+ is true, the vector must have the same size as in the input.
+ @param status Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the
+ flow for the corresponding features has been found. Otherwise, it is set to 0.
+ @param err Output vector (CV_32FC1 type) that contains the difference between patches around the
+ original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL if not
+ needed.
+
+ @sa calcOpticalFlowPyrLK
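+
+ A brief sketch (the input GpuMat objects are assumed to be prepared by the caller):
+ @code{.cpp}
+     cv::cuda::PyrLKOpticalFlow pyrlk;
+     cv::cuda::GpuMat d_prevPts;                 // 1 x N matrix of CV_32FC2 points
+     cv::cuda::GpuMat d_nextPts, d_status;
+     pyrlk.sparse(d_prev, d_next, d_prevPts, d_nextPts, d_status);
+ @endcode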
+ */
void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
GpuMat& status, GpuMat* err = 0);
+ /** @brief Calculate dense optical flow.
+
+ @param prevImg First 8-bit grayscale input image.
+ @param nextImg Second input image of the same size and the same type as prevImg .
+ @param u Horizontal component of the optical flow of the same size as input images, 32-bit
+ floating-point, single-channel
+ @param v Vertical component of the optical flow of the same size as input images, 32-bit
+ floating-point, single-channel
+ @param err Output vector (CV_32FC1 type) that contains the difference between patches around the
+ original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL if not
+ needed.
+ */
void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0);
+ /** @brief Releases inner buffers memory.
+ */
void releaseMemory();
Size winSize;
@@ -115,6 +170,8 @@ private:
GpuMat vPyr_[2];
};
+/** @brief Class computing a dense optical flow using Gunnar Farneback’s algorithm.
+ */
class CV_EXPORTS FarnebackOpticalFlow
{
public:
@@ -139,8 +196,20 @@ public:
double polySigma;
int flags;
+ /** @brief Computes a dense optical flow using Gunnar Farneback’s algorithm.
+
+ @param frame0 First 8-bit gray-scale input image
+ @param frame1 Second 8-bit gray-scale input image
+ @param flowx Flow horizontal component
+ @param flowy Flow vertical component
+ @param s Stream for the asynchronous version.
+
+ @sa calcOpticalFlowFarneback
+ */
void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null());
+ /** @brief Releases unused auxiliary memory buffers.
+ */
void releaseMemory()
{
frames_[0].release();
@@ -295,20 +364,22 @@ private:
GpuMat extended_I1;
};
-//! Interpolate frames (images) using provided optical flow (displacement field).
-//! frame0 - frame 0 (32-bit floating point images, single channel)
-//! frame1 - frame 1 (the same type and size)
-//! fu - forward horizontal displacement
-//! fv - forward vertical displacement
-//! bu - backward horizontal displacement
-//! bv - backward vertical displacement
-//! pos - new frame position
-//! newFrame - new frame
-//! buf - temporary buffer, will have width x 6*height size, CV_32FC1 type and contain 6 GpuMat;
-//! occlusion masks 0, occlusion masks 1,
-//! interpolated forward flow 0, interpolated forward flow 1,
-//! interpolated backward flow 0, interpolated backward flow 1
-//!
+/** @brief Interpolates frames (images) using provided optical flow (displacement field).
+
+@param frame0 First frame (32-bit floating point images, single channel).
+@param frame1 Second frame. Must have the same type and size as frame0 .
+@param fu Forward horizontal displacement.
+@param fv Forward vertical displacement.
+@param bu Backward horizontal displacement.
+@param bv Backward vertical displacement.
+@param pos New frame position.
+@param newFrame Output image.
+@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6
+GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward
+horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow,
+interpolated backward vertical flow.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
const GpuMat& fu, const GpuMat& fv,
const GpuMat& bu, const GpuMat& bv,
@@ -317,6 +388,8 @@ CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
+//! @}
+
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDAOPTFLOW_HPP__ */
diff --git a/modules/cudastereo/include/opencv2/cudastereo.hpp b/modules/cudastereo/include/opencv2/cudastereo.hpp
index a58156c433..af265bb44f 100644
--- a/modules/cudastereo/include/opencv2/cudastereo.hpp
+++ b/modules/cudastereo/include/opencv2/cudastereo.hpp
@@ -50,11 +50,25 @@
#include "opencv2/core/cuda.hpp"
#include "opencv2/calib3d.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudastereo Stereo Correspondence
+ @}
+ */
+
namespace cv { namespace cuda {
+//! @addtogroup cudastereo
+//! @{
+
/////////////////////////////////////////
// StereoBM
+/** @brief Class computing stereo correspondence (disparity map) using the block matching algorithm.
+
+@sa StereoBM
+ */
class CV_EXPORTS StereoBM : public cv::StereoBM
{
public:
@@ -63,20 +77,70 @@ public:
virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
};
+/** @brief Creates StereoBM object.
+
+@param numDisparities the disparity search range. For each pixel, the algorithm will find the best
+disparity from 0 (default minimum disparity) to numDisparities. The search range can then be
+shifted by changing the minimum disparity.
+@param blockSize the linear size of the blocks compared by the algorithm. The size should be odd
+(as the block is centered at the current pixel). Larger block size implies smoother, though less
+accurate disparity map. Smaller block size gives a more detailed disparity map, but there is a higher
+chance for the algorithm to find a wrong correspondence.
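+
+A minimal sketch (the disparity range and block size are illustrative):
+@code{.cpp}
+    cv::Ptr<cv::cuda::StereoBM> bm = cv::cuda::createStereoBM(128, 19);
+    bm->compute(d_left, d_right, d_disp);        // CV_8UC1 rectified input pair
+@endcode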
+ */
CV_EXPORTS Ptr<cuda::StereoBM> createStereoBM(int numDisparities = 64, int blockSize = 19);
/////////////////////////////////////////
// StereoBeliefPropagation
-//! "Efficient Belief Propagation for Early Vision" P.Felzenszwalb
+/** @brief Class computing stereo correspondence using the belief propagation algorithm.
+
+The class implements the algorithm described in @cite Felzenszwalb2006 . It can compute its own data cost
+(using a truncated linear model) or use a user-provided data cost.
+
+@note
+ StereoBeliefPropagation requires a lot of memory for message storage:
+
+ \f[width \_ step \cdot height \cdot ndisp \cdot 4 \cdot (1 + 0.25)\f]
+
+ and for data cost storage:
+
+ \f[width\_step \cdot height \cdot ndisp \cdot (1 + 0.25 + 0.0625 + \dotsm + \frac{1}{4^{levels}})\f]
+
+ width_step is the number of bytes in a line including padding.
+
+StereoBeliefPropagation uses a truncated linear model for the data cost and discontinuity terms:
+
+\f[DataCost = data \_ weight \cdot \min ( \lvert Img_{Left}(x,y)-Img_{Right}(x-d,y) \rvert , max \_ data \_ term)\f]
+
+\f[DiscTerm = \min (disc \_ single \_ jump \cdot \lvert f_1-f_2 \rvert , max \_ disc \_ term)\f]
+
+For more details, see @cite Felzenszwalb2006 .
+
+By default, StereoBeliefPropagation uses floating-point arithmetic and the CV_32FC1 type for
+messages. But it can also use fixed-point arithmetic and the CV_16SC1 message type for better
+performance. To avoid an overflow in this case, the parameters must satisfy the following
+requirement:
+
+\f[10 \cdot 2^{levels-1} \cdot max \_ data \_ term < SHRT \_ MAX\f]
+
+@sa StereoMatcher
+ */
class CV_EXPORTS StereoBeliefPropagation : public cv::StereoMatcher
{
public:
using cv::StereoMatcher::compute;
+ /** @overload */
virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
- //! version for user specified data term
+ /** @brief Enables the stereo correspondence operator that finds the disparity for the specified data cost.
+
+ @param data User-specified data cost, a matrix of msg_type type and
+ Size(\<image columns\>\*ndisp, \<image rows\>) size.
+ @param disparity Output disparity map. If disparity is empty, the output type is CV_16SC1 .
+ Otherwise, the type is retained.
+ @param stream Stream for the asynchronous version.
+ */
virtual void compute(InputArray data, OutputArray disparity, Stream& stream = Stream::Null()) = 0;
//! number of BP iterations on each level
@@ -107,18 +171,48 @@ public:
virtual int getMsgType() const = 0;
virtual void setMsgType(int msg_type) = 0;
+ /** @brief Uses a heuristic method to compute the recommended parameters ( ndisp, iters and levels ) for the
+ specified image size ( width and height ).
+ */
static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels);
};
+/** @brief Creates StereoBeliefPropagation object.
+
+@param ndisp Number of disparities.
+@param iters Number of BP iterations on each level.
+@param levels Number of levels.
+@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
+ */
CV_EXPORTS Ptr<cuda::StereoBeliefPropagation>
createStereoBeliefPropagation(int ndisp = 64, int iters = 5, int levels = 5, int msg_type = CV_32F);
/////////////////////////////////////////
// StereoConstantSpaceBP
-//! "A Constant-Space Belief Propagation Algorithm for Stereo Matching"
-//! Qingxiong Yang, Liang Wang, Narendra Ahuja
-//! http://vision.ai.uiuc.edu/~qyang6/
+/** @brief Class computing stereo correspondence using the constant space belief propagation algorithm.
+
+The class implements algorithm described in @cite Yang2010 . StereoConstantSpaceBP supports both local
+minimum and global minimum data cost initialization algorithms. For more details, see the paper
+mentioned above. By default, a local algorithm is used. To enable a global algorithm, set
+use_local_init_data_cost to false .
+
+StereoConstantSpaceBP uses a truncated linear model for the data cost and discontinuity terms:
+
+\f[DataCost = data \_ weight \cdot \min ( \lvert I_2-I_1 \rvert , max \_ data \_ term)\f]
+
+\f[DiscTerm = \min (disc \_ single \_ jump \cdot \lvert f_1-f_2 \rvert , max \_ disc \_ term)\f]
+
+For more details, see @cite Yang2010 .
+
+By default, StereoConstantSpaceBP uses floating-point arithmetic and the CV_32FC1 type for
+messages. But it can also use fixed-point arithmetic and the CV_16SC1 message type for better
+performance. To avoid an overflow in this case, the parameters must satisfy the following
+requirement:
+
+\f[10 \cdot 2^{levels-1} \cdot max \_ data \_ term < SHRT \_ MAX\f]
+
+ */
class CV_EXPORTS StereoConstantSpaceBP : public cuda::StereoBeliefPropagation
{
public:
@@ -129,23 +223,40 @@ public:
virtual bool getUseLocalInitDataCost() const = 0;
virtual void setUseLocalInitDataCost(bool use_local_init_data_cost) = 0;
+ /** @brief Uses a heuristic method to compute parameters (ndisp, iters, levels and nr_plane) for the specified
+ image size (width and height).
+ */
static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane);
};
+/** @brief Creates StereoConstantSpaceBP object.
+
+@param ndisp Number of disparities.
+@param iters Number of BP iterations on each level.
+@param levels Number of levels.
+@param nr_plane Number of disparity levels on the first level.
+@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
+ */
CV_EXPORTS Ptr<cuda::StereoConstantSpaceBP>
createStereoConstantSpaceBP(int ndisp = 128, int iters = 8, int levels = 4, int nr_plane = 4, int msg_type = CV_32F);
/////////////////////////////////////////
// DisparityBilateralFilter
-//! Disparity map refinement using joint bilateral filtering given a single color image.
-//! Qingxiong Yang, Liang Wang, Narendra Ahuja
-//! http://vision.ai.uiuc.edu/~qyang6/
+/** @brief Class refining a disparity map using joint bilateral filtering.
+
+The class implements @cite Yang2010 algorithm.
+ */
class CV_EXPORTS DisparityBilateralFilter : public cv::Algorithm
{
public:
- //! the disparity map refinement operator. Refine disparity map using joint bilateral filtering given a single color image.
- //! disparity must have CV_8U or CV_16S type, image must have CV_8UC1 or CV_8UC3 type.
+ /** @brief Refines a disparity map using joint bilateral filtering.
+
+ @param disparity Input disparity map. CV_8UC1 and CV_16SC1 types are supported.
+ @param image Input image. CV_8UC1 and CV_8UC3 types are supported.
+ @param dst Destination disparity map. It has the same size and type as disparity .
+ @param stream Stream for the asynchronous version.
+ */
virtual void apply(InputArray disparity, InputArray image, OutputArray dst, Stream& stream = Stream::Null()) = 0;
virtual int getNumDisparities() const = 0;
@@ -170,24 +281,48 @@ public:
virtual void setSigmaRange(double sigma_range) = 0;
};
+/** @brief Creates DisparityBilateralFilter object.
+
+@param ndisp Number of disparities.
+@param radius Filter radius.
+@param iters Number of iterations.
+ */
CV_EXPORTS Ptr<cuda::DisparityBilateralFilter>
createDisparityBilateralFilter(int ndisp = 64, int radius = 3, int iters = 1);
/////////////////////////////////////////
// Utility
-//! Reprojects disparity image to 3D space.
-//! Supports CV_8U and CV_16S types of input disparity.
-//! The output is a 3- or 4-channel floating-point matrix.
-//! Each element of this matrix will contain the 3D coordinates of the point (x,y,z,1), computed from the disparity map.
-//! Q is the 4x4 perspective transformation matrix that can be obtained with cvStereoRectify.
+/** @brief Reprojects a disparity image to 3D space.
+
+@param disp Input disparity image. CV_8U and CV_16S types are supported.
+@param xyzw Output 3- or 4-channel floating-point image of the same size as disp . Each element of
+xyzw(x,y) contains 3D coordinates (x,y,z) or (x,y,z,1) of the point (x,y) , computed from the
+disparity map.
+@param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained via stereoRectify .
+@param dst_cn The number of channels for output image. Can be 3 or 4.
+@param stream Stream for the asynchronous version.
+
+@sa reprojectImageTo3D
+ */
CV_EXPORTS void reprojectImageTo3D(InputArray disp, OutputArray xyzw, InputArray Q, int dst_cn = 4, Stream& stream = Stream::Null());
-//! Does coloring of disparity image: [0..ndisp) -> [0..240, 1, 1] in HSV.
-//! Supported types of input disparity: CV_8U, CV_16S.
-//! Output disparity has CV_8UC4 type in BGRA format (alpha = 255).
+/** @brief Colors a disparity image.
+
+@param src_disp Source disparity image. CV_8UC1 and CV_16SC1 types are supported.
+@param dst_disp Output disparity image. It has the same size as src_disp . The type is CV_8UC4
+in BGRA format (alpha = 255).
+@param ndisp Number of disparities.
+@param stream Stream for the asynchronous version.
+
+This function draws a colored disparity map by converting disparity values from [0..ndisp) interval
+first to HSV color space (where different disparity values correspond to different hues) and then
+converting the pixels to RGB for visualization.
+ */
CV_EXPORTS void drawColorDisp(InputArray src_disp, OutputArray dst_disp, int ndisp, Stream& stream = Stream::Null());
+//! @}
+
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDASTEREO_HPP__ */
diff --git a/modules/cudawarping/include/opencv2/cudawarping.hpp b/modules/cudawarping/include/opencv2/cudawarping.hpp
index d759f55598..ca877d50c9 100644
--- a/modules/cudawarping/include/opencv2/cudawarping.hpp
+++ b/modules/cudawarping/include/opencv2/cudawarping.hpp
@@ -50,54 +50,178 @@
#include "opencv2/core/cuda.hpp"
#include "opencv2/imgproc.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudawarping Image Warping
+ @}
+ */
+
namespace cv { namespace cuda {
-//! DST[x,y] = SRC[xmap[x,y],ymap[x,y]]
-//! supports only CV_32FC1 map type
+//! @addtogroup cudawarping
+//! @{
+
+/** @brief Applies a generic geometrical transformation to an image.
+
+@param src Source image.
+@param dst Destination image with the size the same as xmap and the type the same as src .
+@param xmap X values. Only CV_32FC1 type is supported.
+@param ymap Y values. Only CV_32FC1 type is supported.
+@param interpolation Interpolation method (see resize ). INTER_NEAREST , INTER_LINEAR and
+INTER_CUBIC are supported for now.
+@param borderMode Pixel extrapolation method (see borderInterpolate ). BORDER_REFLECT101 ,
+BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.
+@param borderValue Value used in case of a constant border. By default, it is 0.
+@param stream Stream for the asynchronous version.
+
+The function transforms the source image using the specified map:
+
+\f[\texttt{dst} (x,y) = \texttt{src} (xmap(x,y), ymap(x,y))\f]
+
+Values of pixels with non-integer coordinates are computed using the bilinear interpolation.
+
+@sa remap
+ */
CV_EXPORTS void remap(InputArray src, OutputArray dst, InputArray xmap, InputArray ymap,
int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(),
Stream& stream = Stream::Null());
-//! resizes the image
-//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA
+/** @brief Resizes an image.
+
+@param src Source image.
+@param dst Destination image with the same type as src . The size is dsize (when it is non-zero)
+or the size is computed from src.size() , fx , and fy .
+@param dsize Destination image size. If it is zero, it is computed as:
+\f[\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\f]
+Either dsize or both fx and fy must be non-zero.
+@param fx Scale factor along the horizontal axis. If it is zero, it is computed as:
+\f[\texttt{(double)dsize.width/src.cols}\f]
+@param fy Scale factor along the vertical axis. If it is zero, it is computed as:
+\f[\texttt{(double)dsize.height/src.rows}\f]
+@param interpolation Interpolation method. INTER_NEAREST , INTER_LINEAR and INTER_CUBIC are
+supported for now.
+@param stream Stream for the asynchronous version.
+
+@sa resize
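+
+A short sketch (the scale factors are illustrative):
+@code{.cpp}
+    cv::cuda::GpuMat d_half;
+    cv::cuda::resize(d_src, d_half, cv::Size(), 0.5, 0.5, cv::INTER_LINEAR);
+@endcode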
+ */
CV_EXPORTS void resize(InputArray src, OutputArray dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
-//! warps the image using affine transformation
-//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
+/** @brief Applies an affine transformation to an image.
+
+@param src Source image. CV_8U , CV_16U , CV_32S , or CV_32F depth and 1, 3, or 4 channels are
+supported.
+@param dst Destination image with the same type as src . The size is dsize .
+@param M *2x3* transformation matrix.
+@param dsize Size of the destination image.
+@param flags Combination of interpolation methods (see resize) and the optional flag
+WARP_INVERSE_MAP specifying that M is an inverse transformation ( dst=\>src ). Only
+INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are supported.
+@param borderMode Pixel extrapolation method (see borderInterpolate ).
+@param borderValue Value used in case of a constant border. By default, it is 0.
+@param stream Stream for the asynchronous version.
+
+@sa warpAffine
+ */
CV_EXPORTS void warpAffine(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null());
+/** @brief Builds transformation maps for affine transformation.
+
+@param M *2x3* transformation matrix.
+@param inverse Flag specifying that M is an inverse transformation ( dst=\>src ).
+@param dsize Size of the destination image.
+@param xmap X values with CV_32FC1 type.
+@param ymap Y values with CV_32FC1 type.
+@param stream Stream for the asynchronous version.
+
+@sa cuda::warpAffine , cuda::remap
+ */
CV_EXPORTS void buildWarpAffineMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
-//! warps the image using perspective transformation
-//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
+/** @brief Applies a perspective transformation to an image.
+
+@param src Source image. CV_8U , CV_16U , CV_32S , or CV_32F depth and 1, 3, or 4 channels are
+supported.
+@param dst Destination image with the same type as src . The size is dsize .
+@param M *3x3* transformation matrix.
+@param dsize Size of the destination image.
+@param flags Combination of interpolation methods (see resize ) and the optional flag
+WARP_INVERSE_MAP specifying that M is the inverse transformation ( dst =\> src ). Only
+INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are supported.
+@param borderMode Pixel extrapolation method (see borderInterpolate ).
+@param borderValue Value used in case of a constant border. By default, it is 0.
+@param stream Stream for the asynchronous version.
+
+@sa warpPerspective
+ */
CV_EXPORTS void warpPerspective(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null());
+/** @brief Builds transformation maps for perspective transformation.
+
+@param M *3x3* transformation matrix.
+@param inverse Flag specifying that M is an inverse transformation ( dst=\>src ).
+@param dsize Size of the destination image.
+@param xmap X values with CV_32FC1 type.
+@param ymap Y values with CV_32FC1 type.
+@param stream Stream for the asynchronous version.
+
+@sa cuda::warpPerspective , cuda::remap
+ */
CV_EXPORTS void buildWarpPerspectiveMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
-//! builds plane warping maps
+/** @brief Builds plane warping maps.
+ */
CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, InputArray K, InputArray R, InputArray T, float scale,
OutputArray map_x, OutputArray map_y, Stream& stream = Stream::Null());
-//! builds cylindrical warping maps
+/** @brief Builds cylindrical warping maps.
+ */
CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, InputArray K, InputArray R, float scale,
OutputArray map_x, OutputArray map_y, Stream& stream = Stream::Null());
-//! builds spherical warping maps
+/** @brief Builds spherical warping maps.
+ */
CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, InputArray K, InputArray R, float scale,
OutputArray map_x, OutputArray map_y, Stream& stream = Stream::Null());
-//! rotates an image around the origin (0,0) and then shifts it
-//! supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
-//! supports 1, 3 or 4 channels images with CV_8U, CV_16U or CV_32F depth
+/** @brief Rotates an image around the origin (0,0) and then shifts it.
+
+@param src Source image. 1-, 3-, or 4-channel images with CV_8U, CV_16U, or CV_32F depth are
+supported.
+@param dst Destination image with the same type as src. The size is dsize.
+@param dsize Size of the destination image.
+@param angle Angle of rotation in degrees.
+@param xShift Shift along the horizontal axis.
+@param yShift Shift along the vertical axis.
+@param interpolation Interpolation method. Only INTER_NEAREST, INTER_LINEAR, and INTER_CUBIC
+are supported.
+@param stream Stream for the asynchronous version.
+
+@sa cuda::warpAffine
+ */
CV_EXPORTS void rotate(InputArray src, OutputArray dst, Size dsize, double angle, double xShift = 0, double yShift = 0,
int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
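
A sketch of the rotate call; pixels that leave the dsize rectangle are simply cut off, so a
real caller would size and shift accordingly:

    cv::cuda::GpuMat d_rot;
    cv::cuda::rotate(d_src, d_rot, d_src.size(), 30.0 /*deg*/,
                     0.0 /*xShift*/, 0.0 /*yShift*/, cv::INTER_LINEAR);
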
-//! smoothes the source image and downsamples it
+/** @brief Smoothes an image and downsamples it.
+
+@param src Source image.
+@param dst Destination image. Will have Size((src.cols+1)/2, (src.rows+1)/2) size and the same
+type as src.
+@param stream Stream for the asynchronous version.
+
+@sa pyrDown
+ */
CV_EXPORTS void pyrDown(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
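
A sketch of a small Gaussian pyramid built by repeated pyrDown calls (the level count is
arbitrary here):

    std::vector<cv::cuda::GpuMat> pyr;
    pyr.push_back(d_src);
    for (int i = 0; i < 3; ++i)
    {
        cv::cuda::GpuMat next;
        cv::cuda::pyrDown(pyr.back(), next);   // each level is about half the size
        pyr.push_back(next);
    }
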
-//! upsamples the source image and then smoothes it
+/** @brief Upsamples an image and then smoothes it.
+
+@param src Source image.
+@param dst Destination image. Will have Size(src.cols\*2, src.rows\*2) size and the same type as
+src.
+@param stream Stream for the asynchronous version.
+ */
CV_EXPORTS void pyrUp(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
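
And the opposite direction, again as a sketch (d_small is an assumed device image):

    cv::cuda::GpuMat d_up;
    cv::cuda::pyrUp(d_small, d_up);   // Size(d_small.cols*2, d_small.rows*2), same type
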
class CV_EXPORTS ImagePyramid : public Algorithm
@@ -108,6 +232,8 @@ public:
CV_EXPORTS Ptr<ImagePyramid> createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null());
+//! @}
+
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDAWARPING_HPP__ */
diff --git a/modules/cudev/include/opencv2/cudev.hpp b/modules/cudev/include/opencv2/cudev.hpp
index a5fb4f6967..565efa1c6a 100644
--- a/modules/cudev/include/opencv2/cudev.hpp
+++ b/modules/cudev/include/opencv2/cudev.hpp
@@ -109,4 +109,11 @@
#include "cudev/expr/unary_op.hpp"
#include "cudev/expr/warping.hpp"
+/**
+ @addtogroup cuda
+ @{
+ @defgroup cudev Device layer
+ @}
+*/
+
#endif
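
To make the new cudev group concrete, a sketch of device-layer style (this must be compiled as
CUDA source; the functor is invented for the example):

    #include <opencv2/cudev.hpp>

    using namespace cv::cudev;

    // Illustrative per-pixel functor: saturated add of a constant.
    struct AddTen : unary_function<uchar, uchar>
    {
        __device__ uchar operator()(uchar v) const
        {
            return saturate_cast<uchar>(v + 10);
        }
    };

    void addTen(const GpuMat_<uchar>& src, GpuMat_<uchar>& dst)
    {
        gridTransformUnary(src, dst, AddTen());   // single fused kernel launch
    }
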
diff --git a/modules/cudev/include/opencv2/cudev/block/block.hpp b/modules/cudev/include/opencv2/cudev/block/block.hpp
index 385e1713e5..e8d59bb20b 100644
--- a/modules/cudev/include/opencv2/cudev/block/block.hpp
+++ b/modules/cudev/include/opencv2/cudev/block/block.hpp
@@ -50,6 +50,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
struct Block
{
__device__ __forceinline__ static uint blockId()
@@ -122,6 +125,9 @@ __device__ __forceinline__ static void blockTransfrom(InIt1 beg1, InIt1 end1, In
for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE)
*o = op(*t1, *t2);
}
+
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/block/dynamic_smem.hpp b/modules/cudev/include/opencv2/cudev/block/dynamic_smem.hpp
index 9f9ba60009..e52f829bf6 100644
--- a/modules/cudev/include/opencv2/cudev/block/dynamic_smem.hpp
+++ b/modules/cudev/include/opencv2/cudev/block/dynamic_smem.hpp
@@ -50,6 +50,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class T> struct DynamicSharedMem
{
__device__ __forceinline__ operator T*()
@@ -81,6 +84,8 @@ template <> struct DynamicSharedMem
}
};
+//! @}
+
}}
#endif
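
A sketch of how DynamicSharedMem is meant to be used; the kernel and the launch line are
illustrative only:

    __global__ void doubleValues(const float* in, float* out, int n)
    {
        // Typed view of the dynamically sized extern __shared__ buffer.
        cv::cudev::DynamicSharedMem<float> smem;
        float* buf = smem;

        const int i = threadIdx.x;
        if (i < n)
        {
            buf[i] = in[i] * 2.0f;   // stage through shared memory
            out[i] = buf[i];
        }
    }

    // launch: doubleValues<<<1, n, n * sizeof(float)>>>(d_in, d_out, n);
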
diff --git a/modules/cudev/include/opencv2/cudev/block/reduce.hpp b/modules/cudev/include/opencv2/cudev/block/reduce.hpp
index 4c9022631b..74c8fcac77 100644
--- a/modules/cudev/include/opencv2/cudev/block/reduce.hpp
+++ b/modules/cudev/include/opencv2/cudev/block/reduce.hpp
@@ -54,6 +54,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// blockReduce
template <int N, typename T, class Op>
@@ -123,6 +126,8 @@ __device__ __forceinline__ void blockReduceKeyVal(const tuple(skeys, key, svals, val, tid, cmp);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/block/scan.hpp b/modules/cudev/include/opencv2/cudev/block/scan.hpp
index c54dfef9f3..3369cff987 100644
--- a/modules/cudev/include/opencv2/cudev/block/scan.hpp
+++ b/modules/cudev/include/opencv2/cudev/block/scan.hpp
@@ -51,6 +51,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <int THREADS_NUM, typename T>
__device__ T blockScanInclusive(T data, volatile T* smem, uint tid)
{
@@ -96,6 +99,8 @@ __device__ __forceinline__ T blockScanExclusive(T data, volatile T* smem, uint t
return blockScanInclusive(data, smem, tid) - data;
}
+//! @}
+
}}
#endif
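
A sketch of the block-scan primitive; THREADS_NUM must match the block size at launch (256 is
an arbitrary choice, and smem is assumed to hold THREADS_NUM elements):

    __global__ void scanBlock(const int* in, int* out)
    {
        __shared__ int smem[256];
        const unsigned int tid = threadIdx.x;
        // Exclusive prefix sum over the 256 values owned by this block.
        out[tid] = cv::cudev::blockScanExclusive<256>(in[tid], smem, tid);
    }
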
diff --git a/modules/cudev/include/opencv2/cudev/block/vec_distance.hpp b/modules/cudev/include/opencv2/cudev/block/vec_distance.hpp
index c48e9146ef..767d32a466 100644
--- a/modules/cudev/include/opencv2/cudev/block/vec_distance.hpp
+++ b/modules/cudev/include/opencv2/cudev/block/vec_distance.hpp
@@ -53,6 +53,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// NormL1
template <typename T> struct NormL1
@@ -179,6 +182,8 @@ struct NormHamming
}
};
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/common.hpp b/modules/cudev/include/opencv2/cudev/common.hpp
index c8a7b7da25..f475e20b68 100644
--- a/modules/cudev/include/opencv2/cudev/common.hpp
+++ b/modules/cudev/include/opencv2/cudev/common.hpp
@@ -52,6 +52,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
using namespace cv::cuda;
// CV_CUDEV_ARCH
@@ -84,6 +87,8 @@ __host__ __device__ __forceinline__ int divUp(int total, int grain)
#define CV_PI_F ((float)CV_PI)
#define CV_LOG2_F ((float)CV_LOG2)
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/binary_func.hpp b/modules/cudev/include/opencv2/cudev/expr/binary_func.hpp
index f35ea2dc3c..2777a1e185 100644
--- a/modules/cudev/include/opencv2/cudev/expr/binary_func.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/binary_func.hpp
@@ -55,6 +55,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
#define CV_CUDEV_EXPR_BINARY_FUNC(name) \
template \
__host__ Expr::ptr_type, typename PtrTraits::ptr_type, name ## _func::value_type, typename PtrTraits::value_type>::type> > > \
@@ -70,6 +73,8 @@ CV_CUDEV_EXPR_BINARY_FUNC(absdiff)
#undef CV_CUDEV_EXPR_BINARY_FUNC
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/binary_op.hpp b/modules/cudev/include/opencv2/cudev/expr/binary_op.hpp
index f7e9655720..7533946fcc 100644
--- a/modules/cudev/include/opencv2/cudev/expr/binary_op.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/binary_op.hpp
@@ -58,6 +58,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// Binary Operations
#define CV_CUDEV_EXPR_BINOP_INST(op, functor) \
@@ -230,6 +233,8 @@ CV_CUDEV_EXPR_BINOP_INST(>>, bit_rshift)
#undef CV_CUDEV_EXPR_BINOP_INST
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/color.hpp b/modules/cudev/include/opencv2/cudev/expr/color.hpp
index 13f07c15a4..f53de78b3c 100644
--- a/modules/cudev/include/opencv2/cudev/expr/color.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/color.hpp
@@ -54,6 +54,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
#define CV_CUDEV_EXPR_CVTCOLOR_INST(name) \
template \
__host__ Expr::ptr_type, name ## _func::value_type>::elem_type> > > \
@@ -277,6 +280,8 @@ CV_CUDEV_EXPR_CVTCOLOR_INST(Luv4_to_LBGRA)
#undef CV_CUDEV_EXPR_CVTCOLOR_INST
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/deriv.hpp b/modules/cudev/include/opencv2/cudev/expr/deriv.hpp
index 822a86b9a3..da51cc711f 100644
--- a/modules/cudev/include/opencv2/cudev/expr/deriv.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/deriv.hpp
@@ -53,6 +53,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// derivX
template
@@ -116,6 +119,8 @@ laplacian_(const SrcPtr& src)
return makeExpr(laplacianPtr(src));
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/expr.hpp b/modules/cudev/include/opencv2/cudev/expr/expr.hpp
index 46c780b4a2..cdc8612173 100644
--- a/modules/cudev/include/opencv2/cudev/expr/expr.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/expr.hpp
@@ -51,6 +51,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class Body> struct Expr
{
Body body;
@@ -87,6 +90,8 @@ template struct PtrTraits< Expr >
}
};
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/per_element_func.hpp b/modules/cudev/include/opencv2/cudev/expr/per_element_func.hpp
index 56a067de9d..d7ecd3bb06 100644
--- a/modules/cudev/include/opencv2/cudev/expr/per_element_func.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/per_element_func.hpp
@@ -56,6 +56,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// min/max
template
@@ -127,6 +130,8 @@ lut_(const SrcPtr& src, const TablePtr& tbl)
return makeExpr(lutPtr(src, tbl));
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/reduction.hpp b/modules/cudev/include/opencv2/cudev/expr/reduction.hpp
index 1f0a3ff0e5..598fb4f86c 100644
--- a/modules/cudev/include/opencv2/cudev/expr/reduction.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/reduction.hpp
@@ -56,6 +56,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// sum
template <class SrcPtr> struct SumExprBody
@@ -254,6 +257,8 @@ integral_(const SrcPtr& src)
return makeExpr(body);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/unary_func.hpp b/modules/cudev/include/opencv2/cudev/expr/unary_func.hpp
index a30f6a6f3c..b19cec8272 100644
--- a/modules/cudev/include/opencv2/cudev/expr/unary_func.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/unary_func.hpp
@@ -54,6 +54,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
#define CV_CUDEV_EXPR_UNARY_FUNC(name) \
template \
__host__ Expr::ptr_type, name ## _func::value_type> > > \
@@ -93,6 +96,8 @@ pow_(const SrcPtr& src, float power)
return makeExpr(transformPtr(src, bind2nd(pow_func::value_type>(), power)));
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/unary_op.hpp b/modules/cudev/include/opencv2/cudev/expr/unary_op.hpp
index 905013e42f..c5fabe4ac9 100644
--- a/modules/cudev/include/opencv2/cudev/expr/unary_op.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/unary_op.hpp
@@ -57,6 +57,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
#define CV_CUDEV_EXPR_UNOP_INST(op, functor) \
template \
__host__ Expr >::ptr_type, functor > > \
@@ -89,6 +92,8 @@ CV_CUDEV_EXPR_UNOP_INST(~, bit_not)
#undef CV_CUDEV_EXPR_UNOP_INST
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/expr/warping.hpp b/modules/cudev/include/opencv2/cudev/expr/warping.hpp
index f942a3fb6f..e1f78b9689 100644
--- a/modules/cudev/include/opencv2/cudev/expr/warping.hpp
+++ b/modules/cudev/include/opencv2/cudev/expr/warping.hpp
@@ -57,6 +57,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// resize
template
@@ -166,6 +169,8 @@ transpose_(const SrcPtr& src)
return makeExpr(body);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/functional/color_cvt.hpp b/modules/cudev/include/opencv2/cudev/functional/color_cvt.hpp
index 8be854780a..5134d04ed9 100644
--- a/modules/cudev/include/opencv2/cudev/functional/color_cvt.hpp
+++ b/modules/cudev/include/opencv2/cudev/functional/color_cvt.hpp
@@ -51,6 +51,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// Various 3/4-channel to 3/4-channel RGB transformations
#define CV_CUDEV_RGB2RGB_INST(name, scn, dcn, bidx) \
@@ -469,6 +472,8 @@ CV_CUDEV_RGB5x52GRAY_INST(BGR565_to_GRAY, 6)
#undef CV_CUDEV_RGB5x52GRAY_INST
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/functional/functional.hpp b/modules/cudev/include/opencv2/cudev/functional/functional.hpp
index 7934f78b94..125b66f07a 100644
--- a/modules/cudev/include/opencv2/cudev/functional/functional.hpp
+++ b/modules/cudev/include/opencv2/cudev/functional/functional.hpp
@@ -54,6 +54,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// Function Objects
template <typename _Arg, typename _Result> struct unary_function
@@ -873,6 +876,8 @@ template struct IsBinaryFunction
enum { value = (sizeof(check(makeF())) == sizeof(Yes)) };
};
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/functional/tuple_adapter.hpp b/modules/cudev/include/opencv2/cudev/functional/tuple_adapter.hpp
index d3a40db0ea..ff075dc2b3 100644
--- a/modules/cudev/include/opencv2/cudev/functional/tuple_adapter.hpp
+++ b/modules/cudev/include/opencv2/cudev/functional/tuple_adapter.hpp
@@ -51,6 +51,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class Op, int n> struct UnaryTupleAdapter
{
typedef typename Op::result_type result_type;
@@ -93,6 +96,8 @@ __host__ __device__ BinaryTupleAdapter binaryTupleAdapter(const Op&
return a;
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/grid/copy.hpp b/modules/cudev/include/opencv2/cudev/grid/copy.hpp
index d7d3ea8343..1d30f99763 100644
--- a/modules/cudev/include/opencv2/cudev/grid/copy.hpp
+++ b/modules/cudev/include/opencv2/cudev/grid/copy.hpp
@@ -57,6 +57,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class SrcPtr, typename DstType, class MaskPtr>
__host__ void gridCopy_(const SrcPtr& src, GpuMat_<DstType>& dst, const MaskPtr& mask, Stream& stream = Stream::Null())
{
@@ -447,6 +450,8 @@ __host__ void gridCopy_(const SrcPtrTuple& src, const tuple< GlobPtrSz, Glob
gridCopy_(src, dst, stream);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/grid/histogram.hpp b/modules/cudev/include/opencv2/cudev/grid/histogram.hpp
index ecb1a19c85..154f73771b 100644
--- a/modules/cudev/include/opencv2/cudev/grid/histogram.hpp
+++ b/modules/cudev/include/opencv2/cudev/grid/histogram.hpp
@@ -54,6 +54,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <int BIN_COUNT, class SrcPtr, typename ResType, class MaskPtr>
__host__ void gridHistogram_(const SrcPtr& src, GpuMat_<ResType>& dst, const MaskPtr& mask, Stream& stream = Stream::Null())
{
@@ -114,6 +117,8 @@ __host__ void gridHistogram(const SrcPtr& src, GpuMat_& dst, Stream& st
gridHistogram_(src, dst, stream);
}
+//! @}
+
}}
#endif
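
A sketch of the histogram primitive; the bin count is a compile-time parameter, and hist is
assumed to end up as a 1x256 row of int counts:

    void calcHist256(const cv::cudev::GpuMat_<uchar>& src, cv::cudev::GpuMat_<int>& hist)
    {
        cv::cudev::gridHistogram<256>(src, hist);
    }
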
diff --git a/modules/cudev/include/opencv2/cudev/grid/integral.hpp b/modules/cudev/include/opencv2/cudev/grid/integral.hpp
index d948c1267f..6312f44772 100644
--- a/modules/cudev/include/opencv2/cudev/grid/integral.hpp
+++ b/modules/cudev/include/opencv2/cudev/grid/integral.hpp
@@ -53,6 +53,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class SrcPtr, typename DstType>
__host__ void gridIntegral(const SrcPtr& src, GpuMat_<DstType>& dst, Stream& stream = Stream::Null())
{
@@ -64,6 +67,8 @@ __host__ void gridIntegral(const SrcPtr& src, GpuMat_& dst, Stream& str
integral_detail::integral(shrinkPtr(src), shrinkPtr(dst), rows, cols, StreamAccessor::getStream(stream));
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/grid/pyramids.hpp b/modules/cudev/include/opencv2/cudev/grid/pyramids.hpp
index 99833bd3f7..22eafe69fb 100644
--- a/modules/cudev/include/opencv2/cudev/grid/pyramids.hpp
+++ b/modules/cudev/include/opencv2/cudev/grid/pyramids.hpp
@@ -55,6 +55,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class Brd, class SrcPtr, typename DstType>
__host__ void gridPyrDown_(const SrcPtr& src, GpuMat_<DstType>& dst, Stream& stream = Stream::Null())
{
@@ -83,6 +86,8 @@ __host__ void gridPyrUp(const SrcPtr& src, GpuMat_& dst, Stream& stream
pyramids_detail::pyrUp(shrinkPtr(src), shrinkPtr(dst), rows, cols, dst.rows, dst.cols, StreamAccessor::getStream(stream));
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/grid/reduce.hpp b/modules/cudev/include/opencv2/cudev/grid/reduce.hpp
index 3861ae2281..4551bc886b 100644
--- a/modules/cudev/include/opencv2/cudev/grid/reduce.hpp
+++ b/modules/cudev/include/opencv2/cudev/grid/reduce.hpp
@@ -57,6 +57,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class SrcPtr, typename ResType, class MaskPtr>
__host__ void gridCalcSum_(const SrcPtr& src, GpuMat_<ResType>& dst, const MaskPtr& mask, Stream& stream = Stream::Null())
{
@@ -370,6 +373,8 @@ __host__ void gridCountNonZero(const SrcPtr& src, GpuMat_& dst, Stream&
gridCountNonZero_(src, dst, stream);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/grid/reduce_to_vec.hpp b/modules/cudev/include/opencv2/cudev/grid/reduce_to_vec.hpp
index 361d40d1c8..595ee8be6f 100644
--- a/modules/cudev/include/opencv2/cudev/grid/reduce_to_vec.hpp
+++ b/modules/cudev/include/opencv2/cudev/grid/reduce_to_vec.hpp
@@ -59,6 +59,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <typename T> struct Sum : plus<T>
{
typedef T work_type;
@@ -225,6 +228,8 @@ __host__ void gridReduceToColumn(const SrcPtr& src, GpuMat_& dst, Strea
gridReduceToColumn_(src, dst, stream);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/grid/split_merge.hpp b/modules/cudev/include/opencv2/cudev/grid/split_merge.hpp
index ed7e8ee605..1a7134793b 100644
--- a/modules/cudev/include/opencv2/cudev/grid/split_merge.hpp
+++ b/modules/cudev/include/opencv2/cudev/grid/split_merge.hpp
@@ -57,6 +57,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class SrcPtrTuple, typename DstType, class MaskPtr>
__host__ void gridMerge_(const SrcPtrTuple& src, GpuMat_<DstType>& dst, const MaskPtr& mask, Stream& stream = Stream::Null())
{
@@ -579,6 +582,8 @@ __host__ void gridSplit(const SrcPtr& src, GlobPtrSz (&dst)[COUNT], Str
gridSplit_(src, dst, stream);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/grid/transform.hpp b/modules/cudev/include/opencv2/cudev/grid/transform.hpp
index 62555ab5ae..2f16f7d392 100644
--- a/modules/cudev/include/opencv2/cudev/grid/transform.hpp
+++ b/modules/cudev/include/opencv2/cudev/grid/transform.hpp
@@ -57,6 +57,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class SrcPtr, typename DstType, class UnOp, class MaskPtr>
__host__ void gridTransformUnary_(const SrcPtr& src, GpuMat_<DstType>& dst, const UnOp& op, const MaskPtr& mask, Stream& stream = Stream::Null())
{
@@ -536,6 +539,8 @@ __host__ void gridTransformTuple(const SrcPtr& src, const tuple< GlobPtrSz,
gridTransformTuple_(src, dst, op, stream);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/grid/transpose.hpp b/modules/cudev/include/opencv2/cudev/grid/transpose.hpp
index cf1bf8303e..0d7a19573d 100644
--- a/modules/cudev/include/opencv2/cudev/grid/transpose.hpp
+++ b/modules/cudev/include/opencv2/cudev/grid/transpose.hpp
@@ -54,6 +54,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class SrcPtr, typename DstType>
__host__ void gridTranspose_(const SrcPtr& src, GpuMat_<DstType>& dst, Stream& stream = Stream::Null())
{
@@ -98,6 +101,8 @@ __host__ void gridTranspose(const SrcPtr& src, const GlobPtrSz& dst, St
gridTranspose_(src, dst, stream);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/constant.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/constant.hpp
index d3c56e7713..b3c5f5f23b 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/constant.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/constant.hpp
@@ -51,6 +51,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <typename T> struct ConstantPtr
{
typedef T value_type;
@@ -88,6 +91,8 @@ template struct PtrTraits< ConstantPtrSz > : PtrTraitsBase< Cons
{
};
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/deriv.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/deriv.hpp
index 097007400f..95088177f4 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/deriv.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/deriv.hpp
@@ -53,6 +53,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// derivX
template <class SrcPtr> struct DerivXPtr
@@ -388,6 +391,8 @@ template struct PtrTraits< LaplacianPtrSz struct BrdConstant
@@ -214,6 +217,8 @@ __host__ BrdBase::ptr_type> brdWrap(const Sr
return b;
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/glob.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/glob.hpp
index 7385926638..3563e56fcc 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/glob.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/glob.hpp
@@ -51,6 +51,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <typename T> struct GlobPtr
{
typedef T value_type;
@@ -106,6 +109,8 @@ template struct PtrTraits< GlobPtrSz > : PtrTraitsBase
class GpuMat_ : public GpuMat
{
@@ -154,6 +157,8 @@ template struct PtrTraits< GpuMat_ > : PtrTraitsBase,
{
};
+//! @}
+
}}
#include "detail/gpumat.hpp"
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/interpolation.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/interpolation.hpp
index e86d7191ed..256d4fd00a 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/interpolation.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/interpolation.hpp
@@ -55,6 +55,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// Nearest
template <class SrcPtr> struct NearestInterPtr
@@ -380,6 +383,8 @@ template struct PtrTraits< CommonAreaInterPtrSz > : PtrTr
{
};
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/lut.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/lut.hpp
index accf545617..26a3725c08 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/lut.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/lut.hpp
@@ -54,6 +54,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class SrcPtr, class TablePtr> struct LutPtr
{
typedef typename PtrTraits::value_type value_type;
@@ -95,6 +98,8 @@ template struct PtrTraits< LutPtrSz struct PtrTraits< SingleMaskChannelsSz > : Ptr
{
};
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/remap.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/remap.hpp
index db2669a40a..9d8745f94e 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/remap.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/remap.hpp
@@ -54,6 +54,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class SrcPtr, class MapPtr> struct RemapPtr1
{
typedef typename PtrTraits::value_type value_type;
@@ -149,6 +152,8 @@ template struct PtrTraits< RemapPtr
{
};
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/resize.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/resize.hpp
index 10a4bad906..63ae7eb8a1 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/resize.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/resize.hpp
@@ -54,6 +54,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class SrcPtr> struct ResizePtr
{
typedef typename PtrTraits::value_type value_type;
@@ -98,6 +101,8 @@ template struct PtrTraits< ResizePtrSz > : PtrTraitsBase<
{
};
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/texture.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/texture.hpp
index 6df4a783d8..6fa83e631e 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/texture.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/texture.hpp
@@ -92,6 +92,9 @@ namespace
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
#if CUDART_VERSION >= 5050
template <typename T> struct TexturePtr
@@ -248,6 +251,8 @@ template struct PtrTraits< Texture > : PtrTraitsBase,
#endif
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/traits.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/traits.hpp
index 7fb4b32b17..f1552cafe8 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/traits.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/traits.hpp
@@ -50,6 +50,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
template <class Ptr2DSz, class Ptr2D> struct PtrTraitsBase
{
typedef Ptr2DSz ptr_sz_type;
@@ -96,6 +99,8 @@ __host__ int getCols(const Ptr2DSz& ptr)
return PtrTraits::getCols(ptr);
}
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/transform.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/transform.hpp
index f540e75213..b6edb913d1 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/transform.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/transform.hpp
@@ -53,6 +53,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// UnaryTransformPtr
template <class SrcPtr, class Op> struct UnaryTransformPtr
@@ -146,6 +149,8 @@ template struct PtrTraits< BinaryTransf
{
};
+//! @}
+
}}
#endif
diff --git a/modules/cudev/include/opencv2/cudev/ptr2d/warping.hpp b/modules/cudev/include/opencv2/cudev/ptr2d/warping.hpp
index 80e5fbeef0..c9d00833ff 100644
--- a/modules/cudev/include/opencv2/cudev/ptr2d/warping.hpp
+++ b/modules/cudev/include/opencv2/cudev/ptr2d/warping.hpp
@@ -53,6 +53,9 @@
namespace cv { namespace cudev {
+//! @addtogroup cudev
+//! @{
+
// affine
struct AffineMapPtr
@@ -147,6 +150,8 @@ warpPerspectivePtr(const SrcPtr& src, Size dstSize, const GpuMat_