converted user guide & tutorials from tex to rst; added them into the whole documentation tree; added html_docs target.
parent
6facf8ba3b
commit
b699e946b5
75 changed files with 1506 additions and 2120 deletions
@ -1,185 +0,0 @@ |
||||
\ProvidesPackage{opencv} |
||||
|
||||
\lstset{ % |
||||
language=Python, % choose the language of the code |
||||
%basicstyle=\footnotesize, % the size of the fonts that are used for the code |
||||
%numbers=left, % where to put the line-numbers |
||||
%numberstyle=\footnotesize, % the size of the fonts that are used for the line-numbers |
||||
%stepnumber=2, % the step between two line-numbers. If it's 1 each line will be numbered |
||||
%numbersep=5pt, % how far the line-numbers are from the code |
||||
%backgroundcolor=\color{white}, % choose the background color. You must add \usepackage{color} |
||||
showspaces=false, % show spaces adding particular underscores |
||||
showstringspaces=false, % underline spaces within strings |
||||
showtabs=false, % show tabs within strings adding particular underscores |
||||
%frame=single, % adds a frame around the code |
||||
%tabsize=2, % sets default tabsize to 2 spaces |
||||
%captionpos=b, % sets the caption-position to bottom |
||||
%breaklines=true, % sets automatic line breaking |
||||
%breakatwhitespace=false, % sets if automatic breaks should only happen at whitespace |
||||
%escapeinside={\%*}{*)} % if you want to add a comment within your code |
||||
} |
||||
|
||||
\newcommand{\wikiHref}[1]{\hspace{0.1in}\href{http://opencv.willowgarage.com/wiki/documentation/\targetlang/\curModule/#1}{(view/add comments)}} |
||||
\newcommand{\curModule}{} |
||||
|
||||
\newcommand{\cvclass}[1]{ |
||||
\ifthenelse{\equal{\targetlang}{cpp}} |
||||
{ |
||||
\subsection[cv::#1]{cv::#1\wikiHref{#1}} |
||||
%\addcontentsline{toc}{subsection}{#1} |
||||
\index{cv...!#1} |
||||
} |
||||
{ |
||||
\subsection[#1]{#1\wikiHref{#1}} |
||||
%\addcontentsline{toc}{subsection}{#1} |
||||
\index{cv...!#1} |
||||
} |
||||
} |
||||
|
||||
\newcommand{\cvfunc}[1]{ |
||||
\subsection[#1]{#1\wikiHref{#1}} |
||||
%\addcontentsline{toc}{subsection}{#1} |
||||
\index{cv...!#1} |
||||
} |
||||
|
||||
\newcommand{\cvstruct}[1]{ |
||||
\subsection[#1]{#1\wikiHref{#1}} |
||||
%\addcontentsline{toc}{subsection}{#1} |
||||
\index{cv...!#1} |
||||
} |
||||
|
||||
\newcommand{\cvmacro}[1]{ |
||||
\subsection[#1]{#1\wikiHref{#1}} |
||||
%\addcontentsline{toc}{subsection}{#1} |
||||
\index{cv...!#1} |
||||
} |
||||
|
||||
\newcommand{\cvarg}[2]{ |
||||
\item[\texttt{#1}] #2 |
||||
} |
||||
|
||||
|
||||
\newcommand{\cvFunc}[2]{ |
||||
\ifthenelse{\equal{\targetlang}{c}} |
||||
{\subsection[cv::#1]{cv#1\wikiHref{#1}}\index{cv#1}\label{cfunc.#1}} |
||||
{\ifthenelse{\equal{\targetlang}{cpp}} |
||||
{\subsection[cv::#2]{cv::#2\wikiHref{#2}}\index{cv::#2}\label{cppfunc.#2}} |
||||
{\subsection[cv::#1]{cv.#1\wikiHref{#1}}\index{cv.#1}\label{pyfunc.#1}}}} |
||||
|
||||
\newcommand{\cvCPyFunc}[1]{\cvFunc{#1}{}} |
||||
\newcommand{\cvCppFunc}[1]{\cvFunc{}{#1}} |
||||
|
||||
\newcommand{\cvCross}[2]{ |
||||
\ifthenelse{\equal{\targetlang}{c}} |
||||
{\hyperref[cfunc.#1]{cv#1}} |
||||
{\ifthenelse{\equal{\targetlang}{cpp}} |
||||
{\hyperref[cppfunc.#2]{cv::#2}} |
||||
{\hyperref[pyfunc.#1]{cv.#1}}}} |
||||
|
||||
\newcommand{\cvCPyCross}[1]{\cvCross{#1}{}} |
||||
\newcommand{\cvCppCross}[1]{\cvCross{}{#1}} |
||||
|
||||
\newcommand{\cvdefC}[1]{ |
||||
\ifthenelse{\equal{\targetlang}{c}}{ |
||||
\begin{shaded} |
||||
\begin{alltt} |
||||
\setlength{\parindent}{1in} |
||||
#1 |
||||
\end{alltt} |
||||
\end{shaded} |
||||
}{} |
||||
} |
||||
|
||||
\newcommand{\cvdefCpp}[1]{ |
||||
\ifthenelse{\equal{\targetlang}{cpp}}{ |
||||
\begin{shaded} |
||||
\begin{alltt} |
||||
\setlength{\parindent}{1in} |
||||
#1 |
||||
\end{alltt} |
||||
\end{shaded} |
||||
}{} |
||||
} |
||||
|
||||
\newcommand{\cvdefPy}[1]{ |
||||
\ifthenelse{\equal{\targetlang}{python}}{ |
||||
\begin{shaded} |
||||
\begin{alltt} |
||||
\setlength{\parindent}{1in} |
||||
#1 |
||||
\end{alltt} |
||||
\end{shaded} |
||||
}{} |
||||
} |
||||
|
||||
\newcommand{\cvC}[1]{ |
||||
\ifthenelse{\equal{\targetlang}{c}}{#1}{} |
||||
} |
||||
|
||||
\newcommand{\cvCpp}[1]{ |
||||
\ifthenelse{\equal{\targetlang}{cpp}}{#1}{} |
||||
} |
||||
|
||||
\newcommand{\cvPy}[1]{ |
||||
\ifthenelse{\equal{\targetlang}{python}}{#1}{}} |
||||
|
||||
\newcommand{\cvCPy}[1]{ |
||||
\ifthenelse{\equal{\targetlang}{c}\or\equal{\targetlang}{python}}{#1}{} |
||||
} |
||||
|
||||
\newcommand{\cvcode}[1]{ |
||||
\begin{shaded} |
||||
\begin{alltt} |
||||
\setlength{\parindent}{1in} |
||||
#1 |
||||
\end{alltt} |
||||
\end{shaded} |
||||
} |
||||
|
||||
%\newcommand{\cross}[1]{#1 (page \pageref{#1})} |
||||
\newcommand{\cross}[1]{ \hyperref[#1]{#1} } |
||||
|
||||
\newcommand{\matTT}[9]{ |
||||
\[ |
||||
\left|\begin{array}{ccc} |
||||
#1 & #2 & #3\\ |
||||
#4 & #5 & #6\\ |
||||
#7 & #8 & #9 |
||||
\end{array}\right| |
||||
\] |
||||
} |
||||
|
||||
\newcommand{\fork}[4]{ |
||||
\left\{ |
||||
\begin{array}{l l} |
||||
#1 & \mbox{#2}\\ |
||||
#3 & \mbox{#4}\\ |
||||
\end{array} \right.} |
||||
\newcommand{\forkthree}[6]{ |
||||
\left\{ |
||||
\begin{array}{l l} |
||||
#1 & \mbox{#2}\\ |
||||
#3 & \mbox{#4}\\ |
||||
#5 & \mbox{#6}\\ |
||||
\end{array} \right.} |
||||
|
||||
\newcommand{\vecthree}[3]{ |
||||
\begin{bmatrix} |
||||
#1\\ |
||||
#2\\ |
||||
#3 |
||||
\end{bmatrix} |
||||
} |
||||
|
||||
\newcommand{\vecthreethree}[9]{ |
||||
\begin{bmatrix} |
||||
#1 & #2 & #3\\ |
||||
#4 & #5 & #6\\ |
||||
#7 & #8 & #9 |
||||
\end{bmatrix} |
||||
} |
||||
|
||||
% allow special plastex handling |
||||
\newif\ifplastex |
||||
\plastexfalse |
||||
|
@ -1,20 +0,0 @@ |
||||
project(opencv_refman1) |
||||
|
||||
file(GLOB_RECURSE OPENCV1_FILES_PICT pics/*.png pics/*.jpg) |
||||
file(GLOB_RECURSE OPENCV1_FILES_RST *.rst) |
||||
|
||||
add_custom_target(refman1 |
||||
${SPHINX_BUILD} |
||||
-b latex -c ${CMAKE_CURRENT_SOURCE_DIR} |
||||
${CMAKE_CURRENT_SOURCE_DIR} . |
||||
COMMAND ${CMAKE_COMMAND} -E copy_directory |
||||
${CMAKE_CURRENT_SOURCE_DIR}/../pics ${CMAKE_CURRENT_BINARY_DIR}/pics |
||||
COMMAND ${CMAKE_COMMAND} -E copy |
||||
${CMAKE_CURRENT_SOURCE_DIR}/../mymath.sty ${CMAKE_CURRENT_BINARY_DIR} |
||||
COMMAND ${PDFLATEX_COMPILER} opencv1x |
||||
COMMAND ${PDFLATEX_COMPILER} opencv1x |
||||
DEPENDS conf.py ${OPENCV1_FILES_RST} ${OPENCV1_FILES_PICT} |
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} |
||||
COMMENT "Generating the OpenCV 1.x Reference Manual") |
||||
|
||||
#install(FILES ${CURRENT_BINARY_DIR}/opencv1x.pdf DESTINATION "${OPENCV_DOC_INSTALL_PATH}" COMPONENT main) |
File diff suppressed because it is too large
Load Diff
@ -1,220 +0,0 @@ |
||||
# -*- coding: utf-8 -*- |
||||
# |
||||
# opencvstd documentation build configuration file, created by |
||||
# sphinx-quickstart on Mon Feb 14 00:30:43 2011. |
||||
# |
||||
# This file is execfile()d with the current directory set to its containing dir. |
||||
# |
||||
# Note that not all possible configuration values are present in this |
||||
# autogenerated file. |
||||
# |
||||
# All configuration values have a default; values that are commented out |
||||
# serve to show the default. |
||||
|
||||
import sys, os |
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory, |
||||
# add these directories to sys.path here. If the directory is relative to the |
||||
# documentation root, use os.path.abspath to make it absolute, like shown here. |
||||
#sys.path.insert(0, os.path.abspath('.')) |
||||
|
||||
# -- General configuration ----------------------------------------------------- |
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here. |
||||
#needs_sphinx = '1.0' |
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions |
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. |
||||
extensions = ['sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.todo'] |
||||
doctest_test_doctest_blocks = 'block' |
||||
|
||||
# Add any paths that contain templates here, relative to this directory. |
||||
templates_path = ['_templates'] |
||||
|
||||
# The suffix of source filenames. |
||||
source_suffix = '.rst' |
||||
|
||||
# The encoding of source files. |
||||
#source_encoding = 'utf-8-sig' |
||||
|
||||
# The master toctree document. |
||||
master_doc = 'index' |
||||
|
||||
# General information about the project. |
||||
project = u'opencvrefman1x' |
||||
copyright = u'2011, opencv dev team' |
||||
|
||||
# The version info for the project you're documenting, acts as replacement for |
||||
# |version| and |release|, also used in various other places throughout the |
||||
# built documents. |
||||
# |
||||
# The short X.Y version. |
||||
version = '2.3' |
||||
# The full version, including alpha/beta/rc tags. |
||||
release = '2.3' |
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation |
||||
# for a list of supported languages. |
||||
#language = None |
||||
|
||||
# There are two options for replacing |today|: either, you set today to some |
||||
# non-false value, then it is used: |
||||
#today = '' |
||||
# Else, today_fmt is used as the format for a strftime call. |
||||
#today_fmt = '%B %d, %Y' |
||||
|
||||
# List of patterns, relative to source directory, that match files and |
||||
# directories to ignore when looking for source files. |
||||
exclude_patterns = [] |
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all documents. |
||||
#default_role = None |
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text. |
||||
#add_function_parentheses = True |
||||
|
||||
# If true, the current module name will be prepended to all description |
||||
# unit titles (such as .. function::). |
||||
#add_module_names = True |
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the |
||||
# output. They are ignored by default. |
||||
#show_authors = False |
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use. |
||||
pygments_style = 'sphinx' |
||||
|
||||
# A list of ignored prefixes for module index sorting. |
||||
#modindex_common_prefix = [] |
||||
|
||||
todo_include_todos=True |
||||
|
||||
# -- Options for HTML output --------------------------------------------------- |
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for |
||||
# a list of builtin themes. |
||||
html_theme = 'blue' |
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme |
||||
# further. For a list of options available for each theme, see the |
||||
# documentation. |
||||
#html_theme_options = {} |
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory. |
||||
html_theme_path = ['../_themes'] |
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to |
||||
# "<project> v<release> documentation". |
||||
#html_title = None |
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title. |
||||
#html_short_title = None |
||||
|
||||
# The name of an image file (relative to this directory) to place at the top |
||||
# of the sidebar. |
||||
html_logo = '../opencv-logo2.png' |
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the |
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 |
||||
# pixels large. |
||||
#html_favicon = None |
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here, |
||||
# relative to this directory. They are copied after the builtin static files, |
||||
# so a file named "default.css" will overwrite the builtin "default.css". |
||||
html_static_path = ['../_static'] |
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, |
||||
# using the given strftime format. |
||||
#html_last_updated_fmt = '%b %d, %Y' |
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to |
||||
# typographically correct entities. |
||||
#html_use_smartypants = True |
||||
|
||||
# Custom sidebar templates, maps document names to template names. |
||||
#html_sidebars = {} |
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to |
||||
# template names. |
||||
#html_additional_pages = {} |
||||
|
||||
# If false, no module index is generated. |
||||
#html_domain_indices = True |
||||
|
||||
# If false, no index is generated. |
||||
#html_use_index = True |
||||
|
||||
# If true, the index is split into individual pages for each letter. |
||||
#html_split_index = False |
||||
|
||||
# If true, links to the reST sources are added to the pages. |
||||
#html_show_sourcelink = True |
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. |
||||
#html_show_sphinx = True |
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. |
||||
#html_show_copyright = True |
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will |
||||
# contain a <link> tag referring to it. The value of this option must be the |
||||
# base URL from which the finished HTML is served. |
||||
#html_use_opensearch = '' |
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml"). |
||||
#html_file_suffix = None |
||||
|
||||
# Output file base name for HTML help builder. |
||||
htmlhelp_basename = 'opencv1x' |
||||
|
||||
|
||||
# -- Options for LaTeX output -------------------------------------------------- |
||||
|
||||
# The paper size ('letter' or 'a4'). |
||||
#latex_paper_size = 'letter' |
||||
|
||||
# The font size ('10pt', '11pt' or '12pt'). |
||||
#latex_font_size = '10pt' |
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples |
||||
# (source start file, target name, title, author, documentclass [howto/manual]). |
||||
latex_documents = [ |
||||
('index', 'opencv1x.tex', u'The OpenCV 1.x API Reference Manual', |
||||
u'', 'manual'), |
||||
] |
||||
|
||||
latex_elements = {'preamble': '\usepackage{mymath}\usepackage{amssymb}\usepackage{amsmath}\usepackage{bbm}'} |
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of |
||||
# the title page. |
||||
#latex_logo = None |
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts, |
||||
# not chapters. |
||||
latex_use_parts = True |
||||
|
||||
# If true, show page references after internal links. |
||||
#latex_show_pagerefs = False |
||||
|
||||
# If true, show URL addresses after external links. |
||||
#latex_show_urls = False |
||||
|
||||
# Additional stuff for the LaTeX preamble. |
||||
#latex_preamble = '' |
||||
|
||||
# Documents to append as an appendix to all manuals. |
||||
#latex_appendices = [] |
||||
|
||||
# If false, no module index is generated. |
||||
#latex_domain_indices = True |
||||
|
||||
|
||||
# -- Options for manual page output -------------------------------------------- |
||||
|
||||
# One entry per manual page. List of tuples |
||||
# (source start file, name, description, authors, manual section). |
||||
man_pages = [ |
||||
('index', 'opencv1x', u'The OpenCV 1.x API Reference Manual', |
||||
[u'opencv-dev@itseez.com'], 1) |
||||
] |
@ -1,16 +0,0 @@ |
||||
Welcome to opencv 1.x reference manual |
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
||||
|
||||
Contents: |
||||
|
||||
.. toctree:: |
||||
:maxdepth: 2 |
||||
|
||||
c/c_index |
||||
py/py_index |
||||
bibliography |
||||
|
||||
Indices and tables |
||||
~~~~~~~~~~~~~~~~~~ |
||||
* :ref:`genindex` |
||||
* :ref:`search` |
File diff suppressed because it is too large
Load Diff
@ -1,107 +0,0 @@ |
||||
\documentclass[11pt]{book} |
||||
|
||||
\usepackage{cite} |
||||
\usepackage[pdftex]{graphicx} |
||||
\usepackage{titlesec} |
||||
\usepackage{listings} |
||||
\usepackage{fancyvrb} |
||||
\usepackage[svgnames]{xcolor} |
||||
\usepackage{framed} |
||||
\usepackage{amsmath} |
||||
\usepackage{amssymb} |
||||
\usepackage{bbm} |
||||
\usepackage{hyperref} |
||||
\usepackage{makeidx} |
||||
\usepackage{color} |
||||
\usepackage{verbatim} |
||||
|
||||
\setcounter{secnumdepth}{1} |
||||
|
||||
\definecolor{shadecolor}{gray}{0.95} % Background color of title bars |
||||
\lstset{ |
||||
language=C, |
||||
basicstyle=\small\ttfamily, |
||||
backgroundcolor=\color{shadecolor} |
||||
} |
||||
|
||||
\definecolor{cvlinkcolor}{rgb}{0.0 0.3 0.8} |
||||
|
||||
% taken from http://en.wikibooks.org/wiki/LaTeX/Hyperlinks |
||||
\hypersetup{ |
||||
bookmarks=true, % show bookmarks bar? |
||||
unicode=false, % non-Latin characters in Acrobat’s bookmarks |
||||
%pdftoolbar=true, % show Acrobat’s toolbar? |
||||
%pdfmenubar=true, % show Acrobat’s menu? |
||||
%pdffitwindow=false, % window fit to page when opened |
||||
%pdfstartview={FitH}, % fits the width of the page to the window |
||||
%pdftitle={My title}, % title |
||||
%pdfauthor={Author}, % author |
||||
%pdfsubject={Subject}, % subject of the document |
||||
%pdfcreator={Creator}, % creator of the document |
||||
%pdfproducer={Producer}, % producer of the document |
||||
%pdfkeywords={keywords}, % list of keywords |
||||
%pdfnewwindow=true, % links in new window |
||||
colorlinks=true, % false: boxed links; true: colored links |
||||
linkcolor=cvlinkcolor, % color of internal links |
||||
citecolor=cvlinkcolor, % color of links to bibliography |
||||
filecolor=magenta, % color of file links |
||||
urlcolor=cyan % color of external links |
||||
} |
||||
|
||||
\makeindex |
||||
|
||||
\newcommand{\piRsquare}{\pi r^2} % This is my own macro !!! |
||||
|
||||
\usepackage{helvetica} |
||||
\usepackage{ifthen} |
||||
\usepackage{alltt} |
||||
\usepackage{opencv} |
||||
|
||||
%%% Margins %%% |
||||
\oddsidemargin 0.0in |
||||
\evensidemargin 0.0in |
||||
\textwidth 6.5in |
||||
%\headheight 1.0in |
||||
%\topmargin 0.5in |
||||
%\textheight 9.0in |
||||
%\footheight 1.0in |
||||
%%%%%%%%%%%%%%% |
||||
|
||||
\title{OpenCV Tutorials} % used by \maketitle |
||||
\author{v2.2} % used by \maketitle |
||||
\date{February, 2011} % used by \maketitle |
||||
|
||||
\begin{document} |
||||
\maketitle % automatic title! |
||||
|
||||
\setcounter{tocdepth}{8} |
||||
\tableofcontents |
||||
|
||||
\titleformat{\subsection} |
||||
{\titlerule |
||||
\vspace{.8ex}% |
||||
\normalfont\bfseries\Large} |
||||
{\thesection.}{.5em}{} |
||||
|
||||
%%% Define these to get rid of warnings |
||||
\def\genc{true} |
||||
\def\genpy{true} |
||||
\def\gencpp{true} |
||||
|
||||
\newif\ifC |
||||
\newif\ifPy |
||||
\newif\ifCpp |
||||
\newif\ifCPy |
||||
|
||||
\Cfalse |
||||
\Cpptrue |
||||
\Pyfalse |
||||
\CPyfalse |
||||
\def\targetlang{cpp} |
||||
\part{C++ API tutorials} |
||||
\input{tutorials/opencv_tutorials_body} |
||||
|
||||
\addcontentsline{toc}{part}{Index} |
||||
\printindex |
||||
|
||||
\end{document} % End of document. |
@ -1,107 +0,0 @@ |
||||
\documentclass[11pt]{book} |
||||
|
||||
\usepackage{cite} |
||||
\usepackage[pdftex]{graphicx} |
||||
\usepackage{titlesec} |
||||
\usepackage{listings} |
||||
\usepackage{fancyvrb} |
||||
\usepackage[svgnames]{xcolor} |
||||
\usepackage{framed} |
||||
\usepackage{amsmath} |
||||
\usepackage{amssymb} |
||||
\usepackage{bbm} |
||||
\usepackage{hyperref} |
||||
\usepackage{makeidx} |
||||
\usepackage{color} |
||||
\usepackage{verbatim} |
||||
|
||||
\setcounter{secnumdepth}{1} |
||||
|
||||
\definecolor{shadecolor}{gray}{0.95} % Background color of title bars |
||||
\lstset{ |
||||
language=C, |
||||
basicstyle=\small\ttfamily, |
||||
backgroundcolor=\color{shadecolor} |
||||
} |
||||
|
||||
\definecolor{cvlinkcolor}{rgb}{0.0 0.3 0.8} |
||||
|
||||
% taken from http://en.wikibooks.org/wiki/LaTeX/Hyperlinks |
||||
\hypersetup{ |
||||
bookmarks=true, % show bookmarks bar? |
||||
unicode=false, % non-Latin characters in Acrobat’s bookmarks |
||||
%pdftoolbar=true, % show Acrobat’s toolbar? |
||||
%pdfmenubar=true, % show Acrobat’s menu? |
||||
%pdffitwindow=false, % window fit to page when opened |
||||
%pdfstartview={FitH}, % fits the width of the page to the window |
||||
%pdftitle={My title}, % title |
||||
%pdfauthor={Author}, % author |
||||
%pdfsubject={Subject}, % subject of the document |
||||
%pdfcreator={Creator}, % creator of the document |
||||
%pdfproducer={Producer}, % producer of the document |
||||
%pdfkeywords={keywords}, % list of keywords |
||||
%pdfnewwindow=true, % links in new window |
||||
colorlinks=true, % false: boxed links; true: colored links |
||||
linkcolor=cvlinkcolor, % color of internal links |
||||
citecolor=cvlinkcolor, % color of links to bibliography |
||||
filecolor=magenta, % color of file links |
||||
urlcolor=cyan % color of external links |
||||
} |
||||
|
||||
\makeindex |
||||
|
||||
\newcommand{\piRsquare}{\pi r^2} % This is my own macro !!! |
||||
|
||||
\usepackage{helvetica} |
||||
\usepackage{ifthen} |
||||
\usepackage{alltt} |
||||
\usepackage{opencv} |
||||
|
||||
%%% Margins %%% |
||||
\oddsidemargin 0.0in |
||||
\evensidemargin 0.0in |
||||
\textwidth 6.5in |
||||
%\headheight 1.0in |
||||
%\topmargin 0.5in |
||||
%\textheight 9.0in |
||||
%\footheight 1.0in |
||||
%%%%%%%%%%%%%%% |
||||
|
||||
\title{OpenCV User Guide} % used by \maketitle |
||||
\author{v2.2} % used by \maketitle |
||||
\date{December, 2010} % used by \maketitle |
||||
|
||||
\begin{document} |
||||
\maketitle % automatic title! |
||||
|
||||
\setcounter{tocdepth}{8} |
||||
\tableofcontents |
||||
|
||||
\titleformat{\subsection} |
||||
{\titlerule |
||||
\vspace{.8ex}% |
||||
\normalfont\bfseries\Large} |
||||
{\thesection.}{.5em}{} |
||||
|
||||
%%% Define these to get rid of warnings |
||||
\def\genc{true} |
||||
\def\genpy{true} |
||||
\def\gencpp{true} |
||||
|
||||
\newif\ifC |
||||
\newif\ifPy |
||||
\newif\ifCpp |
||||
\newif\ifCPy |
||||
|
||||
\Cfalse |
||||
\Cpptrue |
||||
\Pyfalse |
||||
\CPyfalse |
||||
\def\targetlang{cpp} |
||||
\part{C++ API User Guide} |
||||
\input{user_guide/opencv_guide_body} |
||||
|
||||
\addcontentsline{toc}{part}{Index} |
||||
\printindex |
||||
|
||||
\end{document} % End of document. |
@ -0,0 +1,64 @@ |
||||
####### |
||||
Calib3D |
||||
####### |
||||
|
||||
.. highlight:: cpp |
||||
|
||||
Camera calibration |
||||
================== |
||||
|
||||
The goal of this tutorial is to learn how to calibrate a camera given a set of chessboard images. |
||||
|
||||
*Test data*: use images in your data/chess folder. |
||||
|
||||
#. |
||||
Compile opencv with samples by setting ``BUILD_EXAMPLES`` to ``ON`` in cmake configuration. |
||||
|
||||
#. |
||||
Go to ``bin`` folder and use ``imagelist_creator`` to create an ``XML/YAML`` list of your images. |
||||
|
||||
#. |
||||
Then, run ``calibration`` sample to get camera parameters. Use square size equal to 3cm. |
||||
|
||||
Pose estimation |
||||
=============== |
||||
|
||||
Now, let us write a code that detects a chessboard in a new image and finds its distance from the camera. You can apply the same method to any object with known 3D geometry that you can detect in an image. |
||||
|
||||
*Test data*: use chess_test*.jpg images from your data folder. |
||||
|
||||
#. |
||||
Create an empty console project. Load a test image: :: |
||||
|
||||
Mat img = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); |
||||
|
||||
#. |
||||
Detect a chessboard in this image using findChessboard function. :: |
||||
|
||||
bool found = findChessboardCorners( img, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH ); |
||||
|
||||
#. |
||||
Now, write a function that generates a ``vector<Point3f>`` array of 3d coordinates of a chessboard in any coordinate system. For simplicity, let us choose a system such that one of the chessboard corners is in the origin and the board is in the plane *z = 0*. |
||||
|
||||
#. |
||||
Read camera parameters from XML/YAML file: :: |
||||
|
||||
FileStorage fs(filename, FileStorage::READ); |
||||
Mat intrinsics, distortion; |
||||
fs["camera_matrix"] >> intrinsics; |
||||
fs["distortion_coefficients"] >> distortion; |
||||
|
||||
#. |
||||
Now we are ready to find chessboard pose by running ``solvePnP``: :: |
||||
|
||||
vector<Point3f> boardPoints; |
||||
// fill the array |
||||
... |
||||
|
||||
solvePnP(Mat(boardPoints), Mat(foundBoardCorners), cameraMatrix, |
||||
distCoeffs, rvec, tvec, false); |
||||
|
||||
#. |
||||
Calculate reprojection error like it is done in ``calibration`` sample (see ``opencv/samples/cpp/calibration.cpp``, function ``computeReprojectionErrors``). |
||||
|
||||
Question: how to calculate the distance from the camera origin to any of the corners? |
@ -1,56 +0,0 @@ |
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
% % |
||||
% C++ % |
||||
% % |
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
|
||||
\ifCpp |
||||
\section{Camera calibration} |
||||
The goal of this tutorial is to learn how to calibrate a camera given a set of chessboard images. |
||||
|
||||
\texttt{Test data}: use images in your data/chess folder. |
||||
|
||||
Compile opencv with samples by setting BUILD\_EXAMPLES to ON in cmake configuration. |
||||
|
||||
Go to bin folder and use \texttt{imagelist\_creator} to create an xml/yaml list of your images. Then, run \texttt{calibration} sample to get camera parameters. Use square size equal to 3cm. |
||||
|
||||
\section{Pose estimation} |
||||
Now, let us write a code that detects a chessboard in a new image and finds its distance from the camera. You can apply the same method to any object with knwon 3d geometry that you can detect in an image. |
||||
|
||||
\texttt{Test data}: use chess\_test*.jpg images from your data folder. |
||||
|
||||
Create an empty console project. Load a test image: |
||||
\begin{lstlisting} |
||||
Mat img = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); |
||||
\end{lstlisting} |
||||
|
||||
Detect a chessboard in this image using findChessboard function. |
||||
\begin{lstlisting} |
||||
bool found = findChessboardCorners( img, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH ); |
||||
\end{lstlisting} |
||||
|
||||
Now, write a function that generates a \texttt{vector<Point3f>} array of 3d coordinates of a chessboard in any coordinate system. For simplicity, let us choose a system such that one of the chessboard corners is in the origin and the board is in the plane \(z = 0\). |
||||
|
||||
Read camera parameters from xml/yaml file: |
||||
\begin{lstlisting} |
||||
FileStorage fs(filename, FileStorage::READ); |
||||
Mat intrinsics, distortion; |
||||
fs["camera_matrix"] >> intrinsics; |
||||
fs["distortion_coefficients"] >> distortion; |
||||
\end{lstlisting} |
||||
|
||||
Now we are ready to find chessboard pose by running solvePnP: |
||||
\begin{lstlisting} |
||||
vector<Point3f> boardPoints; |
||||
// fill the array |
||||
... |
||||
|
||||
solvePnP(Mat(boardPoints), Mat(foundBoardCorners), cameraMatrix, |
||||
distCoeffs, rvec, tvec, false); |
||||
\end{lstlisting} |
||||
|
||||
Calculate reprojection error like it is done in \texttt{calibration} sample (see textttt{opencv/samples/cpp/calibration.cpp}, function \texttt{computeReprojectionErrors}). |
||||
|
||||
How to calculate the distance from the camera origin to any of the corners? |
||||
\fi |
@ -0,0 +1,74 @@ |
||||
########## |
||||
Features2D |
||||
########## |
||||
|
||||
.. highlight:: cpp |
||||
|
||||
Detection of planar objects |
||||
=========================== |
||||
|
||||
The goal of this tutorial is to learn how to use *features2d* and *calib3d* modules for detecting known planar objects in scenes. |
||||
|
||||
*Test data*: use images in your data folder, for instance, ``box.png`` and ``box_in_scene.png``. |
||||
|
||||
#. |
||||
Create a new console project. Read two input images. :: |
||||
|
||||
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); |
||||
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE); |
||||
|
||||
#. |
||||
Detect keypoints in both images. :: |
||||
|
||||
// detecting keypoints |
||||
FastFeatureDetector detector(15); |
||||
vector<KeyPoint> keypoints1; |
||||
detector.detect(img1, keypoints1); |
||||
|
||||
... // do the same for the second image |
||||
|
||||
#. |
||||
Compute descriptors for each of the keypoints. :: |
||||
|
||||
// computing descriptors |
||||
SurfDescriptorExtractor extractor; |
||||
Mat descriptors1; |
||||
extractor.compute(img1, keypoints1, descriptors1); |
||||
|
||||
... // process keypoints from the second image as well |
||||
|
||||
#. |
||||
Now, find the closest matches between descriptors from the first image to the second: :: |
||||
|
||||
// matching descriptors |
||||
BruteForceMatcher<L2<float> > matcher; |
||||
vector<DMatch> matches; |
||||
matcher.match(descriptors1, descriptors2, matches); |
||||
|
||||
#. |
||||
Visualize the results: :: |
||||
|
||||
// drawing the results |
||||
namedWindow("matches", 1); |
||||
Mat img_matches; |
||||
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches); |
||||
imshow("matches", img_matches); |
||||
waitKey(0); |
||||
|
||||
#. |
||||
Find the homography transformation between two sets of points: :: |
||||
|
||||
vector<Point2f> points1, points2; |
||||
// fill the arrays with the points |
||||
.... |
||||
Mat H = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold); |
||||
|
||||
|
||||
#. |
||||
Create a set of inlier matches and draw them. Use perspectiveTransform function to map points with homography: |
||||
|
||||
Mat points1Projected; |
||||
perspectiveTransform(Mat(points1), points1Projected, H); |
||||
|
||||
#. |
||||
Use ``drawMatches`` for drawing inliers. |
@ -1,67 +0,0 @@ |
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
% % |
||||
% C++ % |
||||
% % |
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
|
||||
\ifCpp |
||||
\section{Detection of planar objects} |
||||
The goal of this tutorial is to learn how to use features2d and calib3d modules for detecting known planar objects in scenes. |
||||
|
||||
\texttt{Test data}: use images in your data folder, for instance, box.png and box\_in\_scene.png. |
||||
|
||||
Create a new console project. Read two input images. Example: |
||||
\begin{lstlisting} |
||||
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); |
||||
\end{lstlisting} |
||||
|
||||
Detect keypoints in both images. Example: |
||||
\begin{lstlisting} |
||||
// detecting keypoints |
||||
FastFeatureDetector detector(15); |
||||
vector<KeyPoint> keypoints1; |
||||
detector.detect(img1, keypoints1); |
||||
\end{lstlisting} |
||||
|
||||
Compute descriptors for each of the keypoints. Example: |
||||
\begin{lstlisting} |
||||
// computing descriptors |
||||
SurfDescriptorExtractor extractor; |
||||
Mat descriptors1; |
||||
extractor.compute(img1, keypoints1, descriptors1); |
||||
\end{lstlisting} |
||||
|
||||
Now, find the closest matches between descriptors from the first image to the second: |
||||
\begin{lstlisting} |
||||
// matching descriptors |
||||
BruteForceMatcher<L2<float> > matcher; |
||||
vector<DMatch> matches; |
||||
matcher.match(descriptors1, descriptors2, matches); |
||||
\end{lstlisting} |
||||
|
||||
Visualize the results: |
||||
\begin{lstlisting} |
||||
// drawing the results |
||||
namedWindow("matches", 1); |
||||
Mat img_matches; |
||||
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches); |
||||
imshow("matches", img_matches); |
||||
waitKey(0); |
||||
\end{lstlisting} |
||||
|
||||
Find the homography transformation between two sets of points: |
||||
\begin{lstlisting} |
||||
vector<Point2f> points1, points2; |
||||
// fill the arrays with the points |
||||
.... |
||||
Mat H = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold); |
||||
\end{lstlisting} |
||||
|
||||
Create a set of inlier matches and draw them. Use perspectiveTransform function to map points with homography: |
||||
\begin{lstlisting} |
||||
Mat points1Projected; |
||||
perspectiveTransform(Mat(points1), points1Projected, H); |
||||
\end{lstlisting} |
||||
Use drawMatches for drawing inliers. |
||||
\fi |
@ -1,12 +0,0 @@ |
||||
\chapter{Prerequisites} |
||||
\renewcommand{\curModule}{Prerequisites} |
||||
\input{tutorials/prerequisites} |
||||
|
||||
|
||||
\chapter{Features2d} |
||||
\renewcommand{\curModule}{Features2d} |
||||
\input{tutorials/features2d} |
||||
|
||||
\chapter{Calib3d} |
||||
\renewcommand{\curModule}{Calib3d} |
||||
\input{tutorials/calib3d} |
@ -0,0 +1,5 @@ |
||||
############# |
||||
Prerequisites |
||||
############# |
||||
|
||||
Download the latest release of OpenCV from http://sourceforge.net/projects/opencvlibrary/. You will need CMake and your favorite compiler environment in order to build OpenCV from sources. Please refer to the installation guide http://opencv.willowgarage.com/wiki/InstallGuide for detailed instructions. |
@ -1,12 +0,0 @@ |
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
% % |
||||
% C++ % |
||||
% % |
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
|
||||
\ifCpp |
||||
\section{Prerequisites} |
||||
Download the latest release of opencv from \url{http://sourceforge.net/projects/opencvlibrary/}. You will need cmake and your favorite compiler environment in order to build opencv from sources. Please refer to the installation guide \url{http://opencv.willowgarage.com/wiki/InstallGuide} for detailed instructions. |
||||
|
||||
\fi |
@ -0,0 +1,10 @@ |
||||
################# |
||||
OpenCV Tutorials |
||||
################# |
||||
|
||||
.. toctree:: |
||||
:maxdepth: 2 |
||||
|
||||
prerequisites.rst |
||||
features2d.rst |
||||
calib3d.rst |
@ -1,12 +0,0 @@ |
||||
|
||||
\chapter{cv::Mat. Operations with images.} |
||||
\renewcommand{\curModule}{cv::Mat. Operations with images.} |
||||
\input{user_guide/user_mat} |
||||
|
||||
\chapter{Features2d.} |
||||
\renewcommand{\curModule}{Features2d} |
||||
\input{user_guide/user_features2d} |
||||
|
||||
\chapter{Highgui.} |
||||
\renewcommand{\curModule}{Highgui.} |
||||
\input{user_guide/user_highgui} |
@ -0,0 +1,97 @@ |
||||
********** |
||||
Features2d |
||||
********** |
||||
|
||||
.. highlight:: cpp |
||||
|
||||
Detectors |
||||
========= |
||||
|
||||
Descriptors |
||||
=========== |
||||
|
||||
Matching keypoints |
||||
================== |
||||
|
||||
The code |
||||
-------- |
||||
We will start with a short sample ``opencv/samples/cpp/matcher_simple.cpp``: :: |
||||
|
||||
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); |
||||
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE); |
||||
if(img1.empty() || img2.empty()) |
||||
{ |
||||
printf("Can't read one of the images\n"); |
||||
return -1; |
||||
} |
||||
|
||||
// detecting keypoints |
||||
SurfFeatureDetector detector(400); |
||||
vector<KeyPoint> keypoints1, keypoints2; |
||||
detector.detect(img1, keypoints1); |
||||
detector.detect(img2, keypoints2); |
||||
|
||||
// computing descriptors |
||||
SurfDescriptorExtractor extractor; |
||||
Mat descriptors1, descriptors2; |
||||
extractor.compute(img1, keypoints1, descriptors1); |
||||
extractor.compute(img2, keypoints2, descriptors2); |
||||
|
||||
// matching descriptors |
||||
BruteForceMatcher<L2<float> > matcher; |
||||
vector<DMatch> matches; |
||||
matcher.match(descriptors1, descriptors2, matches); |
||||
|
||||
// drawing the results |
||||
namedWindow("matches", 1); |
||||
Mat img_matches; |
||||
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches); |
||||
imshow("matches", img_matches); |
||||
waitKey(0); |
||||
|
||||
The code explained |
||||
------------------ |
||||
|
||||
Let us break the code down. :: |
||||
|
||||
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); |
||||
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE); |
||||
if(img1.empty() || img2.empty()) |
||||
{ |
||||
printf("Can't read one of the images\n"); |
||||
return -1; |
||||
} |
||||
|
||||
We load two images and check if they are loaded correctly. :: |
||||
|
||||
// detecting keypoints |
||||
FastFeatureDetector detector(15); |
||||
vector<KeyPoint> keypoints1, keypoints2; |
||||
detector.detect(img1, keypoints1); |
||||
detector.detect(img2, keypoints2); |
||||
|
||||
First, we create an instance of a keypoint detector. All detectors inherit the abstract ``FeatureDetector`` interface, but the constructors are algorithm-dependent. The first argument to each detector usually controls the balance between the amount of keypoints and their stability. The range of values is different for different detectors (For instance, *FAST* threshold has the meaning of pixel intensity difference and usually varies in the region *[0,40]*. *SURF* threshold is applied to a Hessian of an image and usually takes on values larger than *100*), so use defaults in case of doubt. :: |
||||
|
||||
// computing descriptors |
||||
SurfDescriptorExtractor extractor; |
||||
Mat descriptors1, descriptors2; |
||||
extractor.compute(img1, keypoints1, descriptors1); |
||||
extractor.compute(img2, keypoints2, descriptors2); |
||||
|
||||
We create an instance of descriptor extractor. Most of the OpenCV descriptors inherit the ``DescriptorExtractor`` abstract interface. Then we compute descriptors for each of the keypoints. The output ``Mat`` of the ``DescriptorExtractor::compute`` method contains a descriptor in a row *i* for each *i*-th keypoint. Note that the method can modify the keypoints vector by removing the keypoints such that a descriptor for them is not defined (usually these are the keypoints near image border). The method makes sure that the output keypoints and descriptors are consistent with each other (so that the number of keypoints is equal to the descriptors row count). :: |
||||
|
||||
// matching descriptors |
||||
BruteForceMatcher<L2<float> > matcher; |
||||
vector<DMatch> matches; |
||||
matcher.match(descriptors1, descriptors2, matches); |
||||
|
||||
Now that we have descriptors for both images, we can match them. First, we create a matcher that for each descriptor from image 2 does exhaustive search for the nearest descriptor in image 1 using Euclidean metric. Manhattan distance is also implemented as well as a Hamming distance for Brief descriptor. The output vector ``matches`` contains pairs of corresponding points indices. :: |
||||
|
||||
// drawing the results |
||||
namedWindow("matches", 1); |
||||
Mat img_matches; |
||||
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches); |
||||
imshow("matches", img_matches); |
||||
waitKey(0); |
||||
|
||||
The final part of the sample is about visualizing the matching results. |
@ -0,0 +1,83 @@ |
||||
******* |
||||
HighGUI |
||||
******* |
||||
|
||||
.. highlight:: cpp |
||||
|
||||
Using Kinect sensor |
||||
=================== |
||||
|
||||
Kinect sensor is supported through ``VideoCapture`` class. Depth map, RGB image and some other formats of Kinect output can be retrieved by using familiar interface of ``VideoCapture``. |
||||
|
||||
In order to use Kinect with OpenCV you should do the following preliminary steps: |
||||
|
||||
#. |
||||
Install OpenNI library and PrimeSensor Module for OpenNI from http://www.openni.org/downloadfiles. The installation should be done to default folders listed in the instructions of these products: |
||||
|
||||
.. code-block:: text |
||||
|
||||
OpenNI: |
||||
Linux & MacOSX: |
||||
Libs into: /usr/lib |
||||
Includes into: /usr/include/ni |
||||
Windows: |
||||
Libs into: c:/Program Files/OpenNI/Lib |
||||
Includes into: c:/Program Files/OpenNI/Include |
||||
PrimeSensor Module: |
||||
Linux & MacOSX: |
||||
Bins into: /usr/bin |
||||
Windows: |
||||
Bins into: c:/Program Files/Prime Sense/Sensor/Bin |
||||
|
||||
If one or both products were installed to the other folders, the user should change corresponding CMake variables ``OPENNI_LIB_DIR``, ``OPENNI_INCLUDE_DIR`` or/and ``OPENNI_PRIME_SENSOR_MODULE_BIN_DIR``. |
||||
|
||||
#. |
||||
Configure OpenCV with OpenNI support by setting ``WITH_OPENNI`` flag in CMake. If OpenNI is found in default install folders OpenCV will be built with OpenNI library regardless of whether PrimeSensor Module is found or not. If PrimeSensor Module was not found you will get a warning in CMake log. Without PrimeSensor module OpenCV will be successfully compiled with OpenNI library, but ``VideoCapture`` object will not grab data from Kinect sensor. |
||||
|
||||
#. |
||||
Build OpenCV. |
||||
|
||||
VideoCapture can retrieve the following Kinect data: |
||||
|
||||
#. |
||||
data given from depth generator: |
||||
* ``OPENNI_DEPTH_MAP`` - depth values in mm (CV_16UC1) |
||||
* ``OPENNI_POINT_CLOUD_MAP`` - XYZ in meters (CV_32FC3) |
||||
* ``OPENNI_DISPARITY_MAP`` - disparity in pixels (CV_8UC1) |
||||
* ``OPENNI_DISPARITY_MAP_32F`` - disparity in pixels (CV_32FC1) |
||||
* ``OPENNI_VALID_DEPTH_MASK`` - mask of valid pixels (not ocluded, not shaded etc.) (CV_8UC1) |
||||
#. |
||||
data given from RGB image generator: |
||||
* ``OPENNI_BGR_IMAGE`` - color image (CV_8UC3) |
||||
* ``OPENNI_GRAY_IMAGE`` - gray image (CV_8UC1) |
||||
|
||||
In order to get depth map from Kinect use ``VideoCapture::operator >>``, e. g. :: |
||||
|
||||
VideoCapture capture(0); // or CV_CAP_OPENNI |
||||
for(;;) |
||||
{ |
||||
Mat depthMap; |
||||
capture >> depthMap; |
||||
|
||||
if( waitKey( 30 ) >= 0 ) |
||||
break; |
||||
} |
||||
|
||||
For getting several Kinect maps use ``VideoCapture::grab`` and ``VideoCapture::retrieve``, e.g. :: |
||||
|
||||
VideoCapture capture(0); // or CV_CAP_OPENNI |
||||
for(;;) |
||||
{ |
||||
Mat depthMap; |
||||
Mat bgrImage; |
||||
|
||||
capture.grab(); |
||||
|
||||
capture.retrieve( depthMap, OPENNI_DEPTH_MAP ); |
||||
capture.retrieve( bgrImage, OPENNI_BGR_IMAGE ); |
||||
|
||||
if( waitKey( 30 ) >= 0 ) |
||||
break; |
||||
} |
||||
|
||||
For more information please refer to a Kinect example of usage ``kinect_maps.cpp`` in ``opencv/samples/cpp`` folder. |
@ -0,0 +1,147 @@ |
||||
********************** |
||||
Operations with images |
||||
********************** |
||||
|
||||
.. highlight:: cpp |
||||
|
||||
Input/Output |
||||
============ |
||||
|
||||
Images |
||||
------ |
||||
|
||||
Load an image from a file: :: |
||||
|
||||
Mat img = imread(filename); |
||||
|
||||
If you read a jpg file, a 3 channel image is created by default. If you need a grayscale image, use: :: |
||||
|
||||
Mat img = imread(filename, 0); |
||||
|
||||
Save an image to a file: :: |
||||
|
||||
imwrite(filename, img); |
||||
|
||||
XML/YAML |
||||
-------- |
||||
|
||||
TBD |
||||
|
||||
Basic operations with images |
||||
============================ |
||||
|
||||
Accessing pixel intensity values |
||||
-------------------------------- |
||||
|
||||
In order to get pixel intensity value, you have to know the type of an image and the number of channels. Here is an example for a single channel grey scale image (type 8UC1) and pixel coordinates x and y: :: |
||||
|
||||
Scalar intensity = img.at<uchar>(x, y); |
||||
|
||||
``intensity.val[0]`` contains a value from 0 to 255. Now let us consider a 3 channel image with ``BGR`` color ordering (the default format returned by ``imread``): :: |
||||
|
||||
Vec3b intensity = img.at<Vec3b>(x, y); |
||||
uchar blue = intensity.val[0]; |
||||
uchar green = intensity.val[1]; |
||||
uchar red = intensity.val[2]; |
||||
|
||||
You can use the same method for floating-point images (for example, you can get such an image by running Sobel on a 3 channel image): :: |
||||
|
||||
Vec3f intensity = img.at<Vec3f>(x, y); |
||||
float blue = intensity.val[0]; |
||||
float green = intensity.val[1]; |
||||
float red = intensity.val[2]; |
||||
|
||||
The same method can be used to change pixel intensities: :: |
||||
|
||||
img.at<uchar>(x, y) = 128; |
||||
|
||||
There are functions in OpenCV, especially from calib3d module, such as ``projectPoints``, that take an array of 2D or 3D points in the form of ``Mat``. Matrix should contain exactly one column, each row corresponds to a point, matrix type should be 32FC2 or 32FC3 correspondingly. Such a matrix can be easily constructed from ``std::vector``: :: |
||||
|
||||
vector<Point2f> points; |
||||
//... fill the array |
||||
Mat pointsMat = Mat(points); |
||||
|
||||
One can access a point in this matrix using the same method ``Mat::at``: :: |
||||
|
||||
Point2f point = pointsMat.at<Point2f>(i, 0); |
||||
|
||||
|
||||
Memory management and reference counting |
||||
---------------------------------------- |
||||
|
||||
``Mat`` is a structure that keeps matrix/image characteristics (rows and columns number, data type etc) and a pointer to data. So nothing prevents us from having several instances of ``Mat`` corresponding to the same data. A ``Mat`` keeps a reference count that tells if data has to be deallocated when a particular instance of ``Mat`` is destroyed. Here is an example of creating two matrices without copying data: :: |
||||
|
||||
std::vector<Point3f> points; |
||||
// .. fill the array |
||||
Mat pointsMat = Mat(points).reshape(1); |
||||
|
||||
As a result we get a 32FC1 matrix with 3 columns instead of 32FC3 matrix with 1 column. ``pointsMat`` uses data from ``points`` and will not deallocate the memory when destroyed. In this particular instance, however, developer has to make sure that lifetime of ``points`` is longer than of ``pointsMat``. |
||||
If we need to copy the data, this is done using, for example, ``Mat::copyTo`` or ``Mat::clone``: :: |
||||
|
||||
Mat img = imread("image.jpg"); |
||||
Mat img1 = img.clone(); |
||||
|
||||
To the contrary with C API where an output image had to be created by developer, an empty output ``Mat`` can be supplied to each function. Each implementation calls ``Mat::create`` for a destination matrix. This method allocates data for a matrix if it is empty. If it is not empty and has the correct size and type, the method does nothing. If, however, size or type are different from input arguments, the data is deallocated (and lost) and a new data is allocated. For example: :: |
||||
|
||||
Mat img = imread("image.jpg"); |
||||
Mat sobelx; |
||||
Sobel(img, sobelx, CV_32F, 1, 0); |
||||
|
||||
Primitive operations |
||||
-------------------- |
||||
|
||||
There is a number of convenient operators defined on a matrix. For example, here is how we can make a black image from an existing greyscale image ``img``: :: |
||||
|
||||
img = Scalar(0); |
||||
|
||||
Selecting a region of interest: :: |
||||
|
||||
Rect r(10, 10, 100, 100); |
||||
Mat smallImg = img(r); |
||||
|
||||
A conversion from ``Mat`` to C API data structures: :: |
||||
|
||||
Mat img = imread("image.jpg"); |
||||
IplImage img1 = img; |
||||
CvMat m = img; |
||||
|
||||
Note that there is no data copying here. |
||||
|
||||
Conversion from color to grey scale: :: |
||||
|
||||
Mat img = imread("image.jpg"); // loading a 8UC3 image |
||||
Mat grey; |
||||
cvtColor(img, grey, CV_BGR2GRAY); |
||||
|
||||
Change image type from 8UC1 to 32FC1: :: |
||||
|
||||
src.convertTo(dst, CV_32F); |
||||
|
||||
Visualizing images |
||||
------------------ |
||||
|
||||
It is very useful to see intermediate results of your algorithm during development process. OpenCV provides a convenient way of visualizing images. A 8U image can be shown using: :: |
||||
|
||||
Mat img = imread("image.jpg"); |
||||
|
||||
namedWindow("image", CV_WINDOW_AUTOSIZE); |
||||
imshow("image", img); |
||||
waitKey(); |
||||
|
||||
A call to ``waitKey()`` starts a message passing cycle that waits for a key stroke in the ``"image"`` window. A ``32F`` image needs to be converted to ``8U`` type. For example: :: |
||||
|
||||
Mat img = imread("image.jpg"); |
||||
Mat grey; |
||||
cvtColor(img, grey, CV_BGR2GRAY); |
||||
|
||||
Mat sobelx; |
||||
Sobel(grey, sobelx, CV_32F, 1, 0); |
||||
|
||||
double minVal, maxVal; |
||||
minMaxLoc(sobelx, &minVal, &maxVal); //find minimum and maximum intensities |
||||
Mat draw; |
||||
sobelx.convertTo(draw, CV_8U, 255.0/(maxVal - minVal), -minVal); |
||||
|
||||
namedWindow("image", CV_WINDOW_AUTOSIZE); |
||||
imshow("image", draw); |
||||
waitKey(); |
@ -1,97 +0,0 @@ |
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
% % |
||||
% C++ % |
||||
% % |
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
|
||||
\ifCpp |
||||
\section{Detectors} |
||||
\section{Descriptors} |
||||
\section{Matching keypoints} |
||||
\subsection{The code} |
||||
We will start with a short sample opencv/samples/cpp/matcher\_simple.cpp: |
||||
|
||||
\begin{lstlisting} |
||||
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); |
||||
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE); |
||||
if(img1.empty() || img2.empty()) |
||||
{ |
||||
printf("Can't read one of the images\n"); |
||||
return -1; |
||||
} |
||||
|
||||
// detecting keypoints |
||||
SurfFeatureDetector detector(400); |
||||
vector<KeyPoint> keypoints1, keypoints2; |
||||
detector.detect(img1, keypoints1); |
||||
detector.detect(img2, keypoints2); |
||||
|
||||
// computing descriptors |
||||
SurfDescriptorExtractor extractor; |
||||
Mat descriptors1, descriptors2; |
||||
extractor.compute(img1, keypoints1, descriptors1); |
||||
extractor.compute(img2, keypoints2, descriptors2); |
||||
|
||||
// matching descriptors |
||||
BruteForceMatcher<L2<float> > matcher; |
||||
vector<DMatch> matches; |
||||
matcher.match(descriptors1, descriptors2, matches); |
||||
|
||||
// drawing the results |
||||
namedWindow("matches", 1); |
||||
Mat img_matches; |
||||
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches); |
||||
imshow("matches", img_matches); |
||||
waitKey(0); |
||||
\end{lstlisting} |
||||
|
||||
\subsection{The code explained} |
||||
Let us break the code down. |
||||
\begin{lstlisting} |
||||
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); |
||||
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE); |
||||
if(img1.empty() || img2.empty()) |
||||
{ |
||||
printf("Can't read one of the images\n"); |
||||
return -1; |
||||
} |
||||
\end{lstlisting} |
||||
We load two images and check if they are loaded correctly. |
||||
|
||||
\begin{lstlisting} |
||||
// detecting keypoints |
||||
FastFeatureDetector detector(15); |
||||
vector<KeyPoint> keypoints1, keypoints2; |
||||
detector.detect(img1, keypoints1); |
||||
detector.detect(img2, keypoints2); |
||||
\end{lstlisting} |
||||
First, we create an instance of a keypoint detector. All detectors inherit the abstract FeatureDetector interface, but the constructors are algorithm-dependent. The first argument to each detector usually controls the balance between the amount of keypoints and their stability. The range of values is different for different detectors \footnote{For instance, FAST threshold has the meaning of pixel intensity difference and usually varies in the region \([0,40]\). SURF threshold is applied to a Hessian of an image and usually takes on values larger than \(100\).} so use defaults in case of doubt. |
||||
|
||||
\begin{lstlisting} |
||||
// computing descriptors |
||||
SurfDescriptorExtractor extractor; |
||||
Mat descriptors1, descriptors2; |
||||
extractor.compute(img1, keypoints1, descriptors1); |
||||
extractor.compute(img2, keypoints2, descriptors2); |
||||
\end{lstlisting} |
||||
We create an instance of descriptor extractor. The most of OpenCV descriptors inherit DescriptorExtractor abstract interface. Then we compute descriptors for each of the keypoints. The output \texttt{Mat} of the \texttt{DescriptorExtractor::compute} method contains a descriptor in a row \(i\) for each \(i\)-th keypoint. Note that the method can modify the keypoints vector by removing the keypoints such that a descriptor for them is not defined (usually these are the keypoints near image border). The method makes sure that the ouptut keypoints and descriptors are consistent with each other (so that the number of keypoints is equal to the descriptors row count). |
||||
|
||||
\begin{lstlisting} |
||||
// matching descriptors |
||||
BruteForceMatcher<L2<float> > matcher; |
||||
vector<DMatch> matches; |
||||
matcher.match(descriptors1, descriptors2, matches); |
||||
\end{lstlisting} |
||||
Now that we have descriptors for both images, we can match them. First, we create a matcher that for each descriptor from image 2 does exhaustive search for the nearest descriptor in image 1 using Eucledian metric. Manhattan distance is also implemented as well as a Hamming distance for Brief descriptor. The output vector \texttt{matches} contains pairs of corresponding points indices. |
||||
|
||||
\begin{lstlisting} |
||||
// drawing the results |
||||
namedWindow("matches", 1); |
||||
Mat img_matches; |
||||
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches); |
||||
imshow("matches", img_matches); |
||||
waitKey(0); |
||||
\end{lstlisting} |
||||
The final part of the sample is about visualizing the matching results. |
||||
\fi |
@ -0,0 +1,10 @@ |
||||
################# |
||||
OpenCV User Guide |
||||
################# |
||||
|
||||
.. toctree:: |
||||
:maxdepth: 2 |
||||
|
||||
ug_mat.rst |
||||
ug_features2d.rst |
||||
ug_highgui.rst |
@ -1,89 +0,0 @@ |
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
% % |
||||
% C++ % |
||||
% % |
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
|
||||
\ifCpp |
||||
\section{Using Kinect sensor.} |
||||
|
||||
Kinect sensor is supported through \texttt{VideoCapture} class. Depth map, rgb image and some other formats of Kinect |
||||
output can be retrieved by using familiar interface of \texttt{VideoCapture}.\par |
||||
|
||||
In order to use Kinect with OpenCV you should do the following preliminary steps:\newline |
||||
1) Install OpenNI library and PrimeSensor Module for OpenNI from here \url{http://www.openni.org/downloadfiles}. |
||||
The installation should be done to default folders listed in the instructions of these products: |
||||
\begin{lstlisting} |
||||
OpenNI: |
||||
Linux & MacOSX: |
||||
Libs into: /usr/lib |
||||
Includes into: /usr/include/ni |
||||
Windows: |
||||
Libs into: c:/Program Files/OpenNI/Lib |
||||
Includes into: c:/Program Files/OpenNI/Include |
||||
PrimeSensor Module: |
||||
Linux & MacOSX: |
||||
Bins into: /usr/bin |
||||
Windows: |
||||
Bins into: c:/Program Files/Prime Sense/Sensor/Bin |
||||
\end{lstlisting} |
||||
If one or both products were installed to the other folders, the user should change corresponding CMake variables |
||||
(\texttt{OPENNI\_LIB\_DIR}, \texttt{OPENNI\_INCLUDE\_DIR} or/and |
||||
\texttt{OPENNI\_PRIME\_SENSOR\_MODULE\_BIN\_DIR}).\newline |
||||
2) Configure OpenCV with OpenNI support by setting \texttt{WITH\_OPENNI} flag in CMake. If OpenNI |
||||
is found in default install folders OpenCV will be built with OpenNI library regardless of whether |
||||
PrimeSensor Module is found or not. If PrimeSensor Module was not found you will get a warning |
||||
in CMake log. Without PrimeSensor module OpenCV will be successfully compiled with OpenNI library, |
||||
but \texttt{VideoCapture} object will not grab data from Kinect sensor. \par |
||||
|
||||
3) Build OpenCV.\par |
||||
|
||||
VideoCapture can retrieve the following Kinect data: |
||||
\begin{lstlisting} |
||||
a.) data given from depth generator: |
||||
OPENNI_DEPTH_MAP - depth values in mm (CV_16UC1) |
||||
OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3) |
||||
OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1) |
||||
OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1) |
||||
OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not ocluded, |
||||
not shaded etc.) (CV_8UC1) |
||||
b.) data given from RGB image generator: |
||||
OPENNI_BGR_IMAGE - color image (CV_8UC3) |
||||
OPENNI_GRAY_IMAGE - gray image (CV_8UC1) |
||||
\end{lstlisting} |
||||
|
||||
In order to get depth map from Kinect use \texttt{VideoCapture::operator >>}, e. g. |
||||
\begin{lstlisting} |
||||
VideoCapture capture(0); // or CV_CAP_OPENNI |
||||
for(;;) |
||||
{ |
||||
Mat depthMap; |
||||
|
||||
capture >> depthMap; |
||||
|
||||
if( waitKey( 30 ) >= 0 ) |
||||
break; |
||||
} |
||||
\end{lstlisting} |
||||
For getting several Kinect maps use \texttt{VideoCapture::grab + VideoCapture::retrieve}, e.g. |
||||
\begin{lstlisting} |
||||
VideoCapture capture(0); // or CV_CAP_OPENNI |
||||
for(;;) |
||||
{ |
||||
Mat depthMap; |
||||
Mat rgbImage |
||||
|
||||
capture.grab(); |
||||
|
||||
capture.retrieve( depthMap, OPENNI_DEPTH_MAP ); |
||||
capture.retrieve( bgrImage, OPENNI_BGR_IMAGE ); |
||||
|
||||
if( waitKey( 30 ) >= 0 ) |
||||
break; |
||||
} |
||||
\end{lstlisting} |
||||
|
||||
For more information please refer to a kinect example of usage \texttt{kinect\_maps.cpp} in \texttt{sample} folder. |
||||
|
||||
\fi |
@ -1,141 +0,0 @@ |
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
% % |
||||
% C++ % |
||||
% % |
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
||||
|
||||
\ifCpp |
||||
\section{Input/Output} |
||||
\subsection{Images} |
||||
Load an image from a file: |
||||
\begin{lstlisting} |
||||
Mat img = imread(filename); |
||||
\end{lstlisting} |
||||
If you read a jpg file, a 3 channel image is created by default. If you need a grayscale image, use: |
||||
\begin{lstlisting} |
||||
Mat img = imread(filename, 0); |
||||
\end{lstlisting} |
||||
Save an image to a file: |
||||
\begin{lstlisting} |
||||
Mat img = imwrite(filename); |
||||
\end{lstlisting} |
||||
\subsection{XML/YAML} |
||||
|
||||
\section{Basic operations with images} |
||||
\subsection{Accessing pixel intensity values} |
||||
In order to get pixel intensity value, you have to know the type of an image and the number of channels. Here is an example for a single channel grey scale image (type 8UC1) and pixel coordinates x and y: |
||||
\begin{lstlisting} |
||||
Scalar intensity = img.at<uchar>(x, y); |
||||
\end{lstlisting} |
||||
\texttt{intensity.val[0]} contains a value from 0 to 255. |
||||
Now let us consider a 3 channel image with \texttt{bgr} color ordering (the default format returned by imread): |
||||
\begin{lstlisting} |
||||
Vec3b intensity = img.at<Vec3b>(x, y); |
||||
uchar blue = intensity.val[0]; |
||||
uchar green = intensity.val[1]; |
||||
uchar red = intensity.val[2]; |
||||
\end{lstlisting} |
||||
You can use the same method for floating-point images (for example, you can get such an image by running Sobel on a 3 channel image): |
||||
\begin{lstlisting} |
||||
Vec3f intensity = img.at<Vec3f>(x, y); |
||||
float blue = intensity.val[0]; |
||||
float green = intensity.val[1]; |
||||
float red = intensity.val[2]; |
||||
\end{lstlisting} |
||||
The same method can be used to change pixel intensities: |
||||
\begin{lstlisting} |
||||
img.at<uchar>(x, y) = 128; |
||||
\end{lstlisting} |
||||
|
||||
|
||||
There are functions in OpenCV, especially from calib3d module, such as \texttt{projectPoints}, that take an array of 2D or 3D points in the form of \texttt{Mat}. Matrix should contain exactly one column, each row corresponds to a point, matrix type should be 32FC2 or 32FC3 correspondingly. Such a matrix can be easily constructed from std::vector: |
||||
\begin{lstlisting} |
||||
vector<Point2f> points; |
||||
//... fill the array |
||||
Mat pointsMat = Mat(points); |
||||
\end{lstlisting} |
||||
One can access a point in this matrix using the same method \texttt{Mat::at}: |
||||
\begin{lstlisting} |
||||
Point2f point = pointsMat.at<Point2f>(i, 0); |
||||
\end{lstlisting} |
||||
|
||||
\subsection{Memory management and reference counting} |
||||
\texttt{Mat} is a structure that keeps matrix/image characteristics (rows and columns number, data type etc) and a pointer to data. So nothing prevents us from having several instances of \texttt{Mat} corresponding to the same data. A \texttt{Mat} keeps a reference count that tells if data has to be deallocated when a particular instance of \texttt{Mat} is destroyed. Here is an example of creating two matrices without copying data: |
||||
\begin{lstlisting} |
||||
std::vector<Point3f> points; |
||||
// .. fill the array |
||||
Mat pointsMat = Mat(points).reshape(1); |
||||
\end{lstlisting} |
||||
As a result we get a 32FC1 matrix with 3 columns instead of 32FC3 matrix with 1 column. \texttt{pointsMat} uses data from \texttt{points} and will not deallocate the memory when destroyed. In this particular instance, however, developer has to make sure that lifetime of \texttt{points} is longer than of \texttt{pointsMat}. |
||||
If we need to copy the data, this is done using, for example, \texttt{Mat::copyTo} or \texttt{Mat::clone}: |
||||
\begin{lstlisting} |
||||
Mat img = imread("image.jpg"); |
||||
Mat img1 = img.clone(); |
||||
\end{lstlisting} |
||||
To the contrary with C API where an output image had to be created by developer, an empty output \texttt{Mat} can be supplied to each function. Each implementation calls \texttt{Mat::create} for a destination matrix. This method allocates data for a matrix if it is empty. If it is not empty and has the correct size and type, the method does nothing. If, however, size or type are different from input arguments, the data is deallocated (and lost) and a new data is allocated. For example: |
||||
\begin{lstlisting} |
||||
Mat img = imread("image.jpg"); |
||||
Mat sobelx; |
||||
Sobel(img, sobelx, CV_32F, 1, 0); |
||||
\end{lstlisting} |
||||
|
||||
\subsection{Primitive operations} |
||||
There is a number of convenient operators defined on a matrix. For example, here is how we can make a black image from an existing greyscale image \texttt{img}: |
||||
\begin{lstlisting} |
||||
img = Scalar(0); |
||||
\end{lstlisting} |
||||
Selecting a region of interest: |
||||
\begin{lstlisting} |
||||
Rect r(10, 10, 100, 100); |
||||
Mat smallImg = img(r); |
||||
\end{lstlisting} |
||||
A convertion from \texttt{Mat} to C API data structures: |
||||
\begin{lstlisting} |
||||
Mat img = imread("image.jpg"); |
||||
IplImage img1 = img; |
||||
CvMat m = img; |
||||
\end{lstlisting} |
||||
Note that there is no data copying here. |
||||
|
||||
Conversion from color to grey scale: |
||||
\begin{lstlisting} |
||||
Mat img = imread("image.jpg"); // loading a 8UC3 image |
||||
Mat grey; |
||||
cvtColor(img, grey, CV_BGR2GRAY); |
||||
\end{lstlisting} |
||||
Change image type from 8UC1 to 32FC1: |
||||
\begin{lstlisting} |
||||
convertTo(src, dst, CV_32F); |
||||
\end{lstlisting} |
||||
|
||||
\subsection{Visualizing images} |
||||
It is very useful to see indermediate results of your algorithm during development process. OpenCV provides a convenient way of visualizing images. A 8U image can be shown using: |
||||
\begin{lstlisting} |
||||
Mat img = imread("image.jpg"); |
||||
|
||||
namedWindow("image", CV_WINDOW_AUTOSIZE); |
||||
imshow("image", img); |
||||
waitKey(); |
||||
\end{lstlisting} |
||||
A call to waitKey() starts a message passing cycle that waits for a key stroke in the \texttt{"image"} window. A 32F image needs to be converted to 8U type. For example: |
||||
\begin{lstlisting} |
||||
Mat img = imread("image.jpg"); |
||||
Mat grey; |
||||
cvtColor(img, grey, CV_BGR2GREY); |
||||
|
||||
Mat sobelx; |
||||
Sobel(grey, sobelx, CV_32F, 1, 0); |
||||
|
||||
double minVal, maxVal; |
||||
minMaxLoc(sobelx, &minVal, &maxVal); //find minimum and maximum intensities |
||||
Mat draw; |
||||
sobelx.convertTo(draw, CV_8U, 255.0/(maxVal - minVal), -minVal); |
||||
|
||||
namedWindow("image", CV_WINDOW_AUTOSIZE); |
||||
imshow("image", draw); |
||||
waitKey(); |
||||
\end{lstlisting} |
||||
|
||||
|
||||
\fi |
@ -1,68 +0,0 @@ |
||||
% verbdef.sty v0.2 -- Robin Fairbairns 2000/10/06 |
||||
\ProvidesPackage{verbdef}[2000/10/06 v0.2 define verbatim csnames] |
||||
|
||||
% This package provides a single command \verbdef |
||||
% |
||||
% Usage: \verbdef\test|verbatim text| |
||||
% \verbdef*\testar{with visible spaces} |
||||
% |
||||
% \test (or \testar) above will be defined as robust commands that |
||||
% expand to typeset their `verbatim text' argument in the usual |
||||
% verbatim font (using the visible space symbol in the * case) |
||||
% |
||||
% The verbatim text argument may be delimited in the same way as the |
||||
% argument of a \verb command (see definition of \test above) or using |
||||
% braces (see definition of \testar command above) |
||||
% |
||||
% Note: if the command you're defining with \verbdef is to be used in |
||||
% a \section-type command, or a \caption, it's going to appear in the |
||||
% table of contents, or list of whatevers; in this case you must |
||||
% define the command *before* the \tableofcontents command (or |
||||
% whatever). I recommend defining the commands in the preamble of |
||||
% your document. |
||||
|
||||
% This program may be distributed and/or modified under the |
||||
% conditions of the LaTeX Project Public License, either version 1.1 |
||||
% of this license or (at your option) any later version. |
||||
% The latest version of this license is in |
||||
% http://www.latex-project.org/lppl.txt |
||||
% and version 1.1 or later is part of all distributions of LaTeX |
||||
% version 1999/06/01 or later. |
||||
% |
||||
% This program consists of the file verbdef.sty |
||||
|
||||
% Switch recording which form was used: true = unstarred \verbdef
% (ordinary, non-visible spaces in the stored text, as with plain \verb).
\newif\ifverbdef@nostar |
||||
% User-level entry point: reset the flag, then dispatch on an optional *.
\def\verbdef{\verbdef@nostarfalse |
||||
\@ifstar\@sverbdef\@verbdef} |
||||
% Unstarred branch: record that fact, then fall through to the shared worker.
\def\@verbdef{\verbdef@nostartrue\@sverbdef} |
||||
|
||||
% set up robustness of the command to be defined, set conditions for |
||||
% reading verbatim text |
||||
% #1 is the control sequence being defined (e.g. \test).  Its name (sans
% backslash) is saved in \verbdef@tempa, and #1 itself is made robust by
% defining it to \protect the space-suffixed csname that will later hold
% the verbatim text.
\def\@sverbdef#1{\edef\verbdef@tempa{\expandafter\@gobble\string#1}% |
||||
\edef#1{\noexpand\protect |
||||
\expandafter\noexpand\csname\verbdef@tempa\space\endcsname}% |
||||
% The group keeps the catcode changes below local; it is closed in
% \@verb@def via \afterassignment\endgroup.
\begingroup |
||||
\verb@eol@error |
||||
% Make every special character "other", exactly as \verb does.
\let\do\@makeother \dospecials |
||||
\toks@{\verbatim@font\@noligs}% |
||||
\ifverbdef@nostar |
||||
% Unstarred form: obeyed (non-visible) spaces plus \frenchspacing,
% mirroring the kernel's unstarred \verb.
\@vobeyspaces |
||||
\toks@\expandafter{\the\toks@\frenchspacing}% |
||||
\fi |
||||
\@verb@def} |
||||
|
||||
% |
||||
% Read the verbatim text itself.  #1 is the first character following the
% command name: either `{` (brace-delimited argument) or an arbitrary
% delimiter character, as with \verb.
\def\@verb@def#1{% |
||||
\ifnum`#1=`\{\relax |
||||
% Brace form: the matching `}` becomes the active end-of-text marker.
\catcode`\}\active |
||||
\lccode`\~`\}% |
||||
\else |
||||
% \verb-style form: the same character that opened the text closes it.
\catcode`#1\active |
||||
\lccode`\~`#1% |
||||
\fi |
||||
% Standard \lccode trick: \lowercase turns the ~ in \@tempa's parameter
% text into the active delimiter character selected above.
\lowercase{% |
||||
\def\@tempa##1~{% |
||||
% Globally (\xdef) store the font setup plus captured text under the
% space-suffixed csname prepared in \@sverbdef.
\expandafter\xdef\csname\verbdef@tempa\space\endcsname{% |
||||
{\the\toks@##1}}}}% |
||||
% \endgroup fires right after the \xdef assignment inside \@tempa,
% closing the catcode group opened in \@sverbdef.
\afterassignment\endgroup |
||||
\@tempa} |
@ -0,0 +1,23 @@ |
||||
.. opencvstd documentation master file, created by |
||||
sphinx-quickstart on Mon Feb 14 00:30:43 2011. |
||||
You can adapt this file completely to your liking, but it should at least |
||||
contain the root `toctree` directive. |
||||
|
||||
Welcome to opencv documentation! |
||||
================================ |
||||
|
||||
.. toctree:: |
||||
:maxdepth: 2 |
||||
|
||||
modules/refman.rst |
||||
doc/opencv1/c/c_index.rst |
||||
doc/opencv1/py/py_index.rst |
||||
doc/user_guide/user_guide.rst |
||||
doc/tutorials/tutorials.rst |
||||
|
||||
Indices and tables |
||||
================== |
||||
|
||||
* :ref:`genindex` |
||||
* :ref:`modindex` |
||||
* :ref:`search` |
@ -1,30 +0,0 @@ |
||||
.. opencvstd documentation master file, created by |
||||
sphinx-quickstart on Mon Feb 14 00:30:43 2011. |
||||
You can adapt this file completely to your liking, but it should at least |
||||
contain the root `toctree` directive. |
||||
|
||||
Welcome to opencvstd's documentation! |
||||
===================================== |
||||
|
||||
Contents: |
||||
|
||||
.. highlight:: cpp |
||||
|
||||
.. toctree:: |
||||
:maxdepth: 2 |
||||
|
||||
core/doc/intro.rst |
||||
core/doc/core.rst |
||||
imgproc/doc/imgproc.rst |
||||
highgui/doc/highgui.rst |
||||
video/doc/video.rst |
||||
calib3d/doc/calib3d.rst |
||||
features2d/doc/features2d.rst |
||||
objdetect/doc/objdetect.rst |
||||
ml/doc/ml.rst |
||||
gpu/doc/gpu.rst |
||||
|
||||
Indices and tables |
||||
================== |
||||
|
||||
* :ref:`genindex` |
* :ref:`modindex` |
* :ref:`search` |
@ -0,0 +1,17 @@ |
||||
############################ |
||||
OpenCV 2.x C++ API Reference |
||||
############################ |
||||
|
||||
.. toctree:: |
||||
:maxdepth: 2 |
||||
|
||||
core/doc/intro.rst |
||||
core/doc/core.rst |
||||
imgproc/doc/imgproc.rst |
||||
highgui/doc/highgui.rst |
||||
video/doc/video.rst |
||||
calib3d/doc/calib3d.rst |
||||
features2d/doc/features2d.rst |
||||
objdetect/doc/objdetect.rst |
||||
ml/doc/ml.rst |
||||
gpu/doc/gpu.rst |
Loading…
Reference in new issue