Merge remote-tracking branch 'upstream/master'

pull/3308/head
edgarriba 11 years ago
commit f317d1277f
Changed files shown (changed line count in parentheses; BIN = binary):

  1. CMakeLists.txt (9)
  2. cmake/OpenCVFindLibsVideo.cmake (1)
  3. cmake/OpenCVFindXimea.cmake (6)
  4. cmake/OpenCVGenConfig.cmake (24)
  5. doc/_themes/sphinxdoc/layout.html (274)
  6. doc/_themes/sphinxdoc/searchbox.html (21)
  7. doc/_themes/sphinxdoc/static/bodybg.png (BIN)
  8. doc/_themes/sphinxdoc/static/default.css_t (466)
  9. doc/_themes/sphinxdoc/static/footerbg.png (BIN)
  10. doc/_themes/sphinxdoc/static/headerbg.png (BIN)
  11. doc/_themes/sphinxdoc/static/listitem.png (BIN)
  12. doc/_themes/sphinxdoc/static/relbg.png (BIN)
  13. doc/_themes/sphinxdoc/theme.conf (4)
  14. doc/conf.py (4)
  15. doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.rst (2)
  16. doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.rst (2)
  17. doc/py_tutorials/py_core/py_optimization/py_optimization.rst (2)
  18. doc/py_tutorials/py_feature2d/py_features_meaning/py_features_meaning.rst (2)
  19. doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.rst (2)
  20. doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.rst (2)
  21. doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.rst (2)
  22. doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.rst (2)
  23. doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.rst (2)
  24. doc/py_tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.rst (2)
  25. doc/py_tutorials/py_objdetect/py_face_detection/images/haar.png (BIN)
  26. doc/py_tutorials/py_video/py_table_of_contents_video/py_table_of_contents_video.rst (2)
  27. doc/tutorials/images/opencv_ios.png (BIN)
  28. doc/tutorials/introduction/table_of_content_introduction/images/opencv_ios.png (BIN)
  29. doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst (37)
  30. doc/tutorials/ios/hello/images/header_directive.png (BIN)
  31. doc/tutorials/ios/hello/images/linking_opencv_ios.png (BIN)
  32. doc/tutorials/ios/hello/images/output.png (BIN)
  33. doc/tutorials/ios/hello/images/view_did_load.png (BIN)
  34. doc/tutorials/ios/table_of_content_ios/images/image_effects.png (BIN)
  35. doc/tutorials/ios/table_of_content_ios/images/intro.png (BIN)
  36. doc/tutorials/ios/video_processing/images/xcode_hello_ios_framework_drag_and_drop.png (BIN)
  37. doc/tutorials/ios/video_processing/images/xcode_hello_ios_frameworks_add_dependencies.png (BIN)
  38. doc/tutorials/ios/video_processing/images/xcode_hello_ios_viewcontroller_layout.png (BIN)
  39. doc/tutorials/viz/table_of_content_viz/images/image_effects.png (BIN)
  40. doc/tutorials/viz/table_of_content_viz/images/intro.png (BIN)
  41. modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst (4)
  42. modules/core/doc/basic_structures.rst (2)
  43. modules/core/include/opencv2/core.hpp (87)
  44. modules/core/include/opencv2/core/base.hpp (35)
  45. modules/core/include/opencv2/core/types.hpp (99)
  46. modules/core/src/convert.cpp (661)
  47. modules/core/src/matmul.cpp (10)
  48. modules/core/src/ocl.cpp (4)
  49. modules/core/src/stat.cpp (179)
  50. modules/features2d/CMakeLists.txt (2)
  51. modules/features2d/test/test_nearestneighbors.cpp (87)
  52. modules/features2d/test/test_precomp.hpp (1)
  53. modules/highgui/src/agile_wrl.h (568)
  54. modules/highgui/src/files_Qt/Milky/48/15.png (BIN)
  55. modules/highgui/src/files_Qt/Milky/48/36.png (BIN)
  56. modules/highgui/src/files_Qt/Milky/48/8.png (BIN)
  57. modules/highgui/src/files_Qt/Milky/64/20.png (BIN)
  58. modules/highgui/src/files_Qt/Milky/64/3.png (BIN)
  59. modules/highgui/src/ppltasks_winrt.h (7237)
  60. modules/highgui/src/window_QT.cpp (105)
  61. modules/imgproc/doc/colormaps.rst (57)
  62. modules/imgproc/doc/object_detection.rst (1)
  63. modules/imgproc/doc/pics/bayer.png (BIN)
  64. modules/imgproc/doc/pics/integral.png (BIN)
  65. modules/imgproc/doc/pics/minenclosingtriangle.png (BIN)
  66. modules/imgproc/perf/opencl/perf_houghLines.cpp (91)
  67. modules/imgproc/src/filter.cpp (19)
  68. modules/imgproc/src/opencl/filter2D.cl (292)
  69. modules/imgproc/src/opencl/match_template.cl (2)
  70. modules/imgproc/src/pyramids.cpp (5)
  71. modules/imgproc/src/smooth.cpp (7)
  72. modules/java/CMakeLists.txt (2)
  73. modules/java/android_test/CMakeLists.txt (8)
  74. modules/java/generator/config/bioinspired.filelist (1)
  75. modules/java/generator/gen_java.py (308)
  76. modules/java/test/CMakeLists.txt (8)
  77. modules/ml/include/opencv2/ml.hpp (8)
  78. modules/ml/src/gbt.cpp (2)
  79. modules/ml/src/kdtree.cpp (9)
  80. modules/ml/src/kdtree.hpp (97)
  81. modules/ml/src/knearest.cpp (158)
  82. modules/ml/test/test_emknearestkmeans.cpp (24)
  83. modules/python/common.cmake (2)
  84. modules/python/src2/cv2.cpp (2)
  85. modules/stitching/src/motion_estimators.cpp (9)
  86. modules/video/include/opencv2/video/tracking.hpp (20)
  87. modules/video/src/bgfg_gaussmix2.cpp (64)
  88. modules/video/src/opencl/bgfg_mog2.cl (258)
  89. modules/videoio/CMakeLists.txt (13)
  90. modules/videoio/src/cap.cpp (17)
  91. modules/videoio/src/cap_ffmpeg_impl.hpp (1)
  92. modules/videoio/src/cap_gstreamer.cpp (398)
  93. modules/videoio/src/cap_msmf.cpp (967)
  94. modules/videoio/src/cap_msmf.hpp (922)
  95. modules/videoio/src/cap_ximea.cpp (8)
  96. modules/videoio/test/test_basic_props.cpp (157)
  97. modules/videoio/test/test_video_io.cpp (12)
  98. samples/cpp/CMakeLists.txt (6)
  99. samples/cpp/stereo_calib.cpp (2)
  100. samples/python2/common.py (4)

Some files were not shown because too many files have changed in this diff.

@@ -153,7 +153,7 @@ OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON
OCV_OPTION(WITH_LIBV4L "Use libv4l for Video 4 Linux support" ON IF (UNIX AND NOT ANDROID) )
OCV_OPTION(WITH_DSHOW "Build VideoIO with DirectShow support" ON IF (WIN32 AND NOT ARM) )
OCV_OPTION(WITH_MSMF "Build VideoIO with Media Foundation support" OFF IF WIN32 )
OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF IF (NOT ANDROID AND NOT APPLE) )
OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF IF (NOT ANDROID) )
OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_CLP "Include Clp support (EPL)" OFF)
OCV_OPTION(WITH_OPENCL "Include OpenCL Runtime support" ON IF (NOT IOS) )
@@ -254,7 +254,7 @@ else()
set(OPENCV_DOC_INSTALL_PATH share/OpenCV/doc)
endif()
if(WIN32)
if(WIN32 AND CMAKE_HOST_SYSTEM_NAME MATCHES Windows)
if(DEFINED OpenCV_RUNTIME AND DEFINED OpenCV_ARCH)
set(OpenCV_INSTALL_BINARIES_PREFIX "${OpenCV_ARCH}/${OpenCV_RUNTIME}/")
else()
@@ -294,7 +294,7 @@ if(ANDROID)
else()
set(LIBRARY_OUTPUT_PATH "${OpenCV_BINARY_DIR}/lib")
set(3P_LIBRARY_OUTPUT_PATH "${OpenCV_BINARY_DIR}/3rdparty/lib${LIB_SUFFIX}")
if(WIN32)
if(WIN32 AND CMAKE_HOST_SYSTEM_NAME MATCHES Windows)
if(OpenCV_STATIC)
set(OPENCV_LIB_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib${LIB_SUFFIX}")
else()
@@ -1043,7 +1043,8 @@ status("    ant:" ANT_EXECUTABLE THEN "${ANT_EXECUTABLE} (ver ${A
if(NOT ANDROID)
status(" JNI:" JNI_INCLUDE_DIRS THEN "${JNI_INCLUDE_DIRS}" ELSE NO)
endif()
status(" Java tests:" BUILD_TESTS AND (CAN_BUILD_ANDROID_PROJECTS OR HAVE_opencv_java) THEN YES ELSE NO)
status(" Java wrappers:" HAVE_opencv_java THEN YES ELSE NO)
status(" Java tests:" BUILD_TESTS AND opencv_test_java_BINARY_DIR THEN YES ELSE NO)
# ========================= matlab =========================
status("")

@@ -254,6 +254,7 @@ if(WITH_DSHOW)
endif(WITH_DSHOW)
# --- VideoInput/Microsoft Media Foundation ---
ocv_clear_vars(HAVE_MSMF)
if(WITH_MSMF)
check_include_file(Mfapi.h HAVE_MSMF)
endif(WITH_MSMF)

@@ -31,6 +31,12 @@ if(WIN32)
else()
set(XIMEA_FOUND 0)
endif()
elseif(APPLE)
if(EXISTS /Library/Frameworks/m3api.framework)
set(XIMEA_FOUND 1)
else()
set(XIMEA_FOUND 0)
endif()
else()
if(EXISTS /opt/XIMEA)
set(XIMEA_FOUND 1)

@@ -154,13 +154,19 @@ if(WIN32)
endif()
configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig.cmake.in" "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" @ONLY)
configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig-version.cmake" @ONLY)
if(BUILD_SHARED_LIBS)
install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib" COMPONENT dev)
install(EXPORT OpenCVModules DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib" FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
else()
install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib" COMPONENT dev)
install(EXPORT OpenCVModules DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib" FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
endif()
install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig-version.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}" COMPONENT dev)
install(FILES "${OpenCV_SOURCE_DIR}/cmake/OpenCVConfig.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}/" COMPONENT dev)
if (CMAKE_HOST_SYSTEM_NAME MATCHES Windows)
if(BUILD_SHARED_LIBS)
install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib" COMPONENT dev)
install(EXPORT OpenCVModules DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib" FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
else()
install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib" COMPONENT dev)
install(EXPORT OpenCVModules DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib" FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
endif()
install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig-version.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}" COMPONENT dev)
install(FILES "${OpenCV_SOURCE_DIR}/cmake/OpenCVConfig.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}/" COMPONENT dev)
else ()
install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig.cmake" DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib/cmake/opencv-${OPENCV_VERSION}" COMPONENT dev)
install(EXPORT OpenCVModules DESTINATION "${OpenCV_INSTALL_BINARIES_PREFIX}lib/cmake/opencv-${OPENCV_VERSION}" FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
install(FILES "${CMAKE_BINARY_DIR}/win-install/OpenCVConfig-version.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}/lib/cmake/opencv-${OPENCV_VERSION}" COMPONENT dev)
endif ()
endif()

@@ -0,0 +1,274 @@
{#
basic/layout.html
~~~~~~~~~~~~~~~~~
Master layout template for Sphinx themes.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
{%- block doctype -%}
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{%- endblock %}
{% set script_files = script_files + [pathto("_static/insertIframe.js", 1)] %}
{%- set reldelim1 = reldelim1 is not defined and ' &raquo;' or reldelim1 %}
{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %}
{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and
(sidebars != []) %}
{%- set url_root = pathto('', 1) %}
{# XXX necessary? #}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
{%- if not embedded and docstitle %}
{%- set titlesuffix = " &mdash; "|safe + docstitle|e %}
{%- else %}
{%- set titlesuffix = "" %}
{%- endif %}
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-33108845-1']);
_gaq.push(['_setDomainName', 'opencv.org']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
{%- macro relbar() %}
<div class="related">
<h3>{{ _('Navigation') }}</h3>
<ul>
{%- for rellink in rellinks %}
<li class="right" {% if loop.first %}style="margin-right: 10px"{% endif %}>
<a href="{{ pathto(rellink[0]) }}" title="{{ rellink[1]|striptags|e }}"
{{ accesskey(rellink[2]) }}>{{ rellink[3] }}</a>
{%- if not loop.first %}{{ reldelim2 }}{% endif %}</li>
{%- endfor %}
{%- block rootrellink %}
<li><a href="{{ pathto(master_doc) }}">{{ shorttitle|e }}</a>{{ reldelim1 }}</li>
{%- endblock %}
{%- for parent in parents %}
<li><a href="{{ parent.link|e }}" {% if loop.last %}{{ accesskey("U") }}{% endif %}>{{ parent.title }}</a>{{ reldelim1 }}</li>
{%- endfor %}
{%- block relbaritems %} {% endblock %}
</ul>
</div>
{%- endmacro %}
{%- macro sidebar() %}
{%- if render_sidebar %}
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
{%- block sidebarlogo %}
{%- if logo %}
<p class="logo"><a href="{{ pathto(master_doc) }}">
<img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/>
</a></p>
{%- endif %}
{%- endblock %}
{%- if sidebars == None %}
{%- block sidebarsearch %}
{%- include "searchbox.html" %}
{%- endblock %}
{%- endif %}
{%- if sidebars != None %}
{#- new style sidebar: explicitly include/exclude templates #}
{%- for sidebartemplate in sidebars %}
{%- include sidebartemplate %}
{%- endfor %}
{%- else %}
{#- old style sidebars: using blocks -- should be deprecated #}
{%- block sidebartoc %}
{%- include "localtoc.html" %}
{%- endblock %}
{%- block sidebarrel %}
{%- include "relations.html" %}
{%- endblock %}
{%- if customsidebar %}
{%- include customsidebar %}
{%- endif %}
{%- endif %}
</div>
</div>
{%- endif %}
{%- endmacro %}
{%- macro script() %}
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '{{ url_root }}',
VERSION: '{{ release|e }}',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '{{ '' if no_search_suffix else file_suffix }}',
HAS_SOURCE: {{ has_source|lower }}
};
</script>
{%- for scriptfile in script_files %}
<script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
{%- endfor %}
{%- endmacro %}
{%- macro css() %}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
<link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" />
{%- for cssfile in css_files %}
<link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
{%- endfor %}
{%- endmacro %}
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset={{ encoding }}" />
{{ metatags }}
{%- block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{%- endblock %}
{{ css() }}
{%- if not embedded %}
{{ script() }}
{%- if use_opensearch %}
<link rel="search" type="application/opensearchdescription+xml"
title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
href="{{ pathto('_static/opensearch.xml', 1) }}"/>
{%- endif %}
{%- if favicon %}
<link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
{%- endif %}
{%- endif %}
{%- block linktags %}
{%- if hasdoc('about') %}
<link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" />
{%- endif %}
{%- if hasdoc('genindex') %}
<link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" />
{%- endif %}
{%- if hasdoc('search') %}
<link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" />
{%- endif %}
{%- if hasdoc('copyright') %}
<link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" />
{%- endif %}
<link rel="top" title="{{ docstitle|e }}" href="{{ pathto('index') }}" />
{%- if parents %}
<link rel="up" title="{{ parents[-1].title|striptags|e }}" href="{{ parents[-1].link|e }}" />
{%- endif %}
{%- if next %}
<link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" />
{%- endif %}
{%- if prev %}
<link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" />
{%- endif %}
{%- endblock %}
{%- block extrahead %}
<link href='http://fonts.googleapis.com/css?family=Open+Sans:300,400,700'
rel='stylesheet' type='text/css' />
{%- if not embedded %}
<style type="text/css">
table.right { float: right; margin-left: 20px; }
table.right td { border: 1px solid #ccc; }
</style>
<script type="text/javascript">
// intelligent scrolling of the sidebar content
$(window).scroll(function() {
var sb = $('.sphinxsidebarwrapper');
var win = $(window);
var sbh = sb.height();
var offset = $('.sphinxsidebar').position()['top'];
var wintop = win.scrollTop();
var winbot = wintop + win.innerHeight();
var curtop = sb.position()['top'];
var curbot = curtop + sbh;
// does sidebar fit in window?
if (sbh < win.innerHeight()) {
// yes: easy case -- always keep at the top
sb.css('top', $u.min([$u.max([0, wintop - offset - 10]),
$(document).height() - sbh - 200]));
} else {
// no: only scroll if top/bottom edge of sidebar is at
// top/bottom edge of window
if (curtop > wintop && curbot > winbot) {
sb.css('top', $u.max([wintop - offset - 10, 0]));
} else if (curtop < wintop && curbot < winbot) {
sb.css('top', $u.min([winbot - sbh - offset - 20,
$(document).height() - sbh - 200]));
}
}
});
</script>
{%- endif %}
{% endblock %}
</head>
{%- block header %}{% endblock %}
{%- block relbar1 %}{{ relbar() }}{% endblock %}
{%- block sidebar1 %} {# possible location for sidebar #} {% endblock %}
{%- block sidebar2 %}{{ sidebar() }}{% endblock %}
<body>
{%- block content %}
<div class="document">
{%- block document %}
<div class="documentwrapper">
{%- if render_sidebar %}
<div class="bodywrapper">
{%- endif %}
<div class="body">
{% block body %} {% endblock %}
</div>
<div class="feedback">
<h2>Help and Feedback</h2>
You did not find what you were looking for?
<ul>
{% if theme_lang == 'c' %}
{% endif %}
{% if theme_lang == 'cpp' %}
<li>Try the <a href="http://docs.opencv.org/opencv_cheatsheet.pdf">Cheatsheet</a>.</li>
{% endif %}
{% if theme_lang == 'py' %}
<li>Try the <a href="cookbook.html">Cookbook</a>.</li>
{% endif %}
<li>Ask a question on the <a href="http://answers.opencv.org">Q&A forum</a>.</li>
<li>If you think something is missing or wrong in the documentation,
please file a <a href="http://code.opencv.org">bug report</a>.</li>
</ul>
</div>
{%- if render_sidebar %}
</div>
{%- endif %}
</div>
{%- endblock %}
<div class="clearer"></div>
</div>
{%- endblock %}
{%- block relbar2 %}{{ relbar() }}{% endblock %}
{%- block footer %}
<div class="footer">
{%- if show_copyright %}
{%- if hasdoc('copyright') %}
{% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}
{%- else %}
{% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}.{% endtrans %}
{%- endif %}
{%- endif %}
{%- if last_updated %}
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
{%- endif %}
{%- if show_sphinx %}
{% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx-doc.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}
{%- endif %}
{%- if show_source and has_source and sourcename %}
<a href="{{ pathto('_sources/' + sourcename, true)|e }}" rel="nofollow">{{ _('Show this page source.') }}</a>
{%- endif %}
</div>
{%- endblock %}
</body>
</html>

@@ -0,0 +1,21 @@
{#
basic/searchbox.html
~~~~~~~~~~~~~~~~~~~~
Sphinx sidebar template: quick search box.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
{%- if pagename != "search" and builder != "singlehtml" %}
<div id="searchbox" style="display: none">
<h3>{{ _('Quick search') }}</h3>
<form class="search" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" />
<input type="submit" value="{{ _('Go') }}" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
{%- endif %}

Binary file added (513 B): doc/_themes/sphinxdoc/static/bodybg.png

@@ -0,0 +1,466 @@
/*
* sphinxdoc.css_t
* ~~~~~~~~~~~~~~~
*
* Sphinx stylesheet -- sphinxdoc theme.
*
* :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@import url("basic.css");
/* -- page layout ----------------------------------------------------------- */
body {
font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif;
font-size: 14px;
text-align: center;
background-image: url(bodybg.png);
color: black;
padding: 0;
border-right: 1px solid #0a507a;
border-left: 1px solid #0a507a;
margin: 0 auto;
min-width: 780px;
max-width: 1080px;
}
div.document {
background-color: white;
text-align: left;
}
div.bodywrapper {
margin: 0 240px 0 0;
border-right: 1px solid #0a507a;
}
div.body {
margin: 0;
padding: 0.5em 20px 20px 20px;
}
div.related {
font-size: 1em;
color: white;
}
div.related ul {
background-image: url(relbg.png);
text-align: left;
border-top: 1px solid #002e50;
border-bottom: 1px solid #002e50;
}
div.related li + li {
display: inline;
}
div.related ul li.right {
float: right;
margin-right: 5px;
}
div.related ul li a {
margin: 0;
padding: 0 5px 0 5px;
line-height: 1.75em;
color: #f9f9f0;
text-shadow: 0px 0px 1px rgba(0, 0, 0, 0.5);
}
div.related ul li a:hover {
color: white;
text-shadow: 0px 0px 1px rgba(255, 255, 255, 0.5);
}
div.footer {
background-image: url(footerbg.png);
color: #ccc;
text-shadow: 0 0 .2px rgba(255, 255, 255, 0.8);
padding: 3px 8px 3px 0;
clear: both;
font-size: 0.8em;
text-align: center;
}
div.sphinxsidebarwrapper {
position: relative;
top: 0px;
padding: 0;
}
div.sphinxsidebar {
word-wrap: break-word;
margin: 0;
padding: 0 15px 15px 0;
width: 210px;
float: right;
font-size: 1em;
text-align: left;
}
div.sphinxsidebar .logo {
text-align: center;
}
div.sphinxsidebar .logo img {
width: 150px;
vertical-align: middle;
}
div.sphinxsidebar input {
border: 1px solid #aaa;
font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif;
font-size: 1em;
}
div.sphinxsidebar #searchbox input[type="text"] {
width: 160px;
}
div.sphinxsidebar #searchbox input[type="submit"] {
width: 40px;
}
div.sphinxsidebar h3 {
font-size: 1.5em;
border-top: 1px solid #0a507a;
margin-top: 1em;
margin-bottom: 0.5em;
padding-top: 0.5em;
}
div.sphinxsidebar h4 {
font-size: 1.2em;
margin-bottom: 0;
}
div.sphinxsidebar h3, div.sphinxsidebar h4 {
margin-right: -15px;
margin-left: -15px;
padding-right: 14px;
padding-left: 14px;
color: #333;
font-weight: 300;
/*text-shadow: 0px 0px 0.5px rgba(0, 0, 0, 0.4);*/
}
div.sphinxsidebarwrapper > h3:first-child {
margin-top: 0.5em;
border: none;
}
div.sphinxsidebar h3 a {
color: #333;
}
div.sphinxsidebar ul {
color: #444;
margin-top: 7px;
padding: 0;
line-height: 130%;
}
div.sphinxsidebar ul ul {
margin-left: 20px;
list-style-image: url(listitem.png);
}
/* -- body styles ----------------------------------------------------------- */
p {
margin: 0.8em 0 0.5em 0;
}
a, a tt {
color: #2878a2;
}
a:hover, a tt:hover {
color: #68b8c2;
}
a tt {
border: 0;
}
h1, h2, h3, h4, h5, h6 {
color: #0a507a;
background-color: #e5f5ff;
font-weight: 300;
}
h1 {
margin: 10px 0 0 0;
}
h2 {
margin: 1.em 0 0.2em 0;
padding: 0;
}
h3 {
margin: 1em 0 -0.3em 0;
}
h1 { font-size: 200%; }
h2 { font-size: 160%; }
h3 { font-size: 140%; }
h4 { font-size: 120%; }
h5 { font-size: 110%; }
h6 { font-size: 100%; }
div a, h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
text-decoration: none;
}
div.body h1 a tt, div.body h2 a tt, div.body h3 a tt,
div.body h4 a tt, div.body h5 a tt, div.body h6 a tt {
color: #0a507a !important;
font-size: inherit !important;
}
a.headerlink {
color: #0a507a !important;
font-size: 12px;
margin-left: 6px;
padding: 0 4px 0 4px;
text-decoration: none !important;
float: right;
}
a.headerlink:hover {
background-color: #ccc;
color: white!important;
}
cite, code, tt {
font-family: 'Consolas', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono',
monospace;
font-size: 14px;
min-width: 780px;
max-width: 1080px;
}
tt {
color: #003048;
padding: 1px;
}
tt.descname, tt.descclassname, tt.xref {
font-size: 12px;
}
hr {
border: 1px solid #abc;
margin: 2em;
}
pre {
font-family: 'Consolas', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono',
monospace;
font-size: 13px;
letter-spacing: 0.015em;
line-height: 120%;
padding: 0.5em;
border: 1px solid #ccc;
border-radius: 2px;
background-color: #f8f8f8;
}
pre a {
color: inherit;
text-decoration: none;
}
td.linenos pre {
padding: 0.5em 0;
}
td.code pre {
max-width: 740px;
overflow: auto;
overflow-y: hidden; /* fixes display issues on Chrome browsers */
}
div.quotebar {
background-color: #f8f8f8;
max-width: 250px;
float: right;
padding: 0px 7px;
border: 1px solid #ccc;
margin-left: 1em;
}
div.topic {
background-color: #f8f8f8;
}
table {
border-collapse: collapse;
margin: 0 -0.5em 0 -0.5em;
}
table td, table th {
padding: 0.2em 0.5em 0.2em 0.5em;
}
div.note {
background-color: #eee;
border: 1px solid #ccc;
}
div.seealso {
background-color: #ffc;
border: 1px solid #ff6;
}
div.topic {
background-color: #eee;
}
div.warning {
background-color: #ffe4e4;
border: 1px solid #f66;
}
div.admonition ul li, div.warning ul li,
div.admonition ol li, div.warning ol li {
text-align: left;
}
div.admonition p.admonition-title + p {
display: inline;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ":";
}
/* ------------------ our styles ----------------*/
div.body p, div.body dd, div.body li {
text-align: justify;
line-height: 130%;
margin-top: 1em;
margin-bottom: 1em;
}
div.toctree-wrapper li, ul.simple li {
margin:0;
}
/*a.toc-backref {
}*/
div.feedback {
/*background-color: #;*/
/*color: #;*/
padding: 20px 20px 30px 20px;
border-top: 1px solid #002e50;
}
div.feedback h2 {
margin: 10px 0 10px 0;
}
div.feedback a {
/*color: #;*/
font-weight: bold;
}
div.math p {
margin-top: 10px;
margin-bottom: 10px;
}
dl.function > dt:first-child {
margin-bottom: 7px;
}
dl.cfunction > dt:first-child {
margin-bottom: 7px;
color: #8080B0;
}
dl.cfunction > dt:first-child tt.descname {
color: #8080B0;
}
dl.pyfunction > dt:first-child {
margin-bottom: 7px;
}
dl.jfunction > dt:first-child {
margin-bottom: 7px;
}
table.field-list {
margin-top: 20px;
}
em.menuselection, em.guilabel {
font-family: 'Lucida Sans', 'Lucida Sans Unicode', 'Lucida Grande', Verdana,
Arial, Helvetica, sans-serif;
}
.enumeratevisibleitemswithsquare ul {
list-style: square;
margin-bottom: 0px;
margin-left: 0px;
margin-right: 0px;
margin-top: 0px;
}
.enumeratevisibleitemswithsquare li {
margin-bottom: 0.2em;
margin-left: 0px;
margin-right: 0px;
margin-top: 0.2em;
}
.enumeratevisibleitemswithsquare p {
margin-bottom: 0pt;
margin-top: 1pt;
}
.enumeratevisibleitemswithsquare dl {
margin-bottom: 0px;
margin-left: 0px;
margin-right: 0px;
margin-top: 0px;
}
.toctableopencv {
width: 100% ;
table-layout: fixed;
}
.toctableopencv colgroup col:first-child {
width: 100pt !important;
max-width: 100pt !important;
min-width: 100pt !important;
}
.toctableopencv colgroup col:nth-child(2) {
width: 100% !important;
}
div.body ul.search li {
text-align: left;
}
div.linenodiv {
min-width: 1em;
text-align: right;
}

Binary file added (220 B): doc/_themes/sphinxdoc/static/footerbg.png

Binary file added (230 B): doc/_themes/sphinxdoc/static/headerbg.png

Binary file added (207 B): doc/_themes/sphinxdoc/static/listitem.png

Binary file added (223 B): doc/_themes/sphinxdoc/static/relbg.png

@@ -0,0 +1,4 @@
[theme]
inherit = basic
stylesheet = default.css
pygments_style = sphinx

@@ -114,7 +114,7 @@ todo_include_todos=True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'blue'
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -133,7 +133,7 @@ html_theme_path = ['_themes']
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'opencv-logo-white.png'
html_logo = 'opencv-logo2.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32

@@ -48,7 +48,7 @@ Below code snippet shows a simple procedure to create disparity map.
plt.imshow(disparity,'gray')
plt.show()
Below image contains the original image (left) and its disparity map (right). As you can see, result is contaminated with high degree of noise. By adjusting the values of numDisparities and blockSize, you can get more better result.
Below image contains the original image (left) and its disparity map (right). As you can see, result is contaminated with high degree of noise. By adjusting the values of numDisparities and blockSize, you can get a better result.
.. image:: images/disparity_map.jpg
:alt: Disparity Map

@@ -49,7 +49,7 @@ You can modify the pixel values the same way.
.. warning:: Numpy is a optimized library for fast array calculations. So simply accessing each and every pixel values and modifying it will be very slow and it is discouraged.
.. note:: Above mentioned method is normally used for selecting a region of array, say first 5 rows and last 3 columns like that. For individual pixel access, Numpy array methods, ``array.item()`` and ``array.itemset()`` is considered to be more better. But it always returns a scalar. So if you want to access all B,G,R values, you need to call ``array.item()`` separately for all.
.. note:: Above mentioned method is normally used for selecting a region of array, say first 5 rows and last 3 columns like that. For individual pixel access, Numpy array methods, ``array.item()`` and ``array.itemset()`` is considered to be better. But it always returns a scalar. So if you want to access all B,G,R values, you need to call ``array.item()`` separately for all.
Better pixel accessing and editing method :

@@ -75,7 +75,7 @@ Measuring Performance in IPython
Sometimes you may need to compare the performance of two similar operations. IPython gives you a magic command ``%timeit`` to perform this. It runs the code several times to get more accurate results. Once again, they are suitable to measure single line codes.
For example, do you know which of the following addition operation is more better, ``x = 5; y = x**2``, ``x = 5; y = x*x``, ``x = np.uint8([5]); y = x*x`` or ``y = np.square(x)`` ? We will find it with %timeit in IPython shell.
For example, do you know which of the following addition operation is better, ``x = 5; y = x**2``, ``x = 5; y = x*x``, ``x = np.uint8([5]); y = x*x`` or ``y = np.square(x)`` ? We will find it with %timeit in IPython shell.
::
In [10]: x = 5

@@ -29,7 +29,7 @@ Image is very simple. At the top of image, six small image patches are given. Qu
A and B are flat surfaces, and they are spread in a lot of area. It is difficult to find the exact location of these patches.
C and D are much more simpler. They are edges of the building. You can find an approximate location, but exact location is still difficult. It is because, along the edge, it is same everywhere. Normal to the edge, it is different. So edge is much more better feature compared to flat area, but not good enough (It is good in jigsaw puzzle for comparing continuity of edges).
C and D are much more simpler. They are edges of the building. You can find an approximate location, but exact location is still difficult. It is because, along the edge, it is same everywhere. Normal to the edge, it is different. So edge is a much better feature compared to flat area, but not good enough (It is good in jigsaw puzzle for comparing continuity of edges).
Finally, E and F are some corners of the building. And they can be easily found out. Because at corners, wherever you move this patch, it will look different. So they can be considered as a good feature. So now we move into more simpler (and widely used image) for better understanding.

@@ -110,7 +110,7 @@ Now I want to apply U-SURF, so that it won't find the orientation.
>>> plt.imshow(img2),plt.show()
See the results below. All the orientations are shown in same direction. It is more faster than previous. If you are working on cases where orientation is not a problem (like panorama stitching) etc, this is more better.
See the results below. All the orientations are shown in same direction. It is more faster than previous. If you are working on cases where orientation is not a problem (like panorama stitching) etc, this is better.
.. image:: images/surf_kp2.jpg
:alt: Upright-SURF

@@ -69,7 +69,7 @@ To draw a polygon, first you need coordinates of vertices. Make those points int
.. Note:: If third argument is ``False``, you will get a polylines joining all the points, not a closed shape.
.. Note:: ``cv2.polylines()`` can be used to draw multiple lines. Just create a list of all the lines you want to draw and pass it to the function. All lines will be drawn individually. It is more better and faster way to draw a group of lines than calling ``cv2.line()`` for each line.
.. Note:: ``cv2.polylines()`` can be used to draw multiple lines. Just create a list of all the lines you want to draw and pass it to the function. All lines will be drawn individually. It is a much better and faster way to draw a group of lines than calling ``cv2.line()`` for each line.
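The same batching is available from the C++ API. A minimal sketch with made-up points (the tutorial itself targets Python's ``cv2``; this is an illustration only, not part of the diff)::

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    #include <vector>

    int main()
    {
        cv::Mat img = cv::Mat::zeros(400, 400, CV_8UC3);
        // Two open polylines drawn with one call instead of one
        // cv::line() call per segment.
        std::vector<std::vector<cv::Point> > pts;
        pts.push_back({cv::Point(10, 10),   cv::Point(100, 50),  cv::Point(50, 120)});
        pts.push_back({cv::Point(200, 200), cv::Point(300, 220), cv::Point(250, 350)});
        cv::polylines(img, pts, false /* isClosed */, cv::Scalar(0, 255, 255), 2);
        return 0;
    }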
Adding Text to Images:
------------------------

@@ -48,7 +48,7 @@ Creating mouse callback function has a specific format which is same everywhere.
More Advanced Demo
===================
Now we go for much more better application. In this, we draw either rectangles or circles (depending on the mode we select) by dragging the mouse like we do in Paint application. So our mouse callback function has two parts, one to draw rectangle and other to draw the circles. This specific example will be really helpful in creating and understanding some interactive applications like object tracking, image segmentation etc.
Now we go for a much better application. In this, we draw either rectangles or circles (depending on the mode we select) by dragging the mouse like we do in Paint application. So our mouse callback function has two parts, one to draw rectangle and other to draw the circles. This specific example will be really helpful in creating and understanding some interactive applications like object tracking, image segmentation etc.
::
import cv2

@@ -64,7 +64,7 @@ The result we get is a two dimensional array of size 180x256. So we can show the
Method - 2 : Using Matplotlib
------------------------------
We can use **matplotlib.pyplot.imshow()** function to plot 2D histogram with different color maps. It gives us much more better idea about the different pixel density. But this also, doesn't gives us idea what color is there on a first look, unless you know the Hue values of different colors. Still I prefer this method. It is simple and better.
We can use **matplotlib.pyplot.imshow()** function to plot 2D histogram with different color maps. It gives us a much better idea about the different pixel density. But this also, doesn't gives us idea what color is there on a first look, unless you know the Hue values of different colors. Still I prefer this method. It is simple and better.
.. note:: While using this function, remember, interpolation flag should be ``nearest`` for better results.

@@ -15,7 +15,7 @@ It was proposed by **Michael J. Swain , Dana H. Ballard** in their paper **Index
**What is it actually in simple words?** It is used for image segmentation or finding objects of interest in an image. In simple words, it creates an image of the same size (but single channel) as that of our input image, where each pixel corresponds to the probability of that pixel belonging to our object. In more simpler worlds, the output image will have our object of interest in more white compared to remaining part. Well, that is an intuitive explanation. (I can't make it more simpler). Histogram Backprojection is used with camshift algorithm etc.
**How do we do it ?** We create a histogram of an image containing our object of interest (in our case, the ground, leaving player and other things). The object should fill the image as far as possible for better results. And a color histogram is preferred over grayscale histogram, because color of the object is more better way to define the object than its grayscale intensity. We then "back-project" this histogram over our test image where we need to find the object, ie in other words, we calculate the probability of every pixel belonging to the ground and show it. The resulting output on proper thresholding gives us the ground alone.
**How do we do it ?** We create a histogram of an image containing our object of interest (in our case, the ground, leaving player and other things). The object should fill the image as far as possible for better results. And a color histogram is preferred over grayscale histogram, because color of the object is a better way to define the object than its grayscale intensity. We then "back-project" this histogram over our test image where we need to find the object, ie in other words, we calculate the probability of every pixel belonging to the ground and show it. The resulting output on proper thresholding gives us the ground alone.
Algorithm in Numpy
====================

@@ -37,7 +37,7 @@ Now let's see it in OpenCV.
kNN in OpenCV
===============
We will do a simple example here, with two families (classes), just like above. Then in the next chapter, we will do much more better example.
We will do a simple example here, with two families (classes), just like above. Then in the next chapter, we will do an even better example.
So here, we label the Red family as **Class-0** (so denoted by 0) and Blue family as **Class-1** (denoted by 1). We create 25 families or 25 training data, and label them either Class-0 or Class-1. We do all these with the help of Random Number Generator in Numpy.

Binary file changed (14 KiB → 7.4 KiB): doc/py_tutorials/py_objdetect/py_face_detection/images/haar.png

@@ -9,7 +9,7 @@ Video Analysis
.. cssclass:: toctableopencv
=========== ======================================================
|vdo_1| We have already seen an example of color-based tracking. It is simpler. This time, we see much more better algorithms like "Meanshift", and its upgraded version, "Camshift" to find and track them.
|vdo_1| We have already seen an example of color-based tracking. It is simpler. This time, we see significantly better algorithms like "Meanshift", and its upgraded version, "Camshift" to find and track them.
=========== ======================================================

Binary file changed (12 KiB → 8.1 KiB): doc/tutorials/images/opencv_ios.png

Binary file changed (12 KiB → 8.1 KiB): doc/tutorials/introduction/table_of_content_introduction/images/opencv_ios.png

@@ -90,25 +90,24 @@ A full list, for the latest version would contain:
.. code-block:: bash
opencv_calib3d249d.lib
opencv_contrib249d.lib
opencv_core249d.lib
opencv_features2d249d.lib
opencv_flann249d.lib
opencv_gpu249d.lib
opencv_highgui249d.lib
opencv_imgproc249d.lib
opencv_legacy249d.lib
opencv_ml249d.lib
opencv_nonfree249d.lib
opencv_objdetect249d.lib
opencv_ocl249d.lib
opencv_photo249d.lib
opencv_stitching249d.lib
opencv_superres249d.lib
opencv_ts249d.lib
opencv_video249d.lib
opencv_videostab249d.lib
opencv_calib3d300d.lib
opencv_core300d.lib
opencv_features2d300d.lib
opencv_flann300d.lib
opencv_highgui300d.lib
opencv_imgcodecs300d.lib
opencv_imgproc300d.lib
opencv_ml300d.lib
opencv_objdetect300d.lib
opencv_photo300d.lib
opencv_shape300d.lib
opencv_stitching300d.lib
opencv_superres300d.lib
opencv_ts300d.lib
opencv_video300d.lib
opencv_videoio300d.lib
opencv_videostab300d.lib
The letter *d* at the end just indicates that these are the libraries required for the debug. Now click ok to save and do the same with a new property inside the Release rule section. Make sure to omit the *d* letters from the library names and to save the property sheets with the save icon above them.

Binary file changed (104 KiB → 76 KiB): doc/tutorials/ios/hello/images/header_directive.png

Binary file changed (88 KiB → 65 KiB): doc/tutorials/ios/hello/images/linking_opencv_ios.png

Binary file changed (100 KiB → 88 KiB): doc/tutorials/ios/hello/images/output.png

Binary file changed (160 KiB → 118 KiB): doc/tutorials/ios/hello/images/view_did_load.png

Binary file changed (29 KiB → 28 KiB): doc/tutorials/ios/table_of_content_ios/images/image_effects.png

Binary file changed (2.5 KiB → 1.1 KiB): doc/tutorials/ios/table_of_content_ios/images/intro.png

Binary file changed (85 KiB → 61 KiB): doc/tutorials/ios/video_processing/images/xcode_hello_ios_framework_drag_and_drop.png

Binary file changed (183 KiB → 144 KiB): doc/tutorials/ios/video_processing/images/xcode_hello_ios_frameworks_add_dependencies.png

Binary file changed (24 KiB → 17 KiB): doc/tutorials/ios/video_processing/images/xcode_hello_ios_viewcontroller_layout.png

Binary file changed (29 KiB → 28 KiB): doc/tutorials/viz/table_of_content_viz/images/image_effects.png

Binary file changed (2.5 KiB → 1.1 KiB): doc/tutorials/viz/table_of_content_viz/images/intro.png

@@ -580,7 +580,7 @@ Finds an object pose from 3D-2D point correspondences.
:param tvec: Output translation vector.
:param useExtrinsicGuess: Parameter used for ITERATIVE. If true (1), the function uses the provided ``rvec`` and ``tvec`` values as initial approximations of the rotation and translation vectors, respectively, and further optimizes them.
:param useExtrinsicGuess: Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided ``rvec`` and ``tvec`` values as initial approximations of the rotation and translation vectors, respectively, and further optimizes them.
:param flags: Method for solving a PnP problem:
@@ -616,7 +616,7 @@ Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
:param tvec: Output translation vector.
:param useExtrinsicGuess: Parameter used for ITERATIVE. If true (1), the function uses the provided ``rvec`` and ``tvec`` values as initial approximations of the rotation and translation vectors, respectively, and further optimizes them.
:param useExtrinsicGuess: Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided ``rvec`` and ``tvec`` values as initial approximations of the rotation and translation vectors, respectively, and further optimizes them.
:param iterationsCount: Number of iterations.
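For context, a minimal sketch of how ``useExtrinsicGuess`` is typically wired up (hypothetical helper; the point data and camera matrix are assumed to come from the caller)::

    #include <opencv2/calib3d.hpp>
    #include <vector>

    // rvec/tvec carry the previous pose estimate in and the refined pose out.
    void refinePose(const std::vector<cv::Point3f>& objectPoints,
                    const std::vector<cv::Point2f>& imagePoints,
                    const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                    cv::Mat& rvec, cv::Mat& tvec)
    {
        // With useExtrinsicGuess = true, SOLVEPNP_ITERATIVE starts its
        // optimization from the provided rvec/tvec instead of computing
        // a fresh initial estimate.
        cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                     rvec, tvec, true /* useExtrinsicGuess */,
                     cv::SOLVEPNP_ITERATIVE);
    }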

@@ -96,9 +96,11 @@ operation for each of the coordinates. Besides the class members listed in the d
pt1 = pt2 - pt3;
pt1 = pt2 * a;
pt1 = a * pt2;
pt1 = pt2 / a;
pt1 += pt2;
pt1 -= pt2;
pt1 *= a;
pt1 /= a;
double value = norm(pt); // L2 norm
pt1 == pt2;
pt1 != pt2;
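A minimal sketch exercising these operators, including the division this diff adds (assumes OpenCV 3.x headers; the values are arbitrary)::

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        cv::Point2f pt2(4.f, 6.f), pt3(1.f, 2.f);
        cv::Point2f pt1 = pt2 - pt3;    // (3, 4)
        pt1 = pt2 / 2.f;                // newly added operator/ : (2, 3)
        pt1 /= 2;                       // newly added operator/= with int divisor: (1, 1.5)
        double value = cv::norm(pt2);   // L2 norm: sqrt(4*4 + 6*6)
        std::cout << pt1 << " " << value << std::endl;
        return 0;
    }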

@@ -747,93 +747,6 @@ public:
int minusStep, plusStep;
};
/*!
Fast Nearest Neighbor Search Class.
The class implements D. Lowe BBF (Best-Bin-First) algorithm for the last
approximate (or accurate) nearest neighbor search in multi-dimensional spaces.
First, a set of vectors is passed to KDTree::KDTree() constructor
or KDTree::build() method, where it is reordered.
Then arbitrary vectors can be passed to KDTree::findNearest() methods, which
find the K nearest neighbors among the vectors from the initial set.
The user can balance between the speed and accuracy of the search by varying Emax
parameter, which is the number of leaves that the algorithm checks.
The larger parameter values yield more accurate results at the expense of lower processing speed.
\code
KDTree T(points, false);
const int K = 3, Emax = INT_MAX;
int idx[K];
float dist[K];
T.findNearest(query_vec, K, Emax, idx, 0, dist);
CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]);
\endcode
*/
class CV_EXPORTS_W KDTree
{
public:
/*!
The node of the search tree.
*/
struct Node
{
Node() : idx(-1), left(-1), right(-1), boundary(0.f) {}
Node(int _idx, int _left, int _right, float _boundary)
: idx(_idx), left(_left), right(_right), boundary(_boundary) {}
//! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point)
int idx;
//! node indices of the left and the right branches
int left, right;
//! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right
float boundary;
};
//! the default constructor
CV_WRAP KDTree();
//! the full constructor that builds the search tree
CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints = false);
//! the full constructor that builds the search tree
CV_WRAP KDTree(InputArray points, InputArray _labels,
bool copyAndReorderPoints = false);
//! builds the search tree
CV_WRAP void build(InputArray points, bool copyAndReorderPoints = false);
//! builds the search tree
CV_WRAP void build(InputArray points, InputArray labels,
bool copyAndReorderPoints = false);
//! finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves
CV_WRAP int findNearest(InputArray vec, int K, int Emax,
OutputArray neighborsIdx,
OutputArray neighbors = noArray(),
OutputArray dist = noArray(),
OutputArray labels = noArray()) const;
//! finds all the points from the initial set that belong to the specified box
CV_WRAP void findOrthoRange(InputArray minBounds,
InputArray maxBounds,
OutputArray neighborsIdx,
OutputArray neighbors = noArray(),
OutputArray labels = noArray()) const;
//! returns vectors with the specified indices
CV_WRAP void getPoints(InputArray idx, OutputArray pts,
OutputArray labels = noArray()) const;
//! return a vector with the specified index
const float* getPoint(int ptidx, int* label = 0) const;
//! returns the search space dimensionality
CV_WRAP int dims() const;
std::vector<Node> nodes; //!< all the tree nodes
CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set.
CV_PROP std::vector<int> labels; //!< the parallel array of labels.
CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it
CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it
};
/*!
Random Number Generator

@@ -568,6 +568,41 @@ CV_EXPORTS int getIppStatus();
CV_EXPORTS String getIppErrorLocation();
} // ipp
#if CV_NEON
inline int32x2_t cv_vrnd_s32_f32(float32x2_t v)
{
static int32x2_t v_sign = vdup_n_s32(1 << 31),
v_05 = vreinterpret_s32_f32(vdup_n_f32(0.5f));
int32x2_t v_addition = vorr_s32(v_05, vand_s32(v_sign, vreinterpret_s32_f32(v)));
return vcvt_s32_f32(vadd_f32(v, vreinterpret_f32_s32(v_addition)));
}
inline int32x4_t cv_vrndq_s32_f32(float32x4_t v)
{
static int32x4_t v_sign = vdupq_n_s32(1 << 31),
v_05 = vreinterpretq_s32_f32(vdupq_n_f32(0.5f));
int32x4_t v_addition = vorrq_s32(v_05, vandq_s32(v_sign, vreinterpretq_s32_f32(v)));
return vcvtq_s32_f32(vaddq_f32(v, vreinterpretq_f32_s32(v_addition)));
}
inline uint32x2_t cv_vrnd_u32_f32(float32x2_t v)
{
static float32x2_t v_05 = vdup_n_f32(0.5f);
return vcvt_u32_f32(vadd_f32(v, v_05));
}
inline uint32x4_t cv_vrndq_u32_f32(float32x4_t v)
{
static float32x4_t v_05 = vdupq_n_f32(0.5f);
return vcvtq_u32_f32(vaddq_f32(v, v_05));
}
#endif
} // cv
#endif //__OPENCV_CORE_BASE_HPP__
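The signed helpers above OR the IEEE sign bit of ``v`` into the bit pattern of 0.5, so each lane's addend is +0.5 or -0.5 matching its sign; adding it and truncating with ``vcvt`` rounds half away from zero. The unsigned helpers simply add 0.5. A scalar model of the same rounding rule (plain C++, no NEON required; an editor's illustration, not part of the diff)::

    #include <cassert>
    #include <cmath>

    // Scalar equivalent of cv_vrndq_s32_f32: give 0.5 the sign of v,
    // add, then truncate -- i.e. round half away from zero.
    static int round_half_away(float v)
    {
        return static_cast<int>(v + std::copysign(0.5f, v));
    }

    int main()
    {
        assert(round_half_away( 2.5f) ==  3);
        assert(round_half_away(-2.5f) == -3);
        assert(round_half_away( 2.4f) ==  2);
        assert(round_half_away(-2.4f) == -2);
        return 0;
    }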

@@ -994,6 +994,30 @@ Point_<_Tp>& operator *= (Point_<_Tp>& a, double b)
return a;
}
template<typename _Tp> static inline
Point_<_Tp>& operator /= (Point_<_Tp>& a, int b)
{
a.x = saturate_cast<_Tp>(a.x / b);
a.y = saturate_cast<_Tp>(a.y / b);
return a;
}
template<typename _Tp> static inline
Point_<_Tp>& operator /= (Point_<_Tp>& a, float b)
{
a.x = saturate_cast<_Tp>(a.x / b);
a.y = saturate_cast<_Tp>(a.y / b);
return a;
}
template<typename _Tp> static inline
Point_<_Tp>& operator /= (Point_<_Tp>& a, double b)
{
a.x = saturate_cast<_Tp>(a.x / b);
a.y = saturate_cast<_Tp>(a.y / b);
return a;
}
template<typename _Tp> static inline
double norm(const Point_<_Tp>& pt)
{
@@ -1080,6 +1104,30 @@ Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b)
return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]);
}
template<typename _Tp> static inline
Point_<_Tp> operator / (const Point_<_Tp>& a, int b)
{
Point_<_Tp> tmp(a);
tmp /= b;
return tmp;
}
template<typename _Tp> static inline
Point_<_Tp> operator / (const Point_<_Tp>& a, float b)
{
Point_<_Tp> tmp(a);
tmp /= b;
return tmp;
}
template<typename _Tp> static inline
Point_<_Tp> operator / (const Point_<_Tp>& a, double b)
{
Point_<_Tp> tmp(a);
tmp /= b;
return tmp;
}
//////////////////////////////// 3D Point ///////////////////////////////
@@ -1187,6 +1235,33 @@ Point3_<_Tp>& operator *= (Point3_<_Tp>& a, double b)
return a;
}
template<typename _Tp> static inline
Point3_<_Tp>& operator /= (Point3_<_Tp>& a, int b)
{
a.x = saturate_cast<_Tp>(a.x / b);
a.y = saturate_cast<_Tp>(a.y / b);
a.z = saturate_cast<_Tp>(a.z / b);
return a;
}
template<typename _Tp> static inline
Point3_<_Tp>& operator /= (Point3_<_Tp>& a, float b)
{
a.x = saturate_cast<_Tp>(a.x / b);
a.y = saturate_cast<_Tp>(a.y / b);
a.z = saturate_cast<_Tp>(a.z / b);
return a;
}
template<typename _Tp> static inline
Point3_<_Tp>& operator /= (Point3_<_Tp>& a, double b)
{
a.x = saturate_cast<_Tp>(a.x / b);
a.y = saturate_cast<_Tp>(a.y / b);
a.z = saturate_cast<_Tp>(a.z / b);
return a;
}
template<typename _Tp> static inline
double norm(const Point3_<_Tp>& pt)
{
@@ -1272,6 +1347,30 @@ Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b)
return a * Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1);
}
template<typename _Tp> static inline
Point3_<_Tp> operator / (const Point3_<_Tp>& a, int b)
{
Point3_<_Tp> tmp(a);
tmp /= b;
return tmp;
}
template<typename _Tp> static inline
Point3_<_Tp> operator / (const Point3_<_Tp>& a, float b)
{
Point3_<_Tp> tmp(a);
tmp /= b;
return tmp;
}
template<typename _Tp> static inline
Point3_<_Tp> operator / (const Point3_<_Tp>& a, double b)
{
Point3_<_Tp> tmp(a);
tmp /= b;
return tmp;
}
////////////////////////////////// Size /////////////////////////////////

@@ -1244,6 +1244,183 @@ struct cvtScaleAbs_SIMD<float, uchar, float>
#elif CV_NEON
template <>
struct cvtScaleAbs_SIMD<uchar, uchar, float>
{
int operator () (const uchar * src, uchar * dst, int width,
float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift);
for ( ; x <= width - 16; x += 16)
{
uint8x16_t v_src = vld1q_u8(src + x);
uint16x8_t v_half = vmovl_u8(vget_low_u8(v_src));
uint32x4_t v_quat = vmovl_u16(vget_low_u16(v_half));
float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_u32(v_quat), scale);
v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));
v_quat = vmovl_u16(vget_high_u16(v_half));
float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_u32(v_quat), scale);
v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));
v_half = vmovl_u8(vget_high_u8(v_src));
v_quat = vmovl_u16(vget_low_u16(v_half));
float32x4_t v_dst_2 = vmulq_n_f32(vcvtq_f32_u32(v_quat), scale);
v_dst_2 = vabsq_f32(vaddq_f32(v_dst_2, v_shift));
v_quat = vmovl_u16(vget_high_u16(v_half));
float32x4_t v_dst_3 = vmulq_n_f32(vcvtq_f32_u32(v_quat), scale);
v_dst_3 = vabsq_f32(vaddq_f32(v_dst_3, v_shift));
uint16x8_t v_dsti_0 = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_0)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst_1)));
uint16x8_t v_dsti_1 = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_2)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst_3)));
vst1q_u8(dst + x, vcombine_u8(vqmovn_u16(v_dsti_0), vqmovn_u16(v_dsti_1)));
}
return x;
}
};
template <>
struct cvtScaleAbs_SIMD<schar, uchar, float>
{
int operator () (const schar * src, uchar * dst, int width,
float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift);
for ( ; x <= width - 16; x += 16)
{
int8x16_t v_src = vld1q_s8(src + x);
int16x8_t v_half = vmovl_s8(vget_low_s8(v_src));
int32x4_t v_quat = vmovl_s16(vget_low_s16(v_half));
float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_s32(v_quat), scale);
v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));
v_quat = vmovl_s16(vget_high_s16(v_half));
float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_s32(v_quat), scale);
v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));
v_half = vmovl_s8(vget_high_s8(v_src));
v_quat = vmovl_s16(vget_low_s16(v_half));
float32x4_t v_dst_2 = vmulq_n_f32(vcvtq_f32_s32(v_quat), scale);
v_dst_2 = vabsq_f32(vaddq_f32(v_dst_2, v_shift));
v_quat = vmovl_s16(vget_high_s16(v_half));
float32x4_t v_dst_3 = vmulq_n_f32(vcvtq_f32_s32(v_quat), scale);
v_dst_3 = vabsq_f32(vaddq_f32(v_dst_3, v_shift));
uint16x8_t v_dsti_0 = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_0)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst_1)));
uint16x8_t v_dsti_1 = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_2)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst_3)));
vst1q_u8(dst + x, vcombine_u8(vqmovn_u16(v_dsti_0), vqmovn_u16(v_dsti_1)));
}
return x;
}
};
template <>
struct cvtScaleAbs_SIMD<ushort, uchar, float>
{
int operator () (const ushort * src, uchar * dst, int width,
float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
uint32x4_t v_half = vmovl_u16(vget_low_u16(v_src));
float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_u32(v_half), scale);
v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));
v_half = vmovl_u16(vget_high_u16(v_src));
float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_u32(v_half), scale);
v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_0)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst_1)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScaleAbs_SIMD<short, uchar, float>
{
int operator () (const short * src, uchar * dst, int width,
float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
int32x4_t v_half = vmovl_s16(vget_low_s16(v_src));
float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_s32(v_half), scale);
v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));
v_half = vmovl_s16(vget_high_s16(v_src));
float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_s32(v_half), scale);
v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_0)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst_1)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScaleAbs_SIMD<int, uchar, float>
{
int operator () (const int * src, uchar * dst, int width,
float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_s32(vld1q_s32(src + x)), scale);
v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));
uint16x4_t v_dsti_0 = vqmovn_u32(cv_vrndq_u32_f32(v_dst_0));
float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), scale);
v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));
uint16x4_t v_dsti_1 = vqmovn_u32(cv_vrndq_u32_f32(v_dst_1));
uint16x8_t v_dst = vcombine_u16(v_dsti_0, v_dsti_1);
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
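// [Editor's sketch, not part of this diff; assumes <cmath> and <algorithm>;
// 'uchar' is OpenCV's unsigned char alias.] Every cvtScaleAbs_SIMD
// specialization above implements the same per-element contract as this
// scalar reference, and returns how many elements of the row it handled:
static inline uchar cvtScaleAbsScalarRef(float src, float scale, float shift)
{
    float v = std::fabs(src * scale + shift);     // |src*scale + shift|
    int r = static_cast<int>(v + 0.5f);           // v >= 0, so +0.5 rounds to nearest
    return static_cast<uchar>(std::min(r, 255));  // saturate to the uchar range
}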
template <>
struct cvtScaleAbs_SIMD<float, uchar, float>
{
@@ -1257,11 +1434,11 @@ struct cvtScaleAbs_SIMD<float, uchar, float>
{
float32x4_t v_dst_0 = vmulq_n_f32(vld1q_f32(src + x), scale);
v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));
uint16x4_t v_dsti_0 = vqmovun_s32(vcvtq_s32_f32(v_dst_0));
uint16x4_t v_dsti_0 = vqmovn_u32(cv_vrndq_u32_f32(v_dst_0));
float32x4_t v_dst_1 = vmulq_n_f32(vld1q_f32(src + x + 4), scale);
v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));
uint16x4_t v_dsti_1 = vqmovun_s32(vcvtq_s32_f32(v_dst_1));
uint16x4_t v_dsti_1 = vqmovn_u32(cv_vrndq_u32_f32(v_dst_1));
uint16x8_t v_dst = vcombine_u16(v_dsti_0, v_dsti_1);
vst1_u8(dst + x, vqmovn_u16(v_dst));
@@ -1314,6 +1491,7 @@ cvtScale_( const T* src, size_t sstep,
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
#if CV_ENABLE_UNROLLED
for( ; x <= size.width - 4; x += 4 )
{
@@ -1427,16 +1605,493 @@ cvtScale_<short, int, float>( const short* src, size_t sstep,
}
}
template <typename T, typename DT>
struct Cvt_SIMD
{
int operator() (const T *, DT *, int) const
{
return 0;
}
};
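// [Editor's sketch, not part of this diff.] The generic functor above reports
// 0 elements converted, so the caller's scalar loop does all the work; each
// NEON specialization below converts a vectorized prefix of the row and
// returns how far it got, leaving only the tail to the scalar loop. A
// hypothetical driver illustrating the pattern (OpenCV's real cvt_ loop
// additionally goes through saturate_cast):
template <typename T, typename DT>
static void convertRowSketch(const T* src, DT* dst, int width)
{
    int x = Cvt_SIMD<T, DT>()(src, dst, width); // vectorized prefix, or 0
    for (; x < width; ++x)                      // scalar tail
        dst[x] = static_cast<DT>(src[x]);
}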
#if CV_NEON
// from uchar
template <>
struct Cvt_SIMD<uchar, schar>
{
int operator() (const uchar * src, schar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
vst1_s8(dst + x, vqmovn_s16(vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + x)))));
return x;
}
};
template <>
struct Cvt_SIMD<uchar, ushort>
{
int operator() (const uchar * src, ushort * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
vst1q_u16(dst + x, vmovl_u8(vld1_u8(src + x)));
return x;
}
};
template <>
struct Cvt_SIMD<uchar, short>
{
int operator() (const uchar * src, short * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
vst1q_s16(dst + x, vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + x))));
return x;
}
};
template <>
struct Cvt_SIMD<uchar, int>
{
int operator() (const uchar * src, int * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
vst1q_s32(dst + x, vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src))));
vst1q_s32(dst + x + 4, vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src))));
}
return x;
}
};
template <>
struct Cvt_SIMD<uchar, float>
{
int operator() (const uchar * src, float * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
vst1q_f32(dst + x, vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))));
vst1q_f32(dst + x + 4, vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))));
}
return x;
}
};
// from schar
template <>
struct Cvt_SIMD<schar, uchar>
{
int operator() (const schar * src, uchar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
vst1_u8(dst + x, vqmovun_s16(vmovl_s8(vld1_s8(src + x))));
return x;
}
};
template <>
struct Cvt_SIMD<schar, short>
{
int operator() (const schar * src, short * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
vst1q_s16(dst + x, vmovl_s8(vld1_s8(src + x)));
return x;
}
};
template <>
struct Cvt_SIMD<schar, int>
{
int operator() (const schar * src, int * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
vst1q_s32(dst + x, vmovl_s16(vget_low_s16(v_src)));
vst1q_s32(dst + x + 4, vmovl_s16(vget_high_s16(v_src)));
}
return x;
}
};
template <>
struct Cvt_SIMD<schar, float>
{
int operator() (const schar * src, float * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
vst1q_f32(dst + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))));
vst1q_f32(dst + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))));
}
return x;
}
};
// from ushort
template <>
struct Cvt_SIMD<ushort, uchar>
{
int operator() (const ushort * src, uchar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 16; x += 16)
{
uint16x8_t v_src1 = vld1q_u16(src + x), v_src2 = vld1q_u16(src + x + 8);
vst1q_u8(dst + x, vcombine_u8(vqmovn_u16(v_src1), vqmovn_u16(v_src2)));
}
return x;
}
};
template <>
struct Cvt_SIMD<ushort, int>
{
int operator() (const ushort * src, int * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
vst1q_s32(dst + x, vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src))));
vst1q_s32(dst + x + 4, vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src))));
}
return x;
}
};
template <>
struct Cvt_SIMD<ushort, float>
{
int operator() (const ushort * src, float * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
vst1q_f32(dst + x, vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))));
vst1q_f32(dst + x + 4, vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))));
}
return x;
}
};
// from short
template <>
struct Cvt_SIMD<short, uchar>
{
int operator() (const short * src, uchar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 16; x += 16)
{
int16x8_t v_src1 = vld1q_s16(src + x), v_src2 = vld1q_s16(src + x + 8);
vst1q_u8(dst + x, vcombine_u8(vqmovun_s16(v_src1), vqmovun_s16(v_src2)));
}
return x;
}
};
template <>
struct Cvt_SIMD<short, schar>
{
int operator() (const short * src, schar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 16; x += 16)
{
int16x8_t v_src1 = vld1q_s16(src + x), v_src2 = vld1q_s16(src + x + 8);
vst1q_s8(dst + x, vcombine_s8(vqmovn_s16(v_src1), vqmovn_s16(v_src2)));
}
return x;
}
};
template <>
struct Cvt_SIMD<short, ushort>
{
int operator() (const short * src, ushort * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
uint16x4_t v_dst1 = vqmovun_s32(vmovl_s16(vget_low_s16(v_src)));
uint16x4_t v_dst2 = vqmovun_s32(vmovl_s16(vget_high_s16(v_src)));
vst1q_u16(dst + x, vcombine_u16(v_dst1, v_dst2));
}
return x;
}
};
template <>
struct Cvt_SIMD<short, int>
{
int operator() (const short * src, int * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
vst1q_s32(dst + x, vmovl_s16(vget_low_s16(v_src)));
vst1q_s32(dst + x + 4, vmovl_s16(vget_high_s16(v_src)));
}
return x;
}
};
template <>
struct Cvt_SIMD<short, float>
{
int operator() (const short * src, float * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
vst1q_f32(dst + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))));
vst1q_f32(dst + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))));
}
return x;
}
};
// from int
template <>
struct Cvt_SIMD<int, uchar>
{
int operator() (const int * src, uchar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 16; x += 16)
{
int32x4_t v_src1 = vld1q_s32(src + x), v_src2 = vld1q_s32(src + x + 4);
int32x4_t v_src3 = vld1q_s32(src + x + 8), v_src4 = vld1q_s32(src + x + 12);
uint8x8_t v_dst1 = vqmovn_u16(vcombine_u16(vqmovun_s32(v_src1), vqmovun_s32(v_src2)));
uint8x8_t v_dst2 = vqmovn_u16(vcombine_u16(vqmovun_s32(v_src3), vqmovun_s32(v_src4)));
vst1q_u8(dst + x, vcombine_u8(v_dst1, v_dst2));
}
return x;
}
};
template <>
struct Cvt_SIMD<int, schar>
{
int operator() (const int * src, schar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 16; x += 16)
{
int32x4_t v_src1 = vld1q_s32(src + x), v_src2 = vld1q_s32(src + x + 4);
int32x4_t v_src3 = vld1q_s32(src + x + 8), v_src4 = vld1q_s32(src + x + 12);
int8x8_t v_dst1 = vqmovn_s16(vcombine_s16(vqmovn_s32(v_src1), vqmovn_s32(v_src2)));
int8x8_t v_dst2 = vqmovn_s16(vcombine_s16(vqmovn_s32(v_src3), vqmovn_s32(v_src4)));
vst1q_s8(dst + x, vcombine_s8(v_dst1, v_dst2));
}
return x;
}
};
template <>
struct Cvt_SIMD<int, ushort>
{
int operator() (const int * src, ushort * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
int32x4_t v_src1 = vld1q_s32(src + x), v_src2 = vld1q_s32(src + x + 4);
vst1q_u16(dst + x, vcombine_u16(vqmovun_s32(v_src1), vqmovun_s32(v_src2)));
}
return x;
}
};
template <>
struct Cvt_SIMD<int, short>
{
int operator() (const int * src, short * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
int32x4_t v_src1 = vld1q_s32(src + x), v_src2 = vld1q_s32(src + x + 4);
vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(v_src1), vqmovn_s32(v_src2)));
}
return x;
}
};
template <>
struct Cvt_SIMD<int, float>
{
int operator() (const int * src, float * dst, int width) const
{
int x = 0;
for ( ; x <= width - 4; x += 4)
vst1q_f32(dst + x, vcvtq_f32_s32(vld1q_s32(src + x)));
return x;
}
};
// from float
template <>
struct Cvt_SIMD<float, uchar>
{
int operator() (const float * src, uchar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 16; x += 16)
{
uint32x4_t v_src1 = cv_vrndq_u32_f32(vld1q_f32(src + x));
uint32x4_t v_src2 = cv_vrndq_u32_f32(vld1q_f32(src + x + 4));
uint32x4_t v_src3 = cv_vrndq_u32_f32(vld1q_f32(src + x + 8));
uint32x4_t v_src4 = cv_vrndq_u32_f32(vld1q_f32(src + x + 12));
uint8x8_t v_dst1 = vqmovn_u16(vcombine_u16(vqmovn_u32(v_src1), vqmovn_u32(v_src2)));
uint8x8_t v_dst2 = vqmovn_u16(vcombine_u16(vqmovn_u32(v_src3), vqmovn_u32(v_src4)));
vst1q_u8(dst + x, vcombine_u8(v_dst1, v_dst2));
}
return x;
}
};
template <>
struct Cvt_SIMD<float, schar>
{
int operator() (const float * src, schar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 16; x += 16)
{
int32x4_t v_src1 = cv_vrndq_s32_f32(vld1q_f32(src + x));
int32x4_t v_src2 = cv_vrndq_s32_f32(vld1q_f32(src + x + 4));
int32x4_t v_src3 = cv_vrndq_s32_f32(vld1q_f32(src + x + 8));
int32x4_t v_src4 = cv_vrndq_s32_f32(vld1q_f32(src + x + 12));
int8x8_t v_dst1 = vqmovn_s16(vcombine_s16(vqmovn_s32(v_src1), vqmovn_s32(v_src2)));
int8x8_t v_dst2 = vqmovn_s16(vcombine_s16(vqmovn_s32(v_src3), vqmovn_s32(v_src4)));
vst1q_s8(dst + x, vcombine_s8(v_dst1, v_dst2));
}
return x;
}
};
template <>
struct Cvt_SIMD<float, ushort>
{
int operator() (const float * src, ushort * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_src1 = cv_vrndq_u32_f32(vld1q_f32(src + x));
uint32x4_t v_src2 = cv_vrndq_u32_f32(vld1q_f32(src + x + 4));
vst1q_u16(dst + x, vcombine_u16(vqmovn_u32(v_src1), vqmovn_u32(v_src2)));
}
return x;
}
};
template <>
struct Cvt_SIMD<float, int>
{
int operator() (const float * src, int * dst, int width) const
{
int x = 0;
for ( ; x <= width - 4; x += 4)
vst1q_s32(dst + x, cv_vrndq_s32_f32(vld1q_f32(src + x)));
return x;
}
};
#endif
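The Cvt_SIMD machinery above is the usual dispatch-by-specialization pattern: the generic functor is a no-op that returns 0, so non-NEON builds fall straight through to the scalar loop, while each specialization converts a vectorized prefix and reports how many elements it consumed. A minimal sketch of the pattern (hypothetical names Cvt_SIMD_sketch and cvt_sketch, not the library code):
template <typename T, typename DT>
struct Cvt_SIMD_sketch
{
    int operator()(const T *, DT *, int) const { return 0; } // scalar-only fallback
};

template <typename T, typename DT>
static void cvt_sketch(const T* src, DT* dst, int width)
{
    Cvt_SIMD_sketch<T, DT> vop;
    int x = vop(src, dst, width);  // vectorized prefix; 0 without SIMD
    for (; x < width; x++)         // scalar tail (the real cvt_ saturates here)
        dst[x] = static_cast<DT>(src[x]);
}
The cvt_ driver below integrates the real functor the same way: it seeds x with the functor's return value instead of 0.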
template<typename T, typename DT> static void
cvt_( const T* src, size_t sstep,
DT* dst, size_t dstep, Size size )
{
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
Cvt_SIMD<T, DT> vop;
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
int x = vop(src, dst, size.width);
#if CV_ENABLE_UNROLLED
for( ; x <= size.width - 4; x += 4 )
{

@ -2102,6 +2102,16 @@ static void scaleAdd_32f(const float* src1, const float* src2, float* dst,
}
}
else
#elif CV_NEON
if (true)
{
for ( ; i <= len - 4; i += 4)
{
float32x4_t v_src1 = vld1q_f32(src1 + i), v_src2 = vld1q_f32(src2 + i);
vst1q_f32(dst + i, vaddq_f32(vmulq_n_f32(v_src1, alpha), v_src2));
}
}
else
#endif
//vz why do we need unroll here?
for( ; i <= len - 4; i += 4 )

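For reference, the NEON branch above is the four-lane form of scaleAdd's defining scalar loop; a sketch of the equivalent scalar form (same src1, src2, dst, alpha, len names as above):
for (int j = 0; j < len; j++)
    dst[j] = src1[j] * alpha + src2[j];  // vmulq_n_f32 + vaddq_f32, one lane at a time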
@ -4504,8 +4504,8 @@ int predictOptimalVectorWidth(InputArray src1, InputArray src2, InputArray src3,
if (vectorWidths[0] == 1)
{
// it's a heuristic
vectorWidths[CV_8U] = vectorWidths[CV_8S] = 16;
vectorWidths[CV_16U] = vectorWidths[CV_16S] = 8;
vectorWidths[CV_8U] = vectorWidths[CV_8S] = 4;
vectorWidths[CV_16U] = vectorWidths[CV_16S] = 2;
vectorWidths[CV_32S] = vectorWidths[CV_32F] = vectorWidths[CV_64F] = 1;
}

@ -63,14 +63,181 @@ template<typename T> static inline Scalar rawToScalar(const T& v)
* sum *
\****************************************************************************************/
template <typename T, typename ST>
struct Sum_SIMD
{
int operator () (const T *, const uchar *, ST *, int, int) const
{
return 0;
}
};
#if CV_NEON
template <>
struct Sum_SIMD<uchar, int>
{
int operator () (const uchar * src0, const uchar * mask, int * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4))
return 0;
int x = 0;
uint32x4_t v_sum = vdupq_n_u32(0u);
for ( ; x <= len - 16; x += 16)
{
uint8x16_t v_src = vld1q_u8(src0 + x);
uint16x8_t v_half = vmovl_u8(vget_low_u8(v_src));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_half)));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_half)));
v_half = vmovl_u8(vget_high_u8(v_src));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_half)));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_half)));
}
for ( ; x <= len - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src0 + x));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_src)));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_src)));
}
unsigned int CV_DECL_ALIGNED(16) ar[4];
vst1q_u32(ar, v_sum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
template <>
struct Sum_SIMD<schar, int>
{
int operator () (const schar * src0, const uchar * mask, int * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4))
return 0;
int x = 0;
int32x4_t v_sum = vdupq_n_s32(0);
for ( ; x <= len - 16; x += 16)
{
int8x16_t v_src = vld1q_s8(src0 + x);
int16x8_t v_half = vmovl_s8(vget_low_s8(v_src));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_half)));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_half)));
v_half = vmovl_s8(vget_high_s8(v_src));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_half)));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_half)));
}
for ( ; x <= len - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src0 + x));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_src)));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_src)));
}
int CV_DECL_ALIGNED(16) ar[4];
vst1q_s32(ar, v_sum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
template <>
struct Sum_SIMD<ushort, int>
{
int operator () (const ushort * src0, const uchar * mask, int * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4))
return 0;
int x = 0;
uint32x4_t v_sum = vdupq_n_u32(0u);
for ( ; x <= len - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src0 + x);
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_src)));
v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_src)));
}
for ( ; x <= len - 4; x += 4)
v_sum = vaddq_u32(v_sum, vmovl_u16(vld1_u16(src0 + x)));
unsigned int CV_DECL_ALIGNED(16) ar[4];
vst1q_u32(ar, v_sum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
template <>
struct Sum_SIMD<short, int>
{
int operator () (const short * src0, const uchar * mask, int * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4))
return 0;
int x = 0;
int32x4_t v_sum = vdupq_n_s32(0);
for ( ; x <= len - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src0 + x);
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_src)));
v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_src)));
}
for ( ; x <= len - 4; x += 4)
v_sum = vaddq_s32(v_sum, vmovl_s16(vld1_s16(src0 + x)));
int CV_DECL_ALIGNED(16) ar[4];
vst1q_s32(ar, v_sum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
#endif
template<typename T, typename ST>
static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
{
const T* src = src0;
if( !mask )
{
int i=0;
int k = cn % 4;
Sum_SIMD<T, ST> vop;
int i = vop(src0, mask, dst, len, cn), k = cn % 4;
src += i * cn;
if( k == 1 )
{
ST s0 = dst[0];
@ -86,7 +253,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
else if( k == 2 )
{
ST s0 = dst[0], s1 = dst[1];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
s0 += src[0];
s1 += src[1];
@ -97,7 +264,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
else if( k == 3 )
{
ST s0 = dst[0], s1 = dst[1], s2 = dst[2];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
s0 += src[0];
s1 += src[1];
@ -110,9 +277,9 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
for( ; k < cn; k += 4 )
{
src = src0 + k;
src = src0 + i*cn + k;
ST s0 = dst[k], s1 = dst[k+1], s2 = dst[k+2], s3 = dst[k+3];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
s0 += src[0]; s1 += src[1];
s2 += src[2]; s3 += src[3];

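To see why the Sum_SIMD tail fold dst[j] += ar[j + i] is channel-correct, consider a worked sketch with hypothetical lane values:
// For cn == 2 the four accumulator lanes hold interleaved per-channel sums
// (c0, c1, c0, c1); striding by cn folds them into per-channel totals.
int ar[4] = { 10, 20, 30, 40 };   // lane sums as stored by vst1q_s32
int dst[2] = { 0, 0 };
const int cn = 2;
for (int i = 0; i < 4; i += cn)
    for (int j = 0; j < cn; ++j)
        dst[j] += ar[j + i];
// dst == { 40, 60 }: lanes 0 and 2 belong to channel 0, lanes 1 and 3 to channel 1.
The functor then returns x / cn, the number of whole pixels consumed, which is why sum_ above advances src by i * cn and lets its scalar loops resume from i rather than 0.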
@ -1,2 +1,2 @@
set(the_description "2D Features Framework")
ocv_define_module(features2d opencv_imgproc opencv_flann OPTIONAL opencv_highgui)
ocv_define_module(features2d opencv_imgproc opencv_ml opencv_flann OPTIONAL opencv_highgui)

@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -156,91 +157,6 @@ void NearestNeighborTest::run( int /*start_from*/ ) {
ts->set_failed_test_info( code );
}
//--------------------------------------------------------------------------------
class CV_KDTreeTest_CPP : public NearestNeighborTest
{
public:
CV_KDTreeTest_CPP() {}
protected:
virtual void createModel( const Mat& data );
virtual int checkGetPoins( const Mat& data );
virtual int findNeighbors( Mat& points, Mat& neighbors );
virtual int checkFindBoxed();
virtual void releaseModel();
KDTree* tr;
};
void CV_KDTreeTest_CPP::createModel( const Mat& data )
{
tr = new KDTree( data, false );
}
int CV_KDTreeTest_CPP::checkGetPoins( const Mat& data )
{
Mat res1( data.size(), data.type() ),
res3( data.size(), data.type() );
Mat idxs( 1, data.rows, CV_32SC1 );
for( int pi = 0; pi < data.rows; pi++ )
{
idxs.at<int>(0, pi) = pi;
// 1st way
const float* point = tr->getPoint(pi);
for( int di = 0; di < data.cols; di++ )
res1.at<float>(pi, di) = point[di];
}
// 3rd way
tr->getPoints( idxs, res3 );
if( cvtest::norm( res1, data, NORM_L1) != 0 ||
cvtest::norm( res3, data, NORM_L1) != 0)
return cvtest::TS::FAIL_BAD_ACCURACY;
return cvtest::TS::OK;
}
int CV_KDTreeTest_CPP::checkFindBoxed()
{
vector<float> min( dims, static_cast<float>(minValue)), max(dims, static_cast<float>(maxValue));
vector<int> indices;
tr->findOrthoRange( min, max, indices );
// TODO check indices
if( (int)indices.size() != featuresCount)
return cvtest::TS::FAIL_BAD_ACCURACY;
return cvtest::TS::OK;
}
int CV_KDTreeTest_CPP::findNeighbors( Mat& points, Mat& neighbors )
{
const int emax = 20;
Mat neighbors2( neighbors.size(), CV_32SC1 );
int j;
for( int pi = 0; pi < points.rows; pi++ )
{
// 1st way
Mat nrow = neighbors.row(pi);
tr->findNearest( points.row(pi), neighbors.cols, emax, nrow );
// 2nd way
vector<int> neighborsIdx2( neighbors2.cols, 0 );
tr->findNearest( points.row(pi), neighbors2.cols, emax, neighborsIdx2 );
vector<int>::const_iterator it2 = neighborsIdx2.begin();
for( j = 0; it2 != neighborsIdx2.end(); ++it2, j++ )
neighbors2.at<int>(pi,j) = *it2;
}
// compare results
if( cvtest::norm( neighbors, neighbors2, NORM_L1 ) != 0 )
return cvtest::TS::FAIL_BAD_ACCURACY;
return cvtest::TS::OK;
}
void CV_KDTreeTest_CPP::releaseModel()
{
delete tr;
}
//--------------------------------------------------------------------------------
class CV_FlannTest : public NearestNeighborTest
{
@ -402,7 +318,6 @@ void CV_FlannSavedIndexTest::createModel(const cv::Mat &data)
remove( filename.c_str() );
}
TEST(Features2d_KDTree_CPP, regression) { CV_KDTreeTest_CPP test; test.safe_run(); }
TEST(Features2d_FLANN_Linear, regression) { CV_FlannLinearIndexTest test; test.safe_run(); }
TEST(Features2d_FLANN_KMeans, regression) { CV_FlannKMeansIndexTest test; test.safe_run(); }
TEST(Features2d_FLANN_KDTree, regression) { CV_FlannKDTreeIndexTest test; test.safe_run(); }

@ -13,6 +13,7 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/ml.hpp"
#include <iostream>
#endif

@ -0,0 +1,568 @@
//
// Copyright (C) Microsoft Corporation
// All rights reserved.
// Modified for native C++ WRL support by Gregory Morse
//
// Code in Details namespace is for internal usage within the library code
//
#ifndef _PLATFORM_AGILE_H_
#define _PLATFORM_AGILE_H_
#ifdef _MSC_VER
#pragma once
#endif // _MSC_VER
#include <algorithm>
#include <wrl\client.h>
template <typename T, bool TIsNotAgile> class Agile;
template <typename T>
struct UnwrapAgile
{
static const bool _IsAgile = false;
};
template <typename T>
struct UnwrapAgile<Agile<T, false>>
{
static const bool _IsAgile = true;
};
template <typename T>
struct UnwrapAgile<Agile<T, true>>
{
static const bool _IsAgile = true;
};
#define IS_AGILE(T) UnwrapAgile<T>::_IsAgile
#define __is_winrt_agile(T) (std::is_same<T, HSTRING__>::value || std::is_base_of<Microsoft::WRL::FtmBase, T>::value || std::is_base_of<IAgileObject, T>::value) //derived from Microsoft::WRL::FtmBase or IAgileObject
#define __is_win_interface(T) (std::is_base_of<IUnknown, T>::value || std::is_base_of<IInspectable, T>::value) //derived from IUnknown or IInspectable
#define __is_win_class(T) (std::is_same<T, HSTRING__>::value || std::is_base_of<Microsoft::WRL::Details::RuntimeClassBase, T>::value) //derived from Microsoft::WRL::RuntimeClass or HSTRING
namespace Details
{
IUnknown* __stdcall GetObjectContext();
HRESULT __stdcall GetProxyImpl(IUnknown*, REFIID, IUnknown*, IUnknown**);
HRESULT __stdcall ReleaseInContextImpl(IUnknown*, IUnknown*);
template <typename T>
#if _MSC_VER >= 1800
__declspec(no_refcount) inline HRESULT GetProxy(T *ObjectIn, IUnknown *ContextCallBack, T **Proxy)
#else
inline HRESULT GetProxy(T *ObjectIn, IUnknown *ContextCallBack, T **Proxy)
#endif
{
#if _MSC_VER >= 1800
return GetProxyImpl(*reinterpret_cast<IUnknown**>(&ObjectIn), __uuidof(T*), ContextCallBack, reinterpret_cast<IUnknown**>(Proxy));
#else
return GetProxyImpl(*reinterpret_cast<IUnknown**>(&const_cast<T*>(ObjectIn)), __uuidof(T*), ContextCallBack, reinterpret_cast<IUnknown**>(Proxy));
#endif
}
template <typename T>
inline HRESULT ReleaseInContext(T *ObjectIn, IUnknown *ContextCallBack)
{
return ReleaseInContextImpl(ObjectIn, ContextCallBack);
}
template <typename T>
class AgileHelper
{
__abi_IUnknown* _p;
bool _release;
public:
AgileHelper(__abi_IUnknown* p, bool release = true) : _p(p), _release(release)
{
}
AgileHelper(AgileHelper&& other) : _p(other._p), _release(other._release)
{
other._p = nullptr;
other._release = true;
}
AgileHelper operator=(AgileHelper&& other)
{
_p = other._p;
_release = other._release;
other._p = nullptr;
other._release = true;
return *this;
}
~AgileHelper()
{
if (_release && _p)
{
_p->__abi_Release();
}
}
__declspec(no_refcount) __declspec(no_release_return)
T* operator->()
{
return reinterpret_cast<T*>(_p);
}
__declspec(no_refcount) __declspec(no_release_return)
operator T * ()
{
return reinterpret_cast<T*>(_p);
}
private:
AgileHelper(const AgileHelper&);
AgileHelper operator=(const AgileHelper&);
};
template <typename T>
struct __remove_hat
{
typedef T type;
};
template <typename T>
struct __remove_hat<T*>
{
typedef T type;
};
template <typename T>
struct AgileTypeHelper
{
typedef typename __remove_hat<T>::type type;
typedef typename __remove_hat<T>::type* agileMemberType;
};
} // namespace Details
#pragma warning(push)
#pragma warning(disable: 4451) // Usage of ref class inside this context can lead to invalid marshaling of object across contexts
template <
typename T,
bool TIsNotAgile = (__is_win_class(typename Details::AgileTypeHelper<T>::type) && !__is_winrt_agile(typename Details::AgileTypeHelper<T>::type)) ||
__is_win_interface(typename Details::AgileTypeHelper<T>::type)
>
class Agile
{
static_assert(__is_win_class(typename Details::AgileTypeHelper<T>::type) || __is_win_interface(typename Details::AgileTypeHelper<T>::type), "Agile can only be used with ref class or interface class types");
typedef typename Details::AgileTypeHelper<T>::agileMemberType TypeT;
TypeT _object;
::Microsoft::WRL::ComPtr<IUnknown> _contextCallback;
ULONG_PTR _contextToken;
#if _MSC_VER >= 1800
enum class AgileState
{
NonAgilePointer = 0,
AgilePointer = 1,
Unknown = 2
};
AgileState _agileState;
#endif
void CaptureContext()
{
_contextCallback = Details::GetObjectContext();
__abi_ThrowIfFailed(CoGetContextToken(&_contextToken));
}
void SetObject(TypeT object)
{
// Capture context before setting the pointer
// If context capture fails then there is nothing to clean up
Release();
if (object != nullptr)
{
::Microsoft::WRL::ComPtr<IAgileObject> checkIfAgile;
HRESULT hr = reinterpret_cast<IUnknown*>(object)->QueryInterface(__uuidof(IAgileObject), &checkIfAgile);
// Don't capture the context if the object is agile
if (hr != S_OK)
{
#if _MSC_VER >= 1800
_agileState = AgileState::NonAgilePointer;
#endif
CaptureContext();
}
#if _MSC_VER >= 1800
else
{
_agileState = AgileState::AgilePointer;
}
#endif
}
_object = object;
}
public:
Agile() throw() : _object(nullptr), _contextToken(0)
#if _MSC_VER >= 1800
, _agileState(AgileState::Unknown)
#endif
{
}
Agile(nullptr_t) throw() : _object(nullptr), _contextToken(0)
#if _MSC_VER >= 1800
, _agileState(AgileState::Unknown)
#endif
{
}
explicit Agile(TypeT object) throw() : _object(nullptr), _contextToken(0)
#if _MSC_VER >= 1800
, _agileState(AgileState::Unknown)
#endif
{
// Assumes that the source object is from the current context
SetObject(object);
}
Agile(const Agile& object) throw() : _object(nullptr), _contextToken(0)
#if _MSC_VER >= 1800
, _agileState(AgileState::Unknown)
#endif
{
// Get returns pointer valid for current context
SetObject(object.Get());
}
Agile(Agile&& object) throw() : _object(nullptr), _contextToken(0)
#if _MSC_VER >= 1800
, _agileState(AgileState::Unknown)
#endif
{
// Assumes that the source object is from the current context
Swap(object);
}
~Agile() throw()
{
Release();
}
TypeT Get() const
{
// Agile object, no proxy required
#if _MSC_VER >= 1800
if (_agileState == AgileState::AgilePointer || _object == nullptr)
#else
if (_contextToken == 0 || _contextCallback == nullptr || _object == nullptr)
#endif
{
return _object;
}
// Do the check for same context
ULONG_PTR currentContextToken;
__abi_ThrowIfFailed(CoGetContextToken(&currentContextToken));
if (currentContextToken == _contextToken)
{
return _object;
}
#if _MSC_VER >= 1800
// Different context and holding on to a non-agile object
// Do the costly work of getting a proxy
TypeT localObject;
__abi_ThrowIfFailed(Details::GetProxy(_object, _contextCallback.Get(), &localObject));
if (_agileState == AgileState::Unknown)
#else
// Object is agile if it implements IAgileObject
// GetAddressOf captures the context without knowing the type of object that it will hold
if (_object != nullptr)
#endif
{
#if _MSC_VER >= 1800
// Object is agile if it implements IAgileObject
// GetAddressOf captures the context without knowing the type of object that it will hold
::Microsoft::WRL::ComPtr<IAgileObject> checkIfAgile;
HRESULT hr = reinterpret_cast<IUnknown*>(localObject)->QueryInterface(__uuidof(IAgileObject), &checkIfAgile);
#else
::Microsoft::WRL::ComPtr<IAgileObject> checkIfAgile;
HRESULT hr = reinterpret_cast<IUnknown*>(_object)->QueryInterface(__uuidof(IAgileObject), &checkIfAgile);
#endif
if (hr == S_OK)
{
auto pThis = const_cast<Agile*>(this);
#if _MSC_VER >= 1800
pThis->_agileState = AgileState::AgilePointer;
#endif
pThis->_contextToken = 0;
pThis->_contextCallback = nullptr;
return _object;
}
#if _MSC_VER >= 1800
else
{
auto pThis = const_cast<Agile*>(this);
pThis->_agileState = AgileState::NonAgilePointer;
}
#endif
}
#if _MSC_VER < 1800
// Different context and holding on to a non-agile object
// Do the costly work of getting a proxy
TypeT localObject;
__abi_ThrowIfFailed(Details::GetProxy(_object, _contextCallback.Get(), &localObject));
#endif
return localObject;
}
TypeT* GetAddressOf() throw()
{
Release();
CaptureContext();
return &_object;
}
TypeT* GetAddressOfForInOut() throw()
{
CaptureContext();
return &_object;
}
TypeT operator->() const throw()
{
return Get();
}
Agile& operator=(nullptr_t) throw()
{
Release();
return *this;
}
Agile& operator=(TypeT object) throw()
{
Agile(object).Swap(*this);
return *this;
}
Agile& operator=(Agile object) throw()
{
// parameter is by copy which gets pointer valid for current context
object.Swap(*this);
return *this;
}
#if _MSC_VER < 1800
Agile& operator=(IUnknown* lp) throw()
{
// bump ref count
::Microsoft::WRL::ComPtr<IUnknown> spObject(lp);
// put it into Platform Object
Platform::Object object;
*(IUnknown**)(&object) = spObject.Detach();
SetObject(object);
return *this;
}
#endif
void Swap(Agile& object)
{
std::swap(_object, object._object);
std::swap(_contextCallback, object._contextCallback);
std::swap(_contextToken, object._contextToken);
#if _MSC_VER >= 1800
std::swap(_agileState, object._agileState);
#endif
}
// Release the interface and set to NULL
void Release() throw()
{
if (_object)
{
// Cast to IInspectable (no QI)
IUnknown* pObject = *(IUnknown**)(&_object);
// Set * to null without release
*(IUnknown**)(&_object) = nullptr;
ULONG_PTR currentContextToken;
__abi_ThrowIfFailed(CoGetContextToken(&currentContextToken));
if (_contextToken == 0 || _contextCallback == nullptr || _contextToken == currentContextToken)
{
pObject->Release();
}
else
{
Details::ReleaseInContext(pObject, _contextCallback.Get());
}
_contextCallback = nullptr;
_contextToken = 0;
#if _MSC_VER >= 1800
_agileState = AgileState::Unknown;
#endif
}
}
bool operator==(nullptr_t) const throw()
{
return _object == nullptr;
}
bool operator==(const Agile& other) const throw()
{
return _object == other._object && _contextToken == other._contextToken;
}
bool operator<(const Agile& other) const throw()
{
if (reinterpret_cast<void*>(_object) < reinterpret_cast<void*>(other._object))
{
return true;
}
return _object == other._object && _contextToken < other._contextToken;
}
};
template <typename T>
class Agile<T, false>
{
static_assert(__is_win_class(typename Details::AgileTypeHelper<T>::type) || __is_win_interface(typename Details::AgileTypeHelper<T>::type), "Agile can only be used with ref class or interface class types");
typedef typename Details::AgileTypeHelper<T>::agileMemberType TypeT;
TypeT _object;
public:
Agile() throw() : _object(nullptr)
{
}
Agile(nullptr_t) throw() : _object(nullptr)
{
}
explicit Agile(TypeT object) throw() : _object(object)
{
}
Agile(const Agile& object) throw() : _object(object._object)
{
}
Agile(Agile&& object) throw() : _object(nullptr)
{
Swap(object);
}
~Agile() throw()
{
Release();
}
TypeT Get() const
{
return _object;
}
TypeT* GetAddressOf() throw()
{
Release();
return &_object;
}
TypeT* GetAddressOfForInOut() throw()
{
return &_object;
}
TypeT operator->() const throw()
{
return Get();
}
Agile& operator=(nullptr_t) throw()
{
Release();
return *this;
}
Agile& operator=(TypeT object) throw()
{
if (_object != object)
{
_object = object;
}
return *this;
}
Agile& operator=(Agile object) throw()
{
object.Swap(*this);
return *this;
}
#if _MSC_VER < 1800
Agile& operator=(IUnknown* lp) throw()
{
Release();
// bump ref count
::Microsoft::WRL::ComPtr<IUnknown> spObject(lp);
// put it into Platform Object
Platform::Object object;
*(IUnknown**)(&object) = spObject.Detach();
_object = object;
return *this;
}
#endif
// Release the interface and set to NULL
void Release() throw()
{
_object = nullptr;
}
void Swap(Agile& object)
{
std::swap(_object, object._object);
}
bool operator==(nullptr_t) const throw()
{
return _object == nullptr;
}
bool operator==(const Agile& other) const throw()
{
return _object == other._object;
}
bool operator<(const Agile& other) const throw()
{
return reinterpret_cast<void*>(_object) < reinterpret_cast<void*>(other._object);
}
};
#pragma warning(pop)
template<class U>
bool operator==(nullptr_t, const Agile<U>& a) throw()
{
return a == nullptr;
}
template<class U>
bool operator!=(const Agile<U>& a, nullptr_t) throw()
{
return !(a == nullptr);
}
template<class U>
bool operator!=(nullptr_t, const Agile<U>& a) throw()
{
return !(a == nullptr);
}
template<class U>
bool operator!=(const Agile<U>& a, const Agile<U>& b) throw()
{
return !(a == b);
}
#endif // _PLATFORM_AGILE_H_
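A hedged usage sketch of the wrapper (assumes a C++/CX project compiled with /ZW; CoreWindow is a commonly cited non-agile type, and the calls below are illustrative):
// Capture a non-agile ref pointer on the UI thread...
Agile<Windows::UI::Core::CoreWindow^> agileWindow(
    Windows::UI::Core::CoreWindow::GetForCurrentThread());

// ...and dereference it later from a worker thread: Get() returns the raw
// pointer when the apartment matches and builds a COM proxy otherwise.
auto window = agileWindow.Get();
window->Activate();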

Binary file not shown (4.8 KiB before, 2.2 KiB after).

Binary file not shown (5.1 KiB before, 2.5 KiB after).

Binary file not shown (4.3 KiB before, 1.7 KiB after).

Binary file not shown (7.1 KiB before, 4.5 KiB after).

Binary file not shown (4.1 KiB before, 1.8 KiB after).

File diff suppressed because it is too large.

@ -76,6 +76,17 @@ static const unsigned int threshold_zoom_img_region = 30;
static CvWinProperties* global_control_panel = NULL;
//end static and global
// Declaration
Qt::ConnectionType autoBlockingConnection();
// Implementation - this allows us to do blocking whilst automatically selecting the right
// behaviour for in-thread and out-of-thread launches of cv windows. Qt strangely doesn't
// cater for this, but does for strictly queued connections.
Qt::ConnectionType autoBlockingConnection() {
return (QThread::currentThread() != QApplication::instance()->thread())
? Qt::BlockingQueuedConnection
: Qt::DirectConnection;
}
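The point of the helper is visible at the getter call sites below: with Qt::AutoConnection, a cross-thread invoke becomes queued and returns before the slot runs, so a Q_RETURN_ARG is never filled in. A minimal illustration (hypothetical window name):
double result = -1;
QMetaObject::invokeMethod(guiMainThread, "getRatioWindow",
                          autoBlockingConnection(),   // blocks cross-thread, direct in-thread
                          Q_RETURN_ARG(double, result),
                          Q_ARG(QString, QString("view")));
// With plain Qt::AutoConnection from a worker thread, invokeMethod would
// return immediately and result would still be -1 at this point.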
CV_IMPL CvFont cvFontQt(const char* nameFont, int pointSize,CvScalar color,int weight,int style, int spacing)
{
@ -104,7 +115,7 @@ CV_IMPL void cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont*
QMetaObject::invokeMethod(guiMainThread,
"putText",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(void*, (void*) img),
Q_ARG(QString,QString(text)),
Q_ARG(QPoint, QPoint(org.x,org.y)),
@ -120,8 +131,7 @@ double cvGetRatioWindow_QT(const char* name)
double result = -1;
QMetaObject::invokeMethod(guiMainThread,
"getRatioWindow",
//Qt::DirectConnection,
Qt::AutoConnection,
autoBlockingConnection(),
Q_RETURN_ARG(double, result),
Q_ARG(QString, QString(name)));
@ -137,7 +147,7 @@ void cvSetRatioWindow_QT(const char* name,double prop_value)
QMetaObject::invokeMethod(guiMainThread,
"setRatioWindow",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name)),
Q_ARG(double, prop_value));
}
@ -151,8 +161,7 @@ double cvGetPropWindow_QT(const char* name)
double result = -1;
QMetaObject::invokeMethod(guiMainThread,
"getPropWindow",
//Qt::DirectConnection,
Qt::AutoConnection,
autoBlockingConnection(),
Q_RETURN_ARG(double, result),
Q_ARG(QString, QString(name)));
@ -167,7 +176,7 @@ void cvSetPropWindow_QT(const char* name,double prop_value)
QMetaObject::invokeMethod(guiMainThread,
"setPropWindow",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name)),
Q_ARG(double, prop_value));
}
@ -180,7 +189,7 @@ void cvSetModeWindow_QT(const char* name, double prop_value)
QMetaObject::invokeMethod(guiMainThread,
"toggleFullScreen",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name)),
Q_ARG(double, prop_value));
}
@ -195,7 +204,7 @@ double cvGetModeWindow_QT(const char* name)
QMetaObject::invokeMethod(guiMainThread,
"isFullScreen",
Qt::AutoConnection,
autoBlockingConnection(),
Q_RETURN_ARG(double, result),
Q_ARG(QString, QString(name)));
@ -210,8 +219,7 @@ CV_IMPL void cvDisplayOverlay(const char* name, const char* text, int delayms)
QMetaObject::invokeMethod(guiMainThread,
"displayInfo",
Qt::AutoConnection,
//Qt::DirectConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name)),
Q_ARG(QString, QString(text)),
Q_ARG(int, delayms));
@ -225,7 +233,7 @@ CV_IMPL void cvSaveWindowParameters(const char* name)
QMetaObject::invokeMethod(guiMainThread,
"saveWindowParameters",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name)));
}
@ -237,7 +245,7 @@ CV_IMPL void cvLoadWindowParameters(const char* name)
QMetaObject::invokeMethod(guiMainThread,
"loadWindowParameters",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name)));
}
@ -249,8 +257,7 @@ CV_IMPL void cvDisplayStatusBar(const char* name, const char* text, int delayms)
QMetaObject::invokeMethod(guiMainThread,
"displayStatusBar",
Qt::AutoConnection,
//Qt::DirectConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name)),
Q_ARG(QString, QString(text)),
Q_ARG(int, delayms));
@ -328,7 +335,6 @@ CV_IMPL int cvWaitKey(int delay)
guiMainThread->bTimeOut = false;
}
return result;
}
@ -400,7 +406,6 @@ static CvBar* icvFindBarByName(QBoxLayout* layout, QString name_bar, typeBar typ
static CvTrackbar* icvFindTrackBarByName(const char* name_trackbar, const char* name_window, QBoxLayout* layout = NULL)
{
QString nameQt(name_trackbar);
if ((!name_window || !name_window[0]) && global_control_panel) //window name is null and we have a control panel
layout = global_control_panel->myLayout;
@ -470,17 +475,18 @@ CV_IMPL int cvNamedWindow(const char* name, int flags)
{
if (!guiMainThread)
guiMainThread = new GuiReceiver;
if (multiThreads)
if (QThread::currentThread() != QApplication::instance()->thread()) {
multiThreads = true;
QMetaObject::invokeMethod(guiMainThread,
"createWindow",
Qt::BlockingQueuedConnection,
Qt::BlockingQueuedConnection, // block so that we can do useful stuff once we confirm it is created
Q_ARG(QString, QString(name)),
Q_ARG(int, flags));
else
} else {
guiMainThread->createWindow(QString(name), flags);
}
return 1; //Dummy value
return 1; //Dummy value - probably should return the result of the invocation.
}
@ -491,8 +497,7 @@ CV_IMPL void cvDestroyWindow(const char* name)
QMetaObject::invokeMethod(guiMainThread,
"destroyWindow",
//Qt::BlockingQueuedConnection,
Qt::AutoConnection,
Qt::AutoConnection, // if another thread is controlling, let it handle it without blocking ourselves here
Q_ARG(QString, QString(name)));
}
@ -501,11 +506,10 @@ CV_IMPL void cvDestroyAllWindows()
{
if (!guiMainThread)
return;
QMetaObject::invokeMethod(guiMainThread,
"destroyAllWindow",
//Qt::BlockingQueuedConnection,
Qt::AutoConnection);
Qt::AutoConnection // if another thread is controlling, let it handle it without blocking ourselves here
);
}
@ -531,26 +535,21 @@ CV_IMPL void cvMoveWindow(const char* name, int x, int y)
{
if (!guiMainThread)
CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );
QMetaObject::invokeMethod(guiMainThread,
"moveWindow",
//Qt::BlockingQueuedConnection,
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name)),
Q_ARG(int, x),
Q_ARG(int, y));
}
CV_IMPL void cvResizeWindow(const char* name, int width, int height)
{
if (!guiMainThread)
CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );
QMetaObject::invokeMethod(guiMainThread,
"resizeWindow",
//Qt::BlockingQueuedConnection,
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name)),
Q_ARG(int, width),
Q_ARG(int, height));
@ -564,7 +563,7 @@ CV_IMPL int cvCreateTrackbar2(const char* name_bar, const char* window_name, int
QMetaObject::invokeMethod(guiMainThread,
"addSlider2",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name_bar)),
Q_ARG(QString, QString(window_name)),
Q_ARG(void*, (void*)val),
@ -589,7 +588,7 @@ CV_IMPL int cvCreateTrackbar(const char* name_bar, const char* window_name, int*
QMetaObject::invokeMethod(guiMainThread,
"addSlider",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(name_bar)),
Q_ARG(QString, QString(window_name)),
Q_ARG(void*, (void*)value),
@ -610,7 +609,7 @@ CV_IMPL int cvCreateButton(const char* button_name, CvButtonCallback on_change,
QMetaObject::invokeMethod(guiMainThread,
"addButton",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(button_name)),
Q_ARG(int, button_type),
Q_ARG(int, initial_button_state),
@ -660,13 +659,17 @@ CV_IMPL void cvShowImage(const char* name, const CvArr* arr)
{
if (!guiMainThread)
guiMainThread = new GuiReceiver;
QMetaObject::invokeMethod(guiMainThread,
"showImage",
//Qt::BlockingQueuedConnection,
Qt::DirectConnection,
Q_ARG(QString, QString(name)),
Q_ARG(void*, (void*)arr));
if (QThread::currentThread() != QApplication::instance()->thread()) {
multiThreads = true;
QMetaObject::invokeMethod(guiMainThread,
"showImage",
autoBlockingConnection(),
Q_ARG(QString, QString(name)),
Q_ARG(void*, (void*)arr)
);
} else {
guiMainThread->showImage(QString(name), (void*)arr);
}
}
@ -679,7 +682,7 @@ CV_IMPL void cvSetOpenGlDrawCallback(const char* window_name, CvOpenGlDrawCallba
QMetaObject::invokeMethod(guiMainThread,
"setOpenGlDrawCallback",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(window_name)),
Q_ARG(void*, (void*)callback),
Q_ARG(void*, userdata));
@ -693,7 +696,7 @@ CV_IMPL void cvSetOpenGlContext(const char* window_name)
QMetaObject::invokeMethod(guiMainThread,
"setOpenGlContext",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(window_name)));
}
@ -705,7 +708,7 @@ CV_IMPL void cvUpdateWindow(const char* window_name)
QMetaObject::invokeMethod(guiMainThread,
"updateWindow",
Qt::AutoConnection,
autoBlockingConnection(),
Q_ARG(QString, QString(window_name)));
}
@ -720,7 +723,7 @@ double cvGetOpenGlProp_QT(const char* name)
{
QMetaObject::invokeMethod(guiMainThread,
"isOpenGl",
Qt::AutoConnection,
autoBlockingConnection(),
Q_RETURN_ARG(double, result),
Q_ARG(QString, QString(name)));
}
@ -741,6 +744,9 @@ GuiReceiver::GuiReceiver() : bTimeOut(false), nb_windows(0)
timer = new QTimer(this);
QObject::connect(timer, SIGNAL(timeout()), this, SLOT(timeOut()));
timer->setSingleShot(true);
if ( doesExternalQAppExist ) {
moveToThread(QApplication::instance()->thread());
}
}
@ -964,6 +970,7 @@ void GuiReceiver::showImage(QString name, void* arr)
void GuiReceiver::destroyWindow(QString name)
{
QPointer<CvWindow> w = icvFindWindowByName(name);
if (w)
@ -1525,7 +1532,6 @@ CvWinProperties::~CvWinProperties()
CvWindow::CvWindow(QString name, int arg2)
{
type = type_CvWindow;
moveToThread(qApp->instance()->thread());
param_flags = arg2 & 0x0000000F;
param_gui_mode = arg2 & 0x000000F0;
@ -2365,7 +2371,6 @@ void DefaultViewPort::updateImage(const CvArr* arr)
//use to compute mouse coordinate, I need to update the ratio here and in resizeEvent
ratioX = width() / float(image2Draw_mat->cols);
ratioY = height() / float(image2Draw_mat->rows);
updateGeometry();
}

@ -2,7 +2,7 @@ ColorMaps in OpenCV
===================
applyColorMap
---------------------
-------------
Applies a GNU Octave/MATLAB equivalent colormap on a given image.
@ -34,45 +34,48 @@ Currently the following GNU Octave/MATLAB equivalent colormaps are implemented:
Description
-----------
===========
Human perception isn't built for observing fine changes in grayscale images. Human eyes are more sensitive to changes between colors, so you often need to recolor your grayscale images to get a clue about them. OpenCV comes with various colormaps to enhance the visualization in your computer vision application.
In OpenCV 2.4 you only need :ocv:func:`applyColorMap` to apply a colormap on a given image. The following sample code reads the path to an image from command line, applies a Jet colormap on it and shows the result:
In OpenCV you only need :ocv:func:`applyColorMap` to apply a colormap on a given image. The following sample code reads the path to an image from command line, applies a Jet colormap on it and shows the result:
.. code-block:: cpp
#include <opencv2/contrib.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
int main(int argc, const char *argv[]) {
// Get the path to the image, if it was given
// if no arguments were given.
String filename;
if (argc > 1) {
filename = String(argv[1]);
#include <iostream>
using namespace std;
int main(int argc, const char *argv[])
{
// We need an input image. (can be grayscale or color)
if (argc < 2)
{
cerr << "We need an image to process here. Please run: colorMap [path_to_image]" << endl;
return -1;
}
// The following lines show how to apply a colormap on a given image
// and show it with cv::imshow example with an image. An exception is
// thrown if the path to the image is invalid.
if(!filename.empty()) {
Mat img0 = imread(filename);
// Throw an exception, if the image can't be read:
if(img0.empty()) {
CV_Error(CV_StsBadArg, "Sample image is empty. Please adjust your path, so it points to a valid input image!");
}
// Holds the colormap version of the image:
Mat cm_img0;
// Apply the colormap:
applyColorMap(img0, cm_img0, COLORMAP_JET);
// Show the result:
imshow("cm_img0", cm_img0);
waitKey(0);
Mat img_in = imread(argv[1]);
if(img_in.empty())
{
cerr << "Sample image (" << argv[1] << ") is empty. Please adjust your path, so it points to a valid input image!" << endl;
return -1;
}
// Holds the colormap version of the image:
Mat img_color;
// Apply the colormap:
applyColorMap(img_in, img_color, COLORMAP_JET);
// Show the result:
imshow("colorMap", img_color);
waitKey(0);
return 0;
}

@ -28,6 +28,7 @@ methods (
:math:`I` denotes ``image``, :math:`T` ``template``, :math:`R` ``result`` ). The summation is done over template and/or the
image patch:
:math:`x' = 0...w-1, y' = 0...h-1`
* method=CV\_TM\_SQDIFF
.. math::

Binary file not shown (19 KiB before, 12 KiB after).

Binary file not shown (83 KiB before, 71 KiB after).

Binary file not shown (16 KiB before, 11 KiB after).

@ -0,0 +1,91 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2014, Itseez, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
#ifdef HAVE_OPENCL
namespace cvtest {
namespace ocl {
///////////// HoughLines //////////////////////
struct Vec2fComparator
{
bool operator()(const Vec2f& a, const Vec2f& b) const
{
if(a[0] != b[0]) return a[0] < b[0];
else return a[1] < b[1];
}
};
typedef std::tr1::tuple<Size, double, double> ImageSize_RhoStep_ThetaStep_t;
typedef TestBaseWithParam<ImageSize_RhoStep_ThetaStep_t> HoughLinesFixture;
OCL_PERF_TEST_P(HoughLinesFixture, HoughLines, Combine(OCL_TEST_SIZES,
Values( 0.1, 1 ),
Values( CV_PI / 180.0, 0.1 )))
{
const Size srcSize = get<0>(GetParam());
double rhoStep = get<1>(GetParam());
double thetaStep = get<2>(GetParam());
int threshold = 250;
UMat usrc(srcSize, CV_8UC1), lines(1, 1, CV_32FC2);
Mat src(srcSize, CV_8UC1);
src.setTo(Scalar::all(0));
line(src, Point(0, 100), Point(src.cols, 100), Scalar::all(255), 1);
line(src, Point(0, 200), Point(src.cols, 200), Scalar::all(255), 1);
line(src, Point(0, 400), Point(src.cols, 400), Scalar::all(255), 1);
line(src, Point(100, 0), Point(100, src.rows), Scalar::all(255), 1);
line(src, Point(200, 0), Point(200, src.rows), Scalar::all(255), 1);
line(src, Point(400, 0), Point(400, src.rows), Scalar::all(255), 1);
src.copyTo(usrc);
declare.in(usrc).out(lines);
OCL_TEST_CYCLE() cv::HoughLines(usrc, lines, rhoStep, thetaStep, threshold);
Mat result;
lines.copyTo(result);
std::sort(result.begin<Vec2f>(), result.end<Vec2f>(), Vec2fComparator());
SANITY_CHECK(result, 1e-6);
}
///////////// HoughLinesP /////////////////////
typedef std::tr1::tuple<string, double, double> Image_RhoStep_ThetaStep_t;
typedef TestBaseWithParam<Image_RhoStep_ThetaStep_t> HoughLinesPFixture;
OCL_PERF_TEST_P(HoughLinesPFixture, HoughLinesP, Combine(Values("cv/shared/pic5.png", "stitching/a1.png"),
Values( 0.1, 1 ),
Values( CV_PI / 180.0, 0.1 )))
{
string filename = get<0>(GetParam());
double rhoStep = get<1>(GetParam());
double thetaStep = get<2>(GetParam());
int threshold = 100;
double minLineLength = 50, maxGap = 5;
Mat image = imread(getDataPath(filename), IMREAD_GRAYSCALE);
Canny(image, image, 50, 200, 3);
UMat usrc, lines(1, 1, CV_32SC4);
image.copyTo(usrc);
declare.in(usrc).out(lines);
OCL_TEST_CYCLE() cv::HoughLinesP(usrc, lines, rhoStep, thetaStep, threshold, minLineLength, maxGap);
EXPECT_NE((int) lines.total(), 0);
SANITY_CHECK_NOTHING();
}
} } // namespace cvtest::ocl
#endif // HAVE_OPENCL

@ -3206,9 +3206,9 @@ static bool ocl_filter2D( InputArray _src, OutputArray _dst, int ddepth,
src.locateROI(wholeSize, ofs);
}
size_t maxWorkItemSizes[32];
device.maxWorkItemSizes(maxWorkItemSizes);
size_t tryWorkItems = maxWorkItemSizes[0];
size_t tryWorkItems = device.maxWorkGroupSize();
if (device.isIntel() && 128 < tryWorkItems)
tryWorkItems = 128;
char cvt[2][40];
// For smaller filter kernels, there is a special kernel that is more
@ -3288,13 +3288,6 @@ static bool ocl_filter2D( InputArray _src, OutputArray _dst, int ddepth,
size_t BLOCK_SIZE = tryWorkItems;
while (BLOCK_SIZE > 32 && BLOCK_SIZE >= (size_t)ksize.width * 2 && BLOCK_SIZE > (size_t)sz.width * 2)
BLOCK_SIZE /= 2;
#if 1 // TODO The multi-block mode requires many more VGPRs, so this optimization is not practical for current devices
size_t BLOCK_SIZE_Y = 1;
#else
size_t BLOCK_SIZE_Y = 8; // TODO Check heuristic value on devices
while (BLOCK_SIZE_Y < BLOCK_SIZE / 8 && BLOCK_SIZE_Y * src.clCxt->getDeviceInfo().maxComputeUnits * 32 < (size_t)src.rows)
BLOCK_SIZE_Y *= 2;
#endif
if ((size_t)ksize.width > BLOCK_SIZE)
return false;
@ -3310,12 +3303,12 @@ static bool ocl_filter2D( InputArray _src, OutputArray _dst, int ddepth,
if ((w < ksize.width) || (h < ksize.height))
return false;
String opts = format("-D LOCAL_SIZE=%d -D BLOCK_SIZE_Y=%d -D cn=%d "
String opts = format("-D LOCAL_SIZE=%d -D cn=%d "
"-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
"-D KERNEL_SIZE_Y2_ALIGNED=%d -D %s -D %s -D %s%s%s "
"-D srcT=%s -D srcT1=%s -D dstT=%s -D dstT1=%s -D WT=%s -D WT1=%s "
"-D convertToWT=%s -D convertToDstT=%s",
(int)BLOCK_SIZE, (int)BLOCK_SIZE_Y, cn, anchor.x, anchor.y,
(int)BLOCK_SIZE, cn, anchor.x, anchor.y,
ksize.width, ksize.height, kernel_size_y2_aligned, borderMap[borderType],
extra_extrapolation ? "EXTRA_EXTRAPOLATION" : "NO_EXTRA_EXTRAPOLATION",
isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
@ -3327,7 +3320,7 @@ static bool ocl_filter2D( InputArray _src, OutputArray _dst, int ddepth,
localsize[0] = BLOCK_SIZE;
globalsize[0] = DIVUP(sz.width, BLOCK_SIZE - (ksize.width - 1)) * BLOCK_SIZE;
globalsize[1] = DIVUP(sz.height, BLOCK_SIZE_Y);
globalsize[1] = sz.height;
if (!k.create("filter2D", cv::ocl::imgproc::filter2D_oclsrc, opts))
return false;

@ -39,108 +39,94 @@
//
//M*/
#ifdef BORDER_REPLICATE
//BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (l_edge) : (i))
#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (r_edge)-1 : (addr))
#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (t_edge) :(i))
#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (b_edge)-1 :(addr))
#endif
#ifdef BORDER_REFLECT
//BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i)-1 : (i))
#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-1+((r_edge)<<1) : (addr))
#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i)-1 : (i))
#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-1+((b_edge)<<1) : (addr))
#endif
#ifdef BORDER_REFLECT_101
//BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i) : (i))
#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-2+((r_edge)<<1) : (addr))
#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i) : (i))
#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-2+((b_edge)<<1) : (addr))
#endif
//blur function does not support BORDER_WRAP
#ifdef BORDER_WRAP
//BORDER_WRAP: cdefgh|abcdefgh|abcdefg
#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (i)+(r_edge) : (i))
#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (i)-(r_edge) : (addr))
#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (i)+(b_edge) : (i))
#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (i)-(b_edge) : (addr))
#endif
#ifdef EXTRA_EXTRAPOLATION // border > src image size
#ifdef BORDER_CONSTANT
// None
// CCCCCC|abcdefgh|CCCCCCC
#define EXTRAPOLATE(x, minV, maxV)
#elif defined BORDER_REPLICATE
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \
// aaaaaa|abcdefgh|hhhhhhh
#define EXTRAPOLATE(x, minV, maxV) \
{ \
x = max(min(x, maxX - 1), minX); \
y = max(min(y, maxY - 1), minY); \
(x) = clamp((x), (minV), (maxV)-1); \
}
#elif defined BORDER_WRAP
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \
// cdefgh|abcdefgh|abcdefg
#define EXTRAPOLATE(x, minV, maxV) \
{ \
if (x < minX) \
x -= ((x - maxX + 1) / maxX) * maxX; \
if (x >= maxX) \
x %= maxX; \
if (y < minY) \
y -= ((y - maxY + 1) / maxY) * maxY; \
if (y >= maxY) \
y %= maxY; \
if ((x) < (minV)) \
(x) += ((maxV) - (minV)); \
if ((x) >= (maxV)) \
(x) -= ((maxV) - (minV)); \
}
#elif defined(BORDER_REFLECT) || defined(BORDER_REFLECT_101)
#define EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, delta) \
#elif defined BORDER_REFLECT
// fedcba|abcdefgh|hgfedcb
#define EXTRAPOLATE(x, minV, maxV) \
{ \
if (maxX - minX == 1) \
x = minX; \
if ((maxV) - (minV) == 1) \
(x) = (minV); \
else \
do \
while ((x) >= (maxV) || (x) < (minV)) \
{ \
if (x < minX) \
x = minX - (x - minX) - 1 + delta; \
if ((x) < (minV)) \
(x) = (minV) - ((x) - (minV)) - 1; \
else \
x = maxX - 1 - (x - maxX) - delta; \
(x) = (maxV) - 1 - ((x) - (maxV)); \
} \
while (x >= maxX || x < minX); \
\
if (maxY - minY == 1) \
y = minY; \
}
#elif defined BORDER_REFLECT_101 || defined BORDER_REFLECT101
// gfedcb|abcdefgh|gfedcba
#define EXTRAPOLATE(x, minV, maxV) \
{ \
if ((maxV) - (minV) == 1) \
(x) = (minV); \
else \
do \
while ((x) >= (maxV) || (x) < (minV)) \
{ \
if (y < minY) \
y = minY - (y - minY) - 1 + delta; \
if ((x) < (minV)) \
(x) = (minV) - ((x) - (minV)); \
else \
y = maxY - 1 - (y - maxY) - delta; \
(x) = (maxV) - 1 - ((x) - (maxV)) - 1; \
} \
while (y >= maxY || y < minY); \
}
#ifdef BORDER_REFLECT
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 0)
#elif defined(BORDER_REFLECT_101) || defined(BORDER_REFLECT101)
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 1)
#endif
#else
#error No extrapolation method
#endif
#else
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \
#ifdef BORDER_CONSTANT
// CCCCCC|abcdefgh|CCCCCCC
#define EXTRAPOLATE(x, minV, maxV)
#elif defined BORDER_REPLICATE
// aaaaaa|abcdefgh|hhhhhhh
#define EXTRAPOLATE(x, minV, maxV) \
{ \
(x) = clamp((x), (minV), (maxV)-1); \
}
#elif defined BORDER_WRAP
// cdefgh|abcdefgh|abcdefg
#define EXTRAPOLATE(x, minV, maxV) \
{ \
if ((x) < (minV)) \
(x) += (((minV) - (x)) / ((maxV) - (minV)) + 1) * ((maxV) - (minV)); \
if ((x) >= (maxV)) \
(x) = ((x) - (minV)) % ((maxV) - (minV)) + (minV); \
}
#elif defined BORDER_REFLECT
// fedcba|abcdefgh|hgfedcb
#define EXTRAPOLATE(x, minV, maxV) \
{ \
int _row = y - minY, _col = x - minX; \
_row = ADDR_H(_row, 0, maxY - minY); \
_row = ADDR_B(_row, maxY - minY, _row); \
y = _row + minY; \
\
_col = ADDR_L(_col, 0, maxX - minX); \
_col = ADDR_R(_col, maxX - minX, _col); \
x = _col + minX; \
(x) = clamp((x), 2 * (minV) - (x) - 1, 2 * (maxV) - (x) - 1); \
}
#elif defined BORDER_REFLECT_101 || defined BORDER_REFLECT101
// gfedcb|abcdefgh|gfedcba
#define EXTRAPOLATE(x, minV, maxV) \
{ \
(x) = clamp((x), 2 * (minV) - (x), 2 * (maxV) - (x) - 2); \
}
#else
#error No extrapolation method
#endif
#endif //EXTRA_EXTRAPOLATION
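As a quick sanity check of the reflect-101 clamp arithmetic above (valid when the overshoot is smaller than the image extent, i.e. the non-EXTRA_EXTRAPOLATION branch), a standalone host-side sketch, not part of the kernel:
#include <assert.h>

// gfedcb|abcdefgh|gfedcba with indices 0..7 standing for a..h
static int reflect101(int x, int minV, int maxV)
{
    if (x < minV)  x = 2 * minV - x;       // mirror around minV, skipping the edge
    if (x >= maxV) x = 2 * maxV - x - 2;   // mirror around maxV - 1
    return x;
}

int main()
{
    assert(reflect101(-2, 0, 8) == 2);  // two left of 'a' -> 'c'
    assert(reflect101( 9, 0, 8) == 5);  // two right of 'h' -> 'f'
    return 0;
}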
#ifdef DOUBLE_SUPPORT
#ifdef cl_amd_fp64
@ -162,43 +148,21 @@
#define DSTSIZE (int)sizeof(dstT1) * cn
#endif
#define noconvert
struct RectCoords
{
int x1, y1, x2, y2;
};
inline WT readSrcPixel(int2 pos, __global const uchar * srcptr, int src_step, const struct RectCoords srcCoords)
{
#ifdef BORDER_ISOLATED
if (pos.x >= srcCoords.x1 && pos.y >= srcCoords.y1 && pos.x < srcCoords.x2 && pos.y < srcCoords.y2)
#else
if (pos.x >= 0 && pos.y >= 0 && pos.x < srcCoords.x2 && pos.y < srcCoords.y2)
#endif
{
return convertToWT(loadpix(srcptr + mad24(pos.y, src_step, pos.x * SRCSIZE)));
}
else
{
#ifdef BORDER_CONSTANT
return (WT)(0);
#else
int selected_col = pos.x, selected_row = pos.y;
#define UPDATE_COLUMN_SUM(col) \
__constant WT1 * k = &kernelData[KERNEL_SIZE_Y2_ALIGNED * col]; \
WT tmp_sum = 0; \
for (int sy = 0; sy < KERNEL_SIZE_Y; sy++) \
tmp_sum += data[sy] * k[sy]; \
sumOfCols[local_id] = tmp_sum; \
barrier(CLK_LOCAL_MEM_FENCE);
EXTRAPOLATE(selected_col, selected_row,
#ifdef BORDER_ISOLATED
srcCoords.x1, srcCoords.y1,
#else
0, 0,
#endif
srcCoords.x2, srcCoords.y2
);
#define UPDATE_TOTAL_SUM(col) \
int id = local_id + col - ANCHOR_X; \
if (id >= 0 && id < LOCAL_SIZE) \
total_sum += sumOfCols[id]; \
barrier(CLK_LOCAL_MEM_FENCE);
return convertToWT(loadpix(srcptr + mad24(selected_row, src_step, selected_col * SRCSIZE)));
#endif
}
}
#define noconvert
#define DIG(a) a,
__constant WT1 kernelData[] = { COEFF };
@ -206,77 +170,71 @@ __constant WT1 kernelData[] = { COEFF };
__kernel void filter2D(__global const uchar * srcptr, int src_step, int srcOffsetX, int srcOffsetY, int srcEndX, int srcEndY,
__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols, float delta)
{
const struct RectCoords srcCoords = { srcOffsetX, srcOffsetY, srcEndX, srcEndY }; // for non-isolated border: offsetX, offsetY, wholeX, wholeY
int local_id = get_local_id(0);
int x = local_id + (LOCAL_SIZE - (KERNEL_SIZE_X - 1)) * get_group_id(0) - ANCHOR_X;
int y = get_global_id(1) * BLOCK_SIZE_Y;
int y = get_global_id(1);
WT data[KERNEL_SIZE_Y];
__local WT sumOfCols[LOCAL_SIZE];
int2 srcPos = (int2)(srcCoords.x1 + x, srcCoords.y1 + y - ANCHOR_Y);
#ifdef BORDER_ISOLATED
int srcBeginX = srcOffsetX;
int srcBeginY = srcOffsetY;
#else
int srcBeginX = 0;
int srcBeginY = 0;
#endif
int2 pos = (int2)(x, y);
__global dstT * dst = (__global dstT *)(dstptr + mad24(pos.y, dst_step, mad24(pos.x, DSTSIZE, dst_offset))); // Pointer can be out of bounds!
bool writeResult = local_id >= ANCHOR_X && local_id < LOCAL_SIZE - (KERNEL_SIZE_X - 1 - ANCHOR_X) &&
pos.x >= 0 && pos.x < cols;
int srcX = srcOffsetX + x;
int srcY = srcOffsetY + y - ANCHOR_Y;
#if BLOCK_SIZE_Y > 1
bool readAllpixels = true;
int sy_index = 0; // current index in data[] array
__global dstT *dst = (__global dstT *)(dstptr + mad24(y, dst_step, mad24(x, DSTSIZE, dst_offset))); // Pointer can be out of bounds!
dstRowsMax = min(rows, pos.y + BLOCK_SIZE_Y);
for ( ;
pos.y < dstRowsMax;
pos.y++, dst = (__global dstT *)((__global uchar *)dst + dst_step))
#endif
#ifdef BORDER_CONSTANT
if (srcX >= srcBeginX && srcX < srcEndX)
{
for (
#if BLOCK_SIZE_Y > 1
int sy = readAllpixels ? 0 : -1; sy < (readAllpixels ? KERNEL_SIZE_Y : 0);
#else
int sy = 0, sy_index = 0; sy < KERNEL_SIZE_Y;
#endif
sy++, srcPos.y++)
for (int sy = 0, sy_index = 0; sy < KERNEL_SIZE_Y; sy++, srcY++)
{
data[sy + sy_index] = readSrcPixel(srcPos, srcptr, src_step, srcCoords);
if (srcY >= srcBeginY && srcY < srcEndY)
data[sy + sy_index] = convertToWT(loadpix(srcptr + mad24(srcY, src_step, srcX * SRCSIZE)));
else
data[sy + sy_index] = (WT)(0);
}
WT total_sum = 0;
for (int sx = 0; sx < KERNEL_SIZE_X; sx++)
}
else
{
for (int sy = 0, sy_index = 0; sy < KERNEL_SIZE_Y; sy++, srcY++)
{
{
__constant WT1 * k = &kernelData[KERNEL_SIZE_Y2_ALIGNED * sx
#if BLOCK_SIZE_Y > 1
+ KERNEL_SIZE_Y - sy_index
data[sy + sy_index] = (WT)(0);
}
}
#else
EXTRAPOLATE(srcX, srcBeginX, srcEndX);
for (int sy = 0, sy_index = 0; sy < KERNEL_SIZE_Y; sy++, srcY++)
{
int tempY = srcY;
EXTRAPOLATE(tempY, srcBeginY, srcEndY);
data[sy + sy_index] = convertToWT(loadpix(srcptr + mad24(tempY, src_step, srcX * SRCSIZE)));
}
#endif
];
WT tmp_sum = 0;
for (int sy = 0; sy < KERNEL_SIZE_Y; sy++)
tmp_sum += data[sy] * k[sy];
sumOfCols[local_id] = tmp_sum;
barrier(CLK_LOCAL_MEM_FENCE);
}
int id = local_id + sx - ANCHOR_X;
if (id >= 0 && id < LOCAL_SIZE)
total_sum += sumOfCols[id];
barrier(CLK_LOCAL_MEM_FENCE);
}
WT total_sum = 0;
for (int sx = 0; sx < ANCHOR_X; sx++)
{
UPDATE_COLUMN_SUM(sx);
UPDATE_TOTAL_SUM(sx);
}
if (writeResult)
storepix(convertToDstT(total_sum + (WT)(delta)), dst);
__constant WT1 * k = &kernelData[KERNEL_SIZE_Y2_ALIGNED * ANCHOR_X];
for (int sy = 0; sy < KERNEL_SIZE_Y; sy++)
total_sum += data[sy] * k[sy];
#if BLOCK_SIZE_Y > 1
readAllpixels = false;
#if BLOCK_SIZE_Y > KERNEL_SIZE_Y
sy_index = sy_index + 1 <= KERNEL_SIZE_Y ? sy_index + 1 : 1;
#else
sy_index++;
#endif
#endif // BLOCK_SIZE_Y == 1
for (int sx = ANCHOR_X + 1; sx < KERNEL_SIZE_X; sx++)
{
UPDATE_COLUMN_SUM(sx);
UPDATE_TOTAL_SUM(sx);
}
if (local_id >= ANCHOR_X && local_id < LOCAL_SIZE - (KERNEL_SIZE_X - 1 - ANCHOR_X) && x >= 0 && x < cols)
storepix(convertToDstT(total_sum + (WT)(delta)), dst);
}

@ -161,7 +161,7 @@ __kernel void matchTemplate_Naive_CCORR(__global const uchar * srcptr, int src_s
for (int j = 0; j < template_cols; ++j)
{
T temp = (T)(template[j]);
T src = *(__global const T*)(srcptr + ind + j*(int)sizeof(T1));
T src = vload4(0, (__global const T1*)(srcptr + ind + j*(int)sizeof(T1)));
sum = mad(convertToWT(src), convertToWT(temp), sum);

@ -341,12 +341,9 @@ pyrUp_( const Mat& _src, Mat& _dst, int)
for( int y = 0; y < ssize.height; y++ )
{
T* dst0 = _dst.ptr<T>(y*2);
T* dst1 = _dst.ptr<T>(y*2+1);
T* dst1 = _dst.ptr<T>(std::min(y*2+1, dsize.height-1));
WT *row0, *row1, *row2;
if( y*2+1 >= dsize.height )
dst1 = dst0;
// fill the ring buffer (horizontal convolution and decimation)
for( ; sy <= y + 1; sy++ )
{

@ -2163,7 +2163,7 @@ void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
ippDataType, CV_MAT_CN(type), &bufSize) >= 0) \
{ \
Ipp8u * buffer = ippsMalloc_8u(bufSize); \
IppStatus status = ippiFilterMedianBorder_##flavor(src0.ptr<ippType>(), (int)src0.step, \
IppStatus status = ippiFilterMedianBorder_##flavor(src.ptr<ippType>(), (int)src.step, \
dst.ptr<ippType>(), (int)dst.step, dstRoiSize, maskSize, \
ippBorderRepl, (ippType)0, buffer); \
ippsFree(buffer); \
@ -2178,6 +2178,11 @@ void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
{
Ipp32s bufSize;
IppiSize dstRoiSize = ippiSize(dst.cols, dst.rows), maskSize = ippiSize(ksize, ksize);
Mat src;
if( dst.data != src0.data )
src = src0;
else
src0.copyTo(src);
int type = src0.type();
if (type == CV_8UC1)
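
The added block makes the IPP path safe for in-place calls: when the caller
passes the same Mat as source and destination, the input is deep-copied first,
since ippiFilterMedianBorder cannot filter in place. The same guard as a
standalone sketch, wrapped around a hypothetical out-of-place operation:

    #include <opencv2/core.hpp>

    // Hypothetical filter that, like the IPP call above, must not read and
    // write the same buffer.
    static void outOfPlaceInvert(const cv::Mat& src, cv::Mat& dst)
    {
        dst.create(src.size(), src.type());
        CV_Assert(src.data != dst.data);  // caller must guarantee no aliasing
        cv::subtract(cv::Scalar::all(255), src, dst);
    }

    void invertSafely(const cv::Mat& src0, cv::Mat& dst)
    {
        cv::Mat src;
        if (dst.data != src0.data)
            src = src0;        // no aliasing: share the input buffer
        else
            src0.copyTo(src);  // in-place call: filter from a deep copy
        outOfPlaceInvert(src, dst);
    }
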

@ -8,7 +8,7 @@ if(IOS OR NOT PYTHON_DEFAULT_AVAILABLE OR NOT ANT_EXECUTABLE OR NOT (JNI_FOUND O
endif()
set(the_description "The java bindings")
ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_imgcodecs opencv_videoio opencv_calib3d opencv_photo)
ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_imgcodecs opencv_videoio opencv_calib3d opencv_photo opencv_bioinspired)
ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/generator/src/cpp")
if(NOT ANDROID)

@ -1,4 +1,10 @@
ocv_check_dependencies(opencv_java ${OPENCV_MODULE_opencv_java_OPT_DEPS} ${OPENCV_MODULE_opencv_java_REQ_DEPS})
# list of modules covered by tests
set(tested_modules opencv_calib3d opencv_core opencv_features2d opencv_highgui opencv_imgproc opencv_objdetect opencv_photo opencv_video)
# opencv_ml is broken
#list(APPEND tested_modules opencv_ml)
ocv_check_dependencies(opencv_java ${tested_modules})
if(NOT OCV_DEPENDENCIES_FOUND OR NOT ANT_EXECUTABLE OR NOT ANDROID_EXECUTABLE OR NOT ANDROID_TOOLS_Pkg_Revision GREATER 13)
return()

@ -0,0 +1 @@
include/opencv2/bioinspired/retina.hpp

@ -1,12 +1,13 @@
#!/usr/bin/env python
import sys, re, os.path
import logging
from string import Template
try:
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
except:
from StringIO import StringIO
class_ignore_list = (
#core
@ -150,32 +151,18 @@ missing_consts = \
"Calib3d":
{
'private' :
'public' :
(
('CV_LMEDS', 4),
('CV_RANSAC', 8),
('CV_FM_LMEDS', 'CV_LMEDS'),
('CV_FM_RANSAC','CV_RANSAC'),
('CV_FM_7POINT', 1),
('CV_FM_8POINT', 2),
('CV_CALIB_USE_INTRINSIC_GUESS', 1),
('CV_CALIB_FIX_ASPECT_RATIO', 2),
('CV_CALIB_FIX_PRINCIPAL_POINT', 4),
('CV_CALIB_ZERO_TANGENT_DIST', 8),
('CV_CALIB_FIX_FOCAL_LENGTH', 16),
('CV_CALIB_FIX_K1', 32),
('CV_CALIB_FIX_K2', 64),
('CV_CALIB_FIX_K3', 128),
('CV_CALIB_FIX_K4', 2048),
('CV_CALIB_FIX_K5', 4096),
('CV_CALIB_FIX_K6', 8192),
('CV_CALIB_RATIONAL_MODEL', 16384),
('CV_CALIB_THIN_PRISM_MODEL',32768),
('CV_CALIB_FIX_S1_S2_S3_S4', 65536),
('CV_CALIB_FIX_INTRINSIC', 256),
('CV_CALIB_SAME_FOCAL_LENGTH', 512),
('CV_CALIB_ZERO_DISPARITY', 1024),
) # public
('CALIB_USE_INTRINSIC_GUESS', '1'),
('CALIB_RECOMPUTE_EXTRINSIC', '2'),
('CALIB_CHECK_COND', '4'),
('CALIB_FIX_SKEW', '8'),
('CALIB_FIX_K1', '16'),
('CALIB_FIX_K2', '32'),
('CALIB_FIX_K3', '64'),
('CALIB_FIX_K4', '128'),
('CALIB_FIX_INTRINSIC', '256')
)
}, # Calib3d
"Video":
@ -635,6 +622,51 @@ func_arg_fix = {
}, # '', i.e. no class
} # func_arg_fix
def objdump(obj):
attrs = ["%s='%s'" % (attr, getattr(obj, attr)) for attr in dir(obj) if not attr.startswith("__") and not hasattr(getattr(obj, attr), "__call__")]
return "%s [%s]" % (obj.__class__.__name__, " ; ".join(attrs))
class DebuggedObject(object):
def __init__(self):
pass
def __str__(self):
return objdump(self)
class GeneralInfo(DebuggedObject):
def __init__(self, name, namespaces):
DebuggedObject.__init__(self)
self.namespace, self.classpath, self.classname, self.name = self.parseName(name, namespaces)
def parseName(self, name, namespaces):
'''
input: full name and available namespaces
returns: (namespace, classpath, classname, name)
'''
name = name[name.find(" ")+1:].strip() # remove struct/class/const prefix
spaceName = ""
localName = name # <classes>.<name>
for namespace in sorted(namespaces, key=len, reverse=True):
if name.startswith(namespace + "."):
spaceName = namespace
localName = name.replace(namespace + ".", "")
break
pieces = localName.split(".")
if len(pieces) > 2: # <class>.<class>.<class>.<name>
return spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1]
elif len(pieces) == 2: # <class>.<name>
return spaceName, pieces[0], pieces[0], pieces[1]
elif len(pieces) == 1: # <name>
return spaceName, "", "", pieces[0]
else:
return spaceName, "", "" # error?!
def fullName(self, isCPP=False):
result = ".".join([self.fullClass(), self.name])
return result if not isCPP else result.replace(".", "::")
def fullClass(self, isCPP=False):
result = ".".join([f for f in [self.namespace] + self.classpath.split(".") if len(f)>0])
return result if not isCPP else result.replace(".", "::")
def getLibVersion(version_hpp_path):
version_file = open(version_hpp_path, "rt").read()
@ -644,26 +676,24 @@ def getLibVersion(version_hpp_path):
status = re.search("^W*#\W*define\W+CV_VERSION_STATUS\W+\"(.*?)\"\W*$", version_file, re.MULTILINE).group(1)
return (major, minor, revision, status)
class ConstInfo(object):
def __init__(self, cname, name, val, addedManually=False):
self.cname = cname
self.name = re.sub(r"^Cv", "", name)
self.value = val
class ConstInfo(GeneralInfo):
def __init__(self, decl, addedManually=False, namespaces=[]):
GeneralInfo.__init__(self, decl[0], namespaces)
self.cname = self.name.replace(".", "::")
self.value = decl[1]
self.addedManually = addedManually
class ClassPropInfo(object):
class ClassPropInfo(DebuggedObject):
def __init__(self, decl): # [f_ctype, f_name, '', '/RW']
DebuggedObject.__init__(self)
self.ctype = decl[0]
self.name = decl[1]
self.rw = "/RW" in decl[3]
class ClassInfo(object):
def __init__(self, decl): # [ 'class/struct cname', ': base', [modlist] ]
name = decl[0]
name = name[name.find(" ")+1:].strip()
self.cname = self.name = self.jname = re.sub(r"^cv\.", "", name)
self.cname = self.cname.replace(".", "::")
class ClassInfo(GeneralInfo):
def __init__(self, decl, namespaces=[]): # [ 'class/struct cname', ': base', [modlist] ]
GeneralInfo.__init__(self, decl[0], namespaces)
self.cname = self.name.replace(".", "::")
self.methods = {}
self.methods_suffixes = {}
self.consts = [] # using a list to save the occurrence order
@ -679,8 +709,9 @@ class ClassInfo(object):
#self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()
self.base = re.sub(r"^.*:", "", decl[1].split(",")[0]).strip().replace(self.jname, "")
class ArgInfo(object):
class ArgInfo(DebuggedObject):
def __init__(self, arg_tuple): # [ ctype, name, def val, [mod], argno ]
DebuggedObject.__init__(self)
self.pointer = False
ctype = arg_tuple[0]
if ctype.endswith("*"):
@ -700,18 +731,12 @@ class ArgInfo(object):
self.out = "IO"
class FuncInfo(object):
def __init__(self, decl): # [ funcname, return_ctype, [modifiers], [args] ]
name = re.sub(r"^cv\.", "", decl[0])
self.cname = name.replace(".", "::")
classname = ""
dpos = name.rfind(".")
if dpos >= 0:
classname = name[:dpos]
name = name[dpos+1:]
self.classname = classname
self.jname = self.name = name
if "[" in name:
class FuncInfo(GeneralInfo):
def __init__(self, decl, namespaces=[]): # [ funcname, return_ctype, [modifiers], [args] ]
GeneralInfo.__init__(self, decl[0], namespaces)
self.cname = self.name.replace(".", "::")
self.jname = self.name
if "[" in self.name:
self.jname = "getelem"
for m in decl[2]:
if m.startswith("="):
@ -719,26 +744,22 @@ class FuncInfo(object):
self.static = ["","static"][ "/S" in decl[2] ]
self.ctype = re.sub(r"^CvTermCriteria", "TermCriteria", decl[1] or "")
self.args = []
func_fix_map = func_arg_fix.get(classname, {}).get(self.jname, {})
func_fix_map = func_arg_fix.get(self.classname, {}).get(self.jname, {})
for a in decl[3]:
arg = a[:]
arg_fix_map = func_fix_map.get(arg[1], {})
arg[0] = arg_fix_map.get('ctype', arg[0]) #fixing arg type
arg[3] = arg_fix_map.get('attrib', arg[3]) #fixing arg attrib
ai = ArgInfo(arg)
self.args.append(ai)
self.args.append(ArgInfo(arg))
class FuncFamilyInfo(object):
def __init__(self, decl): # [ funcname, return_ctype, [modifiers], [args] ]
class FuncFamilyInfo(DebuggedObject):
def __init__(self, decl, namespaces=set()): # [ funcname, return_ctype, [modifiers], [args] ]
DebuggedObject.__init__(self)
self.funcs = []
self.funcs.append( FuncInfo(decl) )
self.funcs.append( FuncInfo(decl, namespaces) )
self.jname = self.funcs[0].jname
self.isconstructor = self.funcs[0].name == self.funcs[0].classname
def add_func(self, fi):
self.funcs.append( fi )
@ -758,6 +779,7 @@ class JavaWrapperGenerator(object):
self.def_args_hist = {} # { def_args_cnt : funcs_cnt }
self.classes_map = []
self.classes_simple = []
self.namespaces = set(["cv"])
def add_class_code_stream(self, class_name, cls_base = ''):
jname = self.classes[class_name].jname
@ -833,106 +855,118 @@ public class %(jc)s {
def add_class(self, decl):
classinfo = ClassInfo(decl)
logging.info("class: %s", decl)
classinfo = ClassInfo(decl, namespaces=self.namespaces)
if classinfo.name in class_ignore_list:
logging.info('ignored: %s', classinfo)
return
name = classinfo.name
if name in self.classes:
print "Generator error: class %s (%s) is duplicated" % \
(name, classinfo.cname)
print("Generator error: class %s (%s) is duplicated" % \
(name, classinfo.cname))
logging.info('duplicated: %s', classinfo)
return
self.classes[name] = classinfo
if name in type_dict:
print "Duplicated class: " + name
print("Duplicated class: " + name)
logging.info('duplicated: %s', classinfo)
return
if '/Simple' in decl[2]:
self.classes_simple.append(name)
if ('/Map' in decl[2]):
self.classes_map.append(name)
#adding default c-tor
ffi = FuncFamilyInfo(['cv.'+name+'.'+name, '', [], []])
ffi = FuncFamilyInfo(['cv.'+name+'.'+name, '', [], []], namespaces=self.namespaces)
classinfo.methods[ffi.jname] = ffi
type_dict[name] = \
{ "j_type" : classinfo.jname,
"jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),
"jni_name" : "(*("+name+"*)%(n)s_nativeObj)", "jni_type" : "jlong",
"jni_name" : "(*("+classinfo.fullName(isCPP=True)+"*)%(n)s_nativeObj)", "jni_type" : "jlong",
"suffix" : "J" }
type_dict[name+'*'] = \
{ "j_type" : classinfo.jname,
"jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),
"jni_name" : "("+name+"*)%(n)s_nativeObj", "jni_type" : "jlong",
"jni_name" : "("+classinfo.fullName(isCPP=True)+"*)%(n)s_nativeObj", "jni_type" : "jlong",
"suffix" : "J" }
# missing_consts { Module : { public : [[name, val],...], private : [[]...] } }
if name in missing_consts:
if 'private' in missing_consts[name]:
for (n, val) in missing_consts[name]['private']:
classinfo.private_consts.append( ConstInfo(n, n, val, True) )
classinfo.private_consts.append( ConstInfo([n, val], addedManually=True) )
if 'public' in missing_consts[name]:
for (n, val) in missing_consts[name]['public']:
classinfo.consts.append( ConstInfo(n, n, val, True) )
classinfo.consts.append( ConstInfo([n, val], addedManually=True) )
# class props
for p in decl[3]:
if True: #"vector" not in p[0]:
classinfo.props.append( ClassPropInfo(p) )
else:
print "Skipped property: [%s]" % name, p
print("Skipped property: [%s]" % name, p)
self.add_class_code_stream(name, classinfo.base)
if classinfo.base:
self.get_imports(name, classinfo.base)
type_dict["Ptr_"+name] = \
{ "j_type" : name,
"jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),
"jni_name" : "Ptr<"+name+">(("+name+"*)%(n)s_nativeObj)", "jni_type" : "jlong",
"suffix" : "J" }
logging.info('ok: %s', classinfo)
def add_const(self, decl): # [ "const cname", val, [], [] ]
name = decl[0].replace("const ", "").strip()
name = re.sub(r"^cv\.", "", name)
cname = name.replace(".", "::")
logging.info("constant: %s", decl)
constinfo = ConstInfo(decl, namespaces=self.namespaces)
for c in const_ignore_list:
if re.match(c, name):
if re.match(c, constinfo.name):
logging.info('ignored: %s', constinfo)
return
# class member?
dpos = name.rfind(".")
if dpos >= 0:
classname = name[:dpos]
name = name[dpos+1:]
else:
classname = self.Module
if classname not in self.classes:
if len(constinfo.classname) == 0:
constinfo.classname = self.Module
if constinfo.classname not in self.classes:
# this class isn't wrapped
# skipping this const
logging.info('class not found: %s', constinfo)
return
consts = self.classes[classname].consts
consts = self.classes[constinfo.classname].consts
for c in const_private_list:
if re.match(c, name):
consts = self.classes[classname].private_consts
if re.match(c, constinfo.name):
consts = self.classes[constinfo.classname].private_consts
break
constinfo = ConstInfo(cname, name, decl[1])
# checking duplication
for list in self.classes[classname].consts, self.classes[classname].private_consts:
for list in self.classes[constinfo.classname].consts, self.classes[constinfo.classname].private_consts:
for c in list:
if c.name == constinfo.name:
if c.addedManually:
logging.info('manual: %s', constinfo)
return
print "Generator error: constant %s (%s) is duplicated" \
% (constinfo.name, constinfo.cname)
sys.exit(-1)
print("Generator error: constant %s (%s) is duplicated" \
% (constinfo.name, constinfo.cname))
logging.info('duplicated: %s', constinfo)
return
consts.append(constinfo)
logging.info('ok: %s', constinfo)
def add_func(self, decl):
ffi = FuncFamilyInfo(decl)
logging.info("function: %s", decl)
ffi = FuncFamilyInfo(decl, namespaces=self.namespaces)
classname = ffi.funcs[0].classname or self.Module
if classname in class_ignore_list:
logging.info('ignored: %s', ffi)
return
if classname in ManualFuncs and ffi.jname in ManualFuncs[classname]:
logging.info('manual: %s', ffi)
return
if classname not in self.classes:
print "Generator error: the class %s for method %s is missing" % \
(classname, ffi.jname)
sys.exit(-1)
print("Generator error: the class %s for method %s is missing" % \
(classname, ffi.jname))
logging.info('not found: %s', ffi)
return
func_map = self.classes[classname].methods
if ffi.jname in func_map:
func_map[ffi.jname].add_func(ffi.funcs[0])
@ -941,6 +975,7 @@ public class %(jc)s {
# calc args with def val
cnt = len([a for a in ffi.funcs[0].args if a.defval])
self.def_args_hist[cnt] = self.def_args_hist.get(cnt, 0) + 1
logging.info('ok: %s', ffi)
def save(self, path, buf):
f = open(path, "wt")
@ -958,6 +993,9 @@ public class %(jc)s {
# scan the headers and build more descriptive maps of classes, consts, functions
for hdr in srcfiles:
decls = parser.parse(hdr)
self.namespaces = parser.namespaces
logging.info("=== Header: %s", hdr)
logging.info("=== Namespaces: %s", parser.namespaces)
for decl in decls:
name = decl[0]
if name.startswith("struct") or name.startswith("class"):
@ -1049,7 +1087,7 @@ extern "C" {
self.save(output_path+"/"+module+".txt", report.getvalue())
#print "Done %i of %i funcs." % (len(self.ported_func_list), len(self.ported_func_list)+ len(self.skipped_func_list))
#print("Done %i of %i funcs." % (len(self.ported_func_list), len(self.ported_func_list)+ len(self.skipped_func_list)))
@ -1082,7 +1120,11 @@ extern "C" {
imports.add("java.lang.String")
return
def fullTypeName(self, t):
if t in self.classes:
return self.classes[t].fullName(isCPP=True)
else:
return t
def gen_func(self, fi, prop_name=''):
j_code = self.java_code[fi.classname or self.Module]["j_code"]
@ -1114,7 +1156,7 @@ extern "C" {
msg = "// Return type '%s' is not supported, skipping the function\n\n" % fi.ctype
self.skipped_func_list.append(c_decl + "\n" + msg)
j_code.write( " "*4 + msg )
print "SKIP:", c_decl.strip(), "\t due to RET type", fi.ctype
print("SKIP:", c_decl.strip(), "\t due to RET type", fi.ctype)
return
for a in fi.args:
if a.ctype not in type_dict:
@ -1126,7 +1168,7 @@ extern "C" {
msg = "// Unknown type '%s' (%s), skipping the function\n\n" % (a.ctype, a.out or "I")
self.skipped_func_list.append(c_decl + "\n" + msg)
j_code.write( " "*4 + msg )
print "SKIP:", c_decl.strip(), "\t due to ARG type", a.ctype, "/" + (a.out or "I")
print("SKIP:", c_decl.strip(), "\t due to ARG type", a.ctype, "/" + (a.out or "I"))
return
self.ported_func_list.append(c_decl)
@ -1256,6 +1298,9 @@ extern "C" {
j_prologue.append( j_type + ' retVal = new Array' + j_type+'();')
self.classes[fi.classname or self.Module].imports.add('java.util.ArrayList')
j_epilogue.append('Converters.Mat_to_' + ret_type + '(retValMat, retVal);')
elif ret_type.startswith("Ptr_"):
ret_val = type_dict[fi.ctype]["j_type"] + " retVal = new " + type_dict[ret_type]["j_type"] + "("
tail = ")"
elif ret_type == "void":
ret_val = ""
ret = "return;"
@ -1327,7 +1372,10 @@ extern "C" {
ret = "return env->NewStringUTF(_retval_.c_str());"
default = 'return env->NewStringUTF("");'
elif fi.ctype in self.classes: # wrapped class:
ret = "return (jlong) new %s(_retval_);" % fi.ctype
ret = "return (jlong) new %s(_retval_);" % self.fullTypeName(fi.ctype)
elif fi.ctype.startswith('Ptr_'):
c_prologue.append("typedef Ptr<%s> %s;" % (self.fullTypeName(fi.ctype[4:]), fi.ctype))
ret = "return (jlong)(new %(ctype)s(_retval_));" % { 'ctype':fi.ctype }
elif ret_type in self.classes: # pointer to wrapped class:
ret = "return (jlong) _retval_;"
elif type_dict[fi.ctype]["jni_type"] == "jdoubleArray":
@ -1341,8 +1389,8 @@ extern "C" {
else:
name = prop_name + ";//"
cvname = "cv::" + name
retval = fi.ctype + " _retval_ = "
cvname = fi.fullName(isCPP=True)
retval = self.fullTypeName(fi.ctype) + " _retval_ = "
if fi.ctype == "void":
retval = ""
elif fi.ctype == "String":
@ -1351,17 +1399,17 @@ extern "C" {
retval = type_dict[fi.ctype]['jni_var'] % {"n" : '_ret_val_vector_'} + " = "
c_epilogue.append("Mat* _retval_ = new Mat();")
c_epilogue.append(fi.ctype+"_to_Mat(_ret_val_vector_, *_retval_);")
if fi.classname:
if len(fi.classname)>0:
if not fi.ctype: # c-tor
retval = fi.classname + "* _retval_ = "
cvname = "new " + fi.classname
retval = fi.fullClass(isCPP=True) + "* _retval_ = "
cvname = "new " + fi.fullClass(isCPP=True)
elif fi.static:
cvname = "%s::%s" % (fi.classname, name)
cvname = fi.fullName(isCPP=True)
else:
cvname = "me->" + name
cvname = ("me->" if not self.isSmartClass(fi.classname) else "(*me)->") + name
c_prologue.append(\
"%(cls)s* me = (%(cls)s*) self; //TODO: check for NULL" \
% { "cls" : fi.classname} \
% { "cls" : self.smartWrap(fi.classname, fi.fullClass(isCPP=True))} \
)
cvargs = []
for a in args:
@ -1450,8 +1498,7 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname
%s;\n\n""" % (",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in ci.consts])
)
# c-tors
fflist = ci.methods.items()
fflist.sort()
fflist = sorted(ci.methods.items())
for n, ffi in fflist:
if ffi.isconstructor:
for fi in ffi.funcs:
@ -1465,15 +1512,13 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname
# props
for pi in ci.props:
# getter
getter_name = name + ".get_" + pi.name
#print getter_name
fi = FuncInfo( [getter_name, pi.ctype, [], []] ) # [ funcname, return_ctype, [modifiers], [args] ]
getter_name = ci.fullName() + ".get_" + pi.name
fi = FuncInfo( [getter_name, pi.ctype, [], []], self.namespaces ) # [ funcname, return_ctype, [modifiers], [args] ]
self.gen_func(fi, pi.name)
if pi.rw:
#setter
setter_name = name + ".set_" + pi.name
#print setter_name
fi = FuncInfo( [ setter_name, "void", [], [ [pi.ctype, pi.name, "", [], ""] ] ] )
setter_name = ci.fullName() + ".set_" + pi.name
fi = FuncInfo( [ setter_name, "void", [], [ [pi.ctype, pi.name, "", [], ""] ] ], self.namespaces)
self.gen_func(fi, pi.name)
# manual ports
@ -1514,16 +1559,30 @@ JNIEXPORT void JNICALL Java_org_opencv_%(module)s_%(j_cls)s_delete
delete (%(cls)s*) self;
}
""" % {"module" : module, "cls" : name, "j_cls" : ci.jname.replace('_', '_1')}
""" % {"module" : module, "cls" : self.smartWrap(ci.name, ci.fullName(isCPP=True)), "j_cls" : ci.jname.replace('_', '_1')}
)
def isSmartClass(self, classname):
'''
Check if the class stores Ptr<T>* instead of T* in the nativeObj field
'''
return classname in self.classes and self.classes[classname].base
def smartWrap(self, name, fullname):
'''
Wraps fullname with Ptr<> if needed
'''
if self.isSmartClass(name):
return "Ptr<" + fullname + ">"
return fullname
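
For reference, the difference that isSmartClass() controls in the generated JNI
bodies ("me->" vs "(*me)->"), shown as a compilable C++ sketch; Widget is a
hypothetical wrapped class:

    #include <opencv2/core.hpp>

    struct Widget { int area() const { return 42; } };

    // Plain class: nativeObj holds a raw Widget*.
    static int callPlain(long long self)
    {
        Widget* me = (Widget*) self; //TODO: check for NULL
        return me->area();
    }

    // Smart class: nativeObj holds a Ptr<Widget>*, so the call dereferences twice.
    static int callSmart(long long self)
    {
        cv::Ptr<Widget>* me = (cv::Ptr<Widget>*) self; //TODO: check for NULL
        return (*me)->area();
    }
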
if __name__ == "__main__":
if len(sys.argv) < 4:
print "Usage:\n", \
print("Usage:\n", \
os.path.basename(sys.argv[0]), \
"<full path to hdr_parser.py> <module name> <C++ header> [<C++ header>...]"
print "Current args are: ", ", ".join(["'"+a+"'" for a in sys.argv])
"<full path to hdr_parser.py> <module name> <C++ header> [<C++ header>...]")
print("Current args are: ", ", ".join(["'"+a+"'" for a in sys.argv]))
exit(0)
dstdir = "."
@ -1534,6 +1593,7 @@ if __name__ == "__main__":
import hdr_parser
module = sys.argv[2]
srcfiles = sys.argv[3:]
#print "Generating module '" + module + "' from headers:\n\t" + "\n\t".join(srcfiles)
logging.basicConfig(filename='%s/%s.log' % (dstdir, module), filemode='w', level=logging.INFO)
#print("Generating module '" + module + "' from headers:\n\t" + "\n\t".join(srcfiles))
generator = JavaWrapperGenerator()
generator.gen(srcfiles, module, dstdir)

@ -1,4 +1,10 @@
ocv_check_dependencies(opencv_java ${OPENCV_MODULE_opencv_java_OPT_DEPS} ${OPENCV_MODULE_opencv_java_REQ_DEPS})
# list of modules covered by tests
set(tested_modules opencv_calib3d opencv_core opencv_features2d opencv_highgui opencv_imgproc opencv_objdetect opencv_photo opencv_video)
# opencv_ml is broken
#list(APPEND tested_modules opencv_ml)
ocv_check_dependencies(opencv_java ${tested_modules})
if(NOT OCV_DEPENDENCIES_FOUND)
return()

@ -12,6 +12,7 @@
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -228,10 +229,12 @@ public:
class CV_EXPORTS_W_MAP Params
{
public:
Params(int defaultK=10, bool isclassifier=true);
Params(int defaultK=10, bool isclassifier_=true, int Emax_=INT_MAX, int algorithmType_=BRUTE_FORCE);
CV_PROP_RW int defaultK;
CV_PROP_RW bool isclassifier;
CV_PROP_RW int Emax; // for implementation with KDTree
CV_PROP_RW int algorithmType;
};
virtual void setParams(const Params& p) = 0;
virtual Params getParams() const = 0;
@ -239,6 +242,9 @@ public:
OutputArray results,
OutputArray neighborResponses=noArray(),
OutputArray dist=noArray() ) const = 0;
enum { BRUTE_FORCE=1, KDTREE=2 };
static Ptr<KNearest> create(const Params& params=Params());
};
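
With the extended Params and the create() factory, callers pick the KD-tree
backend explicitly. A sketch mirroring the updated test further down (the
input Mats are assumed to be CV_32F, supplied by the caller):

    #include <climits>
    #include <opencv2/ml.hpp>

    static cv::Mat classifyWithKdTree(const cv::Mat& trainData, const cv::Mat& trainLabels,
                                      const cv::Mat& testData)
    {
        using namespace cv::ml;
        cv::Ptr<KNearest> knn = KNearest::create(
            KNearest::Params(10, true, INT_MAX, KNearest::KDTREE));
        knn->train(trainData, ROW_SAMPLE, trainLabels);
        cv::Mat bestLabels;
        knn->findNearest(testData, 4, bestLabels);  // vote among the 4 nearest neighbors
        return bestLabels;
    }
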

@ -268,6 +268,7 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
sample_idx = cvCreateMat( 1, sample_idx_len, CV_32S );
for (int i=0; i<sample_idx_len; ++i)
sample_idx->data.i[i] = _sample_idx->data.i[i];
std::sort(sample_idx->data.i, sample_idx->data.i + sample_idx_len);
} break;
case CV_8S:
case CV_8U:
@ -284,7 +285,6 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
} break;
default: CV_Error(CV_StsUnmatchedFormats, "_sample_idx should be a 32sC1, 8sC1 or 8uC1 vector.");
}
std::sort(sample_idx->data.fl, sample_idx->data.fl + sample_idx_len);
}
else
{

@ -13,6 +13,7 @@
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -42,13 +43,14 @@
//M*/
#include "precomp.hpp"
#include "kdtree.hpp"
namespace cv
{
namespace ml
{
// This is a reimplementation of kd-trees from cvkdtree*.* by Xavier Delacour, cleaned-up and
// adapted to work with the new OpenCV data structures. It's in cxcore to be shared by
// both cv (CvFeatureTree) and ml (kNN).
// adapted to work with the new OpenCV data structures.
// The algorithm is taken from:
// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
@ -529,3 +531,4 @@ int KDTree::dims() const
}
}
}

@ -0,0 +1,97 @@
#ifndef KDTREE_H
#define KDTREE_H
#include "precomp.hpp"
namespace cv
{
namespace ml
{
/*!
Fast Nearest Neighbor Search Class.
The class implements D. Lowe's BBF (Best-Bin-First) algorithm for the fast
approximate (or accurate) nearest neighbor search in multi-dimensional spaces.
First, a set of vectors is passed to KDTree::KDTree() constructor
or KDTree::build() method, where it is reordered.
Then arbitrary vectors can be passed to KDTree::findNearest() methods, which
find the K nearest neighbors among the vectors from the initial set.
The user can balance between the speed and accuracy of the search by varying the Emax
parameter, which is the number of leaves that the algorithm checks.
Larger parameter values yield more accurate results at the expense of lower processing speed.
\code
KDTree T(points, false);
const int K = 3, Emax = INT_MAX;
int idx[K];
float dist[K];
T.findNearest(query_vec, K, Emax, idx, 0, dist);
CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]);
\endcode
*/
class CV_EXPORTS_W KDTree
{
public:
/*!
The node of the search tree.
*/
struct Node
{
Node() : idx(-1), left(-1), right(-1), boundary(0.f) {}
Node(int _idx, int _left, int _right, float _boundary)
: idx(_idx), left(_left), right(_right), boundary(_boundary) {}
//! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point)
int idx;
//! node indices of the left and the right branches
int left, right;
//! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right
float boundary;
};
//! the default constructor
CV_WRAP KDTree();
//! the full constructor that builds the search tree
CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints = false);
//! the full constructor that builds the search tree
CV_WRAP KDTree(InputArray points, InputArray _labels,
bool copyAndReorderPoints = false);
//! builds the search tree
CV_WRAP void build(InputArray points, bool copyAndReorderPoints = false);
//! builds the search tree
CV_WRAP void build(InputArray points, InputArray labels,
bool copyAndReorderPoints = false);
//! finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves
CV_WRAP int findNearest(InputArray vec, int K, int Emax,
OutputArray neighborsIdx,
OutputArray neighbors = noArray(),
OutputArray dist = noArray(),
OutputArray labels = noArray()) const;
//! finds all the points from the initial set that belong to the specified box
CV_WRAP void findOrthoRange(InputArray minBounds,
InputArray maxBounds,
OutputArray neighborsIdx,
OutputArray neighbors = noArray(),
OutputArray labels = noArray()) const;
//! returns vectors with the specified indices
CV_WRAP void getPoints(InputArray idx, OutputArray pts,
OutputArray labels = noArray()) const;
//! returns a pointer to the vector with the specified index
const float* getPoint(int ptidx, int* label = 0) const;
//! returns the search space dimensionality
CV_WRAP int dims() const;
std::vector<Node> nodes; //!< all the tree nodes
CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set.
CV_PROP std::vector<int> labels; //!< the parallel array of labels.
CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it
CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it
};
}
}
#endif

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "kdtree.hpp"
/****************************************************************************************\
* K-Nearest Neighbors Classifier *
@ -49,13 +50,14 @@
namespace cv {
namespace ml {
KNearest::Params::Params(int k, bool isclassifier_)
KNearest::Params::Params(int k, bool isclassifier_, int Emax_, int algorithmType_) :
defaultK(k),
isclassifier(isclassifier_),
Emax(Emax_),
algorithmType(algorithmType_)
{
defaultK = k;
isclassifier = isclassifier_;
}
class KNearestImpl : public KNearest
{
public:
@ -352,8 +354,156 @@ public:
Params params;
};
class KNearestKDTreeImpl : public KNearest
{
public:
KNearestKDTreeImpl(const Params& p)
{
params = p;
}
virtual ~KNearestKDTreeImpl() {}
Params getParams() const { return params; }
void setParams(const Params& p) { params = p; }
bool isClassifier() const { return params.isclassifier; }
bool isTrained() const { return !samples.empty(); }
String getDefaultModelName() const { return "opencv_ml_knn_kd"; }
void clear()
{
samples.release();
responses.release();
}
int getVarCount() const { return samples.cols; }
bool train( const Ptr<TrainData>& data, int flags )
{
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
Mat new_responses;
data->getTrainResponses().convertTo(new_responses, CV_32F);
bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();
CV_Assert( new_samples.type() == CV_32F );
if( !update )
{
clear();
}
else
{
CV_Assert( new_samples.cols == samples.cols &&
new_responses.cols == responses.cols );
}
samples.push_back(new_samples);
responses.push_back(new_responses);
tr.build(samples);
return true;
}
float findNearest( InputArray _samples, int k,
OutputArray _results,
OutputArray _neighborResponses,
OutputArray _dists ) const
{
float result = 0.f;
CV_Assert( 0 < k );
Mat test_samples = _samples.getMat();
CV_Assert( test_samples.type() == CV_32F && test_samples.cols == samples.cols );
int testcount = test_samples.rows;
if( testcount == 0 )
{
_results.release();
_neighborResponses.release();
_dists.release();
return 0.f;
}
Mat res, nr, d;
if( _results.needed() )
{
_results.create(testcount, 1, CV_32F);
res = _results.getMat();
}
if( _neighborResponses.needed() )
{
_neighborResponses.create(testcount, k, CV_32F);
nr = _neighborResponses.getMat();
}
if( _dists.needed() )
{
_dists.create(testcount, k, CV_32F);
d = _dists.getMat();
}
for (int i=0; i<test_samples.rows; ++i)
{
Mat _res, _nr, _d;
if (res.rows>i)
{
_res = res.row(i);
}
if (nr.rows>i)
{
_nr = nr.row(i);
}
if (d.rows>i)
{
_d = d.row(i);
}
tr.findNearest(test_samples.row(i), k, params.Emax, _res, _nr, _d, noArray());
}
return result; // currently always 0
}
float predict(InputArray inputs, OutputArray outputs, int) const
{
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
}
void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)params.isclassifier;
fs << "default_k" << params.defaultK;
fs << "samples" << samples;
fs << "responses" << responses;
}
void read( const FileNode& fn )
{
clear();
params.isclassifier = (int)fn["is_classifier"] != 0;
params.defaultK = (int)fn["default_k"];
fn["samples"] >> samples;
fn["responses"] >> responses;
}
KDTree tr;
Mat samples;
Mat responses;
Params params;
};
Ptr<KNearest> KNearest::create(const Params& p)
{
if (KDTREE==p.algorithmType)
{
return makePtr<KNearestKDTreeImpl>(p);
}
return makePtr<KNearestImpl>(p);
}

@ -312,9 +312,11 @@ void CV_KNearestTest::run( int /*start_from*/ )
generateData( testData, testLabels, sizes, means, covs, CV_32FC1, CV_32FC1 );
int code = cvtest::TS::OK;
Ptr<KNearest> knearest = KNearest::create(true);
knearest->train(trainData, cv::ml::ROW_SAMPLE, trainLabels);
knearest->findNearest( testData, 4, bestLabels);
// KNearest default implementation
Ptr<KNearest> knearest = KNearest::create();
knearest->train(trainData, ml::ROW_SAMPLE, trainLabels);
knearest->findNearest(testData, 4, bestLabels);
float err;
if( !calcErr( bestLabels, testLabels, sizes, err, true ) )
{
@ -326,6 +328,22 @@ void CV_KNearestTest::run( int /*start_from*/ )
ts->printf( cvtest::TS::LOG, "Bad accuracy (%f) on test data.\n", err );
code = cvtest::TS::FAIL_BAD_ACCURACY;
}
// KNearest KDTree implementation
Ptr<KNearest> knearestKdt = KNearest::create(ml::KNearest::Params(10, true, INT_MAX, ml::KNearest::KDTREE));
knearestKdt->train(trainData, ml::ROW_SAMPLE, trainLabels);
knearestKdt->findNearest(testData, 4, bestLabels);
if( !calcErr( bestLabels, testLabels, sizes, err, true ) )
{
ts->printf( cvtest::TS::LOG, "Bad output labels.\n" );
code = cvtest::TS::FAIL_INVALID_OUTPUT;
}
else if( err > 0.01f )
{
ts->printf( cvtest::TS::LOG, "Bad accuracy (%f) on test data.\n", err );
code = cvtest::TS::FAIL_BAD_ACCURACY;
}
ts->set_failed_test_info( code );
}

@ -18,6 +18,8 @@ ocv_list_filterout(candidate_deps "^opencv_matlab$")
ocv_list_filterout(candidate_deps "^opencv_ts$")
ocv_list_filterout(candidate_deps "^opencv_adas$")
ocv_list_filterout(candidate_deps "^opencv_tracking$")
ocv_list_filterout(candidate_deps "^opencv_bioinspired$")
ocv_list_filterout(candidate_deps "^opencv_java$")
ocv_add_module(${MODULE_NAME} BINDINGS OPTIONAL ${candidate_deps})

@ -219,7 +219,7 @@ static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
if( PyInt_Check(o) )
{
double v[] = {PyInt_AsLong((PyObject*)o), 0., 0., 0.};
double v[] = {(double)PyInt_AsLong((PyObject*)o), 0., 0., 0.};
m = Mat(4, 1, CV_64F, v).clone();
return true;
}

@ -629,7 +629,14 @@ void waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind)
for (size_t i = 0; i < rmats.size(); ++i)
img_k += rmats[i].col(2);
Mat rg0 = rg1.cross(img_k);
rg0 /= norm(rg0);
double rg0_norm = norm(rg0);
if( rg0_norm <= DBL_MIN )
{
return;
}
rg0 /= rg0_norm;
Mat rg2 = rg0.cross(rg1);
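
The new check avoids dividing by a (near-)zero norm when the cross product
degenerates, which would otherwise fill rg0 with NaN/Inf. The same guard as a
standalone sketch, assuming the caller treats false as "leave input unchanged":

    #include <cfloat>
    #include <opencv2/core.hpp>

    static bool normalizeDirection(cv::Mat& v)
    {
        double n = cv::norm(v);
        if (n <= DBL_MIN)   // degenerate direction: dividing would blow up
            return false;
        v /= n;
        return true;
    }
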

@ -129,16 +129,16 @@ public:
//! updates the predicted state from the measurement
CV_WRAP const Mat& correct( const Mat& measurement );
Mat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k)
Mat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
Mat transitionMatrix; //!< state transition matrix (A)
Mat controlMatrix; //!< control matrix (B) (not used if there is no control)
Mat measurementMatrix; //!< measurement matrix (H)
Mat processNoiseCov; //!< process noise covariance matrix (Q)
Mat measurementNoiseCov;//!< measurement noise covariance matrix (R)
Mat errorCovPre; //!< a priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q
Mat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
Mat errorCovPost; //!< a posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)
CV_PROP_RW Mat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k)
CV_PROP_RW Mat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
CV_PROP_RW Mat transitionMatrix; //!< state transition matrix (A)
CV_PROP_RW Mat controlMatrix; //!< control matrix (B) (not used if there is no control)
CV_PROP_RW Mat measurementMatrix; //!< measurement matrix (H)
CV_PROP_RW Mat processNoiseCov; //!< process noise covariance matrix (Q)
CV_PROP_RW Mat measurementNoiseCov;//!< measurement noise covariance matrix (R)
CV_PROP_RW Mat errorCovPre; //!< a priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q
CV_PROP_RW Mat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
CV_PROP_RW Mat errorCovPost; //!< a posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)
// temporary matrices
Mat temp1;

@ -188,10 +188,11 @@ public:
int nchannels = CV_MAT_CN(frameType);
CV_Assert( nchannels <= CV_CN_MAX );
CV_Assert( nmixtures <= 255);
if (ocl::useOpenCL() && opencl_ON)
{
kernel_apply.create("mog2_kernel", ocl::video::bgfg_mog2_oclsrc, format("-D CN=%d -D NMIXTURES=%d", nchannels, nmixtures));
create_ocl_apply_kernel();
kernel_getBg.create("getBackgroundImage2_kernel", ocl::video::bgfg_mog2_oclsrc, format( "-D CN=%d -D NMIXTURES=%d", nchannels, nmixtures));
if (kernel_apply.empty() || kernel_getBg.empty())
@ -213,7 +214,7 @@ public:
u_mean.setTo(Scalar::all(0));
//make the array for keeping track of the used modes per pixel - all zeros at start
u_bgmodelUsedModes.create(frameSize, CV_32FC1);
u_bgmodelUsedModes.create(frameSize, CV_8UC1);
u_bgmodelUsedModes.setTo(cv::Scalar::all(0));
}
else
@ -259,7 +260,17 @@ public:
virtual void setComplexityReductionThreshold(double ct) { fCT = (float)ct; }
virtual bool getDetectShadows() const { return bShadowDetection; }
virtual void setDetectShadows(bool detectshadows) { bShadowDetection = detectshadows; }
virtual void setDetectShadows(bool detectshadows)
{
if ((bShadowDetection && detectshadows) || (!bShadowDetection && !detectshadows))
return;
bShadowDetection = detectshadows;
if (!kernel_apply.empty())
{
create_ocl_apply_kernel();
CV_Assert( !kernel_apply.empty() );
}
}
virtual int getShadowValue() const { return nShadowDetection; }
virtual void setShadowValue(int value) { nShadowDetection = (uchar)value; }
@ -372,6 +383,7 @@ protected:
bool ocl_getBackgroundImage(OutputArray backgroundImage) const;
bool ocl_apply(InputArray _image, OutputArray _fgmask, double learningRate=-1);
void create_ocl_apply_kernel();
};
struct GaussBGStatModel2Params
@ -745,16 +757,11 @@ bool BackgroundSubtractorMOG2Impl::ocl_apply(InputArray _image, OutputArray _fgm
learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./std::min( 2*nframes, history );
CV_Assert(learningRate >= 0);
UMat fgmask(_image.size(), CV_32SC1);
fgmask.setTo(cv::Scalar::all(1));
_fgmask.create(_image.size(), CV_8U);
UMat fgmask = _fgmask.getUMat();
const double alpha1 = 1.0f - learningRate;
int detectShadows_flag = 0;
if(bShadowDetection)
detectShadows_flag = 1;
UMat frame = _image.getUMat();
float varMax = MAX(fVarMin, fVarMax);
@ -762,16 +769,15 @@ bool BackgroundSubtractorMOG2Impl::ocl_apply(InputArray _image, OutputArray _fgm
int idxArg = 0;
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::ReadOnly(frame));
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::ReadWriteNoSize(u_bgmodelUsedModes));
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::ReadWriteNoSize(u_weight));
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::ReadWriteNoSize(u_mean));
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::ReadWriteNoSize(u_variance));
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::PtrReadWrite(u_bgmodelUsedModes));
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::PtrReadWrite(u_weight));
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::PtrReadWrite(u_mean));
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::PtrReadWrite(u_variance));
idxArg = kernel_apply.set(idxArg, ocl::KernelArg::WriteOnlyNoSize(fgmask));
idxArg = kernel_apply.set(idxArg, (float)learningRate); //alphaT
idxArg = kernel_apply.set(idxArg, (float)alpha1);
idxArg = kernel_apply.set(idxArg, (float)(-learningRate*fCT)); //prune
idxArg = kernel_apply.set(idxArg, detectShadows_flag);
idxArg = kernel_apply.set(idxArg, (float)varThreshold); //c_Tb
idxArg = kernel_apply.set(idxArg, backgroundRatio); //c_TB
@ -780,18 +786,11 @@ bool BackgroundSubtractorMOG2Impl::ocl_apply(InputArray _image, OutputArray _fgm
idxArg = kernel_apply.set(idxArg, varMax);
idxArg = kernel_apply.set(idxArg, fVarInit);
idxArg = kernel_apply.set(idxArg, fTau);
kernel_apply.set(idxArg, nShadowDetection);
if (bShadowDetection)
kernel_apply.set(idxArg, nShadowDetection);
size_t globalsize[] = {frame.cols, frame.rows, 1};
if (!(kernel_apply.run(2, globalsize, NULL, true)))
return false;
_fgmask.create(_image.size(),CV_8U);
UMat temp = _fgmask.getUMat();
fgmask.convertTo(temp, CV_8U);
return true;
return kernel_apply.run(2, globalsize, NULL, true);
}
bool BackgroundSubtractorMOG2Impl::ocl_getBackgroundImage(OutputArray _backgroundImage) const
@ -802,10 +801,10 @@ bool BackgroundSubtractorMOG2Impl::ocl_getBackgroundImage(OutputArray _backgroun
UMat dst = _backgroundImage.getUMat();
int idxArg = 0;
idxArg = kernel_getBg.set(idxArg, ocl::KernelArg::ReadOnly(u_bgmodelUsedModes));
idxArg = kernel_getBg.set(idxArg, ocl::KernelArg::ReadOnlyNoSize(u_weight));
idxArg = kernel_getBg.set(idxArg, ocl::KernelArg::ReadOnlyNoSize(u_mean));
idxArg = kernel_getBg.set(idxArg, ocl::KernelArg::WriteOnlyNoSize(dst));
idxArg = kernel_getBg.set(idxArg, ocl::KernelArg::PtrReadOnly(u_bgmodelUsedModes));
idxArg = kernel_getBg.set(idxArg, ocl::KernelArg::PtrReadOnly(u_weight));
idxArg = kernel_getBg.set(idxArg, ocl::KernelArg::PtrReadOnly(u_mean));
idxArg = kernel_getBg.set(idxArg, ocl::KernelArg::WriteOnly(dst));
kernel_getBg.set(idxArg, backgroundRatio);
size_t globalsize[2] = {u_bgmodelUsedModes.cols, u_bgmodelUsedModes.rows};
@ -815,6 +814,13 @@ bool BackgroundSubtractorMOG2Impl::ocl_getBackgroundImage(OutputArray _backgroun
#endif
void BackgroundSubtractorMOG2Impl::create_ocl_apply_kernel()
{
int nchannels = CV_MAT_CN(frameType);
String opts = format("-D CN=%d -D NMIXTURES=%d%s", nchannels, nmixtures, bShadowDetection ? " -D SHADOW_DETECT" : "");
kernel_apply.create("mog2_kernel", ocl::video::bgfg_mog2_oclsrc, opts);
}
void BackgroundSubtractorMOG2Impl::apply(InputArray _image, OutputArray _fgmask, double learningRate)
{
bool needToInitialize = nframes == 0 || learningRate >= 1 || _image.size() != frameSize || _image.type() != frameType;
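
Shadow detection is now a compile-time option of the OpenCL program
(-D SHADOW_DETECT) rather than a runtime flag, so setDetectShadows() must
rebuild the kernel when the flag flips. A toy sketch of the pattern, with an
illustrative kernel standing in for bgfg_mog2_oclsrc:

    #include <opencv2/core/ocl.hpp>

    static const char* TOY_SRC =
        "__kernel void fill(__global uchar* p) {\n"
        "#ifdef SHADOW_DETECT\n"
        "    p[get_global_id(0)] = 127;\n"
        "#else\n"
        "    p[get_global_id(0)] = 255;\n"
        "#endif\n"
        "}\n";

    static cv::ocl::Kernel makeFillKernel(bool shadows)
    {
        cv::String opts = shadows ? "-D SHADOW_DETECT" : "";
        cv::ocl::ProgramSource source(TOY_SRC);
        cv::ocl::Kernel k;
        k.create("fill", source, opts);  // recompiled whenever 'shadows' flips
        return k;
    }
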

@ -7,11 +7,6 @@
#define frameToMean(a, b) (b) = *(a);
#define meanToFrame(a, b) *b = convert_uchar_sat(a);
inline float sqr(float val)
{
return val * val;
}
inline float sum(float val)
{
return val;
@ -34,63 +29,45 @@ inline float sum(float val)
b.z = a[2]; \
b.w = 0.0f;
inline float sqr(const float4 val)
{
return val.x * val.x + val.y * val.y + val.z * val.z;
}
inline float sum(const float4 val)
{
return (val.x + val.y + val.z);
}
inline void swap4(__global float4* ptr, int x, int y, int k, int rows, int ptr_step)
{
float4 val = ptr[(k * rows + y) * ptr_step + x];
ptr[(k * rows + y) * ptr_step + x] = ptr[((k + 1) * rows + y) * ptr_step + x];
ptr[((k + 1) * rows + y) * ptr_step + x] = val;
}
#endif
inline void swap(__global float* ptr, int x, int y, int k, int rows, int ptr_step)
{
float val = ptr[(k * rows + y) * ptr_step + x];
ptr[(k * rows + y) * ptr_step + x] = ptr[((k + 1) * rows + y) * ptr_step + x];
ptr[((k + 1) * rows + y) * ptr_step + x] = val;
}
__kernel void mog2_kernel(__global const uchar* frame, int frame_step, int frame_offset, int frame_row, int frame_col, //uchar || uchar3
__global uchar* modesUsed, int modesUsed_step, int modesUsed_offset, //int
__global uchar* weight, int weight_step, int weight_offset, //float
__global uchar* mean, int mean_step, int mean_offset, //T_MEAN=float || float4
__global uchar* variance, int var_step, int var_offset, //float
__global uchar* fgmask, int fgmask_step, int fgmask_offset, //int
__kernel void mog2_kernel(__global const uchar* frame, int frame_step, int frame_offset, int frame_row, int frame_col, //uchar || uchar3
__global uchar* modesUsed, //uchar
__global uchar* weight, //float
__global uchar* mean, //T_MEAN=float || float4
__global uchar* variance, //float
__global uchar* fgmask, int fgmask_step, int fgmask_offset, //uchar
float alphaT, float alpha1, float prune,
int detectShadows_flag,
float c_Tb, float c_TB, float c_Tg, float c_varMin, //constants
float c_varMax, float c_varInit, float c_tau, uchar c_shadowVal)
float c_Tb, float c_TB, float c_Tg, float c_varMin, //constants
float c_varMax, float c_varInit, float c_tau
#ifdef SHADOW_DETECT
, uchar c_shadowVal
#endif
)
{
int x = get_global_id(0);
int y = get_global_id(1);
weight_step/= sizeof(float);
var_step /= sizeof(float);
mean_step /= (sizeof(float)*cnMode);
if( x < frame_col && y < frame_row)
{
__global const uchar* _frame = (frame + mad24( y, frame_step, x*CN + frame_offset));
__global const uchar* _frame = (frame + mad24(y, frame_step, mad24(x, CN, frame_offset)));
T_MEAN pix;
frameToMean(_frame, pix);
bool background = false; // true - the pixel classified as background
uchar foreground = 255; // 0 - the pixel classified as background
bool fitsPDF = false; //if it remains zero a new GMM mode will be added
__global int* _modesUsed = (__global int*)(modesUsed + mad24( y, modesUsed_step, x*(int)(sizeof(int))));
int nmodes = _modesUsed[0];
int nNewModes = nmodes; //current number of modes in GMM
int pt_idx = mad24(y, frame_col, x);
int idx_step = frame_row * frame_col;
__global uchar* _modesUsed = modesUsed + pt_idx;
uchar nmodes = _modesUsed[0];
float totalWeight = 0.0f;
@ -98,114 +75,130 @@ __kernel void mog2_kernel(__global const uchar* frame, int frame_step, int frame
__global float* _variance = (__global float*)(variance);
__global T_MEAN* _mean = (__global T_MEAN*)(mean);
for (int mode = 0; mode < nmodes; ++mode)
uchar mode = 0;
for (; mode < nmodes; ++mode)
{
int mode_idx = mad24(mode, idx_step, pt_idx);
float c_weight = mad(alpha1, _weight[mode_idx], prune);
float c_weight = alpha1 * _weight[(mode * frame_row + y) * weight_step + x] + prune;
int swap_count = 0;
if (!fitsPDF)
{
float c_var = _variance[(mode * frame_row + y) * var_step + x];
float c_var = _variance[mode_idx];
T_MEAN c_mean = _mean[(mode * frame_row + y) * mean_step + x];
T_MEAN c_mean = _mean[mode_idx];
T_MEAN diff = c_mean - pix;
float dist2 = sqr(diff);
T_MEAN diff = c_mean - pix;
float dist2 = dot(diff, diff);
if (totalWeight < c_TB && dist2 < c_Tb * c_var)
background = true;
if (totalWeight < c_TB && dist2 < c_Tb * c_var)
foreground = 0;
if (dist2 < c_Tg * c_var)
{
fitsPDF = true;
c_weight += alphaT;
float k = alphaT / c_weight;
if (dist2 < c_Tg * c_var)
{
fitsPDF = true;
c_weight += alphaT;
float k = alphaT / c_weight;
T_MEAN mean_new = mad((T_MEAN)-k, diff, c_mean);
float variance_new = clamp(mad(k, (dist2 - c_var), c_var), c_varMin, c_varMax);
_mean[(mode * frame_row + y) * mean_step + x] = c_mean - k * diff;
for (int i = mode; i > 0; --i)
{
int prev_idx = mode_idx - idx_step;
if (c_weight < _weight[prev_idx])
break;
float varnew = c_var + k * (dist2 - c_var);
varnew = fmax(varnew, c_varMin);
varnew = fmin(varnew, c_varMax);
_weight[mode_idx] = _weight[prev_idx];
_variance[mode_idx] = _variance[prev_idx];
_mean[mode_idx] = _mean[prev_idx];
_variance[(mode * frame_row + y) * var_step + x] = varnew;
for (int i = mode; i > 0; --i)
{
if (c_weight < _weight[((i - 1) * frame_row + y) * weight_step + x])
break;
swap_count++;
swap(_weight, x, y, i - 1, frame_row, weight_step);
swap(_variance, x, y, i - 1, frame_row, var_step);
#if (CN==1)
swap(_mean, x, y, i - 1, frame_row, mean_step);
#else
swap4(_mean, x, y, i - 1, frame_row, mean_step);
#endif
}
mode_idx = prev_idx;
}
} // !fitsPDF
_mean[mode_idx] = mean_new;
_variance[mode_idx] = variance_new;
_weight[mode_idx] = c_weight; //update weight by the calculated value
totalWeight += c_weight;
mode ++;
break;
}
if (c_weight < -prune)
{
c_weight = 0.0f;
nmodes--;
}
_weight[((mode - swap_count) * frame_row + y) * weight_step + x] = c_weight; //update weight by the calculated value
_weight[mode_idx] = c_weight; //update weight by the calculated value
totalWeight += c_weight;
}
totalWeight = 1.f / totalWeight;
for (int mode = 0; mode < nmodes; ++mode)
_weight[(mode * frame_row + y) * weight_step + x] *= totalWeight;
for (; mode < nmodes; ++mode)
{
int mode_idx = mad24(mode, idx_step, pt_idx);
float c_weight = mad(alpha1, _weight[mode_idx], prune);
if (c_weight < -prune)
{
c_weight = 0.0f;
nmodes = mode;
break;
}
_weight[mode_idx] = c_weight; //update weight by the calculated value
totalWeight += c_weight;
}
nmodes = nNewModes;
if (0.f < totalWeight)
{
totalWeight = 1.f / totalWeight;
for (int mode = 0; mode < nmodes; ++mode)
_weight[mad24(mode, idx_step, pt_idx)] *= totalWeight;
}
if (!fitsPDF)
{
int mode = nmodes == (NMIXTURES) ? (NMIXTURES) - 1 : nmodes++;
uchar mode = nmodes == (NMIXTURES) ? (NMIXTURES) - 1 : nmodes++;
int mode_idx = mad24(mode, idx_step, pt_idx);
if (nmodes == 1)
_weight[(mode * frame_row + y) * weight_step + x] = 1.f;
_weight[mode_idx] = 1.f;
else
{
_weight[(mode * frame_row + y) * weight_step + x] = alphaT;
_weight[mode_idx] = alphaT;
for (int i = 0; i < nmodes - 1; ++i)
_weight[(i * frame_row + y) * weight_step + x] *= alpha1;
for (int i = pt_idx; i < mode_idx; i += idx_step)
_weight[i] *= alpha1;
}
_mean[(mode * frame_row + y) * mean_step + x] = pix;
_variance[(mode * frame_row + y) * var_step + x] = c_varInit;
for (int i = nmodes - 1; i > 0; --i)
{
if (alphaT < _weight[((i - 1) * frame_row + y) * weight_step + x])
int prev_idx = mode_idx - idx_step;
if (alphaT < _weight[prev_idx])
break;
swap(_weight, x, y, i - 1, frame_row, weight_step);
swap(_variance, x, y, i - 1, frame_row, var_step);
#if (CN==1)
swap(_mean, x, y, i - 1, frame_row, mean_step);
#else
swap4(_mean, x, y, i - 1, frame_row, mean_step);
#endif
_weight[mode_idx] = _weight[prev_idx];
_variance[mode_idx] = _variance[prev_idx];
_mean[mode_idx] = _mean[prev_idx];
mode_idx = prev_idx;
}
_mean[mode_idx] = pix;
_variance[mode_idx] = c_varInit;
}
_modesUsed[0] = nmodes;
bool isShadow = false;
if (detectShadows_flag && !background)
#ifdef SHADOW_DETECT
if (foreground)
{
float tWeight = 0.0f;
for (int mode = 0; mode < nmodes; ++mode)
for (uchar mode = 0; mode < nmodes; ++mode)
{
T_MEAN c_mean = _mean[(mode * frame_row + y) * mean_step + x];
int mode_idx = mad24(mode, idx_step, pt_idx);
T_MEAN c_mean = _mean[mode_idx];
T_MEAN pix_mean = pix * c_mean;
float numerator = sum(pix_mean);
float denominator = sqr(c_mean);
float denominator = dot(c_mean, c_mean);
if (denominator == 0)
break;
@ -214,60 +207,67 @@ __kernel void mog2_kernel(__global const uchar* frame, int frame_step, int frame
{
float a = numerator / denominator;
T_MEAN dD = a * c_mean - pix;
T_MEAN dD = mad(a, c_mean, -pix);
if (sqr(dD) < c_Tb * _variance[(mode * frame_row + y) * var_step + x] * a * a)
if (dot(dD, dD) < c_Tb * _variance[mode_idx] * a * a)
{
isShadow = true;
foreground = c_shadowVal;
break;
}
}
tWeight += _weight[(mode * frame_row + y) * weight_step + x];
tWeight += _weight[mode_idx];
if (tWeight > c_TB)
break;
}
}
__global int* _fgmask = (__global int*)(fgmask + mad24(y, fgmask_step, x*(int)(sizeof(int)) + fgmask_offset));
*_fgmask = background ? 0 : isShadow ? c_shadowVal : 255;
#endif
__global uchar* _fgmask = fgmask + mad24(y, fgmask_step, x + fgmask_offset);
*_fgmask = (uchar)foreground;
}
}
__kernel void getBackgroundImage2_kernel(__global const uchar* modesUsed, int modesUsed_step, int modesUsed_offset, int modesUsed_row, int modesUsed_col,
__global const uchar* weight, int weight_step, int weight_offset,
__global const uchar* mean, int mean_step, int mean_offset,
__global uchar* dst, int dst_step, int dst_offset,
__kernel void getBackgroundImage2_kernel(__global const uchar* modesUsed,
__global const uchar* weight,
__global const uchar* mean,
__global uchar* dst, int dst_step, int dst_offset, int dst_row, int dst_col,
float c_TB)
{
int x = get_global_id(0);
int y = get_global_id(1);
if(x < modesUsed_col && y < modesUsed_row)
if(x < dst_col && y < dst_row)
{
__global int* _modesUsed = (__global int*)(modesUsed + mad24( y, modesUsed_step, x*(int)(sizeof(int))));
int nmodes = _modesUsed[0];
int pt_idx = mad24(y, dst_col, x);
__global const uchar* _modesUsed = modesUsed + pt_idx;
uchar nmodes = _modesUsed[0];
T_MEAN meanVal = (T_MEAN)F_ZERO;
float totalWeight = 0.0f;
for (int mode = 0; mode < nmodes; ++mode)
__global const float* _weight = (__global const float*)weight;
__global const T_MEAN* _mean = (__global const T_MEAN*)(mean);
int idx_step = dst_row * dst_col;
for (uchar mode = 0; mode < nmodes; ++mode)
{
__global const float* _weight = (__global const float*)(weight + mad24(mode * modesUsed_row + y, weight_step, x*(int)(sizeof(float))));
float c_weight = _weight[0];
int mode_idx = mad24(mode, idx_step, pt_idx);
float c_weight = _weight[mode_idx];
T_MEAN c_mean = _mean[mode_idx];
__global const T_MEAN* _mean = (__global const T_MEAN*)(mean + mad24(mode * modesUsed_row + y, mean_step, x*(int)(sizeof(float))*cnMode));
T_MEAN c_mean = _mean[0];
meanVal = meanVal + c_weight * c_mean;
meanVal = mad(c_weight, c_mean, meanVal);
totalWeight += c_weight;
if(totalWeight > c_TB)
if (totalWeight > c_TB)
break;
}
meanVal = meanVal * (1.f / totalWeight);
__global uchar* _dst = dst + y * dst_step + x*CN + dst_offset;
if (0.f < totalWeight)
meanVal = meanVal / totalWeight;
else
meanVal = (T_MEAN)(0.f);
__global uchar* _dst = dst + mad24(y, dst_step, mad24(x, CN, dst_offset));
meanToFrame(meanVal, _dst);
}
}
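
Both rewritten kernels now address the per-mode GMM buffers with a single
flattened, mode-major offset instead of separate step arguments:
mode_idx = mode * rows * cols + y * cols + x. A host-side sketch of that
layout (names are illustrative):

    #include <vector>

    struct GmmWeights
    {
        int rows, cols, nmixtures;
        std::vector<float> data;           // nmixtures * rows * cols floats

        float& at(int mode, int y, int x)
        {
            int pt_idx   = y * cols + x;   // mad24(y, frame_col, x) in the kernel
            int idx_step = rows * cols;    // distance between mode planes
            return data[mode * idx_step + pt_idx];  // mad24(mode, idx_step, pt_idx)
        }
    };
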

@ -18,13 +18,11 @@ endif()
set(videoio_hdrs
${CMAKE_CURRENT_LIST_DIR}/src/precomp.hpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_ffmpeg_impl.hpp
)
set(videoio_srcs
${CMAKE_CURRENT_LIST_DIR}/src/cap.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_images.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_ffmpeg.cpp
)
file(GLOB videoio_ext_hdrs
@ -42,6 +40,7 @@ if (WIN32 AND HAVE_DSHOW)
endif()
if (WIN32 AND HAVE_MSMF)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_msmf.hpp)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_msmf.cpp)
endif()
@ -94,18 +93,24 @@ endif(HAVE_opencv_androidcamera)
if(HAVE_XIMEA)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ximea.cpp)
ocv_include_directories(${XIMEA_PATH})
if(XIMEA_PATH)
ocv_include_directories(${XIMEA_PATH})
endif()
if(XIMEA_LIBRARY_DIR)
link_directories("${XIMEA_LIBRARY_DIR}")
endif()
if(X86_64)
if(WIN32 AND X86_64)
list(APPEND VIDEOIO_LIBRARIES m3apiX64)
elseif(APPLE)
list(APPEND VIDEOIO_LIBRARIES "-framework m3api")
else()
list(APPEND VIDEOIO_LIBRARIES m3api)
endif()
endif(HAVE_XIMEA)
if(HAVE_FFMPEG)
list(APPEND videoio_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ffmpeg_impl.hpp)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ffmpeg.cpp)
if(UNIX AND BZIP2_LIBRARIES)
list(APPEND VIDEOIO_LIBRARIES ${BZIP2_LIBRARIES})
endif()

@ -363,8 +363,10 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
{
CvCapture * result = 0;
#ifdef HAVE_FFMPEG
if (! result)
result = cvCreateFileCapture_FFMPEG_proxy (filename);
#endif
#ifdef HAVE_VFW
if (! result)
@ -421,8 +423,10 @@ CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
if(!fourcc || !fps)
result = cvCreateVideoWriter_Images(filename);
#ifdef HAVE_FFMPEG
if(!result)
result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);
#endif
#ifdef HAVE_VFW
if(!result)
@ -454,6 +458,19 @@ CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
result = cvCreateVideoWriter_GStreamer(filename, fourcc, fps, frameSize, is_color);
#endif
#if !defined(HAVE_FFMPEG) && \
!defined(HAVE_VFW) && \
!defined(HAVE_MSMF) && \
!defined(HAVE_AVFOUNDATION) && \
!defined(HAVE_QUICKTIME) && \
!defined(HAVE_QTKIT) && \
!defined(HAVE_GSTREAMER)
// If none of the writers is used
// these statements suppress 'unused parameter' warnings.
(void)frameSize;
(void)is_color;
#endif
if(!result)
result = cvCreateVideoWriter_Images(filename);
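Editor's note: the (void)frameSize; (void)is_color; casts above are the standard C/C++ idiom for silencing unused-parameter warnings when the preprocessor strips every real use of a parameter. A tiny self-contained illustration with a hypothetical function:

// Casting a parameter to void is a no-op that still counts as a "use",
// so -Wunused-parameter stays quiet on configurations that ignore it.
static int openDevice(const char* path, int flags)
{
    (void)flags; // 'flags' is only honoured by some backends
    return path != 0;
}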

@ -493,6 +493,7 @@ static int LockCallBack(void **mutex, AVLockOp op)
localMutex->destroy();
free(localMutex);
localMutex = NULL;
*mutex = NULL;
break;
}
return 0;
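Editor's note: this callback is an FFmpeg lock manager, and the added *mutex = NULL; matters because FFmpeg may look at the pointer again after AV_LOCK_DESTROY, so it must not dangle. A hedged sketch of the same callback shape using std::mutex (registration shown as in FFmpeg's classic lock-manager API, not taken from this patch):

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <mutex>

static int lockmgr(void** mtx, enum AVLockOp op)
{
    std::mutex** m = reinterpret_cast<std::mutex**>(mtx);
    switch (op)
    {
    case AV_LOCK_CREATE:  *m = new std::mutex(); return 0;
    case AV_LOCK_OBTAIN:  (*m)->lock();          return 0;
    case AV_LOCK_RELEASE: (*m)->unlock();        return 0;
    case AV_LOCK_DESTROY: delete *m; *m = NULL;  return 0; // null it out, as in the fix above
    }
    return 1;
}
// registered once at startup: av_lockmgr_register(lockmgr);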

@ -73,13 +73,14 @@
#define CV_WARN(message) fprintf(stderr, "warning: %s (%s:%d)\n", message, __FILE__, __LINE__)
#endif
#if GST_VERSION_MAJOR == 0
#define COLOR_ELEM "ffmpegcolorspace"
#elif FULL_GST_VERSION < VERSION_NUM(1,5,0)
#define COLOR_ELEM "videoconvert"
#else
#define COLOR_ELEM "autovideoconvert"
#endif
void toFraction(double decimal, double &numerator, double &denominator);
void handleMessage(GstElement * pipeline);
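Editor's note: toFraction() is declared here but its body sits outside this hunk. A plausible sketch of such a helper (implementation assumed, not taken from the patch) approximates the decimal rate with a fixed denominator and reduces by the gcd:

#include <cmath>

static long gcdl(long a, long b) { return b == 0 ? a : gcdl(b, a % b); }

// e.g. 29.97 -> 2997/100; precision limited to 4 decimal digits
static void toFractionSketch(double decimal, double& numerator, double& denominator)
{
    const long scale = 10000;
    long num = std::lround(decimal * scale);
    long den = scale;
    const long g = gcdl(num < 0 ? -num : num, den); // den > 0, so g >= 1
    numerator = (double)(num / g);
    denominator = (double)(den / g);
}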
@ -149,8 +150,11 @@ protected:
#endif
GstBuffer* buffer;
GstCaps* caps;
IplImage* frame;
gint64 duration;
gint width;
gint height;
double fps;
};
/*!
@ -169,8 +173,11 @@ void CvCapture_GStreamer::init()
#endif
buffer = NULL;
caps = NULL;
frame = NULL;
duration = -1;
width = -1;
height = -1;
fps = -1;
}
/*!
@ -187,42 +194,11 @@ void CvCapture_GStreamer::close()
gst_object_unref(GST_OBJECT(pipeline));
pipeline = NULL;
}
duration = -1;
width = -1;
height = -1;
fps = -1;
}
/*!
@ -280,16 +256,10 @@ IplImage * CvCapture_GStreamer::retrieveFrame(int)
//construct a frame header if we did not have any yet
if(!frame)
{
gint height, width;
#if GST_VERSION_MAJOR == 0
GstCaps* buffer_caps = gst_buffer_get_caps(buffer);
#else
GstCaps* buffer_caps = gst_sample_get_caps(sample);
#endif
// bail out if no caps
assert(gst_caps_get_size(buffer_caps) == 1);
@ -299,10 +269,10 @@ IplImage * CvCapture_GStreamer::retrieveFrame(int)
if(!gst_structure_get_int(structure, "width", &width) ||
!gst_structure_get_int(structure, "height", &height))
{
gst_caps_unref(buffer_caps);
return 0;
}
int depth = 3;
#if GST_VERSION_MAJOR > 0
depth = 0;
@ -335,9 +305,12 @@ IplImage * CvCapture_GStreamer::retrieveFrame(int)
#endif
if (depth > 0) {
frame = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, depth);
} else {
gst_caps_unref(buffer_caps);
return 0;
}
gst_caps_unref(buffer_caps);
}
// gstreamer expects us to handle the memory at this point
@ -394,10 +367,20 @@ void CvCapture_GStreamer::startPipeline()
__BEGIN__;
//fprintf(stderr, "relinked, pausing\n");
GstStateChangeReturn status = gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
if (status == GST_STATE_CHANGE_ASYNC)
{
// wait for status update
GstState st1;
GstState st2;
status = gst_element_get_state(pipeline, &st1, &st2, GST_CLOCK_TIME_NONE);
}
if (status == GST_STATE_CHANGE_FAILURE)
{
handleMessage(pipeline);
gst_object_unref(pipeline);
pipeline = NULL;
CV_ERROR(CV_StsError, "GStreamer: unable to start pipeline\n");
return;
}
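Editor's note: the PAUSED/PLAYING transitions in this file all follow the same shape: gst_element_set_state() may return GST_STATE_CHANGE_ASYNC, and the code then blocks in gst_element_get_state() until the transition settles. A small helper capturing that pattern (a sketch, not part of the patch):

#include <gst/gst.h>

static GstStateChangeReturn setStateBlocking(GstElement* pipeline, GstState state)
{
    GstStateChangeReturn status = gst_element_set_state(pipeline, state);
    if (status == GST_STATE_CHANGE_ASYNC)
    {
        GstState current, pending;
        // GST_CLOCK_TIME_NONE: wait indefinitely for the transition to finish
        status = gst_element_get_state(pipeline, &current, &pending, GST_CLOCK_TIME_NONE);
    }
    return status; // success, failure, or no-preroll
}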
@ -422,6 +405,7 @@ void CvCapture_GStreamer::stopPipeline()
GST_STATE_CHANGE_FAILURE) {
CV_ERROR(CV_StsError, "GStreamer: unable to stop pipeline\n");
gst_object_unref(pipeline);
pipeline = NULL;
return;
}
__END__;
@ -576,11 +560,13 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
gst_initializer::init();
bool file = false;
bool stream = false;
bool manualpipeline = false;
char *uri = NULL;
uridecodebin = NULL;
GstElementFactory * testfac;
GstStateChangeReturn status;
if (type == CV_CAP_GSTREAMER_V4L){
testfac = gst_element_factory_find("v4lsrc");
@ -611,7 +597,12 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
if(uri)
{
uri = g_filename_to_uri(uri, NULL, NULL);
if(uri)
{
file = true;
}
else
{
CV_WARN("GStreamer: Error opening file\n");
close();
return false;
@ -621,9 +612,9 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
{
GError *err = NULL;
uridecodebin = gst_parse_launch(filename, &err);
if(!uridecodebin)
{
fprintf(stderr, "GStreamer: Error opening bin: %s\n", err->message);
return false;
}
stream = true;
@ -651,8 +642,8 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
#endif
element_from_uri = true;
}else{
uridecodebin = gst_element_factory_make("uridecodebin", NULL);
g_object_set(G_OBJECT(uridecodebin), "uri", uri, NULL);
}
g_free(protocol);
@ -716,9 +707,9 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
}
else
{
pipeline = gst_pipeline_new(NULL);
// videoconvert (in 0.10: ffmpegcolorspace, in 1.x: autovideoconvert)
// automatically selects the correct colorspace conversion based on caps.
color = gst_element_factory_make(COLOR_ELEM, NULL);
sink = gst_element_factory_make("appsink", NULL);
@ -728,6 +719,7 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
if(!gst_element_link(uridecodebin, color)) {
CV_ERROR(CV_StsError, "GStreamer: cannot link color -> sink\n");
gst_object_unref(pipeline);
pipeline = NULL;
return false;
}
}else{
@ -737,6 +729,7 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
if(!gst_element_link(color, sink)) {
CV_ERROR(CV_StsError, "GStreamer: cannot link color -> sink\n");
gst_object_unref(pipeline);
pipeline = NULL;
return false;
}
}
@ -761,8 +754,76 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
gst_app_sink_set_caps(GST_APP_SINK(sink), caps);
gst_caps_unref(caps);
//we do not start recording here just yet.
// the user probably wants to set capture properties first, so start recording whenever the first frame is requested
// For video files only: set pipeline to PAUSED state to get its duration
if (file)
{
status = gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PAUSED);
if (status == GST_STATE_CHANGE_ASYNC)
{
// wait for status update
GstState st1;
GstState st2;
status = gst_element_get_state(pipeline, &st1, &st2, GST_CLOCK_TIME_NONE);
}
if (status == GST_STATE_CHANGE_FAILURE)
{
handleMessage(pipeline);
gst_object_unref(pipeline);
pipeline = NULL;
CV_ERROR(CV_StsError, "GStreamer: unable to start pipeline\n");
return false;
}
GstFormat format;
format = GST_FORMAT_DEFAULT;
#if GST_VERSION_MAJOR == 0
if(!gst_element_query_duration(sink, &format, &duration))
#else
if(!gst_element_query_duration(sink, format, &duration))
#endif
{
handleMessage(pipeline);
CV_WARN("GStreamer: unable to query duration of stream");
duration = -1;
}
GstPad* pad = gst_element_get_static_pad(color, "src");
#if GST_VERSION_MAJOR == 0
GstCaps* buffer_caps = gst_pad_get_caps(pad);
#else
GstCaps* buffer_caps = gst_pad_get_current_caps(pad);
#endif
const GstStructure *structure = gst_caps_get_structure (buffer_caps, 0);
if (!gst_structure_get_int (structure, "width", &width))
{
CV_WARN("Cannot query video width\n");
}
if (!gst_structure_get_int (structure, "height", &height))
{
CV_WARN("Cannot query video heigth\n");
}
gint num = 0, denom=1;
if(!gst_structure_get_fraction(structure, "framerate", &num, &denom))
{
CV_WARN("Cannot query video fps\n");
}
fps = (double)num/(double)denom;
// GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "pipeline");
}
else
{
duration = -1;
width = -1;
height = -1;
fps = -1;
}
__END__;
return true;
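Editor's note: one subtlety buried in the block above is that gst_element_query_duration() takes the format by pointer in GStreamer 0.10 but by value in 1.x, hence the #if. A compat wrapper makes the difference explicit (a sketch under the same version macros):

#include <gst/gst.h>

static gint64 queryDurationCompat(GstElement* elem)
{
    gint64 duration = -1;
    GstFormat format = GST_FORMAT_DEFAULT;
#if GST_VERSION_MAJOR == 0
    if (!gst_element_query_duration(elem, &format, &duration)) // 0.10: format in/out
#else
    if (!gst_element_query_duration(elem, format, &duration))  // 1.x: format by value
#endif
        duration = -1;
    return duration;
}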
@ -819,58 +880,16 @@ double CvCapture_GStreamer::getProperty( int propId )
return false;
}
return ((double) value) / GST_FORMAT_PERCENT_MAX;
case CV_CAP_PROP_FRAME_WIDTH:
return width;
case CV_CAP_PROP_FRAME_HEIGHT:
return height;
case CV_CAP_PROP_FPS:
return fps;
case CV_CAP_PROP_FOURCC:
break;
case CV_CAP_PROP_FRAME_COUNT:
return duration;
case CV_CAP_PROP_FORMAT:
case CV_CAP_PROP_MODE:
case CV_CAP_PROP_BRIGHTNESS:
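Editor's note: the net effect of the getProperty() rewrite is that width, height, fps and duration are cached once in open(), so property reads no longer require a previously grabbed frame. A hypothetical caller benefiting from that:

#include <opencv2/videoio.hpp>

static bool probeVideo(const char* path)
{
    cv::VideoCapture cap(path);
    if (!cap.isOpened())
        return false;
    // Previously these returned 0 with a warning until the first frame was
    // grabbed (buffer_caps was still NULL); now they read the cached members.
    double w   = cap.get(cv::CAP_PROP_FRAME_WIDTH);
    double h   = cap.get(cv::CAP_PROP_FRAME_HEIGHT);
    double fps = cap.get(cv::CAP_PROP_FPS);
    return w > 0 && h > 0 && fps > 0;
}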
@ -1059,28 +1078,51 @@ void CvVideoWriter_GStreamer::init()
*/
void CvVideoWriter_GStreamer::close()
{
GstStateChangeReturn status;
if (pipeline)
{
if (gst_app_src_end_of_stream(GST_APP_SRC(source)) != GST_FLOW_OK)
{
CV_WARN("Cannot send EOS to GStreamer pipeline\n");
return;
}
//wait for EOS to trickle down the pipeline. This will let all elements finish properly
GstBus* bus = gst_element_get_bus(pipeline);
GstMessage *msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE, (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
if (GST_MESSAGE_TYPE(msg) == GST_MESSAGE_ERROR)
{
CV_WARN("Error during VideoWriter finalization\n");
return;
}
if(msg != NULL)
{
gst_message_unref(msg);
g_object_unref(G_OBJECT(bus));
}
status = gst_element_set_state (pipeline, GST_STATE_NULL);
if (status == GST_STATE_CHANGE_ASYNC)
{
// wait for status update
GstState st1;
GstState st2;
status = gst_element_get_state(pipeline, &st1, &st2, GST_CLOCK_TIME_NONE);
}
if (status == GST_STATE_CHANGE_FAILURE)
{
handleMessage (pipeline);
gst_object_unref (GST_OBJECT (pipeline));
pipeline = NULL;
CV_WARN("Unable to stop gstreamer pipeline\n");
return;
}
gst_object_unref (GST_OBJECT (pipeline));
if (source)
gst_object_unref (GST_OBJECT (source));
if (file)
gst_object_unref (GST_OBJECT (file));
pipeline = NULL;
}
}
@ -1130,7 +1172,6 @@ const char* CvVideoWriter_GStreamer::filenameToMimetype(const char *filename)
return (const char*)"video/x-msvideo";
}
/*!
* \brief CvVideoWriter_GStreamer::open
* \param filename filename to output to
@ -1178,7 +1219,15 @@ bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
GstEncodingVideoProfile* videoprofile = NULL;
#endif
GstIterator* it = NULL;
gboolean done = FALSE;
GstElement *element = NULL;
gchar* name = NULL;
#if GST_VERSION_MAJOR == 0
GstElement* splitter = NULL;
GstElement* combiner = NULL;
#endif
// we first try to construct a pipeline from the given string.
// if that fails, we assume it is an ordinary filename
@ -1186,9 +1235,7 @@ bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
__BEGIN__;
encodebin = gst_parse_launch(filename, &err);
manualpipeline = (encodebin != NULL);
if(manualpipeline)
{
@ -1200,10 +1247,6 @@ bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
}
#else
it = gst_bin_iterate_sources (GST_BIN(encodebin));
GValue value = G_VALUE_INIT;
while (!done) {
@ -1253,7 +1296,9 @@ bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
if (fourcc == CV_FOURCC('M','P','2','V')) fourcc = CV_FOURCC('M','P','G','2');
if (fourcc == CV_FOURCC('D','R','A','C')) fourcc = CV_FOURCC('d','r','a','c');
//create encoder caps from fourcc
videocaps = gst_riff_create_video_caps(fourcc, NULL, NULL, NULL, NULL, NULL);
if (!videocaps){
CV_ERROR( CV_StsUnsupportedFormat, "Gstreamer Opencv backend does not support this codec.");
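Editor's note: the fourcc remapping works because CV_FOURCC packs four ASCII bytes least-significant first, so 'MP2V' and 'MPG2' are simply different integers and gst_riff_create_video_caps() only recognizes the RIFF spelling. A worked packing example (macro re-derived here purely for illustration):

// same packing as CV_FOURCC: byte 0 holds the first character
#define FOURCC(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((d) << 24))

static_assert(FOURCC('M','P','G','2') == 0x3247504D, "LSB-first packing");
static_assert(FOURCC('M','P','2','V') != FOURCC('M','P','G','2'), "distinct codes, hence the remap");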
@ -1276,6 +1321,7 @@ bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
//create pipeline elements
encodebin = gst_element_factory_make("encodebin", NULL);
#if FULL_GST_VERSION >= VERSION_NUM(0,10,32)
g_object_set(G_OBJECT(encodebin), "profile", containerprofile, NULL);
#endif
@ -1340,7 +1386,7 @@ bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
g_object_set(G_OBJECT(source), "format", GST_FORMAT_TIME, NULL);
g_object_set(G_OBJECT(source), "block", 1, NULL);
g_object_set(G_OBJECT(source), "is-live", 0, NULL);
g_object_set(G_OBJECT(source), "emit-signals", 1, NULL);
if(!manualpipeline)
{
@ -1351,15 +1397,74 @@ bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
}
}
#if GST_VERSION_MAJOR == 0
// HACK: remove streamsplitter and streamcombiner from the
// encodebin pipeline to prevent early EOF event handling.
// We always push BGR or gray-scale frames, so the
// combiner->splitter edge in the graph is useless.
it = gst_bin_iterate_recurse (GST_BIN(encodebin));
while (!done) {
switch (gst_iterator_next (it, (void**)&element)) {
case GST_ITERATOR_OK:
name = gst_element_get_name(element);
if (strstr(name, "streamsplitter"))
splitter = element;
else if (strstr(name, "streamcombiner"))
combiner = element;
break;
case GST_ITERATOR_RESYNC:
gst_iterator_resync (it);
break;
case GST_ITERATOR_ERROR:
done = true;
break;
case GST_ITERATOR_DONE:
done = true;
break;
}
}
gst_iterator_free (it);
if (splitter && combiner)
{
gst_element_unlink(splitter, combiner);
GstPad* src = gst_element_get_pad(combiner, "src");
GstPad* sink = gst_element_get_pad(combiner, "encodingsink");
GstPad* srcPeer = gst_pad_get_peer(src);
GstPad* sinkPeer = gst_pad_get_peer(sink);
gst_pad_unlink(sinkPeer, sink);
gst_pad_unlink(src, srcPeer);
gst_pad_link(sinkPeer, srcPeer);
src = gst_element_get_pad(splitter, "encodingsrc");
sink = gst_element_get_pad(splitter, "sink");
srcPeer = gst_pad_get_peer(src);
sinkPeer = gst_pad_get_peer(sink);
gst_pad_unlink(sinkPeer, sink);
gst_pad_unlink(src, srcPeer);
gst_pad_link(sinkPeer, srcPeer);
}
#endif
stateret = gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
if(stateret == GST_STATE_CHANGE_FAILURE) {
handleMessage(pipeline);
CV_ERROR(CV_StsError, "GStreamer: cannot put pipeline to play\n");
}
framerate = fps;
num_frames = 0;
handleMessage(pipeline);
__END__;
return true;
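Editor's note: the pad surgery in the #if GST_VERSION_MAJOR == 0 block above is one recipe applied twice: to splice an element out of a linked graph, take the peers of its sink and src pads, unlink both edges, then link the peers directly. A generalized sketch (GStreamer 0.10 API, as in that branch):

#include <gst/gst.h>

// Bypass 'elem' by connecting its upstream and downstream peers directly.
static void spliceOut(GstElement* elem, const char* sinkName, const char* srcName)
{
    GstPad* sink = gst_element_get_pad(elem, sinkName);
    GstPad* src  = gst_element_get_pad(elem, srcName);
    GstPad* upstream   = gst_pad_get_peer(sink); // src pad feeding elem
    GstPad* downstream = gst_pad_get_peer(src);  // sink pad elem feeds
    gst_pad_unlink(upstream, sink);
    gst_pad_unlink(src, downstream);
    gst_pad_link(upstream, downstream);
}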
@ -1376,7 +1481,6 @@ bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
*/
bool CvVideoWriter_GStreamer::writeFrame( const IplImage * image )
{
CV_FUNCNAME("CvVideoWriter_GStreamer::writerFrame");
GstClockTime duration, timestamp;
@ -1384,6 +1488,7 @@ bool CvVideoWriter_GStreamer::writeFrame( const IplImage * image )
int size;
__BEGIN__;
handleMessage(pipeline);
if (input_pix_fmt == GST_VIDEO_FORMAT_BGR) {
@ -1399,7 +1504,8 @@ bool CvVideoWriter_GStreamer::writeFrame( const IplImage * image )
}
#endif
else {
CV_ERROR(CV_StsUnsupportedFormat, "cvWriteFrame() needs BGR or grayscale images\n");
return false;
}
size = image->imageSize;
@ -1408,7 +1514,12 @@ bool CvVideoWriter_GStreamer::writeFrame( const IplImage * image )
//gst_app_src_push_buffer takes ownership of the buffer, so we need to supply it a copy
#if GST_VERSION_MAJOR == 0
buffer = gst_buffer_try_new_and_alloc (size);
if (!buffer)
{
CV_ERROR(CV_StsBadSize, "Cannot create GStreamer buffer");
}
memcpy(GST_BUFFER_DATA (buffer), (guint8*)image->imageData, size);
GST_BUFFER_DURATION(buffer) = duration;
GST_BUFFER_TIMESTAMP(buffer) = timestamp;
@ -1427,13 +1538,16 @@ bool CvVideoWriter_GStreamer::writeFrame( const IplImage * image )
ret = gst_app_src_push_buffer(GST_APP_SRC(source), buffer);
if (ret != GST_FLOW_OK) {
/* something wrong, stop pushing */
CV_WARN("Error pushing buffer to GStreamer pipeline");
return false;
}
//GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "pipeline");
++num_frames;
__END__;
return true;
}
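Editor's note: the duration/timestamp pair attached to each pushed buffer is not computed inside this hunk; for a fixed-rate stream the usual scheme (assumed here, using the GStreamer 0.10 macros that appear in the #if branch above) is:

#include <gst/gst.h>

// Stamp the n-th frame of a constant-rate stream.
static void stampBuffer(GstBuffer* buffer, double fps, int frame_index)
{
    GstClockTime frame_duration = (GstClockTime)(GST_SECOND / fps);
    GST_BUFFER_DURATION(buffer)  = frame_duration;
    GST_BUFFER_TIMESTAMP(buffer) = frame_index * frame_duration; // PTS in 1.x terms
}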
@ -1518,9 +1632,8 @@ void handleMessage(GstElement * pipeline)
break;
case GST_MESSAGE_ERROR:
gst_message_parse_error(msg, &err, &debug);
//fprintf(stderr, "GStreamer Plugin: Embedded video playback halted; module %s reported: %s\n",
// gst_element_get_name(GST_MESSAGE_SRC (msg)), err->message);
fprintf(stderr, "GStreamer Plugin: Embedded video playback halted; module %s reported: %s\n",
gst_element_get_name(GST_MESSAGE_SRC (msg)), err->message);
g_error_free(err);
g_free(debug);
@ -1531,12 +1644,11 @@ void handleMessage(GstElement * pipeline)
//fprintf(stderr, "reached the end of the stream.");
break;
case GST_MESSAGE_STREAM_STATUS:
gst_message_parse_stream_status(msg,&tp,&elem);
//fprintf(stderr, "stream status: elem %s, %i\n", GST_ELEMENT_NAME(elem), tp);
break;
default:
//fprintf(stderr, "unhandled message\n");
//fprintf(stderr, "unhandled message %s\n",GST_MESSAGE_TYPE_NAME(msg));
break;
}
}


@ -1,8 +1,10 @@
#include "precomp.hpp"
#ifdef WIN32
#include "xiApi.h"
#include "xiExt.h"
#include "m3Api.h"
#else
#include <m3api/xiApi.h>
#endif
/**********************************************************************************/
@ -156,7 +158,7 @@ bool CvCaptureCAM_XIMEA::grabFrame()
image.size = sizeof(XI_IMG);
int mvret = xiGetImage( hmv, timeout, &image);
if(mvret == XI_ACQUISITION_STOPED)
{
xiStartAcquisition(hmv);
mvret = xiGetImage(hmv, timeout, &image);

@ -0,0 +1,157 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/ts.hpp"
#include <stdio.h>
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
using namespace cv;
using namespace std;
using namespace cvtest;
#ifdef HAVE_GSTREAMER
const string ext[] = {"avi"};
#else
const string ext[] = {"avi", "mov", "mp4"};
#endif
TEST(Videoio_Video, prop_resolution)
{
const size_t n = sizeof(ext)/sizeof(ext[0]);
const string src_dir = TS::ptr()->get_data_path();
TS::ptr()->printf(cvtest::TS::LOG, "\n\nSource files directory: %s\n", (src_dir+"video/").c_str());
for (size_t i = 0; i < n; ++i)
{
string file_path = src_dir+"video/big_buck_bunny."+ext[i];
VideoCapture cap(file_path);
if (!cap.isOpened())
{
TS::ptr()->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\nFAILED\n\n", i+1, ext[i].c_str());
TS::ptr()->printf(cvtest::TS::LOG, "Error: cannot read source video file.\n");
TS::ptr()->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
ASSERT_EQ(672, cap.get(CAP_PROP_FRAME_WIDTH));
ASSERT_EQ(384, cap.get(CAP_PROP_FRAME_HEIGHT));
}
}
TEST(Videoio_Video, actual_resolution)
{
const size_t n = sizeof(ext)/sizeof(ext[0]);
const string src_dir = TS::ptr()->get_data_path();
TS::ptr()->printf(cvtest::TS::LOG, "\n\nSource files directory: %s\n", (src_dir+"video/").c_str());
for (size_t i = 0; i < n; ++i)
{
string file_path = src_dir+"video/big_buck_bunny."+ext[i];
VideoCapture cap(file_path);
if (!cap.isOpened())
{
TS::ptr()->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\nFAILED\n\n", i+1, ext[i].c_str());
TS::ptr()->printf(cvtest::TS::LOG, "Error: cannot read source video file.\n");
TS::ptr()->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
Mat frame;
cap >> frame;
ASSERT_EQ(672, frame.cols);
ASSERT_EQ(384, frame.rows);
}
}
TEST(Videoio_Video, prop_fps)
{
const size_t n = sizeof(ext)/sizeof(ext[0]);
const string src_dir = TS::ptr()->get_data_path();
TS::ptr()->printf(cvtest::TS::LOG, "\n\nSource files directory: %s\n", (src_dir+"video/").c_str());
for (size_t i = 0; i < n; ++i)
{
string file_path = src_dir+"video/big_buck_bunny."+ext[i];
VideoCapture cap(file_path);
if (!cap.isOpened())
{
TS::ptr()->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\nFAILED\n\n", i+1, ext[i].c_str());
TS::ptr()->printf(cvtest::TS::LOG, "Error: cannot read source video file.\n");
TS::ptr()->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
ASSERT_EQ(24, cap.get(CAP_PROP_FPS));
}
}
TEST(Videoio_Video, prop_framecount)
{
const size_t n = sizeof(ext)/sizeof(ext[0]);
const string src_dir = TS::ptr()->get_data_path();
TS::ptr()->printf(cvtest::TS::LOG, "\n\nSource files directory: %s\n", (src_dir+"video/").c_str());
for (size_t i = 0; i < n; ++i)
{
string file_path = src_dir+"video/big_buck_bunny."+ext[i];
VideoCapture cap(file_path);
if (!cap.isOpened())
{
TS::ptr()->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\nFAILED\n\n", i+1, ext[i].c_str());
TS::ptr()->printf(cvtest::TS::LOG, "Error: cannot read source video file.\n");
TS::ptr()->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
ASSERT_EQ(125, cap.get(CAP_PROP_FRAME_COUNT));
}
}
#endif

@ -92,7 +92,9 @@ const VideoFormat g_specific_fmt_list[] =
VideoFormat("mkv", VideoWriter::fourcc('X', 'V', 'I', 'D')),
VideoFormat("mkv", VideoWriter::fourcc('M', 'P', 'E', 'G')),
VideoFormat("mkv", VideoWriter::fourcc('M', 'J', 'P', 'G')),
#ifndef HAVE_GSTREAMER
VideoFormat("mov", VideoWriter::fourcc('m', 'p', '4', 'v')),
#endif
VideoFormat()
};
#endif
@ -490,7 +492,13 @@ void CV_VideoIOTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor
if (fourcc == VideoWriter::fourcc('M', 'P', 'E', 'G') && ext == "mkv")
allowed_extra_frames = 1;
// Hack! Some GStreamer encoding pipelines drop the last frame in the video
int allowed_frame_drop = 0;
#ifdef HAVE_GSTREAMER
allowed_frame_drop = 1;
#endif
if (FRAME_COUNT < IMAGE_COUNT - allowed_frame_drop || FRAME_COUNT > IMAGE_COUNT + allowed_extra_frames)
{
ts->printf(ts->LOG, "\nFrame count checking for video_%s.%s...\n", fourcc_str.c_str(), ext.c_str());
ts->printf(ts->LOG, "Video codec: %s\n", fourcc_str.c_str());
@ -505,7 +513,7 @@ void CV_VideoIOTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor
return;
}
for (int i = 0; (size_t)i < IMAGE_COUNT - allowed_frame_drop; i++)
{
Mat frame; cap >> frame;
if (frame.empty())
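Editor's note: as a concrete reading of the window above, with IMAGE_COUNT = 50, allowed_frame_drop = 1 (GStreamer) and allowed_extra_frames = 1 (MPEG in mkv), any FRAME_COUNT in [49, 51] passes; the predicate reduces to:

// acceptance window for the written frame count (example values above)
static bool frameCountOk(size_t frames, size_t images, size_t drop, size_t extra)
{
    return frames >= images - drop && frames <= images + extra;
}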

@ -6,7 +6,7 @@
SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_flann
opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video
opencv_objdetect opencv_photo opencv_features2d opencv_calib3d
opencv_stitching opencv_videostab opencv_shape)
ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
@ -30,6 +30,10 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/cudafilters/include")
endif()
if (HAVE_opencv_xfeatures2d)
ocv_include_directories("${OPENCV_MODULE_opencv_xfeatures2d_LOCATION}/include")
endif()
if(HAVE_opencv_ocl)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/ocl/include")
endif()

@ -237,7 +237,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
fs.release();
}
else
cout << "Error: can not save the intrinsic parameters\n";
cout << "Error: can not save the extrinsic parameters\n";
// OpenCV can handle left-right
// or up-down camera arrangements

@ -71,8 +71,8 @@ def mtx2rvec(R):
return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
