Merged with HEAD and removed C interface to rotatedRectangleIntersection

pull/1539/head
Nghia Ho 11 years ago
commit cc15898353
  1. 60  .gitattributes
  2. 1  .gitignore
  3. 1  3rdparty/.gitattributes
  4. 2  3rdparty/ffmpeg/readme.txt
  5. 8  3rdparty/libpng/CMakeLists.txt
  6. 22  3rdparty/libpng/opencv-libpng.patch
  7. 4  3rdparty/libpng/pngpriv.h
  8. 12  3rdparty/libtiff/CMakeLists.txt
  9. 1  3rdparty/libtiff/tif_config.h.cmakein
  10. 2  3rdparty/openexr/CMakeLists.txt
  11. 63  3rdparty/zlib/ChangeLog
  12. 6  3rdparty/zlib/README
  13. 2  3rdparty/zlib/compress.c
  14. 12  3rdparty/zlib/deflate.c
  15. 2  3rdparty/zlib/deflate.h
  16. 20  3rdparty/zlib/gzguts.h
  17. 40  3rdparty/zlib/gzlib.c
  18. 19  3rdparty/zlib/gzread.c
  19. 58  3rdparty/zlib/gzwrite.c
  20. 2  3rdparty/zlib/infback.c
  21. 6  3rdparty/zlib/inffast.c
  22. 64  3rdparty/zlib/inflate.c
  23. 14  3rdparty/zlib/inftrees.c
  24. 14  3rdparty/zlib/trees.c
  25. 2  3rdparty/zlib/uncompr.c
  26. 27  3rdparty/zlib/zconf.h.cmakein
  27. 48  3rdparty/zlib/zlib.h
  28. 2  3rdparty/zlib/zutil.c
  29. 9  3rdparty/zlib/zutil.h
  30. 40  CMakeLists.txt
  31. 1  apps/CMakeLists.txt
  32. 1  apps/haartraining/CMakeLists.txt
  33. 1  apps/haartraining/_cvcommon.h
  34. 1  apps/haartraining/performance.cpp
  35. 1  apps/traincascade/CMakeLists.txt
  36. 34  cmake/OpenCVCRTLinkage.cmake
  37. 4  cmake/OpenCVCompilerOptions.cmake
  38. 52  cmake/OpenCVDetectPython.cmake
  39. 13  cmake/OpenCVFindIPP.cmake
  40. 11  cmake/OpenCVFindLibsGUI.cmake
  41. 13  cmake/OpenCVFindLibsGrfmt.cmake
  42. 11  cmake/OpenCVFindLibsVideo.cmake
  43. 4  cmake/OpenCVGenInfoPlist.cmake
  44. 2  cmake/OpenCVMinDepVersions.cmake
  45. 18  cmake/OpenCVModule.cmake
  46. 13  cmake/OpenCVPCHSupport.cmake
  47. 2  cmake/OpenCVUtils.cmake
  48. 2  cmake/templates/cmake_uninstall.cmake.in
  49. 3  cmake/templates/cvconfig.h.cmake
  50. 2  doc/CMakeLists.txt
  51. 1  doc/_static/insertIframe.js
  52. 2  doc/check_docs.py
  53. 1  doc/mymath.sty
  54. BIN  doc/opencv-logo2.png
  55. 1  doc/pattern_tools/svgfig.py
  56. 84  doc/tutorials/calib3d/camera_calibration/camera_calibration.rst
  57. 1  doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst
  58. 3  doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst
  59. 2  doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst
  60. 3  doc/tutorials/features2d/feature_description/feature_description.rst
  61. 2  doc/tutorials/features2d/feature_detection/feature_detection.rst
  62. 1  doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst
  63. 2  doc/tutorials/features2d/feature_homography/feature_homography.rst
  64. 1  doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst
  65. 1  doc/tutorials/features2d/trackingmotion/corner_subpixeles/corner_subpixeles.rst
  66. 1  doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.rst
  67. 2  doc/tutorials/features2d/trackingmotion/good_features_to_track/good_features_to_track.rst
  68. 2  doc/tutorials/features2d/trackingmotion/harris_detector/harris_detector.rst
  69. 1  doc/tutorials/general/table_of_content_general/table_of_content_general.rst
  70. 5  doc/tutorials/highgui/trackbar/trackbar.rst
  71. 1  doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.rst
  72. 1  doc/tutorials/imgproc/histograms/template_matching/template_matching.rst
  73. 3  doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.rst
  74. 1  doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.rst
  75. 1  doc/tutorials/imgproc/imgtrans/remap/remap.rst
  76. 1  doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.rst
  77. 1  doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.rst
  78. 2  doc/tutorials/imgproc/pyramids/pyramids.rst
  79. 1  doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.rst
  80. 1  doc/tutorials/imgproc/shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses.rst
  81. 1  doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.rst
  82. 1  doc/tutorials/imgproc/shapedescriptors/hull/hull.rst
  83. 1  doc/tutorials/imgproc/shapedescriptors/moments/moments.rst
  84. 1  doc/tutorials/imgproc/shapedescriptors/point_polygon_test/point_polygon_test.rst
  85. 3  doc/tutorials/imgproc/table_of_content_imgproc/table_of_content_imgproc.rst
  86. 3  doc/tutorials/introduction/linux_eclipse/linux_eclipse.rst
  87. 1  doc/tutorials/introduction/linux_install/linux_install.rst
  88. 1  doc/tutorials/ios/hello/hello.rst
  89. 1  doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst
  90. 50  doc/tutorials/objdetect/cascade_classifier/cascade_classifier.rst
  91. 2  include/CMakeLists.txt
  92. 1  include/opencv/cv.h
  93. 1  include/opencv/cxeigen.hpp
  94. 1  modules/androidcamera/camera_wrapper/camera_wrapper.h
  95. 2  modules/bioinspired/CMakeLists.txt
  96. 3  modules/bioinspired/include/opencv2/bioinspired/retina.hpp
  97. 753  modules/bioinspired/src/opencl/retina_kernel.cl
  98. 44  modules/bioinspired/src/precomp.cpp
  99. 6  modules/bioinspired/src/precomp.hpp
  100. 1  modules/bioinspired/src/retina.cpp

Some files were not shown because too many files have changed in this diff.

60  .gitattributes

@ -1,42 +1,58 @@
.git* export-ignore
* text=auto whitespace=trailing-space,space-before-tab,-indent-with-non-tab,tab-in-indent,tabwidth=4
*.py text
.git* text export-ignore
*.aidl text
*.appxmanifest text
*.bib text
*.c text
*.cl text
*.conf text
*.cpp text
*.hpp text
*.css_t text
*.cu text
*.cxx text
*.hxx text
*.mm text
*.c text
*.def text
*.filelist text
*.h text
*.hpp text
*.htm text
*.html text
*.hxx text
*.i text
*.js text
*.idl text
*.java text
*.scala text
*.cu text
*.cl text
*.css_t text
*.js text
*.mk text
*.mm text
*.plist text
*.properties text
*.py text
*.qrc text
*.qss text
*.S text
*.rst text
*.tex text
*.sbt text
*.scala text
*.sty text
*.tex text
*.txt text
*.xaml text
*.aidl text
*.mk text
# reST underlines/overlines can look like conflict markers
*.rst text conflict-marker-size=80
*.cmake text whitespace=tabwidth=2
*.cmakein text whitespace=tabwidth=2
*.in text whitespace=tabwidth=2
CMakeLists.txt text whitespace=tabwidth=2
*.png binary
*.jpeg binary
*.jpg binary
*.avi binary
*.bmp binary
*.exr binary
*.ico binary
*.jpeg binary
*.jpg binary
*.png binary
*.a binary
*.so binary
@ -47,6 +63,7 @@ CMakeLists.txt text whitespace=tabwidth=2
*.pbxproj binary
*.vec binary
*.doc binary
*.dia binary
*.xml -text whitespace=cr-at-eol
*.yml -text whitespace=cr-at-eol
@ -55,9 +72,12 @@ CMakeLists.txt text whitespace=tabwidth=2
.cproject -text whitespace=cr-at-eol merge=union
org.eclipse.jdt.core.prefs -text whitespace=cr-at-eol merge=union
*.vcproj text eol=crlf merge=union
*.bat text eol=crlf
*.cmd text eol=crlf
*.cmd.tmpl text eol=crlf
*.dsp text eol=crlf -whitespace
*.sln text eol=crlf -whitespace
*.vcproj text eol=crlf -whitespace merge=union
*.vcxproj text eol=crlf -whitespace merge=union
*.sh text eol=lf

1  .gitignore

@ -8,3 +8,4 @@ tegra/
.*.swp
tags
build/
Thumbs.db

@ -0,0 +1 @@
* -whitespace

@ -40,5 +40,3 @@ How to update opencv_ffmpeg.dll and opencv_ffmpeg_64.dll when a new version of F
8. Then, go to <opencv>\3rdparty\ffmpeg, edit make.bat
(change paths to the actual paths to your msys32 and msys64 distributions) and then run make.bat

@ -9,7 +9,7 @@ else()
project(${PNG_LIBRARY})
endif()
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" ${ZLIB_INCLUDE_DIR})
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" ${ZLIB_INCLUDE_DIRS})
file(GLOB lib_srcs *.c)
file(GLOB lib_hdrs *.h)
@ -29,8 +29,12 @@ if(MSVC)
add_definitions(-D_CRT_SECURE_NO_DEPRECATE)
endif(MSVC)
if (HAVE_WINRT)
add_definitions(-DHAVE_WINRT)
endif()
add_library(${PNG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
target_link_libraries(${PNG_LIBRARY} ${ZLIB_LIBRARY})
target_link_libraries(${PNG_LIBRARY} ${ZLIB_LIBRARIES})
if(UNIX)
if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC)

@ -0,0 +1,22 @@
diff --git a/3rdparty/libpng/pngpriv.h b/3rdparty/libpng/pngpriv.h
index 07b2b0b..e7824b8 100644
--- a/3rdparty/libpng/pngpriv.h
+++ b/3rdparty/libpng/pngpriv.h
@@ -360,7 +360,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
/* Memory model/platform independent fns */
#ifndef PNG_ABORT
-# ifdef _WINDOWS_
+# if defined(_WINDOWS_) && !defined(HAVE_WINRT)
# define PNG_ABORT() ExitProcess(0)
# else
# define PNG_ABORT() abort()
@@ -378,7 +378,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
# define png_memcpy _fmemcpy
# define png_memset _fmemset
#else
-# ifdef _WINDOWS_ /* Favor Windows over C runtime fns */
+# if defined(_WINDOWS_) && !defined(HAVE_WINRT) /* Favor Windows over C runtime fns */
# define CVT_PTR(ptr) (ptr)
# define CVT_PTR_NOCHECK(ptr) (ptr)
# define png_strlen lstrlenA

@ -360,7 +360,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
/* Memory model/platform independent fns */
#ifndef PNG_ABORT
# ifdef _WINDOWS_
# if defined(_WINDOWS_) && !defined(HAVE_WINRT)
# define PNG_ABORT() ExitProcess(0)
# else
# define PNG_ABORT() abort()
@ -378,7 +378,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp;
# define png_memcpy _fmemcpy
# define png_memset _fmemset
#else
# ifdef _WINDOWS_ /* Favor Windows over C runtime fns */
# if defined(_WINDOWS_) && !defined(HAVE_WINRT) /* Favor Windows over C runtime fns */
# define CVT_PTR(ptr) (ptr)
# define CVT_PTR_NOCHECK(ptr) (ptr)
# define png_strlen lstrlenA

@ -17,14 +17,14 @@ check_include_file(string.h HAVE_STRING_H)
check_include_file(sys/types.h HAVE_SYS_TYPES_H)
check_include_file(unistd.h HAVE_UNISTD_H)
if(WIN32)
if(WIN32 AND NOT HAVE_WINRT)
set(USE_WIN32_FILEIO 1)
endif()
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/tif_config.h.cmakein"
"${CMAKE_CURRENT_BINARY_DIR}/tif_config.h" @ONLY)
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" ${ZLIB_INCLUDE_DIR})
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" ${ZLIB_INCLUDE_DIRS})
set(lib_srcs
tif_aux.c
@ -79,14 +79,12 @@ set(lib_srcs
"${CMAKE_CURRENT_BINARY_DIR}/tif_config.h"
)
if(UNIX)
if(WIN32 AND NOT HAVE_WINRT)
list(APPEND lib_srcs tif_win32.c)
else()
list(APPEND lib_srcs tif_unix.c)
endif()
if(WIN32)
list(APPEND lib_srcs tif_win32.c)
endif(WIN32)
ocv_warnings_disable(CMAKE_C_FLAGS -Wno-unused-but-set-variable -Wmissing-prototypes -Wmissing-declarations -Wundef -Wunused -Wsign-compare
-Wcast-align -Wshadow -Wno-maybe-uninitialized -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast)
ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-parameter) # clang

@ -168,4 +168,3 @@
/* Support Deflate compression */
#define ZIP_SUPPORT 1

@ -22,7 +22,7 @@ set(OPENEXR_INCLUDE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/Half"
"${CMAKE_CURRENT_SOURCE_DIR}/Imath"
"${CMAKE_CURRENT_SOURCE_DIR}/IlmImf")
ocv_include_directories("${CMAKE_CURRENT_BINARY_DIR}" ${ZLIB_INCLUDE_DIR} ${OPENEXR_INCLUDE_PATHS})
ocv_include_directories("${CMAKE_CURRENT_BINARY_DIR}" ${ZLIB_INCLUDE_DIRS} ${OPENEXR_INCLUDE_PATHS})
file(GLOB lib_srcs Half/half.cpp Iex/*.cpp IlmThread/*.cpp Imath/*.cpp IlmImf/*.cpp)
file(GLOB lib_hdrs Half/*.h Iex/Iex*.h IlmThread/IlmThread*.h Imath/Imath*.h IlmImf/*.h)

@ -1,6 +1,69 @@
ChangeLog file for zlib
Changes in 1.2.8 (28 Apr 2013)
- Update contrib/minizip/iowin32.c for Windows RT [Vollant]
- Do not force Z_CONST for C++
- Clean up contrib/vstudio [Roß]
- Correct spelling error in zlib.h
- Fix mixed line endings in contrib/vstudio
Changes in 1.2.7.3 (13 Apr 2013)
- Fix version numbers and DLL names in contrib/vstudio/*/zlib.rc
Changes in 1.2.7.2 (13 Apr 2013)
- Change check for a four-byte type back to hexadecimal
- Fix typo in win32/Makefile.msc
- Add casts in gzwrite.c for pointer differences
Changes in 1.2.7.1 (24 Mar 2013)
- Replace use of unsafe string functions with snprintf if available
- Avoid including stddef.h on Windows for Z_SOLO compile [Niessink]
- Fix gzgetc undefine when Z_PREFIX set [Turk]
- Eliminate use of mktemp in Makefile (not always available)
- Fix bug in 'F' mode for gzopen()
- Add inflateGetDictionary() function
- Correct comment in deflate.h
- Use _snprintf for snprintf in Microsoft C
- On Darwin, only use /usr/bin/libtool if libtool is not Apple
- Delete "--version" file if created by "ar --version" [Richard G.]
- Fix configure check for veracity of compiler error return codes
- Fix CMake compilation of static lib for MSVC2010 x64
- Remove unused variable in infback9.c
- Fix argument checks in gzlog_compress() and gzlog_write()
- Clean up the usage of z_const and respect const usage within zlib
- Clean up examples/gzlog.[ch] comparisons of different types
- Avoid shift equal to bits in type (caused endless loop)
- Fix unintialized value bug in gzputc() introduced by const patches
- Fix memory allocation error in examples/zran.c [Nor]
- Fix bug where gzopen(), gzclose() would write an empty file
- Fix bug in gzclose() when gzwrite() runs out of memory
- Check for input buffer malloc failure in examples/gzappend.c
- Add note to contrib/blast to use binary mode in stdio
- Fix comparisons of differently signed integers in contrib/blast
- Check for invalid code length codes in contrib/puff
- Fix serious but very rare decompression bug in inftrees.c
- Update inflateBack() comments, since inflate() can be faster
- Use underscored I/O function names for WINAPI_FAMILY
- Add _tr_flush_bits to the external symbols prefixed by --zprefix
- Add contrib/vstudio/vc10 pre-build step for static only
- Quote --version-script argument in CMakeLists.txt
- Don't specify --version-script on Apple platforms in CMakeLists.txt
- Fix casting error in contrib/testzlib/testzlib.c
- Fix types in contrib/minizip to match result of get_crc_table()
- Simplify contrib/vstudio/vc10 with 'd' suffix
- Add TOP support to win32/Makefile.msc
- Suport i686 and amd64 assembler builds in CMakeLists.txt
- Fix typos in the use of _LARGEFILE64_SOURCE in zconf.h
- Add vc11 and vc12 build files to contrib/vstudio
- Add gzvprintf() as an undocumented function in zlib
- Fix configure for Sun shell
- Remove runtime check in configure for four-byte integer type
- Add casts and consts to ease user conversion to C++
- Add man pages for minizip and miniunzip
- In Makefile uninstall, don't rm if preceding cd fails
- Do not return Z_BUF_ERROR if deflateParam() has nothing to write
Changes in 1.2.7 (2 May 2012)
- Replace use of memmove() with a simple copy for portability
- Test for existence of strerror

@ -1,6 +1,6 @@
ZLIB DATA COMPRESSION LIBRARY
zlib 1.2.7 is a general purpose data compression library. All the code is
zlib 1.2.8 is a general purpose data compression library. All the code is
thread safe. The data format used by the zlib library is described by RFCs
(Request for Comments) 1950 to 1952 in the files
http://tools.ietf.org/html/rfc1950 (zlib format), rfc1951 (deflate format) and
@ -31,7 +31,7 @@ Mark Nelson <markn@ieee.org> wrote an article about zlib for the Jan. 1997
issue of Dr. Dobb's Journal; a copy of the article is available at
http://marknelson.us/1997/01/01/zlib-engine/ .
The changes made in version 1.2.7 are documented in the file ChangeLog.
The changes made in version 1.2.8 are documented in the file ChangeLog.
Unsupported third party contributions are provided in directory contrib/ .
@ -84,7 +84,7 @@ Acknowledgments:
Copyright notice:
(C) 1995-2012 Jean-loup Gailly and Mark Adler
(C) 1995-2013 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages

@ -29,7 +29,7 @@ int ZEXPORT compress2 (dest, destLen, source, sourceLen, level)
z_stream stream;
int err;
stream.next_in = (Bytef*)source;
stream.next_in = (z_const Bytef *)source;
stream.avail_in = (uInt)sourceLen;
#ifdef MAXSEG_64K
/* Check for source > 64K on 16-bit machine: */

@ -1,5 +1,5 @@
/* deflate.c -- compress data using the deflation algorithm
* Copyright (C) 1995-2012 Jean-loup Gailly and Mark Adler
* Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -52,7 +52,7 @@
#include "deflate.h"
const char deflate_copyright[] =
" deflate 1.2.7 Copyright 1995-2012 Jean-loup Gailly and Mark Adler ";
" deflate 1.2.8 Copyright 1995-2013 Jean-loup Gailly and Mark Adler ";
/*
If you use the zlib library in a product, an acknowledgment is welcome
in the documentation of your product. If for some reason you cannot
@ -305,7 +305,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
s->pending_buf == Z_NULL) {
s->status = FINISH_STATE;
strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
strm->msg = ERR_MSG(Z_MEM_ERROR);
deflateEnd (strm);
return Z_MEM_ERROR;
}
@ -329,7 +329,7 @@ int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
uInt str, n;
int wrap;
unsigned avail;
unsigned char *next;
z_const unsigned char *next;
if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
return Z_STREAM_ERROR;
@ -359,7 +359,7 @@ int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
avail = strm->avail_in;
next = strm->next_in;
strm->avail_in = dictLength;
strm->next_in = (Bytef *)dictionary;
strm->next_in = (z_const Bytef *)dictionary;
fill_window(s);
while (s->lookahead >= MIN_MATCH) {
str = s->strstart;
@ -513,6 +513,8 @@ int ZEXPORT deflateParams(strm, level, strategy)
strm->total_in != 0) {
/* Flush the last buffer: */
err = deflate(strm, Z_BLOCK);
if (err == Z_BUF_ERROR && s->pending == 0)
err = Z_OK;
}
if (s->level != level) {
s->level = level;

@ -104,7 +104,7 @@ typedef struct internal_state {
int wrap; /* bit 0 true for zlib, bit 1 true for gzip */
gz_headerp gzhead; /* gzip header information to write */
uInt gzindex; /* where in extra, name, or comment */
Byte method; /* STORED (for zip only) or DEFLATED */
Byte method; /* can only be DEFLATED */
int last_flush; /* value of flush param for previous deflate call */
/* used by deflate.c: */

@ -1,5 +1,5 @@
/* gzguts.h -- zlib internal header definitions for gz* operations
* Copyright (C) 2004, 2005, 2010, 2011, 2012 Mark Adler
* Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -35,6 +35,13 @@
# include <io.h>
#endif
#ifdef WINAPI_FAMILY
# define open _open
# define read _read
# define write _write
# define close _close
#endif
#ifdef NO_DEFLATE /* for compatibility with old definition */
# define NO_GZCOMPRESS
#endif
@ -88,6 +95,14 @@
# endif
#endif
/* unlike snprintf (which is required in C99, yet still not supported by
Microsoft more than a decade later!), _snprintf does not guarantee null
termination of the result -- however this is only used in gzlib.c where
the result is assured to fit in the space provided */
#ifdef _MSC_VER
# define snprintf _snprintf
#endif
#ifndef local
# define local static
#endif
@ -127,7 +142,8 @@
# define DEF_MEM_LEVEL MAX_MEM_LEVEL
#endif
/* default i/o buffer size -- double this for output when reading */
/* default i/o buffer size -- double this for output when reading (this and
twice this must be able to fit in an unsigned type) */
#define GZBUFSIZE 8192
/* gzip modes, also provide a little integrity check on the passed structure */

@ -1,5 +1,5 @@
/* gzlib.c -- zlib functions common to reading and writing gzip files
* Copyright (C) 2004, 2010, 2011, 2012 Mark Adler
* Copyright (C) 2004, 2010, 2011, 2012, 2013 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -108,7 +108,7 @@ local gzFile gz_open(path, fd, mode)
return NULL;
/* allocate gzFile structure to return */
state = malloc(sizeof(gz_state));
state = (gz_statep)malloc(sizeof(gz_state));
if (state == NULL)
return NULL;
state->size = 0; /* no buffers allocated yet */
@ -162,8 +162,10 @@ local gzFile gz_open(path, fd, mode)
break;
case 'F':
state->strategy = Z_FIXED;
break;
case 'T':
state->direct = 1;
break;
default: /* could consider as an error, but just ignore */
;
}
@ -194,8 +196,8 @@ local gzFile gz_open(path, fd, mode)
}
else
#endif
len = strlen(path);
state->path = malloc(len + 1);
len = strlen((const char *)path);
state->path = (char *)malloc(len + 1);
if (state->path == NULL) {
free(state);
return NULL;
@ -208,7 +210,11 @@ local gzFile gz_open(path, fd, mode)
*(state->path) = 0;
else
#endif
#if !defined(NO_snprintf) && !defined(NO_vsnprintf)
snprintf(state->path, len + 1, "%s", (const char *)path);
#else
strcpy(state->path, path);
#endif
/* compute the flags for open() */
oflag =
@ -236,7 +242,7 @@ local gzFile gz_open(path, fd, mode)
#ifdef _WIN32
fd == -2 ? _wopen(path, oflag, 0666) :
#endif
open(path, oflag, 0666));
open((const char *)path, oflag, 0666));
if (state->fd == -1) {
free(state->path);
free(state);
@ -282,9 +288,13 @@ gzFile ZEXPORT gzdopen(fd, mode)
char *path; /* identifier for error messages */
gzFile gz;
if (fd == -1 || (path = malloc(7 + 3 * sizeof(int))) == NULL)
if (fd == -1 || (path = (char *)malloc(7 + 3 * sizeof(int))) == NULL)
return NULL;
#if !defined(NO_snprintf) && !defined(NO_vsnprintf)
snprintf(path, 7 + 3 * sizeof(int), "<fd:%d>", fd); /* for debugging */
#else
sprintf(path, "<fd:%d>", fd); /* for debugging */
#endif
gz = gz_open(path, fd, mode);
free(path);
return gz;
@ -531,7 +541,8 @@ const char * ZEXPORT gzerror(file, errnum)
/* return error information */
if (errnum != NULL)
*errnum = state->err;
return state->msg == NULL ? "" : state->msg;
return state->err == Z_MEM_ERROR ? "out of memory" :
(state->msg == NULL ? "" : state->msg);
}
/* -- see zlib.h -- */
@ -582,21 +593,24 @@ void ZLIB_INTERNAL gz_error(state, err, msg)
if (msg == NULL)
return;
/* for an out of memory error, save as static string */
if (err == Z_MEM_ERROR) {
state->msg = (char *)msg;
/* for an out of memory error, return literal string when requested */
if (err == Z_MEM_ERROR)
return;
}
/* construct error message with path */
if ((state->msg = malloc(strlen(state->path) + strlen(msg) + 3)) == NULL) {
if ((state->msg = (char *)malloc(strlen(state->path) + strlen(msg) + 3)) ==
NULL) {
state->err = Z_MEM_ERROR;
state->msg = (char *)"out of memory";
return;
}
#if !defined(NO_snprintf) && !defined(NO_vsnprintf)
snprintf(state->msg, strlen(state->path) + strlen(msg) + 3,
"%s%s%s", state->path, ": ", msg);
#else
strcpy(state->msg, state->path);
strcat(state->msg, ": ");
strcat(state->msg, msg);
#endif
return;
}

@ -1,5 +1,5 @@
/* gzread.c -- zlib functions for reading gzip files
* Copyright (C) 2004, 2005, 2010, 2011, 2012 Mark Adler
* Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -58,7 +58,8 @@ local int gz_avail(state)
return -1;
if (state->eof == 0) {
if (strm->avail_in) { /* copy what's there to the start */
unsigned char *p = state->in, *q = strm->next_in;
unsigned char *p = state->in;
unsigned const char *q = strm->next_in;
unsigned n = strm->avail_in;
do {
*p++ = *q++;
@ -90,8 +91,8 @@ local int gz_look(state)
/* allocate read buffers and inflate memory */
if (state->size == 0) {
/* allocate buffers */
state->in = malloc(state->want);
state->out = malloc(state->want << 1);
state->in = (unsigned char *)malloc(state->want);
state->out = (unsigned char *)malloc(state->want << 1);
if (state->in == NULL || state->out == NULL) {
if (state->out != NULL)
free(state->out);
@ -352,14 +353,14 @@ int ZEXPORT gzread(file, buf, len)
/* large len -- read directly into user buffer */
else if (state->how == COPY) { /* read directly */
if (gz_load(state, buf, len, &n) == -1)
if (gz_load(state, (unsigned char *)buf, len, &n) == -1)
return -1;
}
/* large len -- decompress directly into user buffer */
else { /* state->how == GZIP */
strm->avail_out = len;
strm->next_out = buf;
strm->next_out = (unsigned char *)buf;
if (gz_decomp(state) == -1)
return -1;
n = state->x.have;
@ -378,7 +379,11 @@ int ZEXPORT gzread(file, buf, len)
}
/* -- see zlib.h -- */
#ifdef Z_PREFIX_SET
# undef z_gzgetc
#else
# undef gzgetc
#endif
int ZEXPORT gzgetc(file)
gzFile file;
{
@ -518,7 +523,7 @@ char * ZEXPORT gzgets(file, buf, len)
/* look for end-of-line in current output buffer */
n = state->x.have > left ? left : state->x.have;
eol = memchr(state->x.next, '\n', n);
eol = (unsigned char *)memchr(state->x.next, '\n', n);
if (eol != NULL)
n = (unsigned)(eol - state->x.next) + 1;

@ -1,5 +1,5 @@
/* gzwrite.c -- zlib functions for writing gzip files
* Copyright (C) 2004, 2005, 2010, 2011, 2012 Mark Adler
* Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -19,7 +19,7 @@ local int gz_init(state)
z_streamp strm = &(state->strm);
/* allocate input buffer */
state->in = malloc(state->want);
state->in = (unsigned char *)malloc(state->want);
if (state->in == NULL) {
gz_error(state, Z_MEM_ERROR, "out of memory");
return -1;
@ -28,7 +28,7 @@ local int gz_init(state)
/* only need output buffer and deflate state if compressing */
if (!state->direct) {
/* allocate output buffer */
state->out = malloc(state->want);
state->out = (unsigned char *)malloc(state->want);
if (state->out == NULL) {
free(state->in);
gz_error(state, Z_MEM_ERROR, "out of memory");
@ -168,7 +168,6 @@ int ZEXPORT gzwrite(file, buf, len)
unsigned len;
{
unsigned put = len;
unsigned n;
gz_statep state;
z_streamp strm;
@ -208,16 +207,19 @@ int ZEXPORT gzwrite(file, buf, len)
if (len < state->size) {
/* copy to input buffer, compress when full */
do {
unsigned have, copy;
if (strm->avail_in == 0)
strm->next_in = state->in;
n = state->size - strm->avail_in;
if (n > len)
n = len;
memcpy(strm->next_in + strm->avail_in, buf, n);
strm->avail_in += n;
state->x.pos += n;
buf = (char *)buf + n;
len -= n;
have = (unsigned)((strm->next_in + strm->avail_in) - state->in);
copy = state->size - have;
if (copy > len)
copy = len;
memcpy(state->in + have, buf, copy);
strm->avail_in += copy;
state->x.pos += copy;
buf = (const char *)buf + copy;
len -= copy;
if (len && gz_comp(state, Z_NO_FLUSH) == -1)
return 0;
} while (len);
@ -229,7 +231,7 @@ int ZEXPORT gzwrite(file, buf, len)
/* directly compress user buffer to file */
strm->avail_in = len;
strm->next_in = (voidp)buf;
strm->next_in = (z_const Bytef *)buf;
state->x.pos += len;
if (gz_comp(state, Z_NO_FLUSH) == -1)
return 0;
@ -244,6 +246,7 @@ int ZEXPORT gzputc(file, c)
gzFile file;
int c;
{
unsigned have;
unsigned char buf[1];
gz_statep state;
z_streamp strm;
@ -267,13 +270,17 @@ int ZEXPORT gzputc(file, c)
/* try writing to input buffer for speed (state->size == 0 if buffer not
initialized) */
if (strm->avail_in < state->size) {
if (state->size) {
if (strm->avail_in == 0)
strm->next_in = state->in;
strm->next_in[strm->avail_in++] = c;
have = (unsigned)((strm->next_in + strm->avail_in) - state->in);
if (have < state->size) {
state->in[have] = c;
strm->avail_in++;
state->x.pos++;
return c & 0xff;
}
}
/* no room in buffer or not initialized, use gz_write() */
buf[0] = c;
@ -300,12 +307,11 @@ int ZEXPORT gzputs(file, str)
#include <stdarg.h>
/* -- see zlib.h -- */
int ZEXPORTVA gzprintf (gzFile file, const char *format, ...)
int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va)
{
int size, len;
gz_statep state;
z_streamp strm;
va_list va;
/* get internal structure */
if (file == NULL)
@ -335,25 +341,20 @@ int ZEXPORTVA gzprintf (gzFile file, const char *format, ...)
/* do the printf() into the input buffer, put length in len */
size = (int)(state->size);
state->in[size - 1] = 0;
va_start(va, format);
#ifdef NO_vsnprintf
# ifdef HAS_vsprintf_void
(void)vsprintf((char *)(state->in), format, va);
va_end(va);
for (len = 0; len < size; len++)
if (state->in[len] == 0) break;
# else
len = vsprintf((char *)(state->in), format, va);
va_end(va);
# endif
#else
# ifdef HAS_vsnprintf_void
(void)vsnprintf((char *)(state->in), size, format, va);
va_end(va);
len = strlen((char *)(state->in));
# else
len = vsnprintf((char *)(state->in), size, format, va);
va_end(va);
# endif
#endif
@ -368,6 +369,17 @@ int ZEXPORTVA gzprintf (gzFile file, const char *format, ...)
return len;
}
int ZEXPORTVA gzprintf(gzFile file, const char *format, ...)
{
va_list va;
int ret;
va_start(va, format);
ret = gzvprintf(file, format, va);
va_end(va);
return ret;
}
#else /* !STDC && !Z_HAVE_STDARG_H */
/* -- see zlib.h -- */
@ -547,9 +559,9 @@ int ZEXPORT gzclose_w(file)
}
/* flush, free memory, and close file */
if (state->size) {
if (gz_comp(state, Z_FINISH) == -1)
ret = state->err;
if (state->size) {
if (!state->direct) {
(void)deflateEnd(&(state->strm));
free(state->out);
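
The gzwrite.c hunks above split the old gzprintf() into a new gzvprintf() entry point plus a thin varargs wrapper. As an illustration only (not part of this diff), a caller could layer its own formatted-logging helper on the new function roughly like this; gz_logf and the gzFile it receives are hypothetical:

    #include <stdarg.h>
    #include <zlib.h>

    /* Hypothetical helper, assuming a gzFile opened elsewhere with gzopen(). */
    static int gz_logf(gzFile log, const char *fmt, ...)
    {
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = gzvprintf(log, fmt, ap);  /* forward the va_list, as gzprintf() now does */
        va_end(ap);
        return ret;
    }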

@ -255,7 +255,7 @@ out_func out;
void FAR *out_desc;
{
struct inflate_state FAR *state;
unsigned char FAR *next; /* next input */
z_const unsigned char FAR *next; /* next input */
unsigned char FAR *put; /* next output */
unsigned have, left; /* available input and output */
unsigned long hold; /* bit buffer */

@ -1,5 +1,5 @@
/* inffast.c -- fast decoding
* Copyright (C) 1995-2008, 2010 Mark Adler
* Copyright (C) 1995-2008, 2010, 2013 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -69,8 +69,8 @@ z_streamp strm;
unsigned start; /* inflate()'s starting value for strm->avail_out */
{
struct inflate_state FAR *state;
unsigned char FAR *in; /* local strm->next_in */
unsigned char FAR *last; /* while in < last, enough input available */
z_const unsigned char FAR *in; /* local strm->next_in */
z_const unsigned char FAR *last; /* have enough input while in < last */
unsigned char FAR *out; /* local strm->next_out */
unsigned char FAR *beg; /* inflate()'s initial strm->next_out */
unsigned char FAR *end; /* while out < end, enough space available */

@ -93,11 +93,12 @@
/* function prototypes */
local void fixedtables OF((struct inflate_state FAR *state));
local int updatewindow OF((z_streamp strm, unsigned out));
local int updatewindow OF((z_streamp strm, const unsigned char FAR *end,
unsigned copy));
#ifdef BUILDFIXED
void makefixed OF((void));
#endif
local unsigned syncsearch OF((unsigned FAR *have, unsigned char FAR *buf,
local unsigned syncsearch OF((unsigned FAR *have, const unsigned char FAR *buf,
unsigned len));
int ZEXPORT inflateResetKeep(strm)
@ -375,12 +376,13 @@ void makefixed()
output will fall in the output data, making match copies simpler and faster.
The advantage may be dependent on the size of the processor's data caches.
*/
local int updatewindow(strm, out)
local int updatewindow(strm, end, copy)
z_streamp strm;
unsigned out;
const Bytef *end;
unsigned copy;
{
struct inflate_state FAR *state;
unsigned copy, dist;
unsigned dist;
state = (struct inflate_state FAR *)strm->state;
@ -400,19 +402,18 @@ unsigned out;
}
/* copy state->wsize or less output bytes into the circular window */
copy = out - strm->avail_out;
if (copy >= state->wsize) {
zmemcpy(state->window, strm->next_out - state->wsize, state->wsize);
zmemcpy(state->window, end - state->wsize, state->wsize);
state->wnext = 0;
state->whave = state->wsize;
}
else {
dist = state->wsize - state->wnext;
if (dist > copy) dist = copy;
zmemcpy(state->window + state->wnext, strm->next_out - copy, dist);
zmemcpy(state->window + state->wnext, end - copy, dist);
copy -= dist;
if (copy) {
zmemcpy(state->window, strm->next_out - copy, copy);
zmemcpy(state->window, end - copy, copy);
state->wnext = copy;
state->whave = state->wsize;
}
@ -606,7 +607,7 @@ z_streamp strm;
int flush;
{
struct inflate_state FAR *state;
unsigned char FAR *next; /* next input */
z_const unsigned char FAR *next; /* next input */
unsigned char FAR *put; /* next output */
unsigned have, left; /* available input and output */
unsigned long hold; /* bit buffer */
@ -920,7 +921,7 @@ int flush;
while (state->have < 19)
state->lens[order[state->have++]] = 0;
state->next = state->codes;
state->lencode = (code const FAR *)(state->next);
state->lencode = (const code FAR *)(state->next);
state->lenbits = 7;
ret = inflate_table(CODES, state->lens, 19, &(state->next),
&(state->lenbits), state->work);
@ -994,7 +995,7 @@ int flush;
values here (9 and 6) without reading the comments in inftrees.h
concerning the ENOUGH constants, which depend on those values */
state->next = state->codes;
state->lencode = (code const FAR *)(state->next);
state->lencode = (const code FAR *)(state->next);
state->lenbits = 9;
ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
&(state->lenbits), state->work);
@ -1003,7 +1004,7 @@ int flush;
state->mode = BAD;
break;
}
state->distcode = (code const FAR *)(state->next);
state->distcode = (const code FAR *)(state->next);
state->distbits = 6;
ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
&(state->next), &(state->distbits), state->work);
@ -1230,7 +1231,7 @@ int flush;
RESTORE();
if (state->wsize || (out != strm->avail_out && state->mode < BAD &&
(state->mode < CHECK || flush != Z_FINISH)))
if (updatewindow(strm, out)) {
if (updatewindow(strm, strm->next_out, out - strm->avail_out)) {
state->mode = MEM;
return Z_MEM_ERROR;
}
@ -1264,6 +1265,29 @@ z_streamp strm;
return Z_OK;
}
int ZEXPORT inflateGetDictionary(strm, dictionary, dictLength)
z_streamp strm;
Bytef *dictionary;
uInt *dictLength;
{
struct inflate_state FAR *state;
/* check state */
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
/* copy dictionary */
if (state->whave && dictionary != Z_NULL) {
zmemcpy(dictionary, state->window + state->wnext,
state->whave - state->wnext);
zmemcpy(dictionary + state->whave - state->wnext,
state->window, state->wnext);
}
if (dictLength != Z_NULL)
*dictLength = state->whave;
return Z_OK;
}
int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength)
z_streamp strm;
const Bytef *dictionary;
@ -1271,8 +1295,6 @@ uInt dictLength;
{
struct inflate_state FAR *state;
unsigned long dictid;
unsigned char *next;
unsigned avail;
int ret;
/* check state */
@ -1291,13 +1313,7 @@ uInt dictLength;
/* copy dictionary to window using updatewindow(), which will amend the
existing dictionary if appropriate */
next = strm->next_out;
avail = strm->avail_out;
strm->next_out = (Bytef *)dictionary + dictLength;
strm->avail_out = 0;
ret = updatewindow(strm, dictLength);
strm->avail_out = avail;
strm->next_out = next;
ret = updatewindow(strm, dictionary + dictLength, dictLength);
if (ret) {
state->mode = MEM;
return Z_MEM_ERROR;
@ -1337,7 +1353,7 @@ gz_headerp head;
*/
local unsigned syncsearch(have, buf, len)
unsigned FAR *have;
unsigned char FAR *buf;
const unsigned char FAR *buf;
unsigned len;
{
unsigned got;

@ -1,5 +1,5 @@
/* inftrees.c -- generate Huffman trees for efficient decoding
* Copyright (C) 1995-2012 Mark Adler
* Copyright (C) 1995-2013 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -9,7 +9,7 @@
#define MAXBITS 15
const char inflate_copyright[] =
" inflate 1.2.7 Copyright 1995-2012 Mark Adler ";
" inflate 1.2.8 Copyright 1995-2013 Mark Adler ";
/*
If you use the zlib library in a product, an acknowledgment is welcome
in the documentation of your product. If for some reason you cannot
@ -62,7 +62,7 @@ unsigned short FAR *work;
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const unsigned short lext[31] = { /* Length codes 257..285 extra */
16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 78, 68};
19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78};
static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
@ -208,8 +208,8 @@ unsigned short FAR *work;
mask = used - 1; /* mask for comparing low */
/* check available table space */
if ((type == LENS && used >= ENOUGH_LENS) ||
(type == DISTS && used >= ENOUGH_DISTS))
if ((type == LENS && used > ENOUGH_LENS) ||
(type == DISTS && used > ENOUGH_DISTS))
return 1;
/* process all codes and make table entries */
@ -277,8 +277,8 @@ unsigned short FAR *work;
/* check for enough space */
used += 1U << curr;
if ((type == LENS && used >= ENOUGH_LENS) ||
(type == DISTS && used >= ENOUGH_DISTS))
if ((type == LENS && used > ENOUGH_LENS) ||
(type == DISTS && used > ENOUGH_DISTS))
return 1;
/* point entry in root table to sub-table */

@ -146,8 +146,8 @@ local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
local int build_bl_tree OF((deflate_state *s));
local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
int blcodes));
local void compress_block OF((deflate_state *s, ct_data *ltree,
ct_data *dtree));
local void compress_block OF((deflate_state *s, const ct_data *ltree,
const ct_data *dtree));
local int detect_data_type OF((deflate_state *s));
local unsigned bi_reverse OF((unsigned value, int length));
local void bi_windup OF((deflate_state *s));
@ -972,7 +972,8 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
} else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) {
#endif
send_bits(s, (STATIC_TREES<<1)+last, 3);
compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
compress_block(s, (const ct_data *)static_ltree,
(const ct_data *)static_dtree);
#ifdef DEBUG
s->compressed_len += 3 + s->static_len;
#endif
@ -980,7 +981,8 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
send_bits(s, (DYN_TREES<<1)+last, 3);
send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
max_blindex+1);
compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
compress_block(s, (const ct_data *)s->dyn_ltree,
(const ct_data *)s->dyn_dtree);
#ifdef DEBUG
s->compressed_len += 3 + s->opt_len;
#endif
@ -1057,8 +1059,8 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc)
*/
local void compress_block(s, ltree, dtree)
deflate_state *s;
ct_data *ltree; /* literal tree */
ct_data *dtree; /* distance tree */
const ct_data *ltree; /* literal tree */
const ct_data *dtree; /* distance tree */
{
unsigned dist; /* distance of matched string */
int lc; /* match length or unmatched char (if dist == 0) */

@ -30,7 +30,7 @@ int ZEXPORT uncompress (dest, destLen, source, sourceLen)
z_stream stream;
int err;
stream.next_in = (Bytef*)source;
stream.next_in = (z_const Bytef *)source;
stream.avail_in = (uInt)sourceLen;
/* Check for source > 64K on 16-bit machine: */
if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR;

@ -1,5 +1,5 @@
/* zconf.h -- configuration of the zlib compression library
* Copyright (C) 1995-2012 Jean-loup Gailly.
* Copyright (C) 1995-2013 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -23,6 +23,7 @@
# define _dist_code z__dist_code
# define _length_code z__length_code
# define _tr_align z__tr_align
# define _tr_flush_bits z__tr_flush_bits
# define _tr_flush_block z__tr_flush_block
# define _tr_init z__tr_init
# define _tr_stored_block z__tr_stored_block
@ -79,6 +80,7 @@
# define gzopen_w z_gzopen_w
# endif
# define gzprintf z_gzprintf
# define gzvprintf z_gzvprintf
# define gzputc z_gzputc
# define gzputs z_gzputs
# define gzread z_gzread
@ -105,6 +107,7 @@
# define inflateReset z_inflateReset
# define inflateReset2 z_inflateReset2
# define inflateSetDictionary z_inflateSetDictionary
# define inflateGetDictionary z_inflateGetDictionary
# define inflateSync z_inflateSync
# define inflateSyncPoint z_inflateSyncPoint
# define inflateUndermine z_inflateUndermine
@ -390,22 +393,16 @@ typedef uLong FAR uLongf;
typedef Byte *voidp;
#endif
/* ./configure may #define Z_U4 here */
#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC)
# include <limits.h>
# if (UINT_MAX == 0xffffffffUL)
# define Z_U4 unsigned
# else
# if (ULONG_MAX == 0xffffffffUL)
# elif (ULONG_MAX == 0xffffffffUL)
# define Z_U4 unsigned long
# else
# if (USHRT_MAX == 0xffffffffUL)
# elif (USHRT_MAX == 0xffffffffUL)
# define Z_U4 unsigned short
# endif
#endif
# endif
#endif
#ifdef Z_U4
typedef Z_U4 z_crc_t;
@ -427,9 +424,17 @@ typedef uLong FAR uLongf;
# endif
#endif
#if defined(STDC) || defined(Z_HAVE_STDARG_H)
# ifndef Z_SOLO
# include <stdarg.h> /* for va_list */
# endif
#endif
#ifdef _WIN32
# ifndef Z_SOLO
# include <stddef.h> /* for wchar_t */
# endif
#endif
/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and
* "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even
@ -437,7 +442,7 @@ typedef uLong FAR uLongf;
* both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as
* equivalently requesting no 64-bit operations
*/
#if defined(LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1
#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1
# undef _LARGEFILE64_SOURCE
#endif
@ -445,7 +450,7 @@ typedef uLong FAR uLongf;
# define Z_HAVE_UNISTD_H
#endif
#ifndef Z_SOLO
# if defined(Z_HAVE_UNISTD_H) || defined(LARGEFILE64_SOURCE)
# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE)
# include <unistd.h> /* for SEEK_*, off_t, and _LFS64_LARGEFILE */
# ifdef VMS
# include <unixio.h> /* for off_t */

@ -1,7 +1,7 @@
/* zlib.h -- interface of the 'zlib' general purpose compression library
version 1.2.7, May 2nd, 2012
version 1.2.8, April 28th, 2013
Copyright (C) 1995-2012 Jean-loup Gailly and Mark Adler
Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
@ -37,11 +37,11 @@
extern "C" {
#endif
#define ZLIB_VERSION "1.2.7"
#define ZLIB_VERNUM 0x1270
#define ZLIB_VERSION "1.2.8"
#define ZLIB_VERNUM 0x1280
#define ZLIB_VER_MAJOR 1
#define ZLIB_VER_MINOR 2
#define ZLIB_VER_REVISION 7
#define ZLIB_VER_REVISION 8
#define ZLIB_VER_SUBREVISION 0
/*
@ -839,6 +839,21 @@ ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm,
inflate().
*/
ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm,
Bytef *dictionary,
uInt *dictLength));
/*
Returns the sliding dictionary being maintained by inflate. dictLength is
set to the number of bytes in the dictionary, and that many bytes are copied
to dictionary. dictionary must have enough space, where 32768 bytes is
always enough. If inflateGetDictionary() is called with dictionary equal to
Z_NULL, then only the dictionary length is returned, and nothing is copied.
Similary, if dictLength is Z_NULL, then it is not set.
inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the
stream state is inconsistent.
*/
ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm));
/*
Skips invalid compressed data until a possible full flush point (see above
@ -846,7 +861,7 @@ ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm));
available input is skipped. No output is provided.
inflateSync searches for a 00 00 FF FF pattern in the compressed data.
All full flush points have this pattern, but not all occurences of this
All full flush points have this pattern, but not all occurrences of this
pattern are full flush points.
inflateSync returns Z_OK if a possible full flush point has been found,
@ -1007,7 +1022,8 @@ ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits,
the version of the header file.
*/
typedef unsigned (*in_func) OF((void FAR *, unsigned char FAR * FAR *));
typedef unsigned (*in_func) OF((void FAR *,
z_const unsigned char FAR * FAR *));
typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned));
ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm,
@ -1015,11 +1031,12 @@ ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm,
out_func out, void FAR *out_desc));
/*
inflateBack() does a raw inflate with a single call using a call-back
interface for input and output. This is more efficient than inflate() for
file i/o applications in that it avoids copying between the output and the
sliding window by simply making the window itself the output buffer. This
function trusts the application to not change the output buffer passed by
the output function, at least until inflateBack() returns.
interface for input and output. This is potentially more efficient than
inflate() for file i/o applications, in that it avoids copying between the
output and the sliding window by simply making the window itself the output
buffer. inflate() can be faster on modern CPUs when used with large
buffers. inflateBack() trusts the application to not change the output
buffer passed by the output function, at least until inflateBack() returns.
inflateBackInit() must be called first to allocate the internal state
and to initialize the state with the user-provided window buffer.
@ -1736,6 +1753,13 @@ ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp));
ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path,
const char *mode));
#endif
#if defined(STDC) || defined(Z_HAVE_STDARG_H)
# ifndef Z_SOLO
ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file,
const char *format,
va_list va));
# endif
#endif
#ifdef __cplusplus
}
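
The zlib.h hunk above documents the inflateGetDictionary() call new in 1.2.8. A minimal sketch of how it might be used (illustration only, not part of this diff; assumes a z_stream that has already inflated some data):

    #include <zlib.h>

    /* Copy the current sliding window out of an active inflate stream. */
    static int dump_dictionary(z_streamp strm)
    {
        static Bytef dict[32768];   /* 32768 bytes is always enough */
        uInt dictLength = 0;

        int ret = inflateGetDictionary(strm, dict, &dictLength);
        if (ret != Z_OK)            /* Z_STREAM_ERROR if the stream state is inconsistent */
            return ret;
        /* the first dictLength bytes of dict now hold the dictionary */
        return Z_OK;
    }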

@ -14,7 +14,7 @@
struct internal_state {int dummy;}; /* for buggy compilers */
#endif
const char * const z_errmsg[10] = {
z_const char * const z_errmsg[10] = {
"need dictionary", /* Z_NEED_DICT 2 */
"stream end", /* Z_STREAM_END 1 */
"", /* Z_OK 0 */

@ -1,5 +1,5 @@
/* zutil.h -- internal interface and configuration of the compression library
* Copyright (C) 1995-2012 Jean-loup Gailly.
* Copyright (C) 1995-2013 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -44,13 +44,13 @@ typedef unsigned short ush;
typedef ush FAR ushf;
typedef unsigned long ulg;
extern const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
/* (size given to avoid silly warnings with Visual C++) */
#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
#define ERR_RETURN(strm,err) \
return (strm->msg = (char*)ERR_MSG(err), (err))
return (strm->msg = ERR_MSG(err), (err))
/* To be used only when the state is known to be valid */
/* common constants */
@ -168,7 +168,8 @@ extern const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
#endif
/* provide prototypes for these when building zlib without LFS */
#if !defined(_WIN32) && (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0)
#if !defined(_WIN32) && \
(!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0)
ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t));
ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t));
#endif

@ -200,10 +200,6 @@ OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too no
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
OCV_OPTION(ENABLE_WINRT_MODE "Build with Windows Runtime support" OFF IF WIN32 )
# uncategorized options
# ===================================================
OCV_OPTION(CMAKE_VERBOSE "Verbose mode" OFF )
# ----------------------------------------------------------------------------
# Get actual OpenCV version number from sources
@ -269,10 +265,6 @@ if(DEFINED CMAKE_DEBUG_POSTFIX)
set(OPENCV_DEBUG_POSTFIX "${CMAKE_DEBUG_POSTFIX}")
endif()
if(CMAKE_VERBOSE)
set(CMAKE_VERBOSE_MAKEFILE 1)
endif()
# ----------------------------------------------------------------------------
# Path for build/platform -specific headers
@ -288,21 +280,10 @@ set(OPENCV_EXTRA_MODULES_PATH "" CACHE PATH "Where to look for additional OpenCV
# ----------------------------------------------------------------------------
# Autodetect if we are in a GIT repository
# ----------------------------------------------------------------------------
find_package(Git QUIET)
# don't use FindGit because it requires CMake 2.8.2
set(git_names git eg) # eg = easy git
# Prefer .cmd variants on Windows unless running in a Makefile in the MSYS shell
if(CMAKE_HOST_WIN32)
if(NOT CMAKE_GENERATOR MATCHES "MSYS")
set(git_names git.cmd git eg.cmd eg)
endif()
endif()
find_host_program(GIT_EXECUTABLE NAMES ${git_names} PATH_SUFFIXES Git/cmd Git/bin DOC "git command line client")
mark_as_advanced(GIT_EXECUTABLE)
if(GIT_EXECUTABLE)
execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --always --dirty --match "2.[0-9].[0-9]*"
if(GIT_FOUND)
execute_process(COMMAND "${GIT_EXECUTABLE}" describe --tags --always --dirty --match "2.[0-9].[0-9]*"
WORKING_DIRECTORY "${OpenCV_SOURCE_DIR}"
OUTPUT_VARIABLE OPENCV_VCSVERSION
RESULT_VARIABLE GIT_RESULT
@ -479,6 +460,8 @@ include(cmake/OpenCVGenAndroidMK.cmake)
# Generate OpenCVСonfig.cmake and OpenCVConfig-version.cmake for cmake projects
include(cmake/OpenCVGenConfig.cmake)
# Generate Info.plist for the IOS framework
include(cmake/OpenCVGenInfoPlist.cmake)
# ----------------------------------------------------------------------------
# Summary:
@ -631,7 +614,7 @@ status(" OpenGL support:" HAVE_OPENGL THEN "YES (${OPENGL_LIBRARIES})" ELSE N
# ========================== MEDIA IO ==========================
status("")
status(" Media I/O: ")
status(" ZLib:" BUILD_ZLIB THEN "build (ver ${ZLIB_VERSION_STRING})" ELSE "${ZLIB_LIBRARY} (ver ${ZLIB_VERSION_STRING})")
status(" ZLib:" BUILD_ZLIB THEN "build (ver ${ZLIB_VERSION_STRING})" ELSE "${ZLIB_LIBRARIES} (ver ${ZLIB_VERSION_STRING})")
if(WITH_JPEG)
status(" JPEG:" JPEG_FOUND THEN "${JPEG_LIBRARY} (ver ${JPEG_LIB_VERSION})" ELSE "build (ver ${JPEG_LIB_VERSION})")
@ -736,8 +719,8 @@ if(DEFINED WITH_GIGEAPI)
endif(DEFINED WITH_GIGEAPI)
if(DEFINED WITH_QUICKTIME)
status(" QuickTime:" WITH_QUICKTIME THEN YES ELSE NO)
status(" QTKit:" WITH_QUICKTIME THEN NO ELSE YES)
status(" QuickTime:" HAVE_QUICKTIME THEN YES ELSE NO)
status(" QTKit:" HAVE_QTKIT THEN YES ELSE NO)
endif(DEFINED WITH_QUICKTIME)
if(DEFINED WITH_UNICAP)
@ -826,14 +809,14 @@ endif()
# ========================== python ==========================
status("")
status(" Python:")
status(" Interpreter:" PYTHON_EXECUTABLE THEN "${PYTHON_EXECUTABLE} (ver ${PYTHON_VERSION_FULL})" ELSE NO)
status(" Interpreter:" PYTHONINTERP_FOUND THEN "${PYTHON_EXECUTABLE} (ver ${PYTHON_VERSION_STRING})" ELSE NO)
if(BUILD_opencv_python)
if(PYTHONLIBS_VERSION_STRING)
status(" Libraries:" HAVE_opencv_python THEN "${PYTHON_LIBRARIES} (ver ${PYTHONLIBS_VERSION_STRING})" ELSE NO)
else()
status(" Libraries:" HAVE_opencv_python THEN ${PYTHON_LIBRARIES} ELSE NO)
status(" Libraries:" HAVE_opencv_python THEN "${PYTHON_LIBRARIES}" ELSE NO)
endif()
status(" numpy:" PYTHON_NUMPY_INCLUDE_DIR THEN "${PYTHON_NUMPY_INCLUDE_DIR} (ver ${PYTHON_NUMPY_VERSION})" ELSE "NO (Python wrappers can not be generated)")
status(" numpy:" PYTHON_NUMPY_INCLUDE_DIRS THEN "${PYTHON_NUMPY_INCLUDE_DIRS} (ver ${PYTHON_NUMPY_VERSION})" ELSE "NO (Python wrappers can not be generated)")
status(" packages path:" PYTHON_EXECUTABLE THEN "${PYTHON_PACKAGES_PATH}" ELSE "-")
endif()
@ -882,4 +865,3 @@ ocv_finalize_status()
if("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}")
message(WARNING "The source directory is the same as binary directory. \"make clean\" may damage the source tree")
endif()

@ -3,4 +3,3 @@ add_definitions(-D__OPENCV_BUILD=1)
add_subdirectory(haartraining)
add_subdirectory(traincascade)
add_subdirectory(sft)

@ -79,4 +79,3 @@ if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(opencv_haartraining PROPERTIES FOLDER "applications")
set_target_properties(opencv_haartraining_engine PROPERTIES FOLDER "applications")
endif()

@ -90,4 +90,3 @@ int icvGetIdxAt( CvMat* idx, int pos )
void icvSave( const CvArr* ptr, const char* filename, int line );
#endif /* __CVCOMMON_H_ */

@ -375,4 +375,3 @@ int main( int argc, char* argv[] )
return 0;
}

@ -34,4 +34,3 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
install(TARGETS ${the_target} RUNTIME DESTINATION bin COMPONENT main)

@ -4,37 +4,42 @@ endif()
#INCLUDE (CheckIncludeFiles)
if (ENABLE_WINRT_MODE)
set(HAVE_WINRT True)
set(HAVE_WINRT FALSE)
# search Windows Platform SDK
message(STATUS "Checking for Windows Platfrom SDK")
message(STATUS "Checking for Windows Platform SDK")
GET_FILENAME_COMPONENT(WINDOWS_SDK_PATH "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v8.0;InstallationFolder]" ABSOLUTE CACHE)
if (WINDOWS_SDK_PATH STREQUAL "")
message(ERROR "Windows Platform SDK 8.0 was not found!")
set(HAVE_WINRT False)
set(HAVE_MSPDK FALSE)
message(STATUS "Windows Platform SDK 8.0 was not found")
else()
set(HAVE_MSPDK TRUE)
endif()
#search for Visual Studio 11.0 install directory
message(STATUS "Checking for Visual Studio 2012")
GET_FILENAME_COMPONENT(VISUAL_STUDIO_PATH [HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\11.0\\Setup\\VS;ProductDir] REALPATH CACHE)
if (VISUAL_STUDIO_PATH STREQUAL "")
message(ERROR "Visual Studio 2012 was not found!")
set(HAVE_WINRT False)
set(HAVE_MSVC2012 FALSE)
message(STATUS "Visual Studio 2012 was not found")
else()
set(HAVE_MSVC2012 TRUE)
endif()
if (HAVE_WINRT)
TRY_COMPILE(HAVE_WINRT
"${OPENCV_BINARY_DIR}/CMakeFiles/CMakeTmp"
"${OpenCV_SOURCE_DIR}/cmake/checks/winrttest.cpp"
CMAKE_FLAGS "\"kernel.lib\" \"user32.lib\""
OUTPUT_VARIABLE OUTPUT)
try_compile(HAVE_WINRT_SDK
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/winrttest.cpp")
if (ENABLE_WINRT_MODE AND HAVE_WINRT_SDK AND HAVE_MSVC2012 AND HAVE_MSPDK)
set(HAVE_WINRT TRUE)
endif()
if (HAVE_WINRT)
add_definitions(/DWINVER=0x0602 /DNTDDI_VERSION=NTDDI_WIN8 /D_WIN32_WINNT=0x0602)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /appcontainer")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /appcontainer")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /appcontainer")
endif()
endif(ENABLE_WINRT_MODE)
if(NOT BUILD_SHARED_LIBS AND BUILD_WITH_STATIC_CRT)
foreach(flag_var
@ -96,4 +101,3 @@ if(NOT BUILD_WITH_DEBUG_INFO AND NOT MSVC)
string(REPLACE "/Zi" "" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
string(REPLACE "/Zi" "" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
endif()

@ -233,6 +233,10 @@ if(MSVC)
set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /fp:fast") # !! important - be on the same wave with x64 compilers
endif()
endif()
if(OPENCV_WARNINGS_ARE_ERRORS)
set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /WX")
endif()
endif()
# Extra link libs if the user selects building static libs:

@ -2,7 +2,7 @@ if(WIN32 AND NOT PYTHON_EXECUTABLE)
# search for executable with the same bitness as resulting binaries
# standard FindPythonInterp always prefers executable from system path
# this is really important because we are using the interpreter for numpy search and for choosing the install location
foreach(_CURRENT_VERSION ${Python_ADDITIONAL_VERSIONS} 2.7 2.6 2.5 2.4 2.3 2.2 2.1 2.0)
foreach(_CURRENT_VERSION ${Python_ADDITIONAL_VERSIONS} 2.7 "${MIN_VER_PYTHON}")
find_host_program(PYTHON_EXECUTABLE
NAMES python${_CURRENT_VERSION} python
PATHS
@ -12,39 +12,15 @@ if(WIN32 AND NOT PYTHON_EXECUTABLE)
)
endforeach()
endif()
find_host_package(PythonInterp 2.0)
find_host_package(PythonInterp "${MIN_VER_PYTHON}")
unset(HAVE_SPHINX CACHE)
if(PYTHON_EXECUTABLE)
if(PYTHON_VERSION_STRING)
set(PYTHON_VERSION_MAJOR_MINOR "${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}")
set(PYTHON_VERSION_FULL "${PYTHON_VERSION_STRING}")
else()
execute_process(COMMAND ${PYTHON_EXECUTABLE} --version
ERROR_VARIABLE PYTHON_VERSION_FULL
ERROR_STRIP_TRAILING_WHITESPACE)
string(REGEX MATCH "[0-9]+.[0-9]+" PYTHON_VERSION_MAJOR_MINOR "${PYTHON_VERSION_FULL}")
endif()
if("${PYTHON_VERSION_FULL}" MATCHES "[0-9]+.[0-9]+.[0-9]+")
set(PYTHON_VERSION_FULL "${CMAKE_MATCH_0}")
elseif("${PYTHON_VERSION_FULL}" MATCHES "[0-9]+.[0-9]+")
set(PYTHON_VERSION_FULL "${CMAKE_MATCH_0}")
else()
unset(PYTHON_VERSION_FULL)
endif()
if(PYTHONINTERP_FOUND)
set(PYTHON_VERSION_MAJOR_MINOR "${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}")
if(NOT ANDROID AND NOT IOS)
if(CMAKE_VERSION VERSION_GREATER 2.8.8 AND PYTHON_VERSION_FULL)
find_host_package(PythonLibs ${PYTHON_VERSION_FULL} EXACT)
else()
find_host_package(PythonLibs ${PYTHON_VERSION_FULL})
endif()
# cmake 2.4 (at least on Ubuntu 8.04 (hardy)) don't define PYTHONLIBS_FOUND
if(NOT PYTHONLIBS_FOUND AND PYTHON_INCLUDE_PATH)
set(PYTHONLIBS_FOUND ON)
endif()
find_host_package(PythonLibs "${PYTHON_VERSION_STRING}" EXACT)
endif()
if(NOT ANDROID AND NOT IOS)
@ -78,26 +54,27 @@ if(PYTHON_EXECUTABLE)
endif()
SET(PYTHON_PACKAGES_PATH "${_PYTHON_PACKAGES_PATH}" CACHE PATH "Where to install the python packages.")
if(NOT PYTHON_NUMPY_INCLUDE_DIR)
if(NOT PYTHON_NUMPY_INCLUDE_DIRS)
# Attempt to discover the NumPy include directory. If this succeeds, then build python API with NumPy
execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import os; os.environ['DISTUTILS_USE_SDK']='1'; import numpy.distutils; print(numpy.distutils.misc_util.get_numpy_include_dirs()[0])"
execute_process(COMMAND "${PYTHON_EXECUTABLE}" -c
"import os; os.environ['DISTUTILS_USE_SDK']='1'; import numpy.distutils; print(os.pathsep.join(numpy.distutils.misc_util.get_numpy_include_dirs()))"
RESULT_VARIABLE PYTHON_NUMPY_PROCESS
OUTPUT_VARIABLE PYTHON_NUMPY_INCLUDE_DIR
OUTPUT_VARIABLE PYTHON_NUMPY_INCLUDE_DIRS
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(PYTHON_NUMPY_PROCESS EQUAL 0)
file(TO_CMAKE_PATH "${PYTHON_NUMPY_INCLUDE_DIR}" _PYTHON_NUMPY_INCLUDE_DIR)
set(PYTHON_NUMPY_INCLUDE_DIR ${_PYTHON_NUMPY_INCLUDE_DIR} CACHE PATH "Path to numpy headers")
file(TO_CMAKE_PATH "${PYTHON_NUMPY_INCLUDE_DIRS}" _PYTHON_NUMPY_INCLUDE_DIRS)
set(PYTHON_NUMPY_INCLUDE_DIRS "${_PYTHON_NUMPY_INCLUDE_DIRS}" CACHE PATH "Path to numpy headers")
endif()
endif()
if(PYTHON_NUMPY_INCLUDE_DIR)
execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import numpy; print(numpy.version.version)"
RESULT_VARIABLE PYTHON_NUMPY_PROCESS
if(PYTHON_NUMPY_INCLUDE_DIRS)
execute_process(COMMAND "${PYTHON_EXECUTABLE}" -c "import numpy; print(numpy.version.version)"
OUTPUT_VARIABLE PYTHON_NUMPY_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
endif(NOT ANDROID AND NOT IOS)
endif()
if(BUILD_DOCS)
find_host_program(SPHINX_BUILD sphinx-build)
@ -113,4 +90,3 @@ if(PYTHON_EXECUTABLE)
endif()
endif()
endif(BUILD_DOCS)
endif(PYTHON_EXECUTABLE)

@ -136,17 +136,20 @@ endfunction()
# ------------------------------------------------------------------------
# This is auxiliary function called from set_ipp_variables()
# to set IPP_LIBRARIES variable in IPP 7.x style
# to set IPP_LIBRARIES variable in IPP 7.x and 8.x style
# ------------------------------------------------------------------------
function(set_ipp_new_libraries _LATEST_VERSION)
set(IPP_PREFIX "ipp")
if(${_LATEST_VERSION} VERSION_LESS "8.0")
set(IPP_SUFFIX "_l") # static not threaded libs suffix
set(IPP_SUFFIX "_l") # static not threaded libs suffix IPP 7.x
else()
set(IPP_SUFFIX "") # static not threaded libs suffix
if(WIN32)
set(IPP_SUFFIX "mt") # static not threaded libs suffix IPP 8.x for Windows
else()
set(IPP_SUFFIX "") # static not threaded libs suffix IPP 8.x for Linux/OS X
endif()
endif()
set(IPP_THRD "_t") # static threaded libs suffix
set(IPPCORE "core") # core functionality
set(IPPSP "s") # signal processing
set(IPPIP "i") # image processing
@ -218,7 +221,7 @@ function(set_ipp_variables _LATEST_VERSION)
set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib/ia32 PARENT_SCOPE)
endif()
# set IPP_LIBRARIES variable (7.x lib names)
# set IPP_LIBRARIES variable (7.x or 8.x lib names)
set_ipp_new_libraries(${_LATEST_VERSION})
set(IPP_LIBRARIES ${IPP_LIBRARIES} PARENT_SCOPE)
message(STATUS "IPP libs: ${IPP_LIBRARIES}")

@ -5,12 +5,11 @@
#--- Win32 UI ---
ocv_clear_vars(HAVE_WIN32UI)
if(WITH_WIN32UI)
TRY_COMPILE(HAVE_WIN32UI
"${OPENCV_BINARY_DIR}/CMakeFiles/CMakeTmp"
try_compile(HAVE_WIN32UI
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/win32uitest.cpp"
CMAKE_FLAGS "\"user32.lib\" \"gdi32.lib\""
OUTPUT_VARIABLE OUTPUT)
endif(WITH_WIN32UI)
CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=user32;gdi32")
endif()
# --- QT4 ---
ocv_clear_vars(HAVE_QT HAVE_QT5)
@ -70,7 +69,7 @@ endif(WITH_OPENGL)
if(APPLE)
if(WITH_CARBON)
set(HAVE_CARBON YES)
elif(NOT IOS)
elseif(NOT IOS)
set(HAVE_COCOA YES)
endif()
endif()

@ -6,22 +6,21 @@
if(BUILD_ZLIB)
ocv_clear_vars(ZLIB_FOUND)
else()
include(FindZLIB)
find_package(ZLIB "${MIN_VER_ZLIB}")
if(ZLIB_FOUND AND ANDROID)
if(ZLIB_LIBRARY STREQUAL "${ANDROID_SYSROOT}/usr/lib/libz.so")
set(ZLIB_LIBRARY z)
if(ZLIB_LIBRARIES STREQUAL "${ANDROID_SYSROOT}/usr/lib/libz.so")
set(ZLIB_LIBRARIES z)
endif()
endif()
endif()
if(NOT ZLIB_FOUND)
ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIR)
ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIRS)
set(ZLIB_LIBRARY zlib)
set(ZLIB_LIBRARIES ${ZLIB_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/zlib")
set(ZLIB_INCLUDE_DIR "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}")
set(ZLIB_INCLUDE_DIRS "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}")
set(ZLIB_LIBRARIES ${ZLIB_LIBRARY})
ocv_parse_header2(ZLIB "${${ZLIB_LIBRARY}_SOURCE_DIR}/zlib.h" ZLIB_VERSION)
endif()
@ -150,7 +149,7 @@ if(WITH_JASPER)
endif()
# --- libpng (optional, should be searched after zlib) ---
if(WITH_PNG AND NOT IOS)
if(WITH_PNG)
if(BUILD_PNG)
ocv_clear_vars(PNG_FOUND)
else()

@ -4,11 +4,10 @@
ocv_clear_vars(HAVE_VFW)
if(WITH_VFW)
TRY_COMPILE(HAVE_VFW
"${OPENCV_BINARY_DIR}/CMakeFiles/CMakeTmp"
try_compile(HAVE_VFW
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/vfwtest.cpp"
CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=vfw32"
OUTPUT_VARIABLE OUTPUT)
CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=vfw32")
endif(WITH_VFW)
# --- GStreamer ---
@ -271,6 +270,10 @@ if(WITH_AVFOUNDATION)
endif()
# --- QuickTime ---
if (NOT IOS)
if(WITH_QUICKTIME)
set(HAVE_QUICKTIME YES)
elseif(APPLE)
set(HAVE_QTKIT YES)
endif()
endif()

@ -0,0 +1,4 @@
if(IOS)
configure_file("${OpenCV_SOURCE_DIR}/platforms/ios/Info.plist.in"
"${CMAKE_BINARY_DIR}/ios/Info.plist")
endif()

@ -1 +1,3 @@
set(MIN_VER_CMAKE 2.8.7)
set(MIN_VER_PYTHON 2.6)
set(MIN_VER_ZLIB 1.2.3)

@ -470,8 +470,16 @@ endmacro()
# ocv_create_module(<extra link dependencies>)
# ocv_create_module(SKIP_LINK)
macro(ocv_create_module)
# The condition we ought to be testing here is whether ocv_add_precompiled_headers will
# be called at some point in the future. We can't look into the future, though,
# so this will have to do.
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/precomp.hpp")
get_native_precompiled_header(${the_module} precomp.hpp)
endif()
add_library(${the_module} ${OPENCV_MODULE_TYPE} ${OPENCV_MODULE_${the_module}_HEADERS} ${OPENCV_MODULE_${the_module}_SOURCES}
"${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp")
"${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp"
${${the_module}_pch})
if(NOT the_module STREQUAL opencv_ts)
set_target_properties(${the_module} PROPERTIES COMPILE_DEFINITIONS OPENCV_NOSTL)
endif()
@ -640,7 +648,9 @@ function(ocv_add_perf_tests)
set(OPENCV_PERF_${the_module}_SOURCES ${perf_srcs} ${perf_hdrs})
endif()
add_executable(${the_target} ${OPENCV_PERF_${the_module}_SOURCES})
get_native_precompiled_header(${the_target} perf_precomp.hpp)
add_executable(${the_target} ${OPENCV_PERF_${the_module}_SOURCES} ${${the_target}_pch})
target_link_libraries(${the_target} ${OPENCV_MODULE_${the_module}_DEPS} ${perf_deps} ${OPENCV_LINKER_LIBS})
add_dependencies(opencv_perf_tests ${the_target})
@ -688,7 +698,9 @@ function(ocv_add_accuracy_tests)
set(OPENCV_TEST_${the_module}_SOURCES ${test_srcs} ${test_hdrs})
endif()
add_executable(${the_target} ${OPENCV_TEST_${the_module}_SOURCES})
get_native_precompiled_header(${the_target} test_precomp.hpp)
add_executable(${the_target} ${OPENCV_TEST_${the_module}_SOURCES} ${${the_target}_pch})
target_link_libraries(${the_target} ${OPENCV_MODULE_${the_module}_DEPS} ${test_deps} ${OPENCV_LINKER_LIBS})
add_dependencies(opencv_tests ${the_target})

@ -279,12 +279,9 @@ ENDMACRO(ADD_PRECOMPILED_HEADER)
MACRO(GET_NATIVE_PRECOMPILED_HEADER _targetName _input)
if(CMAKE_GENERATOR MATCHES "^Visual.*$")
SET(_dummy_str "#include \"${_input}\"\n"
"// This is required to suppress LNK4221. Very annoying.\n"
"void *g_${_targetName}Dummy = 0\;\n")
set(_dummy_str "#include \"${_input}\"\n")
# Use of cxx extension for generated files (as Qt does)
SET(${_targetName}_pch ${CMAKE_CURRENT_BINARY_DIR}/${_targetName}_pch.cxx)
set(${_targetName}_pch ${CMAKE_CURRENT_BINARY_DIR}/${_targetName}_pch.cpp)
if(EXISTS ${${_targetName}_pch})
# Check if contents is the same, if not rewrite
# todo
@ -344,11 +341,7 @@ ENDMACRO(ADD_NATIVE_PRECOMPILED_HEADER)
macro(ocv_add_precompiled_header_to_target the_target pch_header)
if(PCHSupport_FOUND AND ENABLE_PRECOMPILED_HEADERS AND EXISTS "${pch_header}")
if(CMAKE_GENERATOR MATCHES Visual)
string(REGEX REPLACE "hpp$" "cpp" ${the_target}_pch "${pch_header}")
add_native_precompiled_header(${the_target} ${pch_header})
unset(${the_target}_pch)
elseif(CMAKE_GENERATOR MATCHES Xcode)
if(CMAKE_GENERATOR MATCHES "^Visual" OR CMAKE_GENERATOR MATCHES Xcode)
add_native_precompiled_header(${the_target} ${pch_header})
elseif(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_GENERATOR MATCHES "Makefiles|Ninja")
add_precompiled_header(${the_target} ${pch_header})

@ -77,7 +77,7 @@ MACRO(ocv_check_compiler_flag LANG FLAG RESULT)
if(_fname)
MESSAGE(STATUS "Performing Test ${RESULT}")
TRY_COMPILE(${RESULT}
${CMAKE_BINARY_DIR}
"${CMAKE_BINARY_DIR}"
"${_fname}"
COMPILE_DEFINITIONS "${FLAG}"
OUTPUT_VARIABLE OUTPUT)

@ -23,5 +23,3 @@ FOREACH(file ${files})
MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.")
ENDIF(EXISTS "$ENV{DESTDIR}${file}")
ENDFOREACH(file)

@ -130,6 +130,9 @@
/* QuickTime video libraries */
#cmakedefine HAVE_QUICKTIME
/* QTKit video libraries */
#cmakedefine HAVE_QTKIT
/* Intel Threading Building Blocks */
#cmakedefine HAVE_TBB

@ -49,7 +49,7 @@ if(BUILD_DOCS AND HAVE_SPHINX)
set(toc_file "${OPENCV_MODULE_opencv_${mod}_LOCATION}/doc/${mod}.rst")
if(EXISTS "${toc_file}")
file(RELATIVE_PATH toc_file "${OpenCV_SOURCE_DIR}/modules" "${toc_file}")
set(OPENCV_REFMAN_TOC "${OPENCV_REFMAN_TOC} ${toc_file}\r\n")
set(OPENCV_REFMAN_TOC "${OPENCV_REFMAN_TOC} ${toc_file}\n")
endif()
endforeach()

@ -10,4 +10,3 @@ function insertIframe (elementId, iframeSrc)
element.parentNode.replaceChild(iframe, element);
}
}

@ -184,5 +184,3 @@ p = RSTParser()
for m in opencv_module_list:
print "\n\n*************************** " + m + " *************************\n"
p.check_module_docs(m)

@ -39,4 +39,3 @@
#7 & #8 & #9
\end{bmatrix}
}

Binary file not shown.


@ -3667,4 +3667,3 @@ class YErrorBars:
output.append(LineAxis(x, start, x, end, start, end, bars, False, False, **self.attr).SVG(trans))
return output

@ -3,42 +3,42 @@
Camera calibration With OpenCV
******************************
Cameras have been around for a long-long time. However, with the introduction of the cheap *pinhole* cameras in the late 20th century, they became a common occurrence in our everyday life. Unfortunately, this cheapness comes with its price: significant distortion. Luckily, these are constants and with a calibration and some remapping we can correct this. Furthermore, with calibration you may also determinate the relation between the camera's natural units (pixels) and the real world units (for example millimeters).
Cameras have been around for a long-long time. However, with the introduction of the cheap *pinhole* cameras in the late 20th century, they became a common occurrence in our everyday life. Unfortunately, this cheapness comes with its price: significant distortion. Luckily, these are constants and with a calibration and some remapping we can correct this. Furthermore, with calibration you may also determine the relation between the camera's natural units (pixels) and the real world units (for example millimeters).
Theory
======
For the distortion OpenCV takes into account the radial and tangential factors. For the radial one uses the following formula:
For the distortion OpenCV takes into account the radial and tangential factors. For the radial factor one uses the following formula:
.. math::
x_{corrected} = x( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6) \\
y_{corrected} = y( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6)
So for an old pixel point at :math:`(x,y)` coordinate in the input image, for a corrected output image its position will be :math:`(x_{corrected} y_{corrected})` . The presence of the radial distortion manifests in form of the "barrel" or "fish-eye" effect.
So for an old pixel point at :math:`(x,y)` coordinates in the input image, its position on the corrected output image will be :math:`(x_{corrected}, y_{corrected})`. The presence of the radial distortion manifests in the form of the "barrel" or "fish-eye" effect.
Tangential distortion occurs because the image taking lenses are not perfectly parallel to the imaging plane. Correcting this is made via the formulas:
Tangential distortion occurs because the image taking lenses are not perfectly parallel to the imaging plane. It can be corrected via the formulas:
.. math::
x_{corrected} = x + [ 2p_1xy + p_2(r^2+2x^2)] \\
y_{corrected} = y + [ p_1(r^2+ 2y^2)+ 2p_2xy]
So we have five distortion parameters, which in OpenCV are organized in a 5 column one row matrix:
So we have five distortion parameters which in OpenCV are presented as one row matrix with 5 columns:
.. math::
Distortion_{coefficients}=(k_1 \hspace{10pt} k_2 \hspace{10pt} p_1 \hspace{10pt} p_2 \hspace{10pt} k_3)
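Purely as an illustration of how these coefficients act (this snippet is not part of the tutorial sample, and the coefficient values are made up), the two correction formulas above can be evaluated directly for a single normalized point:

.. code-block:: cpp

   #include <cstdio>

   int main()
   {
       // hypothetical distortion coefficients (k1, k2, p1, p2, k3) -- demo values only
       const double k1 = -0.28, k2 = 0.07, p1 = 0.0019, p2 = -0.0012, k3 = 0.0;
       const double x = 0.25, y = -0.10;       // a point in normalized image coordinates
       const double r2 = x*x + y*y;
       const double radial = 1 + k1*r2 + k2*r2*r2 + k3*r2*r2*r2;
       // radial term from the first formula plus the tangential term from the second one
       const double x_corrected = x*radial + 2*p1*x*y + p2*(r2 + 2*x*x);
       const double y_corrected = y*radial + p1*(r2 + 2*y*y) + 2*p2*x*y;
       std::printf("corrected point: (%f, %f)\n", x_corrected, y_corrected);
       return 0;
   }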
Now for the unit conversion, we use the following formula:
Now for the unit conversion we use the following formula:
.. math::
\left [ \begin{matrix} x \\ y \\ w \end{matrix} \right ] = \left [ \begin{matrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{matrix} \right ] \left [ \begin{matrix} X \\ Y \\ Z \end{matrix} \right ]
Here the presence of the :math:`w` is cause we use a homography coordinate system (and :math:`w=Z`). The unknown parameters are :math:`f_x` and :math:`f_y` (camera focal lengths) and :math:`(c_x, c_y)` what are the optical centers expressed in pixels coordinates. If for both axes a common focal length is used with a given :math:`a` aspect ratio (usually 1), then :math:`f_y=f_x*a` and in the upper formula we will have a single :math:`f` focal length. The matrix containing these four parameters is referred to as the *camera matrix*. While the distortion coefficients are the same regardless of the camera resolutions used, these should be scaled along with the current resolution from the calibrated resolution.
Here the presence of :math:`w` is explained by the use of homogeneous coordinates (and :math:`w=Z`). The unknown parameters are :math:`f_x` and :math:`f_y` (camera focal lengths) and :math:`(c_x, c_y)` which are the optical centers expressed in pixel coordinates. If for both axes a common focal length is used with a given :math:`a` aspect ratio (usually 1), then :math:`f_y=f_x*a` and in the upper formula we will have a single focal length :math:`f`. The matrix containing these four parameters is referred to as the *camera matrix*. While the distortion coefficients are the same regardless of the camera resolutions used, these should be scaled along with the current resolution from the calibrated resolution.
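As a minimal sketch of the unit-conversion formula above (the focal lengths and optical center used here are made-up demo values, not calibration results), the camera matrix can be applied to a 3D point like this:

.. code-block:: cpp

   #include <opencv2/core.hpp>
   #include <cstdio>

   int main()
   {
       // hypothetical intrinsics -- replace with your own calibrated values
       const double fx = 800, fy = 800, cx = 320, cy = 240;
       cv::Matx33d cameraMatrix(fx,  0, cx,
                                 0, fy, cy,
                                 0,  0,  1);
       cv::Vec3d point(0.1, -0.05, 2.0);     // (X, Y, Z) in camera coordinates
       cv::Vec3d p = cameraMatrix * point;   // homogeneous image point, w = Z
       std::printf("pixel coordinates: (%f, %f)\n", p[0] / p[2], p[1] / p[2]);
       return 0;
   }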
The process of determining these two matrices is the calibration. Calculating these parameters is done by some basic geometrical equations. The equations used depend on the calibrating objects used. Currently OpenCV supports three types of object for calibration:
The process of determining these two matrices is the calibration. Calculation of these parameters is done through basic geometrical equations. The equations used depend on the chosen calibrating objects. Currently OpenCV supports three types of objects for calibration:
.. container:: enumeratevisibleitemswithsquare
@ -46,7 +46,7 @@ The process of determining these two matrices is the calibration. Calculating th
+ Symmetrical circle pattern
+ Asymmetrical circle pattern
Basically, you need to take snapshots of these patterns with your camera and let OpenCV find them. Each found pattern equals in a new equation. To solve the equation you need at least a predetermined number of pattern snapshots to form a well-posed equation system. This number is higher for the chessboard pattern and less for the circle ones. For example, in theory the chessboard one requires at least two. However, in practice we have a good amount of noise present in our input images, so for good results you will probably want at least 10 good snapshots of the input pattern in different position.
Basically, you need to take snapshots of these patterns with your camera and let OpenCV find them. Each found pattern results in a new equation. To solve the equation you need at least a predetermined number of pattern snapshots to form a well-posed equation system. This number is higher for the chessboard pattern and less for the circle ones. For example, in theory the chessboard pattern requires at least two snapshots. However, in practice we have a good amount of noise present in our input images, so for good results you will probably need at least 10 good snapshots of the input pattern in different positions.
Goal
====
@ -55,19 +55,19 @@ The sample application will:
.. container:: enumeratevisibleitemswithsquare
+ Determinate the distortion matrix
+ Determinate the camera matrix
+ Input from Camera, Video and Image file list
+ Configuration from XML/YAML file
+ Determine the distortion matrix
+ Determine the camera matrix
+ Take input from Camera, Video and Image file list
+ Read configuration from XML/YAML file
+ Save the results into XML/YAML file
+ Calculate re-projection error
Source code
===========
You may also find the source code in the :file:`samples/cpp/tutorial_code/calib3d/camera_calibration/` folder of the OpenCV source library or :download:`download it from here <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp>`. The program has a single argument. The name of its configuration file. If none given it will try to open the one named "default.xml". :download:`Here's a sample configuration file <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml>` in XML format. In the configuration file you may choose to use as input a camera, a video file or an image list. If you opt for the later one, you need to create a configuration file where you enumerate the images to use. Here's :download:`an example of this <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml>`. The important part to remember is that the images needs to be specified using the absolute path or the relative one from your applications working directory. You may find all this in the beforehand mentioned directory.
You may also find the source code in the :file:`samples/cpp/tutorial_code/calib3d/camera_calibration/` folder of the OpenCV source library or :download:`download it from here <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp>`. The program has a single argument: the name of its configuration file. If none is given then it will try to open the one named "default.xml". :download:`Here's a sample configuration file <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml>` in XML format. In the configuration file you may choose to use camera as an input, a video file or an image list. If you opt for the last one, you will need to create a configuration file where you enumerate the images to use. Here's :download:`an example of this <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml>`. The important part to remember is that the images need to be specified using the absolute path or the relative one from your application's working directory. You may find all this in the samples directory mentioned above.
The application starts up with reading the settings from the configuration file. Although, this is an important part of it, it has nothing to do with the subject of this tutorial: *camera calibration*. Therefore, I've chosen to do not post here the code part for that. The technical background on how to do this you can find in the :ref:`fileInputOutputXMLYAML` tutorial.
The application starts up with reading the settings from the configuration file. Although this is an important part of it, it has nothing to do with the subject of this tutorial: *camera calibration*. Therefore, I've chosen not to post the code for that part here. You can find the technical background on how to do this in the :ref:`fileInputOutputXMLYAML` tutorial.
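Only as a reminder of that mechanism, a minimal sketch of reading one setting with *cv::FileStorage* might look like the snippet below (the node name is illustrative; see the sample configuration file for the real ones):

.. code-block:: cpp

   #include <opencv2/core.hpp>
   #include <cstdio>

   int main()
   {
       cv::FileStorage fs("default.xml", cv::FileStorage::READ);   // the default configuration name mentioned above
       if (!fs.isOpened())
       { std::printf("Could not open the configuration file\n"); return -1; }
       int boardWidth = 0;
       fs["BoardSize_Width"] >> boardWidth;   // illustrative node name
       fs.release();
       std::printf("board width: %d\n", boardWidth);
       return 0;
   }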
Explanation
===========
@ -93,9 +93,9 @@ Explanation
return -1;
}
For this I've used simple OpenCV class input operation. After reading the file I've an additional post-process function that checks for the validity of the input. Only if all of them are good will be the *goodInput* variable true.
For this I've used the simple OpenCV class input operation. After reading the file I have an additional post-processing function that checks the validity of the input. Only if all inputs are good will the *goodInput* variable be true.
#. **Get next input, if it fails or we have enough of them calibrate**. After this we have a big loop where we do the following operations: get the next image from the image list, camera or video file. If this fails or we have enough images we run the calibration process. In case of image we step out of the loop and otherwise the remaining frames will be undistorted (if the option is set) via changing from *DETECTION* mode to *CALIBRATED* one.
#. **Get next input, if it fails or we have enough of them - calibrate**. After this we have a big loop where we do the following operations: get the next image from the image list, camera or video file. If this fails or we have enough images then we run the calibration process. In case of image we step out of the loop and otherwise the remaining frames will be undistorted (if the option is set) via changing from *DETECTION* mode to the *CALIBRATED* one.
.. code-block:: cpp
@ -125,7 +125,7 @@ Explanation
For some cameras we may need to flip the input image. Here we do this too.
#. **Find the pattern in the current input**. The formation of the equations I mentioned above consists of finding the major patterns in the input: in case of the chessboard this is their corners of the squares and for the circles, well, the circles itself. The position of these will form the result and is collected into the *pointBuf* vector.
#. **Find the pattern in the current input**. Forming the equations I mentioned above requires finding the major patterns in the input: in the case of the chessboard these are the corners of the squares and for the circles, well, the circles themselves. The positions of these will form the result, which will be written into the *pointBuf* vector.
.. code-block:: cpp
@ -146,9 +146,9 @@ Explanation
break;
}
Depending on the type of the input pattern you use either the :calib3d:`findChessboardCorners <findchessboardcorners>` or the :calib3d:`findCirclesGrid <findcirclesgrid>` function. For both of them you pass on the current image, the size of the board and you'll get back the positions of the patterns. Furthermore, they return a boolean variable that states if in the input we could find or not the pattern (we only need to take into account images where this is true!).
Depending on the type of the input pattern you use either the :calib3d:`findChessboardCorners <findchessboardcorners>` or the :calib3d:`findCirclesGrid <findcirclesgrid>` function. For both of them you pass the current image and the size of the board and you'll get the positions of the patterns. Furthermore, they return a boolean variable which states if the pattern was found in the input (we only need to take into account those images where this is true!).
Then again in case of cameras we only take camera images after an input delay time passed. This is in order to allow for the user to move the chessboard around and as getting different images. Same images mean same equations, and same equations at the calibration will form an ill-posed problem, so the calibration will fail. For square images the position of the corners are only approximate. We may improve this by calling the :feature2d:`cornerSubPix <cornersubpix>` function. This way will get a better calibration result. After this we add a valid inputs result to the *imagePoints* vector to collect all of the equations into a single container. Finally, for visualization feedback purposes we will draw the found points on the input image with the :calib3d:`findChessboardCorners <drawchessboardcorners>` function.
Then again in case of cameras we only take camera images when an input delay time has passed. This is done in order to allow the user to move the chessboard around and get different images. Similar images result in similar equations, and similar equations at the calibration step will form an ill-posed problem, so the calibration will fail. For square images the positions of the corners are only approximate. We may improve this by calling the :feature2d:`cornerSubPix <cornersubpix>` function. It will produce a better calibration result. After this we add a valid inputs result to the *imagePoints* vector to collect all of the equations into a single container. Finally, for visualization feedback purposes we will draw the found points on the input image using the :calib3d:`drawChessboardCorners <drawchessboardcorners>` function.
.. code-block:: cpp
@ -175,7 +175,7 @@ Explanation
drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
}
#. **Show state and result for the user, plus command line control of the application**. The showing part consists of a text output on the live feed, and for video or camera input to show the "capturing" frame we simply bitwise negate the input image.
#. **Show state and result to the user, plus command line control of the application**. This part shows text output on the image.
.. code-block:: cpp
@ -199,7 +199,7 @@ Explanation
if( blinkOutput )
bitwise_not(view, view);
If we only ran the calibration and got the camera matrix plus the distortion coefficients we may just as correct the image with the :imgproc_geometric:`undistort <undistort>` function:
If we ran the calibration and got the camera matrix with the distortion coefficients we may want to correct the image using the :imgproc_geometric:`undistort <undistort>` function:
.. code-block:: cpp
@ -212,7 +212,7 @@ Explanation
//------------------------------ Show image and check for input commands -------------------
imshow("Image View", view);
Then we wait for an input key and if this is *u* we toggle the distortion removal, if it is *g* we start all over the detection process (or simply start it), and finally for the *ESC* key quit the application:
Then we wait for an input key and if this is *u* we toggle the distortion removal, if it is *g* we start again the detection process, and finally for the *ESC* key we quit the application:
.. code-block:: cpp
@ -229,7 +229,7 @@ Explanation
imagePoints.clear();
}
#. **Show the distortion removal for the images too**. When you work with an image list it is not possible to remove the distortion inside the loop. Therefore, you must append this after the loop. Taking advantage of this now I'll expand the :imgproc_geometric:`undistort <undistort>` function, which is in fact first a call of the :imgproc_geometric:`initUndistortRectifyMap <initundistortrectifymap>` to find out the transformation matrices and then doing the transformation with the :imgproc_geometric:`remap <remap>` function. Because, after a successful calibration the map calculation needs to be done only once, by using this expanded form you may speed up your application:
#. **Show the distortion removal for the images too**. When you work with an image list it is not possible to remove the distortion inside the loop. Therefore, you must do this after the loop. Taking advantage of this now I'll expand the :imgproc_geometric:`undistort <undistort>` function, which in fact first calls :imgproc_geometric:`initUndistortRectifyMap <initundistortrectifymap>` to find the transformation matrices and then performs the transformation using the :imgproc_geometric:`remap <remap>` function. Because after a successful calibration the map calculation needs to be done only once, by using this expanded form you may speed up your application:
.. code-block:: cpp
@ -256,7 +256,7 @@ Explanation
The calibration and save
========================
Because the calibration needs to be only once per camera it makes sense to save them after a successful calibration. This way later on you can just load these values into your program. Due to this we first make the calibration, and if it succeeds we save the result into an OpenCV style XML or YAML file, depending on the extension you give in the configuration file.
Because the calibration needs to be done only once per camera, it makes sense to save it after a successful calibration. This way later on you can just load these values into your program. Due to this we first make the calibration, and if it succeeds we save the result into an OpenCV style XML or YAML file, depending on the extension you give in the configuration file.
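Writing the results is plain *cv::FileStorage* output; a minimal, stand-alone sketch of that idea (with placeholder matrices rather than real calibration results) is:

.. code-block:: cpp

   #include <opencv2/core.hpp>

   int main()
   {
       cv::Mat cameraMatrix = cv::Mat::eye(3, 3, CV_64F);    // placeholder values only
       cv::Mat distCoeffs   = cv::Mat::zeros(5, 1, CV_64F);
       cv::FileStorage fs("out_camera_data.xml", cv::FileStorage::WRITE);
       fs << "Camera_Matrix" << cameraMatrix;
       fs << "Distortion_Coefficients" << distCoeffs;
       fs.release();
       return 0;
   }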
Therefore in the first function we just split up these two processes. Because we want to save many of the calibration variables we'll create these variables here and pass on both of them to the calibration and saving function. Again, I'll not show the saving part as that has little in common with the calibration. Explore the source file in order to find out how and what:
@ -280,7 +280,7 @@ Therefore in the first function we just split up these two processes. Because we
return ok;
}
We do the calibration with the help of the :calib3d:`calibrateCamera <calibratecamera>` function. This has the following parameters:
We do the calibration with the help of the :calib3d:`calibrateCamera <calibratecamera>` function. It has the following parameters:
.. container:: enumeratevisibleitemswithsquare
@ -318,11 +318,11 @@ We do the calibration with the help of the :calib3d:`calibrateCamera <calibratec
calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern);
objectPoints.resize(imagePoints.size(),objectPoints[0]);
+ The image points. This is a vector of *Point2f* vector that for each input image contains where the important points (corners for chessboard, and center of circles for the circle patterns) were found. We already collected this from what the :calib3d:`findChessboardCorners <findchessboardcorners>` or the :calib3d:`findCirclesGrid <findcirclesgrid>` function returned. We just need to pass it on.
+ The image points. This is a vector of *Point2f* vectors which for each input image contains the coordinates of the important points (corners for the chessboard and centers of the circles for the circle pattern). We have already collected this from the :calib3d:`findChessboardCorners <findchessboardcorners>` or :calib3d:`findCirclesGrid <findcirclesgrid>` function. We just need to pass it on.
+ The size of the image acquired from the camera, video file or the images.
+ The camera matrix. If we used the fix aspect ratio option we need to set the :math:`f_x` to zero:
+ The camera matrix. If we used the fixed aspect ratio option we need to set the :math:`f_x` to zero:
.. code-block:: cpp
@ -336,16 +336,16 @@ We do the calibration with the help of the :calib3d:`calibrateCamera <calibratec
distCoeffs = Mat::zeros(8, 1, CV_64F);
+ The function will calculate for all the views the rotation and translation vector that transform the object points (given in the model coordinate space) to the image points (given in the world coordinate space). The 7th and 8th parameters are an output vector of matrices containing in the ith position the rotation and translation vector for the ith object point to the ith image point.
+ For all the views the function will calculate rotation and translation vectors which transform the object points (given in the model coordinate space) to the image points (given in the world coordinate space). The 7-th and 8-th parameters are the output vector of matrices containing in the i-th position the rotation and translation vector for the i-th object point to the i-th image point.
+ The final argument is a flag. You need to specify here options like fix the aspect ratio for the focal length, assume zero tangential distortion or to fix the principal point.
+ The final argument is the flag. You need to specify here options like fix the aspect ratio for the focal length, assume zero tangential distortion or to fix the principal point.
.. code-block:: cpp
double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
+ The function returns the average re-projection error. This number gives a good estimation of just how exact is the found parameters. This should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices we may calculate the error for one view by using the :calib3d:`projectPoints <projectpoints>` to first transform the object point to image point. Then we calculate the absolute norm between what we got with our transformation and the corner/circle finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculate for all the calibration images.
+ The function returns the average re-projection error. This number gives a good estimation of precision of the found parameters. This should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices we may calculate the error for one view by using the :calib3d:`projectPoints <projectpoints>` to first transform the object point to image point. Then we calculate the absolute norm between what we got with our transformation and the corner/circle finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculated for all the calibration images.
.. code-block:: cpp
@ -378,25 +378,25 @@ We do the calibration with the help of the :calib3d:`calibrateCamera <calibratec
Results
=======
Let there be :download:`this input chessboard pattern <../../../pattern.png>` that has a size of 9 X 6. I've used an AXIS IP camera to create a couple of snapshots of the board and saved it into a VID5 directory. I've put this inside the :file:`images/CameraCalibraation` folder of my working directory and created the following :file:`VID5.XML` file that describes which images to use:
Let there be :download:`this input chessboard pattern <../../../pattern.png>` which has a size of 9 X 6. I've used an AXIS IP camera to create a couple of snapshots of the board and saved it into VID5 directory. I've put this inside the :file:`images/CameraCalibration` folder of my working directory and created the following :file:`VID5.XML` file that describes which images to use:
.. code-block:: xml
<?xml version="1.0"?>
<opencv_storage>
<images>
images/CameraCalibraation/VID5/xx1.jpg
images/CameraCalibraation/VID5/xx2.jpg
images/CameraCalibraation/VID5/xx3.jpg
images/CameraCalibraation/VID5/xx4.jpg
images/CameraCalibraation/VID5/xx5.jpg
images/CameraCalibraation/VID5/xx6.jpg
images/CameraCalibraation/VID5/xx7.jpg
images/CameraCalibraation/VID5/xx8.jpg
images/CameraCalibration/VID5/xx1.jpg
images/CameraCalibration/VID5/xx2.jpg
images/CameraCalibration/VID5/xx3.jpg
images/CameraCalibration/VID5/xx4.jpg
images/CameraCalibration/VID5/xx5.jpg
images/CameraCalibration/VID5/xx6.jpg
images/CameraCalibration/VID5/xx7.jpg
images/CameraCalibration/VID5/xx8.jpg
</images>
</opencv_storage>
Then specified the :file:`images/CameraCalibraation/VID5/VID5.XML` as input in the configuration file. Here's a chessboard pattern found during the runtime of the application:
Then passed :file:`images/CameraCalibration/VID5/VID5.XML` as an input in the configuration file. Here's a chessboard pattern found during the runtime of the application:
.. image:: images/fileListImage.jpg
:alt: A found chessboard
@ -433,7 +433,7 @@ In both cases in the specified output XML/YAML file you'll find the camera and d
-4.1802327176423804e-001 5.0715244063187526e-001 0. 0.
-5.7843597214487474e-001</data></Distortion_Coefficients>
Add these values as constants to your program, call the :imgproc_geometric:`initUndistortRectifyMap <initundistortrectifymap>` and the :imgproc_geometric:`remap <remap>` function to remove distortion and enjoy distortion free inputs with cheap and low quality cameras.
Add these values as constants to your program, call the :imgproc_geometric:`initUndistortRectifyMap <initundistortrectifymap>` and the :imgproc_geometric:`remap <remap>` function to remove distortion and enjoy distortion free inputs for cheap and low quality cameras.
You may observe a runtime instance of this on the `YouTube here <https://www.youtube.com/watch?v=ViPN810E0SU>`_.

@ -277,4 +277,3 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
<div align="center">
<iframe title="File Input and Output using XML and YAML files in OpenCV" width="560" height="349" src="http://www.youtube.com/embed/A4yqVnByMMM?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -127,6 +127,3 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
<div align="center">
<iframe title="Interoperability with OpenCV 1" width="560" height="349" src="http://www.youtube.com/embed/qckm-zvo31w?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>

@ -143,7 +143,7 @@ Although *Mat* works really well as an image container, it is also a general mat
You cannot initialize the matrix values with this construction. It will only reallocate its matrix data memory if the new size does not fit into the old one.
+ MATLAB style initializer: :basicstructures:`zeros() <mat-zeros>`, :basicstructures:`ones() <mat-ones>`, ::basicstructures:`eyes() <mat-eye>`. Specify size and data type to use:
+ MATLAB style initializer: :basicstructures:`zeros() <mat-zeros>`, :basicstructures:`ones() <mat-ones>`, :basicstructures:`eye() <mat-eye>`. Specify size and data type to use:
.. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
:language: cpp

@ -100,6 +100,3 @@ Result
.. image:: images/Feature_Description_BruteForce_Result.jpg
:align: center
:height: 200pt

@ -31,6 +31,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"
using namespace cv;
@ -94,4 +95,3 @@ Result
.. image:: images/Feature_Detection_Result_b.jpg
:align: center
:height: 200pt

@ -28,6 +28,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"
using namespace cv;

@ -30,6 +30,7 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/nonfree.hpp"
using namespace cv;
@ -145,4 +146,3 @@ Result
.. image:: images/Feature_Homography_Result.jpg
:align: center
:height: 200pt

@ -201,4 +201,3 @@ Learn about how to use the feature points detectors, descriptors and matching f
../feature_flann_matcher/feature_flann_matcher
../feature_homography/feature_homography
../detection_of_planar_objects/detection_of_planar_objects

@ -135,4 +135,3 @@ Here is the result:
.. image:: images/Corner_Subpixeles_Result.jpg
:align: center

@ -37,4 +37,3 @@ Result
.. image:: images/My_Shi_Tomasi_corner_detector_Result.jpg
:align: center

@ -118,5 +118,3 @@ Result
.. image:: images/Feature_Detection_Result_a.jpg
:align: center

@ -243,5 +243,3 @@ The detected corners are surrounded by a small black circle
.. image:: images/Harris_Detector_Result.jpg
:align: center

@ -10,4 +10,3 @@ These tutorials are the bottom of the iceberg as they link together multiple of
.. raw:: latex
\pagebreak

@ -152,8 +152,3 @@ Result
.. image:: images/Adding_Trackbars_Tutorial_Result_1.jpg
:alt: Adding Trackbars - Lena
:align: center

@ -329,4 +329,3 @@ Result
.. image:: images/Histogram_Calculation_Result.jpg
:align: center

@ -369,4 +369,3 @@ Results
.. image:: images/Template_Matching_Image_Result.jpg
:align: center

@ -282,6 +282,3 @@ Result
:align: center
* Notice how the image is superposed to the black background on the edge regions.

@ -290,4 +290,3 @@ We get the following result by using the Probabilistic Hough Line Transform:
:align: center
You may observe that the number of detected lines varies while you change the *threshold*. The explanation is sort of evident: if you establish a higher threshold, fewer lines will be detected (since you will need more points to declare a line detected).
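For reference, a minimal sketch of where that threshold enters the probabilistic Hough call (the file name and parameter values here are illustrative, not the tutorial's own):

.. code-block:: cpp

   #include <opencv2/imgproc.hpp>
   #include <opencv2/highgui.hpp>
   #include <vector>

   int main()
   {
       cv::Mat src = cv::imread("building.jpg", cv::IMREAD_GRAYSCALE);   // illustrative input image
       if (src.empty()) return -1;
       cv::Mat edges;
       cv::Canny(src, edges, 50, 200, 3);
       std::vector<cv::Vec4i> lines;
       // The 5th argument is the accumulator threshold discussed above:
       // raising it requires more supporting points, so fewer lines are detected.
       cv::HoughLinesP(edges, lines, 1, CV_PI / 180, 80, 30, 10);
       return 0;
   }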

@ -311,4 +311,3 @@ Result
:alt: Result 0 for remapping
:width: 250pt
:align: center

@ -306,4 +306,3 @@ Result
:alt: Original image
:width: 250pt
:align: center

@ -279,4 +279,3 @@ Results
.. image:: images/Morphology_2_Tutorial_Cover.jpg
:alt: Morphology 2: Result sample
:align: center

@ -259,5 +259,3 @@ Results
.. image:: images/Pyramids_Tutorial_PyrUp_Result.jpg
:alt: Pyramids: PyrUp Result
:align: center

@ -121,4 +121,3 @@ Result
.. |BRC_1| image:: images/Bounding_Rects_Circles_Result.jpg
:align: middle

@ -123,4 +123,3 @@ Result
.. |BRE_1| image:: images/Bounding_Rotated_Ellipses_Result.jpg
:align: middle

@ -104,4 +104,3 @@ Result
.. |contour_1| image:: images/Find_Contours_Result.jpg
:align: middle

@ -113,4 +113,3 @@ Result
.. |Hull_1| image:: images/Hull_Result.jpg
:align: middle

@ -133,4 +133,3 @@ Result
.. |MU_2| image:: images/Moments_Result2.jpg
:width: 250pt
:align: middle

@ -114,4 +114,3 @@ Result
.. |PPT_1| image:: images/Point_Polygon_Test_Result.jpg
:align: middle

@ -539,6 +539,3 @@ In this section you will learn about the image processing (manipulation) functio
../shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses
../shapedescriptors/moments/moments
../shapedescriptors/point_polygon_test/point_polygon_test

@ -245,6 +245,3 @@ Say you have or create a new file, *helloworld.cpp* in a directory called *foo*:
a. You can also optionally modify the ``Build command:`` from ``make`` to something like ``make VERBOSE=1 -j4``, which tells make to print the full compiler commands being run (useful for debugging the build) and to compile with 4 parallel jobs.
#. Done!

@ -80,4 +80,3 @@ Building OpenCV from Source Using CMake, Using the Command Line
.. note::
If the size of the created library is a critical issue (like in case of an Android build) you can use the ``install/strip`` command to get the smallest possible size. The *stripped* version appears to be about half the size. However, we do not recommend using this unless those extra megabytes do really matter.

@ -73,4 +73,3 @@ Now we will learn how to write a simple Hello World Application in Xcode using O
.. image:: images/output.png
:alt: output
:align: center

@ -185,4 +185,3 @@ Results
.. image:: images/result.png
:alt: The separated planes
:align: center

@ -44,36 +44,35 @@ This tutorial code's is shown lines below. You can also download it from `here <
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
String window_name = "Capture - Face detection";
/** @function main */
int main( int argc, const char** argv )
int main( void )
{
CvCapture* capture;
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
capture.open( -1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
while( true )
if( frame.empty() )
{
frame = cvQueryFrame( capture );
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
detectAndDisplay( frame );
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
if( (char)c == 27 ) { break; } // escape
}
return 0;
}
@ -84,28 +83,28 @@ This tutorial code's is shown lines below. You can also download it from `here <
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
for( int i = 0; i < faces.size(); i++ )
for( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
for( int j = 0; j < eyes.size(); j++ )
for( size_t j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
@ -131,4 +130,3 @@ Result
.. image:: images/Cascade_Classifier_Tutorial_Result_LBP.jpg
:align: center
:height: 300pt

@ -5,5 +5,3 @@ install(FILES ${old_hdrs}
install(FILES "opencv2/opencv.hpp"
DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH}/opencv2
COMPONENT main)

@ -73,4 +73,3 @@
#endif //CV_IMPL
#endif // __OPENCV_OLD_CV_H_

@ -46,4 +46,3 @@
#include "opencv2/core/eigen.hpp"
#endif

@ -14,4 +14,3 @@ double getCameraPropertyC(void* camera, int propIdx);
void setCameraPropertyC(void* camera, int propIdx, double value);
void applyCameraPropertiesC(void** camera);
}

@ -1,2 +1,2 @@
set(the_description "Biologically inspired algorithms")
ocv_define_module(bioinspired opencv_core OPTIONAL opencv_highgui)
ocv_define_module(bioinspired opencv_core OPTIONAL opencv_highgui opencv_ocl)

@ -304,7 +304,8 @@ public:
CV_EXPORTS Ptr<Retina> createRetina(Size inputSize);
CV_EXPORTS Ptr<Retina> createRetina(Size inputSize, const bool colorMode, int colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
CV_EXPORTS Ptr<Retina> createRetina_OCL(Size inputSize);
CV_EXPORTS Ptr<Retina> createRetina_OCL(Size inputSize, const bool colorMode, int colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
}
}
#endif /* __OPENCV_BIOINSPIRED_RETINA_HPP__ */

@ -0,0 +1,753 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other oclMaterials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/////////////////////////////////////////////////////////
//*******************************************************
// basicretinafilter
//////////////// _spatiotemporalLPfilter ////////////////
//_horizontalCausalFilter_addInput
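// one work-item per image row: a causal (left-to-right) first-order recursive filter; each result
// combines the current input, the existing output value (scaled by _tau) and the previous column's result (scaled by _a)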
kernel void horizontalCausalFilter_addInput(
global const float * input,
global float * output,
const int cols,
const int rows,
const int elements_per_row,
const int in_offset,
const int out_offset,
const float _tau,
const float _a
)
{
int gid = get_global_id(0);
if(gid >= rows)
{
return;
}
global const float * iptr =
input + mad24(gid, elements_per_row, in_offset / 4);
global float * optr =
output + mad24(gid, elements_per_row, out_offset / 4);
float res;
float4 in_v4, out_v4, res_v4 = (float4)(0);
//vectorize to increase throughput
for(int i = 0; i < cols / 4; ++i, iptr += 4, optr += 4)
{
in_v4 = vload4(0, iptr);
out_v4 = vload4(0, optr);
res_v4.x = in_v4.x + _tau * out_v4.x + _a * res_v4.w;
res_v4.y = in_v4.y + _tau * out_v4.y + _a * res_v4.x;
res_v4.z = in_v4.z + _tau * out_v4.z + _a * res_v4.y;
res_v4.w = in_v4.w + _tau * out_v4.w + _a * res_v4.z;
vstore4(res_v4, 0, optr);
}
res = res_v4.w;
// there may be left some
for(int i = 0; i < cols % 4; ++i, ++iptr, ++optr)
{
res = *iptr + _tau * *optr + _a * res;
*optr = res;
}
}
//_horizontalAnticausalFilter
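// one work-item per image row: the same first-order recursion as above, but run right-to-left (anticausal), starting from the last column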
kernel void horizontalAnticausalFilter(
global float * output,
const int cols,
const int rows,
const int elements_per_row,
const int out_offset,
const float _a
)
{
int gid = get_global_id(0);
if(gid >= rows)
{
return;
}
global float * optr = output +
mad24(gid + 1, elements_per_row, - 1 + out_offset / 4);
float4 result = (float4)(0), out_v4;
// we assume elements_per_row is a multiple of 4
for(int i = 0; i < elements_per_row / 4; ++i, optr -= 4)
{
// shift left, `offset` is type `size_t` so it cannot be negative
out_v4 = vload4(0, optr - 3);
result.w = out_v4.w + _a * result.x;
result.z = out_v4.z + _a * result.w;
result.y = out_v4.y + _a * result.z;
result.x = out_v4.x + _a * result.y;
vstore4(result, 0, optr - 3);
}
}
//_verticalCausalFilter
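// one work-item per image column: top-to-bottom first-order recursion down the column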
kernel void verticalCausalFilter(
global float * output,
const int cols,
const int rows,
const int elements_per_row,
const int out_offset,
const float _a
)
{
int gid = get_global_id(0);
if(gid >= cols)
{
return;
}
global float * optr = output + gid + out_offset / 4;
float result = 0;
for(int i = 0; i < rows; ++i, optr += elements_per_row)
{
result = *optr + _a * result;
*optr = result;
}
}
//_verticalAnticausalFilter_multGain
kernel void verticalAnticausalFilter_multGain(
global float * output,
const int cols,
const int rows,
const int elements_per_row,
const int out_offset,
const float _a,
const float _gain
)
{
int gid = get_global_id(0);
if(gid >= cols)
{
return;
}
global float * optr = output + (rows - 1) * elements_per_row + gid + out_offset / 4;
float result = 0;
for(int i = 0; i < rows; ++i, optr -= elements_per_row)
{
result = *optr + _a * result;
*optr = _gain * result;
}
}
//
// end of _spatiotemporalLPfilter
/////////////////////////////////////////////////////////////////////
//////////////// horizontalAnticausalFilter_Irregular ////////////////
kernel void horizontalAnticausalFilter_Irregular(
global float * output,
global float * buffer,
const int cols,
const int rows,
const int elements_per_row,
const int out_offset,
const int buffer_offset
)
{
int gid = get_global_id(0);
if(gid >= rows)
{
return;
}
global float * optr =
output + mad24(rows - gid, elements_per_row, -1 + out_offset / 4);
global float * bptr =
buffer + mad24(rows - gid, elements_per_row, -1 + buffer_offset / 4);
float4 buf_v4, out_v4, res_v4 = (float4)(0);
for(int i = 0; i < elements_per_row / 4; ++i, optr -= 4, bptr -= 4)
{
buf_v4 = vload4(0, bptr - 3);
out_v4 = vload4(0, optr - 3);
res_v4.w = out_v4.w + buf_v4.w * res_v4.x;
res_v4.z = out_v4.z + buf_v4.z * res_v4.w;
res_v4.y = out_v4.y + buf_v4.y * res_v4.z;
res_v4.x = out_v4.x + buf_v4.x * res_v4.y;
vstore4(res_v4, 0, optr - 3);
}
}
//////////////// verticalCausalFilter_Irregular ////////////////
kernel void verticalCausalFilter_Irregular(
global float * output,
global float * buffer,
const int cols,
const int rows,
const int elements_per_row,
const int out_offset,
const int buffer_offset
)
{
int gid = get_global_id(0);
if(gid >= cols)
{
return;
}
global float * optr = output + gid + out_offset / 4;
global float * bptr = buffer + gid + buffer_offset / 4;
float result = 0;
for(int i = 0; i < rows; ++i, optr += elements_per_row, bptr += elements_per_row)
{
result = *optr + *bptr * result;
*optr = result;
}
}
//////////////// _adaptiveHorizontalCausalFilter_addInput ////////////////
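// variant of the horizontal causal filter where the per-pixel recursion coefficient is read from the gradient buffer instead of being a constant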
kernel void adaptiveHorizontalCausalFilter_addInput(
global const float * input,
global const float * gradient,
global float * output,
const int cols,
const int rows,
const int elements_per_row,
const int in_offset,
const int grad_offset,
const int out_offset
)
{
int gid = get_global_id(0);
if(gid >= rows)
{
return;
}
global const float * iptr =
input + mad24(gid, elements_per_row, in_offset / 4);
global const float * gptr =
gradient + mad24(gid, elements_per_row, grad_offset / 4);
global float * optr =
output + mad24(gid, elements_per_row, out_offset / 4);
float4 in_v4, grad_v4, out_v4, res_v4 = (float4)(0);
for(int i = 0; i < cols / 4; ++i, iptr += 4, gptr += 4, optr += 4)
{
in_v4 = vload4(0, iptr);
grad_v4 = vload4(0, gptr);
res_v4.x = in_v4.x + grad_v4.x * res_v4.w;
res_v4.y = in_v4.y + grad_v4.y * res_v4.x;
res_v4.z = in_v4.z + grad_v4.z * res_v4.y;
res_v4.w = in_v4.w + grad_v4.w * res_v4.z;
vstore4(res_v4, 0, optr);
}
for(int i = 0; i < cols % 4; ++i, ++iptr, ++gptr, ++optr)
{
res_v4.w = *iptr + *gptr * res_v4.w;
*optr = res_v4.w;
}
}
//////////////// _adaptiveVerticalAnticausalFilter_multGain ////////////////
kernel void adaptiveVerticalAnticausalFilter_multGain(
global const float * gradient,
global float * output,
const int cols,
const int rows,
const int elements_per_row,
const int grad_offset,
const int out_offset,
const float gain
)
{
int gid = get_global_id(0);
if(gid >= cols)
{
return;
}
int start_idx = mad24(rows - 1, elements_per_row, gid);
global const float * gptr = gradient + start_idx + grad_offset / 4;
global float * optr = output + start_idx + out_offset / 4;
float result = 0;
for(int i = 0; i < rows; ++i, gptr -= elements_per_row, optr -= elements_per_row)
{
result = *optr + *gptr * result;
*optr = gain * result;
}
}
//////////////// _localLuminanceAdaptation ////////////////
// FIXME:
// This kernel seems to have precision problem on GPU
kernel void localLuminanceAdaptation(
global const float * luma,
global const float * input,
global float * output,
const int cols,
const int rows,
const int elements_per_row,
const float _localLuminanceAddon,
const float _localLuminanceFactor,
const float _maxInputValue
)
{
int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
int offset = mad24(gidy, elements_per_row, gidx);
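// local luminance adaptation: X0 acts as a semi-saturation constant, so inputs around X0 map to roughly half of the maximum response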
float X0 = luma[offset] * _localLuminanceFactor + _localLuminanceAddon;
float input_val = input[offset];
// output of the following line may be different between GPU and CPU
output[offset] = (_maxInputValue + X0) * input_val / (input_val + X0 + 0.00000000001f);
}
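// The adaptation follows a Michaelis-Menten style compression,
//     output = (maxInputValue + X0) * input / (input + X0 + epsilon),
// where X0 is the locally adapted half-saturation value derived from the luminance map.
// The GPU/CPU mismatch noted above most likely comes from single-precision division not
// being correctly rounded by default on GPUs (see the OpenCL build option
// -cl-fp32-correctly-rounded-divide-sqrt); this is an assumption, not a confirmed root cause.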
// end of basicretinafilter
//*******************************************************
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
//******************************************************
// magno
// TODO: this kernel performs too many scalar buffer accesses; switching to
// vector reads/writes would improve fetch efficiency
kernel void amacrineCellsComputing(
global const float * opl_on,
global const float * opl_off,
global float * prev_in_on,
global float * prev_in_off,
global float * out_on,
global float * out_off,
const int cols,
const int rows,
const int elements_per_row,
const float coeff
)
{
int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
int offset = mad24(gidy, elements_per_row, gidx);
opl_on += offset;
opl_off += offset;
prev_in_on += offset;
prev_in_off += offset;
out_on += offset;
out_off += offset;
float magnoXonPixelResult = coeff * (*out_on + *opl_on - *prev_in_on);
*out_on = fmax(magnoXonPixelResult, 0);
float magnoXoffPixelResult = coeff * (*out_off + *opl_off - *prev_in_off);
*out_off = fmax(magnoXoffPixelResult, 0);
*prev_in_on = *opl_on;
*prev_in_off = *opl_off;
}
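// Each ON/OFF channel applies a rectified temporal high-pass:
//     out = max(0, coeff * (out_prev + opl - prev_in)),
// with prev_in updated to the current OPL sample so that (opl - prev_in) acts as a
// frame-to-frame temporal derivative.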
/////////////////////////////////////////////////////////
//******************************************************
// parvo
// TODO: this kernel performs too many scalar buffer accesses and needs optimization
kernel void OPL_OnOffWaysComputing(
global float4 * photo_out,
global float4 * horiz_out,
global float4 * bipol_on,
global float4 * bipol_off,
global float4 * parvo_on,
global float4 * parvo_off,
const int cols,
const int rows,
const int elements_per_row
)
{
int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx * 4 >= cols || gidy >= rows)
{
return;
}
// we assume elements_per_row is a multiple of 4
int offset = mad24(gidy, elements_per_row >> 2, gidx);
photo_out += offset;
horiz_out += offset;
bipol_on += offset;
bipol_off += offset;
parvo_on += offset;
parvo_off += offset;
float4 diff = *photo_out - *horiz_out;
float4 isPositive;// = convert_float4(diff > (float4)(0.0f, 0.0f, 0.0f, 0.0f));
isPositive.x = diff.x > 0.0f;
isPositive.y = diff.y > 0.0f;
isPositive.z = diff.z > 0.0f;
isPositive.w = diff.w > 0.0f;
float4 res_on = isPositive * diff;
float4 res_off = (isPositive - (float4)(1.0f)) * diff;
*bipol_on = res_on;
*parvo_on = res_on;
*bipol_off = res_off;
*parvo_off = res_off;
}
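// The ON/OFF split above is equivalent to res_on = fmax(diff, (float4)0.0f) and
// res_off = fmax(-diff, (float4)0.0f). The commented-out convert_float4(diff > 0)
// variant is not used because, in OpenCL C, relational operators on vector types
// return -1 for true (not +1), which would flip the sign of the masks.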
/////////////////////////////////////////////////////////
//******************************************************
// retinacolor
inline int bayerSampleOffset(int step, int rows, int x, int y)
{
return mad24(y, step, x) +
((y % 2) + (x % 2)) * rows * step;
}
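// bayerSampleOffset maps pixel (x, y) into one of three color planes stacked vertically
// in the same buffer: plane index (y % 2) + (x % 2) (0, 1 or 2 depending on the Bayer
// position), each plane being rows * step floats long.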
/////// colorMultiplexing //////
kernel void runColorMultiplexingBayer(
global const float * input,
global float * output,
const int cols,
const int rows,
const int elements_per_row
)
{
int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
int offset = mad24(gidy, elements_per_row, gidx);
output[offset] = input[bayerSampleOffset(elements_per_row, rows, gidx, gidy)];
}
kernel void runColorDemultiplexingBayer(
global const float * input,
global float * output,
const int cols,
const int rows,
const int elements_per_row
)
{
int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
int offset = mad24(gidy, elements_per_row, gidx);
output[bayerSampleOffset(elements_per_row, rows, gidx, gidy)] = input[offset];
}
kernel void demultiplexAssign(
global const float * input,
global float * output,
const int cols,
const int rows,
const int elements_per_row
)
{
int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
int offset = bayerSampleOffset(elements_per_row, rows, gidx, gidy);
output[offset] = input[offset];
}
//// normalizeGrayOutputCentredSigmoide
kernel void normalizeGrayOutputCentredSigmoide(
global const float * input,
global float * output,
const int cols,
const int rows,
const int elements_per_row,
const float meanval,
const float X0
)
{
int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
int offset = mad24(gidy, elements_per_row, gidx);
float input_val = input[offset];
output[offset] = meanval +
(meanval + X0) * (input_val - meanval) / (fabs(input_val - meanval) + X0);
}
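// This is a sigmoid-like normalization centred on meanval:
//     output = meanval + (meanval + X0) * (v - meanval) / (|v - meanval| + X0),
// which keeps values near meanval almost unchanged and softly saturates large
// deviations towards meanval +/- (meanval + X0).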
//// normalize by photoreceptors density
kernel void normalizePhotoDensity(
global const float * chroma,
global const float * colorDensity,
global const float * multiplex,
global float * luma,
global float * demultiplex,
const int cols,
const int rows,
const int elements_per_row,
const float pG
)
{
const int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
const int offset = mad24(gidy, elements_per_row, gidx);
int index = offset;
float Cr = chroma[index] * colorDensity[index];
index += elements_per_row * rows;
float Cg = chroma[index] * colorDensity[index];
index += elements_per_row * rows;
float Cb = chroma[index] * colorDensity[index];
const float luma_res = (Cr + Cg + Cb) * pG;
luma[offset] = luma_res;
demultiplex[bayerSampleOffset(elements_per_row, rows, gidx, gidy)] =
multiplex[offset] - luma_res;
}
//////// computeGradient ///////
// TODO:
// this kernel may be accelerated by using image2d_t or local memory (LDS)
kernel void computeGradient(
global const float * luma,
global float * gradient,
const int cols,
const int rows,
const int elements_per_row
)
{
int gidx = get_global_id(0) + 2, gidy = get_global_id(1) + 2;
if(gidx >= cols - 2 || gidy >= rows - 2)
{
return;
}
int offset = mad24(gidy, elements_per_row, gidx);
luma += offset;
// horizontal and vertical local gradients
const float v_grad = fabs(luma[elements_per_row] - luma[- elements_per_row]);
const float h_grad = fabs(luma[1] - luma[-1]);
// neighborhood horizontal and vertical gradients
const float cur_val = luma[0];
const float v_grad_p = fabs(cur_val - luma[- 2 * elements_per_row]);
const float h_grad_p = fabs(cur_val - luma[- 2]);
const float v_grad_n = fabs(cur_val - luma[2 * elements_per_row]);
const float h_grad_n = fabs(cur_val - luma[2]);
const float horiz_grad = 0.5f * h_grad + 0.25f * (h_grad_p + h_grad_n);
const float verti_grad = 0.5f * v_grad + 0.25f * (v_grad_p + v_grad_n);
const bool is_vertical_greater = horiz_grad < verti_grad;
gradient[offset + elements_per_row * rows] = is_vertical_greater ? 0.06f : 0.57f;
gradient[offset] = is_vertical_greater ? 0.57f : 0.06f;
}
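// The local gradient estimate blends 0.5 * the central difference with 0.25 * each
// one-sided difference. The two stacked output planes then receive the complementary
// pair (0.57, 0.06), selected by whether the vertical or the horizontal gradient
// dominates; these coefficients presumably feed the adaptive causal filters above.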
/////// substractResidual ///////
kernel void substractResidual(
global float * input,
const int cols,
const int rows,
const int elements_per_row,
const float pR,
const float pG,
const float pB
)
{
const int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
int indices[3] =
{
mad24(gidy, elements_per_row, gidx),
mad24(gidy + rows, elements_per_row, gidx),
mad24(gidy + 2 * rows, elements_per_row, gidx)
};
float vals[3] = {input[indices[0]], input[indices[1]], input[indices[2]]};
float residu = pR * vals[0] + pG * vals[1] + pB * vals[2];
input[indices[0]] = vals[0] - residu;
input[indices[1]] = vals[1] - residu;
input[indices[2]] = vals[2] - residu;
}
///// clipRGBOutput_0_maxInputValue /////
kernel void clipRGBOutput_0_maxInputValue(
global float * input,
const int cols,
const int rows,
const int elements_per_row,
const float maxVal
)
{
const int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
const int offset = mad24(gidy, elements_per_row, gidx);
float val = input[offset];
val = clamp(val, 0.0f, maxVal);
input[offset] = val;
}
//// normalizeGrayOutputNearZeroCentreredSigmoide ////
kernel void normalizeGrayOutputNearZeroCentreredSigmoide(
global float * input,
global float * output,
const int cols,
const int rows,
const int elements_per_row,
const float maxVal,
const float X0cube
)
{
const int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
const int offset = mad24(gidy, elements_per_row, gidx);
float currentCubeLuminance = input[offset];
currentCubeLuminance = currentCubeLuminance * currentCubeLuminance * currentCubeLuminance;
output[offset] = currentCubeLuminance * X0cube / (X0cube + currentCubeLuminance);
}
//// centerReductImageLuminance ////
kernel void centerReductImageLuminance(
global float * input,
const int cols,
const int rows,
const int elements_per_row,
const float mean,
const float std_dev
)
{
const int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
const int offset = mad24(gidy, elements_per_row, gidx);
float val = input[offset];
input[offset] = (val - mean) / std_dev;
}
//// inverseValue ////
kernel void inverseValue(
global float * input,
const int cols,
const int rows,
const int elements_per_row
)
{
const int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
const int offset = mad24(gidy, elements_per_row, gidx);
input[offset] = 1.f / input[offset];
}
#define CV_PI 3.1415926535897932384626433832795
//// _processRetinaParvoMagnoMapping ////
kernel void processRetinaParvoMagnoMapping(
global float * parvo,
global float * magno,
global float * output,
const int cols,
const int rows,
const int halfCols,
const int halfRows,
const int elements_per_row,
const float minDistance
)
{
const int gidx = get_global_id(0), gidy = get_global_id(1);
if(gidx >= cols || gidy >= rows)
{
return;
}
const int offset = mad24(gidy, elements_per_row, gidx);
float distanceToCenter =
sqrt(((float)(gidy - halfRows) * (gidy - halfRows) + (gidx - halfCols) * (gidx - halfCols)));
float a = distanceToCenter < minDistance ?
(0.5f + 0.5f * (float)cos(CV_PI * distanceToCenter / minDistance)) : 0;
float b = 1.f - a;
output[offset] = parvo[offset] * a + magno[offset] * b;
}
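// The parvo/magno blend uses a raised-cosine weight around the image centre:
//     a = 0.5 + 0.5 * cos(CV_PI * d / minDistance)   for d < minDistance, 0 otherwise,
//     output = a * parvo + (1 - a) * magno,
// so the foveal (parvo) channel dominates near the centre and fades out smoothly with
// the distance d from (halfCols, halfRows).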

@@ -1,44 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
/* End of file. */

@@ -43,11 +43,17 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/opencv_modules.hpp"
#include "opencv2/bioinspired.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include <valarray>
#ifdef HAVE_OPENCV_OCL
#include "opencv2/ocl/private/util.hpp"
#endif
namespace cv
{

@@ -628,6 +628,7 @@ void RetinaImpl::_init(const cv::Size inputSz, const bool colorMode, int colorSa
delete _retinaFilter;
_retinaFilter = new RetinaFilter(inputSz.height, inputSz.width, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
_retinaParameters.OPLandIplParvo.colorMode = colorMode;
// prepare the default parameter XML file with default setup
setup(_retinaParameters);
