Merge pull request #809 from taka-no-me:drop_python1

commit 4223a59497
Andrey Kamaev (committed by OpenCV Buildbot)
CMakeLists.txt | 4
cmake/OpenCVDetectPython.cmake | 2
doc/_themes/blue/static/default.css_t | 15
doc/ocv.py | 8
include/opencv/cvaux.h | 3
modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst | 31
modules/contrib/include/opencv2/contrib.hpp | 1295
modules/contrib/include/opencv2/contrib/compat.hpp | 384
modules/contrib/src/adaptiveskindetector.cpp | 1
modules/contrib/src/ba.cpp | 1
modules/contrib/src/chamfermatching.cpp | 18
modules/contrib/src/colormap.cpp | 16
modules/contrib/src/facerec.cpp | 54
modules/contrib/src/fuzzymeanshifttracker.cpp | 1
modules/contrib/src/gencolors.cpp | 2
modules/contrib/src/lda.cpp | 28
modules/contrib/src/octree.cpp | 2
modules/contrib/src/retina.cpp | 2
modules/contrib/src/rgbdodometry.cpp | 2
modules/contrib/src/spinimages.cpp | 75
modules/contrib/src/stereovar.cpp | 12
modules/core/doc/clustering.rst | 2
modules/core/doc/drawing_functions.rst | 24
modules/core/doc/dynamic_structures.rst | 4
modules/core/doc/old_basic_structures.rst | 74
modules/core/doc/old_xml_yaml_persistence.rst | 4
modules/core/doc/operations_on_arrays.rst | 94
modules/core/doc/utility_and_system_functions_and_macros.rst | 8
modules/core/include/opencv2/core/mat.hpp | 4
modules/highgui/doc/reading_and_writing_images_and_video.rst | 21
modules/highgui/doc/user_interface.rst | 22
modules/imgproc/doc/feature_detection.rst | 16
modules/imgproc/doc/filtering.rst | 19
modules/imgproc/doc/geometric_transformations.rst | 23
modules/imgproc/doc/histograms.rst | 16
modules/imgproc/doc/miscellaneous_transformations.rst | 10
modules/imgproc/doc/motion_analysis_and_object_tracking.rst | 7
modules/imgproc/doc/object_detection.rst | 1
modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst | 27
modules/java/generator/rst_parser.py | 2
modules/legacy/doc/histograms.rst | 5
modules/legacy/doc/motion_analysis.rst | 6
modules/legacy/doc/planar_subdivisions.rst | 10
modules/nonfree/doc/feature_detection.rst | 2
modules/objdetect/doc/cascade_classification.rst | 2
modules/photo/doc/inpainting.rst | 1
modules/python/CMakeLists.txt | 16
modules/python/src2/api | 1805
modules/python/src2/cv.py | 1
modules/python/src2/cv2.cpp | 335
modules/python/src2/cv2.cv.hpp | 4054
modules/python/src2/defs | 360
modules/python/src2/gen.py | 631
modules/python/src2/gen2.py | 16
modules/python/test/test.py | 2266
modules/python/test/test2.py | 133
modules/video/doc/motion_analysis_and_object_tracking.rst | 12
samples/c/adaptiveskindetector.cpp | 2
samples/cpp/facerec_demo.cpp | 6
samples/cpp/stereo_match.cpp | 4
samples/gpu/cascadeclassifier.cpp | 6
samples/python/camera.py | 15
samples/python/camshift.py | 116
samples/python/chessboard.py | 34
samples/python/contours.py | 136
samples/python/convexhull.py | 65
samples/python/cv20squares.py | 169
samples/python/cvutils.py | 20
samples/python/delaunay.py | 139
samples/python/demhist.py | 80
samples/python/dft.py | 114
samples/python/distrans.py | 72
samples/python/dmtx.py | 184
samples/python/drawing.py | 162
samples/python/edge.py | 60
samples/python/facedetect.py | 101
samples/python/fback.py | 56
samples/python/ffilldemo.py | 160
samples/python/fitellipse.py | 104
samples/python/houghlines.py | 58
samples/python/inpaint.py | 69
samples/python/kalman.py | 98
samples/python/kmeans.py | 60
samples/python/laplace.py | 45
samples/python/lkdemo.py | 192
samples/python/logpolar.py | 45
samples/python/minarea.py | 62
samples/python/minidemo.py | 13
samples/python/morphology.py | 52
samples/python/motempl.py | 110
samples/python/numpy_array.py | 66
samples/python/numpy_warhol.py | 48
samples/python/peopledetect.py | 56
samples/python/pyramid_segmentation.py | 41
samples/python/squares.py | 153
samples/python/watershed.py | 109
samples/python2/_coverage.py | 5
samples/python2/browse.py | 2
samples/python2/common.py | 4
samples/python2/contours.py | 2
Some files were not shown because too many files have changed in this diff.

@@ -827,8 +827,8 @@ if(BUILD_opencv_python)
else()
status(" Libraries:" HAVE_opencv_python THEN ${PYTHON_LIBRARIES} ELSE NO)
endif()
status(" numpy:" PYTHON_USE_NUMPY THEN "${PYTHON_NUMPY_INCLUDE_DIR} (ver ${PYTHON_NUMPY_VERSION})" ELSE "NO (Python wrappers can not be generated)")
status(" packages path:" PYTHON_EXECUTABLE THEN "${PYTHON_PACKAGES_PATH}" ELSE "-")
status(" numpy:" PYTHON_NUMPY_INCLUDE_DIR THEN "${PYTHON_NUMPY_INCLUDE_DIR} (ver ${PYTHON_NUMPY_VERSION})" ELSE "NO (Python wrappers can not be generated)")
status(" packages path:" PYTHON_EXECUTABLE THEN "${PYTHON_PACKAGES_PATH}" ELSE "-")
endif()
# ========================== java ==========================

@@ -14,7 +14,6 @@ if(WIN32 AND NOT PYTHON_EXECUTABLE)
endif()
find_host_package(PythonInterp 2.0)
unset(PYTHON_USE_NUMPY CACHE)
unset(HAVE_SPHINX CACHE)
if(PYTHON_EXECUTABLE)
if(PYTHON_VERSION_STRING)
@@ -93,7 +92,6 @@ if(PYTHON_EXECUTABLE)
endif()
if(PYTHON_NUMPY_INCLUDE_DIR)
set(PYTHON_USE_NUMPY TRUE)
execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import numpy; print numpy.version.version"
RESULT_VARIABLE PYTHON_NUMPY_PROCESS
OUTPUT_VARIABLE PYTHON_NUMPY_VERSION

@@ -310,21 +310,6 @@ dl.pyfunction > dt:first-child {
margin-bottom: 7px;
}
dl.pyoldfunction > dt:first-child {
margin-bottom: 7px;
color: #8080B0;
}
dl.pyoldfunction > dt:first-child tt.descname
{
color: #8080B0;
}
dl.pyoldfunction > dt:first-child tt.descclassname
{
color: #8080B0;
}
dl.jfunction > dt:first-child {
margin-bottom: 7px;
}

@@ -267,10 +267,6 @@ class OCVPyModulelevel(OCVPyObject):
else:
return ''
class OCVPyOldModulelevel(OCVPyModulelevel):
directive_prefix = 'pyold'
pass
class OCVPyXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
refnode['ocv:module'] = env.temp_data.get('ocv:module')
@@ -1491,7 +1487,6 @@ class OCVDomain(Domain):
'cfunction': ObjType(l_('cfunction'), 'cfunc', 'cfuncx'),
'jfunction': ObjType(l_('jfunction'), 'jfunc', 'jfuncx'),
'pyfunction': ObjType(l_('pyfunction'), 'pyfunc'),
'pyoldfunction': ObjType(l_('pyoldfunction'), 'pyoldfunc'),
'member': ObjType(l_('member'), 'member'),
'emember': ObjType(l_('emember'), 'emember'),
'type': ObjType(l_('type'), 'type'),
@@ -1505,7 +1500,6 @@ class OCVDomain(Domain):
'cfunction': OCVCFunctionObject,
'jfunction': OCVJavaFunctionObject,
'pyfunction': OCVPyModulelevel,
'pyoldfunction': OCVPyOldModulelevel,
'member': OCVMemberObject,
'emember': OCVEnumMemberObject,
'type': OCVTypeObject,
@@ -1522,7 +1516,6 @@ class OCVDomain(Domain):
'jfunc' : OCVXRefRole(fix_parens=True),
'jfuncx' : OCVXRefRole(),
'pyfunc' : OCVPyXRefRole(),
'pyoldfunc' : OCVPyXRefRole(),
'member': OCVXRefRole(),
'emember': OCVXRefRole(),
'type': OCVXRefRole(),
@@ -1612,7 +1605,6 @@ class OCVDomain(Domain):
'cfunction': _('C function'),
'jfunction': _('Java method'),
'pyfunction': _('Python function'),
'pyoldfunction': _('Legacy Python function'),
'member': _('C++ member'),
'emember': _('enum member'),
'type': _('C/C++ type'),

@@ -51,11 +51,12 @@
#include "opencv2/photo/photo_c.h"
#include "opencv2/video/tracking_c.h"
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/contrib/compat.hpp"
#include "opencv2/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include "opencv2/legacy/blobtrack.hpp"
#include "opencv2/contrib.hpp"
#endif

@@ -121,8 +121,6 @@ Finds the camera intrinsic and extrinsic parameters from several views of a cali
.. ocv:cfunction:: double cvCalibrateCamera2( const CvMat* object_points, const CvMat* image_points, const CvMat* point_counts, CvSize image_size, CvMat* camera_matrix, CvMat* distortion_coeffs, CvMat* rotation_vectors=NULL, CvMat* translation_vectors=NULL, int flags=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) )
.. ocv:pyoldfunction:: cv.CalibrateCamera2(objectPoints, imagePoints, pointCounts, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flags=0)-> None
:param objectPoints: In the new interface it is a vector of vectors of calibration pattern points in the calibration pattern coordinate space. The outer vector contains as many elements as the number of the pattern views. If the same calibration pattern is shown in each view and it is fully visible, all the vectors will be the same. Although, it is possible to use partially occluded patterns, or even different patterns in different views. Then, the vectors will be different. The points are 3D, but since they are in a pattern coordinate system, then, if the rig is planar, it may make sense to put the model to a XY coordinate plane so that Z-coordinate of each input object point is 0.
In the old interface all the vectors of object points from different views are concatenated together.
@@ -279,8 +277,6 @@ For points in an image of a stereo pair, computes the corresponding epilines in
.. ocv:cfunction:: void cvComputeCorrespondEpilines( const CvMat* points, int which_image, const CvMat* fundamental_matrix, CvMat* correspondent_lines )
.. ocv:pyoldfunction:: cv.ComputeCorrespondEpilines(points, whichImage, F, lines) -> None
:param points: Input points. :math:`N \times 1` or :math:`1 \times N` matrix of type ``CV_32FC2`` or ``vector<Point2f>`` .
:param whichImage: Index of the image (1 or 2) that contains the ``points`` .
@@ -354,7 +350,6 @@ Converts points to/from homogeneous coordinates.
.. ocv:function:: void convertPointsHomogeneous( InputArray src, OutputArray dst )
.. ocv:cfunction:: void cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst )
.. ocv:pyoldfunction:: cv.ConvertPointsHomogeneous(src, dst) -> None
:param src: Input array or vector of 2D, 3D, or 4D points.
@@ -400,8 +395,6 @@ Decomposes a projection matrix into a rotation matrix and a camera matrix.
.. ocv:cfunction:: void cvDecomposeProjectionMatrix( const CvMat * projMatr, CvMat * calibMatr, CvMat * rotMatr, CvMat * posVect, CvMat * rotMatrX=NULL, CvMat * rotMatrY=NULL, CvMat * rotMatrZ=NULL, CvPoint3D64f * eulerAngles=NULL )
.. ocv:pyoldfunction:: cv.DecomposeProjectionMatrix(projMatrix, cameraMatrix, rotMatrix, transVect, rotMatrX=None, rotMatrY=None, rotMatrZ=None) -> eulerAngles
:param projMatrix: 3x4 input projection matrix P.
:param cameraMatrix: Output 3x3 camera matrix K.
@@ -436,7 +429,6 @@ Renders the detected chessboard corners.
.. ocv:pyfunction:: cv2.drawChessboardCorners(image, patternSize, corners, patternWasFound) -> image
.. ocv:cfunction:: void cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, CvPoint2D32f* corners, int count, int pattern_was_found )
.. ocv:pyoldfunction:: cv.DrawChessboardCorners(image, patternSize, corners, patternWasFound)-> None
:param image: Destination image. It must be an 8-bit color image.
@@ -459,7 +451,6 @@ Finds the positions of internal corners of the chessboard.
.. ocv:pyfunction:: cv2.findChessboardCorners(image, patternSize[, corners[, flags]]) -> retval, corners
.. ocv:cfunction:: int cvFindChessboardCorners( const void* image, CvSize pattern_size, CvPoint2D32f* corners, int* corner_count=NULL, int flags=CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE )
.. ocv:pyoldfunction:: cv.FindChessboardCorners(image, patternSize, flags=CV_CALIB_CB_ADAPTIVE_THRESH) -> corners
:param image: Source chessboard view. It must be an 8-bit grayscale or color image.
@@ -564,8 +555,6 @@ Finds an object pose from 3D-2D point correspondences.
.. ocv:cfunction:: void cvFindExtrinsicCameraParams2( const CvMat* object_points, const CvMat* image_points, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* rotation_vector, CvMat* translation_vector, int use_extrinsic_guess=0 )
.. ocv:pyoldfunction:: cv.FindExtrinsicCameraParams2(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess=0 ) -> None
:param objectPoints: Array of object points in the object coordinate space, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. ``vector<Point3f>`` can be also passed here.
:param imagePoints: Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, where N is the number of points. ``vector<Point2f>`` can be also passed here.
@@ -636,7 +625,6 @@ Calculates a fundamental matrix from the corresponding points in two images.
.. ocv:pyfunction:: cv2.findFundamentalMat(points1, points2[, method[, param1[, param2[, mask]]]]) -> retval, mask
.. ocv:cfunction:: int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, CvMat* fundamental_matrix, int method=CV_FM_RANSAC, double param1=3., double param2=0.99, CvMat* status=NULL )
.. ocv:pyoldfunction:: cv.FindFundamentalMat(points1, points2, fundamentalMatrix, method=CV_FM_RANSAC, param1=1., param2=0.99, status=None) -> retval
:param points1: Array of ``N`` points from the first image. The point coordinates should be floating-point (single or double precision).
@@ -820,8 +808,6 @@ Finds a perspective transformation between two planes.
.. ocv:cfunction:: int cvFindHomography( const CvMat* src_points, const CvMat* dst_points, CvMat* homography, int method=0, double ransacReprojThreshold=3, CvMat* mask=0 )
.. ocv:pyoldfunction:: cv.FindHomography(srcPoints, dstPoints, H, method=0, ransacReprojThreshold=3.0, status=None) -> None
:param srcPoints: Coordinates of the points in the original plane, a matrix of the type ``CV_32FC2`` or ``vector<Point2f>`` .
:param dstPoints: Coordinates of the points in the target plane, a matrix of the type ``CV_32FC2`` or a ``vector<Point2f>`` .
@@ -946,8 +932,6 @@ Returns the new camera matrix based on the free scaling parameter.
.. ocv:cfunction:: void cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, const CvMat* dist_coeffs, CvSize image_size, double alpha, CvMat* new_camera_matrix, CvSize new_imag_size=cvSize(0,0), CvRect* valid_pixel_ROI=0, int center_principal_point=0 )
.. ocv:pyoldfunction:: cv.GetOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha, newCameraMatrix, newImageSize=(0, 0), validPixROI=0, centerPrincipalPoint=0) -> None
:param cameraMatrix: Input camera matrix.
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])` of 4, 5, 8 or 12 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
@@ -981,8 +965,6 @@ Finds an initial camera matrix from 3D-2D point correspondences.
.. ocv:cfunction:: void cvInitIntrinsicParams2D( const CvMat* object_points, const CvMat* image_points, const CvMat* npoints, CvSize image_size, CvMat* camera_matrix, double aspect_ratio=1. )
.. ocv:pyoldfunction:: cv.InitIntrinsicParams2D(objectPoints, imagePoints, npoints, imageSize, cameraMatrix, aspectRatio=1.) -> None
:param objectPoints: Vector of vectors of the calibration pattern points in the calibration pattern coordinate space. In the old interface all the per-view vectors are concatenated. See :ocv:func:`calibrateCamera` for details.
:param imagePoints: Vector of vectors of the projections of the calibration pattern points. In the old interface all the per-view vectors are concatenated.
@@ -1030,8 +1012,6 @@ Projects 3D points to an image plane.
.. ocv:cfunction:: void cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, const CvMat* translation_vector, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* image_points, CvMat* dpdrot=NULL, CvMat* dpdt=NULL, CvMat* dpdf=NULL, CvMat* dpdc=NULL, CvMat* dpddist=NULL, double aspect_ratio=0 )
.. ocv:pyoldfunction:: cv.ProjectPoints2(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints, dpdrot=None, dpdt=None, dpdf=None, dpdc=None, dpddist=None)-> None
:param objectPoints: Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or ``vector<Point3f>`` ), where N is the number of points in the view.
:param rvec: Rotation vector. See :ocv:func:`Rodrigues` for details.
@@ -1075,8 +1055,6 @@ Reprojects a disparity image to 3D space.
.. ocv:cfunction:: void cvReprojectImageTo3D( const CvArr* disparityImage, CvArr* _3dImage, const CvMat* Q, int handleMissingValues=0 )
.. ocv:pyoldfunction:: cv.ReprojectImageTo3D(disparity, _3dImage, Q, handleMissingValues=0) -> None
:param disparity: Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit floating-point disparity image.
:param _3dImage: Output 3-channel floating-point image of the same size as ``disparity`` . Each element of ``_3dImage(x,y)`` contains 3D coordinates of the point ``(x,y)`` computed from the disparity map.
@@ -1109,7 +1087,6 @@ Computes an RQ decomposition of 3x3 matrices.
.. ocv:pyfunction:: cv2.RQDecomp3x3(src[, mtxR[, mtxQ[, Qx[, Qy[, Qz]]]]]) -> retval, mtxR, mtxQ, Qx, Qy, Qz
.. ocv:cfunction:: void cvRQDecomp3x3( const CvMat * matrixM, CvMat * matrixR, CvMat * matrixQ, CvMat * matrixQx=NULL, CvMat * matrixQy=NULL, CvMat * matrixQz=NULL, CvPoint3D64f * eulerAngles=NULL )
.. ocv:pyoldfunction:: cv.RQDecomp3x3(M, R, Q, Qx=None, Qy=None, Qz=None) -> eulerAngles
:param src: 3x3 input matrix.
@@ -1140,8 +1117,6 @@ Converts a rotation matrix to a rotation vector or vice versa.
.. ocv:cfunction:: int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian=0 )
.. ocv:pyoldfunction:: cv.Rodrigues2(src, dst, jacobian=0)-> None
:param src: Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
:param dst: Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
@@ -1269,8 +1244,6 @@ Calibrates the stereo camera.
.. ocv:cfunction:: double cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1, const CvMat* image_points2, const CvMat* npoints, CvMat* camera_matrix1, CvMat* dist_coeffs1, CvMat* camera_matrix2, CvMat* dist_coeffs2, CvSize image_size, CvMat* R, CvMat* T, CvMat* E=0, CvMat* F=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6), int flags=CV_CALIB_FIX_INTRINSIC )
.. ocv:pyoldfunction:: cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=None, F=None, term_crit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)-> None
:param objectPoints: Vector of vectors of the calibration pattern points.
:param imagePoints1: Vector of vectors of the projections of the calibration pattern points, observed by the first camera.
@@ -1362,8 +1335,6 @@ Computes rectification transforms for each head of a calibrated stereo camera.
.. ocv:cfunction:: void cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2, const CvMat* dist_coeffs1, const CvMat* dist_coeffs2, CvSize image_size, const CvMat* R, const CvMat* T, CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, CvMat* Q=0, int flags=CV_CALIB_ZERO_DISPARITY, double alpha=-1, CvSize new_image_size=cvSize(0,0), CvRect* valid_pix_ROI1=0, CvRect* valid_pix_ROI2=0 )
.. ocv:pyoldfunction:: cv.StereoRectify(cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q=None, flags=CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0)) -> (roi1, roi2)
:param cameraMatrix1: First camera matrix.
:param cameraMatrix2: Second camera matrix.
@@ -1451,8 +1422,6 @@ Computes a rectification transform for an uncalibrated stereo camera.
.. ocv:cfunction:: int cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, const CvMat* F, CvSize img_size, CvMat* H1, CvMat* H2, double threshold=5 )
.. ocv:pyoldfunction:: cv.StereoRectifyUncalibrated(points1, points2, F, imageSize, H1, H2, threshold=5)-> None
:param points1: Array of feature points in the first image.
:param points2: The corresponding points in the second image. The same formats as in :ocv:func:`findFundamentalMat` are supported.
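The calib3d documentation hunks above appear to delete only the `.. ocv:pyoldfunction::` entries for the dropped cv.* Python bindings; the C++ interface documented alongside them is unchanged. For reference, a minimal C++ sketch of cv::Rodrigues, whose old cv.Rodrigues2 binding is removed above (the rotation-vector values are placeholders):

    #include <opencv2/calib3d.hpp>

    int main()
    {
        // 3x1 rotation vector -> 3x3 rotation matrix, as documented above.
        cv::Mat rvec = (cv::Mat_<double>(3, 1) << 0.1, 0.2, 0.3);
        cv::Mat R;
        cv::Rodrigues(rvec, R);    // R is 3x3, CV_64F

        // And back: 3x3 rotation matrix -> 3x1 rotation vector.
        cv::Mat rvec2;
        cv::Rodrigues(R, rvec2);
        return 0;
    }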

File diff suppressed because it is too large.

@@ -0,0 +1,384 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CONTRIB_COMPAT_HPP__
#define __OPENCV_CONTRIB_COMPAT_HPP__
#include "opencv2/core/core_c.h"
#ifdef __cplusplus
/****************************************************************************************\
* Adaptive Skin Detector *
\****************************************************************************************/
class CV_EXPORTS CvAdaptiveSkinDetector
{
private:
enum {
GSD_HUE_LT = 3,
GSD_HUE_UT = 33,
GSD_INTENSITY_LT = 15,
GSD_INTENSITY_UT = 250
};
class CV_EXPORTS Histogram
{
private:
enum {
HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1)
};
protected:
int findCoverageIndex(double surfaceToCover, int defaultValue = 0);
public:
CvHistogram *fHistogram;
Histogram();
virtual ~Histogram();
void findCurveThresholds(int &x1, int &x2, double percent = 0.05);
void mergeWith(Histogram *source, double weight);
};
int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider;
double fHistogramMergeFactor, fHuePercentCovered;
Histogram histogramHueMotion, skinHueHistogram;
IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;
protected:
void initData(IplImage *src, int widthDivider, int heightDivider);
void adaptiveFilter();
public:
enum {
MORPHING_METHOD_NONE = 0,
MORPHING_METHOD_ERODE = 1,
MORPHING_METHOD_ERODE_ERODE = 2,
MORPHING_METHOD_ERODE_DILATE = 3
};
CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE);
virtual ~CvAdaptiveSkinDetector();
virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
};
/****************************************************************************************\
* Fuzzy MeanShift Tracker *
\****************************************************************************************/
class CV_EXPORTS CvFuzzyPoint {
public:
double x, y, value;
CvFuzzyPoint(double _x, double _y);
};
class CV_EXPORTS CvFuzzyCurve {
private:
std::vector<CvFuzzyPoint> points;
double value, centre;
bool between(double x, double x1, double x2);
public:
CvFuzzyCurve();
~CvFuzzyCurve();
void setCentre(double _centre);
double getCentre();
void clear();
void addPoint(double x, double y);
double calcValue(double param);
double getValue();
void setValue(double _value);
};
class CV_EXPORTS CvFuzzyFunction {
public:
std::vector<CvFuzzyCurve> curves;
CvFuzzyFunction();
~CvFuzzyFunction();
void addCurve(CvFuzzyCurve *curve, double value = 0);
void resetValues();
double calcValue();
CvFuzzyCurve *newCurve();
};
class CV_EXPORTS CvFuzzyRule {
private:
CvFuzzyCurve *fuzzyInput1, *fuzzyInput2;
CvFuzzyCurve *fuzzyOutput;
public:
CvFuzzyRule();
~CvFuzzyRule();
void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
double calcValue(double param1, double param2);
CvFuzzyCurve *getOutputCurve();
};
class CV_EXPORTS CvFuzzyController {
private:
std::vector<CvFuzzyRule*> rules;
public:
CvFuzzyController();
~CvFuzzyController();
void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
double calcOutput(double param1, double param2);
};
class CV_EXPORTS CvFuzzyMeanShiftTracker
{
private:
class FuzzyResizer
{
private:
CvFuzzyFunction iInput, iOutput;
CvFuzzyController fuzzyController;
public:
FuzzyResizer();
int calcOutput(double edgeDensity, double density);
};
class SearchWindow
{
public:
FuzzyResizer *fuzzyResizer;
int x, y;
int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth;
int ldx, ldy, ldw, ldh, numShifts, numIters;
int xGc, yGc;
long m00, m01, m10, m11, m02, m20;
double ellipseAngle;
double density;
unsigned int depthLow, depthHigh;
int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom;
SearchWindow();
~SearchWindow();
void setSize(int _x, int _y, int _width, int _height);
void initDepthValues(IplImage *maskImage, IplImage *depthMap);
bool shift();
void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth);
void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth);
};
public:
enum TrackingState
{
tsNone = 0,
tsSearching = 1,
tsTracking = 2,
tsSetWindow = 3,
tsDisabled = 10
};
enum ResizeMethod {
rmEdgeDensityLinear = 0,
rmEdgeDensityFuzzy = 1,
rmInnerDensity = 2
};
enum {
MinKernelMass = 1000
};
SearchWindow kernel;
int searchMode;
private:
enum
{
MaxMeanShiftIteration = 5,
MaxSetSizeIteration = 5
};
void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth);
public:
CvFuzzyMeanShiftTracker();
~CvFuzzyMeanShiftTracker();
void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass);
};
namespace cv
{
typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);
class CV_EXPORTS LevMarqSparse {
public:
LevMarqSparse();
LevMarqSparse(int npoints, // number of points
int ncameras, // number of cameras
int nPointParams, // number of params per one point (3 in case of 3D points)
int nCameraParams, // number of parameters per one camera
int nErrParams, // number of parameters in measurement vector
// for 1 point at one camera (2 in case of 2D projections)
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
// callback for estimation of Jacobian matrices
void (*fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
// callback for estimation of backprojection errors
void (*func)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& estim, void* data),
void* data, // user-specific data passed to the callbacks
BundleAdjustCallback cb, void* user_data
);
virtual ~LevMarqSparse();
virtual void run( int npoints, // number of points
int ncameras, // number of cameras
int nPointParams, // number of params per one point (3 in case of 3D points)
int nCameraParams, // number of parameters per one camera
int nErrParams, // number of parameters in measurement vector
// for 1 point at one camera (2 in case of 2D projections)
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
// callback for estimation of Jacobian matrices
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
// callback for estimation of backprojection errors
void (CV_CDECL * func)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& estim, void* data),
void* data // user-specific data passed to the callbacks
);
virtual void clear();
// useful function to do simple bundle adjustment tasks
static void bundleAdjust(std::vector<Point3d>& points, // positions of points in global coordinate system (input and output)
const std::vector<std::vector<Point2d> >& imagePoints, // projections of 3d points for every camera
const std::vector<std::vector<int> >& visibility, // visibility of 3d points for every camera
std::vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
std::vector<Mat>& R, // rotation matrices of all cameras (input and output)
std::vector<Mat>& T, // translation vector of all cameras (input and output)
std::vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output)
const TermCriteria& criteria=
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON),
BundleAdjustCallback cb = 0, void* user_data = 0);
public:
virtual void optimize(CvMat &_vis); //main function that runs minimization
//iteratively asks for measurement for visible camera-point pairs
void ask_for_proj(CvMat &_vis,bool once=false);
//iteratively asks for Jacobians for every camera_point pair
void ask_for_projac(CvMat &_vis);
CvMat* err; //error X-hX
double prevErrNorm, errNorm;
double lambda;
CvTermCriteria criteria;
int iters;
CvMat** U; //size of array is equal to number of cameras
CvMat** V; //size of array is equal to number of points
CvMat** inv_V_star; //inverse of V*
CvMat** A;
CvMat** B;
CvMat** W;
CvMat* X; //measurement
CvMat* hX; //current measurement extimation given new parameter vector
CvMat* prevP; //current already accepted parameter.
CvMat* P; // parameters used to evaluate function with new params
// this parameters may be rejected
CvMat* deltaP; //computed increase of parameters (result of normal system solution )
CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation
// length of array is j = number of cameras
CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation
// length of array is i = number of points
CvMat** Yj; //length of array is i = num_points
CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params
CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation
CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j
int num_cams;
int num_points;
int num_err_param;
int num_cam_param;
int num_point_param;
//target function and jacobian pointers, which needs to be initialized
void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);
void* data;
BundleAdjustCallback cb;
void* user_data;
};
} // cv
#endif /* __cplusplus */
#endif /* __OPENCV_CONTRIB_COMPAT_HPP__ */
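compat.hpp is a new header that keeps the old C-style contrib classes (CvAdaptiveSkinDetector, CvFuzzyMeanShiftTracker, LevMarqSparse) compilable after their removal from opencv2/contrib.hpp. A minimal sketch of legacy code building against it, assuming a caller that already owns the IplImage buffers:

    #include "opencv2/contrib/compat.hpp"

    // Hypothetical helper: runs the moved (not removed) skin detector on one frame.
    void detectSkin(IplImage* bgrFrame, IplImage* hueMask)
    {
        CvAdaptiveSkinDetector detector(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
        detector.process(bgrFrame, hueMask);   // hueMask: preallocated single-channel 8-bit mask
    }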

@@ -36,6 +36,7 @@
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/contrib/compat.hpp"
#define ASD_INTENSITY_SET_PIXEL(pointer, qq) {(*pointer) = (unsigned char)qq;}

@@ -41,6 +41,7 @@
#include "precomp.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/contrib/compat.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>

@@ -142,7 +142,7 @@ private:
LocationScaleImageRange(const std::vector<Point>& locations, const std::vector<float>& _scales) :
locations_(locations), scales_(_scales)
{
assert(locations.size()==_scales.size());
CV_Assert(locations.size()==_scales.size());
}
ImageIterator* iterator() const
@@ -393,7 +393,7 @@ private:
LocationScaleImageIterator(const std::vector<Point>& locations, const std::vector<float>& _scales) :
locations_(locations), scales_(_scales)
{
assert(locations.size()==_scales.size());
CV_Assert(locations.size()==_scales.size());
reset();
}
@@ -622,7 +622,7 @@ void ChamferMatcher::Matching::followContour(Mat& templ_img, template_coords_t&
coordinate_t next;
unsigned char ptr;
assert (direction==-1 || !coords.empty());
CV_Assert (direction==-1 || !coords.empty());
coordinate_t crt = coords.back();
@@ -903,18 +903,18 @@ void ChamferMatcher::Template::show() const
p2.x = x + pad*(int)(sin(orientations[i])*100)/100;
p2.y = y + pad*(int)(cos(orientations[i])*100)/100;
line(templ_color, p1,p2, CV_RGB(255,0,0));
line(templ_color, p1,p2, Scalar(255,0,0));
}
}
circle(templ_color,Point(center.x + pad, center.y + pad),1,CV_RGB(0,255,0));
circle(templ_color,Point(center.x + pad, center.y + pad),1,Scalar(0,255,0));
#ifdef HAVE_OPENCV_HIGHGUI
namedWindow("templ",1);
imshow("templ",templ_color);
waitKey();
#else
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without GUI support");
CV_Error(Error::StsNotImplemented, "OpenCV has been compiled without GUI support");
#endif
templ_color.release();
@@ -1059,7 +1059,7 @@ void ChamferMatcher::Matching::fillNonContourOrientations(Mat& annotated_img, Ma
int cols = annotated_img.cols;
int rows = annotated_img.rows;
assert(orientation_img.cols==cols && orientation_img.rows==rows);
CV_Assert(orientation_img.cols==cols && orientation_img.rows==rows);
for (int y=0;y<rows;++y) {
for (int x=0;x<cols;++x) {
@@ -1279,7 +1279,7 @@ void ChamferMatcher::showMatch(Mat& img, int index)
std::cout << "Index too big.\n" << std::endl;
}
assert(img.channels()==3);
CV_Assert(img.channels()==3);
Match match = matches[index];
@@ -1298,7 +1298,7 @@ void ChamferMatcher::showMatch(Mat& img, Match match)
void ChamferMatcher::showMatch(Mat& img, Match match)
{
assert(img.channels()==3);
CV_Assert(img.channels()==3);
const template_coords_t& templ_coords = match.tpl->coords;
for (size_t i=0;i<templ_coords.size();++i) {
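The chamfermatching changes are mechanical: each bare assert() becomes CV_Assert(), which stays active in release builds and throws cv::Exception instead of aborting the process. A small sketch of the behavioural difference:

    #include <cassert>
    #include <opencv2/core.hpp>

    void requireColor(const cv::Mat& img)
    {
        // assert(img.channels() == 3);   // old: compiled out under NDEBUG, aborts when it fires
        CV_Assert(img.channels() == 3);   // new: always checked, throws cv::Exception on failure
    }

Callers can catch the cv::Exception, which carries the failed expression along with the file and line.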

@@ -40,7 +40,7 @@ static Mat linspace(float x0, float x1, int n)
static void sortMatrixRowsByIndices(InputArray _src, InputArray _indices, OutputArray _dst)
{
if(_indices.getMat().type() != CV_32SC1)
CV_Error(CV_StsUnsupportedFormat, "cv::sortRowsByIndices only works on integer indices!");
CV_Error(Error::StsUnsupportedFormat, "cv::sortRowsByIndices only works on integer indices!");
Mat src = _src.getMat();
std::vector<int> indices = _indices.getMat();
_dst.create(src.rows, src.cols, src.type());
@@ -64,8 +64,8 @@ static Mat argsort(InputArray _src, bool ascending=true)
{
Mat src = _src.getMat();
if (src.rows != 1 && src.cols != 1)
CV_Error(CV_StsBadArg, "cv::argsort only sorts 1D matrices.");
int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING);
CV_Error(Error::StsBadArg, "cv::argsort only sorts 1D matrices.");
int flags = SORT_EVERY_ROW | (ascending ? SORT_ASCENDING : SORT_DESCENDING);
Mat sorted_indices;
sortIdx(src.reshape(1,1),sorted_indices,flags);
return sorted_indices;
@@ -116,8 +116,8 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
Mat Y = _Y.getMat();
Mat xi = _xi.getMat();
// check types & alignment
assert((x.type() == Y.type()) && (Y.type() == xi.type()));
assert((x.cols == 1) && (x.rows == Y.rows) && (x.cols == Y.cols));
CV_Assert((x.type() == Y.type()) && (Y.type() == xi.type()));
CV_Assert((x.cols == 1) && (x.rows == Y.rows) && (x.cols == Y.cols));
// call templated interp1
switch(x.type()) {
case CV_8SC1: return interp1_<char>(x,Y,xi); break;
@@ -127,7 +127,7 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
case CV_32SC1: return interp1_<int>(x,Y,xi); break;
case CV_32FC1: return interp1_<float>(x,Y,xi); break;
case CV_64FC1: return interp1_<double>(x,Y,xi); break;
default: CV_Error(CV_StsUnsupportedFormat, ""); break;
default: CV_Error(Error::StsUnsupportedFormat, ""); break;
}
return Mat();
}
@@ -473,7 +473,7 @@ namespace colormap
void ColorMap::operator()(InputArray _src, OutputArray _dst) const
{
if(_lut.total() != 256)
CV_Error(CV_StsAssert, "cv::LUT only supports tables of size 256.");
CV_Error(Error::StsAssert, "cv::LUT only supports tables of size 256.");
Mat src = _src.getMat();
// Return original matrix if wrong type is given (is fail loud better here?)
if(src.type() != CV_8UC1 && src.type() != CV_8UC3)
@@ -521,7 +521,7 @@ namespace colormap
colormap == COLORMAP_WINTER ? (colormap::ColorMap*)(new colormap::Winter) : 0;
if( !cm )
CV_Error( CV_StsBadArg, "Unknown colormap id; use one of COLORMAP_*");
CV_Error( Error::StsBadArg, "Unknown colormap id; use one of COLORMAP_*");
(*cm)(src, dst);
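Throughout colormap.cpp (and facerec.cpp and lda.cpp below), the error codes move from the C macros (CV_StsBadArg, CV_StsError, ...) to the scoped cv::Error enum; the CV_Error macro itself is unchanged. A sketch of the new-style call, with a hypothetical validity check as the trigger:

    #include <opencv2/core.hpp>

    void applyColormapChecked(int colormapId)
    {
        if (colormapId < 0)   // hypothetical validation
            CV_Error(cv::Error::StsBadArg, "Unknown colormap id; use one of COLORMAP_*");
    }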

@@ -51,7 +51,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
// make sure the input data is a vector of matrices or vector of vector
if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// number of samples
size_t n = src.total();
@@ -67,7 +67,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
// make sure data can be reshaped, throw exception if not!
if(src.getMat(i).total() != d) {
String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, d, src.getMat(i).total());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// get a hold of the current row
Mat xi = data.row(i);
@@ -306,13 +306,13 @@ void FaceRecognizer::update(InputArrayOfArrays src, InputArray labels ) {
}
String error_msg = format("This FaceRecognizer (%s) does not support updating, you have to use FaceRecognizer::train to update it.", this->name().c_str());
CV_Error(CV_StsNotImplemented, error_msg);
CV_Error(Error::StsNotImplemented, error_msg);
}
void FaceRecognizer::save(const String& filename) const {
FileStorage fs(filename, FileStorage::WRITE);
if (!fs.isOpened())
CV_Error(CV_StsError, "File can't be opened for writing!");
CV_Error(Error::StsError, "File can't be opened for writing!");
this->save(fs);
fs.release();
}
@@ -320,7 +320,7 @@ void FaceRecognizer::save(const String& filename) const {
void FaceRecognizer::load(const String& filename) {
FileStorage fs(filename, FileStorage::READ);
if (!fs.isOpened())
CV_Error(CV_StsError, "File can't be opened for writing!");
CV_Error(Error::StsError, "File can't be opened for writing!");
this->load(fs);
fs.release();
}
@@ -331,17 +331,17 @@ void FaceRecognizer::load(const String& filename) {
void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
if(_src.total() == 0) {
String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
} else if(_local_labels.getMat().type() != CV_32SC1) {
String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _local_labels.type());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// make sure data has correct size
if(_src.total() > 1) {
for(int i = 1; i < static_cast<int>(_src.total()); i++) {
if(_src.getMat(i-1).total() != _src.getMat(i).total()) {
String error_message = format("In the Eigenfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", _src.getMat(i-1).total(), _src.getMat(i).total());
CV_Error(CV_StsUnsupportedFormat, error_message);
CV_Error(Error::StsUnsupportedFormat, error_message);
}
}
}
@@ -355,7 +355,7 @@ void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
// assert there are as much samples as labels
if(static_cast<int>(labels.total()) != n) {
String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", n, labels.total());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// clear existing model data
_labels.release();
@@ -365,7 +365,7 @@ void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
_num_components = n;
// perform the PCA
PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, _num_components);
PCA pca(data, Mat(), PCA::DATA_AS_ROW, _num_components);
// copy the PCA results
_mean = pca.mean.reshape(1,1); // store the mean vector
_eigenvalues = pca.eigenvalues.clone(); // eigenvalues by row
@@ -386,11 +386,11 @@ void Eigenfaces::predict(InputArray _src, int &minClass, double &minDist) const
if(_projections.empty()) {
// throw error if no data (or simply return -1?)
String error_message = "This Eigenfaces model is not computed yet. Did you call Eigenfaces::train?";
CV_Error(CV_StsError, error_message);
CV_Error(Error::StsError, error_message);
} else if(_eigenvectors.rows != static_cast<int>(src.total())) {
// check data alignment just for clearer exception messages
String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// project into PCA subspace
Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
@@ -440,17 +440,17 @@ void Eigenfaces::save(FileStorage& fs) const {
void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
if(src.total() == 0) {
String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
} else if(_lbls.getMat().type() != CV_32SC1) {
String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// make sure data has correct size
if(src.total() > 1) {
for(int i = 1; i < static_cast<int>(src.total()); i++) {
if(src.getMat(i-1).total() != src.getMat(i).total()) {
String error_message = format("In the Fisherfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", src.getMat(i-1).total(), src.getMat(i).total());
CV_Error(CV_StsUnsupportedFormat, error_message);
CV_Error(Error::StsUnsupportedFormat, error_message);
}
}
}
@@ -462,10 +462,10 @@ void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
// make sure labels are passed in correct shape
if(labels.total() != (size_t) N) {
String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", N, labels.total());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
} else if(labels.rows != 1 && labels.cols != 1) {
String error_message = format("Expected the labels in a matrix with one row or column! Given dimensions are rows=%s, cols=%d.", labels.rows, labels.cols);
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// clear existing model data
_labels.release();
@@ -481,7 +481,7 @@ void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
if((_num_components <= 0) || (_num_components > (C-1)))
_num_components = (C-1);
// perform a PCA and keep (N-C) components
PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, (N-C));
PCA pca(data, Mat(), PCA::DATA_AS_ROW, (N-C));
// project the data and perform a LDA on it
LDA lda(pca.project(data),labels, _num_components);
// store the total mean vector
@@ -506,10 +506,10 @@ void Fisherfaces::predict(InputArray _src, int &minClass, double &minDist) const
if(_projections.empty()) {
// throw error if no data (or simply return -1?)
String error_message = "This Fisherfaces model is not computed yet. Did you call Fisherfaces::train?";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
} else if(src.total() != (size_t) _eigenvectors.rows) {
String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// project into LDA subspace
Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
@@ -641,7 +641,7 @@ static void elbp(InputArray src, OutputArray dst, int radius, int neighbors)
case CV_64FC1: elbp_<double>(src,dst, radius, neighbors); break;
default:
String error_msg = format("Using Original Local Binary Patterns for feature extraction only works on single-channel images (given %d). Please pass the image data as a grayscale image!", type);
CV_Error(CV_StsNotImplemented, error_msg);
CV_Error(Error::StsNotImplemented, error_msg);
break;
}
}
@@ -687,7 +687,7 @@ static Mat histc(InputArray _src, int minVal, int maxVal, bool normed)
return histc_(src, minVal, maxVal, normed);
break;
default:
CV_Error(CV_StsUnmatchedFormats, "This type is not implemented yet."); break;
CV_Error(Error::StsUnmatchedFormats, "This type is not implemented yet."); break;
}
return Mat();
}
@@ -769,14 +769,14 @@ void LBPH::update(InputArrayOfArrays _in_src, InputArray _in_labels) {
void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserveData) {
if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR) {
String error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
if(_in_src.total() == 0) {
String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(CV_StsUnsupportedFormat, error_message);
CV_Error(Error::StsUnsupportedFormat, error_message);
} else if(_in_labels.getMat().type() != CV_32SC1) {
String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
CV_Error(CV_StsUnsupportedFormat, error_message);
CV_Error(Error::StsUnsupportedFormat, error_message);
}
// get the vector of matrices
std::vector<Mat> src;
@@ -786,7 +786,7 @@ void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserv
// check if data is well- aligned
if(labels.total() != src.size()) {
String error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", src.size(), _labels.total());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// if this model should be trained without preserving old data, delete old model data
if(!preserveData) {
@@ -817,7 +817,7 @@ void LBPH::predict(InputArray _src, int &minClass, double &minDist) const {
if(_histograms.empty()) {
// throw error if no data (or simply return -1?)
String error_message = "This LBPH model is not computed yet. Did you call the train method?";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
Mat src = _src.getMat();
// get the spatial histogram from input image
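The facerec training code also switches to the scoped PCA::DATA_AS_ROW flag in place of CV_PCA_DATA_AS_ROW. A standalone sketch of the same pattern, with `data` holding one flattened sample per row as in the Eigenfaces code above:

    #include <opencv2/core.hpp>

    cv::Mat projectSamples(const cv::Mat& data, int numComponents)
    {
        cv::PCA pca(data, cv::Mat(), cv::PCA::DATA_AS_ROW, numComponents);
        return pca.project(data);   // each row projected into the PCA subspace
    }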

@@ -35,6 +35,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/contrib/compat.hpp"
CvFuzzyPoint::CvFuzzyPoint(double _x, double _y)
{

@@ -85,7 +85,7 @@ static void downsamplePoints( const Mat& src, Mat& dst, size_t count )
candidatePointsMask.at<uchar>(0, maxLoc.x) = 0;
Mat minDists;
reduce( activedDists, minDists, 0, CV_REDUCE_MIN );
reduce( activedDists, minDists, 0, REDUCE_MIN );
minMaxLoc( minDists, 0, &maxVal, 0, &maxLoc, candidatePointsMask );
dst.at<Point3_<uchar> >((int)i) = src.at<Point3_<uchar> >(maxLoc.x);
}
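Similarly, REDUCE_MIN replaces the C-era CV_REDUCE_MIN constant in the cv::reduce call. A minimal sketch of that call pattern:

    #include <opencv2/core.hpp>

    cv::Mat columnMins(const cv::Mat& dists)
    {
        cv::Mat minDists;
        cv::reduce(dists, minDists, 0 /*collapse rows*/, cv::REDUCE_MIN);
        return minDists;   // 1 x dists.cols row of per-column minima
    }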

@@ -43,9 +43,9 @@ static Mat argsort(InputArray _src, bool ascending=true)
Mat src = _src.getMat();
if (src.rows != 1 && src.cols != 1) {
String error_message = "Wrong shape of input matrix! Expected a matrix with one row or column.";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING);
int flags = SORT_EVERY_ROW | (ascending ? SORT_ASCENDING : SORT_DESCENDING);
Mat sorted_indices;
sortIdx(src.reshape(1,1),sorted_indices,flags);
return sorted_indices;
@@ -55,7 +55,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
// make sure the input data is a vector of matrices or vector of vector
if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// number of samples
size_t n = src.total();
@@ -71,7 +71,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
// make sure data can be reshaped, throw exception if not!
if(src.getMat(i).total() != d) {
String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, (int)d, (int)src.getMat(i).total());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// get a hold of the current row
Mat xi = data.row(i);
@@ -87,7 +87,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
if(_indices.getMat().type() != CV_32SC1) {
CV_Error(CV_StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
CV_Error(Error::StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
}
Mat src = _src.getMat();
std::vector<int> indices = _indices.getMat();
@@ -179,12 +179,12 @@ Mat subspaceProject(InputArray _W, InputArray _mean, InputArray _src) {
// make sure the data has the correct shape
if(W.rows != d) {
String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// make sure mean is correct if not empty
if(!mean.empty() && (mean.total() != (size_t) d)) {
String error_message = format("Wrong mean shape for the given data matrix. Expected %d, but was %d.", d, mean.total());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// create temporary matrices
Mat X, Y;
@@ -217,12 +217,12 @@ Mat subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src)
// make sure the data has the correct shape
if(W.cols != d) {
String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// make sure mean is correct if not empty
if(!mean.empty() && (mean.total() != (size_t) W.rows)) {
String error_message = format("Wrong mean shape for the given eigenvector matrix. Expected %d, but was %d.", W.cols, mean.total());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// initalize temporary matrices
Mat X, Y;
@@ -939,7 +939,7 @@ public:
void LDA::save(const String& filename) const {
FileStorage fs(filename, FileStorage::WRITE);
if (!fs.isOpened()) {
CV_Error(CV_StsError, "File can't be opened for writing!");
CV_Error(Error::StsError, "File can't be opened for writing!");
}
this->save(fs);
fs.release();
@@ -949,7 +949,7 @@ void LDA::load(const String& filename) {
void LDA::load(const String& filename) {
FileStorage fs(filename, FileStorage::READ);
if (!fs.isOpened())
CV_Error(CV_StsError, "File can't be opened for writing!");
CV_Error(Error::StsError, "File can't be opened for writing!");
this->load(fs);
fs.release();
}
@@ -1002,12 +1002,12 @@ void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
// want to separate from each other then?
if(C == 1) {
String error_message = "At least two classes are needed to perform a LDA. Reason: Only one class was given!";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// throw error if less labels, than samples
if (labels.size() != static_cast<size_t>(N)) {
String error_message = format("The number of samples must equal the number of labels. Given %d labels, %d samples. ", labels.size(), N);
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
// warn if within-classes scatter matrix becomes singular
if (N < D) {
@@ -1090,7 +1090,7 @@ void LDA::compute(InputArrayOfArrays _src, InputArray _lbls) {
break;
default:
String error_message= format("InputArray Datatype %d is not supported.", _src.kind());
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
break;
}
}
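The argsort helpers in lda.cpp now build sortIdx flags from the scoped SORT_* constants, combined with | rather than +. A sketch of the resulting call:

    #include <opencv2/core.hpp>

    cv::Mat argsortRow(const cv::Mat& src, bool ascending)
    {
        int flags = cv::SORT_EVERY_ROW | (ascending ? cv::SORT_ASCENDING : cv::SORT_DESCENDING);
        cv::Mat indices;
        cv::sortIdx(src.reshape(1, 1), indices, flags);   // CV_32S indices of the sorted order
        return indices;
    }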

@@ -258,7 +258,7 @@ namespace cv
void Octree::buildTree(const std::vector<Point3f>& points3d, int maxLevels, int _minPoints)
{
assert((size_t)maxLevels * 8 < MAX_STACK_SIZE);
CV_Assert((size_t)maxLevels * 8 < MAX_STACK_SIZE);
points.resize(points3d.size());
std::copy(points3d.begin(), points3d.end(), points.begin());
minPoints = _minPoints;

@@ -450,7 +450,7 @@ bool Retina::_convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std::
inputMatToConvert.convertTo(dst, dsttype);
}
else
CV_Error(CV_StsUnsupportedFormat, "input image must be single channel (gray levels), bgr format (color) or bgra (color with transparency which won't be considered");
CV_Error(Error::StsUnsupportedFormat, "input image must be single channel (gray levels), bgr format (color) or bgra (color with transparency which won't be considered");
return imageNumberOfChannels>1; // return bool : false for gray level image processing, true for color mode
}

@@ -422,7 +422,7 @@ bool computeKsi( int transformType,
computeCFuncPtr = computeC_Translation;
}
else
CV_Error( CV_StsBadFlag, "Unsupported value of transformation type flag.");
CV_Error(Error::StsBadFlag, "Unsupported value of transformation type flag.");
Mat C( correspsCount, Cwidth, CV_64FC1 );
Mat dI_dt( correspsCount, 1, CV_64FC1 );

@@ -56,24 +56,24 @@ namespace
{
const static Scalar colors[] =
{
CV_RGB(255, 0, 0),
CV_RGB( 0, 255, 0),
CV_RGB( 0, 0, 255),
CV_RGB(255, 255, 0),
CV_RGB(255, 0, 255),
CV_RGB( 0, 255, 255),
CV_RGB(255, 127, 127),
CV_RGB(127, 127, 255),
CV_RGB(127, 255, 127),
CV_RGB(255, 255, 127),
CV_RGB(127, 255, 255),
CV_RGB(255, 127, 255),
CV_RGB(127, 0, 0),
CV_RGB( 0, 127, 0),
CV_RGB( 0, 0, 127),
CV_RGB(127, 127, 0),
CV_RGB(127, 0, 127),
CV_RGB( 0, 127, 127)
Scalar(255, 0, 0),
Scalar( 0, 255, 0),
Scalar( 0, 0, 255),
Scalar(255, 255, 0),
Scalar(255, 0, 255),
Scalar( 0, 255, 255),
Scalar(255, 127, 127),
Scalar(127, 127, 255),
Scalar(127, 255, 127),
Scalar(255, 255, 127),
Scalar(127, 255, 255),
Scalar(255, 127, 255),
Scalar(127, 0, 0),
Scalar( 0, 127, 0),
Scalar( 0, 0, 127),
Scalar(127, 127, 0),
Scalar(127, 0, 127),
Scalar( 0, 127, 127)
};
size_t colors_mum = sizeof(colors)/sizeof(colors[0]);
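One subtlety worth flagging in this hunk: CV_RGB reordered its arguments into OpenCV's BGR Scalar layout, so the plain Scalar(...) replacements keep the literal numbers but swap red and blue when the values are used on BGR images. Harmless for a debug palette, but worth knowing:

    #include <opencv2/core.hpp>
    using namespace cv;

    // The removed macro was, in the C headers:
    //   #define CV_RGB(r, g, b)  cvScalar((b), (g), (r), 0)
    Scalar oldRed(0, 0, 255);   // what CV_RGB(255, 0, 0) produced: red in BGR
    Scalar newRed(255, 0, 0);   // the literal replacement: blue on BGR images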
@ -199,7 +199,7 @@ void convertTransformMatrix(const float* matrix, float* sseMatrix)
inline __m128 transformSSE(const __m128* matrix, const __m128& in)
{
assert(((size_t)matrix & 15) == 0);
CV_DbgAssert(((size_t)matrix & 15) == 0);
__m128 a0 = _mm_mul_ps(_mm_load_ps((float*)(matrix+0)), _mm_shuffle_ps(in,in,_MM_SHUFFLE(0,0,0,0)));
__m128 a1 = _mm_mul_ps(_mm_load_ps((float*)(matrix+1)), _mm_shuffle_ps(in,in,_MM_SHUFFLE(1,1,1,1)));
__m128 a2 = _mm_mul_ps(_mm_load_ps((float*)(matrix+2)), _mm_shuffle_ps(in,in,_MM_SHUFFLE(2,2,2,2)));
@ -221,8 +221,8 @@ void computeSpinImages( const Octree& Octree, const std::vector<Point3f>& points
float pixelsPerMeter = 1.f / binSize;
float support = imageWidth * binSize;
assert(normals.size() == points.size());
assert(mask.size() == points.size());
CV_Assert(normals.size() == points.size());
CV_Assert(mask.size() == points.size());
size_t points_size = points.size();
mask.resize(points_size);
@ -250,7 +250,7 @@ void computeSpinImages( const Octree& Octree, const std::vector<Point3f>& points
if (mask[i] == 0)
continue;
int t = cvGetThreadNum();
int t = getThreadNum();
std::vector<Point3f>& pointsInSphere = pointsInSpherePool[t];
const Point3f& center = points[i];
@ -289,7 +289,7 @@ void computeSpinImages( const Octree& Octree, const std::vector<Point3f>& points
__m128 ppm4 = _mm_set1_ps(pixelsPerMeter);
__m128i height4m1 = _mm_set1_epi32(spinImage.rows-1);
__m128i width4m1 = _mm_set1_epi32(spinImage.cols-1);
assert( spinImage.step <= 0xffff );
CV_Assert( spinImage.step <= 0xffff );
__m128i step4 = _mm_set1_epi16((short)step);
__m128i zero4 = _mm_setzero_si128();
__m128i one4i = _mm_set1_epi32(1);
@ -472,7 +472,7 @@ float cv::Mesh3D::estimateResolution(float /*tryRatio*/)
return resolution = (float)dist[ dist.size() / 2 ];
#else
CV_Error(CV_StsNotImplemented, "");
CV_Error(Error::StsNotImplemented, "");
return 1.f;
#endif
}
@ -686,16 +686,15 @@ inline float cv::SpinImageModel::groupingCreteria(const Point3f& pointScene1, co
}
cv::SpinImageModel::SpinImageModel(const Mesh3D& _mesh) : mesh(_mesh) , out(0)
cv::SpinImageModel::SpinImageModel(const Mesh3D& _mesh) : mesh(_mesh)
{
if (mesh.vtx.empty())
throw Mesh3D::EmptyMeshException();
defaultParams();
}
cv::SpinImageModel::SpinImageModel() : out(0) { defaultParams(); }
cv::SpinImageModel::~SpinImageModel() {}
void cv::SpinImageModel::setLogger(std::ostream* log) { out = log; }
cv::SpinImageModel::SpinImageModel() { defaultParams(); }
cv::SpinImageModel::~SpinImageModel() {}
void cv::SpinImageModel::defaultParams()
{
@ -756,7 +755,7 @@ Mat cv::SpinImageModel::packRandomScaledSpins(bool separateScale, size_t xCount,
int sz = spins.front().cols;
Mat result((int)(yCount * sz + (yCount - 1)), (int)(xCount * sz + (xCount - 1)), CV_8UC3);
result = colors[(static_cast<int64>(cvGetTickCount()/cvGetTickFrequency())/1000) % colors_mum];
result = colors[(static_cast<int64>(getTickCount()/getTickFrequency())/1000) % colors_mum];
int pos = 0;
for(int y = 0; y < (int)yCount; ++y)
@ -1030,12 +1029,8 @@ private:
matchSpinToModel(scene.spinImages.row(i), indeces, coeffs);
for(size_t t = 0; t < indeces.size(); ++t)
allMatches.push_back(Match(i, indeces[t], coeffs[t]));
if (out) if (i % 100 == 0) *out << "Comparing scene spinimage " << i << " of " << scene.spinImages.rows << std::endl;
}
corr_timer.stop();
if (out) *out << "Spin correlation time = " << corr_timer << std::endl;
if (out) *out << "Matches number = " << allMatches.size() << std::endl;
if(allMatches.empty())
return;
@ -1046,7 +1041,6 @@ private:
allMatches.erase(
remove_if(allMatches.begin(), allMatches.end(), bind2nd(std::less<float>(), maxMeasure * fraction)),
allMatches.end());
if (out) *out << "Matches number [filtered by similarity measure] = " << allMatches.size() << std::endl;
int matchesSize = (int)allMatches.size();
if(matchesSize == 0)
@ -1095,15 +1089,12 @@ private:
allMatches.erase(
std::remove_if(allMatches.begin(), allMatches.end(), std::bind2nd(std::equal_to<float>(), infinity)),
allMatches.end());
if (out) *out << "Matches number [filtered by geometric consistency] = " << allMatches.size() << std::endl;
matchesSize = (int)allMatches.size();
if(matchesSize == 0)
return;
if (out) *out << "grouping ..." << std::endl;
Mat groupingMat((int)matchesSize, (int)matchesSize, CV_32F);
groupingMat = Scalar(0);
@ -1151,8 +1142,6 @@ private:
for(int g = 0; g < matchesSize; ++g)
{
if (out) if (g % 100 == 0) *out << "G = " << g << std::endl;
group_t left = allMatchesInds;
group_t group;
@ -1201,16 +1190,16 @@ private:
cv::TickMeter::TickMeter() { reset(); }
int64 cv::TickMeter::getTimeTicks() const { return sumTime; }
double cv::TickMeter::getTimeMicro() const { return (double)getTimeTicks()/cvGetTickFrequency(); }
double cv::TickMeter::getTimeMicro() const { return (double)getTimeTicks()/getTickFrequency()*1e6; } // getTickFrequency() is ticks per second, not ticks per microsecond
double cv::TickMeter::getTimeMilli() const { return getTimeMicro()*1e-3; }
double cv::TickMeter::getTimeSec() const { return getTimeMilli()*1e-3; }
int64 cv::TickMeter::getCounter() const { return counter; }
void cv::TickMeter::reset() {startTime = 0; sumTime = 0; counter = 0; }
void cv::TickMeter::start(){ startTime = cvGetTickCount(); }
void cv::TickMeter::start(){ startTime = getTickCount(); }
void cv::TickMeter::stop()
{
int64 time = cvGetTickCount();
int64 time = getTickCount();
if ( startTime == 0 )
return;
@ -1220,4 +1209,4 @@ void cv::TickMeter::stop()
startTime = 0;
}
std::ostream& cv::operator<<(std::ostream& out, const TickMeter& tm){ return out << tm.getTimeSec() << "sec"; }
//std::ostream& cv::operator<<(std::ostream& out, const TickMeter& tm){ return out << tm.getTimeSec() << "sec"; }
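For reference, the idiomatic stopwatch with the C++ API. Note that cv::getTickFrequency() returns ticks per second, unlike the old cvGetTickFrequency(), which returned ticks per microsecond, so conversions must be rescaled (as in the corrected getTimeMicro() above). A minimal sketch:

    #include <opencv2/core.hpp>
    using namespace cv;

    void timeIt()
    {
        int64 start = getTickCount();
        // ... timed work here ...
        double seconds = double(getTickCount() - start) / getTickFrequency();
        (void)seconds;
    }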

@ -239,8 +239,8 @@ void StereoVar::VariationalSolver(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level)
void StereoVar::VCycle_MyFAS(Mat &I1, Mat &I2, Mat &I2x, Mat &_u, int level)
{
CvSize imgSize = _u.size();
CvSize frmSize = cvSize((int) (imgSize.width * pyrScale + 0.5), (int) (imgSize.height * pyrScale + 0.5));
Size imgSize = _u.size();
Size frmSize = Size((int) (imgSize.width * pyrScale + 0.5), (int) (imgSize.height * pyrScale + 0.5));
Mat I1_h, I2_h, I2x_h, u_h, U, U_h;
//PRE relaxation
@ -285,7 +285,7 @@ void StereoVar::VCycle_MyFAS(Mat &I1, Mat &I2, Mat &I2x, Mat &_u, int level)
void StereoVar::FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level)
{
double scale = std::pow(pyrScale, (double) level);
CvSize frmSize = cvSize((int) (u.cols * scale + 0.5), (int) (u.rows * scale + 0.5));
Size frmSize = Size((int) (u.cols * scale + 0.5), (int) (u.rows * scale + 0.5));
Mat I1_h, I2_h, I2x_h, u_h;
//scaling DOWN
@ -350,7 +350,7 @@ void StereoVar::autoParams()
void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
{
CV_Assert(left.size() == right.size() && left.type() == right.type());
CvSize imgSize = left.size();
Size imgSize = left.size();
int MaxD = MAX(labs(minDisp), labs(maxDisp));
int SignD = 1; if (MIN(minDisp, maxDisp) < 0) SignD = -1;
if (minDisp >= maxDisp) {MaxD = 256; SignD = 1;}
@ -378,8 +378,8 @@ void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
equalizeHist(rightgray, rightgray);
}
if (poly_sigma > 0.0001) {
GaussianBlur(leftgray, leftgray, cvSize(poly_n, poly_n), poly_sigma);
GaussianBlur(rightgray, rightgray, cvSize(poly_n, poly_n), poly_sigma);
GaussianBlur(leftgray, leftgray, Size(poly_n, poly_n), poly_sigma);
GaussianBlur(rightgray, rightgray, Size(poly_n, poly_n), poly_sigma);
}
if (flags & USE_AUTO_PARAMS) {
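The stereovar.cpp hunks are the same mechanical substitution throughout: the C structs CvSize/cvSize() give way to the cv::Size value type, which constructs directly and is what Mat::size() returns. An illustrative sketch (the kernel size and sigma are arbitrary):

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    using namespace cv;

    void smooth(const Mat& src, Mat& dst)
    {
        Size img = src.size();                      // Mat::size() returns cv::Size
        Size half((int)(img.width * 0.5 + 0.5),     // same rounding as the code above
                  (int)(img.height * 0.5 + 0.5));
        (void)half;
        GaussianBlur(src, dst, Size(5, 5), 1.2);    // kernel size passed as cv::Size
    }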

@ -13,8 +13,6 @@ Finds centers of clusters and groups input samples around the clusters.
.. ocv:cfunction:: int cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, CvTermCriteria termcrit, int attempts=1, CvRNG* rng=0, int flags=0, CvArr* _centers=0, double* compactness=0 )
.. ocv:pyoldfunction:: cv.KMeans2(samples, nclusters, labels, termcrit, attempts=1, flags=0, centers=None) -> float
:param samples: Floating-point matrix of input samples, one row per sample.
:param cluster_count: Number of clusters to split the set by.

@ -36,8 +36,6 @@ Draws a circle.
.. ocv:cfunction:: void cvCircle( CvArr* img, CvPoint center, int radius, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Circle(img, center, radius, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image where the circle is drawn.
:param center: Center of the circle.
@ -66,8 +64,6 @@ Clips the line against the image rectangle.
.. ocv:cfunction:: int cvClipLine( CvSize img_size, CvPoint* pt1, CvPoint* pt2 )
.. ocv:pyoldfunction:: cv.ClipLine(imgSize, pt1, pt2) -> (point1, point2)
:param imgSize: Image size. The image rectangle is ``Rect(0, 0, imgSize.width, imgSize.height)`` .
:param imgRect: Image rectangle.
@ -93,12 +89,8 @@ Draws a simple or thick elliptic arc or fills an ellipse sector.
.. ocv:cfunction:: void cvEllipse( CvArr* img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Ellipse(img, center, axes, angle, start_angle, end_angle, color, thickness=1, lineType=8, shift=0)-> None
.. ocv:cfunction:: void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.EllipseBox(img, box, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image.
:param center: Center of the ellipse.
@ -170,8 +162,6 @@ Fills a convex polygon.
.. ocv:cfunction:: void cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.FillConvexPoly(img, pn, color, lineType=8, shift=0)-> None
:param img: Image.
:param pts: Polygon vertices.
@ -202,8 +192,6 @@ Fills the area bounded by one or more polygons.
.. ocv:cfunction:: void cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, int contours, CvScalar color, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.FillPoly(img, polys, color, lineType=8, shift=0)-> None
:param img: Image.
:param pts: Array of polygons where each polygon is represented as an array of points.
@ -235,8 +223,6 @@ Calculates the width and height of a text string.
.. ocv:cfunction:: void cvGetTextSize( const char* text_string, const CvFont* font, CvSize* text_size, int* baseline )
.. ocv:pyoldfunction:: cv.GetTextSize(textString, font)-> (textSize, baseline)
:param text: Input text string.
:param fontFace: Font to use. See the :ocv:func:`putText` for details.
@ -340,8 +326,6 @@ Draws a line segment connecting two points.
.. ocv:cfunction:: void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Line(img, pt1, pt2, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image.
:param pt1: First point of the line segment.
@ -430,8 +414,6 @@ Draws a simple, thick, or filled up-right rectangle.
.. ocv:cfunction:: void cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.Rectangle(img, pt1, pt2, color, thickness=1, lineType=8, shift=0)-> None
:param img: Image.
:param pt1: Vertex of the rectangle.
@ -464,8 +446,6 @@ Draws several polygonal curves.
.. ocv:cfunction:: void cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, int is_closed, CvScalar color, int thickness=1, int line_type=8, int shift=0 )
.. ocv:pyoldfunction:: cv.PolyLine(img, polys, is_closed, color, thickness=1, lineType=8, shift=0) -> None
:param img: Image.
:param pts: Array of polygonal curves.
@ -497,8 +477,6 @@ Draws contours outlines or filled contours.
.. ocv:cfunction:: void cvDrawContours( CvArr * img, CvSeq* contour, CvScalar external_color, CvScalar hole_color, int max_level, int thickness=1, int line_type=8, CvPoint offset=cvPoint(0,0) )
.. ocv:pyoldfunction:: cv.DrawContours(img, contour, external_color, hole_color, max_level, thickness=1, lineType=8, offset=(0, 0))-> None
:param image: Destination image.
:param contours: All the input contours. Each contour is stored as a point vector.
@ -580,8 +558,6 @@ Draws a text string.
.. ocv:cfunction:: void cvPutText( CvArr* img, const char* text, CvPoint org, const CvFont* font, CvScalar color )
.. ocv:pyoldfunction:: cv.PutText(img, text, org, font, color)-> None
:param img: Image.
:param text: Text string to be drawn.
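The lines removed above are the legacy cv* / cv.* drawing wrappers; the C++ entry points remain. A hedged sketch of the surviving forms (canvas size, coordinates, and colours are arbitrary):

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    using namespace cv;

    void drawDemo()
    {
        Mat canvas(200, 300, CV_8UC3, Scalar::all(0));
        line(canvas, Point(0, 0), Point(299, 199), Scalar(0, 255, 0));
        circle(canvas, Point(150, 100), 40, Scalar(0, 0, 255), 2);              // thickness 2
        rectangle(canvas, Point(10, 10), Point(80, 60), Scalar(255, 0, 0), -1); // -1 = filled
        putText(canvas, "demo", Point(15, 40), FONT_HERSHEY_SIMPLEX, 0.5, Scalar::all(255));
    }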

@ -279,7 +279,6 @@ CloneSeq
Creates a copy of a sequence.
.. ocv:cfunction:: CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage=NULL )
.. ocv:pyoldfunction:: cv.CloneSeq(seq, storage)-> None
:param seq: Sequence
@ -388,9 +387,6 @@ Creates memory storage.
.. ocv:cfunction:: CvMemStorage* cvCreateMemStorage( int block_size=0 )
.. ocv:pyoldfunction:: cv.CreateMemStorage(blockSize=0) -> memstorage
:param block_size: Size of the storage blocks in bytes. If it is 0, the block size is set to a default value (currently about 64K).
The function creates an empty memory storage. See

@ -478,8 +478,6 @@ Clears a specific array element.
.. ocv:cfunction:: void cvClearND( CvArr* arr, const int* idx )
.. ocv:pyoldfunction:: cv.ClearND(arr, idx)-> None
:param arr: Input array
:param idx: Array of the element indices
@ -490,7 +488,6 @@ CloneImage
Makes a full copy of an image, including the header, data, and ROI.
.. ocv:cfunction:: IplImage* cvCloneImage(const IplImage* image)
.. ocv:pyoldfunction:: cv.CloneImage(image) -> image
:param image: The original image
@ -499,7 +496,6 @@ CloneMat
Creates a full matrix copy.
.. ocv:cfunction:: CvMat* cvCloneMat(const CvMat* mat)
.. ocv:pyoldfunction:: cv.CloneMat(mat) -> mat
:param mat: Matrix to be copied
@ -510,7 +506,6 @@ CloneMatND
Creates full copy of a multi-dimensional array and returns a pointer to the copy.
.. ocv:cfunction:: CvMatND* cvCloneMatND(const CvMatND* mat)
.. ocv:pyoldfunction:: cv.CloneMatND(mat) -> matND
:param mat: Input array
@ -530,8 +525,6 @@ ConvertScale
Converts one array to another with optional linear transformation.
.. ocv:cfunction:: void cvConvertScale(const CvArr* src, CvArr* dst, double scale=1, double shift=0)
.. ocv:pyoldfunction:: cv.ConvertScale(src, dst, scale=1.0, shift=0.0)-> None
.. ocv:pyoldfunction:: cv.Convert(src, dst)-> None
::
@ -569,7 +562,6 @@ Copy
Copies one array to another.
.. ocv:cfunction:: void cvCopy(const CvArr* src, CvArr* dst, const CvArr* mask=NULL)
.. ocv:pyoldfunction:: cv.Copy(src, dst, mask=None)-> None
:param src: The source array
@ -591,7 +583,6 @@ CreateData
Allocates array data
.. ocv:cfunction:: void cvCreateData(CvArr* arr)
.. ocv:pyoldfunction:: cv.CreateData(arr) -> None
:param arr: Array header
@ -603,7 +594,6 @@ CreateImage
Creates an image header and allocates the image data.
.. ocv:cfunction:: IplImage* cvCreateImage(CvSize size, int depth, int channels)
.. ocv:pyoldfunction:: cv.CreateImage(size, depth, channels)->image
:param size: Image width and height
@ -621,7 +611,6 @@ CreateImageHeader
Creates an image header but does not allocate the image data.
.. ocv:cfunction:: IplImage* cvCreateImageHeader(CvSize size, int depth, int channels)
.. ocv:pyoldfunction:: cv.CreateImageHeader(size, depth, channels) -> image
:param size: Image width and height
@ -634,7 +623,6 @@ CreateMat
Creates a matrix header and allocates the matrix data.
.. ocv:cfunction:: CvMat* cvCreateMat( int rows, int cols, int type)
.. ocv:pyoldfunction:: cv.CreateMat(rows, cols, type) -> mat
:param rows: Number of rows in the matrix
@ -652,7 +640,6 @@ CreateMatHeader
Creates a matrix header but does not allocate the matrix data.
.. ocv:cfunction:: CvMat* cvCreateMatHeader( int rows, int cols, int type)
.. ocv:pyoldfunction:: cv.CreateMatHeader(rows, cols, type) -> mat
:param rows: Number of rows in the matrix
@ -667,7 +654,6 @@ CreateMatND
Creates the header and allocates the data for a multi-dimensional dense array.
.. ocv:cfunction:: CvMatND* cvCreateMatND( int dims, const int* sizes, int type)
.. ocv:pyoldfunction:: cv.CreateMatND(dims, type) -> matND
:param dims: Number of array dimensions. This must not exceed CV_MAX_DIM (32 by default, but can be changed at build time).
@ -685,7 +671,6 @@ CreateMatNDHeader
Creates a new matrix header but does not allocate the matrix data.
.. ocv:cfunction:: CvMatND* cvCreateMatNDHeader( int dims, const int* sizes, int type)
.. ocv:pyoldfunction:: cv.CreateMatNDHeader(dims, type) -> matND
:param dims: Number of array dimensions
@ -716,7 +701,6 @@ CrossProduct
Calculates the cross product of two 3D vectors.
.. ocv:cfunction:: void cvCrossProduct(const CvArr* src1, const CvArr* src2, CvArr* dst)
.. ocv:pyoldfunction:: cv.CrossProduct(src1, src2, dst)-> None
:param src1: The first source vector
@ -742,7 +726,6 @@ DotProduct
Calculates the dot product of two arrays in Euclidean metrics.
.. ocv:cfunction:: double cvDotProduct(const CvArr* src1, const CvArr* src2)
.. ocv:pyoldfunction:: cv.DotProduct(src1, src2) -> float
:param src1: The first source array
@ -767,11 +750,6 @@ Get?D
.. ocv:cfunction:: CvScalar cvGet3D(const CvArr* arr, int idx0, int idx1, int idx2)
.. ocv:cfunction:: CvScalar cvGetND( const CvArr* arr, const int* idx )
.. ocv:pyoldfunction:: cv.Get1D(arr, idx) -> scalar
.. ocv:pyoldfunction:: cv.Get2D(arr, idx0, idx1) -> scalar
.. ocv:pyoldfunction:: cv.Get3D(arr, idx0, idx1, idx2) -> scalar
.. ocv:pyoldfunction:: cv.GetND(arr, indices) -> scalar
Return a specific array element.
:param arr: Input array
@ -794,10 +772,6 @@ Returns one of more array columns.
.. ocv:cfunction:: CvMat* cvGetCols( const CvArr* arr, CvMat* submat, int start_col, int end_col )
.. ocv:pyoldfunction:: cv.GetCol(arr, col)-> submat
.. ocv:pyoldfunction:: cv.GetCols(arr, startCol, endCol)-> submat
:param arr: Input array
:param submat: Pointer to the resulting sub-array header
@ -815,7 +789,6 @@ GetDiag
Returns one of array diagonals.
.. ocv:cfunction:: CvMat* cvGetDiag(const CvArr* arr, CvMat* submat, int diag=0)
.. ocv:pyoldfunction:: cv.GetDiag(arr, diag=0)-> submat
:param arr: Input array
@ -830,7 +803,6 @@ GetDims
Returns the number of array dimensions.
.. ocv:cfunction:: int cvGetDims(const CvArr* arr, int* sizes=NULL)
.. ocv:pyoldfunction:: cv.GetDims(arr) -> (dim1, dim2, ...)
:param arr: Input array
@ -861,7 +833,6 @@ GetElemType
Returns type of array elements.
.. ocv:cfunction:: int cvGetElemType(const CvArr* arr)
.. ocv:pyoldfunction:: cv.GetElemType(arr)-> int
:param arr: Input array
@ -877,8 +848,6 @@ Returns image header for arbitrary array.
.. ocv:cfunction:: IplImage* cvGetImage( const CvArr* arr, IplImage* image_header )
.. ocv:pyoldfunction:: cv.GetImage(arr) -> iplimage
:param arr: Input array
:param image_header: Pointer to ``IplImage`` structure used as a temporary buffer
@ -890,7 +859,6 @@ GetImageCOI
Returns the index of the channel of interest.
.. ocv:cfunction:: int cvGetImageCOI(const IplImage* image)
.. ocv:pyoldfunction:: cv.GetImageCOI(image) -> int
:param image: A pointer to the image header
@ -902,7 +870,6 @@ GetImageROI
Returns the image ROI.
.. ocv:cfunction:: CvRect cvGetImageROI(const IplImage* image)
.. ocv:pyoldfunction:: cv.GetImageROI(image)-> CvRect
:param image: A pointer to the image header
@ -913,7 +880,6 @@ GetMat
Returns matrix header for arbitrary array.
.. ocv:cfunction:: CvMat* cvGetMat(const CvArr* arr, CvMat* header, int* coi=NULL, int allowND=0)
.. ocv:pyoldfunction:: cv.GetMat(arr, allowND=0) -> mat
:param arr: Input array
@ -1002,11 +968,6 @@ Return a specific element of single-channel 1D, 2D, 3D or nD array.
.. ocv:cfunction:: double cvGetReal3D(const CvArr* arr, int idx0, int idx1, int idx2)
.. ocv:cfunction:: double cvGetRealND( const CvArr* arr, const int* idx )
.. ocv:pyoldfunction:: cv.GetReal1D(arr, idx0)->float
.. ocv:pyoldfunction:: cv.GetReal2D(arr, idx0, idx1)->float
.. ocv:pyoldfunction:: cv.GetReal3D(arr, idx0, idx1, idx2)->float
.. ocv:pyoldfunction:: cv.GetRealND(arr, idx)->float
:param arr: Input array. Must have a single channel.
:param idx0: The first zero-based component of the element index
@ -1030,9 +991,6 @@ Returns array row or row span.
.. ocv:cfunction:: CvMat* cvGetRows( const CvArr* arr, CvMat* submat, int start_row, int end_row, int delta_row=1 )
.. ocv:pyoldfunction:: cv.GetRow(arr, row)-> submat
.. ocv:pyoldfunction:: cv.GetRows(arr, startRow, endRow, deltaRow=1)-> submat
:param arr: Input array
:param submat: Pointer to the resulting sub-array header
@ -1053,7 +1011,6 @@ GetSize
Returns size of matrix or image ROI.
.. ocv:cfunction:: CvSize cvGetSize(const CvArr* arr)
.. ocv:pyoldfunction:: cv.GetSize(arr)-> (width, height)
:param arr: array header
@ -1064,7 +1021,6 @@ GetSubRect
Returns matrix header corresponding to the rectangular sub-array of input image or matrix.
.. ocv:cfunction:: CvMat* cvGetSubRect(const CvArr* arr, CvMat* submat, CvRect rect)
.. ocv:pyoldfunction:: cv.GetSubRect(arr, rect) -> submat
:param arr: Input array
@ -1357,7 +1313,6 @@ ResetImageROI
Resets the image ROI to include the entire image and releases the ROI structure.
.. ocv:cfunction:: void cvResetImageROI(IplImage* image)
.. ocv:pyoldfunction:: cv.ResetImageROI(image)-> None
:param image: A pointer to the image header
@ -1374,8 +1329,6 @@ Changes shape of matrix/image without copying data.
.. ocv:cfunction:: CvMat* cvReshape( const CvArr* arr, CvMat* header, int new_cn, int new_rows=0 )
.. ocv:pyoldfunction:: cv.Reshape(arr, newCn, newRows=0) -> mat
:param arr: Input array
:param header: Output header to be filled
@ -1412,8 +1365,6 @@ Changes the shape of a multi-dimensional array without copying the data.
.. ocv:cfunction:: CvArr* cvReshapeMatND( const CvArr* arr, int sizeof_header, CvArr* header, int new_cn, int new_dims, int* new_sizes )
.. ocv:pyoldfunction:: cv.ReshapeMatND(arr, newCn, newDims) -> mat
:param arr: Input array
:param sizeof_header: Size of output header to distinguish between IplImage, CvMat and CvMatND output headers
@ -1453,7 +1404,6 @@ Set
Sets every element of an array to a given value.
.. ocv:cfunction:: void cvSet(CvArr* arr, CvScalar value, const CvArr* mask=NULL)
.. ocv:pyoldfunction:: cv.Set(arr, value, mask=None)-> None
:param arr: The destination array
@ -1481,12 +1431,6 @@ Change the particular array element.
.. ocv:cfunction:: void cvSetND( CvArr* arr, const int* idx, CvScalar value )
.. ocv:pyoldfunction:: cv.Set1D(arr, idx, value) -> None
.. ocv:pyoldfunction:: cv.Set2D(arr, idx0, idx1, value) -> None
.. ocv:pyoldfunction:: cv.Set3D(arr, idx0, idx1, idx2, value) -> None
.. ocv:pyoldfunction:: cv.SetND(arr, indices, value) -> None
:param arr: Input array
:param idx0: The first zero-based component of the element index
@ -1506,7 +1450,6 @@ SetData
Assigns user data to the array header.
.. ocv:cfunction:: void cvSetData(CvArr* arr, void* data, int step)
.. ocv:pyoldfunction:: cv.SetData(arr, data, step)-> None
:param arr: Array header
@ -1525,7 +1468,6 @@ SetImageCOI
Sets the channel of interest in an IplImage.
.. ocv:cfunction:: void cvSetImageCOI( IplImage* image, int coi)
.. ocv:pyoldfunction:: cv.SetImageCOI(image, coi)-> None
:param image: A pointer to the image header
@ -1539,7 +1481,6 @@ SetImageROI
Sets an image Region Of Interest (ROI) for a given rectangle.
.. ocv:cfunction:: void cvSetImageROI( IplImage* image, CvRect rect)
.. ocv:pyoldfunction:: cv.SetImageROI(image, rect)-> None
:param image: A pointer to the image header
@ -1562,11 +1503,6 @@ Change a specific array element.
.. ocv:cfunction:: void cvSetRealND( CvArr* arr, const int* idx, double value )
.. ocv:pyoldfunction:: cv.SetReal1D(arr, idx, value) -> None
.. ocv:pyoldfunction:: cv.SetReal2D(arr, idx0, idx1, value) -> None
.. ocv:pyoldfunction:: cv.SetReal3D(arr, idx0, idx1, idx2, value) -> None
.. ocv:pyoldfunction:: cv.SetRealND(arr, indices, value) -> None
:param arr: Input array
:param idx0: The first zero-based component of the element index
@ -1588,7 +1524,6 @@ SetZero
Clears the array.
.. ocv:cfunction:: void cvSetZero(CvArr* arr)
.. ocv:pyoldfunction:: cv.SetZero(arr) -> None
:param arr: Array to be cleared
@ -1599,7 +1534,6 @@ mGet
Returns the particular element of single-channel floating-point matrix.
.. ocv:cfunction:: double cvmGet(const CvMat* mat, int row, int col)
.. ocv:pyoldfunction:: cv.mGet(mat, row, col) -> float
:param mat: Input matrix
@ -1614,7 +1548,6 @@ mSet
Sets a specific element of a single-channel floating-point matrix.
.. ocv:cfunction:: void cvmSet(CvMat* mat, int row, int col, double value)
.. ocv:pyoldfunction:: cv.mSet(mat, row, col, value)-> None
:param mat: The matrix
@ -1655,7 +1588,6 @@ RNG
Initializes a random number generator state.
.. ocv:cfunction:: CvRNG cvRNG(int64 seed=-1)
.. ocv:pyoldfunction:: cv.RNG(seed=-1LL)-> CvRNG
:param seed: 64-bit value used to initiate a random sequence
@ -1670,8 +1602,6 @@ Fills an array with random numbers and updates the RNG state.
.. ocv:cfunction:: void cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, CvScalar param1, CvScalar param2 )
.. ocv:pyoldfunction:: cv.RandArr(rng, arr, distType, param1, param2)-> None
:param rng: CvRNG state initialized by :ocv:cfunc:`RNG`
:param arr: The destination array
@ -1695,7 +1625,6 @@ RandInt
Returns a 32-bit unsigned integer and updates RNG.
.. ocv:cfunction:: unsigned cvRandInt(CvRNG* rng)
.. ocv:pyoldfunction:: cv.RandInt(rng)-> unsigned
:param rng: CvRNG state initialized by :ocv:cfunc:`RNG`.
@ -1707,7 +1636,6 @@ RandReal
Returns a floating-point random number and updates RNG.
.. ocv:cfunction:: double cvRandReal(CvRNG* rng)
.. ocv:pyoldfunction:: cv.RandReal(rng) -> float
:param rng: RNG state initialized by :ocv:cfunc:`RNG`
@ -1718,8 +1646,6 @@ fromarray
---------
Create a CvMat from an object that supports the array interface.
.. ocv:pyoldfunction:: cv.fromarray(array, allowND=False) -> mat
:param object: Any object that supports the array interface
:param allowND: If true, will return a CvMatND

@ -327,8 +327,6 @@ Loads an object from a file.
.. ocv:cfunction:: void* cvLoad( const char* filename, CvMemStorage* memstorage=NULL, const char* name=NULL, const char** real_name=NULL )
.. ocv:pyoldfunction:: cv.Load(filename, storage=None, name=None)-> generic
:param filename: File name
:param memstorage: Memory storage for dynamic structures, such as :ocv:struct:`CvSeq` or :ocv:struct:`CvGraph` . It is not used for matrices or images.
@ -596,8 +594,6 @@ Saves an object to a file.
.. ocv:cfunction:: void cvSave( const char* filename, const void* struct_ptr, const char* name=NULL, const char* comment=NULL, CvAttrList attributes=cvAttrList() )
.. ocv:pyoldfunction:: cv.Save(filename, structPtr, name=None, comment=None)-> None
:param filename: File name
:param struct_ptr: Object to save

@ -36,8 +36,6 @@ Calculates the per-element absolute difference between two arrays or between an
.. ocv:cfunction:: void cvAbsDiff(const CvArr* src1, const CvArr* src2, CvArr* dst)
.. ocv:cfunction:: void cvAbsDiffS(const CvArr* src, CvArr* dst, CvScalar value)
.. ocv:pyoldfunction:: cv.AbsDiff(src1, src2, dst)-> None
.. ocv:pyoldfunction:: cv.AbsDiffS(src, dst, value)-> None
:param src1: first input array or a scalar.
@ -90,8 +88,6 @@ Calculates the per-element sum of two arrays or an array and a scalar.
.. ocv:cfunction:: void cvAdd(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL)
.. ocv:cfunction:: void cvAddS(const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL)
.. ocv:pyoldfunction:: cv.Add(src1, src2, dst, mask=None)-> None
.. ocv:pyoldfunction:: cv.AddS(src, value, dst, mask=None)-> None
:param src1: first input array or a scalar.
@ -160,7 +156,6 @@ Calculates the weighted sum of two arrays.
.. ocv:pyfunction:: cv2.addWeighted(src1, alpha, src2, beta, gamma[, dst[, dtype]]) -> dst
.. ocv:cfunction:: void cvAddWeighted(const CvArr* src1, double alpha, const CvArr* src2, double beta, double gamma, CvArr* dst)
.. ocv:pyoldfunction:: cv.AddWeighted(src1, alpha, src2, beta, gamma, dst)-> None
:param src1: first input array.
@ -210,8 +205,6 @@ Calculates the per-element bit-wise conjunction of two arrays or an array and a
.. ocv:cfunction:: void cvAnd(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL)
.. ocv:cfunction:: void cvAndS(const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL)
.. ocv:pyoldfunction:: cv.And(src1, src2, dst, mask=None)-> None
.. ocv:pyoldfunction:: cv.AndS(src, value, dst, mask=None)-> None
:param src1: first input array or a scalar.
@ -262,7 +255,6 @@ Inverts every bit of an array.
.. ocv:pyfunction:: cv2.bitwise_not(src[, dst[, mask]]) -> dst
.. ocv:cfunction:: void cvNot(const CvArr* src, CvArr* dst)
.. ocv:pyoldfunction:: cv.Not(src, dst)-> None
:param src: input array.
@ -290,8 +282,6 @@ Calculates the per-element bit-wise disjunction of two arrays or an array and a
.. ocv:cfunction:: void cvOr(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL)
.. ocv:cfunction:: void cvOrS(const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL)
.. ocv:pyoldfunction:: cv.Or(src1, src2, dst, mask=None)-> None
.. ocv:pyoldfunction:: cv.OrS(src, value, dst, mask=None)-> None
:param src1: first input array or a scalar.
@ -342,8 +332,6 @@ Calculates the per-element bit-wise "exclusive or" operation on two arrays or an
.. ocv:cfunction:: void cvXor(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL)
.. ocv:cfunction:: void cvXorS(const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL)
.. ocv:pyoldfunction:: cv.Xor(src1, src2, dst, mask=None)-> None
.. ocv:pyoldfunction:: cv.XorS(src, value, dst, mask=None)-> None
:param src1: first input array or a scalar.
@ -396,8 +384,6 @@ Calculates the covariance matrix of a set of vectors.
.. ocv:cfunction:: void cvCalcCovarMatrix( const CvArr** vects, int count, CvArr* cov_mat, CvArr* avg, int flags )
.. ocv:pyoldfunction:: cv.CalcCovarMatrix(vects, covMat, avg, flags)-> None
:param samples: samples stored either as separate matrices or as rows/columns of a single matrix.
:param nsamples: number of samples when they are stored separately.
@ -456,8 +442,6 @@ Calculates the magnitude and angle of 2D vectors.
.. ocv:cfunction:: void cvCartToPolar( const CvArr* x, const CvArr* y, CvArr* magnitude, CvArr* angle=NULL, int angle_in_degrees=0 )
.. ocv:pyoldfunction:: cv.CartToPolar(x, y, magnitude, angle=None, angleInDegrees=0)-> None
:param x: array of x-coordinates; this must be a single-precision or double-precision floating-point array.
:param y: array of y-coordinates, that must have the same size and same type as ``x``.
@ -518,12 +502,8 @@ Performs the per-element comparison of two arrays or an array and scalar value.
.. ocv:cfunction:: void cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op )
.. ocv:pyoldfunction:: cv.Cmp(src1, src2, dst, cmpOp)-> None
.. ocv:cfunction:: void cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op )
.. ocv:pyoldfunction:: cv.CmpS(src, value, dst, cmpOp)-> None
:param src1: first input array or a scalar (in the case of ``cvCmp``, ``cv.Cmp``, ``cvCmpS``, ``cv.CmpS`` it is always an array); when it is an array, it must have a single channel.
:param src2: second input array or a scalar (in the case of ``cvCmp`` and ``cv.Cmp`` it is always an array; in the case of ``cvCmpS``, ``cv.CmpS`` it is always a scalar); when it is an array, it must have a single channel.
@ -624,7 +604,6 @@ Scales, calculates absolute values, and converts the result to 8-bit.
.. ocv:pyfunction:: cv2.convertScaleAbs(src[, dst[, alpha[, beta]]]) -> dst
.. ocv:cfunction:: void cvConvertScaleAbs(const CvArr* src, CvArr* dst, double scale=1, double shift=0)
.. ocv:pyoldfunction:: cv.ConvertScaleAbs(src, dst, scale=1.0, shift=0.0)-> None
:param src: input array.
@ -668,8 +647,6 @@ Counts non-zero array elements.
.. ocv:cfunction:: int cvCountNonZero(const CvArr* arr)
.. ocv:pyoldfunction:: cv.CountNonZero(arr)-> int
:param src: single-channel array.
The function returns the number of non-zero elements in ``src`` :
@ -760,7 +737,6 @@ Performs a forward or inverse discrete Cosine transform of 1D or 2D array.
.. ocv:pyfunction:: cv2.dct(src[, dst[, flags]]) -> dst
.. ocv:cfunction:: void cvDCT(const CvArr* src, CvArr* dst, int flags)
.. ocv:pyoldfunction:: cv.DCT(src, dst, flags)-> None
:param src: input floating-point array.
@ -855,8 +831,6 @@ Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-
.. ocv:cfunction:: void cvDFT( const CvArr* src, CvArr* dst, int flags, int nonzero_rows=0 )
.. ocv:pyoldfunction:: cv.DFT(src, dst, flags, nonzeroRows=0)-> None
:param src: input array that could be real or complex.
:param dst: output array whose size and type depends on the ``flags`` .
@ -1011,7 +985,6 @@ Performs per-element division of two arrays or a scalar by an array.
.. ocv:pyfunction:: cv2.divide(scale, src2[, dst[, dtype]]) -> dst
.. ocv:cfunction:: void cvDiv(const CvArr* src1, const CvArr* src2, CvArr* dst, double scale=1)
.. ocv:pyoldfunction:: cv.Div(src1, src2, dst, scale=1) -> None
:param src1: first input array.
@ -1058,8 +1031,6 @@ Returns the determinant of a square floating-point matrix.
.. ocv:cfunction:: double cvDet( const CvArr* mat )
.. ocv:pyoldfunction:: cv.Det(mat) -> float
:param mtx: input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type and square size.
The function ``determinant`` calculates and returns the determinant of the specified matrix. For small matrices ( ``mtx.cols=mtx.rows<=3`` ),
@ -1087,8 +1058,6 @@ Calculates eigenvalues and eigenvectors of a symmetric matrix.
.. ocv:cfunction:: void cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, double eps=0, int lowindex=-1, int highindex=-1 )
.. ocv:pyoldfunction:: cv.EigenVV(mat, evects, evals, eps, lowindex=-1, highindex=-1)-> None
:param src: input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type, square size and be symmetrical (``src`` :sup:`T` == ``src``).
:param eigenvalues: output vector of eigenvalues of the same type as ``src``; the eigenvalues are stored in the descending order.
@ -1118,7 +1087,6 @@ Calculates the exponent of every array element.
.. ocv:pyfunction:: cv2.exp(src[, dst]) -> dst
.. ocv:cfunction:: void cvExp(const CvArr* src, CvArr* dst)
.. ocv:pyoldfunction:: cv.Exp(src, dst)-> None
:param src: input array.
@ -1199,8 +1167,6 @@ Flips a 2D array around vertical, horizontal, or both axes.
.. ocv:cfunction:: void cvFlip( const CvArr* src, CvArr* dst=NULL, int flip_mode=0 )
.. ocv:pyoldfunction:: cv.Flip(src, dst=None, flipMode=0)-> None
:param src: input array.
:param dst: output array of the same size and type as ``src``.
@ -1247,7 +1213,6 @@ Performs generalized matrix multiplication.
.. ocv:pyfunction:: cv2.gemm(src1, src2, alpha, src3, gamma[, dst[, flags]]) -> dst
.. ocv:cfunction:: void cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, const CvArr* src3, double beta, CvArr* dst, int tABC=0)
.. ocv:pyoldfunction:: cv.GEMM(src1, src2, alpha, src3, beta, dst, tABC=0)-> None
:param src1: first multiplied input matrix that should have ``CV_32FC1``, ``CV_64FC1``, ``CV_32FC2``, or ``CV_64FC2`` type.
@ -1291,7 +1256,6 @@ Returns the optimal DFT size for a given vector size.
.. ocv:pyfunction:: cv2.getOptimalDFTSize(vecsize) -> retval
.. ocv:cfunction:: int cvGetOptimalDFTSize(int size0)
.. ocv:pyoldfunction:: cv.GetOptimalDFTSize(size0)-> int
:param vecsize: vector size.
@ -1375,8 +1339,6 @@ Checks if array elements lie between the elements of two other arrays.
.. ocv:cfunction:: void cvInRange(const CvArr* src, const CvArr* lower, const CvArr* upper, CvArr* dst)
.. ocv:cfunction:: void cvInRangeS(const CvArr* src, CvScalar lower, CvScalar upper, CvArr* dst)
.. ocv:pyoldfunction:: cv.InRange(src, lower, upper, dst)-> None
.. ocv:pyoldfunction:: cv.InRangeS(src, lower, upper, dst)-> None
:param src: first input array.
@ -1417,8 +1379,6 @@ Finds the inverse or pseudo-inverse of a matrix.
.. ocv:cfunction:: double cvInvert( const CvArr* src, CvArr* dst, int method=CV_LU )
.. ocv:pyoldfunction:: cv.Invert(src, dst, method=CV_LU) -> float
:param src: input floating-point ``M x N`` matrix.
:param dst: output matrix of ``N x M`` size and the same type as ``src``.
@ -1456,7 +1416,6 @@ Calculates the natural logarithm of every array element.
.. ocv:pyfunction:: cv2.log(src[, dst]) -> dst
.. ocv:cfunction:: void cvLog(const CvArr* src, CvArr* dst)
.. ocv:pyoldfunction:: cv.Log(src, dst)-> None
:param src: input array.
@ -1492,7 +1451,6 @@ Performs a look-up table transform of an array.
.. ocv:pyfunction:: cv2.LUT(src, lut[, dst]) -> dst
.. ocv:cfunction:: void cvLUT(const CvArr* src, CvArr* dst, const CvArr* lut)
.. ocv:pyoldfunction:: cv.LUT(src, dst, lut)-> None
:param src: input array of 8-bit elements.
@ -1558,8 +1516,6 @@ Calculates the Mahalanobis distance between two vectors.
.. ocv:cfunction:: double cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat )
.. ocv:pyoldfunction:: cv.Mahalonobis(vec1, vec2, mat) -> None
:param vec1: first 1D input vector.
:param vec2: second 1D input vector.
@ -1596,8 +1552,6 @@ Calculates per-element maximum of two arrays or an array and a scalar.
.. ocv:cfunction:: void cvMax(const CvArr* src1, const CvArr* src2, CvArr* dst)
.. ocv:cfunction:: void cvMaxS(const CvArr* src, double value, CvArr* dst)
.. ocv:pyoldfunction:: cv.Max(src1, src2, dst)-> None
.. ocv:pyoldfunction:: cv.MaxS(src, value, dst)-> None
:param src1: first input array.
@ -1643,8 +1597,6 @@ Calculates an average (mean) of array elements.
.. ocv:cfunction:: CvScalar cvAvg( const CvArr* arr, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Avg(arr, mask=None) -> scalar
:param src: input array that should have from 1 to 4 channels so that the result can be stored in :ocv:class:`Scalar_` .
:param mask: optional operation mask.
@ -1676,8 +1628,6 @@ Calculates a mean and standard deviation of array elements.
.. ocv:cfunction:: void cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.AvgSdv(arr, mask=None) -> (mean, stdDev)
:param src: input array that should have from 1 to 4 channels so that the results can be stored in :ocv:class:`Scalar_` 's.
:param mean: output parameter: calculated mean value.
@ -1717,7 +1667,6 @@ Creates one multichannel array out of several single-channel ones.
.. ocv:pyfunction:: cv2.merge(mv[, dst]) -> dst
.. ocv:cfunction:: void cvMerge(const CvArr* src0, const CvArr* src1, const CvArr* src2, const CvArr* src3, CvArr* dst)
.. ocv:pyoldfunction:: cv.Merge(src0, src1, src2, src3, dst)-> None
:param mv: input array or vector of matrices to be merged; all the matrices in ``mv`` must have the same size and the same depth.
@ -1757,8 +1706,6 @@ Calculates per-element minimum of two arrays or an array and a scalar.
.. ocv:cfunction:: void cvMin(const CvArr* src1, const CvArr* src2, CvArr* dst)
.. ocv:cfunction:: void cvMinS(const CvArr* src, double value, CvArr* dst)
.. ocv:pyoldfunction:: cv.Min(src1, src2, dst)-> None
.. ocv:pyoldfunction:: cv.MinS(src, value, dst)-> None
:param src1: first input array.
@ -1838,8 +1785,6 @@ Finds the global minimum and maximum in an array.
.. ocv:cfunction:: void cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, CvPoint* min_loc=NULL, CvPoint* max_loc=NULL, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.MinMaxLoc(arr, mask=None)-> (minVal, maxVal, minLoc, maxLoc)
:param src: input single-channel array.
:param minVal: pointer to the returned minimum value; ``NULL`` is used if not required.
@ -1888,8 +1833,6 @@ Copies specified channels from input arrays to the specified channels of output
.. ocv:cfunction:: void cvMixChannels( const CvArr** src, int src_count, CvArr** dst, int dst_count, const int* from_to, int pair_count )
.. ocv:pyoldfunction:: cv.MixChannels(src, dst, fromTo) -> None
:param src: input array or vector of matrices; all of the matrices must have the same size and the same depth.
:param nsrcs: number of matrices in ``src``.
@ -1942,7 +1885,6 @@ Performs the per-element multiplication of two Fourier spectrums.
.. ocv:pyfunction:: cv2.mulSpectrums(a, b, flags[, c[, conjB]]) -> c
.. ocv:cfunction:: void cvMulSpectrums( const CvArr* src1, const CvArr* src2, CvArr* dst, int flags)
.. ocv:pyoldfunction:: cv.MulSpectrums(src1, src2, dst, flags)-> None
:param src1: first input array.
@ -1972,7 +1914,6 @@ Calculates the per-element scaled product of two arrays.
.. ocv:pyfunction:: cv2.multiply(src1, src2[, dst[, scale[, dtype]]]) -> dst
.. ocv:cfunction:: void cvMul(const CvArr* src1, const CvArr* src2, CvArr* dst, double scale=1)
.. ocv:pyoldfunction:: cv.Mul(src1, src2, dst, scale=1) -> None
:param src1: first input array.
@ -2022,8 +1963,6 @@ Calculates the product of a matrix and its transposition.
.. ocv:cfunction:: void cvMulTransposed( const CvArr* src, CvArr* dst, int order, const CvArr* delta=NULL, double scale=1. )
.. ocv:pyoldfunction:: cv.MulTransposed(src, dst, order, delta=None, scale=1.0) -> None
:param src: input single-channel matrix. Note that unlike :ocv:func:`gemm`, the function can multiply not only floating-point matrices.
:param dst: output square matrix.
@ -2074,8 +2013,6 @@ Calculates an absolute array norm, an absolute difference norm, or a relative di
.. ocv:cfunction:: double cvNorm( const CvArr* arr1, const CvArr* arr2=NULL, int norm_type=CV_L2, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Norm(arr1, arr2, normType=CV_L2, mask=None) -> float
:param src1: first input array.
:param src2: second input array of the same size and the same type as ``src1``.
@ -2331,7 +2268,6 @@ Performs the perspective matrix transformation of vectors.
.. ocv:pyfunction:: cv2.perspectiveTransform(src, m[, dst]) -> dst
.. ocv:cfunction:: void cvPerspectiveTransform(const CvArr* src, CvArr* dst, const CvMat* mat)
.. ocv:pyoldfunction:: cv.PerspectiveTransform(src, dst, mat)-> None
:param src: input two-channel or three-channel floating-point array; each element is a 2D/3D vector to be transformed.
@ -2405,8 +2341,6 @@ Calculates x and y coordinates of 2D vectors from their magnitude and angle.
.. ocv:cfunction:: void cvPolarToCart( const CvArr* magnitude, const CvArr* angle, CvArr* x, CvArr* y, int angle_in_degrees=0 )
.. ocv:pyoldfunction:: cv.PolarToCart(magnitude, angle, x, y, angleInDegrees=0)-> None
:param magnitude: input floating-point array of magnitudes of 2D vectors; it can be an empty matrix (``=Mat()``), in which case the function assumes that all the magnitudes are 1; if it is not empty, it must have the same size and type as ``angle``.
:param angle: input floating-point array of angles of 2D vectors.
@ -2446,7 +2380,6 @@ Raises every array element to a power.
.. ocv:pyfunction:: cv2.pow(src, power[, dst]) -> dst
.. ocv:cfunction:: void cvPow( const CvArr* src, CvArr* dst, double power)
.. ocv:pyoldfunction:: cv.Pow(src, dst, power)-> None
:param src: input array.
@ -2714,7 +2647,6 @@ Reduces a matrix to a vector.
.. ocv:pyfunction:: cv2.reduce(src, dim, rtype[, dst[, dtype]]) -> dst
.. ocv:cfunction:: void cvReduce(const CvArr* src, CvArr* dst, int dim=-1, int op=CV_REDUCE_SUM)
.. ocv:pyoldfunction:: cv.Reduce(src, dst, dim=-1, op=CV_REDUCE_SUM)-> None
:param src: input 2D matrix.
@ -2752,8 +2684,6 @@ Fills the output array with repeated copies of the input array.
.. ocv:cfunction:: void cvRepeat(const CvArr* src, CvArr* dst)
.. ocv:pyoldfunction:: cv.Repeat(src, dst)-> None
:param src: input array to replicate.
:param dst: output array of the same type as ``src``.
@ -2788,7 +2718,6 @@ Calculates the sum of a scaled array and another array.
.. ocv:pyfunction:: cv2.scaleAdd(src1, alpha, src2[, dst]) -> dst
.. ocv:cfunction:: void cvScaleAdd(const CvArr* src1, CvScalar scale, const CvArr* src2, CvArr* dst)
.. ocv:pyoldfunction:: cv.ScaleAdd(src1, scale, src2, dst)-> None
:param src1: first input array.
@ -2832,8 +2761,6 @@ Initializes a scaled identity matrix.
.. ocv:cfunction:: void cvSetIdentity(CvArr* mat, CvScalar value=cvRealScalar(1))
.. ocv:pyoldfunction:: cv.SetIdentity(mat, value=1)-> None
:param mtx: matrix to initialize (not necessarily square).
:param value: value to assign to diagonal elements.
@ -2870,7 +2797,6 @@ Solves one or more linear systems or least-squares problems.
.. ocv:pyfunction:: cv2.solve(src1, src2[, dst[, flags]]) -> retval, dst
.. ocv:cfunction:: int cvSolve(const CvArr* src1, const CvArr* src2, CvArr* dst, int method=CV_LU)
.. ocv:pyoldfunction:: cv.Solve(A, B, X, method=CV_LU)-> None
:param src1: input matrix on the left-hand side of the system.
@ -2921,8 +2847,6 @@ Finds the real roots of a cubic equation.
.. ocv:cfunction:: int cvSolveCubic( const CvMat* coeffs, CvMat* roots )
.. ocv:pyoldfunction:: cv.SolveCubic(coeffs, roots)-> None
:param coeffs: equation coefficients, an array of 3 or 4 elements.
:param roots: output array of real roots that has 1 or 3 elements.
@ -3048,8 +2972,6 @@ Divides a multi-channel array into several single-channel arrays.
.. ocv:cfunction:: void cvSplit(const CvArr* src, CvArr* dst0, CvArr* dst1, CvArr* dst2, CvArr* dst3)
.. ocv:pyoldfunction:: cv.Split(src, dst0, dst1, dst2, dst3)-> None
:param src: input multi-channel array.
:param mv: output array or vector of arrays; in the first variant of the function the number of arrays must match ``src.channels()``; the arrays themselves are reallocated, if needed.
@ -3080,7 +3002,6 @@ Calculates a square root of array elements.
.. ocv:pyfunction:: cv2.sqrt(src[, dst]) -> dst
.. ocv:cfunction:: float cvSqrt(float value)
.. ocv:pyoldfunction:: cv.Sqrt(value)-> float
:param src: input floating-point array.
@ -3107,10 +3028,6 @@ Calculates the per-element difference between two arrays or array and a scalar.
.. ocv:cfunction:: void cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL )
.. ocv:cfunction:: void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Sub(src1, src2, dst, mask=None) -> None
.. ocv:pyoldfunction:: cv.SubRS(src, value, dst, mask=None) -> None
.. ocv:pyoldfunction:: cv.SubS(src, value, dst, mask=None) -> None
:param src1: first input array or a scalar.
:param src2: second input array or a scalar.
@ -3243,8 +3160,6 @@ Performs SVD of a matrix
.. ocv:cfunction:: void cvSVD( CvArr* A, CvArr* W, CvArr* U=NULL, CvArr* V=NULL, int flags=0 )
.. ocv:pyoldfunction:: cv.SVD(A, W, U=None, V=None, flags=0) -> None
:param src: decomposed matrix
:param w: calculated singular values
@ -3293,8 +3208,6 @@ Performs a singular value back substitution.
.. ocv:cfunction:: void cvSVBkSb( const CvArr* W, const CvArr* U, const CvArr* V, const CvArr* B, CvArr* X, int flags )
.. ocv:pyoldfunction:: cv.SVBkSb(W, U, V, B, X, flags) -> None
:param w: singular values
:param u: left singular vectors
@ -3329,8 +3242,6 @@ Calculates the sum of array elements.
.. ocv:cfunction:: CvScalar cvSum(const CvArr* arr)
.. ocv:pyoldfunction:: cv.Sum(arr) -> scalar
:param arr: input array that must have from 1 to 4 channels.
The functions ``sum`` calculate and return the sum of array elements, independently for each channel.
@ -3374,8 +3285,6 @@ Returns the trace of a matrix.
.. ocv:cfunction:: CvScalar cvTrace(const CvArr* mat)
.. ocv:pyoldfunction:: cv.Trace(mat) -> scalar
:param mat: input matrix.
The function ``trace`` returns the sum of the diagonal elements of the matrix ``mat``.
@ -3396,8 +3305,6 @@ Performs the matrix transformation of every array element.
.. ocv:cfunction:: void cvTransform( const CvArr* src, CvArr* dst, const CvMat* transmat, const CvMat* shiftvec=NULL )
.. ocv:pyoldfunction:: cv.Transform(src, dst, transmat, shiftvec=None)-> None
:param src: input array that must have as many channels (1 to 4) as ``m.cols`` or ``m.cols-1``.
:param dst: output array of the same size and depth as ``src``; it has as many channels as ``m.rows``.
@ -3447,7 +3354,6 @@ Transposes a matrix.
.. ocv:pyfunction:: cv2.transpose(src[, dst]) -> dst
.. ocv:cfunction:: void cvTranspose(const CvArr* src, CvArr* dst)
.. ocv:pyoldfunction:: cv.Transpose(src, dst)-> None
:param src: input array.
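As above, every deletion in this file is a Python 1.x cv.* wrapper line; the C++ functions stay documented. A brief sketch of the modern calls for a few of the affected entries (matrix contents are arbitrary):

    #include <opencv2/core.hpp>
    using namespace cv;

    void arithmeticDemo()
    {
        Mat a = Mat::eye(3, 3, CV_32F), b = Mat::ones(3, 3, CV_32F), dst;
        add(a, b, dst);                  // was cvAdd / cv.Add
        subtract(a, Scalar(1), dst);     // was cvSubS / cv.SubS
        double det = determinant(a);     // was cvDet / cv.Det
        Scalar s = sum(a);               // was cvSum / cv.Sum (per-channel sums)
        (void)det; (void)s;
    }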

@ -76,7 +76,6 @@ Calculates the angle of a 2D vector in degrees.
.. ocv:pyfunction:: cv2.fastAtan2(y, x) -> retval
.. ocv:cfunction:: float cvFastArctan(float y, float x)
.. ocv:pyoldfunction:: cv.FastArctan(y, x)-> float
:param x: x-coordinate of the vector.
@ -95,8 +94,6 @@ Computes the cube root of an argument.
.. ocv:cfunction:: float cvCbrt( float value )
.. ocv:pyoldfunction:: cv.Cbrt(value)-> float
:param val: A function argument.
The function ``cubeRoot`` computes :math:`\sqrt[3]{\texttt{val}}`. Negative arguments are handled correctly. NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for single-precision data.
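A quick illustration of the scalar helpers described here (the cubeRoot result is exact; fastAtan2 is accurate to about 0.3 degrees and returns degrees in [0, 360)):

    #include <opencv2/core.hpp>
    using namespace cv;

    float r = cubeRoot(-27.0f);        // -3.0f: negative inputs are handled
    float a = fastAtan2(1.0f, 1.0f);   // ~45.0f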
@ -107,7 +104,6 @@ Ceil
Rounds a floating-point number to the nearest integer not smaller than the original.
.. ocv:cfunction:: int cvCeil(double value)
.. ocv:pyoldfunction:: cv.Ceil(value) -> int
:param value: floating-point number. If the value is outside of ``INT_MIN`` ... ``INT_MAX`` range, the result is not defined.
@ -123,7 +119,6 @@ Floor
Rounds a floating-point number to the nearest integer not larger than the original.
.. ocv:cfunction:: int cvFloor(double value)
.. ocv:pyoldfunction:: cv.Floor(value) -> int
:param value: floating-point number. If the value is outside of ``INT_MIN`` ... ``INT_MAX`` range, the result is not defined.
@ -139,7 +134,6 @@ Round
Rounds a floating-point number to the nearest integer.
.. ocv:cfunction:: int cvRound(double value)
.. ocv:pyoldfunction:: cv.Round(value) -> int
:param value: floating-point number. If the value is outside of ``INT_MIN`` ... ``INT_MAX`` range, the result is not defined.
@ -149,7 +143,6 @@ IsInf
Determines if the argument is Infinity.
.. ocv:cfunction:: int cvIsInf(double value)
.. ocv:pyoldfunction:: cv.IsInf(value)-> int
:param value: The input floating-point value
@ -160,7 +153,6 @@ IsNaN
Determines if the argument is Not A Number.
.. ocv:cfunction:: int cvIsNaN(double value)
.. ocv:pyoldfunction:: cv.IsNaN(value)-> int
:param value: The input floating-point value

@ -717,8 +717,8 @@ public:
template<typename _Tp> MatConstIterator_<_Tp> begin() const;
template<typename _Tp> MatConstIterator_<_Tp> end() const;
enum { MAGIC_VAL=0x42FF0000, AUTO_STEP=0, CONTINUOUS_FLAG=CV_MAT_CONT_FLAG, SUBMATRIX_FLAG=CV_SUBMAT_FLAG };
enum { MAGIC_MASK=0xFFFF0000, TYPE_MASK=0x00000FFF, DEPTH_MASK=7 };
enum { MAGIC_VAL = 0x42FF0000, AUTO_STEP = 0, CONTINUOUS_FLAG = CV_MAT_CONT_FLAG, SUBMATRIX_FLAG = CV_SUBMAT_FLAG };
enum { MAGIC_MASK = 0xFFFF0000, TYPE_MASK = 0x00000FFF, DEPTH_MASK = 7 };
/*! includes several bit-fields:
- the magic signature

@ -67,10 +67,6 @@ Loads an image from a file.
.. ocv:cfunction:: CvMat* cvLoadImageM( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR )
.. ocv:pyoldfunction:: cv.LoadImage(filename, iscolor=CV_LOAD_IMAGE_COLOR) -> None
.. ocv:pyoldfunction:: cv.LoadImageM(filename, iscolor=CV_LOAD_IMAGE_COLOR) -> None
:param filename: Name of file to be loaded.
:param flags: Flags specifying the color type of a loaded image:
@ -126,8 +122,6 @@ Saves an image to a specified file.
.. ocv:cfunction:: int cvSaveImage( const char* filename, const CvArr* image, const int* params=0 )
.. ocv:pyoldfunction:: cv.SaveImage(filename, image)-> None
:param filename: Name of the file.
:param image: Image to be saved.
@ -245,9 +239,7 @@ VideoCapture constructors.
.. ocv:pyfunction:: cv2.VideoCapture(device) -> <VideoCapture object>
.. ocv:cfunction:: CvCapture* cvCaptureFromCAM( int device )
.. ocv:pyoldfunction:: cv.CaptureFromCAM(index) -> CvCapture
.. ocv:cfunction:: CvCapture* cvCaptureFromFile( const char* filename )
.. ocv:pyoldfunction:: cv.CaptureFromFile(filename) -> CvCapture
:param filename: name of the opened video file
@ -308,8 +300,6 @@ Grabs the next frame from video file or capturing device.
.. ocv:cfunction:: int cvGrabFrame(CvCapture* capture)
.. ocv:pyoldfunction:: cv.GrabFrame(capture) -> int
The methods/functions grab the next frame from a video file or camera and return true (non-zero) in case of success.
The primary use of the function is in multi-camera environments, especially when the cameras do not have hardware synchronization. That is, you call ``VideoCapture::grab()`` for each camera and after that call the slower method ``VideoCapture::retrieve()`` to decode and get the frame from each camera. This way the overhead of demosaicing or motion-JPEG decompression is eliminated, and the retrieved frames from different cameras will be closer in time.
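A hedged sketch of the two-phase pattern just described, with two hypothetical camera indices:

    #include <opencv2/core.hpp>
    #include <opencv2/highgui.hpp>
    using namespace cv;

    void captureStereoPair(Mat& f0, Mat& f1)
    {
        VideoCapture cam0(0), cam1(1);    // hypothetical camera indices
        // Phase 1: sample both sensors as close together in time as possible.
        if (cam0.grab() && cam1.grab())
        {
            // Phase 2: the slow decoding happens after both grabs.
            cam0.retrieve(f0);
            cam1.retrieve(f1);
        }
    }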
@ -327,8 +317,6 @@ Decodes and returns the grabbed video frame.
.. ocv:cfunction:: IplImage* cvRetrieveFrame( CvCapture* capture, int streamIdx=0 )
.. ocv:pyoldfunction:: cv.RetrieveFrame(capture) -> image
The methods/functions decode and return the just grabbed frame. If no frame has been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer.
.. note:: OpenCV 1.x functions ``cvRetrieveFrame`` and ``cv.RetrieveFrame`` return image stored inside the video capturing structure. It is not allowed to modify or release the image! You can copy the frame using :ocv:cfunc:`cvCloneImage` and then do whatever you want with the copy.
@ -346,8 +334,6 @@ Grabs, decodes and returns the next video frame.
.. ocv:cfunction:: IplImage* cvQueryFrame(CvCapture* capture)
.. ocv:pyoldfunction:: cv.QueryFrame(capture) -> image
The methods/functions combine :ocv:func:`VideoCapture::grab` and :ocv:func:`VideoCapture::retrieve` in one call. This is the most convenient method for reading video files or capturing data from a camera. If no frame has been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer.
.. note:: OpenCV 1.x functions ``cvRetrieveFrame`` and ``cv.RetrieveFrame`` return image stored inside the video capturing structure. It is not allowed to modify or release the image! You can copy the frame using :ocv:cfunc:`cvCloneImage` and then do whatever you want with the copy.
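And the combined grab-plus-retrieve call in its usual loop form (a sketch; the file name is hypothetical, and operator>> is shorthand for read()):

    #include <opencv2/core.hpp>
    #include <opencv2/highgui.hpp>
    using namespace cv;

    void processVideo()
    {
        VideoCapture cap("video.avi");   // hypothetical file name
        Mat frame;
        while (cap.read(frame))          // grab + retrieve in one call
        {
            // ... process frame; read() returns false at end of stream
        }
    }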
@ -363,9 +349,6 @@ Returns the specified ``VideoCapture`` property
.. ocv:cfunction:: double cvGetCaptureProperty( CvCapture* capture, int property_id )
.. ocv:pyoldfunction:: cv.GetCaptureProperty(capture, property_id) -> float
:param propId: Property identifier. It can be one of the following:
* **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds or video capture timestamp.
@ -419,8 +402,6 @@ Sets a property in the ``VideoCapture``.
.. ocv:cfunction:: int cvSetCaptureProperty( CvCapture* capture, int property_id, double value )
.. ocv:pyoldfunction:: cv.SetCaptureProperty(capture, property_id, value) -> retval
:param propId: Property identifier. It can be one of the following:
* **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds.
@ -484,7 +465,6 @@ VideoWriter constructors
.. ocv:pyfunction:: cv2.VideoWriter([filename, fourcc, fps, frameSize[, isColor]]) -> <VideoWriter object>
.. ocv:cfunction:: CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc, double fps, CvSize frame_size, int is_color=1 )
.. ocv:pyoldfunction:: cv.CreateVideoWriter(filename, fourcc, fps, frame_size, is_color=true) -> CvVideoWriter
.. ocv:pyfunction:: cv2.VideoWriter.isOpened() -> retval
.. ocv:pyfunction:: cv2.VideoWriter.open(filename, fourcc, fps, frameSize[, isColor]) -> retval
@ -544,7 +524,6 @@ Writes the next video frame
.. ocv:pyfunction:: cv2.VideoWriter.write(image) -> None
.. ocv:cfunction:: int cvWriteFrame( CvVideoWriter* writer, const IplImage* image )
.. ocv:pyoldfunction:: cv.WriteFrame(writer, image)->int
:param writer: Video writer structure (OpenCV 1.x API)

@ -11,8 +11,6 @@ Creates a trackbar and attaches it to the specified window.
.. ocv:cfunction:: int cvCreateTrackbar( const char* trackbar_name, const char* window_name, int* value, int count, CvTrackbarCallback on_change=NULL )
.. ocv:pyoldfunction:: cv.CreateTrackbar(trackbarName, windowName, value, count, onChange) -> None
:param trackbarname: Name of the created trackbar.
:param winname: Name of the window that will be used as a parent of the created trackbar.
@ -43,8 +41,6 @@ Returns the trackbar position.
.. ocv:cfunction:: int cvGetTrackbarPos( const char* trackbar_name, const char* window_name )
.. ocv:pyoldfunction:: cv.GetTrackbarPos(trackbarName, windowName) -> retval
:param trackbarname: Name of the trackbar.
:param winname: Name of the window that is the parent of the trackbar.
@ -65,8 +61,6 @@ Displays an image in the specified window.
.. ocv:cfunction:: void cvShowImage( const char* name, const CvArr* image )
.. ocv:pyoldfunction:: cv.ShowImage(name, image) -> None
:param winname: Name of the window.
:param image: Image to be shown.
@ -90,8 +84,6 @@ Creates a window.
.. ocv:cfunction:: int cvNamedWindow( const char* name, int flags=CV_WINDOW_AUTOSIZE )
.. ocv:pyoldfunction:: cv.NamedWindow(name, flags=CV_WINDOW_AUTOSIZE)-> None
:param name: Name of the window, shown in the window caption, that may be used as a window identifier.
:param flags: Flags of the window. Currently the only supported flag is ``CV_WINDOW_AUTOSIZE`` . If this is set, the window size is automatically adjusted to fit the displayed image (see :ocv:func:`imshow` ), and you cannot change the window size manually.
@ -125,8 +117,6 @@ Destroys a window.
.. ocv:cfunction:: void cvDestroyWindow( const char* name )
.. ocv:pyoldfunction:: cv.DestroyWindow(name)-> None
:param winname: Name of the window to be destroyed.
The function ``destroyWindow`` destroys the window with the given name.
@ -142,8 +132,6 @@ Destroys all of the HighGUI windows.
.. ocv:cfunction:: void cvDestroyAllWindows()
.. ocv:pyoldfunction:: cv.DestroyAllWindows()-> None
The function ``destroyAllWindows`` destroys all of the opened HighGUI windows.
@ -157,8 +145,6 @@ Moves window to the specified position
.. ocv:cfunction:: void cvMoveWindow( const char* name, int x, int y )
.. ocv:pyoldfunction:: cv.MoveWindow(name, x, y)-> None
:param winname: Window name
:param x: The new x-coordinate of the window
@ -176,8 +162,6 @@ Resizes window to the specified size
.. ocv:cfunction:: void cvResizeWindow( const char* name, int width, int height )
.. ocv:pyoldfunction:: cv.ResizeWindow(name, width, height)-> None
:param winname: Window name
:param width: The new window width
@ -199,8 +183,6 @@ Sets mouse handler for the specified window
.. ocv:cfunction:: void cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, void* param=NULL )
.. ocv:pyoldfunction:: cv.SetMouseCallback(windowName, onMouse, param=None) -> None
:param winname: Window name
:param onMouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
@ -218,8 +200,6 @@ Sets the trackbar position.
.. ocv:cfunction:: void cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos )
.. ocv:pyoldfunction:: cv.SetTrackbarPos(trackbarName, windowName, pos)-> None
:param trackbarname: Name of the trackbar.
:param winname: Name of the window that is the parent of trackbar.
@ -242,8 +222,6 @@ Waits for a pressed key.
.. ocv:cfunction:: int cvWaitKey( int delay=0 )
.. ocv:pyoldfunction:: cv.WaitKey(delay=0)-> int
:param delay: Delay in milliseconds. 0 is the special value that means "forever".
The function ``waitKey`` waits for a key event infinitely (when
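
Putting it together, a minimal HighGUI event loop (a sketch; window and trackbar names are arbitrary)::

    import cv2
    import numpy as np

    def on_change(pos):
        # the callback receives the new trackbar position
        cv2.imshow("demo", np.full((200, 400), pos, np.uint8))

    cv2.namedWindow("demo")
    cv2.createTrackbar("level", "demo", 128, 255, on_change)
    on_change(128)                          # draw the initial state
    while (cv2.waitKey(30) & 0xFF) != 27:   # Esc to quit
        pass
    cv2.destroyAllWindows()
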

@ -15,8 +15,6 @@ Finds edges in an image using the [Canny86]_ algorithm.
.. ocv:cfunction:: void cvCanny( const CvArr* image, CvArr* edges, double threshold1, double threshold2, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.Canny(image, edges, threshold1, threshold2, aperture_size=3) -> None
:param image: single-channel 8-bit input image.
:param edges: output edge map; it has the same size and type as ``image`` .
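
For example (a sketch; the file name is a placeholder)::

    import cv2

    img = cv2.imread("input.png", 0)   # 0 = load as 8-bit grayscale
    edges = cv2.Canny(img, 50, 150)    # hysteresis thresholds 50/150
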
@ -44,8 +42,6 @@ Calculates eigenvalues and eigenvectors of image blocks for corner detection.
.. ocv:cfunction:: void cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, int block_size, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.CornerEigenValsAndVecs(image, eigenvv, blockSize, aperture_size=3) -> None
:param src: Input single-channel 8-bit or floating-point image.
:param dst: Image to store the results. It has the same size as ``src`` and the type ``CV_32FC(6)`` .
@ -97,8 +93,6 @@ Harris edge detector.
.. ocv:cfunction:: void cvCornerHarris( const CvArr* image, CvArr* harris_responce, int block_size, int aperture_size=3, double k=0.04 )
.. ocv:pyoldfunction:: cv.CornerHarris(image, harris_dst, blockSize, aperture_size=3, k=0.04) -> None
:param src: Input single-channel 8-bit or floating-point image.
:param dst: Image to store the Harris detector responses. It has the type ``CV_32FC1`` and the same size as ``src`` .
@ -137,8 +131,6 @@ Calculates the minimal eigenvalue of gradient matrices for corner detection.
.. ocv:cfunction:: void cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, int block_size, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.CornerMinEigenVal(image, eigenval, blockSize, aperture_size=3) -> None
:param src: Input single-channel 8-bit or floating-point image.
:param dst: Image to store the minimal eigenvalues. It has the type ``CV_32FC1`` and the same size as ``src`` .
@ -166,8 +158,6 @@ Refines the corner locations.
.. ocv:cfunction:: void cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, int count, CvSize win, CvSize zero_zone, CvTermCriteria criteria )
.. ocv:pyoldfunction:: cv.FindCornerSubPix(image, corners, win, zero_zone, criteria) -> corners
:param image: Input image.
:param corners: Initial coordinates of the input corners and refined coordinates provided for output.
@ -228,8 +218,6 @@ Determines strong corners on an image.
.. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, CvArr* temp_image, CvPoint2D32f* corners, int* corner_count, double quality_level, double min_distance, const CvArr* mask=NULL, int block_size=3, int use_harris=0, double k=0.04 )
.. ocv:pyoldfunction:: cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=None, blockSize=3, useHarris=0, k=0.04) -> cornerCount
:param image: Input 8-bit or floating-point 32-bit, single-channel image.
:param eig_image: The parameter is ignored.
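
In the ``cv2`` API the bookkeeping arrays disappear (a sketch; parameter values are arbitrary)::

    import cv2

    img = cv2.imread("input.png", 0)
    # up to 100 corners, quality >= 1% of the best, at least 10 px apart
    corners = cv2.goodFeaturesToTrack(img, 100, 0.01, 10)
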
@ -367,8 +355,6 @@ Finds lines in a binary image using the standard Hough transform.
.. ocv:cfunction:: CvSeq* cvHoughLines2( CvArr* image, void* line_storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0 )
.. ocv:pyoldfunction:: cv.HoughLines2(image, storage, method, rho, theta, threshold, param1=0, param2=0)-> lines
:param image: 8-bit, single-channel binary source image. The image may be modified by the function.
:param lines: Output vector of lines. Each line is represented by a two-element vector :math:`(\rho, \theta)` . :math:`\rho` is the distance from the coordinate origin :math:`(0,0)` (top-left corner of the image). :math:`\theta` is the line rotation angle in radians ( :math:`0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}` ).
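
A short sketch of the standard transform (thresholds are arbitrary)::

    import cv2
    import numpy as np

    img = cv2.imread("input.png", 0)
    edges = cv2.Canny(img, 50, 150)
    # rho resolution 1 px, theta resolution 1 degree, accumulator threshold 200
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
    # each returned entry holds (rho, theta) as described above
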
@ -510,8 +496,6 @@ Calculates a feature map for corner detection.
.. ocv:cfunction:: void cvPreCornerDetect( const CvArr* image, CvArr* corners, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.PreCornerDetect(image, corners, apertureSize=3)-> None
:param src: Source single-channel 8-bit or floating-point image.
:param dst: Output image that has the type ``CV_32F`` and the same size as ``src`` .

@ -750,7 +750,6 @@ Dilates an image by using a specific structuring element.
.. ocv:pyfunction:: cv2.dilate(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
.. ocv:cfunction:: void cvDilate( const CvArr* src, CvArr* dst, IplConvKernel* element=NULL, int iterations=1 )
.. ocv:pyoldfunction:: cv.Dilate(src, dst, element=None, iterations=1)-> None
:param src: input image; the number of channels can be arbitrary, but the depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
@ -790,7 +789,6 @@ Erodes an image by using a specific structuring element.
.. ocv:pyfunction:: cv2.erode(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
.. ocv:cfunction:: void cvErode( const CvArr* src, CvArr* dst, IplConvKernel* element=NULL, int iterations=1)
.. ocv:pyoldfunction:: cv.Erode(src, dst, element=None, iterations=1)-> None
:param src: input image; the number of channels can be arbitrary, but the depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
@ -832,8 +830,6 @@ Convolves an image with the kernel.
.. ocv:cfunction:: void cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1,-1) )
.. ocv:pyoldfunction:: cv.Filter2D(src, dst, kernel, anchor=(-1, -1))-> None
:param src: input image.
:param dst: output image of the same size and the same number of channels as ``src``.
@ -1013,8 +1009,6 @@ Returns a structuring element of the specified size and shape for morphological
.. ocv:cfunction:: IplConvKernel* cvCreateStructuringElementEx( int cols, int rows, int anchor_x, int anchor_y, int shape, int* values=NULL )
.. ocv:pyoldfunction:: cv.CreateStructuringElementEx(cols, rows, anchorX, anchorY, shape, values=None)-> kernel
:param shape: Element shape that could be one of the following:
* **MORPH_RECT** - a rectangular structuring element:
@ -1091,7 +1085,6 @@ Performs advanced morphological transformations.
.. ocv:pyfunction:: cv2.morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst
.. ocv:cfunction:: void cvMorphologyEx( const CvArr* src, CvArr* dst, CvArr* temp, IplConvKernel* element, int operation, int iterations=1 )
.. ocv:pyoldfunction:: cv.MorphologyEx(src, dst, temp, element, operation, iterations=1)-> None
:param src: Source image. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``.
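
A sketch combining the structuring-element helper with the basic operations (file name and sizes are placeholders)::

    import cv2

    img = cv2.imread("input.png", 0)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)  # erosion then dilation
    dilated = cv2.dilate(img, kernel, iterations=2)
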
@ -1168,8 +1161,6 @@ Calculates the Laplacian of an image.
.. ocv:cfunction:: void cvLaplace( const CvArr* src, CvArr* dst, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.Laplace(src, dst, apertureSize=3) -> None
:param src: Source image.
:param dst: Destination image of the same size and the same number of channels as ``src`` .
@ -1214,8 +1205,6 @@ Blurs an image and downsamples it.
.. ocv:cfunction:: void cvPyrDown( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 )
.. ocv:pyoldfunction:: cv.PyrDown(src, dst, filter=CV_GAUSSIAN_5X5) -> None
:param src: input image.
:param dst: output image; it has the specified size and the same type as ``src``.
@ -1247,8 +1236,6 @@ Upsamples an image and then blurs it.
.. ocv:cfunction:: void cvPyrUp( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 )
.. ocv:pyoldfunction:: cv.PyrUp(src, dst, filter=CV_GAUSSIAN_5X5) -> None
:param src: input image.
:param dst: output image. It has the specified size and the same type as ``src`` .
@ -1274,8 +1261,6 @@ Performs initial step of meanshift segmentation of an image.
.. ocv:cfunction:: void cvPyrMeanShiftFiltering( const CvArr* src, CvArr* dst, double sp, double sr, int max_level=1, CvTermCriteria termcrit= cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1))
.. ocv:pyoldfunction:: cv.PyrMeanShiftFiltering(src, dst, sp, sr, max_level=1, termcrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1)) -> None
:param src: The source 8-bit, 3-channel image.
:param dst: The destination image of the same format and the same size as the source.
@ -1361,8 +1346,6 @@ Smooths the image in one of several ways.
.. ocv:cfunction:: void cvSmooth( const CvArr* src, CvArr* dst, int smoothtype=CV_GAUSSIAN, int size1=3, int size2=0, double sigma1=0, double sigma2=0 )
.. ocv:pyoldfunction:: cv.Smooth(src, dst, smoothtype=CV_GAUSSIAN, param1=3, param2=0, param3=0, param4=0)-> None
:param src: The source image
:param dst: The destination image
@ -1417,8 +1400,6 @@ Calculates the first, second, third, or mixed image derivatives using an extende
.. ocv:cfunction:: void cvSobel( const CvArr* src, CvArr* dst, int xorder, int yorder, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.Sobel(src, dst, xorder, yorder, apertureSize=3)-> None
:param src: input image.
:param dst: output image of the same size and the same number of channels as ``src`` .
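
For example, first derivatives in x and y (a sketch; a float destination depth avoids clipping negative responses)::

    import cv2

    img = cv2.imread("input.png", 0)
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=3)  # d/dx
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=3)  # d/dy
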

@ -85,8 +85,6 @@ Calculates an affine transform from three pairs of the corresponding points.
.. ocv:cfunction:: CvMat* cvGetAffineTransform( const CvPoint2D32f * src, const CvPoint2D32f * dst, CvMat * map_matrix )
.. ocv:pyoldfunction:: cv.GetAffineTransform(src, dst, mapMatrix)-> None
:param src: Coordinates of triangle vertices in the source image.
:param dst: Coordinates of the corresponding triangle vertices in the destination image.
@ -124,8 +122,6 @@ Calculates a perspective transform from four pairs of the corresponding points.
.. ocv:cfunction:: CvMat* cvGetPerspectiveTransform( const CvPoint2D32f* src, const CvPoint2D32f* dst, CvMat* map_matrix )
.. ocv:pyoldfunction:: cv.GetPerspectiveTransform(src, dst, mapMatrix)-> None
:param src: Coordinates of quadrangle vertices in the source image.
:param dst: Coordinates of the corresponding quadrangle vertices in the destination image.
@ -160,7 +156,6 @@ Retrieves a pixel rectangle from an image with sub-pixel accuracy.
.. ocv:pyfunction:: cv2.getRectSubPix(image, patchSize, center[, patch[, patchType]]) -> patch
.. ocv:cfunction:: void cvGetRectSubPix( const CvArr* src, CvArr* dst, CvPoint2D32f center )
.. ocv:pyoldfunction:: cv.GetRectSubPix(src, dst, center)-> None
:param src: Source image.
@ -202,8 +197,6 @@ Calculates an affine matrix of 2D rotation.
.. ocv:cfunction:: CvMat* cv2DRotationMatrix( CvPoint2D32f center, double angle, double scale, CvMat* map_matrix )
.. ocv:pyoldfunction:: cv.GetRotationMatrix2D(center, angle, scale, mapMatrix)-> None
:param center: Center of the rotation in the source image.
:param angle: Rotation angle in degrees. Positive values mean counter-clockwise rotation (the coordinate origin is assumed to be the top-left corner).
@ -264,8 +257,6 @@ Remaps an image to log-polar space.
.. ocv:cfunction:: void cvLogPolar( const CvArr* src, CvArr* dst, CvPoint2D32f center, double M, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS )
.. ocv:pyoldfunction:: cv.LogPolar(src, dst, center, M, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS)-> None
:param src: Source image
:param dst: Destination image
@ -317,7 +308,6 @@ Applies a generic geometrical transformation to an image.
.. ocv:pyfunction:: cv2.remap(src, map1, map2, interpolation[, dst[, borderMode[, borderValue]]]) -> dst
.. ocv:cfunction:: void cvRemap( const CvArr* src, CvArr* dst, const CvArr* mapx, const CvArr* mapy, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
.. ocv:pyoldfunction:: cv.Remap(src, dst, mapx, mapy, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None
:param src: Source image.
@ -364,7 +354,6 @@ Resizes an image.
.. ocv:pyfunction:: cv2.resize(src, dsize[, dst[, fx[, fy[, interpolation]]]]) -> dst
.. ocv:cfunction:: void cvResize( const CvArr* src, CvArr* dst, int interpolation=CV_INTER_LINEAR )
.. ocv:pyoldfunction:: cv.Resize(src, dst, interpolation=CV_INTER_LINEAR)-> None
:param src: input image.
@ -434,12 +423,8 @@ Applies an affine transformation to an image.
.. ocv:cfunction:: void cvWarpAffine( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
.. ocv:pyoldfunction:: cv.WarpAffine(src, dst, mapMatrix, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None
.. ocv:cfunction:: void cvGetQuadrangleSubPix( const CvArr* src, CvArr* dst, const CvMat* map_matrix )
.. ocv:pyoldfunction:: cv.GetQuadrangleSubPix(src, dst, mapMatrix)-> None
:param src: input image.
:param dst: output image that has the size ``dsize`` and the same type as ``src`` .
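
For example, rotating about the image center (a sketch)::

    import cv2

    img = cv2.imread("input.png")
    h, w = img.shape[:2]
    M = cv2.getRotationMatrix2D((w / 2.0, h / 2.0), 30, 1.0)  # 30 deg CCW, no scaling
    rotated = cv2.warpAffine(img, M, (w, h))
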
@ -485,8 +470,6 @@ Applies a perspective transformation to an image.
.. ocv:cfunction:: void cvWarpPerspective( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) )
.. ocv:pyoldfunction:: cv.WarpPerspective(src, dst, mapMatrix, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None
:param src: input image.
:param dst: output image that has the size ``dsize`` and the same type as ``src`` .
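
A sketch with four hand-picked point pairs (all coordinates are placeholders)::

    import cv2
    import numpy as np

    img = cv2.imread("input.png")
    src = np.float32([[0, 0], [639, 0], [639, 479], [0, 479]])
    dst = np.float32([[50, 50], [589, 20], [610, 460], [20, 430]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, (640, 480))
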
@ -534,9 +517,6 @@ Computes the undistortion and rectification transformation map.
.. ocv:cfunction:: void cvInitUndistortRectifyMap( const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat * R, const CvMat* new_camera_matrix, CvArr* mapx, CvArr* mapy )
.. ocv:cfunction:: void cvInitUndistortMap( const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvArr* mapx, CvArr* mapy )
.. ocv:pyoldfunction:: cv.InitUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, map1, map2)-> None
.. ocv:pyoldfunction:: cv.InitUndistortMap(cameraMatrix, distCoeffs, map1, map2)-> None
:param cameraMatrix: Input camera matrix :math:`A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` .
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
@ -630,8 +610,6 @@ Transforms an image to compensate for lens distortion.
.. ocv:cfunction:: void cvUndistort2( const CvArr* src, CvArr* dst, const CvMat* camera_matrix, const CvMat* distortion_coeffs, const CvMat* new_camera_matrix=0 )
.. ocv:pyoldfunction:: cv.Undistort2(src, dst, cameraMatrix, distCoeffs)-> None
:param src: Input (distorted) image.
:param dst: Output (corrected) image that has the same size and type as ``src`` .
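
For illustration (a sketch; the intrinsics and distortion coefficients below are made up, real ones come from camera calibration)::

    import cv2
    import numpy as np

    K = np.array([[800., 0., 320.],
                  [0., 800., 240.],
                  [0., 0., 1.]])            # hypothetical camera matrix
    dist = np.array([-0.2, 0.05, 0., 0.])   # hypothetical (k1, k2, p1, p2)
    img = cv2.imread("distorted.png")
    corrected = cv2.undistort(img, K, dist)
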
@ -668,7 +646,6 @@ Computes the ideal point coordinates from the observed point coordinates.
.. ocv:function:: void undistortPoints( InputArray src, OutputArray dst, InputArray cameraMatrix, InputArray distCoeffs, InputArray R=noArray(), InputArray P=noArray())
.. ocv:cfunction:: void cvUndistortPoints( const CvMat* src, CvMat* dst, const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat* R=0, const CvMat* P=0 )
.. ocv:pyoldfunction:: cv.UndistortPoints(src, dst, cameraMatrix, distCoeffs, R=None, P=None)-> None
:param src: Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2).

@ -16,7 +16,6 @@ Calculates a histogram of a set of arrays.
.. ocv:pyfunction:: cv2.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]]) -> hist
.. ocv:cfunction:: void cvCalcHist( IplImage** image, CvHistogram* hist, int accumulate=0, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.CalcHist(image, hist, accumulate=0, mask=None)-> None
:param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
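
For example, a 256-bin grayscale histogram (a sketch)::

    import cv2

    img = cv2.imread("input.png", 0)
    # one image, channel 0, no mask, 256 bins over [0, 256)
    hist = cv2.calcHist([img], [0], None, [256], [0, 256])
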
@ -113,7 +112,6 @@ Calculates the back projection of a histogram.
.. ocv:pyfunction:: cv2.calcBackProject(images, channels, hist, ranges, scale[, dst]) -> dst
.. ocv:cfunction:: void cvCalcBackProject( IplImage** image, CvArr* backProject, const CvHistogram* hist )
.. ocv:pyoldfunction:: cv.CalcBackProject(image, back_project, hist) -> None
:param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels.
@ -159,7 +157,6 @@ Compares two histograms.
.. ocv:pyfunction:: cv2.compareHist(H1, H2, method) -> retval
.. ocv:cfunction:: double cvCompareHist( const CvHistogram* hist1, const CvHistogram* hist2, int method )
.. ocv:pyoldfunction:: cv.CompareHist(hist1, hist2, method)->float
:param H1: First compared histogram.
@ -229,8 +226,6 @@ Computes the "minimal work" distance between two weighted point configurations.
.. ocv:cfunction:: float cvCalcEMD2( const CvArr* signature1, const CvArr* signature2, int distance_type, CvDistanceFunction distance_func=NULL, const CvArr* cost_matrix=NULL, CvArr* flow=NULL, float* lower_bound=NULL, void* userdata=NULL )
.. ocv:pyoldfunction:: cv.CalcEMD2(signature1, signature2, distance_type, distance_func=None, cost_matrix=None, flow=None, lower_bound=None, userdata=None) -> float
:param signature1: First signature, a :math:`\texttt{size1}\times \texttt{dims}+1` floating-point matrix. Each row stores the point weight followed by the point coordinates. The matrix is allowed to have a single column (weights only) if the user-defined cost matrix is used.
:param signature2: Second signature of the same format as ``signature1`` , though the number of rows may be different. The total weights may be different. In this case an extra "dummy" point is added to either ``signature1`` or ``signature2`` .
@ -304,8 +299,6 @@ Locates a template within an image by using a histogram comparison.
.. ocv:cfunction:: void cvCalcBackProjectPatch( IplImage** images, CvArr* dst, CvSize patch_size, CvHistogram* hist, int method, double factor )
.. ocv:pyoldfunction:: cv.CalcBackProjectPatch(images, dst, patch_size, hist, method, factor)-> None
:param images: Source images (though you may pass CvMat** as well).
:param dst: Destination image.
@ -329,8 +322,6 @@ Divides one histogram by another.
.. ocv:cfunction:: void cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, CvHistogram* dst_hist, double scale=255 )
.. ocv:pyoldfunction:: cv.CalcProbDensity(hist1, hist2, dst_hist, scale=255) -> None
:param hist1: First histogram (the divisor).
:param hist2: Second histogram.
@ -351,7 +342,6 @@ ClearHist
Clears the histogram.
.. ocv:cfunction:: void cvClearHist( CvHistogram* hist )
.. ocv:pyoldfunction:: cv.ClearHist(hist)-> None
:param hist: Histogram.
@ -378,8 +368,6 @@ Creates a histogram.
.. ocv:cfunction:: CvHistogram* cvCreateHist( int dims, int* sizes, int type, float** ranges=NULL, int uniform=1 )
.. ocv:pyoldfunction:: cv.CreateHist(dims, type, ranges=None, uniform=1) -> hist
:param dims: Number of histogram dimensions.
:param sizes: Array of the histogram dimension sizes.
@ -414,8 +402,6 @@ Finds the minimum and maximum histogram bins.
.. ocv:cfunction:: void cvGetMinMaxHistValue( const CvHistogram* hist, float* min_value, float* max_value, int* min_idx=NULL, int* max_idx=NULL )
.. ocv:pyoldfunction:: cv.GetMinMaxHistValue(hist)-> (min_value, max_value, min_idx, max_idx)
:param hist: Histogram.
:param min_value: Pointer to the minimum value of the histogram.
@ -454,7 +440,6 @@ NormalizeHist
Normalizes the histogram.
.. ocv:cfunction:: void cvNormalizeHist( CvHistogram* hist, double factor )
.. ocv:pyoldfunction:: cv.NormalizeHist(hist, factor)-> None
:param hist: Pointer to the histogram.
@ -494,7 +479,6 @@ ThreshHist
Thresholds the histogram.
.. ocv:cfunction:: void cvThreshHist( CvHistogram* hist, double threshold )
.. ocv:pyoldfunction:: cv.ThreshHist(hist, threshold) -> None
:param hist: Pointer to the histogram.

@ -14,8 +14,6 @@ Applies an adaptive threshold to an array.
.. ocv:cfunction:: void cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, int adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, int threshold_type=CV_THRESH_BINARY, int block_size=3, double param1=5 )
.. ocv:pyoldfunction:: cv.AdaptiveThreshold(src, dst, maxValue, adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, thresholdType=CV_THRESH_BINARY, blockSize=3, param1=5)-> None
:param src: Source 8-bit single-channel image.
:param dst: Destination image of the same size and the same type as ``src`` .
@ -79,7 +77,6 @@ Converts an image from one color space to another.
.. ocv:pyfunction:: cv2.cvtColor(src, code[, dst[, dstCn]]) -> dst
.. ocv:cfunction:: void cvCvtColor( const CvArr* src, CvArr* dst, int code )
.. ocv:pyoldfunction:: cv.CvtColor(src, dst, code)-> None
:param src: input image: 8-bit unsigned, 16-bit unsigned ( ``CV_16UC...`` ), or single-precision floating-point.
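
For example (a sketch; ``imread`` loads color images in BGR order, hence the ``BGR2GRAY`` code)::

    import cv2

    img = cv2.imread("input.png")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
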
@ -419,8 +416,6 @@ Calculates the distance to the closest zero pixel for each pixel of the source i
.. ocv:cfunction:: void cvDistTransform( const CvArr* src, CvArr* dst, int distance_type=CV_DIST_L2, int mask_size=3, const float* mask=NULL, CvArr* labels=NULL, int labelType=CV_DIST_LABEL_CCOMP )
.. ocv:pyoldfunction:: cv.DistTransform(src, dst, distance_type=CV_DIST_L2, mask_size=3, mask=None, labels=None) -> None
:param src: 8-bit, single-channel (binary) source image.
:param dst: Output image with calculated distances. It is a 32-bit floating-point, single-channel image of the same size as ``src`` .
@ -491,7 +486,6 @@ Fills a connected component with the given color.
.. ocv:pyfunction:: cv2.floodFill(image, mask, seedPoint, newVal[, loDiff[, upDiff[, flags]]]) -> retval, image, mask, rect
.. ocv:cfunction:: void cvFloodFill( CvArr* image, CvPoint seed_point, CvScalar new_val, CvScalar lo_diff=cvScalarAll(0), CvScalar up_diff=cvScalarAll(0), CvConnectedComp* comp=NULL, int flags=4, CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.FloodFill(image, seed_point, new_val, lo_diff=(0, 0, 0, 0), up_diff=(0, 0, 0, 0), flags=4, mask=None)-> comp
:param image: Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the function unless the ``FLOODFILL_MASK_ONLY`` flag is set in the second variant of the function. See the details below.
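
A sketch of the mask variant (seed point and fill color are arbitrary; note the mask must be 2 pixels larger than the image)::

    import cv2
    import numpy as np

    img = cv2.imread("input.png")
    h, w = img.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)        # 2 px larger, as required
    cv2.floodFill(img, mask, (10, 10), (0, 0, 255))  # fill from (10, 10) with red
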
@ -603,8 +597,6 @@ Calculates the integral of an image.
.. ocv:cfunction:: void cvIntegral( const CvArr* image, CvArr* sum, CvArr* sqsum=NULL, CvArr* tilted_sum=NULL )
.. ocv:pyoldfunction:: cv.Integral(image, sum, sqsum=None, tiltedSum=None)-> None
:param image: input image as :math:`W \times H`, 8-bit or floating-point (32f or 64f).
:param sum: integral image as :math:`(W+1)\times (H+1)` , 32-bit integer or floating-point (32f or 64f).
@ -655,8 +647,6 @@ Applies a fixed-level threshold to each array element.
.. ocv:cfunction:: double cvThreshold( const CvArr* src, CvArr* dst, double threshold, double max_value, int threshold_type )
.. ocv:pyoldfunction:: cv.Threshold(src, dst, threshold, maxValue, thresholdType)-> None
:param src: input array (single-channel, 8-bit or 32-bit floating point).
:param dst: output array of the same size and type as ``src``.
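
For example (a sketch)::

    import cv2

    img = cv2.imread("input.png", 0)
    # pixels > 128 become 255, the rest become 0
    retval, binary = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
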

@ -13,8 +13,6 @@ Adds an image to the accumulator.
.. ocv:cfunction:: void cvAcc( const CvArr* image, CvArr* sum, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Acc(image, sum, mask=None) -> None
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
:param dst: Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
@ -49,8 +47,6 @@ Adds the square of a source image to the accumulator.
.. ocv:cfunction:: void cvSquareAcc( const CvArr* image, CvArr* sqsum, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.SquareAcc(image, sqsum, mask=None) -> None
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
:param dst: Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
@ -83,8 +79,6 @@ Adds the per-element product of two input images to the accumulator.
.. ocv:cfunction:: void cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.MultiplyAcc(image1, image2, acc, mask=None)-> None
:param src1: First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
:param src2: Second input image of the same type and the same size as ``src1`` .
@ -118,7 +112,6 @@ Updates a running average.
.. ocv:pyfunction:: cv2.accumulateWeighted(src, dst, alpha[, mask]) -> dst
.. ocv:cfunction:: void cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.RunningAvg(image, acc, alpha, mask=None)-> None
:param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point.

@ -12,7 +12,6 @@ Compares a template against overlapped image regions.
.. ocv:pyfunction:: cv2.matchTemplate(image, templ, method[, result]) -> result
.. ocv:cfunction:: void cvMatchTemplate( const CvArr* image, const CvArr* templ, CvArr* result, int method )
.. ocv:pyoldfunction:: cv.MatchTemplate(image, templ, result, method)-> None
:param image: Image where the search is running. It must be 8-bit or 32-bit floating-point.
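
A sketch of the usual pattern (file names are placeholders; the best match is read off with ``minMaxLoc``)::

    import cv2

    img = cv2.imread("scene.png", 0)
    tmpl = cv2.imread("template.png", 0)
    result = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    # for TM_CCOEFF_NORMED the best match is at max_loc
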

@ -13,8 +13,6 @@ Calculates all of the moments up to the third order of a polygon or rasterized s
.. ocv:cfunction:: void cvMoments( const CvArr* arr, CvMoments* moments, int binary=0 )
.. ocv:pyoldfunction:: cv.Moments(arr, binary=0) -> moments
:param array: Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( :math:`1 \times N` or :math:`N \times 1` ) of 2D points (``Point`` or ``Point2f`` ).
:param binaryImage: If it is true, all non-zero image pixels are treated as 1's. The parameter is used for images only.
@ -98,8 +96,6 @@ Calculates seven Hu invariants.
.. ocv:cfunction:: void cvGetHuMoments( CvMoments* moments, CvHuMoments* hu_moments )
.. ocv:pyoldfunction:: cv.GetHuMoments(moments) -> hu
:param moments: Input moments computed with :ocv:func:`moments` .
:param hu: Output Hu invariants.
@ -163,8 +159,6 @@ Finds contours in a binary image.
.. ocv:cfunction:: int cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** first_contour, int header_size=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0,0) )
.. ocv:pyoldfunction:: cv.FindContours(image, storage, mode=CV_RETR_LIST, method=CV_CHAIN_APPROX_SIMPLE, offset=(0, 0)) -> contours
:param image: Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as ``binary`` . You can use :ocv:func:`compare` , :ocv:func:`inRange` , :ocv:func:`threshold` , :ocv:func:`adaptiveThreshold` , :ocv:func:`Canny` , and others to create a binary image out of a grayscale or color one. The function modifies the ``image`` while extracting the contours.
:param contours: Detected contours. Each contour is stored as a vector of points.
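
For example (a sketch; note that the number of return values differs between OpenCV versions, and the input is modified)::

    import cv2

    img = cv2.imread("input.png", 0)
    _, binary = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
    # some versions also return the (modified) image as a first value
    contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
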
@ -243,8 +237,6 @@ Approximates Freeman chain(s) with a polygonal curve.
.. ocv:cfunction:: CvSeq* cvApproxChains( CvSeq* src_seq, CvMemStorage* storage, int method=CV_CHAIN_APPROX_SIMPLE, double parameter=0, int minimal_perimeter=0, int recursive=0 )
.. ocv:pyoldfunction:: cv.ApproxChains(src_seq, storage, method=CV_CHAIN_APPROX_SIMPLE, parameter=0, minimal_perimeter=0, recursive=0)-> contours
:param src_seq: Pointer to the approximated Freeman chain that can refer to other chains.
:param storage: Storage location for the resulting polylines.
@ -270,8 +262,6 @@ Calculates a contour perimeter or a curve length.
.. ocv:cfunction:: double cvArcLength( const void* curve, CvSlice slice=CV_WHOLE_SEQ, int is_closed=-1 )
.. ocv:pyoldfunction:: cv.ArcLength(curve, slice=CV_WHOLE_SEQ, isClosed=-1) -> float
:param curve: Input vector of 2D points, stored in ``std::vector`` or ``Mat``.
:param closed: Flag indicating whether the curve is closed or not.
@ -289,7 +279,6 @@ Calculates the up-right bounding rectangle of a point set.
.. ocv:pyfunction:: cv2.boundingRect(points) -> retval
.. ocv:cfunction:: CvRect cvBoundingRect( CvArr* points, int update=0 )
.. ocv:pyoldfunction:: cv.BoundingRect(points, update=0)-> CvRect
:param points: Input 2D point set, stored in ``std::vector`` or ``Mat``.
@ -308,8 +297,6 @@ Calculates a contour area.
.. ocv:cfunction:: double cvContourArea( const CvArr* contour, CvSlice slice=CV_WHOLE_SEQ, int oriented=0 )
.. ocv:pyoldfunction:: cv.ContourArea(contour, slice=CV_WHOLE_SEQ) -> float
:param contour: Input vector of 2D points (contour vertices), stored in ``std::vector`` or ``Mat``.
:param oriented: Oriented area flag. If it is true, the function returns a signed area value, depending on the contour orientation (clockwise or counter-clockwise). Using this feature, you can determine the orientation of a contour by taking the sign of an area. By default, the parameter is ``false``, which means that the absolute value is returned.
@ -349,8 +336,6 @@ Finds the convex hull of a point set.
.. ocv:cfunction:: CvSeq* cvConvexHull2( const CvArr* input, void* hull_storage=NULL, int orientation=CV_CLOCKWISE, int return_points=0 )
.. ocv:pyoldfunction:: cv.ConvexHull2(points, storage, orientation=CV_CLOCKWISE, return_points=0) -> convexHull
:param points: Input 2D point set, stored in ``std::vector`` or ``Mat``.
:param hull: Output convex hull. It is either an integer vector of indices or vector of points. In the first case, the ``hull`` elements are 0-based indices of the convex hull points in the original array (since the set of convex hull points is a subset of the original point set). In the second case, ``hull`` elements are the convex hull points themselves.
@ -379,8 +364,6 @@ Finds the convexity defects of a contour.
.. ocv:cfunction:: CvSeq* cvConvexityDefects( const CvArr* contour, const CvArr* convexhull, CvMemStorage* storage=NULL )
.. ocv:pyoldfunction:: cv.ConvexityDefects(contour, convexhull, storage)-> convexityDefects
:param contour: Input contour.
:param convexhull: Convex hull obtained using :ocv:func:`convexHull` that should contain indices of the contour points that make the hull.
@ -412,7 +395,6 @@ Fits an ellipse around a set of 2D points.
.. ocv:pyfunction:: cv2.fitEllipse(points) -> retval
.. ocv:cfunction:: CvBox2D cvFitEllipse2( const CvArr* points )
.. ocv:pyoldfunction:: cv.FitEllipse2(points)-> Box2D
:param points: Input 2D point set, stored in:
@ -434,8 +416,6 @@ Fits a line to a 2D or 3D point set.
.. ocv:cfunction:: void cvFitLine( const CvArr* points, int dist_type, double param, double reps, double aeps, float* line )
.. ocv:pyoldfunction:: cv.FitLine(points, dist_type, param, reps, aeps) -> line
:param points: Input vector of 2D or 3D points, stored in ``std::vector<>`` or ``Mat``.
:param line: Output line parameters. In case of 2D fitting, it should be a vector of 4 elements (like ``Vec4f``) - ``(vx, vy, x0, y0)``, where ``(vx, vy)`` is a normalized vector collinear to the line and ``(x0, y0)`` is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like ``Vec6f``) - ``(vx, vy, vz, x0, y0, z0)``, where ``(vx, vy, vz)`` is a normalized vector collinear to the line and ``(x0, y0, z0)`` is a point on the line.
@ -507,7 +487,6 @@ Tests a contour convexity.
.. ocv:pyfunction:: cv2.isContourConvex(contour) -> retval
.. ocv:cfunction:: int cvCheckContourConvexity( const CvArr* contour )
.. ocv:pyoldfunction:: cv.CheckContourConvexity(contour)-> int
:param contour: Input vector of 2D points, stored in:
@ -531,8 +510,6 @@ Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
.. ocv:cfunction:: CvBox2D cvMinAreaRect2( const CvArr* points, CvMemStorage* storage=NULL )
.. ocv:pyoldfunction:: cv.MinAreaRect2(points, storage=None) -> Box2D
:param points: Input vector of 2D points, stored in:
* ``std::vector<>`` or ``Mat`` (C++ interface)
@ -555,8 +532,6 @@ Finds a circle of the minimum area enclosing a 2D point set.
.. ocv:cfunction:: int cvMinEnclosingCircle( const CvArr* points, CvPoint2D32f* center, float* radius )
.. ocv:pyoldfunction:: cv.MinEnclosingCircle(points)-> (int, center, radius)
:param points: Input vector of 2D points, stored in:
* ``std::vector<>`` or ``Mat`` (C++ interface)
@ -582,7 +557,6 @@ Compares two shapes.
.. ocv:pyfunction:: cv2.matchShapes(contour1, contour2, method, parameter) -> retval
.. ocv:cfunction:: double cvMatchShapes( const void* object1, const void* object2, int method, double parameter=0 )
.. ocv:pyoldfunction:: cv.MatchShapes(object1, object2, method, parameter=0) -> float
:param object1: First contour or grayscale image.
@ -637,7 +611,6 @@ Performs a point-in-contour test.
.. ocv:pyfunction:: cv2.pointPolygonTest(contour, pt, measureDist) -> retval
.. ocv:cfunction:: double cvPointPolygonTest( const CvArr* contour, CvPoint2D32f pt, int measure_dist )
.. ocv:pyoldfunction:: cv.PointPolygonTest(contour, pt, measure_dist) -> float
:param contour: Input contour.
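
The shape descriptors above compose naturally (a sketch; the file name is a placeholder and the first contour is taken arbitrarily)::

    import cv2

    img = cv2.imread("shape.png", 0)
    _, bw = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(bw, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = contours[0]
    area = cv2.contourArea(cnt)
    perimeter = cv2.arcLength(cnt, True)    # True = closed curve
    x, y, w, h = cv2.boundingRect(cnt)
    # > 0 inside, < 0 outside, 0 on the edge (False = no signed distance)
    inside = cv2.pointPolygonTest(cnt, (x + w // 2, y + h // 2), False)
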

@ -79,8 +79,6 @@ class DeclarationParser(object):
return "C"
if line.startswith(".. ocv:pyfunction::"):
return "Python2"
if line.startswith(".. ocv:pyoldfunction::"):
return "Python1"
if line.startswith(".. ocv:jfunction::"):
return "Java"
return None

@ -39,11 +39,6 @@ Queries the value of the histogram bin.
.. ocv:cfunction:: float cvQueryHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2)
.. ocv:cfunction:: float cvQueryHistValue_nD(CvHistogram hist, const int* idx)
.. ocv:pyoldfunction:: cv.QueryHistValue_1D(hist, idx0) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_2D(hist, idx0, idx1) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_3D(hist, idx0, idx1, idx2) -> float
.. ocv:pyoldfunction:: cv.QueryHistValue_nD(hist, idx) -> float
:param hist: Histogram.
:param idx0: 0-th index.

@ -10,8 +10,6 @@ Calculates the optical flow for two images by using the block matching method.
.. ocv:cfunction:: void cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr, CvSize block_size, CvSize shift_size, CvSize max_range, int use_previous, CvArr* velx, CvArr* vely )
.. ocv:pyoldfunction:: cv.CalcOpticalFlowBM(prev, curr, blockSize, shiftSize, max_range, usePrevious, velx, vely)-> None
:param prev: First image, 8-bit, single-channel
:param curr: Second image, 8-bit, single-channel
@ -45,8 +43,6 @@ Calculates the optical flow for two images using Horn-Schunck algorithm.
.. ocv:cfunction:: void cvCalcOpticalFlowHS(const CvArr* prev, const CvArr* curr, int use_previous, CvArr* velx, CvArr* vely, double lambda, CvTermCriteria criteria)
.. ocv:pyoldfunction:: cv.CalcOpticalFlowHS(prev, curr, usePrevious, velx, vely, lambda, criteria)-> None
:param prev: First image, 8-bit, single-channel
:param curr: Second image, 8-bit, single-channel
@ -71,8 +67,6 @@ Calculates the optical flow for two images using Lucas-Kanade algorithm.
.. ocv:cfunction:: void cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr, CvSize win_size, CvArr* velx, CvArr* vely )
.. ocv:pyoldfunction:: cv.CalcOpticalFlowLK(prev, curr, winSize, velx, vely)-> None
:param prev: First image, 8-bit, single-channel
:param curr: Second image, 8-bit, single-channel

@ -116,7 +116,6 @@ CalcSubdivVoronoi2D
Calculates the coordinates of the Voronoi diagram cells.
.. ocv:cfunction:: void cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv )
.. ocv:pyoldfunction:: cv.CalcSubdivVoronoi2D(subdiv)-> None
:param subdiv: Delaunay subdivision, in which all the points are already added.
@ -130,7 +129,6 @@ ClearSubdivVoronoi2D
Removes all virtual points.
.. ocv:cfunction:: void cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv )
.. ocv:pyoldfunction:: cv.ClearSubdivVoronoi2D(subdiv)-> None
:param subdiv: Delaunay subdivision.
@ -145,7 +143,6 @@ CreateSubdivDelaunay2D
Creates an empty Delaunay triangulation.
.. ocv:cfunction:: CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage )
.. ocv:pyoldfunction:: cv.CreateSubdivDelaunay2D(rect, storage) -> CvSubdiv2D
:param rect: Rectangle that includes all of the 2D points that are to be added to the subdivision.
@ -166,7 +163,6 @@ FindNearestPoint2D
Finds the subdivision vertex closest to the given point.
.. ocv:cfunction:: CvSubdiv2DPoint* cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt )
.. ocv:pyoldfunction:: cv.FindNearestPoint2D(subdiv, pt)-> point
:param subdiv: Delaunay or another subdivision.
@ -185,7 +181,6 @@ Subdiv2DEdgeDst
Returns the edge destination.
.. ocv:cfunction:: CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge )
.. ocv:pyoldfunction:: cv.Subdiv2DEdgeDst(edge)-> point
:param edge: Subdivision edge (not a quad-edge).
@ -200,7 +195,6 @@ Subdiv2DGetEdge
Returns one of the edges related to the given edge.
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type )
.. ocv:pyoldfunction:: cv.Subdiv2DGetEdge(edge, type)-> CvSubdiv2DEdge
:param edge: Subdivision edge (not a quad-edge).
@ -231,7 +225,6 @@ Subdiv2DNextEdge
Returns next edge around the edge origin.
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DNextEdge( CvSubdiv2DEdge edge )
.. ocv:pyoldfunction:: cv.Subdiv2DNextEdge(edge)-> CvSubdiv2DEdge
:param edge: Subdivision edge (not a quad-edge).
@ -246,7 +239,6 @@ Subdiv2DLocate
Returns the location of a point within a Delaunay triangulation.
.. ocv:cfunction:: CvSubdiv2DPointLocation cvSubdiv2DLocate( CvSubdiv2D* subdiv, CvPoint2D32f pt, CvSubdiv2DEdge* edge, CvSubdiv2DPoint** vertex=NULL )
.. ocv:pyoldfunction:: cv.Subdiv2DLocate(subdiv, pt) -> (loc, where)
:param subdiv: Delaunay or another subdivision.
@ -294,7 +286,6 @@ Subdiv2DRotateEdge
Returns another edge of the same quad-edge.
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate )
.. ocv:pyoldfunction:: cv.Subdiv2DRotateEdge(edge, rotate)-> CvSubdiv2DEdge
:param edge: Subdivision edge (not a quad-edge).
@ -315,7 +306,6 @@ SubdivDelaunay2DInsert
Inserts a single point into a Delaunay triangulation.
.. ocv:cfunction:: CvSubdiv2DPoint* cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt)
.. ocv:pyoldfunction:: cv.SubdivDelaunay2DInsert(subdiv, pt)-> point
:param subdiv: Delaunay subdivision created by the function :ocv:cfunc:`CreateSubdivDelaunay2D`.

@ -108,8 +108,6 @@ Detects keypoints and computes SURF descriptors for them.
.. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
.. ocv:pyoldfunction:: cv.ExtractSURF(image, mask, storage, params)-> (keypoints, descriptors)
:param image: Input 8-bit grayscale image
:param mask: Optional input mask that marks the regions where we should detect features.

@ -195,8 +195,6 @@ Detects objects of different sizes in the input image. The detected objects are
.. ocv:cfunction:: CvSeq* cvHaarDetectObjects( const CvArr* image, CvHaarClassifierCascade* cascade, CvMemStorage* storage, double scale_factor=1.1, int min_neighbors=3, int flags=0, CvSize min_size=cvSize(0,0), CvSize max_size=cvSize(0,0) )
.. ocv:pyoldfunction:: cv.HaarDetectObjects(image, cascade, storage, scale_factor=1.1, min_neighbors=3, flags=0, min_size=(0, 0)) -> detectedObjects
:param cascade: Haar classifier cascade (OpenCV 1.x API only). It can be loaded from an XML or YAML file using :ocv:cfunc:`Load`. When the cascade is not needed anymore, release it using ``cvReleaseHaarClassifierCascade(&cascade)``.
:param image: Matrix of the type ``CV_8U`` containing an image where objects are detected.
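
The C++/Python counterpart is ``CascadeClassifier`` (a sketch; the cascade and image paths are placeholders)::

    import cv2

    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    gray = cv2.imread("people.png", 0)
    # scale step 1.1, at least 3 neighbors per candidate
    objects = cascade.detectMultiScale(gray, 1.1, 3)  # list of (x, y, w, h)
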

@ -12,7 +12,6 @@ Restores the selected region in an image using the region neighborhood.
.. ocv:pyfunction:: cv2.inpaint(src, inpaintMask, inpaintRadius, flags[, dst]) -> dst
.. ocv:cfunction:: void cvInpaint( const CvArr* src, const CvArr* inpaint_mask, CvArr* dst, double inpaintRange, int flags )
.. ocv:pyoldfunction:: cv.Inpaint(src, mask, dst, inpaintRadius, flags) -> None
:param src: Input 8-bit 1-channel or 3-channel image.
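
For example (a sketch; non-zero mask pixels mark the region to restore)::

    import cv2

    img = cv2.imread("damaged.png")
    mask = cv2.imread("mask.png", 0)   # 8-bit single-channel mask
    restored = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)  # radius 3 px
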

@ -5,23 +5,20 @@
if(WIN32 AND CMAKE_BUILD_TYPE STREQUAL "Debug")
ocv_module_disable(python)
endif()
if(ANDROID OR IOS OR NOT PYTHONLIBS_FOUND OR NOT PYTHON_USE_NUMPY)
if(ANDROID OR IOS OR NOT PYTHONLIBS_FOUND OR NOT PYTHON_NUMPY_INCLUDE_DIR)
ocv_module_disable(python)
endif()
set(the_description "The python bindings")
ocv_add_module(python BINDINGS opencv_core opencv_flann opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_photo opencv_objdetect opencv_contrib opencv_legacy opencv_softcascade OPTIONAL opencv_nonfree)
add_definitions(-DPYTHON_USE_NUMPY=1)
ocv_module_include_directories(
"${PYTHON_INCLUDE_PATH}"
"${PYTHON_NUMPY_INCLUDE_DIR}"
"${CMAKE_CURRENT_SOURCE_DIR}/src2"
)
set(opencv_hdrs
"${OPENCV_MODULE_opencv_core_LOCATION}/include/opencv2/core.hpp"
"${OPENCV_MODULE_opencv_core_LOCATION}/include/opencv2/core/base.hpp"
@ -53,13 +50,6 @@ set(cv2_generated_hdrs
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_type_reg.h"
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_const_reg.h")
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/generated0.i
COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src2/gen.py" "${CMAKE_CURRENT_SOURCE_DIR}/src2"
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/api
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/defs
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/gen.py)
add_custom_command(
OUTPUT ${cv2_generated_hdrs}
COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src2/gen2.py" ${CMAKE_CURRENT_BINARY_DIR} ${opencv_hdrs}
@ -67,7 +57,7 @@ add_custom_command(
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/hdr_parser.py
DEPENDS ${opencv_hdrs})
add_library(${the_module} SHARED src2/cv2.cpp ${CMAKE_CURRENT_BINARY_DIR}/generated0.i ${cv2_generated_hdrs} src2/cv2.cv.hpp)
add_library(${the_module} SHARED src2/cv2.cpp ${cv2_generated_hdrs})
set_target_properties(${the_module} PROPERTIES COMPILE_DEFINITIONS OPENCV_NOSTL)
if(PYTHON_DEBUG_LIBRARIES AND NOT PYTHON_LIBRARIES MATCHES "optimized.*debug")

File diff suppressed because it is too large

@ -1 +0,0 @@
from cv2.cv import *

@ -1,12 +1,7 @@
#include <Python.h>
#if !PYTHON_USE_NUMPY
#error "The module can only be built if NumPy is available"
#endif
#define MODULESTR "cv2"
#include "numpy/ndarrayobject.h"
#include <numpy/ndarrayobject.h>
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
@ -14,7 +9,6 @@
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/ml.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/softcascade.hpp"
@ -22,13 +16,9 @@
#include "opencv2/photo.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/photo/photo_c.h"
#include "opencv2/video/tracking_c.h"
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/ml.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_NONFREE
# include "opencv2/nonfree.hpp"
#endif
@ -196,19 +186,14 @@ public:
depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT;
int i;
npy_intp _sizes[CV_MAX_DIM+1];
cv::AutoBuffer<npy_intp> _sizes(dims + 1);
for( i = 0; i < dims; i++ )
_sizes[i] = sizes[i];
if( cn > 1 )
{
/*if( _sizes[dims-1] == 1 )
_sizes[dims-1] = cn;
else*/
_sizes[dims++] = cn;
}
_sizes[dims++] = cn;
PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
if(!o)
CV_Error_(CV_StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
CV_Error_(Error::StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
refcount = refcountFromPyObject(o);
npy_intp* _strides = PyArray_STRIDES(o);
for( i = 0; i < dims - (cn > 1); i++ )
@ -229,11 +214,19 @@ public:
NumpyAllocator g_numpyAllocator;
template<typename T> static
bool pyopencv_to(PyObject* obj, T& p, const char* name = "<unknown>");
template<typename T> static
PyObject* pyopencv_from(const T& src);
enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
// special case, when the convertor needs full ArgInfo structure
static int pyopencv_to(const PyObject* o, Mat& m, const ArgInfo info, bool allowND=true)
static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
{
bool allowND = true;
if(!o || o == Py_None)
{
if( !m.data )
@ -306,6 +299,10 @@ static int pyopencv_to(const PyObject* o, Mat& m, const ArgInfo info, bool allow
}
}
#ifndef CV_MAX_DIM
const int CV_MAX_DIM = 32;
#endif
int ndims = PyArray_NDIM(o);
if(ndims >= CV_MAX_DIM)
{
@ -314,7 +311,8 @@ static int pyopencv_to(const PyObject* o, Mat& m, const ArgInfo info, bool allow
}
int size[CV_MAX_DIM+1];
size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
size_t step[CV_MAX_DIM+1];
size_t elemsize = CV_ELEM_SIZE1(type);
const npy_intp* _sizes = PyArray_DIMS(o);
const npy_intp* _strides = PyArray_STRIDES(o);
bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
@ -388,7 +386,8 @@ static int pyopencv_to(const PyObject* o, Mat& m, const ArgInfo info, bool allow
return true;
}
static PyObject* pyopencv_from(const Mat& m)
template<>
PyObject* pyopencv_from(const Mat& m)
{
if( !m.data )
Py_RETURN_NONE;
@ -403,7 +402,8 @@ static PyObject* pyopencv_from(const Mat& m)
return pyObjectFromRefcount(p->refcount);
}
static bool pyopencv_to(PyObject *o, Scalar& s, const char *name = "<unknown>")
template<>
bool pyopencv_to(PyObject *o, Scalar& s, const char *name)
{
if(!o || o == Py_None)
return true;
@ -437,17 +437,20 @@ static bool pyopencv_to(PyObject *o, Scalar& s, const char *name = "<unknown>")
return true;
}
static inline PyObject* pyopencv_from(const Scalar& src)
template<>
PyObject* pyopencv_from(const Scalar& src)
{
return Py_BuildValue("(dddd)", src[0], src[1], src[2], src[3]);
}
static PyObject* pyopencv_from(bool value)
template<>
PyObject* pyopencv_from(const bool& value)
{
return PyBool_FromLong(value);
}
static bool pyopencv_to(PyObject* obj, bool& value, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, bool& value, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -459,12 +462,14 @@ static bool pyopencv_to(PyObject* obj, bool& value, const char* name = "<unknown
return true;
}
static PyObject* pyopencv_from(size_t value)
template<>
PyObject* pyopencv_from(const size_t& value)
{
return PyLong_FromSize_t(value);
}
static bool pyopencv_to(PyObject* obj, size_t& value, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, size_t& value, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -473,22 +478,33 @@ static bool pyopencv_to(PyObject* obj, size_t& value, const char* name = "<unkno
return value != (size_t)-1 || !PyErr_Occurred();
}
static PyObject* pyopencv_from(int value)
template<>
PyObject* pyopencv_from(const int& value)
{
return PyInt_FromLong(value);
}
static PyObject* pyopencv_from(cvflann_flann_algorithm_t value)
template<>
PyObject* pyopencv_from(const cvflann_flann_algorithm_t& value)
{
return PyInt_FromLong(int(value));
}
static PyObject* pyopencv_from(cvflann_flann_distance_t value)
template<>
PyObject* pyopencv_from(const cvflann_flann_distance_t& value)
{
return PyInt_FromLong(int(value));
}
static bool pyopencv_to(PyObject* obj, int& value, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject*, cv::flann::SearchParams &, const char *)
{
CV_Assert(!"not implemented");
return false;
}
template<>
bool pyopencv_to(PyObject* obj, int& value, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -502,12 +518,14 @@ static bool pyopencv_to(PyObject* obj, int& value, const char* name = "<unknown>
return value != -1 || !PyErr_Occurred();
}
static PyObject* pyopencv_from(uchar value)
template<>
PyObject* pyopencv_from(const uchar& value)
{
return PyInt_FromLong(value);
}
static bool pyopencv_to(PyObject* obj, uchar& value, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, uchar& value, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -517,12 +535,14 @@ static bool pyopencv_to(PyObject* obj, uchar& value, const char* name = "<unknow
return ivalue != -1 || !PyErr_Occurred();
}
static PyObject* pyopencv_from(double value)
template<>
PyObject* pyopencv_from(const double& value)
{
return PyFloat_FromDouble(value);
}
static bool pyopencv_to(PyObject* obj, double& value, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, double& value, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -534,12 +554,14 @@ static bool pyopencv_to(PyObject* obj, double& value, const char* name = "<unkno
return !PyErr_Occurred();
}
static PyObject* pyopencv_from(float value)
template<>
PyObject* pyopencv_from(const float& value)
{
return PyFloat_FromDouble(value);
}
static bool pyopencv_to(PyObject* obj, float& value, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, float& value, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -551,17 +573,20 @@ static bool pyopencv_to(PyObject* obj, float& value, const char* name = "<unknow
return !PyErr_Occurred();
}
static PyObject* pyopencv_from(int64 value)
template<>
PyObject* pyopencv_from(const int64& value)
{
return PyLong_FromLongLong(value);
}
static PyObject* pyopencv_from(const String& value)
template<>
PyObject* pyopencv_from(const String& value)
{
return PyString_FromString(value.empty() ? "" : value.c_str());
}
static bool pyopencv_to(PyObject* obj, String& value, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, String& value, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -573,7 +598,8 @@ static bool pyopencv_to(PyObject* obj, String& value, const char* name = "<unkno
return true;
}
static inline bool pyopencv_to(PyObject* obj, Size& sz, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, Size& sz, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -581,12 +607,14 @@ static inline bool pyopencv_to(PyObject* obj, Size& sz, const char* name = "<unk
return PyArg_ParseTuple(obj, "ii", &sz.width, &sz.height) > 0;
}
static inline PyObject* pyopencv_from(const Size& sz)
template<>
PyObject* pyopencv_from(const Size& sz)
{
return Py_BuildValue("(ii)", sz.width, sz.height);
}
static inline bool pyopencv_to(PyObject* obj, Rect& r, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, Rect& r, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -594,12 +622,14 @@ static inline bool pyopencv_to(PyObject* obj, Rect& r, const char* name = "<unkn
return PyArg_ParseTuple(obj, "iiii", &r.x, &r.y, &r.width, &r.height) > 0;
}
static inline PyObject* pyopencv_from(const Rect& r)
template<>
PyObject* pyopencv_from(const Rect& r)
{
return Py_BuildValue("(iiii)", r.x, r.y, r.width, r.height);
}
static inline bool pyopencv_to(PyObject* obj, Range& r, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, Range& r, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -612,30 +642,14 @@ static inline bool pyopencv_to(PyObject* obj, Range& r, const char* name = "<unk
return PyArg_ParseTuple(obj, "ii", &r.start, &r.end) > 0;
}
static inline PyObject* pyopencv_from(const Range& r)
template<>
PyObject* pyopencv_from(const Range& r)
{
return Py_BuildValue("(ii)", r.start, r.end);
}
static inline bool pyopencv_to(PyObject* obj, CvSlice& r, const char* name = "<unknown>")
{
(void)name;
if(!obj || obj == Py_None)
return true;
if(PyObject_Size(obj) == 0)
{
r = CV_WHOLE_SEQ;
return true;
}
return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0;
}
static inline PyObject* pyopencv_from(const CvSlice& r)
{
return Py_BuildValue("(ii)", r.start_index, r.end_index);
}
static inline bool pyopencv_to(PyObject* obj, Point& p, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, Point& p, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -650,7 +664,8 @@ static inline bool pyopencv_to(PyObject* obj, Point& p, const char* name = "<unk
return PyArg_ParseTuple(obj, "ii", &p.x, &p.y) > 0;
}
static inline bool pyopencv_to(PyObject* obj, Point2f& p, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, Point2f& p, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
@ -665,17 +680,20 @@ static inline bool pyopencv_to(PyObject* obj, Point2f& p, const char* name = "<u
return PyArg_ParseTuple(obj, "ff", &p.x, &p.y) > 0;
}
static inline PyObject* pyopencv_from(const Point& p)
template<>
PyObject* pyopencv_from(const Point& p)
{
return Py_BuildValue("(ii)", p.x, p.y);
}
static inline PyObject* pyopencv_from(const Point2f& p)
template<>
PyObject* pyopencv_from(const Point2f& p)
{
return Py_BuildValue("(dd)", p.x, p.y);
}
static inline bool pyopencv_to(PyObject* obj, Vec3d& v, const char* name = "<unknown>")
template<>
bool pyopencv_to(PyObject* obj, Vec3d& v, const char* name)
{
(void)name;
if(!obj)
@ -683,17 +701,20 @@ static inline bool pyopencv_to(PyObject* obj, Vec3d& v, const char* name = "<unk
return PyArg_ParseTuple(obj, "ddd", &v[0], &v[1], &v[2]) > 0;
}
static inline PyObject* pyopencv_from(const Vec3d& v)
template<>
PyObject* pyopencv_from(const Vec3d& v)
{
return Py_BuildValue("(ddd)", v[0], v[1], v[2]);
}
static inline PyObject* pyopencv_from(const Vec2d& v)
template<>
PyObject* pyopencv_from(const Vec2d& v)
{
return Py_BuildValue("(dd)", v[0], v[1]);
}
static inline PyObject* pyopencv_from(const Point2d& p)
template<>
PyObject* pyopencv_from(const Point2d& p)
{
return Py_BuildValue("(dd)", p.x, p.y);
}
@ -807,20 +828,18 @@ template<typename _Tp> struct pyopencvVecConverter
}
};
template<typename _Tp> static inline bool pyopencv_to(PyObject* obj, std::vector<_Tp>& value, const ArgInfo info)
template <typename _Tp>
bool pyopencv_to(PyObject* obj, std::vector<_Tp>& value, const ArgInfo info)
{
return pyopencvVecConverter<_Tp>::to(obj, value, info);
}
template<typename _Tp> static inline PyObject* pyopencv_from(const std::vector<_Tp>& value)
template<typename _Tp>
PyObject* pyopencv_from(const std::vector<_Tp>& value)
{
return pyopencvVecConverter<_Tp>::from(value);
}
static PyObject* pyopencv_from(const KeyPoint&);
static PyObject* pyopencv_from(const DMatch&);
template<typename _Tp> static inline bool pyopencv_to_generic_vec(PyObject* obj, std::vector<_Tp>& value, const ArgInfo info)
{
if(!obj || obj == Py_None)
@ -930,21 +949,8 @@ template<> struct pyopencvVecConverter<String>
}
};
static inline bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const char *name="<unknown>")
{
(void)name;
if(!obj)
return true;
return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.max_iter, &dst.epsilon) > 0;
}
static inline PyObject* pyopencv_from(const CvTermCriteria& src)
{
return Py_BuildValue("(iid)", src.type, src.max_iter, src.epsilon);
}
static inline bool pyopencv_to(PyObject *obj, TermCriteria& dst, const char *name="<unknown>")
template<>
bool pyopencv_to(PyObject *obj, TermCriteria& dst, const char *name)
{
(void)name;
if(!obj)
@ -952,12 +958,14 @@ static inline bool pyopencv_to(PyObject *obj, TermCriteria& dst, const char *nam
return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.maxCount, &dst.epsilon) > 0;
}
static inline PyObject* pyopencv_from(const TermCriteria& src)
template<>
PyObject* pyopencv_from(const TermCriteria& src)
{
return Py_BuildValue("(iid)", src.type, src.maxCount, src.epsilon);
}
static inline bool pyopencv_to(PyObject *obj, RotatedRect& dst, const char *name="<unknown>")
template<>
bool pyopencv_to(PyObject *obj, RotatedRect& dst, const char *name)
{
(void)name;
if(!obj)
@ -965,12 +973,14 @@ static inline bool pyopencv_to(PyObject *obj, RotatedRect& dst, const char *name
return PyArg_ParseTuple(obj, "(ff)(ff)f", &dst.center.x, &dst.center.y, &dst.size.width, &dst.size.height, &dst.angle) > 0;
}
static inline PyObject* pyopencv_from(const RotatedRect& src)
template<>
PyObject* pyopencv_from(const RotatedRect& src)
{
return Py_BuildValue("((ff)(ff)f)", src.center.x, src.center.y, src.size.width, src.size.height, src.angle);
}
static inline PyObject* pyopencv_from(const Moments& m)
template<>
PyObject* pyopencv_from(const Moments& m)
{
return Py_BuildValue("{s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d}",
"m00", m.m00, "m10", m.m10, "m01", m.m01,
@ -982,14 +992,8 @@ static inline PyObject* pyopencv_from(const Moments& m)
"nu30", m.nu30, "nu21", m.nu21, "nu12", m.nu12, "nu03", m.nu03);
}
static inline PyObject* pyopencv_from(const CvDTreeNode* node)
{
double value = node->value;
int ivalue = cvRound(value);
return value == ivalue ? PyInt_FromLong(ivalue) : PyFloat_FromDouble(value);
}
static bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name="<unknown>")
template<>
bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name)
{
(void)name;
bool ok = false;
@ -1037,15 +1041,15 @@ static bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name
return ok;
}
template <class T>
static bool pyopencv_to(PyObject *o, Ptr<T>& p, const char *name="<unknown>")
template <typename T>
bool pyopencv_to(PyObject *o, Ptr<T>& p, const char *name)
{
p = new T();
return pyopencv_to(o, *p, name);
}
static bool pyopencv_to(PyObject *o, cvflann::flann_distance_t& dist, const char *name="<unknown>")
template<>
bool pyopencv_to(PyObject *o, cvflann::flann_distance_t& dist, const char *name)
{
int d = (int)dist;
bool ok = pyopencv_to(o, d, name);
@ -1053,6 +1057,41 @@ static bool pyopencv_to(PyObject *o, cvflann::flann_distance_t& dist, const char
return ok;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// TODO: REMOVE used only by ml wrapper
template<>
bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const char *name)
{
(void)name;
if(!obj)
return true;
return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.max_iter, &dst.epsilon) > 0;
}
template<>
bool pyopencv_to(PyObject* obj, CvSlice& r, const char* name)
{
(void)name;
if(!obj || obj == Py_None)
return true;
if(PyObject_Size(obj) == 0)
{
r = CV_WHOLE_SEQ;
return true;
}
return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0;
}
template<>
PyObject* pyopencv_from(CvDTreeNode* const & node)
{
double value = node->value;
int ivalue = cvRound(value);
return value == ivalue ? PyInt_FromLong(ivalue) : PyFloat_FromDouble(value);
}
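The CvDTreeNode* conversion above returns a Python int when the stored prediction is integral and a float otherwise; a standalone Python sketch mirroring that rule:

def from_node_value(value):
    # mirror the C++ rule: integral doubles come back as Python ints
    ivalue = int(round(value))
    return ivalue if value == ivalue else value

print(type(from_node_value(3.0)).__name__)  # int
print(type(from_node_value(2.5)).__name__)  # float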
////////////////////////////////////////////////////////////////////////////////////////////////////
static void OnMouse(int event, int x, int y, int flags, void* param)
@ -1088,7 +1127,7 @@ static PyObject *pycvSetMouseCallback(PyObject*, PyObject *args, PyObject *kw)
if (param == NULL) {
param = Py_None;
}
ERRWRAP2(cvSetMouseCallback(name, OnMouse, Py_BuildValue("OO", on_mouse, param)));
ERRWRAP2(setMouseCallback(name, OnMouse, Py_BuildValue("OO", on_mouse, param)));
Py_RETURN_NONE;
}
@ -1120,12 +1159,23 @@ static PyObject *pycvCreateTrackbar(PyObject*, PyObject *args)
PyErr_SetString(PyExc_TypeError, "on_change must be callable");
return NULL;
}
ERRWRAP2(cvCreateTrackbar2(trackbar_name, window_name, value, count, OnChange, Py_BuildValue("OO", on_change, Py_None)));
ERRWRAP2(createTrackbar(trackbar_name, window_name, value, count, OnChange, Py_BuildValue("OO", on_change, Py_None)));
Py_RETURN_NONE;
}
///////////////////////////////////////////////////////////////////////////////////////
static int convert_to_char(PyObject *o, char *dst, const char *name = "no_name")
{
if (PyString_Check(o) && PyString_Size(o) == 1) {
*dst = PyString_AsString(o)[0];
return 1;
} else {
(*dst) = 0;
return failmsg("Expected single character string for argument '%s'", name);
}
}
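convert_to_char accepts exactly a one-character string and reports a failure otherwise; a Python-side sketch of the same contract (not part of the wrapper itself):

def convert_to_char(s):
    # mirror convert_to_char: only a length-1 string is a valid char argument
    if isinstance(s, str) and len(s) == 1:
        return s
    raise TypeError("Expected single character string")

print(convert_to_char("x"))  # ok
# convert_to_char("xy")      # would raise TypeError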
#define MKTYPE2(NAME) pyopencv_##NAME##_specials(); if (!to_ok(&pyopencv_##NAME##_Type)) return
#ifdef __GNUC__
@ -1155,8 +1205,6 @@ static int to_ok(PyTypeObject *to)
return (PyType_Ready(to) == 0);
}
#include "cv2.cv.hpp"
extern "C"
#if defined WIN32 || defined _WIN32
__declspec(dllexport)
@ -1165,13 +1213,9 @@ void initcv2();
void initcv2()
{
#if PYTHON_USE_NUMPY
import_array();
#endif
import_array();
#if PYTHON_USE_NUMPY
#include "pyopencv_generated_type_reg.h"
#endif
PyObject* m = Py_InitModule(MODULESTR, methods);
PyObject* d = PyModule_GetDict(m);
@ -1181,27 +1225,10 @@ void initcv2()
opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, NULL);
PyDict_SetItemString(d, "error", opencv_error);
PyObject* cv_m = init_cv();
PyDict_SetItemString(d, "cv", cv_m);
#define PUBLISH(I) PyDict_SetItemString(d, #I, PyInt_FromLong(I))
#define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I))
//#define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I))
#define PUBLISH2(I, value) PyDict_SetItemString(d, #I, PyLong_FromLong(value))
PUBLISHU(IPL_DEPTH_8U);
PUBLISHU(IPL_DEPTH_8S);
PUBLISHU(IPL_DEPTH_16U);
PUBLISHU(IPL_DEPTH_16S);
PUBLISHU(IPL_DEPTH_32S);
PUBLISHU(IPL_DEPTH_32F);
PUBLISHU(IPL_DEPTH_64F);
PUBLISH(CV_LOAD_IMAGE_COLOR);
PUBLISH(CV_LOAD_IMAGE_GRAYSCALE);
PUBLISH(CV_LOAD_IMAGE_UNCHANGED);
PUBLISH(CV_HIST_ARRAY);
PUBLISH(CV_HIST_SPARSE);
PUBLISH(CV_8U);
PUBLISH(CV_8UC1);
PUBLISH(CV_8UC2);
@ -1237,37 +1264,7 @@ void initcv2()
PUBLISH(CV_64FC2);
PUBLISH(CV_64FC3);
PUBLISH(CV_64FC4);
PUBLISH(CV_NEXT_AROUND_ORG);
PUBLISH(CV_NEXT_AROUND_DST);
PUBLISH(CV_PREV_AROUND_ORG);
PUBLISH(CV_PREV_AROUND_DST);
PUBLISH(CV_NEXT_AROUND_LEFT);
PUBLISH(CV_NEXT_AROUND_RIGHT);
PUBLISH(CV_PREV_AROUND_LEFT);
PUBLISH(CV_PREV_AROUND_RIGHT);
PUBLISH(CV_WINDOW_AUTOSIZE);
PUBLISH(CV_PTLOC_INSIDE);
PUBLISH(CV_PTLOC_ON_EDGE);
PUBLISH(CV_PTLOC_VERTEX);
PUBLISH(CV_PTLOC_OUTSIDE_RECT);
PUBLISH(GC_BGD);
PUBLISH(GC_FGD);
PUBLISH(GC_PR_BGD);
PUBLISH(GC_PR_FGD);
PUBLISH(GC_INIT_WITH_RECT);
PUBLISH(GC_INIT_WITH_MASK);
PUBLISH(GC_EVAL);
PUBLISH(CV_ROW_SAMPLE);
PUBLISH(CV_VAR_NUMERICAL);
PUBLISH(CV_VAR_ORDERED);
PUBLISH(CV_VAR_CATEGORICAL);
PUBLISH(CV_AA);
#include "pyopencv_generated_const_reg.h"
}
}

File diff suppressed because it is too large

@ -1,360 +0,0 @@
#define CV_BLUR_NO_SCALE 0
#define CV_BLUR 1
#define CV_GAUSSIAN 2
#define CV_MEDIAN 3
#define CV_BILATERAL 4
#define CV_INPAINT_NS 0
#define CV_INPAINT_TELEA 1
#define CV_SCHARR -1
#define CV_MAX_SOBEL_KSIZE 7
#define CV_BGR2BGRA 0
#define CV_RGB2RGBA CV_BGR2BGRA
#define CV_BGRA2BGR 1
#define CV_RGBA2RGB CV_BGRA2BGR
#define CV_BGR2RGBA 2
#define CV_RGB2BGRA CV_BGR2RGBA
#define CV_RGBA2BGR 3
#define CV_BGRA2RGB CV_RGBA2BGR
#define CV_BGR2RGB 4
#define CV_RGB2BGR CV_BGR2RGB
#define CV_BGRA2RGBA 5
#define CV_RGBA2BGRA CV_BGRA2RGBA
#define CV_BGR2GRAY 6
#define CV_RGB2GRAY 7
#define CV_GRAY2BGR 8
#define CV_GRAY2RGB CV_GRAY2BGR
#define CV_GRAY2BGRA 9
#define CV_GRAY2RGBA CV_GRAY2BGRA
#define CV_BGRA2GRAY 10
#define CV_RGBA2GRAY 11
#define CV_BGR2BGR565 12
#define CV_RGB2BGR565 13
#define CV_BGR5652BGR 14
#define CV_BGR5652RGB 15
#define CV_BGRA2BGR565 16
#define CV_RGBA2BGR565 17
#define CV_BGR5652BGRA 18
#define CV_BGR5652RGBA 19
#define CV_GRAY2BGR565 20
#define CV_BGR5652GRAY 21
#define CV_BGR2BGR555 22
#define CV_RGB2BGR555 23
#define CV_BGR5552BGR 24
#define CV_BGR5552RGB 25
#define CV_BGRA2BGR555 26
#define CV_RGBA2BGR555 27
#define CV_BGR5552BGRA 28
#define CV_BGR5552RGBA 29
#define CV_GRAY2BGR555 30
#define CV_BGR5552GRAY 31
#define CV_BGR2XYZ 32
#define CV_RGB2XYZ 33
#define CV_XYZ2BGR 34
#define CV_XYZ2RGB 35
#define CV_BGR2YCrCb 36
#define CV_RGB2YCrCb 37
#define CV_YCrCb2BGR 38
#define CV_YCrCb2RGB 39
#define CV_BGR2HSV 40
#define CV_RGB2HSV 41
#define CV_BGR2Lab 44
#define CV_RGB2Lab 45
#define CV_BayerBG2BGR 46
#define CV_BayerGB2BGR 47
#define CV_BayerRG2BGR 48
#define CV_BayerGR2BGR 49
#define CV_BayerBG2RGB CV_BayerRG2BGR
#define CV_BayerGB2RGB CV_BayerGR2BGR
#define CV_BayerRG2RGB CV_BayerBG2BGR
#define CV_BayerGR2RGB CV_BayerGB2BGR
#define CV_BayerBG2BGR_VNG 62
#define CV_BayerGB2BGR_VNG 63
#define CV_BayerRG2BGR_VNG 64
#define CV_BayerGR2BGR_VNG 65
#define CV_BGR2Luv 50
#define CV_RGB2Luv 51
#define CV_BGR2HLS 52
#define CV_RGB2HLS 53
#define CV_HSV2BGR 54
#define CV_HSV2RGB 55
#define CV_Lab2BGR 56
#define CV_Lab2RGB 57
#define CV_Luv2BGR 58
#define CV_Luv2RGB 59
#define CV_HLS2BGR 60
#define CV_HLS2RGB 61
#define CV_COLORCVT_MAX 100
#define CV_INTER_NN 0
#define CV_INTER_LINEAR 1
#define CV_INTER_CUBIC 2
#define CV_INTER_AREA 3
#define CV_WARP_FILL_OUTLIERS 8
#define CV_WARP_INVERSE_MAP 16
#define CV_SHAPE_RECT 0
#define CV_SHAPE_CROSS 1
#define CV_SHAPE_ELLIPSE 2
#define CV_SHAPE_CUSTOM 100
#define CV_MOP_OPEN 2
#define CV_MOP_CLOSE 3
#define CV_MOP_GRADIENT 4
#define CV_MOP_TOPHAT 5
#define CV_MOP_BLACKHAT 6
#define CV_TM_SQDIFF 0
#define CV_TM_SQDIFF_NORMED 1
#define CV_TM_CCORR 2
#define CV_TM_CCORR_NORMED 3
#define CV_TM_CCOEFF 4
#define CV_TM_CCOEFF_NORMED 5
#define CV_LKFLOW_PYR_A_READY 1
#define CV_LKFLOW_PYR_B_READY 2
#define CV_LKFLOW_INITIAL_GUESSES 4
#define CV_LKFLOW_GET_MIN_EIGENVALS 8
#define CV_POLY_APPROX_DP 0
#define CV_CONTOURS_MATCH_I1 1
#define CV_CONTOURS_MATCH_I2 2
#define CV_CONTOURS_MATCH_I3 3
#define CV_CLOCKWISE 1
#define CV_COUNTER_CLOCKWISE 2
#define CV_COMP_CORREL 0
#define CV_COMP_CHISQR 1
#define CV_COMP_INTERSECT 2
#define CV_COMP_BHATTACHARYYA 3
#define CV_DIST_MASK_3 3
#define CV_DIST_MASK_5 5
#define CV_DIST_MASK_PRECISE 0
#define CV_THRESH_BINARY 0 /* value = value > threshold ? max_value : 0 */
#define CV_THRESH_BINARY_INV 1 /* value = value > threshold ? 0 : max_value */
#define CV_THRESH_TRUNC 2 /* value = value > threshold ? threshold : value */
#define CV_THRESH_TOZERO 3 /* value = value > threshold ? value : 0 */
#define CV_THRESH_TOZERO_INV 4 /* value = value > threshold ? 0 : value */
#define CV_THRESH_MASK 7
#define CV_THRESH_OTSU 8 /* use Otsu algorithm to choose the optimal threshold value; combine the flag with one of the above CV_THRESH_* values */
#define CV_ADAPTIVE_THRESH_MEAN_C 0
#define CV_ADAPTIVE_THRESH_GAUSSIAN_C 1
#define CV_FLOODFILL_FIXED_RANGE (1 << 16)
#define CV_FLOODFILL_MASK_ONLY (1 << 17)
#define CV_CANNY_L2_GRADIENT (1 << 31)
#define CV_HOUGH_STANDARD 0
#define CV_HOUGH_PROBABILISTIC 1
#define CV_HOUGH_MULTI_SCALE 2
#define CV_HOUGH_GRADIENT 3
#define CV_HAAR_DO_CANNY_PRUNING 1
#define CV_HAAR_SCALE_IMAGE 2
#define CV_HAAR_FIND_BIGGEST_OBJECT 4
#define CV_HAAR_DO_ROUGH_SEARCH 8
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_CALIB_CB_ADAPTIVE_THRESH 1
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
#define CV_CALIB_ZERO_DISPARITY 1024
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2
#define CV_STEREO_GC_OCCLUDED SHRT_MAX
#define CV_AUTOSTEP 0x7fffffff
#define CV_MAX_ARR 10
#define CV_NO_DEPTH_CHECK 1
#define CV_NO_CN_CHECK 2
#define CV_NO_SIZE_CHECK 4
#define CV_CMP_EQ 0
#define CV_CMP_GT 1
#define CV_CMP_GE 2
#define CV_CMP_LT 3
#define CV_CMP_LE 4
#define CV_CMP_NE 5
#define CV_CHECK_RANGE 1
#define CV_CHECK_QUIET 2
#define CV_RAND_UNI 0
#define CV_RAND_NORMAL 1
#define CV_SORT_EVERY_ROW 0
#define CV_SORT_EVERY_COLUMN 1
#define CV_SORT_ASCENDING 0
#define CV_SORT_DESCENDING 16
#define CV_GEMM_A_T 1
#define CV_GEMM_B_T 2
#define CV_GEMM_C_T 4
#define CV_SVD_MODIFY_A 1
#define CV_SVD_U_T 2
#define CV_SVD_V_T 4
#define CV_LU 0
#define CV_SVD 1
#define CV_SVD_SYM 2
#define CV_CHOLESKY 3
#define CV_QR 4
#define CV_NORMAL 16
#define CV_COVAR_SCRAMBLED 0
#define CV_COVAR_NORMAL 1
#define CV_COVAR_USE_AVG 2
#define CV_COVAR_SCALE 4
#define CV_COVAR_ROWS 8
#define CV_COVAR_COLS 16
#define CV_PCA_DATA_AS_ROW 0
#define CV_PCA_DATA_AS_COL 1
#define CV_PCA_USE_AVG 2
#define CV_C 1
#define CV_L1 2
#define CV_L2 4
#define CV_NORM_MASK 7
#define CV_RELATIVE 8
#define CV_DIFF 16
#define CV_MINMAX 32
#define CV_DIFF_C (CV_DIFF | CV_C)
#define CV_DIFF_L1 (CV_DIFF | CV_L1)
#define CV_DIFF_L2 (CV_DIFF | CV_L2)
#define CV_RELATIVE_C (CV_RELATIVE | CV_C)
#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1)
#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2)
#define CV_REDUCE_SUM 0
#define CV_REDUCE_AVG 1
#define CV_REDUCE_MAX 2
#define CV_REDUCE_MIN 3
#define CV_DXT_FORWARD 0
#define CV_DXT_INVERSE 1
#define CV_DXT_SCALE 2 /* divide result by size of array */
#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE)
#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE
#define CV_DXT_ROWS 4 /* transform each row individually */
#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */
#define CV_FRONT 1
#define CV_BACK 0
#define CV_GRAPH_VERTEX 1
#define CV_GRAPH_TREE_EDGE 2
#define CV_GRAPH_BACK_EDGE 4
#define CV_GRAPH_FORWARD_EDGE 8
#define CV_GRAPH_CROSS_EDGE 16
#define CV_GRAPH_ANY_EDGE 30
#define CV_GRAPH_NEW_TREE 32
#define CV_GRAPH_BACKTRACKING 64
#define CV_GRAPH_OVER -1
#define CV_GRAPH_ALL_ITEMS -1
#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30)
#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29)
#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28)
#define CV_FILLED -1
#define CV_AA 16
#define CV_FONT_HERSHEY_SIMPLEX 0
#define CV_FONT_HERSHEY_PLAIN 1
#define CV_FONT_HERSHEY_DUPLEX 2
#define CV_FONT_HERSHEY_COMPLEX 3
#define CV_FONT_HERSHEY_TRIPLEX 4
#define CV_FONT_HERSHEY_COMPLEX_SMALL 5
#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6
#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7
#define CV_FONT_ITALIC 16
#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX
#define CV_KMEANS_USE_INITIAL_LABELS 1
#define CV_ErrModeLeaf 0 /* Print error and exit program */
#define CV_ErrModeParent 1 /* Print error and continue */
#define CV_ErrModeSilent 2 /* Don't print and continue */
#define CV_RETR_EXTERNAL 0
#define CV_RETR_LIST 1
#define CV_RETR_CCOMP 2
#define CV_RETR_TREE 3
#define CV_CHAIN_CODE 0
#define CV_CHAIN_APPROX_NONE 1
#define CV_CHAIN_APPROX_SIMPLE 2
#define CV_CHAIN_APPROX_TC89_L1 3
#define CV_CHAIN_APPROX_TC89_KCOS 4
#define CV_LINK_RUNS 5
#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
#define CV_DIST_USER -1 /* User defined distance */
#define CV_DIST_L1 1 /* distance = |x1-x2| + |y1-y2| */
#define CV_DIST_L2 2 /* the simple euclidean distance */
#define CV_DIST_C 3 /* distance = max(|x1-x2|,|y1-y2|) */
#define CV_DIST_L12 4 /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */
#define CV_DIST_FAIR 5 /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */
#define CV_DIST_WELSCH 6 /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */
#define CV_DIST_HUBER 7 /* distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 */
#define CV_HAAR_MAGIC_VAL 0x42500000
#define CV_HAAR_FEATURE_MAX 3
#define CV_TERMCRIT_ITER 1
#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER
#define CV_TERMCRIT_EPS 2
#define CV_EVENT_MOUSEMOVE 0
#define CV_EVENT_LBUTTONDOWN 1
#define CV_EVENT_RBUTTONDOWN 2
#define CV_EVENT_MBUTTONDOWN 3
#define CV_EVENT_LBUTTONUP 4
#define CV_EVENT_RBUTTONUP 5
#define CV_EVENT_MBUTTONUP 6
#define CV_EVENT_LBUTTONDBLCLK 7
#define CV_EVENT_RBUTTONDBLCLK 8
#define CV_EVENT_MBUTTONDBLCLK 9
#define CV_EVENT_FLAG_LBUTTON 1
#define CV_EVENT_FLAG_RBUTTON 2
#define CV_EVENT_FLAG_MBUTTON 4
#define CV_EVENT_FLAG_CTRLKEY 8
#define CV_EVENT_FLAG_SHIFTKEY 16
#define CV_EVENT_FLAG_ALTKEY 32
#define CV_MAX_DIM 32
#define CV_CAP_PROP_POS_MSEC 0
#define CV_CAP_PROP_POS_FRAMES 1
#define CV_CAP_PROP_POS_AVI_RATIO 2
#define CV_CAP_PROP_FRAME_WIDTH 3
#define CV_CAP_PROP_FRAME_HEIGHT 4
#define CV_CAP_PROP_FPS 5
#define CV_CAP_PROP_FOURCC 6
#define CV_CAP_PROP_FRAME_COUNT 7
#define CV_CAP_PROP_FORMAT 8
#define CV_CAP_PROP_MODE 9
#define CV_CAP_PROP_BRIGHTNESS 10
#define CV_CAP_PROP_CONTRAST 11
#define CV_CAP_PROP_SATURATION 12
#define CV_CAP_PROP_HUE 13
#define CV_CAP_PROP_GAIN 14
#define CV_CAP_PROP_EXPOSURE 15
#define CV_CAP_PROP_CONVERT_RGB 16
#define CV_CAP_PROP_RECTIFICATION 18
#define CV_CAP_OPENNI 900
#define CV_CAP_OPENNI_DEPTH_GENERATOR 2147483648
#define CV_CAP_OPENNI_IMAGE_GENERATOR 1073741824
#define CV_CAP_OPENNI_DEPTH_MAP 0
#define CV_CAP_OPENNI_POINT_CLOUD_MAP 1
#define CV_CAP_OPENNI_DISPARITY_MAP 2
#define CV_CAP_OPENNI_DISPARITY_MAP_32F 3
#define CV_CAP_OPENNI_VALID_DEPTH_MASK 4
#define CV_CAP_OPENNI_BGR_IMAGE 5
#define CV_CAP_OPENNI_GRAY_IMAGE 6
#define CV_CAP_PROP_OPENNI_OUTPUT_MODE 100
#define CV_CAP_OPENNI_VGA_30HZ 0
#define CV_CAP_OPENNI_SXGA_15HZ 1
#define CV_CAP_PROP_OPENNI_REGISTRATION 104
#define CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH 101
#define CV_CAP_PROP_OPENNI_BASELINE 102
#define CV_CAP_PROP_OPENNI_FOCAL_LENGTH 103
#define CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE 1073741924
#define CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE 2147483750
#define CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH 2147483751
#define CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION 2147483752
#define CV_CN_SHIFT 3
#define CV_IMWRITE_JPEG_QUALITY 1
#define CV_IMWRITE_PNG_COMPRESSION 16
#define CV_IMWRITE_PXM_BINARY 32
#define IPL_ORIGIN_TL 0
#define IPL_ORIGIN_BL 1
#define CV_GAUSSIAN_5x5
#define CV_CN_MAX
#define CV_WINDOW_AUTOSIZE 1
#define CV_WINDOW_NORMAL 0
#define CV_WINDOW_FULLSCREEN 1
#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE
#define CV_CVTIMG_FLIP 1
#define CV_CVTIMG_SWAP_RB 2

@ -1,631 +0,0 @@
#!/usr/bin/env python
import sys
from string import Template
class argument:
def __init__(self, fields):
self.ty = fields[0]
self.nm = fields[1]
self.flags = ""
self.init = None
if len(fields) > 2:
if fields[2][0] == '/':
self.flags = fields[2][1:].split(",")
else:
self.init = fields[2]
api = []
for l in open("%s/api" % sys.argv[1]):
if l[0] == '#':
continue
l = l.rstrip()
if (not l.startswith(' ')) and ('/' in l):
(l, flags) = l.split('/')
else:
flags = ""
f = l.split()
if len(f) != 0:
if l[0] != ' ':
if len(f) > 1:
ty = f[1]
else:
ty = None
api.append((f[0], [], ty, flags))
else:
api[-1][1].append(argument(f))
# Validation: check that any optional arguments are last
had_error = False
for (f, args, ty, flags) in api:
has_init = [(a.init != None) for a in args if not 'O' in a.flags]
if True in has_init and not all(has_init[has_init.index(True):]):
print 'Error in definition for "%s", optional arguments must be last' % f
had_error = True
if had_error:
sys.exit(1)
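The validation above rejects any definition in which a defaulted argument is followed by a mandatory one; the same predicate restated as a standalone sketch:

def optionals_are_last(has_init):
    # has_init: one flag per argument, True where a default value exists
    return True not in has_init or all(has_init[has_init.index(True):])

print(optionals_are_last([False, True, True]))   # True: defaults are trailing
print(optionals_are_last([False, True, False]))  # False: mandatory after optional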
def cname(n):
if n.startswith("CV"):
return '_' + n
elif n[0].isdigit():
return '_' + n
else:
return n
# RHS is how the aggregate gets expanded in the C call
aggregate = {
'pts_npts_contours' : '!.pts,!.npts,!.contours',
'cvarr_count' : '!.cvarr,!.count',
'cvarr_plane_count' : '!.cvarr,!.count',
'floats' : '!.f',
'ints' : '!.i',
'ints0' : '!.i',
'CvPoints' : '!.p,!.count',
'CvPoint2D32fs' : '!.p,!.count',
'CvPoint3D32fs' : '!.p,!.count',
'cvarrseq' : '!.seq',
'CvArrs' : '!.ims',
'IplImages' : '!.ims',
'intpair' : '!.pairs,!.count',
'cvpoint2d32f_count' : '!.points,&!.count'
}
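Each '!' in these right-hand sides is later replaced by the argument's name when the C call is assembled (see invokename below); for example:

aggregate = {'cvarr_count': '!.cvarr,!.count'}

def expand(entry, arg_name):
    # substitute the argument name for every '!' placeholder
    return entry.replace('!', arg_name)

print(expand(aggregate['cvarr_count'], 'images'))  # images.cvarr,images.count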
conversion_types = [
'char',
'CvArr',
'CvArrSeq',
'CvBox2D', # '((ff)(ff)f)',
'CvBox2D*',
'CvCapture*',
'CvStereoBMState*',
'CvStereoGCState*',
'CvKalman*',
'CvVideoWriter*',
'CvContourTree*',
'CvFont',
'CvFont*',
'CvHaarClassifierCascade*',
'CvHistogram',
'CvMat',
'CvMatND',
'CvMemStorage',
'CvMoments',
'CvMoments*',
'CvNextEdgeType',
'CvPoint',
'CvPoint*',
'CvPoint2D32f', # '(ff)',
'CvPoint2D32f*',
'CvPoint3D32f*',
'CvPoint2D64f',
'CvPOSITObject*',
'CvRect',
'CvRect*',
'CvRNG*',
'CvScalar',
'CvSeq',
'CvSeqOfCvConvexityDefect',
'CvSize',
'CvSlice',
'CvStarDetectorParams',
'CvSubdiv2D*',
'CvSubdiv2DEdge',
'CvTermCriteria',
'generic',
'IplConvKernel*',
'IplImage',
'PyObject*',
'PyCallableObject*'
]
def safename(s):
return s.replace('*', 'PTR').replace('[', '_').replace(']', '_')
def has_optional(al):
""" return true if any argument is optional """
return any([a.init for a in al])
def gen(name, args, ty, flags):
yield ""
if has_optional(args):
yield "static PyObject *pycv%s(PyObject *self, PyObject *args, PyObject *kw)" % cname(name)
else:
yield "static PyObject *pycv%s(PyObject *self, PyObject *args)" % cname(name)
if 'doconly' in flags:
yield ";"
else:
yield "{"
destinations = []
for a in args:
remap = {
'CvArr' : 'CvArr*',
'CvMat' : 'CvMat*',
'CvMatND' : 'CvMatND*',
'IplImage' : 'IplImage*',
'CvMemStorage' : 'CvMemStorage*',
'CvHistogram':'CvHistogram*',
'CvSeq':'CvSeq*',
'CvHaarClassifierCascade' : 'CvHaarClassifierCascade*'
}
ctype = remap.get(a.ty, a.ty)
if a.init:
init = " = %s" % a.init
else:
init = ''
yield " %s %s%s;" % (ctype, a.nm, init)
if 'O' in a.flags:
continue
if a.ty in (conversion_types + aggregate.keys()):
yield ' PyObject *pyobj_%s = NULL;' % (a.nm)
destinations.append('&pyobj_%s' % (a.nm))
elif a.ty in [ 'CvPoint2D32f' ]:
destinations.append('&%s.x, &%s.y' % (a.nm, a.nm))
elif a.ty in [ 'CvTermCriteria' ]:
destinations.append('&%s.type, &%s.max_iter, &%s.epsilon' % ((a.nm,)*3))
elif a.ty in [ 'CvSURFParams' ]:
destinations.append('&%s.extended, &%s.hessianThreshold, &%s.nOctaves, &%s.nOctaveLayers' % ((a.nm,)*4))
elif a.nm in [ 'CvBox2D' ]:
s = ", ".join([('&' + a.nm +'.' + fld) for fld in [ 'center.x', 'center.y', 'size.width', 'size.height', 'angle' ] ])
destinations.append(s)
else:
destinations.append('&%s' % a.nm)
fmap = {
'CvSURFParams' : '(idii)',
'double' : 'd',
'float' : 'f',
'int' : 'i',
'int64' : 'L',
'char*' : 's',
}
for k in (conversion_types + aggregate.keys()):
fmap[k] = 'O'
in_args = [ a for a in args if not 'O' in a.flags ]
fmt0 = "".join([ fmap[a.ty] for a in in_args if not a.init])
fmt1 = "".join([ fmap[a.ty] for a in in_args if a.init])
yield ''
if len(fmt0 + fmt1) > 0:
if len(fmt1) > 0:
yield ' const char *keywords[] = { %s };' % (", ".join([ '"%s"' % arg.nm for arg in args if not 'O' in arg.flags ] + ['NULL']))
yield ' if (!PyArg_ParseTupleAndKeywords(args, kw, "%s|%s", %s))' % (fmt0, fmt1, ", ".join(['(char**)keywords'] + destinations))
if '(' in (fmt0 + fmt1):
print "Tuple with kwargs is not allowed, function", name
sys.exit(1)
else:
yield ' if (!PyArg_ParseTuple(args, "%s", %s))' % (fmt0, ", ".join(destinations))
yield ' return NULL;'
# Do the conversions:
for a in args:
joinwith = [f[2:] for f in a.flags if f.startswith("J:")]
if len(joinwith) > 0:
yield 'preShareData(%s, &%s);' % (joinwith[0], a.nm)
if 'O' in a.flags:
continue
if a.ty in (conversion_types + aggregate.keys()):
if a.init:
pred = '(pyobj_%s != NULL) && ' % a.nm
else:
pred = ''
yield ' if (%s!convert_to_%s(pyobj_%s, &%s, "%s")) return NULL;' % (pred, safename(a.ty), a.nm, a.nm, a.nm)
yield '#ifdef CVPY_VALIDATE_%s' % name
yield 'CVPY_VALIDATE_%s();' % name
yield '#endif'
def invokename(a):
if 'K' in a.flags:
prefix = "(const CvArr **)"
elif 'O' in a.flags and not 'A' in a.flags:
prefix = "&"
else:
prefix = ""
if a.ty in aggregate:
return prefix + aggregate[a.ty].replace('!', a.nm)
else:
return prefix + a.nm
def funcname(s):
# The name by which the function is called, in C
if s.startswith("CV"):
return s
else:
return "cv" + s
tocall = '%s(%s)' % (funcname(name), ", ".join(invokename(a) for a in args))
if 'stub' in flags:
yield ' return stub%s(%s);' % (name, ", ".join(invokename(a) for a in args))
elif ty == None:
yield ' ERRWRAP(%s);' % tocall
yield ' Py_RETURN_NONE;'
else:
Rtypes = [
'int',
'int64',
'double',
'CvCapture*',
'CvVideoWriter*',
'CvPOSITObject*',
'CvScalar',
'CvSize',
'CvRect',
'CvSeq*',
'CvBox2D',
'CvSeqOfCvAvgComp*',
'CvSeqOfCvConvexityDefect*',
'CvSeqOfCvStarKeypoint*',
'CvSeqOfCvSURFPoint*',
'CvSeqOfCvSURFDescriptor*',
'CvContourTree*',
'IplConvKernel*',
'IplImage*',
'CvMat*',
'constCvMat*',
'ROCvMat*',
'CvMatND*',
'CvPoint2D32f_4',
'CvRNG',
'CvSubdiv2D*',
'CvSubdiv2DPoint*',
'CvSubdiv2DEdge',
'ROIplImage*',
'CvStereoBMState*',
'CvStereoGCState*',
'CvKalman*',
'float',
'generic',
'unsigned' ]
if ty in Rtypes:
yield ' %s r;' % (ty)
yield ' ERRWRAP(r = %s);' % (tocall)
yield ' return FROM_%s(r);' % safename(ty)
else:
all_returns = ty.split(",")
return_value_from_call = len(set(Rtypes) & set(all_returns)) != 0
if return_value_from_call:
yield ' %s r;' % list(set(Rtypes) & set(all_returns))[0]
yield ' ERRWRAP(r = %s);' % (tocall)
else:
yield ' ERRWRAP(%s);' % (tocall)
typed = dict([ (a.nm,a.ty) for a in args])
for i in range(len(all_returns)):
if all_returns[i] in Rtypes:
typed['r'] = all_returns[i]
all_returns[i] = "r"
if len(all_returns) == 1:
af = dict([ (a.nm,a.flags) for a in args])
joinwith = [f[2:] for f in af.get(all_returns[0], []) if f.startswith("J:")]
if len(joinwith) > 0:
yield ' return shareData(pyobj_%s, %s, %s);' % (joinwith[0], joinwith[0], all_returns[0])
else:
yield ' return FROM_%s(%s);' % (safename(typed[all_returns[0]]), all_returns[0])
else:
yield ' return Py_BuildValue("%s", %s);' % ("N" * len(all_returns), ", ".join(["FROM_%s(%s)" % (safename(typed[n]), n) for n in all_returns]))
yield '}'
gen_c = [ open("generated%d.i" % i, "w") for i in range(5) ]
print "Generated %d functions" % len(api)
for nm,args,ty,flags in sorted(api):
# Figure out docstring into ds_*
ds_args = []
mandatory = [a.nm for a in args if not ('O' in a.flags) and not a.init]
optional = [a.nm for a in args if not ('O' in a.flags) and a.init]
ds_args = ", ".join(mandatory)
def o2s(o):
if o == []:
return ""
else:
return ' [, %s%s]' % (o[0], o2s(o[1:]))
ds_args += o2s(optional)
ds = "%s(%s) -> %s" % (nm, ds_args, str(ty))
#print ds
if has_optional(args):
entry = '{"%%s", (PyCFunction)pycv%s, METH_KEYWORDS, "%s"},' % (cname(nm), ds)
else:
entry = '{"%%s", pycv%s, METH_VARARGS, "%s"},' % (cname(nm), ds)
print >>gen_c[1], entry % (nm)
if nm.startswith('CV_'):
print >>gen_c[1], entry % (nm[3:])
for l in gen(nm,args,ty,flags):
print >>gen_c[0], l
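The o2s helper above renders optional arguments as nested bracket groups in each generated docstring; a standalone sketch of its output (the function name is only illustrative):

def o2s(o):
    # render optional argument names as nested "[, x]" groups
    if o == []:
        return ""
    return ' [, %s%s]' % (o[0], o2s(o[1:]))

print("Smooth(src, dst" + o2s(["smoothtype", "param1"]) + ") -> None")
# Smooth(src, dst [, smoothtype [, param1]]) -> None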
for l in open("%s/defs" % sys.argv[1]):
print >>gen_c[2], "PUBLISH(%s);" % l.split()[1]
########################################################################
# Generated objects.
########################################################################
# gen_c[3] is the code, gen_c[4] initializers
gensimple = Template("""
/*
${cvtype} is the OpenCV C struct
${ourname}_t is the Python object
*/
struct ${ourname}_t {
PyObject_HEAD
${cvtype} v;
};
static PyObject *${ourname}_repr(PyObject *self)
{
${ourname}_t *p = (${ourname}_t*)self;
char str[1000];
sprintf(str, "<${ourname} %p>", p);
return PyString_FromString(str);
}
${getset_funcs}
static PyGetSetDef ${ourname}_getseters[] = {
${getset_inits}
{NULL} /* Sentinel */
};
static PyTypeObject ${ourname}_Type = {
PyObject_HEAD_INIT(&PyType_Type)
0, /*size*/
MODULESTR".${ourname}", /*name*/
sizeof(${ourname}_t), /*basicsize*/
};
static void ${ourname}_specials(void)
{
${ourname}_Type.tp_repr = ${ourname}_repr;
${ourname}_Type.tp_getset = ${ourname}_getseters;
}
static PyObject *FROM_${cvtype}(${cvtype} r)
{
${ourname}_t *m = PyObject_NEW(${ourname}_t, &${ourname}_Type);
m->v = r;
return (PyObject*)m;
}
static int convert_to_${cvtype}PTR(PyObject *o, ${cvtype}** dst, const char *name = "no_name")
{
${allownull}
if (PyType_IsSubtype(o->ob_type, &${ourname}_Type)) {
*dst = &(((${ourname}_t*)o)->v);
return 1;
} else {
(*dst) = (${cvtype}*)NULL;
return failmsg("Expected ${cvtype} for argument '%s'", name);
}
}
""")
genptr = Template("""
/*
${cvtype} is the OpenCV C struct
${ourname}_t is the Python object
*/
struct ${ourname}_t {
PyObject_HEAD
${cvtype} *v;
};
static void ${ourname}_dealloc(PyObject *self)
{
${ourname}_t *p = (${ourname}_t*)self;
cvRelease${ourname}(&p->v);
PyObject_Del(self);
}
static PyObject *${ourname}_repr(PyObject *self)
{
${ourname}_t *p = (${ourname}_t*)self;
char str[1000];
sprintf(str, "<${ourname} %p>", p);
return PyString_FromString(str);
}
${getset_funcs}
static PyGetSetDef ${ourname}_getseters[] = {
${getset_inits}
{NULL} /* Sentinel */
};
static PyTypeObject ${ourname}_Type = {
PyObject_HEAD_INIT(&PyType_Type)
0, /*size*/
MODULESTR".${ourname}", /*name*/
sizeof(${ourname}_t), /*basicsize*/
};
static void ${ourname}_specials(void)
{
${ourname}_Type.tp_dealloc = ${ourname}_dealloc;
${ourname}_Type.tp_repr = ${ourname}_repr;
${ourname}_Type.tp_getset = ${ourname}_getseters;
}
static PyObject *FROM_${cvtype}PTR(${cvtype} *r)
{
${ourname}_t *m = PyObject_NEW(${ourname}_t, &${ourname}_Type);
m->v = r;
return (PyObject*)m;
}
static int convert_to_${cvtype}PTR(PyObject *o, ${cvtype}** dst, const char *name = "no_name")
{
${allownull}
if (PyType_IsSubtype(o->ob_type, &${ourname}_Type)) {
*dst = ((${ourname}_t*)o)->v;
return 1;
} else {
(*dst) = (${cvtype}*)NULL;
return failmsg("Expected ${cvtype} for argument '%s'", name);
}
}
""")
getset_func_template = Template("""
static PyObject *${ourname}_get_${member}(${ourname}_t *p, void *closure)
{
return ${rconverter}(p->v${accessor}${member});
}
static int ${ourname}_set_${member}(${ourname}_t *p, PyObject *value, void *closure)
{
if (value == NULL) {
PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");
return -1;
}
if (! ${checker}(value)) {
PyErr_SetString(PyExc_TypeError, "The ${member} attribute value must be a ${typename}");
return -1;
}
p->v${accessor}${member} = ${converter}(value);
return 0;
}
""")
getset_init_template = Template("""
{(char*)"${member}", (getter)${ourname}_get_${member}, (setter)${ourname}_set_${member}, (char*)"${member}", NULL},
""")
objects = [
( 'IplConvKernel', ['allownull'], {
"nCols" : 'i',
"nRows" : 'i',
"anchorX" : 'i',
"anchorY" : 'i',
}),
( 'CvCapture', [], {}),
( 'CvHaarClassifierCascade', [], {}),
( 'CvPOSITObject', [], {}),
( 'CvVideoWriter', [], {}),
( 'CvStereoBMState', [], {
"preFilterType" : 'i',
"preFilterSize" : 'i',
"preFilterCap" : 'i',
"SADWindowSize" : 'i',
"minDisparity" : 'i',
"numberOfDisparities" : 'i',
"textureThreshold" : 'i',
"uniquenessRatio" : 'i',
"speckleWindowSize" : 'i',
"speckleRange" : 'i',
}),
( 'CvStereoGCState', [], {
"Ithreshold" : 'i',
"interactionRadius" : 'i',
"K" : 'f',
"lambda" : 'f',
"lambda1" : 'f',
"lambda2" : 'f',
"occlusionCost" : 'i',
"minDisparity" : 'i',
"numberOfDisparities" : 'i',
"maxIters" : 'i',
}),
( 'CvKalman', [], {
"MP" : 'i',
"DP" : 'i',
"CP" : 'i',
"state_pre" : 'mr',
"state_post" : 'mr',
"transition_matrix" : 'mr',
"control_matrix" : 'mr',
"measurement_matrix" : 'mr',
"control_matrix" : 'mr',
"process_noise_cov" : 'mr',
"measurement_noise_cov" : 'mr',
"error_cov_pre" : 'mr',
"gain" : 'mr',
"error_cov_post" : 'mr',
}),
( 'CvMoments', ['copy'], {
"m00" : 'f',
"m10" : 'f',
"m01" : 'f',
"m20" : 'f',
"m11" : 'f',
"m02" : 'f',
"m30" : 'f',
"m21" : 'f',
"m12" : 'f',
"m03" : 'f',
"mu20" : 'f',
"mu11" : 'f',
"mu02" : 'f',
"mu30" : 'f',
"mu21" : 'f',
"mu12" : 'f',
"mu03" : 'f',
"inv_sqrt_m00" : 'f',
}),
]
checkers = {
'i' : 'PyNumber_Check',
'f' : 'PyNumber_Check',
'm' : 'is_cvmat',
'mr' : 'is_cvmat'
}
# Python -> C
converters = {
'i' : 'PyInt_AsLong',
'f' : 'PyFloat_AsDouble',
'm' : 'PyCvMat_AsCvMat',
'mr' : 'PyCvMat_AsCvMat'
}
# C -> Python
rconverters = {
'i' : 'PyInt_FromLong',
'f' : 'PyFloat_FromDouble',
'm' : 'FROM_CvMat',
'mr' : 'FROM_ROCvMatPTR'
}
# Human-readable type names
typenames = {
'i' : 'integer',
'f' : 'float',
'm' : 'list of CvMat',
'mr' : 'list of CvMat',
}
for (t, flags, members) in objects:
map = {'cvtype' : t,
'ourname' : t.replace('Cv', '')}
# gsf is all the generated code for the member accessors
if 'copy' in flags:
a = '.'
else:
a = '->'
gsf = "".join([getset_func_template.substitute(map, accessor = a, member = m, checker = checkers[t], converter = converters[t], rconverter = rconverters[t], typename = typenames[t]) for (m, t) in members.items()])
# gsi is the generated code for the initializer for each accessor
gsi = "".join([getset_init_template.substitute(map, member = m) for (m, t) in members.items()])
# s is the template that pulls everything together
if 'allownull' in flags:
nullcode = """if (o == Py_None) { *dst = (%s*)NULL; return 1; }""" % map['cvtype']
else:
nullcode = ""
if 'copy' in flags:
print >>gen_c[3], gensimple.substitute(map, getset_funcs = gsf, getset_inits = gsi, allownull = nullcode)
else:
print >>gen_c[3], genptr.substitute(map, getset_funcs = gsf, getset_inits = gsi, allownull = nullcode)
print >>gen_c[4], "MKTYPE(%s);" % map['ourname']
for f in gen_c:
f.close()

@ -53,14 +53,14 @@ static void pyopencv_${name}_dealloc(PyObject* self)
PyObject_Del(self);
}
static PyObject* pyopencv_from(const ${cname}& r)
template<> PyObject* pyopencv_from(const ${cname}& r)
{
pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
m->v = r;
return (PyObject*)m;
}
static bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name="<unknown>")
template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name)
{
if( src == NULL || src == Py_None )
return true;
@ -96,7 +96,7 @@ static void pyopencv_${name}_dealloc(PyObject* self)
PyObject_Del(self);
}
static PyObject* pyopencv_from(const Ptr<${cname}>& r)
template<> PyObject* pyopencv_from(const Ptr<${cname}>& r)
{
pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
new (&(m->v)) Ptr<$cname1>(); // init Ptr with placement new
@ -104,7 +104,7 @@ static PyObject* pyopencv_from(const Ptr<${cname}>& r)
return (PyObject*)m;
}
static bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name="<unknown>")
template<> bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name)
{
if( src == NULL || src == Py_None )
return true;
@ -120,7 +120,7 @@ static bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name="<un
""")
gen_template_map_type_cvt = Template("""
static bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name="<unknown>");
template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name);
""")
gen_template_set_prop_from_map = Template("""
@ -213,7 +213,6 @@ gen_template_rw_prop_init = Template("""
simple_argtype_mapping = {
"bool": ("bool", "b", "0"),
"char": ("char", "b", "0"),
"int": ("int", "i", "0"),
"float": ("float", "f", "0.f"),
"double": ("double", "d", "0"),
@ -619,7 +618,10 @@ class FuncInfo(object):
if amapping[1] == "O":
code_decl += " PyObject* pyobj_%s = NULL;\n" % (a.name,)
parse_name = "pyobj_" + a.name
code_cvt_list.append("pyopencv_to(pyobj_%s, %s, %s)" % (a.name, a.name, a.crepr()))
if a.tp == 'char':
code_cvt_list.append("convert_to_char(pyobj_%s, &%s, %s)"% (a.name, a.name, a.crepr()))
else:
code_cvt_list.append("pyopencv_to(pyobj_%s, %s, %s)" % (a.name, a.name, a.crepr()))
all_cargs.append([amapping, parse_name])

File diff suppressed because it is too large

@ -1,133 +0,0 @@
#!/usr/bin/env python
import unittest
import random
import time
import math
import sys
import array
import urllib
import tarfile
import hashlib
import os
import getopt
import operator
import functools
import numpy as np
import cv2
import cv2.cv as cv
class NewOpenCVTests(unittest.TestCase):
def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR):
if not filename in self.image_cache:
filedata = urllib.urlopen("https://raw.github.com/Itseez/opencv/master/" + filename).read()
self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
return self.image_cache[filename]
def setUp(self):
self.image_cache = {}
def hashimg(self, im):
""" Compute a hash for an image, useful for image comparisons """
return hashlib.md5(im.tostring()).digest()
if sys.version_info[:2] == (2, 6):
def assertLess(self, a, b, msg=None):
if not a < b:
self.fail('%s not less than %s' % (repr(a), repr(b)))
def assertLessEqual(self, a, b, msg=None):
if not a <= b:
self.fail('%s not less than or equal to %s' % (repr(a), repr(b)))
def assertGreater(self, a, b, msg=None):
if not a > b:
self.fail('%s not greater than %s' % (repr(a), repr(b)))
# Tests to run first; check the handful of basic operations that the later tests rely on
class Hackathon244Tests(NewOpenCVTests):
def test_int_array(self):
a = np.array([-1, 2, -3, 4, -5])
absa0 = np.abs(a)
self.assert_(cv2.norm(a, cv2.NORM_L1) == 15)
absa1 = cv2.absdiff(a, 0)
self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0)
def test_imencode(self):
a = np.zeros((480, 640), dtype=np.uint8)
flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
self.assertEqual(flag, True)
self.assertEqual(ajpg.dtype, np.uint8)
self.assertGreater(ajpg.shape[0], 1)
self.assertEqual(ajpg.shape[1], 1)
def test_projectPoints(self):
objpt = np.float64([[1,2,3]])
imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
self.assertEqual(imgpt1.shape, imgpt0.shape)
self.assertEqual(jac0.shape, jac1.shape)
self.assertEqual(jac0.shape[0], 2*objpt.shape[0])
def test_estimateAffine3D(self):
pattern_size = (11, 8)
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= 10
(retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points)
self.assertEqual(retval, 1)
if cv2.norm(out[2,:]) < 1e-3:
out[2,2]=1
self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
def test_fast(self):
fd = cv2.FastFeatureDetector(30, True)
img = self.get_sample("samples/cpp/right02.jpg", 0)
img = cv2.medianBlur(img, 3)
imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
keypoints = fd.detect(img)
self.assert_(600 <= len(keypoints) <= 700)
for kpt in keypoints:
self.assertNotEqual(kpt.response, 0)
def check_close_angles(self, a, b, angle_delta):
self.assert_(abs(a - b) <= angle_delta or
abs(360 - abs(a - b)) <= angle_delta)
def check_close_pairs(self, a, b, delta):
self.assertLessEqual(abs(a[0] - b[0]), delta)
self.assertLessEqual(abs(a[1] - b[1]), delta)
def check_close_boxes(self, a, b, delta, angle_delta):
self.check_close_pairs(a[0], b[0], delta)
self.check_close_pairs(a[1], b[1], delta)
self.check_close_angles(a[2], b[2], angle_delta)
def test_geometry(self):
npt = 100
np.random.seed(244)
a = np.random.randn(npt,2).astype('float32')*50 + 150
img = np.zeros((300, 300, 3), dtype='uint8')
be = cv2.fitEllipse(a)
br = cv2.minAreaRect(a)
mc, mr = cv2.minEnclosingCircle(a)
be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)
mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977
self.check_close_boxes(be, be0, 5, 15)
self.check_close_boxes(br, br0, 5, 15)
self.check_close_pairs(mc, mc0, 5)
self.assertLessEqual(abs(mr - mr0), 5)
if __name__ == '__main__':
print "testing", cv2.__version__
random.seed(0)
unittest.main()

@ -13,7 +13,6 @@ Calculates an optical flow for a sparse feature set using the iterative Lucas-Ka
.. ocv:pyfunction:: cv2.calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts[, status[, err[, winSize[, maxLevel[, criteria[, flags[, minEigThreshold]]]]]]]) -> nextPts, status, err
.. ocv:cfunction:: void cvCalcOpticalFlowPyrLK( const CvArr* prev, const CvArr* curr, CvArr* prev_pyr, CvArr* curr_pyr, const CvPoint2D32f* prev_features, CvPoint2D32f* curr_features, int count, CvSize win_size, int level, char* status, float* track_error, CvTermCriteria criteria, int flags )
.. ocv:pyoldfunction:: cv.CalcOpticalFlowPyrLK(prev, curr, prevPyr, currPyr, prevFeatures, winSize, level, criteria, flags, guesses=None) -> (currFeatures, status, track_error)
:param prevImg: first 8-bit input image or pyramid constructed by :ocv:func:`buildOpticalFlowPyramid`.
@ -210,7 +209,6 @@ Updates the motion history image by a moving silhouette.
.. ocv:pyfunction:: cv2.updateMotionHistory(silhouette, mhi, timestamp, duration) -> mhi
.. ocv:cfunction:: void cvUpdateMotionHistory( const CvArr* silhouette, CvArr* mhi, double timestamp, double duration )
.. ocv:pyoldfunction:: cv.UpdateMotionHistory(silhouette, mhi, timestamp, duration)-> None
:param silhouette: Silhouette mask that has non-zero pixels where the motion occurs.
@ -244,7 +242,6 @@ Calculates a gradient orientation of a motion history image.
.. ocv:pyfunction:: cv2.calcMotionGradient(mhi, delta1, delta2[, mask[, orientation[, apertureSize]]]) -> mask, orientation
.. ocv:cfunction:: void cvCalcMotionGradient( const CvArr* mhi, CvArr* mask, CvArr* orientation, double delta1, double delta2, int aperture_size=3 )
.. ocv:pyoldfunction:: cv.CalcMotionGradient(mhi, mask, orientation, delta1, delta2, apertureSize=3)-> None
:param mhi: Motion history single-channel floating-point image.
@ -284,7 +281,6 @@ Calculates a global motion orientation in a selected region.
.. ocv:pyfunction:: cv2.calcGlobalOrientation(orientation, mask, mhi, timestamp, duration) -> retval
.. ocv:cfunction:: double cvCalcGlobalOrientation( const CvArr* orientation, const CvArr* mask, const CvArr* mhi, double timestamp, double duration )
.. ocv:pyoldfunction:: cv.CalcGlobalOrientation(orientation, mask, mhi, timestamp, duration)-> float
:param orientation: Motion gradient orientation image calculated by the function :ocv:func:`calcMotionGradient` .
@ -314,7 +310,6 @@ Splits a motion history image into a few parts corresponding to separate indepen
.. ocv:pyfunction:: cv2.segmentMotion(mhi, timestamp, segThresh[, segmask]) -> segmask, boundingRects
.. ocv:cfunction:: CvSeq* cvSegmentMotion( const CvArr* mhi, CvArr* seg_mask, CvMemStorage* storage, double timestamp, double seg_thresh )
.. ocv:pyoldfunction:: cv.SegmentMotion(mhi, seg_mask, storage, timestamp, seg_thresh) -> boundingRects
:param mhi: Motion history image.
@ -342,8 +337,6 @@ Finds an object center, size, and orientation.
.. ocv:cfunction:: int cvCamShift( const CvArr* prob_image, CvRect window, CvTermCriteria criteria, CvConnectedComp* comp, CvBox2D* box=NULL )
.. ocv:pyoldfunction:: cv.CamShift(prob_image, window, criteria) -> (int, comp, box)
:param probImage: Back projection of the object histogram. See :ocv:func:`calcBackProject` .
:param window: Initial search window.
@ -370,7 +363,6 @@ Finds an object on a back projection image.
.. ocv:pyfunction:: cv2.meanShift(probImage, window, criteria) -> retval, window
.. ocv:cfunction:: int cvMeanShift( const CvArr* prob_image, CvRect window, CvTermCriteria criteria, CvConnectedComp* comp )
.. ocv:pyoldfunction:: cv.MeanShift(prob_image, window, criteria) -> comp
:param probImage: Back projection of the object histogram. See :ocv:func:`calcBackProject` for details.
@ -413,7 +405,6 @@ The constructors.
.. ocv:pyfunction:: cv2.KalmanFilter([dynamParams, measureParams[, controlParams[, type]]]) -> <KalmanFilter object>
.. ocv:cfunction:: CvKalman* cvCreateKalman( int dynam_params, int measure_params, int control_params=0 )
.. ocv:pyoldfunction:: cv.CreateKalman(dynam_params, measure_params, control_params=0) -> CvKalman
The full constructor.
@ -451,7 +442,6 @@ Computes a predicted state.
.. ocv:pyfunction:: cv2.KalmanFilter.predict([control]) -> retval
.. ocv:cfunction:: const CvMat* cvKalmanPredict( CvKalman* kalman, const CvMat* control=NULL)
.. ocv:pyoldfunction:: cv.KalmanPredict(kalman, control=None) -> mat
:param control: The optional input control
@ -466,8 +456,6 @@ Updates the predicted state from the measurement.
.. ocv:cfunction:: const CvMat* cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement )
.. ocv:pyoldfunction:: cv.KalmanCorrect(kalman, measurement) -> mat
:param measurement: The measured system parameters
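Given the cv2 signatures documented in these hunks, here is a minimal runnable sketch of the meanShift entry point; the synthetic back projection below stands in for a real histogram back projection:

import numpy as np
import cv2

# synthetic back projection: a bright blob standing in for the object response
prob = np.zeros((240, 320), np.uint8)
cv2.circle(prob, (200, 120), 30, 255, -1)

criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
ret, window = cv2.meanShift(prob, (150, 80, 80, 80), criteria)
print(window)  # the search window shifts to center on the blob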

@ -39,7 +39,7 @@
#include <cstdio>
#include <cstring>
#include <ctime>
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/contrib/compat.hpp"
#include "opencv2/highgui/highgui_c.h"
static void help(char **argv)

@ -32,7 +32,7 @@ static Mat toGrayscale(InputArray _src) {
Mat src = _src.getMat();
// only allow one channel
if(src.channels() != 1) {
CV_Error(CV_StsBadArg, "Only Matrices with one channel are supported");
CV_Error(Error::StsBadArg, "Only Matrices with one channel are supported");
}
// create and return normalized image
Mat dst;
@ -44,7 +44,7 @@ static void read_csv(const string& filename, vector<Mat>& images, vector<int>& l
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
@ -82,7 +82,7 @@ int main(int argc, const char *argv[]) {
// Quit if there are not enough images for this demo.
if(images.size() <= 1) {
string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
CV_Error(CV_StsError, error_message);
CV_Error(Error::StsError, error_message);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original

@ -178,7 +178,7 @@ int main(int argc, char** argv)
if( intrinsic_filename )
{
// reading intrinsic parameters
FileStorage fs(intrinsic_filename, CV_STORAGE_READ);
FileStorage fs(intrinsic_filename, FileStorage::READ);
if(!fs.isOpened())
{
printf("Failed to open file %s\n", intrinsic_filename);
@ -194,7 +194,7 @@ int main(int argc, char** argv)
M1 *= scale;
M2 *= scale;
fs.open(extrinsic_filename, CV_STORAGE_READ);
fs.open(extrinsic_filename, FileStorage::READ);
if(!fs.isOpened())
{
printf("Failed to open file %s\n", extrinsic_filename);

@ -59,15 +59,15 @@ static void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss
Point org;
org.x = 1;
org.y = 3 * fontSize.height * (lineOffsY + 1) / 2;
putText(img, ss, org, fontFace, fontScale, CV_RGB(0,0,0), 5*fontThickness/2, 16);
putText(img, ss, org, fontFace, fontScale, Scalar(0,0,0), 5*fontThickness/2, 16);
putText(img, ss, org, fontFace, fontScale, fontColor, fontThickness, 16);
}
static void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)
{
Scalar fontColorRed = CV_RGB(255,0,0);
Scalar fontColorNV = CV_RGB(118,185,0);
Scalar fontColorRed = Scalar(255,0,0);
Scalar fontColorNV = Scalar(118,185,0);
ostringstream ss;
ss << "FPS = " << setprecision(1) << fixed << fps;

@ -1,15 +0,0 @@
#!/usr/bin/python
import cv2.cv as cv
import time
cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
while True:
img = cv.QueryFrame(capture)
cv.ShowImage("camera", img)
if cv.WaitKey(10) == 27:
break
cv.DestroyAllWindows()
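This deleted sample relied on the cv compatibility layer removed by this pull request; for reference, an equivalent sketch against the cv2 API (camera index 0 is an assumption):

import cv2

cap = cv2.VideoCapture(0)  # first available camera
while True:
    ok, img = cap.read()
    if not ok:
        break
    cv2.imshow("camera", img)
    if cv2.waitKey(10) == 27:  # ESC quits
        break
cv2.destroyAllWindows()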

@ -1,116 +0,0 @@
#!/usr/bin/env python
import cv2.cv as cv
def is_rect_nonzero(r):
(_,_,w,h) = r
return (w > 0) and (h > 0)
class CamShiftDemo:
def __init__(self):
self.capture = cv.CaptureFromCAM(0)
cv.NamedWindow( "CamShiftDemo", 1 )
cv.NamedWindow( "Histogram", 1 )
cv.SetMouseCallback( "CamShiftDemo", self.on_mouse)
self.drag_start = None # Set to (x,y) when mouse starts drag
self.track_window = None # Set to rect when the mouse drag finishes
print( "Keys:\n"
" ESC - quit the program\n"
" b - switch to/from backprojection view\n"
"To initialize tracking, drag across the object with the mouse\n" )
def hue_histogram_as_image(self, hist):
""" Returns a nice representation of a hue histogram """
histimg_hsv = cv.CreateImage( (320,200), 8, 3)
mybins = cv.CloneMatND(hist.bins)
cv.Log(mybins, mybins)
(_, hi, _, _) = cv.MinMaxLoc(mybins)
cv.ConvertScale(mybins, mybins, 255. / hi)
w,h = cv.GetSize(histimg_hsv)
hdims = cv.GetDims(mybins)[0]
for x in range(w):
xh = (180 * x) / (w - 1) # hue sweeps from 0-180 across the image
val = int(mybins[int(hdims * x / w)] * h / 255)
cv.Rectangle( histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1)
cv.Rectangle( histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1)
histimg = cv.CreateImage( (320,200), 8, 3)
cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR)
return histimg
def on_mouse(self, event, x, y, flags, param):
if event == cv.CV_EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
if event == cv.CV_EVENT_LBUTTONUP:
self.drag_start = None
self.track_window = self.selection
if self.drag_start:
xmin = min(x, self.drag_start[0])
ymin = min(y, self.drag_start[1])
xmax = max(x, self.drag_start[0])
ymax = max(y, self.drag_start[1])
self.selection = (xmin, ymin, xmax - xmin, ymax - ymin)
def run(self):
hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
backproject_mode = False
while True:
frame = cv.QueryFrame( self.capture )
# Convert to HSV and keep the hue
hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
cv.Split(hsv, self.hue, None, None, None)
# Compute back projection
backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
# Run the cam-shift
cv.CalcArrBackProject( [self.hue], backproject, hist )
if self.track_window and is_rect_nonzero(self.track_window):
crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
(iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
self.track_window = rect
# If mouse is pressed, highlight the current selected rectangle
# and recompute the histogram
if self.drag_start and is_rect_nonzero(self.selection):
sub = cv.GetSubRect(frame, self.selection)
save = cv.CloneMat(sub)
cv.ConvertScale(frame, frame, 0.5)
cv.Copy(save, sub)
x,y,w,h = self.selection
cv.Rectangle(frame, (x,y), (x+w,y+h), (255,255,255))
sel = cv.GetSubRect(self.hue, self.selection )
cv.CalcArrHist( [sel], hist, 0)
(_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
if max_val != 0:
cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
elif self.track_window and is_rect_nonzero(self.track_window):
cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
if not backproject_mode:
cv.ShowImage( "CamShiftDemo", frame )
else:
cv.ShowImage( "CamShiftDemo", backproject)
cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist))
c = cv.WaitKey(7) % 0x100
if c == 27:
break
elif c == ord("b"):
backproject_mode = not backproject_mode
if __name__=="__main__":
demo = CamShiftDemo()
demo.run()
cv.DestroyAllWindows()

@ -1,34 +0,0 @@
#!/usr/bin/python
import cv2.cv as cv
import sys
import urllib2
if __name__ == "__main__":
cv.NamedWindow("win")
if len(sys.argv) > 1:
filename = sys.argv[1]
im = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
im3 = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_COLOR)
else:
try: # try opening local copy of image
fileName = '../cpp/left01.jpg'
im = cv.LoadImageM(fileName, False)
im3 = cv.LoadImageM(fileName, True)
except: # if local copy cannot be opened, try downloading it
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/left01.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
im = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)
im3 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
chessboard_dim = ( 9, 6 )
found_all, corners = cv.FindChessboardCorners( im, chessboard_dim )
print found_all, len(corners)
cv.DrawChessboardCorners( im3, chessboard_dim, corners, found_all )
cv.ShowImage("win", im3);
cv.WaitKey()
cv.DestroyAllWindows()
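Likewise, the chessboard detection in this deleted sample maps directly onto cv2; a hedged equivalent (the image path is an assumption):

import cv2

im = cv2.imread('../cpp/left01.jpg', 0)   # grayscale, for detection
im3 = cv2.imread('../cpp/left01.jpg', 1)  # color copy, for drawing
found, corners = cv2.findChessboardCorners(im, (9, 6))
print(found, 0 if corners is None else len(corners))
if found:
    cv2.drawChessboardCorners(im3, (9, 6), corners, found)
cv2.imshow("win", im3)
cv2.waitKey()
cv2.destroyAllWindows()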

@ -1,136 +0,0 @@
#! /usr/bin/env python
print "OpenCV Python version of contours"
# import the necessary things for OpenCV
import cv2.cv as cv
# some default constants
_SIZE = 500
_DEFAULT_LEVEL = 3
# definition of some colors
_red = (0, 0, 255, 0);
_green = (0, 255, 0, 0);
_white = cv.RealScalar (255)
_black = cv.RealScalar (0)
# the callback on the trackbar, to set the level of contours we want
# to display
def on_trackbar (position):
# create the image in which to draw the contours we find
contours_image = cv.CreateImage ( (_SIZE, _SIZE), 8, 3)
# compute the real level of display, given the current position
levels = position - 3
# initialisation
_contours = contours
if levels <= 0:
# zero or negative value
# => move to the nearest face to make the display funnier
_contours = contours.h_next().h_next().h_next()
# first, clear the image where we will draw contours
cv.SetZero (contours_image)
# draw contours in red and green
cv.DrawContours (contours_image, _contours,
_red, _green,
levels, 3, cv.CV_AA,
(0, 0))
# finally, show the image
cv.ShowImage ("contours", contours_image)
if __name__ == '__main__':
# create the image where we want to display results
image = cv.CreateImage ( (_SIZE, _SIZE), 8, 1)
# start with an empty image
cv.SetZero (image)
# draw the original picture
for i in range (6):
dx = (i % 2) * 250 - 30
dy = (i / 2) * 150
cv.Ellipse (image,
(dx + 150, dy + 100),
(100, 70),
0, 0, 360, _white, -1, 8, 0)
cv.Ellipse (image,
(dx + 115, dy + 70),
(30, 20),
0, 0, 360, _black, -1, 8, 0)
cv.Ellipse (image,
(dx + 185, dy + 70),
(30, 20),
0, 0, 360, _black, -1, 8, 0)
cv.Ellipse (image,
(dx + 115, dy + 70),
(15, 15),
0, 0, 360, _white, -1, 8, 0)
cv.Ellipse (image,
(dx + 185, dy + 70),
(15, 15),
0, 0, 360, _white, -1, 8, 0)
cv.Ellipse (image,
(dx + 115, dy + 70),
(5, 5),
0, 0, 360, _black, -1, 8, 0)
cv.Ellipse (image,
(dx + 185, dy + 70),
(5, 5),
0, 0, 360, _black, -1, 8, 0)
cv.Ellipse (image,
(dx + 150, dy + 100),
(10, 5),
0, 0, 360, _black, -1, 8, 0)
cv.Ellipse (image,
(dx + 150, dy + 150),
(40, 10),
0, 0, 360, _black, -1, 8, 0)
cv.Ellipse (image,
(dx + 27, dy + 100),
(20, 35),
0, 0, 360, _white, -1, 8, 0)
cv.Ellipse (image,
(dx + 273, dy + 100),
(20, 35),
0, 0, 360, _white, -1, 8, 0)
# create window and display the original picture in it
cv.NamedWindow ("image", 1)
cv.ShowImage ("image", image)
# create the storage area
storage = cv.CreateMemStorage (0)
# find the contours
contours = cv.FindContours(image,
storage,
cv.CV_RETR_TREE,
cv.CV_CHAIN_APPROX_SIMPLE,
(0,0))
# comment this out if you do not want approximation
contours = cv.ApproxPoly (contours,
storage,
cv.CV_POLY_APPROX_DP, 3, 1)
# create the window for the contours
cv.NamedWindow ("contours", 1)
# create the trackbar, to enable the change of the displayed level
cv.CreateTrackbar ("levels+3", "contours", 3, 7, on_trackbar)
# call the callback once, so the first display is drawn
on_trackbar (_DEFAULT_LEVEL)
# wait for a key press before exiting
cv.WaitKey (0)
cv.DestroyAllWindows()

@ -1,65 +0,0 @@
#! /usr/bin/env python
print "OpenCV Python version of convexhull"
# import the necessary things for OpenCV
import cv2.cv as cv
# to generate random values
import random
# how many points we want at max
_MAX_POINTS = 100
if __name__ == '__main__':
# main object to get random values from
my_random = random.Random ()
# create the image where we want to display results
image = cv.CreateImage ( (500, 500), 8, 3)
# create the window to put the image in
cv.NamedWindow ('hull', cv.CV_WINDOW_AUTOSIZE)
while True:
# do forever
# get a random number of points
count = my_random.randrange (0, _MAX_POINTS) + 1
# initialisations
points = []
for i in range (count):
# generate a random point
points.append ( (
my_random.randrange (0, image.width / 2) + image.width / 4,
my_random.randrange (0, image.width / 2) + image.width / 4
))
# compute the convex hull
storage = cv.CreateMemStorage(0)
hull = cv.ConvexHull2 (points, storage, cv.CV_CLOCKWISE, 1)
# start with an empty image
cv.SetZero (image)
# draw all the points as circles in red
for i in range (count):
cv.Circle (image, points [i], 2,
(0, 0, 255, 0),
cv.CV_FILLED, cv.CV_AA, 0)
# Draw the convex hull as a closed polyline in green
cv.PolyLine(image, [hull], 1, cv.RGB(0,255,0), 1, cv.CV_AA)
# display the final image
cv.ShowImage ('hull', image)
# handle events, and wait for a key press
k = cv.WaitKey (0) % 0x100
if k == 27:
# the user pressed the ESC key, so exit
break
cv.DestroyAllWindows()
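The convex-hull demo also has a direct cv2 counterpart; a minimal sketch with random points (the point ranges mirror the deleted sample):

import numpy as np
import cv2

img = np.zeros((500, 500, 3), np.uint8)
points = np.random.randint(125, 375, (50, 2)).astype(np.int32)
hull = cv2.convexHull(points)  # (k, 1, 2) array of hull vertices
for x, y in points:
    cv2.circle(img, (int(x), int(y)), 2, (0, 0, 255), -1)  # points in red
cv2.polylines(img, [hull], True, (0, 255, 0), 1)           # hull in green
cv2.imshow('hull', img)
cv2.waitKey(0)
cv2.destroyAllWindows()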

@ -1,169 +0,0 @@
#!/usr/bin/python
"""
Find squares in an image by finding contours and filtering them
"""
# Results differ slightly from the C version on the same images, but are
# otherwise OK
import math
import cv2.cv as cv
def angle(pt1, pt2, pt0):
"calculate angle contained by 3 points(x, y)"
dx1 = pt1[0] - pt0[0]
dy1 = pt1[1] - pt0[1]
dx2 = pt2[0] - pt0[0]
dy2 = pt2[1] - pt0[1]
nom = dx1*dx2 + dy1*dy2
denom = math.sqrt( (dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10 )
ang = nom / denom
return ang
def is_square(contour):
"""
Squareness checker
Square contours should:
-have 4 vertices after approximation,
-have relatively large area (to filter out noisy contours)
-be convex.
-have angles between sides close to 90deg (cos(ang) ~0 )
Note: absolute value of an area is used because area may be
positive or negative - in accordance with the contour orientation
"""
area = math.fabs( cv.ContourArea(contour) )
isconvex = cv.CheckContourConvexity(contour)
s = 0
if len(contour) == 4 and area > 1000 and isconvex:
for i in range(1, 4):
# find minimum angle between joint edges (maximum of cosine)
pt1 = contour[i]
pt2 = contour[i-1]
pt0 = contour[i-2]
t = math.fabs(angle(pt0, pt1, pt2))
s = max(s, t)
# if cosines of all angles are small (all angles are ~90 degree)
# then its a square
if s < 0.3: return True
return False
def find_squares_from_binary( gray ):
"""
use contour search to find squares in binary image
returns list of numpy arrays containing 4 points
"""
squares = []
storage = cv.CreateMemStorage(0)
contours = cv.FindContours(gray, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE, (0,0))
storage = cv.CreateMemStorage(0)
while contours:
#approximate contour with accuracy proportional to the contour perimeter
arclength = cv.ArcLength(contours)
polygon = cv.ApproxPoly( contours, storage, cv.CV_POLY_APPROX_DP, arclength * 0.02, 0)
if is_square(polygon):
squares.append(polygon[0:4])
contours = contours.h_next()
return squares
def find_squares4(color_img):
"""
Finds multiple squares in image
Steps:
-Use Canny edge to highlight contours, and dilation to connect
the edge segments.
-Threshold the result to binary edge tokens
-Use cv.FindContours: returns a cv.CvSequence of cv.CvContours
-Filter each candidate: use ApproxPoly, keep only contours with 4 vertices,
enough area, and ~90deg angles.
Return all squares contours in one flat list of arrays, 4 x,y points each.
"""
#select even sizes only
width, height = (color_img.width & -2, color_img.height & -2 )
timg = cv.CloneImage( color_img ) # make a copy of input image
gray = cv.CreateImage( (width,height), 8, 1 )
# select the maximum ROI in the image
cv.SetImageROI( timg, (0, 0, width, height) )
# down-scale and upscale the image to filter out the noise
pyr = cv.CreateImage( (width/2, height/2), 8, 3 )
cv.PyrDown( timg, pyr, 7 )
cv.PyrUp( pyr, timg, 7 )
tgray = cv.CreateImage( (width,height), 8, 1 )
squares = []
# Find squares in every color plane of the image
# Two methods, we use both:
# 1. Canny to catch squares with gradient shading. Use upper threshold
# from slider, set the lower to 0 (which forces edges merging). Then
# dilate canny output to remove potential holes between edge segments.
# 2. Binary thresholding at multiple levels
N = 11
for c in [0, 1, 2]:
#extract the c-th color plane
cv.SetImageCOI( timg, c+1 );
cv.Copy( timg, tgray, None );
cv.Canny( tgray, gray, 0, 50, 5 )
cv.Dilate( gray, gray)
squares = squares + find_squares_from_binary( gray )
# Look for more squares at several threshold levels
for l in range(1, N):
cv.Threshold( tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY )
squares = squares + find_squares_from_binary( gray )
return squares
RED = (0,0,255)
GREEN = (0,255,0)
def draw_squares( color_img, squares ):
"""
Squares is py list containing 4-pt numpy arrays. Step through the list
and draw a polygon for each 4-group
"""
color, othercolor = RED, GREEN
for square in squares:
cv.PolyLine(color_img, [square], True, color, 3, cv.CV_AA, 0)
color, othercolor = othercolor, color
cv.ShowImage(WNDNAME, color_img)
WNDNAME = "Squares Demo"
def main():
"""Open test color images, create display window, start the search"""
cv.NamedWindow(WNDNAME, 1)
for name in [ "../c/pic%d.png" % i for i in [1, 2, 3, 4, 5, 6] ]:
try:
img0 = cv.LoadImage(name, 1)
except IOError:
print "Couldn't load %s\n" % name
continue
# the threshold slider was removed from the C version; likewise here we use a fixed Canny parameter of 50
img = cv.CloneImage(img0)
cv.ShowImage(WNDNAME, img)
# force the image processing
draw_squares( img, find_squares4( img ) )
# wait for key.
if cv.WaitKey(-1) % 0x100 == 27:
break
if __name__ == "__main__":
main()
cv.DestroyAllWindows()

@ -1,20 +0,0 @@
#!/usr/bin/python
import cv2.cv as cv
import urllib2
from sys import argv
def load_sample(name=None):
if len(argv) > 1:
img0 = cv.LoadImage(argv[1], cv.CV_LOAD_IMAGE_COLOR)
elif name is not None:
try:
img0 = cv.LoadImage(name, cv.CV_LOAD_IMAGE_COLOR)
except IOError:
urlbase = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/'
file = name.split('/')[-1]
filedata = urllib2.urlopen(urlbase+file).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
return img0

@ -1,139 +0,0 @@
#!/usr/bin/python
"""
This script demonstrates iterative construction of a
Delaunay triangulation and a Voronoi tessellation.
Original Author (C version): ?
Converted to Python by: Roman Stanchak
"""
import cv2.cv as cv
import random
def draw_subdiv_point( img, fp, color ):
cv.Circle( img, (cv.Round(fp[0]), cv.Round(fp[1])), 3, color, cv.CV_FILLED, 8, 0 );
def draw_subdiv_edge( img, edge, color ):
org_pt = cv.Subdiv2DEdgeOrg(edge);
dst_pt = cv.Subdiv2DEdgeDst(edge);
if org_pt and dst_pt :
org = org_pt.pt;
dst = dst_pt.pt;
iorg = ( cv.Round( org[0] ), cv.Round( org[1] ));
idst = ( cv.Round( dst[0] ), cv.Round( dst[1] ));
cv.Line( img, iorg, idst, color, 1, cv.CV_AA, 0 );
def draw_subdiv( img, subdiv, delaunay_color, voronoi_color ):
for edge in subdiv.edges:
edge_rot = cv.Subdiv2DRotateEdge( edge, 1 )
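# in the quad-edge representation, rotating an edge by 1 yields its dual
# edge in the Voronoi diagram, so both diagrams can be drawn from the
# same subdivision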
draw_subdiv_edge( img, edge_rot, voronoi_color );
draw_subdiv_edge( img, edge, delaunay_color );
def locate_point( subdiv, fp, img, active_color ):
(res, e0) = cv.Subdiv2DLocate( subdiv, fp );
if res in [ cv.CV_PTLOC_INSIDE, cv.CV_PTLOC_ON_EDGE ]:
e = e0
while True:
draw_subdiv_edge( img, e, active_color );
e = cv.Subdiv2DGetEdge(e, cv.CV_NEXT_AROUND_LEFT);
if e == e0:
break
draw_subdiv_point( img, fp, active_color );
def draw_subdiv_facet( img, edge ):
t = edge;
count = 0;
# count number of edges in facet
while count == 0 or t != edge:
count+=1
t = cv.Subdiv2DGetEdge( t, cv.CV_NEXT_AROUND_LEFT );
buf = []
# gather points
t = edge;
for i in range(count):
assert t>4
pt = cv.Subdiv2DEdgeOrg( t );
if not pt:
break;
buf.append( ( cv.Round(pt.pt[0]), cv.Round(pt.pt[1]) ) );
t = cv.Subdiv2DGetEdge( t, cv.CV_NEXT_AROUND_LEFT );
if( len(buf)==count ):
pt = cv.Subdiv2DEdgeDst( cv.Subdiv2DRotateEdge( edge, 1 ));
cv.FillConvexPoly( img, buf, cv.RGB(random.randrange(256),random.randrange(256),random.randrange(256)), cv.CV_AA, 0 );
cv.PolyLine( img, [buf], 1, cv.RGB(0,0,0), 1, cv.CV_AA, 0);
draw_subdiv_point( img, pt.pt, cv.RGB(0,0,0));
def paint_voronoi( subdiv, img ):
cv.CalcSubdivVoronoi2D( subdiv );
for edge in subdiv.edges:
# left
draw_subdiv_facet( img, cv.Subdiv2DRotateEdge( edge, 1 ));
# right
draw_subdiv_facet( img, cv.Subdiv2DRotateEdge( edge, 3 ));
if __name__ == '__main__':
win = "source";
rect = ( 0, 0, 600, 600 );
active_facet_color = cv.RGB( 255, 0, 0 );
delaunay_color = cv.RGB( 0,0,0);
voronoi_color = cv.RGB(0, 180, 0);
bkgnd_color = cv.RGB(255,255,255);
img = cv.CreateImage( (rect[2],rect[3]), 8, 3 );
cv.Set( img, bkgnd_color );
cv.NamedWindow( win, 1 );
storage = cv.CreateMemStorage(0);
subdiv = cv.CreateSubdivDelaunay2D( rect, storage );
print "Delaunay triangulation will be build now interactively."
print "To stop the process, press any key\n";
for i in range(200):
fp = ( random.random()*(rect[2]-10)+5, random.random()*(rect[3]-10)+5 )
locate_point( subdiv, fp, img, active_facet_color );
cv.ShowImage( win, img );
if( cv.WaitKey( 100 ) >= 0 ):
break;
cv.SubdivDelaunay2DInsert( subdiv, fp );
cv.CalcSubdivVoronoi2D( subdiv );
cv.Set( img, bkgnd_color );
draw_subdiv( img, subdiv, delaunay_color, voronoi_color );
cv.ShowImage( win, img );
if( cv.WaitKey( 100 ) >= 0 ):
break;
cv.Set( img, bkgnd_color );
paint_voronoi( subdiv, img );
cv.ShowImage( win, img );
cv.WaitKey(0);
cv.DestroyWindow( win );

@ -1,80 +0,0 @@
#!/usr/bin/python
import cv2.cv as cv
import sys
import urllib2
hist_size = 64
range_0 = [0, 256]
ranges = [ range_0 ]
class DemHist:
def __init__(self, src_image):
self.src_image = src_image
self.dst_image = cv.CloneMat(src_image)
self.hist_image = cv.CreateImage((320, 200), 8, 1)
self.hist = cv.CreateHist([hist_size], cv.CV_HIST_ARRAY, ranges, 1)
self.brightness = 0
self.contrast = 0
cv.NamedWindow("image", 0)
cv.NamedWindow("histogram", 0)
cv.CreateTrackbar("brightness", "image", 100, 200, self.update_brightness)
cv.CreateTrackbar("contrast", "image", 100, 200, self.update_contrast)
self.update_brightcont()
def update_brightness(self, val):
self.brightness = val - 100
self.update_brightcont()
def update_contrast(self, val):
self.contrast = val - 100
self.update_brightcont()
def update_brightcont(self):
# The algorithm is by Werner D. Streidt
# (http://visca.com/ffactory/archives/5-99/msg00021.html)
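# Both branches reduce to a linear mapping dst = a*src + b: positive
# contrast stretches the range around the midpoint (values below delta
# clip to 0, values above 255-delta clip to 255), negative contrast
# compresses it, and the brightness offset is folded into b.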
if self.contrast > 0:
delta = 127. * self.contrast / 100
a = 255. / (255. - delta * 2)
b = a * (self.brightness - delta)
else:
delta = -128. * self.contrast / 100
a = (256. - delta * 2) / 255.
b = a * self.brightness + delta
cv.ConvertScale(self.src_image, self.dst_image, a, b)
cv.ShowImage("image", self.dst_image)
cv.CalcArrHist([self.dst_image], self.hist)
(min_value, max_value, _, _) = cv.GetMinMaxHistValue(self.hist)
cv.Scale(self.hist.bins, self.hist.bins, float(self.hist_image.height) / max_value, 0)
cv.Set(self.hist_image, cv.ScalarAll(255))
bin_w = round(float(self.hist_image.width) / hist_size)
for i in range(hist_size):
cv.Rectangle(self.hist_image, (int(i * bin_w), self.hist_image.height),
(int((i + 1) * bin_w), self.hist_image.height - cv.Round(self.hist.bins[i])),
cv.ScalarAll(0), -1, 8, 0)
cv.ShowImage("histogram", self.hist_image)
if __name__ == "__main__":
# Load the source image.
if len(sys.argv) > 1:
src_image = cv.GetMat(cv.LoadImage(sys.argv[1], 0))
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/baboon.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
src_image = cv.DecodeImageM(imagefiledata, 0)
dh = DemHist(src_image)
cv.WaitKey(0)
cv.DestroyAllWindows()

@ -1,114 +0,0 @@
#!/usr/bin/python
import cv2.cv as cv
import sys
import urllib2
# Rearrange the quadrants of Fourier image so that the origin is at
# the image center
# src & dst arrays of equal size & type
def cvShiftDFT(src_arr, dst_arr ):
size = cv.GetSize(src_arr)
dst_size = cv.GetSize(dst_arr)
if dst_size != size:
# cv.Error with __FILE__/__LINE__ is a C idiom that does not exist in Python; raise instead
raise ValueError("cv.ShiftDFT: source and destination arrays must have equal sizes")
if(src_arr is dst_arr):
tmp = cv.CreateMat(size[1]/2, size[0]/2, cv.GetElemType(src_arr))
cx = size[0] / 2
cy = size[1] / 2 # image center
q1 = cv.GetSubRect( src_arr, (0,0,cx, cy) )
q2 = cv.GetSubRect( src_arr, (cx,0,cx,cy) )
q3 = cv.GetSubRect( src_arr, (cx,cy,cx,cy) )
q4 = cv.GetSubRect( src_arr, (0,cy,cx,cy) )
d1 = cv.GetSubRect( src_arr, (0,0,cx,cy) )
d2 = cv.GetSubRect( src_arr, (cx,0,cx,cy) )
d3 = cv.GetSubRect( src_arr, (cx,cy,cx,cy) )
d4 = cv.GetSubRect( src_arr, (0,cy,cx,cy) )
if(src_arr is not dst_arr):
if( not cv.CV_ARE_TYPES_EQ( q1, d1 )):
raise TypeError("cv.ShiftDFT: source and destination arrays must have the same format")
cv.Copy(q3, d1)
cv.Copy(q4, d2)
cv.Copy(q1, d3)
cv.Copy(q2, d4)
else:
cv.Copy(q3, tmp)
cv.Copy(q1, q3)
cv.Copy(tmp, q1)
cv.Copy(q4, tmp)
cv.Copy(q2, q4)
cv.Copy(tmp, q2)
if __name__ == "__main__":
if len(sys.argv) > 1:
im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/baboon.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
im = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)
realInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 1)
imaginaryInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 1)
complexInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 2)
cv.Scale(im, realInput, 1.0, 0.0)
cv.Zero(imaginaryInput)
cv.Merge(realInput, imaginaryInput, None, None, complexInput)
dft_M = cv.GetOptimalDFTSize( im.height - 1 )
dft_N = cv.GetOptimalDFTSize( im.width - 1 )
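# DFTs run fastest on sizes that factor into small primes;
# GetOptimalDFTSize returns the smallest such size not below its argument,
# and the extra rows/columns are zero-padded below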
dft_A = cv.CreateMat( dft_M, dft_N, cv.CV_64FC2 )
image_Re = cv.CreateImage( (dft_N, dft_M), cv.IPL_DEPTH_64F, 1)
image_Im = cv.CreateImage( (dft_N, dft_M), cv.IPL_DEPTH_64F, 1)
# copy A to dft_A and pad dft_A with zeros
tmp = cv.GetSubRect( dft_A, (0,0, im.width, im.height))
cv.Copy( complexInput, tmp, None )
if(dft_A.width > im.width):
tmp = cv.GetSubRect( dft_A, (im.width,0, dft_N - im.width, im.height))
cv.Zero( tmp )
# no need to pad the bottom part of dft_A with zeros because we
# use the nonzero_rows parameter in the cv.DFT() call below
cv.DFT( dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height )
cv.NamedWindow("win", 0)
cv.NamedWindow("magnitude", 0)
cv.ShowImage("win", im)
# Split Fourier in real and imaginary parts
cv.Split( dft_A, image_Re, image_Im, None, None )
# Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
cv.Pow( image_Re, image_Re, 2.0)
cv.Pow( image_Im, image_Im, 2.0)
cv.Add( image_Re, image_Im, image_Re, None)
cv.Pow( image_Re, image_Re, 0.5 )
# Compute log(1 + Mag)
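# (the log compresses the spectrum's huge dynamic range so the display is
# not dominated by the DC term)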
cv.AddS( image_Re, cv.ScalarAll(1.0), image_Re, None ) # 1 + Mag
cv.Log( image_Re, image_Re ) # log(1 + Mag)
# Rearrange the quadrants of Fourier image so that the origin is at
# the image center
cvShiftDFT( image_Re, image_Re )
minVal, maxVal, pt1, pt2 = cv.MinMaxLoc(image_Re) # avoid shadowing the min/max builtins
cv.Scale(image_Re, image_Re, 1.0/(maxVal-minVal), 1.0*(-minVal)/(maxVal-minVal))
cv.ShowImage("magnitude", image_Re)
cv.WaitKey(0)
cv.DestroyAllWindows()

@ -1,72 +0,0 @@
#!/usr/bin/python
import sys
import cv2.cv as cv
import urllib2
wndname = "Distance transform"
tbarname = "Threshold"
# The output images
dist = 0
dist8u1 = 0
dist8u2 = 0
dist8u = 0
dist32s = 0
gray = 0
edge = 0
# define a trackbar callback
def on_trackbar(edge_thresh):
cv.Threshold(gray, edge, float(edge_thresh), float(edge_thresh), cv.CV_THRESH_BINARY)
#Distance transform
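# CV_DIST_L2 with a 5x5 mask closely approximates the true Euclidean
# distance to the nearest zero (edge) pixel; the scale/pow/convert steps
# below just remap it into a displayable 8-bit ramp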
cv.DistTransform(edge, dist, cv.CV_DIST_L2, cv.CV_DIST_MASK_5)
cv.ConvertScale(dist, dist, 5000.0, 0)
cv.Pow(dist, dist, 0.5)
cv.ConvertScale(dist, dist32s, 1.0, 0.5)
cv.AndS(dist32s, cv.ScalarAll(255), dist32s, None)
cv.ConvertScale(dist32s, dist8u1, 1, 0)
cv.ConvertScale(dist32s, dist32s, -1, 0)
cv.AddS(dist32s, cv.ScalarAll(255), dist32s, None)
cv.ConvertScale(dist32s, dist8u2, 1, 0)
cv.Merge(dist8u1, dist8u2, dist8u2, None, dist8u)
cv.ShowImage(wndname, dist8u)
if __name__ == "__main__":
edge_thresh = 100
if len(sys.argv) > 1:
gray = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/stuff.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
gray = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)
# Create the output image
dist = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32F, 1)
dist8u1 = cv.CloneImage(gray)
dist8u2 = cv.CloneImage(gray)
dist8u = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_8U, 3)
dist32s = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32S, 1)
# Convert to grayscale
edge = cv.CloneImage(gray)
# Create a window
cv.NamedWindow(wndname, 1)
# create a toolbar
cv.CreateTrackbar(tbarname, wndname, edge_thresh, 255, on_trackbar)
# Show the image
on_trackbar(edge_thresh)
# Wait for a key stroke; the same function arranges events processing
cv.WaitKey(0)
cv.DestroyAllWindows()

@ -1,184 +0,0 @@
#!/usr/bin/python
import cv2.cv as cv
import time
from pydmtx import DataMatrix
import numpy
import sys
import math
'''
Find 2D barcodes based on an up-to-3-channel DataMatrix
'''
def absnorm8(im, im8):
""" im may be any single-channel image type. Return an 8-bit version, absolute value, normalized so that max is 255 """
(minVal, maxVal, _, _) = cv.MinMaxLoc(im)
cv.ConvertScaleAbs(im, im8, 255 / max(abs(minVal), abs(maxVal)), 0)
return im8
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, thickness = 2, lineType = cv.CV_AA)
if 0: # disabled debug-timing block; note dm_write is only defined inside mkdmtx(), so enabling this as-is would fail
started = time.time()
print dm_write.decode(bg.width, bg.height, buffer(bg.tostring()), max_count = 1, min_edge = 12, max_edge = 13, shape = DataMatrix.DmtxSymbol10x10) # , timeout = 10)
print "took", time.time() - started
class DmtxFinder:
def __init__(self):
self.cache = {}
self.dm = DataMatrix()
def Cached(self, name, rows, cols, type):
key = (name, rows, cols)
if not key in self.cache:
self.cache[key] = cv.CreateMat(rows, cols, type)
return self.cache[key]
def find0(self, img):
started = time.time()
self.dm.decode(img.width,
img.height,
buffer(img.tostring()),
max_count = 4,
#min_edge = 6,
#max_edge = 19 # Units of 2 pixels
)
print "brute", time.time() - started
found = {}
for i in range(self.dm.count()):
stats = self.dm.stats(i + 1) # dm_read was undefined; use the decoder that ran above
print stats
found[stats[0]] = stats[1]
return found
def find(self, img):
started = time.time()
gray = self.Cached('gray', img.height, img.width, cv.CV_8UC1)
cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
sobel = self.Cached('sobel', img.height, img.width, cv.CV_16SC1)
sobely = self.Cached('sobely', img.height, img.width, cv.CV_16SC1)
cv.Sobel(gray, sobel, 1, 0)
cv.Sobel(gray, sobely, 0, 1)
cv.Add(sobel, sobely, sobel)
sobel8 = self.Cached('sobel8', sobel.height, sobel.width, cv.CV_8UC1)
absnorm8(sobel, sobel8)
cv.Threshold(sobel8, sobel8, 128.0, 255.0, cv.CV_THRESH_BINARY)
sobel_integral = self.Cached('sobel_integral', img.height + 1, img.width + 1, cv.CV_32SC1)
cv.Integral(sobel8, sobel_integral)
d = 16
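# summed-area-table trick: the sum of the thresholded Sobel response over
# the d x d window at (x, y) is I(x+d,y+d) - I(x,y+d) - I(x+d,y) + I(x,y);
# the four shifted sub-rects below evaluate this for every window at once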
_x1y1 = cv.GetSubRect(sobel_integral, (0, 0, sobel_integral.cols - d, sobel_integral.rows - d))
_x1y2 = cv.GetSubRect(sobel_integral, (0, d, sobel_integral.cols - d, sobel_integral.rows - d))
_x2y1 = cv.GetSubRect(sobel_integral, (d, 0, sobel_integral.cols - d, sobel_integral.rows - d))
_x2y2 = cv.GetSubRect(sobel_integral, (d, d, sobel_integral.cols - d, sobel_integral.rows - d))
summation = cv.CloneMat(_x2y2)
cv.Sub(summation, _x1y2, summation)
cv.Sub(summation, _x2y1, summation)
cv.Add(summation, _x1y1, summation)
sum8 = self.Cached('sum8', summation.height, summation.width, cv.CV_8UC1)
absnorm8(summation, sum8)
cv.Threshold(sum8, sum8, 32.0, 255.0, cv.CV_THRESH_BINARY)
cv.ShowImage("sum8", sum8)
seq = cv.FindContours(sum8, cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL)
subimg = cv.GetSubRect(img, (d / 2, d / 2, sum8.cols, sum8.rows))
t_cull = time.time() - started
seqs = []
while seq:
seqs.append(seq)
seq = seq.h_next()
started = time.time()
found = {}
print 'seqs', len(seqs)
for seq in seqs:
area = cv.ContourArea(seq)
if area > 1000:
rect = cv.BoundingRect(seq)
edge = int((14 / 14.) * math.sqrt(area) / 2 + 0.5)
candidate = cv.GetSubRect(subimg, rect)
sym = self.dm.decode(candidate.width,
candidate.height,
buffer(candidate.tostring()),
max_count = 1,
#min_edge = 6,
#max_edge = int(edge) # Units of 2 pixels
)
if sym:
onscreen = [(d / 2 + rect[0] + x, d / 2 + rect[1] + y) for (x, y) in self.dm.stats(1)[1]]
found[sym] = onscreen
else:
print "FAILED"
t_brute = time.time() - started
print "cull took", t_cull, "brute", t_brute
return found
bg = cv.CreateMat(1024, 1024, cv.CV_8UC3)
cv.Set(bg, cv.RGB(0, 0, 0))
df = DmtxFinder()
cv.NamedWindow("camera", 1)
def mkdmtx(msg):
dm_write = DataMatrix()
dm_write.encode(msg)
pi = dm_write.image # .resize((14, 14))
cv_im = cv.CreateImageHeader(pi.size, cv.IPL_DEPTH_8U, 3)
cv.SetData(cv_im, pi.tostring())
return cv_im
# test = [('WIL', (100,100))]: # , ('LOW', (250,100)), ('GAR', (300, 300)), ('AGE', (500, 300))]:
test = []
y = 10
for j in range(7):
r = 28 + j * 4
mr = r * math.sqrt(2)
y += mr * 1.8
test += [(str(deg) + "abcdefgh"[j], (50 + deg * 11, y), math.pi * deg / 180, r) for deg in range(0, 90, 10)]
for (msg, (x, y), angle, r) in test:
map = cv.CreateMat(2, 3, cv.CV_32FC1)
corners = [(x + r * math.cos(angle + th), y + r * math.sin(angle + th)) for th in [0, math.pi / 2, math.pi, 3 * math.pi / 2]] # corners at 90-degree steps (the last was 3*pi/4 by mistake; only the first three are used below)
src = mkdmtx(msg)
(sx, sy) = cv.GetSize(src)
cv.GetAffineTransform([(0,0), (sx, 0), (sx, sy)], corners[:3], map)
temp = cv.CreateMat(bg.rows, bg.cols, cv.CV_8UC3)
cv.Set(temp, cv.RGB(0, 0, 0))
cv.WarpAffine(src, temp, map)
cv.Or(temp, bg, bg)
cv.ShowImage("comp", bg)
scribble = cv.CloneMat(bg)
if 0:
for i in range(10):
df.find(bg)
for (sym, coords) in df.find(bg).items():
print sym
cv.PolyLine(scribble, [coords], 1, cv.CV_RGB(255, 0,0), 1, lineType = cv.CV_AA)
Xs = [x for (x, y) in coords]
Ys = [y for (x, y) in coords]
where = ((min(Xs) + max(Xs)) / 2, max(Ys) - 50)
cv.PutText(scribble, sym, where, font, cv.RGB(0,255, 0))
cv.ShowImage("results", scribble)
cv.WaitKey()
cv.DestroyAllWindows()
sys.exit(0)
capture = cv.CaptureFromCAM(0)
while True:
img = cv.QueryFrame(capture)
cv.ShowImage("capture", img)
print df.find(img)
cv.WaitKey(6)

@ -1,162 +0,0 @@
#! /usr/bin/env python
from random import Random
import colorsys
print "OpenCV Python version of drawing"
import cv2.cv as cv
def random_color(random):
"""
Return a random color
"""
icolor = random.randint(0, 0xFFFFFF)
return cv.Scalar(icolor & 0xff, (icolor >> 8) & 0xff, (icolor >> 16) & 0xff)
if __name__ == '__main__':
# some "constants"
width = 1000
height = 700
window_name = "Drawing Demo"
number = 100
delay = 5
line_type = cv.CV_AA # change it to 8 to see non-antialiased graphics
# create the source image
image = cv.CreateImage( (width, height), 8, 3)
# create window and display the original picture in it
cv.NamedWindow(window_name, 1)
cv.SetZero(image)
cv.ShowImage(window_name, image)
# create the random number
random = Random()
# draw some lines
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
pt2 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.Line(image, pt1, pt2,
random_color(random),
random.randrange(0, 10),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# draw some rectangles
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
pt2 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.Rectangle(image, pt1, pt2,
random_color(random),
random.randrange(-1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# draw some ellipses
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
sz = (random.randrange(0, 200),
random.randrange(0, 200))
angle = random.randrange(0, 1000) * 0.180
cv.Ellipse(image, pt1, sz, angle, angle - 100, angle + 200,
random_color(random),
random.randrange(-1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# init the list of polylines
nb_polylines = 2
polylines_size = 3
pt = [0,] * nb_polylines
for a in range(nb_polylines):
pt [a] = [0,] * polylines_size
# draw some polylines
for i in range(number):
for a in range(nb_polylines):
for b in range(polylines_size):
pt [a][b] = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.PolyLine(image, pt, 1,
random_color(random),
random.randrange(1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# draw some filled polylines
for i in range(number):
for a in range(nb_polylines):
for b in range(polylines_size):
pt [a][b] = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.FillPoly(image, pt,
random_color(random),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# draw some circles
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
cv.Circle(image, pt1, random.randrange(0, 300),
random_color(random),
random.randrange(-1, 9),
line_type, 0)
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# draw some text
for i in range(number):
pt1 = (random.randrange(-width, 2 * width),
random.randrange(-height, 2 * height))
font = cv.InitFont(random.randrange(0, 8),
random.randrange(0, 100) * 0.05 + 0.01,
random.randrange(0, 100) * 0.05 + 0.01,
random.randrange(0, 5) * 0.1,
random.randrange(0, 10),
line_type)
cv.PutText(image, "Testing text rendering!",
pt1, font,
random_color(random))
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# prepare the text, and get its properties
font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX,
3, 3, 0.0, 5, line_type)
text_size, ymin = cv.GetTextSize("OpenCV forever!", font)
pt1 = ((width - text_size[0]) / 2, (height + text_size[1]) / 2)
image2 = cv.CloneImage(image)
# now, draw some OpenCV advertising ;-)
for i in range(0, 512, 2):
cv.SubS(image2, cv.ScalarAll(i), image)
(r, g, b) = colorsys.hsv_to_rgb((i % 100) / 100., 1, 1)
cv.PutText(image, "OpenCV forever!",
pt1, font, cv.RGB(255 * r, 255 * g, 255 * b))
cv.ShowImage(window_name, image)
cv.WaitKey(delay)
# wait some key to end
cv.WaitKey(0)
cv.DestroyAllWindows()

@ -1,60 +0,0 @@
#! /usr/bin/env python
print "OpenCV Python version of edge"
import sys
import urllib2
import cv2.cv as cv
# some definitions
win_name = "Edge"
trackbar_name = "Threshold"
# the callback on the trackbar
def on_trackbar(position):
cv.Smooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
cv.Not(gray, edge)
# run the edge detector on the grayscale image
cv.Canny(gray, edge, position, position * 3, 3)
# reset
cv.SetZero(col_edge)
# copy edge points
cv.Copy(im, col_edge, edge)
# show the im
cv.ShowImage(win_name, col_edge)
if __name__ == '__main__':
if len(sys.argv) > 1:
im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
# create the output im
col_edge = cv.CreateImage((im.width, im.height), 8, 3)
# convert to grayscale
gray = cv.CreateImage((im.width, im.height), 8, 1)
edge = cv.CreateImage((im.width, im.height), 8, 1)
cv.CvtColor(im, gray, cv.CV_BGR2GRAY)
# create the window
cv.NamedWindow(win_name, cv.CV_WINDOW_AUTOSIZE)
# create the trackbar
cv.CreateTrackbar(trackbar_name, win_name, 1, 100, on_trackbar)
# show the im
on_trackbar(0)
# wait a key pressed to end
cv.WaitKey(0)
cv.DestroyAllWindows()

@ -1,101 +0,0 @@
#!/usr/bin/python
"""
This program is demonstration for face and object detection using haar-like features.
The program finds faces in a camera image or video stream and displays a red box around them.
Original C implementation by: ?
Python implementation by: Roman Stanchak, James Bowman
"""
import sys
import cv2.cv as cv
from optparse import OptionParser
# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=2, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=(minimum possible face size)
min_size = (20, 20)
image_scale = 2
haar_scale = 1.2
min_neighbors = 2
haar_flags = 0
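# the frame is downscaled by image_scale before detection (roughly a 4x
# speed-up); the detected boxes are scaled back up when drawn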
def detect_and_draw(img, cascade):
# allocate temporary images
gray = cv.CreateImage((img.width,img.height), 8, 1)
small_img = cv.CreateImage((cv.Round(img.width / image_scale),
cv.Round (img.height / image_scale)), 8, 1)
# convert color input image to grayscale
cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
# scale input image for faster processing
cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
cv.EqualizeHist(small_img, small_img)
if(cascade):
t = cv.GetTickCount()
faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
haar_scale, min_neighbors, haar_flags, min_size)
t = cv.GetTickCount() - t
print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
if faces:
for ((x, y, w, h), n) in faces:
# the input to cv.HaarDetectObjects was resized, so scale the
# bounding box of each face and convert it to two CvPoints
pt1 = (int(x * image_scale), int(y * image_scale))
pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
cv.ShowImage("result", img)
if __name__ == '__main__':
parser = OptionParser(usage = "usage: %prog [options] [filename|camera_index]")
parser.add_option("-c", "--cascade", action="store", dest="cascade", type="str", help="Haar cascade file, default %default", default = "../data/haarcascades/haarcascade_frontalface_alt.xml")
(options, args) = parser.parse_args()
cascade = cv.Load(options.cascade)
if len(args) != 1:
parser.print_help()
sys.exit(1)
input_name = args[0]
if input_name.isdigit():
capture = cv.CreateCameraCapture(int(input_name))
else:
capture = None
cv.NamedWindow("result", 1)
if capture:
frame_copy = None
while True:
frame = cv.QueryFrame(capture)
if not frame:
cv.WaitKey(0)
break
if not frame_copy:
frame_copy = cv.CreateImage((frame.width,frame.height),
cv.IPL_DEPTH_8U, frame.nChannels)
if frame.origin == cv.IPL_ORIGIN_TL:
cv.Copy(frame, frame_copy)
else:
cv.Flip(frame, frame_copy, 0)
detect_and_draw(frame_copy, cascade)
if cv.WaitKey(10) >= 0:
break
else:
image = cv.LoadImage(input_name, 1)
detect_and_draw(image, cascade)
cv.WaitKey(0)
cv.DestroyWindow("result")

@ -1,56 +0,0 @@
#!/usr/bin/env python
from cv import *
class FBackDemo:
def __init__(self):
self.capture = CaptureFromCAM(0)
self.mv_step = 16
self.mv_scale = 1.5
self.mv_color = (0, 255, 0)
self.cflow = None
self.flow = None
NamedWindow( "Optical Flow", 1 )
print( "Press ESC - quit the program\n" )
def draw_flow(self, flow, prevgray):
""" Returns a nice representation of a hue histogram """
CvtColor(prevgray, self.cflow, CV_GRAY2BGR)
for y in range(0, flow.height, self.mv_step):
for x in range(0, flow.width, self.mv_step):
fx, fy = flow[y, x]
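# flow[y, x] is the (dx, dy) displacement of this pixel between the two
# frames; draw one vector per mv_step pixels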
Line(self.cflow, (x,y), (int(x+fx), int(y+fy)), self.mv_color) # round the float flow endpoint to integer pixel coordinates
Circle(self.cflow, (x,y), 2, self.mv_color, -1)
ShowImage("Optical Flow", self.cflow)
def run(self):
first_frame = True
while True:
frame = QueryFrame( self.capture )
if first_frame:
gray = CreateImage(GetSize(frame), 8, 1)
prev_gray = CreateImage(GetSize(frame), 8, 1)
flow = CreateImage(GetSize(frame), 32, 2)
self.cflow = CreateImage(GetSize(frame), 8, 3)
CvtColor(frame, gray, CV_BGR2GRAY)
if not first_frame:
CalcOpticalFlowFarneback(prev_gray, gray, flow,
pyr_scale=0.5, levels=3, winsize=15,
iterations=3, poly_n=5, poly_sigma=1.2, flags=0)
self.draw_flow(flow, prev_gray)
c = WaitKey(7)
if c in [27, ord('q'), ord('Q')]:
break
prev_gray, gray = gray, prev_gray
first_frame = False
if __name__=="__main__":
demo = FBackDemo()
demo.run()
DestroyAllWindows() # the module was imported with "from cv import *", so there is no cv name here

@ -1,160 +0,0 @@
#!/usr/bin/python
import sys
import random
import urllib2
import cv2.cv as cv
im=None;
mask=None;
color_img=None;
gray_img0 = None;
gray_img = None;
ffill_case = 1;
lo_diff = 20
up_diff = 20;
connectivity = 4;
is_color = 1;
is_mask = 0;
new_mask_val = 255;
def update_lo( pos ):
global lo_diff
lo_diff = pos
def update_up( pos ):
global up_diff
up_diff = pos
def on_mouse( event, x, y, flags, param ):
if( not color_img ):
return;
if event == cv.CV_EVENT_LBUTTONDOWN:
my_mask = None
seed = (x,y);
if ffill_case==0:
lo = up = 0
flags = connectivity + (new_mask_val << 8)
else:
lo = lo_diff;
up = up_diff;
flags = connectivity + (new_mask_val << 8) + cv.CV_FLOODFILL_FIXED_RANGE
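# CV_FLOODFILL_FIXED_RANGE compares pixels against the seed value; without
# it each pixel is compared with its already-filled neighbours (floating
# range); the low byte of flags is the connectivity and bits 8-15 carry the
# value written into the mask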
b = random.randint(0,255)
g = random.randint(0,255)
r = random.randint(0,255)
if( is_mask ):
my_mask = mask
cv.Threshold( mask, mask, 1, 128, cv.CV_THRESH_BINARY );
if( is_color ):
color = cv.CV_RGB( r, g, b );
comp = cv.FloodFill( color_img, seed, color, cv.CV_RGB( lo, lo, lo ),
cv.CV_RGB( up, up, up ), flags, my_mask );
cv.ShowImage( "image", color_img );
else:
brightness = cv.RealScalar((r*2 + g*7 + b + 5)/10);
comp = cv.FloodFill( gray_img, seed, brightness, cv.RealScalar(lo),
cv.RealScalar(up), flags, my_mask );
cv.ShowImage( "image", gray_img );
print "%g pixels were repainted" % comp[0]
if( is_mask ):
cv.ShowImage( "mask", mask );
if __name__ == "__main__":
if len(sys.argv) > 1:
im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
print "Hot keys:"
print "\tESC - quit the program"
print "\tc - switch color/grayscale mode"
print "\tm - switch mask mode"
print "\tr - restore the original image"
print "\ts - use null-range floodfill"
print "\tf - use gradient floodfill with fixed(absolute) range"
print "\tg - use gradient floodfill with floating(relative) range"
print "\t4 - use 4-connectivity mode"
print "\t8 - use 8-connectivity mode"
color_img = cv.CloneImage( im );
gray_img0 = cv.CreateImage( (color_img.width, color_img.height), 8, 1 );
cv.CvtColor( color_img, gray_img0, cv.CV_BGR2GRAY );
gray_img = cv.CloneImage( gray_img0 );
mask = cv.CreateImage( (color_img.width + 2, color_img.height + 2), 8, 1 );
cv.NamedWindow( "image", 1 );
cv.CreateTrackbar( "lo_diff", "image", lo_diff, 255, update_lo);
cv.CreateTrackbar( "up_diff", "image", up_diff, 255, update_up);
cv.SetMouseCallback( "image", on_mouse );
while True:
if( is_color ):
cv.ShowImage( "image", color_img );
else:
cv.ShowImage( "image", gray_img );
c = cv.WaitKey(0) % 0x100
if c == 27:
print("Exiting ...");
sys.exit(0)
elif c == ord('c'):
if( is_color ):
print("Grayscale mode is set");
cv.CvtColor( color_img, gray_img, cv.CV_BGR2GRAY );
is_color = 0;
else:
print("Color mode is set");
cv.Copy( im, color_img, None );
cv.Zero( mask );
is_color = 1;
elif c == ord('m'):
if( is_mask ):
cv.DestroyWindow( "mask" );
is_mask = 0;
else:
cv.NamedWindow( "mask", 0 );
cv.Zero( mask );
cv.ShowImage( "mask", mask );
is_mask = 1;
elif c == ord('r'):
print("Original image is restored");
cv.Copy( im, color_img, None );
cv.Copy( gray_img0, gray_img, None );
cv.Zero( mask );
elif c == ord('s'):
print("Simple floodfill mode is set");
ffill_case = 0;
elif c == ord('f'):
print("Fixed Range floodfill mode is set");
ffill_case = 1;
elif c == ord('g'):
print("Gradient (floating range) floodfill mode is set");
ffill_case = 2;
elif c == ord('4'):
print("4-connectivity mode is set");
connectivity = 4;
elif c == ord('8'):
print("8-connectivity mode is set");
connectivity = 8;
cv.DestroyAllWindows()

@ -1,104 +0,0 @@
#!/usr/bin/python
"""
This program is a demonstration of ellipse fitting.
Trackbar controls threshold parameter.
Gray lines are contours. Colored lines are fit ellipses.
Original C implementation by: Denis Burenkov.
Python implementation by: Roman Stanchak, James Bowman
"""
import sys
import urllib2
import random
import cv2.cv as cv
def contour_iterator(contour):
while contour:
yield contour
contour = contour.h_next()
class FitEllipse:
def __init__(self, source_image, slider_pos):
self.source_image = source_image
cv.CreateTrackbar("Threshold", "Result", slider_pos, 255, self.process_image)
self.process_image(slider_pos)
def process_image(self, slider_pos):
"""
This function finds contours, draws them and their approximation by ellipses.
"""
stor = cv.CreateMemStorage()
# Create the destination images
image02 = cv.CloneImage(self.source_image)
cv.Zero(image02)
image04 = cv.CreateImage(cv.GetSize(self.source_image), cv.IPL_DEPTH_8U, 3)
cv.Zero(image04)
# Threshold the source image. This is needed for cv.FindContours().
cv.Threshold(self.source_image, image02, slider_pos, 255, cv.CV_THRESH_BINARY)
# Find all contours.
cont = cv.FindContours(image02,
stor,
cv.CV_RETR_LIST,
cv.CV_CHAIN_APPROX_NONE,
(0, 0))
for c in contour_iterator(cont):
# cv.FitEllipse2 needs at least 6 points
if len(c) >= 6:
# Copy the contour into an array of (x,y)s
PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
for (i, (x, y)) in enumerate(c):
PointArray2D32f[0, i] = (x, y)
# Draw the current contour in gray
gray = cv.CV_RGB(100, 100, 100)
cv.DrawContours(image04, c, gray, gray,0,1,8,(0,0))
# Fit an ellipse to the current contour.
(center, size, angle) = cv.FitEllipse2(PointArray2D32f)
# Convert ellipse data from float to integer representation.
center = (cv.Round(center[0]), cv.Round(center[1]))
size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))
# Draw ellipse in random color
color = cv.CV_RGB(random.randrange(256),random.randrange(256),random.randrange(256))
cv.Ellipse(image04, center, size,
angle, 0, 360,
color, 2, cv.CV_AA, 0)
# Show the image (HighGUI).
cv.ShowImage( "Result", image04 )
if __name__ == '__main__':
if len(sys.argv) > 1:
source_image = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/stuff.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
source_image = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)
# Create windows.
cv.NamedWindow("Source", 1)
cv.NamedWindow("Result", 1)
# Show the image.
cv.ShowImage("Source", source_image)
fe = FitEllipse(source_image, 70)
print "Press any key to exit"
cv.WaitKey(0)
cv.DestroyWindow("Source")
cv.DestroyWindow("Result")

@ -1,58 +0,0 @@
#!/usr/bin/python
# This is a standalone program. Pass an image name as a first parameter of the program.
import sys
from math import sin, cos, sqrt, pi
import cv2.cv as cv
import urllib2
# toggle between CV_HOUGH_STANDARD and CV_HOUGH_PROBABILISTIC with the space key
USE_STANDARD = True
if __name__ == "__main__":
if len(sys.argv) > 1:
filename = sys.argv[1]
src = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/doc/pics/building.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
src = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)
cv.NamedWindow("Source", 1)
cv.NamedWindow("Hough", 1)
while True:
dst = cv.CreateImage(cv.GetSize(src), 8, 1)
color_dst = cv.CreateImage(cv.GetSize(src), 8, 3)
storage = cv.CreateMemStorage(0)
lines = 0
cv.Canny(src, dst, 50, 200, 3)
cv.CvtColor(dst, color_dst, cv.CV_GRAY2BGR)
if USE_STANDARD:
lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1, pi / 180, 100, 0, 0)
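# each line comes back as (rho, theta) with x*cos(theta) + y*sin(theta) = rho;
# the loop below draws a long segment through the foot point (x0, y0),
# perpendicular to the normal direction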
for (rho, theta) in lines[:100]:
a = cos(theta)
b = sin(theta)
x0 = a * rho
y0 = b * rho
pt1 = (cv.Round(x0 + 1000*(-b)), cv.Round(y0 + 1000*(a)))
pt2 = (cv.Round(x0 - 1000*(-b)), cv.Round(y0 - 1000*(a)))
cv.Line(color_dst, pt1, pt2, cv.RGB(255, 0, 0), 3, 8)
else:
lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, 1, pi / 180, 50, 50, 10)
for line in lines:
cv.Line(color_dst, line[0], line[1], cv.CV_RGB(255, 0, 0), 3, 8)
cv.ShowImage("Source", src)
cv.ShowImage("Hough", color_dst)
k = cv.WaitKey(0) % 0x100
if k == ord(' '):
USE_STANDARD = not USE_STANDARD
if k == 27:
break
cv.DestroyAllWindows()

@ -1,69 +0,0 @@
#!/usr/bin/python
import urllib2
import sys
import cv2.cv as cv
class Sketcher:
def __init__(self, windowname, dests):
self.prev_pt = None
self.windowname = windowname
self.dests = dests
cv.SetMouseCallback(self.windowname, self.on_mouse)
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv.CV_EVENT_LBUTTONUP or not (flags & cv.CV_EVENT_FLAG_LBUTTON):
self.prev_pt = None
elif event == cv.CV_EVENT_LBUTTONDOWN:
self.prev_pt = pt
elif event == cv.CV_EVENT_MOUSEMOVE and (flags & cv.CV_EVENT_FLAG_LBUTTON) :
if self.prev_pt:
for dst in self.dests:
cv.Line(dst, self.prev_pt, pt, cv.ScalarAll(255), 5, 8, 0)
self.prev_pt = pt
cv.ShowImage(self.windowname, img)
if __name__=="__main__":
if len(sys.argv) > 1:
img0 = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
print "Hot keys:"
print "\tESC - quit the program"
print "\tr - restore the original image"
print "\ti or ENTER - run inpainting algorithm"
print "\t\t(before running it, paint something on the image)"
cv.NamedWindow("image", 1)
cv.NamedWindow("inpainted image", 1)
img = cv.CloneImage(img0)
inpainted = cv.CloneImage(img0)
inpaint_mask = cv.CreateImage(cv.GetSize(img), 8, 1)
cv.Zero(inpaint_mask)
cv.Zero(inpainted)
cv.ShowImage("image", img)
cv.ShowImage("inpainted image", inpainted)
sk = Sketcher("image", [img, inpaint_mask])
while True:
c = cv.WaitKey(0) % 0x100
if c == 27 or c == ord('q'):
break
if c == ord('r'):
cv.Zero(inpaint_mask)
cv.Copy(img0, img)
cv.ShowImage("image", img)
if c == ord('i') or c == ord('\n'):
cv.Inpaint(img, inpaint_mask, inpainted, 3, cv.CV_INPAINT_TELEA)
cv.ShowImage("inpainted image", inpainted)
cv.DestroyAllWindows()

@ -1,98 +0,0 @@
#!/usr/bin/python
"""
Tracking of rotating point.
Rotation speed is constant.
The state vector is 2D (point angle and angular speed); the measurement
vector is 1D: the real point angle + gaussian noise.
The real and the estimated points are connected with yellow line segment,
the real and the measured points are connected with red line segment.
(if Kalman filter works correctly,
the yellow segment should be shorter than the red one).
Pressing any key (except ESC) will reset the tracking with a different speed.
Pressing ESC will stop the program.
"""
import urllib2
import cv2.cv as cv
from math import cos, sin, sqrt
import sys
if __name__ == "__main__":
A = [ [1, 1], [0, 1] ]
img = cv.CreateImage((500, 500), 8, 3)
kalman = cv.CreateKalman(2, 1, 0)
state = cv.CreateMat(2, 1, cv.CV_32FC1) # (phi, delta_phi)
process_noise = cv.CreateMat(2, 1, cv.CV_32FC1)
measurement = cv.CreateMat(1, 1, cv.CV_32FC1)
rng = cv.RNG(-1)
code = -1L
cv.Zero(measurement)
cv.NamedWindow("Kalman", 1)
while True:
cv.RandArr(rng, state, cv.CV_RAND_NORMAL, cv.RealScalar(0), cv.RealScalar(0.1))
kalman.transition_matrix[0,0] = 1
kalman.transition_matrix[0,1] = 1
kalman.transition_matrix[1,0] = 0
kalman.transition_matrix[1,1] = 1
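# transition matrix [[1, 1], [0, 1]]: the state is (phi, delta_phi), so each
# step advances phi by delta_phi while the angular speed stays constant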
cv.SetIdentity(kalman.measurement_matrix, cv.RealScalar(1))
cv.SetIdentity(kalman.process_noise_cov, cv.RealScalar(1e-5))
cv.SetIdentity(kalman.measurement_noise_cov, cv.RealScalar(1e-1))
cv.SetIdentity(kalman.error_cov_post, cv.RealScalar(1))
cv.RandArr(rng, kalman.state_post, cv.CV_RAND_NORMAL, cv.RealScalar(0), cv.RealScalar(0.1))
while True:
def calc_point(angle):
return (cv.Round(img.width/2 + img.width/3*cos(angle)),
cv.Round(img.height/2 - img.width/3*sin(angle)))
state_angle = state[0,0]
state_pt = calc_point(state_angle)
prediction = cv.KalmanPredict(kalman)
predict_angle = prediction[0, 0]
predict_pt = calc_point(predict_angle)
cv.RandArr(rng, measurement, cv.CV_RAND_NORMAL, cv.RealScalar(0),
cv.RealScalar(sqrt(kalman.measurement_noise_cov[0, 0])))
# generate measurement
cv.MatMulAdd(kalman.measurement_matrix, state, measurement, measurement)
measurement_angle = measurement[0, 0]
measurement_pt = calc_point(measurement_angle)
# plot points
def draw_cross(center, color, d):
cv.Line(img, (center[0] - d, center[1] - d),
(center[0] + d, center[1] + d), color, 1, cv.CV_AA, 0)
cv.Line(img, (center[0] + d, center[1] - d),
(center[0] - d, center[1] + d), color, 1, cv.CV_AA, 0)
cv.Zero(img)
draw_cross(state_pt, cv.CV_RGB(255, 255, 255), 3)
draw_cross(measurement_pt, cv.CV_RGB(255, 0,0), 3)
draw_cross(predict_pt, cv.CV_RGB(0, 255, 0), 3)
cv.Line(img, state_pt, measurement_pt, cv.CV_RGB(255, 0,0), 3, cv.CV_AA, 0)
cv.Line(img, state_pt, predict_pt, cv.CV_RGB(255, 255, 0), 3, cv.CV_AA, 0)
cv.KalmanCorrect(kalman, measurement)
cv.RandArr(rng, process_noise, cv.CV_RAND_NORMAL, cv.RealScalar(0),
cv.RealScalar(sqrt(kalman.process_noise_cov[0, 0])))
cv.MatMulAdd(kalman.transition_matrix, state, process_noise, state)
cv.ShowImage("Kalman", img)
code = cv.WaitKey(100) % 0x100
if code != -1:
break
if code in [27, ord('q'), ord('Q')]:
break
cv.DestroyWindow("Kalman")

@ -1,60 +0,0 @@
#!/usr/bin/python
import urllib2
import cv2.cv as cv
from random import randint
MAX_CLUSTERS = 5
if __name__ == "__main__":
color_tab = [
cv.CV_RGB(255, 0,0),
cv.CV_RGB(0, 255, 0),
cv.CV_RGB(100, 100, 255),
cv.CV_RGB(255, 0,255),
cv.CV_RGB(255, 255, 0)]
img = cv.CreateImage((500, 500), 8, 3)
rng = cv.RNG(-1)
cv.NamedWindow("clusters", 1)
while True:
cluster_count = randint(2, MAX_CLUSTERS)
sample_count = randint(1, 1000)
points = cv.CreateMat(sample_count, 1, cv.CV_32FC2)
clusters = cv.CreateMat(sample_count, 1, cv.CV_32SC1)
# generate random sample from multigaussian distribution
for k in range(cluster_count):
center = (cv.RandInt(rng)%img.width, cv.RandInt(rng)%img.height)
first = k*sample_count/cluster_count
last = sample_count
if k != cluster_count - 1: # k runs from 0 to cluster_count-1, so the old test "k != cluster_count" was always true
last = (k+1)*sample_count/cluster_count
point_chunk = cv.GetRows(points, first, last)
cv.RandArr(rng, point_chunk, cv.CV_RAND_NORMAL,
cv.Scalar(center[0], center[1], 0, 0),
cv.Scalar(img.width*0.1, img.height*0.1, 0, 0))
# shuffle samples
cv.RandShuffle(points, rng)
cv.KMeans2(points, cluster_count, clusters,
(cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 10, 1.0))
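# termination criteria: stop after 10 iterations or once the cluster
# centers move by less than 1.0 between iterations, whichever happens first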
cv.Zero(img)
for i in range(sample_count):
cluster_idx = int(clusters[i, 0])
pt = (cv.Round(points[i, 0][0]), cv.Round(points[i, 0][1]))
cv.Circle(img, pt, 2, color_tab[cluster_idx], cv.CV_FILLED, cv.CV_AA, 0)
cv.ShowImage("clusters", img)
key = cv.WaitKey(0) % 0x100
if key in [27, ord('q'), ord('Q')]:
break
cv.DestroyWindow("clusters")

@ -1,45 +0,0 @@
#!/usr/bin/python
import urllib2
import cv2.cv as cv
import sys
if __name__ == "__main__":
laplace = None
colorlaplace = None
planes = [ None, None, None ]
capture = None
if len(sys.argv) == 1:
capture = cv.CreateCameraCapture(0)
elif len(sys.argv) == 2 and sys.argv[1].isdigit():
capture = cv.CreateCameraCapture(int(sys.argv[1]))
elif len(sys.argv) == 2:
capture = cv.CreateFileCapture(sys.argv[1])
if not capture:
print "Could not initialize capturing..."
sys.exit(-1)
cv.NamedWindow("Laplacian", 1)
while True:
frame = cv.QueryFrame(capture)
if frame:
if not laplace:
planes = [cv.CreateImage((frame.width, frame.height), 8, 1) for i in range(3)]
laplace = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_16S, 1)
colorlaplace = cv.CreateImage((frame.width, frame.height), 8, 3)
cv.Split(frame, planes[0], planes[1], planes[2], None)
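# run the Laplacian on each channel separately; the response can be
# negative, so it is computed into a 16-bit signed buffer and converted
# back to 8-bit via the absolute value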
for plane in planes:
cv.Laplace(plane, laplace, 3)
cv.ConvertScaleAbs(laplace, plane, 1, 0)
cv.Merge(planes[0], planes[1], planes[2], None, colorlaplace)
cv.ShowImage("Laplacian", colorlaplace)
if cv.WaitKey(10) != -1:
break
cv.DestroyWindow("Laplacian")

@ -1,192 +0,0 @@
#! /usr/bin/env python
print "OpenCV Python version of lkdemo"
import sys
# import the necessary things for OpenCV
import cv2.cv as cv
#############################################################################
# some "constants"
win_size = 10
MAX_COUNT = 500
#############################################################################
# some "global" variables
image = None
pt = None
add_remove_pt = False
flags = 0
night_mode = False
need_to_init = False
#############################################################################
# the mouse callback
# the callback on the trackbar
def on_mouse (event, x, y, flags, param):
# we will use the global pt and add_remove_pt
global pt
global add_remove_pt
if image is None:
# not initialized, so skip
return
if image.origin != 0:
# different origin
y = image.height - y
if event == cv.CV_EVENT_LBUTTONDOWN:
# user has clicked, so memorize it
pt = (x, y)
add_remove_pt = True
#############################################################################
# so, here is the main part of the program
if __name__ == '__main__':
frames = sys.argv[1:]
if frames == []:
print "usage lkdemo.py <image files>"
sys.exit(1)
# display a short how-to
print "Hot keys: \n" \
"\tESC - quit the program\n" \
"\tr - auto-initialize tracking\n" \
"\tc - delete all the points\n" \
"\tn - switch the \"night\" mode on/off\n" \
"\tSPACE - next frame\n" \
"To add/remove a feature point click it\n"
# first, create the necessary windows
cv.NamedWindow ('LkDemo', cv.CV_WINDOW_AUTOSIZE)
# register the mouse callback
cv.SetMouseCallback ('LkDemo', on_mouse, None)
fc = 0
while 1:
# do forever
frame = cv.LoadImage(frames[fc])
if image is None:
# create the images we need
image = cv.CreateImage (cv.GetSize (frame), 8, 3)
image.origin = frame.origin
grey = cv.CreateImage (cv.GetSize (frame), 8, 1)
prev_grey = cv.CreateImage (cv.GetSize (frame), 8, 1)
pyramid = cv.CreateImage (cv.GetSize (frame), 8, 1)
prev_pyramid = cv.CreateImage (cv.GetSize (frame), 8, 1)
features = []
# copy the frame, so we can draw on it
cv.Copy (frame, image)
# create a grey version of the image
cv.CvtColor (image, grey, cv.CV_BGR2GRAY)
if night_mode:
# night mode: only display the points
cv.SetZero (image)
if need_to_init:
# we want to search all the good points
# create the wanted images
eig = cv.CreateImage (cv.GetSize (grey), 32, 1)
temp = cv.CreateImage (cv.GetSize (grey), 32, 1)
# the default parameters
quality = 0.01
min_distance = 10
# search the good points
features = cv.GoodFeaturesToTrack (
grey, eig, temp,
MAX_COUNT,
quality, min_distance, None, 3, 0, 0.04)
# refine the corner locations
features = cv.FindCornerSubPix (
grey,
features,
(win_size, win_size), (-1, -1),
(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
elif features != []:
# we have points, so display them
# calculate the optical flow
features, status, track_error = cv.CalcOpticalFlowPyrLK (
prev_grey, grey, prev_pyramid, pyramid,
features,
(win_size, win_size), 3,
(cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS, 20, 0.03),
flags)
# set back the points we keep
features = [ p for (st,p) in zip(status, features) if st]
if add_remove_pt:
# we have a point to add, so see if it is close to
# another one. If yes, don't use it
def ptptdist(p0, p1):
dx = p0[0] - p1[0]
dy = p0[1] - p1[1]
return dx**2 + dy**2
if min([ ptptdist(pt, p) for p in features ]) < 25:
# closer than 5 pixels (squared distance < 25), so ignore it
add_remove_pt = False
# draw the points as green circles
for the_point in features:
cv.Circle (image, (int(the_point[0]), int(the_point[1])), 3, (0, 255, 0, 0), -1, 8, 0)
if add_remove_pt:
# we want to add a point
# refine this corner location and append it to 'features'
features += cv.FindCornerSubPix (
grey,
[pt],
(win_size, win_size), (-1, -1),
(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
20, 0.03))
# we are no longer in "add_remove_pt" mode
add_remove_pt = False
# swapping
prev_grey, grey = grey, prev_grey
prev_pyramid, pyramid = pyramid, prev_pyramid
need_to_init = False
# we can now display the image
cv.ShowImage ('LkDemo', image)
# handle events
c = cv.WaitKey(10) % 0x100
if c == 27:
# user pressed the ESC key, so exit
break
# processing depending on the character
if 32 <= c and c < 128:
cc = chr(c).lower()
if cc == 'r':
need_to_init = True
elif cc == 'c':
features = []
elif cc == 'n':
night_mode = not night_mode
elif cc == ' ':
fc = (fc + 1) % len(frames)
cv.DestroyAllWindows()

@ -1,45 +0,0 @@
#!/usr/bin/python
import sys
import urllib2
import cv2.cv as cv
src=None
dst=None
src2=None
def on_mouse(event, x, y, flags, param):
if not src:
return
if event==cv.CV_EVENT_LBUTTONDOWN:
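# map the image to log-polar coordinates around the clicked point (40 is
# the magnitude scale), then map it back; around that centre, rotation and
# scaling become simple shifts in log-polar space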
cv.LogPolar(src, dst, (x, y), 40, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS)
cv.LogPolar(dst, src2, (x, y), 40, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS + cv.CV_WARP_INVERSE_MAP)
cv.ShowImage("log-polar", dst)
cv.ShowImage("inverse log-polar", src2)
if __name__ == "__main__":
if len(sys.argv) > 1:
src = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
src = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
cv.NamedWindow("original", 1)
cv.NamedWindow("log-polar", 1)
cv.NamedWindow("inverse log-polar", 1)
dst = cv.CreateImage((256, 256), 8, 3)
src2 = cv.CreateImage(cv.GetSize(src), 8, 3)
cv.SetMouseCallback("original", on_mouse)
on_mouse(cv.CV_EVENT_LBUTTONDOWN, src.width/2, src.height/2, None, None)
cv.ShowImage("original", src)
cv.WaitKey()
cv.DestroyAllWindows()

@ -1,62 +0,0 @@
#!/usr/bin/python
import urllib2
import cv2.cv as cv
from random import randint
def roundxy(pt):
return (cv.Round(pt[0]), cv.Round(pt[1]))
def draw_common(points):
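# draw the two bounding primitives for the point set: the minimum enclosing
# circle (yellow) and the minimum-area rotated rectangle (cyan)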
success, center, radius = cv.MinEnclosingCircle(points)
if success:
cv.Circle(img, roundxy(center), cv.Round(radius), cv.CV_RGB(255, 255, 0), 1, cv.CV_AA, 0)
box = cv.MinAreaRect2(points)
box_vtx = [roundxy(p) for p in cv.BoxPoints(box)]
cv.PolyLine(img, [box_vtx], 1, cv.CV_RGB(0, 255, 255), 1, cv.CV_AA)
def minarea_array(img, count):
pointMat = cv.CreateMat(count, 1, cv.CV_32SC2)
for i in range(count):
pointMat[i, 0] = (randint(img.width/4, img.width*3/4),
randint(img.height/4, img.height*3/4))
cv.Zero(img)
for i in range(count):
cv.Circle(img, roundxy(pointMat[i, 0]), 2, cv.CV_RGB(255, 0, 0), cv.CV_FILLED, cv.CV_AA, 0)
draw_common(pointMat)
def minarea_seq(img, count, storage):
points = [(randint(img.width/4, img.width*3/4), randint(img.height/4, img.height*3/4)) for i in range(count)]
cv.Zero(img)
for p in points:
cv.Circle(img, roundxy(p), 2, cv.CV_RGB(255, 0, 0), cv.CV_FILLED, cv.CV_AA, 0)
draw_common(points)
if __name__ == "__main__":
img = cv.CreateImage((500, 500), 8, 3)
storage = cv.CreateMemStorage()
cv.NamedWindow("rect & circle", 1)
use_seq = True
while True:
count = randint(1, 100)
if use_seq:
minarea_seq(img, count, storage)
else:
minarea_array(img, count)
cv.ShowImage("rect & circle", img)
key = cv.WaitKey() % 0x100
if key in [27, ord('q'), ord('Q')]:
break
use_seq = not use_seq
cv.DestroyAllWindows()

@ -1,13 +0,0 @@
#! /usr/bin/env python
import cv2.cv as cv
cap = cv.CreateFileCapture("../c/tree.avi")
img = cv.QueryFrame(cap)
print "Got frame of dimensions (", img.width, " x ", img.height, ")"
cv.NamedWindow("win", cv.CV_WINDOW_AUTOSIZE)
cv.ShowImage("win", img)
cv.MoveWindow("win", 200, 200)
cv.WaitKey(0)
cv.DestroyAllWindows()

@ -1,52 +0,0 @@
#!/usr/bin/python
import sys
import urllib2
import cv2.cv as cv
src = 0
image = 0
dest = 0
element_shape = cv.CV_SHAPE_RECT
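# opening (erode then dilate) removes small bright specks; closing (dilate
# then erode) fills small dark holes; each trackbar position sets the
# half-size of the rectangular structuring element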
def Opening(pos):
element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, element_shape)
cv.Erode(src, image, element, 1)
cv.Dilate(image, dest, element, 1)
cv.ShowImage("Opening & Closing", dest)
def Closing(pos):
element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, element_shape)
cv.Dilate(src, image, element, 1)
cv.Erode(image, dest, element, 1)
cv.ShowImage("Opening & Closing", dest)
def Erosion(pos):
element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, element_shape)
cv.Erode(src, dest, element, 1)
cv.ShowImage("Erosion & Dilation", dest)
def Dilation(pos):
element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, element_shape)
cv.Dilate(src, dest, element, 1)
cv.ShowImage("Erosion & Dilation", dest)
if __name__ == "__main__":
if len(sys.argv) > 1:
src = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
else:
url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
filedata = urllib2.urlopen(url).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
src = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
image = cv.CloneImage(src)
dest = cv.CloneImage(src)
cv.NamedWindow("Opening & Closing", 1)
cv.NamedWindow("Erosion & Dilation", 1)
cv.ShowImage("Opening & Closing", src)
cv.ShowImage("Erosion & Dilation", src)
cv.CreateTrackbar("Open", "Opening & Closing", 0, 10, Opening)
cv.CreateTrackbar("Close", "Opening & Closing", 0, 10, Closing)
cv.CreateTrackbar("Dilate", "Erosion & Dilation", 0, 10, Dilation)
cv.CreateTrackbar("Erode", "Erosion & Dilation", 0, 10, Erosion)
cv.WaitKey(0)
cv.DestroyWindow("Opening & Closing")
cv.DestroyWindow("Erosion & Dilation")

@ -1,110 +0,0 @@
#!/usr/bin/python
import urllib2
import sys
import time
from math import cos, sin
import cv2.cv as cv
CLOCKS_PER_SEC = 1.0
MHI_DURATION = 1
MAX_TIME_DELTA = 0.5
MIN_TIME_DELTA = 0.05
N = 4
buf = [None] * N # ring buffer of the last N grayscale frames (the old range(10) over-allocated)
last = 0
mhi = None # MHI
orient = None # orientation
mask = None # valid orientation mask
segmask = None # motion segmentation map
storage = None # temporary storage
def update_mhi(img, dst, diff_threshold):
global last
global mhi
global storage
global mask
global orient
global segmask
timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds
size = cv.GetSize(img) # get current frame size
idx1 = last
if not mhi or cv.GetSize(mhi) != size:
for i in range(N):
buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
cv.Zero(buf[i])
mhi = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
cv.Zero(mhi) # clear MHI at the beginning
orient = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
segmask = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
mask = cv.CreateImage(size,cv. IPL_DEPTH_8U, 1)
cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
idx2 = (last + 1) % N # index of (last - (N-1))th frame
last = idx2
silh = buf[idx2]
cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames
cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it
cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI
cv.CvtScale(mhi, mask, 255./MHI_DURATION,
(MHI_DURATION - timestamp)*255./MHI_DURATION)
cv.Zero(dst)
cv.Merge(mask, None, None, None, dst)
cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
if not storage:
storage = cv.CreateMemStorage(0)
seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
for (area, value, comp_rect) in seq:
if comp_rect[2] + comp_rect[3] > 100: # reject very small components
color = cv.CV_RGB(255, 0,0)
silh_roi = cv.GetSubRect(silh, comp_rect)
mhi_roi = cv.GetSubRect(mhi, comp_rect)
orient_roi = cv.GetSubRect(orient, comp_rect)
mask_roi = cv.GetSubRect(mask, comp_rect)
angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI
if count < (comp_rect[2] * comp_rect[3] * 0.05):
continue
magnitude = 30.
center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0)
cv.Line(dst,
center,
(cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
color,
3,
cv.CV_AA,
0)
if __name__ == "__main__":
motion = 0
capture = 0
if len(sys.argv)==1:
capture = cv.CreateCameraCapture(0)
elif len(sys.argv)==2 and sys.argv[1].isdigit():
capture = cv.CreateCameraCapture(int(sys.argv[1]))
elif len(sys.argv)==2:
capture = cv.CreateFileCapture(sys.argv[1])
if not capture:
print "Could not initialize capturing..."
sys.exit(-1)
cv.NamedWindow("Motion", 1)
while True:
image = cv.QueryFrame(capture)
if(image):
if(not motion):
motion = cv.CreateImage((image.width, image.height), 8, 3)
cv.Zero(motion)
#motion.origin = image.origin
update_mhi(image, motion, 30)
cv.ShowImage("Motion", motion)
if(cv.WaitKey(10) != -1):
break
else:
break
cv.DestroyWindow("Motion")

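The motion-history functions survive in the new API (cv2.updateMotionHistory and friends); the silhouette-extraction core of the demo above reduces to a few array calls. A frame-differencing sketch, assuming a working camera 0:

import cv2

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
prev = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
while ok:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    silh = cv2.absdiff(prev, gray)                               # difference between frames
    ret, silh = cv2.threshold(silh, 30, 255, cv2.THRESH_BINARY)  # and threshold it
    prev = gray
    cv2.imshow("Motion", silh)
    if cv2.waitKey(10) != -1:
        break
cv2.destroyAllWindows()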
@@ -1,66 +0,0 @@
#!/usr/bin/python
import urllib2
import sys
import cv2.cv as cv
import numpy

# SRGB-linear conversions using NumPy - see http://en.wikipedia.org/wiki/SRGB

def srgb2lin(x):
    a = 0.055
    return numpy.where(x <= 0.04045,
                       x * (1.0 / 12.92),
                       numpy.power((x + a) * (1.0 / (1 + a)), 2.4))

def lin2srgb(x):
    a = 0.055
    return numpy.where(x <= 0.0031308,
                       x * 12.92,
                       (1 + a) * numpy.power(x, 1 / 2.4) - a)

if __name__ == "__main__":
    if len(sys.argv) > 1:
        img0 = cv.LoadImageM(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/lena.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        img0 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    cv.NamedWindow("original", 1)
    cv.ShowImage("original", img0)

    # Image was originally bytes in range 0-255. Turn it into an array of floats in range 0.0 - 1.0
    n = numpy.asarray(img0) / 255.0

    # Use NumPy to do some transformations on the image

    # Negate the image by subtracting it from 1.0
    cv.NamedWindow("negative")
    cv.ShowImage("negative", cv.fromarray(1.0 - n))

    # Assume the image was sRGB, and compute the linear version.
    cv.NamedWindow("linear")
    cv.ShowImage("linear", cv.fromarray(srgb2lin(n)))

    # Look at a subwindow
    cv.NamedWindow("subwindow")
    cv.ShowImage("subwindow", cv.fromarray(n[200:300, 200:400]))

    # Compute the grayscale image
    cv.NamedWindow("monochrome")
    ln = srgb2lin(n)
    red = ln[:,:,0]
    grn = ln[:,:,1]
    blu = ln[:,:,2]
    linear_mono = 0.3 * red + 0.59 * grn + 0.11 * blu
    cv.ShowImage("monochrome", cv.fromarray(lin2srgb(linear_mono)))

    # Apply a blur to the NumPy array using OpenCV
    cv.NamedWindow("gaussian")
    cv.Smooth(n, n, cv.CV_GAUSSIAN, 15, 15)
    cv.ShowImage("gaussian", cv.fromarray(n))

    cv.WaitKey(0)
    cv.DestroyAllWindows()

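Under cv2 the asarray/fromarray round trips above disappear entirely, because cv2 images are NumPy arrays. A sketch of the same transformations; the input path is illustrative:

import cv2

img = cv2.imread("../c/lena.jpg")
n = img / 255.0                               # floats in range 0.0 - 1.0
cv2.imshow("negative", 1.0 - n)               # imshow scales float images by 255
cv2.imshow("subwindow", n[200:300, 200:400])  # a view, no copy
cv2.imshow("gaussian", cv2.GaussianBlur(n, (15, 15), 0))
cv2.waitKey(0)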
@@ -1,48 +0,0 @@
#!/usr/bin/python
import urllib2
import sys
import cv2.cv as cv
import numpy

if __name__ == "__main__":
    cv.NamedWindow("camera", 1)
    capture = cv.CaptureFromCAM(0)
    paste = cv.CreateMat(960, 1280, cv.CV_8UC3)
    topleft = numpy.asarray(cv.GetSubRect(paste, (0, 0, 640, 480)))
    topright = numpy.asarray(cv.GetSubRect(paste, (640, 0, 640, 480)))
    bottomleft = numpy.asarray(cv.GetSubRect(paste, (0, 480, 640, 480)))
    bottomright = numpy.asarray(cv.GetSubRect(paste, (640, 480, 640, 480)))
    while True:
        img = cv.GetMat(cv.QueryFrame(capture))
        n = (numpy.asarray(img)).astype(numpy.uint8)
        red = n[:,:,0]
        grn = n[:,:,1]
        blu = n[:,:,2]
        topleft[:,:,0] = 255 - grn
        topleft[:,:,1] = red
        topleft[:,:,2] = blu
        topright[:,:,0] = blu
        topright[:,:,1] = 255 - red
        topright[:,:,2] = grn
        bottomright[:,:,0] = red
        bottomright[:,:,1] = grn
        bottomright[:,:,2] = 255 - blu
        fgrn = grn.astype(numpy.float32)
        fred = red.astype(numpy.float32)
        bottomleft[:,:,0] = blu
        bottomleft[:,:,1] = (abs(fgrn - fred)).astype(numpy.uint8)
        bottomleft[:,:,2] = red
        cv.ShowImage("camera", paste)
        if cv.WaitKey(6) == 27:
            break
    cv.DestroyAllWindows()

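The cv2 version needs no GetSubRect headers: quadrants are plain NumPy slices of one canvas. A sketch, assuming a working camera 0 (the per-quadrant effects here are illustrative, not the ones above):

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    ok, img = cap.read()
    if not ok:
        break
    h, w = img.shape[:2]
    paste = np.zeros((h*2, w*2, 3), np.uint8)
    paste[:h, :w] = img              # top-left: original
    paste[:h, w:] = 255 - img        # top-right: negative
    paste[h:, :w] = img[:, ::-1]     # bottom-left: mirrored
    paste[h:, w:] = img[::-1, :]     # bottom-right: flipped
    cv2.imshow("camera", paste)
    if cv2.waitKey(6) == 27:
        break
cv2.destroyAllWindows()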
@@ -1,56 +0,0 @@
#!/usr/bin/python
import sys
from cv import *

def inside(r, q):
    (rx, ry), (rw, rh) = r
    (qx, qy), (qw, qh) = q
    return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh

try:
    img = LoadImage(sys.argv[1])
except:
    try:
        f = open(sys.argv[1], "rt")
    except:
        print "cannot read " + sys.argv[1]
        sys.exit(-1)
    imglist = list(f.readlines())
else:
    imglist = [sys.argv[1]]

NamedWindow("people detection demo", 1)
storage = CreateMemStorage(0)

for name in imglist:
    n = name.strip()
    print n
    try:
        img = LoadImage(n)
    except:
        continue
    #ClearMemStorage(storage)
    found = list(HOGDetectMultiScale(img, storage, win_stride=(8,8),
                                     padding=(32,32), scale=1.05, group_threshold=2))
    found_filtered = []
    for r in found:
        insidef = False
        for q in found:
            if inside(r, q):
                insidef = True
                break
        if not insidef:
            found_filtered.append(r)
    for r in found_filtered:
        (rx, ry), (rw, rh) = r
        tl = (rx + int(rw*0.1), ry + int(rh*0.07))
        br = (rx + int(rw*0.9), ry + int(rh*0.87))
        Rectangle(img, tl, br, (0, 255, 0), 3)
    ShowImage("people detection demo", img)
    c = WaitKey(0)
    if c == ord('q'):
        break
DestroyAllWindows()

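cv.HOGDetectMultiScale maps onto the cv2.HOGDescriptor class in the new API. A sketch of the detection core; the input path is illustrative:

import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
img = cv2.imread("people.jpg")
found, weights = hog.detectMultiScale(img, winStride=(8, 8), padding=(32, 32), scale=1.05)
for (x, y, w, h) in found:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
cv2.imshow("people detection demo", img)
cv2.waitKey(0)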
@@ -1,41 +0,0 @@
#!/usr/bin/python
import cv2.cv as cv

class PyrSegmentation:
    def __init__(self, img0):
        self.thresh1 = 255
        self.thresh2 = 30
        self.level = 4
        self.storage = cv.CreateMemStorage()
        cv.NamedWindow("Source", 0)
        cv.ShowImage("Source", img0)
        cv.NamedWindow("Segmentation", 0)
        cv.CreateTrackbar("Thresh1", "Segmentation", self.thresh1, 255, self.set_thresh1)
        cv.CreateTrackbar("Thresh2", "Segmentation", self.thresh2, 255, self.set_thresh2)
        self.image0 = cv.CloneImage(img0)
        self.image1 = cv.CloneImage(img0)
        cv.ShowImage("Segmentation", self.image1)

    def set_thresh1(self, val):
        self.thresh1 = val
        self.on_segment()

    def set_thresh2(self, val):
        self.thresh2 = val
        self.on_segment()

    def on_segment(self):
        comp = cv.PyrSegmentation(self.image0, self.image1, self.storage,
                                  self.level, self.thresh1+1, self.thresh2+1)
        cv.ShowImage("Segmentation", self.image1)

    def run(self):
        self.on_segment()
        cv.WaitKey(0)

if __name__ == "__main__":
    img0 = cv.LoadImage("../c/fruits.jpg", 1)
    # segmentation of the color image
    PyrSegmentation(img0).run()
    cv.DestroyAllWindows()

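cv.PyrSegmentation has no direct equivalent in cv2; cv2.pyrMeanShiftFiltering is the usual rough substitute for this kind of color segmentation. A sketch:

import cv2

img = cv2.imread("../c/fruits.jpg")
seg = cv2.pyrMeanShiftFiltering(img, 20, 40)   # spatial radius, color radius
cv2.imshow("Segmentation", seg)
cv2.waitKey(0)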
@@ -1,153 +0,0 @@
#!/usr/bin/python
#
# The full "Square Detector" program.
# It loads several images sequentially and tries to find squares in
# each image
#
from math import sqrt
import cv2.cv as cv

thresh = 50
img = None
img0 = None
storage = None
wndname = "Square Detection Demo"

def angle(pt1, pt2, pt0):
    # cosine of the angle between vectors pt0->pt1 and pt0->pt2
    dx1 = pt1[0] - pt0[0]
    dy1 = pt1[1] - pt0[1]
    dx2 = pt2[0] - pt0[0]
    dy2 = pt2[1] - pt0[1]
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10)

def findSquares4(img, storage):
    N = 11
    sz = (img.width & -2, img.height & -2)
    timg = cv.CloneImage(img)  # make a copy of input image
    gray = cv.CreateImage(sz, 8, 1)
    pyr = cv.CreateImage((sz[0]/2, sz[1]/2), 8, 3)
    squares = []  # the found squares, 4 vertices each
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.GetSubRect(timg, (0, 0, sz[0], sz[1]))
    # down-scale and upscale the image to filter out the noise
    cv.PyrDown(subimage, pyr, 7)
    cv.PyrUp(pyr, subimage, 7)
    tgray = cv.CreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.Split(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if l == 0:
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.Canny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.Dilate(gray, gray, None, 1)
            else:
                # apply threshold if l!=0:
                # tgray(x, y) = gray(x, y) < (l+1)*255/N ? 255 : 0
                cv.Threshold(tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            contours = cv.FindContours(gray, storage, cv.CV_RETR_LIST,
                                       cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
            # test each contour
            contour = contours
            while contour:
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.ApproxPoly(contour, storage, cv.CV_POLY_APPROX_DP,
                                       cv.ArcLength(contour)*0.02, 0)
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if (len(result) == 4 and
                        abs(cv.ContourArea(result)) > 1000 and
                        cv.CheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if i >= 2:
                            # i % 4 wraps around to the first vertex, as in the C sample
                            t = abs(angle(result[i % 4], result[i-2], result[i-1]))
                            if s < t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quadrangle
                    # vertices to resultant sequence
                    if s < 0.3:
                        for i in range(4):
                            squares.append(result[i])
                contour = contour.h_next()
    return squares

# the function draws all the squares in the image
def drawSquares(img, squares):
    cpy = cv.CloneImage(img)
    # read 4 sequence elements at a time (all vertices of a square)
    i = 0
    while i < len(squares):
        pt = []
        # read 4 vertices
        pt.append(squares[i])
        pt.append(squares[i+1])
        pt.append(squares[i+2])
        pt.append(squares[i+3])
        # draw the square as a closed polyline
        cv.PolyLine(cpy, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
        i += 4
    # show the resultant image
    cv.ShowImage(wndname, cpy)

def on_trackbar(a):
    if img:
        drawSquares(img, findSquares4(img, storage))

names = ["../c/pic1.png", "../c/pic2.png", "../c/pic3.png",
         "../c/pic4.png", "../c/pic5.png", "../c/pic6.png"]

if __name__ == "__main__":
    # create memory storage that will contain all the dynamic data
    storage = cv.CreateMemStorage(0)
    for name in names:
        img0 = cv.LoadImage(name, 1)
        if not img0:
            print "Couldn't load %s" % name
            continue
        img = cv.CloneImage(img0)
        # create window and a trackbar (slider) with parent "image" and set callback
        # (the slider regulates upper threshold, passed to Canny edge detector)
        cv.NamedWindow(wndname, 1)
        cv.CreateTrackbar("canny thresh", wndname, thresh, 1000, on_trackbar)
        # force the image processing
        on_trackbar(0)
        # wait for key.
        # Also the function cv.WaitKey takes care of event processing
        c = cv.WaitKey(0) % 0x100
        # clear memory storage - reset free space position
        storage = cv.CreateMemStorage(0)
        if c == 27:  # ESC
            break
    cv.DestroyWindow(wndname)

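The same detector in cv2 drops all the CvSeq bookkeeping: cv2.findContours, cv2.approxPolyDP and cv2.isContourConvex work on NumPy arrays. A sketch of the Canny branch only (2.4-style two-value findContours return; the input path is illustrative):

import cv2
import numpy as np

img = cv2.imread("../c/pic1.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.dilate(cv2.Canny(gray, 0, 50, apertureSize=5), None)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
squares = []
for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
    if len(approx) == 4 and abs(cv2.contourArea(approx)) > 1000 and cv2.isContourConvex(approx):
        squares.append(approx.reshape(-1, 2))
cv2.drawContours(img, squares, -1, (0, 255, 0), 3)
cv2.imshow("Square Detection Demo", img)
cv2.waitKey(0)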
@@ -1,109 +0,0 @@
#!/usr/bin/python
import urllib2
import sys
import cv2.cv as cv

class Sketcher:
    def __init__(self, windowname, dests):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        cv.SetMouseCallback(self.windowname, self.on_mouse)

    def on_mouse(self, event, x, y, flags, param):
        pt = (x, y)
        if event == cv.CV_EVENT_LBUTTONUP or not (flags & cv.CV_EVENT_FLAG_LBUTTON):
            self.prev_pt = None
        elif event == cv.CV_EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        elif event == cv.CV_EVENT_MOUSEMOVE and (flags & cv.CV_EVENT_FLAG_LBUTTON):
            if self.prev_pt:
                for dst in self.dests:
                    cv.Line(dst, self.prev_pt, pt, cv.ScalarAll(255), 5, 8, 0)
            self.prev_pt = pt
            cv.ShowImage(self.windowname, img)

if __name__ == "__main__":
    if len(sys.argv) > 1:
        img0 = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    rng = cv.RNG(-1)
    print "Hot keys:"
    print "\tESC - quit the program"
    print "\tr - restore the original image"
    print "\tw - run watershed algorithm"
    print "\t (before that, roughly outline several markers on the image)"

    cv.NamedWindow("image", 1)
    cv.NamedWindow("watershed transform", 1)
    img = cv.CloneImage(img0)
    img_gray = cv.CloneImage(img0)
    wshed = cv.CloneImage(img0)
    marker_mask = cv.CreateImage(cv.GetSize(img), 8, 1)
    markers = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32S, 1)
    cv.CvtColor(img, marker_mask, cv.CV_BGR2GRAY)
    cv.CvtColor(marker_mask, img_gray, cv.CV_GRAY2BGR)
    cv.Zero(marker_mask)
    cv.Zero(wshed)
    cv.ShowImage("image", img)
    cv.ShowImage("watershed transform", wshed)
    sk = Sketcher("image", [img, marker_mask])
    while True:
        c = cv.WaitKey(0) % 0x100
        if c == 27 or c == ord('q'):
            break
        if c == ord('r'):
            cv.Zero(marker_mask)
            cv.Copy(img0, img)
            cv.ShowImage("image", img)
        if c == ord('w'):
            storage = cv.CreateMemStorage(0)
            #cv.SaveImage("wshed_mask.png", marker_mask)
            #marker_mask = cv.LoadImage("wshed_mask.png", 0)
            contours = cv.FindContours(marker_mask, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)

            def contour_iterator(contour):
                while contour:
                    yield contour
                    contour = contour.h_next()

            cv.Zero(markers)
            comp_count = 0
            for c in contour_iterator(contours):
                cv.DrawContours(markers,
                                c,
                                cv.ScalarAll(comp_count + 1),
                                cv.ScalarAll(comp_count + 1),
                                -1,
                                -1,
                                8)
                comp_count += 1
            cv.Watershed(img0, markers)
            cv.Set(wshed, cv.ScalarAll(255))
            # paint the watershed image
            color_tab = [(cv.RandInt(rng) % 180 + 50,
                          cv.RandInt(rng) % 180 + 50,
                          cv.RandInt(rng) % 180 + 50) for i in range(comp_count)]
            for j in range(markers.height):
                for i in range(markers.width):
                    idx = markers[j, i]
                    if idx != -1:
                        wshed[j, i] = color_tab[int(idx - 1)]
            cv.AddWeighted(wshed, 0.5, img_gray, 0.5, 0, wshed)
            cv.ShowImage("watershed transform", wshed)
    cv.DestroyAllWindows()

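In cv2 the marker image is just an int32 NumPy array and cv2.watershed labels it in place. A sketch of the algorithmic core, seeding markers from thresholded blobs instead of user strokes (2.4-style two-value findContours return; paths are illustrative):

import cv2
import numpy as np

img = cv2.imread("../c/fruits.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(bw, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
markers = np.zeros(gray.shape, np.int32)
for i, cnt in enumerate(contours):
    cv2.drawContours(markers, [cnt], -1, i + 1, -1)   # one label per seed blob
cv2.watershed(img, markers)                           # basin boundaries become -1
img[markers == -1] = (0, 0, 255)                      # paint the boundaries
cv2.imshow("watershed transform", img)
cv2.waitKey(0)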
@@ -24,8 +24,3 @@ if __name__ == '__main__':
     r = 1.0 * len(cv2_used) / len(cv2_callable)
     print '\ncv2 api coverage: %d / %d (%.1f%%)' % ( len(cv2_used), len(cv2_callable), r*100 )
-    print '\nold (cv) symbols:'
-    for s in found:
-        if s.startswith('cv.'):
-            print s

@@ -37,7 +37,7 @@ if __name__ == '__main__':
     img = np.zeros((sz, sz), np.uint8)
     track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0)
     track = np.int32(track*10 + (sz/2, sz/2))
-    cv2.polylines(img, [track], 0, 255, 1, cv2.CV_AA)
+    cv2.polylines(img, [track], 0, 255, 1, cv2.LINE_AA)
     small = img

@@ -71,8 +71,8 @@ def mtx2rvec(R):
     return axis * np.arctan2(s, c)

 def draw_str(dst, (x, y), s):
-    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
-    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
+    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.LINE_AA)
+    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)

 class Sketcher:
     def __init__(self, windowname, dests, colors_func):

@@ -53,7 +53,7 @@ if __name__ == '__main__':
         vis = np.zeros((h, w, 3), np.uint8)
         levels = levels - 3
         cv2.drawContours( vis, contours, (-1, 3)[levels <= 0], (128,255,255),
-            3, cv2.CV_AA, hierarchy, abs(levels) )
+            3, cv2.LINE_AA, hierarchy, abs(levels) )
         cv2.imshow('contours', vis)
     update(3)
     cv2.createTrackbar( "levels+3", "contours", 3, 7, update )

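Scripts that must run against both the old and the new constant names can fall back at import time. A hypothetical compatibility shim, not part of this PR (the value 16 matches both CV_AA and LINE_AA):

import cv2
import numpy as np

# prefer the new name, fall back to the old one, then to the raw flag value
LINE_AA = getattr(cv2, 'LINE_AA', getattr(cv2, 'CV_AA', 16))
img = np.zeros((100, 100, 3), np.uint8)
cv2.line(img, (0, 0), (99, 99), (0, 255, 0), 1, LINE_AA)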