diff --git a/CMakeLists.txt b/CMakeLists.txt
index a7a09ee322..9f7ab02514 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -827,8 +827,8 @@ if(BUILD_opencv_python)
   else()
     status(" Libraries:" HAVE_opencv_python THEN ${PYTHON_LIBRARIES} ELSE NO)
   endif()
-  status(" numpy:" PYTHON_USE_NUMPY THEN "${PYTHON_NUMPY_INCLUDE_DIR} (ver ${PYTHON_NUMPY_VERSION})" ELSE "NO (Python wrappers can not be generated)")
-  status(" packages path:" PYTHON_EXECUTABLE THEN "${PYTHON_PACKAGES_PATH}" ELSE "-")
+  status(" numpy:" PYTHON_NUMPY_INCLUDE_DIR THEN "${PYTHON_NUMPY_INCLUDE_DIR} (ver ${PYTHON_NUMPY_VERSION})" ELSE "NO (Python wrappers can not be generated)")
+  status(" packages path:" PYTHON_EXECUTABLE THEN "${PYTHON_PACKAGES_PATH}" ELSE "-")
 endif()

 # ========================== java ==========================
diff --git a/cmake/OpenCVDetectPython.cmake b/cmake/OpenCVDetectPython.cmake
index 73143b7341..115ce338e8 100644
--- a/cmake/OpenCVDetectPython.cmake
+++ b/cmake/OpenCVDetectPython.cmake
@@ -14,7 +14,6 @@ if(WIN32 AND NOT PYTHON_EXECUTABLE)
 endif()

 find_host_package(PythonInterp 2.0)
-unset(PYTHON_USE_NUMPY CACHE)
 unset(HAVE_SPHINX CACHE)
 if(PYTHON_EXECUTABLE)
   if(PYTHON_VERSION_STRING)
@@ -93,7 +92,6 @@ if(PYTHON_EXECUTABLE)
     endif()

     if(PYTHON_NUMPY_INCLUDE_DIR)
-      set(PYTHON_USE_NUMPY TRUE)
       execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import numpy; print numpy.version.version"
                       RESULT_VARIABLE PYTHON_NUMPY_PROCESS
                       OUTPUT_VARIABLE PYTHON_NUMPY_VERSION
diff --git a/doc/_themes/blue/static/default.css_t b/doc/_themes/blue/static/default.css_t
index e2127db1da..49a57e9ec3 100644
--- a/doc/_themes/blue/static/default.css_t
+++ b/doc/_themes/blue/static/default.css_t
@@ -310,21 +310,6 @@ dl.pyfunction > dt:first-child {
     margin-bottom: 7px;
 }

-dl.pyoldfunction > dt:first-child {
-    margin-bottom: 7px;
-    color: #8080B0;
-}
-
-dl.pyoldfunction > dt:first-child tt.descname
-{
-    color: #8080B0;
-}
-
-dl.pyoldfunction > dt:first-child tt.descclassname
-{
-    color: #8080B0;
-}
-
 dl.jfunction > dt:first-child {
     margin-bottom: 7px;
 }
diff --git a/doc/ocv.py b/doc/ocv.py
index 8fcef4040b..a22d3daa56 100755
--- a/doc/ocv.py
+++ b/doc/ocv.py
@@ -267,10 +267,6 @@ class OCVPyModulelevel(OCVPyObject):
         else:
             return ''

-class OCVPyOldModulelevel(OCVPyModulelevel):
-    directive_prefix = 'pyold'
-    pass
-
 class OCVPyXRefRole(XRefRole):
     def process_link(self, env, refnode, has_explicit_title, title, target):
         refnode['ocv:module'] = env.temp_data.get('ocv:module')
@@ -1491,7 +1487,6 @@ class OCVDomain(Domain):
         'cfunction': ObjType(l_('cfunction'), 'cfunc', 'cfuncx'),
         'jfunction': ObjType(l_('jfunction'), 'jfunc', 'jfuncx'),
         'pyfunction': ObjType(l_('pyfunction'), 'pyfunc'),
-        'pyoldfunction': ObjType(l_('pyoldfunction'), 'pyoldfunc'),
         'member': ObjType(l_('member'), 'member'),
         'emember': ObjType(l_('emember'), 'emember'),
         'type': ObjType(l_('type'), 'type'),
@@ -1505,7 +1500,6 @@ class OCVDomain(Domain):
         'cfunction': OCVCFunctionObject,
         'jfunction': OCVJavaFunctionObject,
         'pyfunction': OCVPyModulelevel,
-        'pyoldfunction': OCVPyOldModulelevel,
         'member': OCVMemberObject,
         'emember': OCVEnumMemberObject,
         'type': OCVTypeObject,
@@ -1522,7 +1516,6 @@ class OCVDomain(Domain):
         'jfunc' : OCVXRefRole(fix_parens=True),
         'jfuncx' : OCVXRefRole(),
         'pyfunc' : OCVPyXRefRole(),
-        'pyoldfunc' : OCVPyXRefRole(),
         'member': OCVXRefRole(),
         'emember': OCVXRefRole(),
         'type': OCVXRefRole(),
@@ -1612,7 +1605,6 @@ class OCVDomain(Domain):
         'cfunction': _('C function'),
         'jfunction': _('Java method'),
         'pyfunction': _('Python function'),
-        'pyoldfunction': _('Legacy Python function'),
         'member': _('C++ member'),
         'emember': _('enum member'),
         'type': _('C/C++ type'),
diff --git a/include/opencv/cvaux.h b/include/opencv/cvaux.h
index 2332c8798b..cb49c086ba 100644
--- a/include/opencv/cvaux.h
+++ b/include/opencv/cvaux.h
@@ -51,11 +51,12 @@
 #include "opencv2/photo/photo_c.h"
 #include "opencv2/video/tracking_c.h"
 #include "opencv2/objdetect/objdetect_c.h"
+#include "opencv2/contrib/compat.hpp"
+
 #include "opencv2/legacy.hpp"
 #include "opencv2/legacy/compat.hpp"
 #include "opencv2/legacy/blobtrack.hpp"

-#include "opencv2/contrib.hpp"

 #endif
diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
index 9f47213324..4dda9662d9 100644
--- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
+++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst
@@ -121,8 +121,6 @@ Finds the camera intrinsic and extrinsic parameters from several views of a cali
 .. ocv:cfunction:: double cvCalibrateCamera2( const CvMat* object_points, const CvMat* image_points, const CvMat* point_counts, CvSize image_size, CvMat* camera_matrix, CvMat* distortion_coeffs, CvMat* rotation_vectors=NULL, CvMat* translation_vectors=NULL, int flags=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) )

-.. ocv:pyoldfunction:: cv.CalibrateCamera2(objectPoints, imagePoints, pointCounts, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flags=0)-> None
-
    :param objectPoints: In the new interface it is a vector of vectors of calibration pattern points in the calibration pattern coordinate space. The outer vector contains as many elements as the number of the pattern views. If the same calibration pattern is shown in each view and it is fully visible, all the vectors will be the same. Although, it is possible to use partially occluded patterns, or even different patterns in different views. Then, the vectors will be different. The points are 3D, but since they are in a pattern coordinate system, then, if the rig is planar, it may make sense to put the model to a XY coordinate plane so that Z-coordinate of each input object point is 0. In the old interface all the vectors of object points from different views are concatenated together.
@@ -279,8 +277,6 @@ For points in an image of a stereo pair, computes the corresponding epilines in
 .. ocv:cfunction:: void cvComputeCorrespondEpilines( const CvMat* points, int which_image, const CvMat* fundamental_matrix, CvMat* correspondent_lines )

-.. ocv:pyoldfunction:: cv.ComputeCorrespondEpilines(points, whichImage, F, lines) -> None
-
    :param points: Input points. :math:`N \times 1` or :math:`1 \times N` matrix of type ``CV_32FC2`` or ``vector`` .

    :param whichImage: Index of the image (1 or 2) that contains the ``points`` .
@@ -354,7 +350,6 @@ Converts points to/from homogeneous coordinates.
 .. ocv:function:: void convertPointsHomogeneous( InputArray src, OutputArray dst )

 .. ocv:cfunction:: void cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst )
-.. ocv:pyoldfunction:: cv.ConvertPointsHomogeneous(src, dst) -> None

    :param src: Input array or vector of 2D, 3D, or 4D points.
@@ -400,8 +395,6 @@ Decomposes a projection matrix into a rotation matrix and a camera matrix.
 .. ocv:cfunction:: void cvDecomposeProjectionMatrix( const CvMat * projMatr, CvMat * calibMatr, CvMat * rotMatr, CvMat * posVect, CvMat * rotMatrX=NULL, CvMat * rotMatrY=NULL, CvMat * rotMatrZ=NULL, CvPoint3D64f * eulerAngles=NULL )

-.. ocv:pyoldfunction:: cv.DecomposeProjectionMatrix(projMatrix, cameraMatrix, rotMatrix, transVect, rotMatrX=None, rotMatrY=None, rotMatrZ=None) -> eulerAngles
-
    :param projMatrix: 3x4 input projection matrix P.

    :param cameraMatrix: Output 3x3 camera matrix K.
@@ -436,7 +429,6 @@ Renders the detected chessboard corners.
 .. ocv:pyfunction:: cv2.drawChessboardCorners(image, patternSize, corners, patternWasFound) -> image

 .. ocv:cfunction:: void cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, CvPoint2D32f* corners, int count, int pattern_was_found )
-.. ocv:pyoldfunction:: cv.DrawChessboardCorners(image, patternSize, corners, patternWasFound)-> None

    :param image: Destination image. It must be an 8-bit color image.
@@ -459,7 +451,6 @@ Finds the positions of internal corners of the chessboard.
 .. ocv:pyfunction:: cv2.findChessboardCorners(image, patternSize[, corners[, flags]]) -> retval, corners

 .. ocv:cfunction:: int cvFindChessboardCorners( const void* image, CvSize pattern_size, CvPoint2D32f* corners, int* corner_count=NULL, int flags=CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE )
-.. ocv:pyoldfunction:: cv.FindChessboardCorners(image, patternSize, flags=CV_CALIB_CB_ADAPTIVE_THRESH) -> corners

    :param image: Source chessboard view. It must be an 8-bit grayscale or color image.
@@ -564,8 +555,6 @@ Finds an object pose from 3D-2D point correspondences.
 .. ocv:cfunction:: void cvFindExtrinsicCameraParams2( const CvMat* object_points, const CvMat* image_points, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* rotation_vector, CvMat* translation_vector, int use_extrinsic_guess=0 )

-.. ocv:pyoldfunction:: cv.FindExtrinsicCameraParams2(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess=0 ) -> None
-
    :param objectPoints: Array of object points in the object coordinate space, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. ``vector`` can be also passed here.

    :param imagePoints: Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, where N is the number of points. ``vector`` can be also passed here.
@@ -636,7 +625,6 @@ Calculates a fundamental matrix from the corresponding points in two images.
 .. ocv:pyfunction:: cv2.findFundamentalMat(points1, points2[, method[, param1[, param2[, mask]]]]) -> retval, mask

 .. ocv:cfunction:: int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, CvMat* fundamental_matrix, int method=CV_FM_RANSAC, double param1=3., double param2=0.99, CvMat* status=NULL )
-.. ocv:pyoldfunction:: cv.FindFundamentalMat(points1, points2, fundamentalMatrix, method=CV_FM_RANSAC, param1=1., param2=0.99, status=None) -> retval

    :param points1: Array of ``N`` points from the first image. The point coordinates should be floating-point (single or double precision).
@@ -820,8 +808,6 @@ Finds a perspective transformation between two planes.
 .. ocv:cfunction:: int cvFindHomography( const CvMat* src_points, const CvMat* dst_points, CvMat* homography, int method=0, double ransacReprojThreshold=3, CvMat* mask=0 )

-.. ocv:pyoldfunction:: cv.FindHomography(srcPoints, dstPoints, H, method=0, ransacReprojThreshold=3.0, status=None) -> None
-
    :param srcPoints: Coordinates of the points in the original plane, a matrix of the type ``CV_32FC2`` or ``vector`` .

    :param dstPoints: Coordinates of the points in the target plane, a matrix of the type ``CV_32FC2`` or a ``vector`` .
@@ -946,8 +932,6 @@ Returns the new camera matrix based on the free scaling parameter.
 .. ocv:cfunction:: void cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, const CvMat* dist_coeffs, CvSize image_size, double alpha, CvMat* new_camera_matrix, CvSize new_imag_size=cvSize(0,0), CvRect* valid_pixel_ROI=0, int center_principal_point=0 )

-.. ocv:pyoldfunction:: cv.GetOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha, newCameraMatrix, newImageSize=(0, 0), validPixROI=0, centerPrincipalPoint=0) -> None
-
    :param cameraMatrix: Input camera matrix.

    :param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])` of 4, 5, 8 or 12 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
@@ -981,8 +965,6 @@ Finds an initial camera matrix from 3D-2D point correspondences.
 .. ocv:cfunction:: void cvInitIntrinsicParams2D( const CvMat* object_points, const CvMat* image_points, const CvMat* npoints, CvSize image_size, CvMat* camera_matrix, double aspect_ratio=1. )

-.. ocv:pyoldfunction:: cv.InitIntrinsicParams2D(objectPoints, imagePoints, npoints, imageSize, cameraMatrix, aspectRatio=1.) -> None
-
    :param objectPoints: Vector of vectors of the calibration pattern points in the calibration pattern coordinate space. In the old interface all the per-view vectors are concatenated. See :ocv:func:`calibrateCamera` for details.

    :param imagePoints: Vector of vectors of the projections of the calibration pattern points. In the old interface all the per-view vectors are concatenated.
@@ -1030,8 +1012,6 @@ Projects 3D points to an image plane.
 .. ocv:cfunction:: void cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, const CvMat* translation_vector, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* image_points, CvMat* dpdrot=NULL, CvMat* dpdt=NULL, CvMat* dpdf=NULL, CvMat* dpdc=NULL, CvMat* dpddist=NULL, double aspect_ratio=0 )

-.. ocv:pyoldfunction:: cv.ProjectPoints2(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints, dpdrot=None, dpdt=None, dpdf=None, dpdc=None, dpddist=None)-> None
-
    :param objectPoints: Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or ``vector`` ), where N is the number of points in the view.

    :param rvec: Rotation vector. See :ocv:func:`Rodrigues` for details.
@@ -1075,8 +1055,6 @@ Reprojects a disparity image to 3D space.
 .. ocv:cfunction:: void cvReprojectImageTo3D( const CvArr* disparityImage, CvArr* _3dImage, const CvMat* Q, int handleMissingValues=0 )

-.. ocv:pyoldfunction:: cv.ReprojectImageTo3D(disparity, _3dImage, Q, handleMissingValues=0) -> None
-
    :param disparity: Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit floating-point disparity image.

    :param _3dImage: Output 3-channel floating-point image of the same size as ``disparity`` . Each element of ``_3dImage(x,y)`` contains 3D coordinates of the point ``(x,y)`` computed from the disparity map.
@@ -1109,7 +1087,6 @@ Computes an RQ decomposition of 3x3 matrices.
 .. ocv:pyfunction:: cv2.RQDecomp3x3(src[, mtxR[, mtxQ[, Qx[, Qy[, Qz]]]]]) -> retval, mtxR, mtxQ, Qx, Qy, Qz

 .. ocv:cfunction:: void cvRQDecomp3x3( const CvMat * matrixM, CvMat * matrixR, CvMat * matrixQ, CvMat * matrixQx=NULL, CvMat * matrixQy=NULL, CvMat * matrixQz=NULL, CvPoint3D64f * eulerAngles=NULL )
-.. ocv:pyoldfunction:: cv.RQDecomp3x3(M, R, Q, Qx=None, Qy=None, Qz=None) -> eulerAngles

    :param src: 3x3 input matrix.
@@ -1140,8 +1117,6 @@ Converts a rotation matrix to a rotation vector or vice versa.
 .. ocv:cfunction:: int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian=0 )

-.. ocv:pyoldfunction:: cv.Rodrigues2(src, dst, jacobian=0)-> None
-
    :param src: Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).

    :param dst: Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
@@ -1269,8 +1244,6 @@ Calibrates the stereo camera.
 .. ocv:cfunction:: double cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1, const CvMat* image_points2, const CvMat* npoints, CvMat* camera_matrix1, CvMat* dist_coeffs1, CvMat* camera_matrix2, CvMat* dist_coeffs2, CvSize image_size, CvMat* R, CvMat* T, CvMat* E=0, CvMat* F=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6), int flags=CV_CALIB_FIX_INTRINSIC )

-.. ocv:pyoldfunction:: cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=None, F=None, term_crit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)-> None
-
    :param objectPoints: Vector of vectors of the calibration pattern points.

    :param imagePoints1: Vector of vectors of the projections of the calibration pattern points, observed by the first camera.
@@ -1362,8 +1335,6 @@ Computes rectification transforms for each head of a calibrated stereo camera.
 .. ocv:cfunction:: void cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2, const CvMat* dist_coeffs1, const CvMat* dist_coeffs2, CvSize image_size, const CvMat* R, const CvMat* T, CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, CvMat* Q=0, int flags=CV_CALIB_ZERO_DISPARITY, double alpha=-1, CvSize new_image_size=cvSize(0,0), CvRect* valid_pix_ROI1=0, CvRect* valid_pix_ROI2=0 )

-.. ocv:pyoldfunction:: cv.StereoRectify(cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q=None, flags=CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0)) -> (roi1, roi2)
-
    :param cameraMatrix1: First camera matrix.

    :param cameraMatrix2: Second camera matrix.
@@ -1451,8 +1422,6 @@ Computes a rectification transform for an uncalibrated stereo camera.
 .. ocv:cfunction:: int cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, const CvMat* F, CvSize img_size, CvMat* H1, CvMat* H2, double threshold=5 )

-.. ocv:pyoldfunction:: cv.StereoRectifyUncalibrated(points1, points2, F, imageSize, H1, H2, threshold=5)-> None
-
    :param points1: Array of feature points in the first image.

    :param points2: The corresponding points in the second image. The same formats as in :ocv:func:`findFundamentalMat` are supported.
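Reviewer note (not part of the patch): the removed ``ocv:pyoldfunction`` entries documented only the legacy ``cv.*`` Python wrapper; the retained C++ and ``cv2`` signatures remain the supported interface. As a sanity check that nothing user-facing is lost, a minimal C++ sketch of the calibration flow these pages still document, assuming a hypothetical ``imgs`` vector of 8-bit grayscale views of a 9x6 chessboard:

    #include "opencv2/calib3d.hpp"
    #include <vector>

    // Gather chessboard corners from several views, then calibrate.
    // Returns the RMS reprojection error reported by calibrateCamera.
    static double calibrateFromViews(const std::vector<cv::Mat>& imgs, cv::Size imageSize)
    {
        cv::Size patternSize(9, 6);
        std::vector<std::vector<cv::Point3f> > objectPoints;
        std::vector<std::vector<cv::Point2f> > imagePoints;

        std::vector<cv::Point3f> model; // planar pattern: Z = 0 for every corner
        for (int i = 0; i < patternSize.height; i++)
            for (int j = 0; j < patternSize.width; j++)
                model.push_back(cv::Point3f((float)j, (float)i, 0.f));

        for (size_t k = 0; k < imgs.size(); k++)
        {
            std::vector<cv::Point2f> corners;
            if (cv::findChessboardCorners(imgs[k], patternSize, corners))
            {
                imagePoints.push_back(corners); // one vector of corners per view
                objectPoints.push_back(model);  // same planar model for each view
            }
        }

        cv::Mat cameraMatrix, distCoeffs;
        std::vector<cv::Mat> rvecs, tvecs;
        return cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                   cameraMatrix, distCoeffs, rvecs, tvecs);
    }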
diff --git a/modules/contrib/include/opencv2/contrib.hpp b/modules/contrib/include/opencv2/contrib.hpp index 35bac1b90a..be83152db5 100644 --- a/modules/contrib/include/opencv2/contrib.hpp +++ b/modules/contrib/include/opencv2/contrib.hpp @@ -48,931 +48,592 @@ #include "opencv2/features2d.hpp" #include "opencv2/objdetect.hpp" -#include "opencv2/core/core_c.h" - -#include - -#ifdef __cplusplus - -/****************************************************************************************\ -* Adaptive Skin Detector * -\****************************************************************************************/ - -class CV_EXPORTS CvAdaptiveSkinDetector +namespace cv +{ +class CV_EXPORTS Octree { -private: - enum { - GSD_HUE_LT = 3, - GSD_HUE_UT = 33, - GSD_INTENSITY_LT = 15, - GSD_INTENSITY_UT = 250 - }; - - class CV_EXPORTS Histogram - { - private: - enum { - HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1) - }; - - protected: - int findCoverageIndex(double surfaceToCover, int defaultValue = 0); - - public: - CvHistogram *fHistogram; - Histogram(); - virtual ~Histogram(); - - void findCurveThresholds(int &x1, int &x2, double percent = 0.05); - void mergeWith(Histogram *source, double weight); - }; - - int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider; - double fHistogramMergeFactor, fHuePercentCovered; - Histogram histogramHueMotion, skinHueHistogram; - IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame; - IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame; - -protected: - void initData(IplImage *src, int widthDivider, int heightDivider); - void adaptiveFilter(); - public: - - enum { - MORPHING_METHOD_NONE = 0, - MORPHING_METHOD_ERODE = 1, - MORPHING_METHOD_ERODE_ERODE = 2, - MORPHING_METHOD_ERODE_DILATE = 3 + struct Node + { + Node() {} + int begin, end; + float x_min, x_max, y_min, y_max, z_min, z_max; + int maxLevels; + bool isLeaf; + int children[8]; }; - CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE); - virtual ~CvAdaptiveSkinDetector(); - - virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask); -}; - - -/****************************************************************************************\ - * Fuzzy MeanShift Tracker * - \****************************************************************************************/ - -class CV_EXPORTS CvFuzzyPoint { -public: - double x, y, value; - - CvFuzzyPoint(double _x, double _y); -}; + Octree(); + Octree( const std::vector& points, int maxLevels = 10, int minPoints = 20 ); + virtual ~Octree(); -class CV_EXPORTS CvFuzzyCurve { + virtual void buildTree( const std::vector& points, int maxLevels = 10, int minPoints = 20 ); + virtual void getPointsWithinSphere( const Point3f& center, float radius, + std::vector& points ) const; + const std::vector& getNodes() const { return nodes; } private: - std::vector points; - double value, centre; + int minPoints; + std::vector points; + std::vector nodes; - bool between(double x, double x1, double x2); - -public: - CvFuzzyCurve(); - ~CvFuzzyCurve(); - - void setCentre(double _centre); - double getCentre(); - void clear(); - void addPoint(double x, double y); - double calcValue(double param); - double getValue(); - void setValue(double _value); + virtual void buildNext(size_t node_ind); }; -class CV_EXPORTS CvFuzzyFunction { -public: - std::vector curves; - - CvFuzzyFunction(); - ~CvFuzzyFunction(); - void addCurve(CvFuzzyCurve *curve, double value = 
0); - void resetValues(); - double calcValue(); - CvFuzzyCurve *newCurve(); -}; -class CV_EXPORTS CvFuzzyRule { -private: - CvFuzzyCurve *fuzzyInput1, *fuzzyInput2; - CvFuzzyCurve *fuzzyOutput; -public: - CvFuzzyRule(); - ~CvFuzzyRule(); - void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); - double calcValue(double param1, double param2); - CvFuzzyCurve *getOutputCurve(); -}; - -class CV_EXPORTS CvFuzzyController { -private: - std::vector rules; -public: - CvFuzzyController(); - ~CvFuzzyController(); - void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); - double calcOutput(double param1, double param2); -}; - -class CV_EXPORTS CvFuzzyMeanShiftTracker +class CV_EXPORTS Mesh3D { -private: - class FuzzyResizer - { - private: - CvFuzzyFunction iInput, iOutput; - CvFuzzyController fuzzyController; - public: - FuzzyResizer(); - int calcOutput(double edgeDensity, double density); - }; - - class SearchWindow - { - public: - FuzzyResizer *fuzzyResizer; - int x, y; - int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth; - int ldx, ldy, ldw, ldh, numShifts, numIters; - int xGc, yGc; - long m00, m01, m10, m11, m02, m20; - double ellipseAngle; - double density; - unsigned int depthLow, depthHigh; - int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom; - - SearchWindow(); - ~SearchWindow(); - void setSize(int _x, int _y, int _width, int _height); - void initDepthValues(IplImage *maskImage, IplImage *depthMap); - bool shift(); - void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth); - void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); - void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); - void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); - bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth); - }; - public: - enum TrackingState - { - tsNone = 0, - tsSearching = 1, - tsTracking = 2, - tsSetWindow = 3, - tsDisabled = 10 - }; + struct EmptyMeshException {}; - enum ResizeMethod { - rmEdgeDensityLinear = 0, - rmEdgeDensityFuzzy = 1, - rmInnerDensity = 2 - }; - - enum { - MinKernelMass = 1000 - }; + Mesh3D(); + Mesh3D(const std::vector& vtx); + ~Mesh3D(); - SearchWindow kernel; - int searchMode; + void buildOctree(); + void clearOctree(); + float estimateResolution(float tryRatio = 0.1f); + void computeNormals(float normalRadius, int minNeighbors = 20); + void computeNormals(const std::vector& subset, float normalRadius, int minNeighbors = 20); -private: - enum - { - MaxMeanShiftIteration = 5, - MaxSetSizeIteration = 5 - }; + void writeAsVrml(const String& file, const std::vector& colors = std::vector()) const; - void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth); + std::vector vtx; + std::vector normals; + float resolution; + Octree octree; -public: - CvFuzzyMeanShiftTracker(); - ~CvFuzzyMeanShiftTracker(); - - void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass); + const static Point3f allzero; }; - -namespace cv +class CV_EXPORTS SpinImageModel { +public: - class CV_EXPORTS Octree - { - public: - struct Node - { - Node() {} - int begin, end; - float x_min, x_max, y_min, y_max, z_min, z_max; - int maxLevels; - bool isLeaf; - int children[8]; - }; - - Octree(); - Octree( 
const std::vector& points, int maxLevels = 10, int minPoints = 20 ); - virtual ~Octree(); - - virtual void buildTree( const std::vector& points, int maxLevels = 10, int minPoints = 20 ); - virtual void getPointsWithinSphere( const Point3f& center, float radius, - std::vector& points ) const; - const std::vector& getNodes() const { return nodes; } - private: - int minPoints; - std::vector points; - std::vector nodes; - - virtual void buildNext(size_t node_ind); - }; - - - class CV_EXPORTS Mesh3D - { - public: - struct EmptyMeshException {}; - - Mesh3D(); - Mesh3D(const std::vector& vtx); - ~Mesh3D(); - - void buildOctree(); - void clearOctree(); - float estimateResolution(float tryRatio = 0.1f); - void computeNormals(float normalRadius, int minNeighbors = 20); - void computeNormals(const std::vector& subset, float normalRadius, int minNeighbors = 20); - - void writeAsVrml(const String& file, const std::vector& colors = std::vector()) const; - - std::vector vtx; - std::vector normals; - float resolution; - Octree octree; - - const static Point3f allzero; - }; - - class CV_EXPORTS SpinImageModel - { - public: - - /* model parameters, leave unset for default or auto estimate */ - float normalRadius; - int minNeighbors; - - float binSize; - int imageWidth; + /* model parameters, leave unset for default or auto estimate */ + float normalRadius; + int minNeighbors; - float lambda; - float gamma; + float binSize; + int imageWidth; - float T_GeometriccConsistency; - float T_GroupingCorespondances; + float lambda; + float gamma; - /* public interface */ - SpinImageModel(); - explicit SpinImageModel(const Mesh3D& mesh); - ~SpinImageModel(); + float T_GeometriccConsistency; + float T_GroupingCorespondances; - void setLogger(std::ostream* log); - void selectRandomSubset(float ratio); - void setSubset(const std::vector& subset); - void compute(); + /* public interface */ + SpinImageModel(); + explicit SpinImageModel(const Mesh3D& mesh); + ~SpinImageModel(); - void match(const SpinImageModel& scene, std::vector< std::vector >& result); + void selectRandomSubset(float ratio); + void setSubset(const std::vector& subset); + void compute(); - Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const; + void match(const SpinImageModel& scene, std::vector< std::vector >& result); - size_t getSpinCount() const { return spinImages.rows; } - Mat getSpinImage(size_t index) const { return spinImages.row((int)index); } - const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; } - const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; } + Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const; - const Mesh3D& getMesh() const { return mesh; } - Mesh3D& getMesh() { return mesh; } + size_t getSpinCount() const { return spinImages.rows; } + Mat getSpinImage(size_t index) const { return spinImages.row((int)index); } + const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; } + const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; } - /* static utility functions */ - static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result); + const Mesh3D& getMesh() const { return mesh; } + Mesh3D& getMesh() { return mesh; } - static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal); + /* static utility functions */ + static bool spinCorrelation(const Mat& 
spin1, const Mat& spin2, float lambda, float& result); - static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1, - const Point3f& pointModel1, const Point3f& normalModel1, - const Point3f& pointScene2, const Point3f& normalScene2, - const Point3f& pointModel2, const Point3f& normalModel2); + static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal); - static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1, + static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1, const Point3f& pointModel1, const Point3f& normalModel1, const Point3f& pointScene2, const Point3f& normalScene2, - const Point3f& pointModel2, const Point3f& normalModel2, - float gamma); - protected: - void defaultParams(); + const Point3f& pointModel2, const Point3f& normalModel2); - void matchSpinToModel(const Mat& spin, std::vector& indeces, - std::vector& corrCoeffs, bool useExtremeOutliers = true) const; + static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1, + const Point3f& pointModel1, const Point3f& normalModel1, + const Point3f& pointScene2, const Point3f& normalScene2, + const Point3f& pointModel2, const Point3f& normalModel2, + float gamma); +protected: + void defaultParams(); - void repackSpinImages(const std::vector& mask, Mat& spinImages, bool reAlloc = true) const; + void matchSpinToModel(const Mat& spin, std::vector& indeces, + std::vector& corrCoeffs, bool useExtremeOutliers = true) const; - std::vector subset; - Mesh3D mesh; - Mat spinImages; - std::ostream* out; - }; + void repackSpinImages(const std::vector& mask, Mat& spinImages, bool reAlloc = true) const; - class CV_EXPORTS TickMeter - { - public: - TickMeter(); - void start(); - void stop(); - - int64 getTimeTicks() const; - double getTimeMicro() const; - double getTimeMilli() const; - double getTimeSec() const; - int64 getCounter() const; - - void reset(); - private: - int64 counter; - int64 sumTime; - int64 startTime; - }; - - CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm); + std::vector subset; + Mesh3D mesh; + Mat spinImages; +}; - class CV_EXPORTS SelfSimDescriptor - { - public: - SelfSimDescriptor(); - SelfSimDescriptor(int _ssize, int _lsize, - int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET, - int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS, - int _nangles=DEFAULT_NUM_ANGLES); - SelfSimDescriptor(const SelfSimDescriptor& ss); - virtual ~SelfSimDescriptor(); - SelfSimDescriptor& operator = (const SelfSimDescriptor& ss); - - size_t getDescriptorSize() const; - Size getGridSize( Size imgsize, Size winStride ) const; - - virtual void compute(const Mat& img, std::vector& descriptors, Size winStride=Size(), - const std::vector& locations=std::vector()) const; - virtual void computeLogPolarMapping(Mat& mappingMask) const; - virtual void SSD(const Mat& img, Point pt, Mat& ssd) const; - - int smallSize; - int largeSize; - int startDistanceBucket; - int numberOfDistanceBuckets; - int numberOfAngles; - - enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41, - DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3, - DEFAULT_NUM_DISTANCE_BUCKETS = 7 }; - }; +class CV_EXPORTS TickMeter +{ +public: + TickMeter(); + void start(); + void stop(); + int64 getTimeTicks() const; + double getTimeMicro() const; + double getTimeMilli() const; + double getTimeSec() const; + int64 getCounter() const; - typedef bool (*BundleAdjustCallback)(int 
iteration, double norm_error, void* user_data); + void reset(); +private: + int64 counter; + int64 sumTime; + int64 startTime; +}; - class CV_EXPORTS LevMarqSparse { - public: - LevMarqSparse(); - LevMarqSparse(int npoints, // number of points - int ncameras, // number of cameras - int nPointParams, // number of params per one point (3 in case of 3D points) - int nCameraParams, // number of parameters per one camera - int nErrParams, // number of parameters in measurement vector - // for 1 point at one camera (2 in case of 2D projections) - Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras - // 1 - point is visible for the camera, 0 - invisible - Mat& P0, // starting vector of parameters, first cameras then points - Mat& X, // measurements, in order of visibility. non visible cases are skipped - TermCriteria criteria, // termination criteria - - // callback for estimation of Jacobian matrices - void (CV_CDECL * fjac)(int i, int j, Mat& point_params, - Mat& cam_params, Mat& A, Mat& B, void* data), - // callback for estimation of backprojection errors - void (CV_CDECL * func)(int i, int j, Mat& point_params, - Mat& cam_params, Mat& estim, void* data), - void* data, // user-specific data passed to the callbacks - BundleAdjustCallback cb, void* user_data - ); - - virtual ~LevMarqSparse(); - - virtual void run( int npoints, // number of points - int ncameras, // number of cameras - int nPointParams, // number of params per one point (3 in case of 3D points) - int nCameraParams, // number of parameters per one camera - int nErrParams, // number of parameters in measurement vector - // for 1 point at one camera (2 in case of 2D projections) - Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras - // 1 - point is visible for the camera, 0 - invisible - Mat& P0, // starting vector of parameters, first cameras then points - Mat& X, // measurements, in order of visibility. 
non visible cases are skipped - TermCriteria criteria, // termination criteria - - // callback for estimation of Jacobian matrices - void (CV_CDECL * fjac)(int i, int j, Mat& point_params, - Mat& cam_params, Mat& A, Mat& B, void* data), - // callback for estimation of backprojection errors - void (CV_CDECL * func)(int i, int j, Mat& point_params, - Mat& cam_params, Mat& estim, void* data), - void* data // user-specific data passed to the callbacks - ); - - virtual void clear(); - - // useful function to do simple bundle adjustment tasks - static void bundleAdjust(std::vector& points, // positions of points in global coordinate system (input and output) - const std::vector >& imagePoints, // projections of 3d points for every camera - const std::vector >& visibility, // visibility of 3d points for every camera - std::vector& cameraMatrix, // intrinsic matrices of all cameras (input and output) - std::vector& R, // rotation matrices of all cameras (input and output) - std::vector& T, // translation vector of all cameras (input and output) - std::vector& distCoeffs, // distortion coefficients of all cameras (input and output) - const TermCriteria& criteria= - TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON), - BundleAdjustCallback cb = 0, void* user_data = 0); +//CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm); - public: - virtual void optimize(CvMat &_vis); //main function that runs minimization +class CV_EXPORTS SelfSimDescriptor +{ +public: + SelfSimDescriptor(); + SelfSimDescriptor(int _ssize, int _lsize, + int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET, + int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS, + int _nangles=DEFAULT_NUM_ANGLES); + SelfSimDescriptor(const SelfSimDescriptor& ss); + virtual ~SelfSimDescriptor(); + SelfSimDescriptor& operator = (const SelfSimDescriptor& ss); + + size_t getDescriptorSize() const; + Size getGridSize( Size imgsize, Size winStride ) const; + + virtual void compute(const Mat& img, std::vector& descriptors, Size winStride=Size(), + const std::vector& locations=std::vector()) const; + virtual void computeLogPolarMapping(Mat& mappingMask) const; + virtual void SSD(const Mat& img, Point pt, Mat& ssd) const; + + int smallSize; + int largeSize; + int startDistanceBucket; + int numberOfDistanceBuckets; + int numberOfAngles; + + enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41, + DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3, + DEFAULT_NUM_DISTANCE_BUCKETS = 7 }; +}; - //iteratively asks for measurement for visible camera-point pairs - void ask_for_proj(CvMat &_vis,bool once=false); - //iteratively asks for Jacobians for every camera_point pair - void ask_for_projac(CvMat &_vis); - CvMat* err; //error X-hX - double prevErrNorm, errNorm; - double lambda; - CvTermCriteria criteria; - int iters; +CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ, + CV_OUT std::vector >& results, CV_OUT std::vector& cost, + double templScale=1, int maxMatches = 20, + double minMatchDistance = 1.0, int padX = 3, + int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6, + double orientationWeight = 0.5, double truncate = 20); - CvMat** U; //size of array is equal to number of cameras - CvMat** V; //size of array is equal to number of points - CvMat** inv_V_star; //inverse of V* - CvMat** A; - CvMat** B; - CvMat** W; +class CV_EXPORTS_W StereoVar +{ +public: + // Flags + enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, 
USE_MEDIAN_FILTERING = 16}; + enum {CYCLE_O, CYCLE_V}; + enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK}; + + //! the default constructor + CV_WRAP StereoVar(); + + //! the full constructor taking all the necessary algorithm parameters + CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags); + + //! the destructor + virtual ~StereoVar(); + + //! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair + CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, CV_OUT Mat& disp); + + CV_PROP_RW int levels; + CV_PROP_RW double pyrScale; + CV_PROP_RW int nIt; + CV_PROP_RW int minDisp; + CV_PROP_RW int maxDisp; + CV_PROP_RW int poly_n; + CV_PROP_RW double poly_sigma; + CV_PROP_RW float fi; + CV_PROP_RW float lambda; + CV_PROP_RW int penalization; + CV_PROP_RW int cycle; + CV_PROP_RW int flags; - CvMat* X; //measurement - CvMat* hX; //current measurement extimation given new parameter vector +private: + void autoParams(); + void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level); + void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); + void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); +}; - CvMat* prevP; //current already accepted parameter. - CvMat* P; // parameters used to evaluate function with new params - // this parameters may be rejected +CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order); - CvMat* deltaP; //computed increase of parameters (result of normal system solution ) +class CV_EXPORTS Directory +{ + public: + static std::vector GetListFiles ( const String& path, const String & exten = "*", bool addPath = true ); + static std::vector GetListFilesR ( const String& path, const String & exten = "*", bool addPath = true ); + static std::vector GetListFolders( const String& path, const String & exten = "*", bool addPath = true ); +}; - CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation - // length of array is j = number of cameras - CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation - // length of array is i = number of points +/* + * Generation of a set of different colors by the following way: + * 1) generate more then need colors (in "factor" times) in RGB, + * 2) convert them to Lab, + * 3) choose the needed count of colors from the set that are more different from + * each other, + * 4) convert the colors back to RGB + */ +CV_EXPORTS void generateColors( std::vector& colors, size_t count, size_t factor=100 ); + + +/* + * Estimate the rigid body motion from frame0 to frame1. The method is based on the paper + * "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011. + */ +enum { ROTATION = 1, + TRANSLATION = 2, + RIGID_BODY_MOTION = 4 + }; +CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt, + const Mat& image0, const Mat& depth0, const Mat& mask0, + const Mat& image1, const Mat& depth1, const Mat& mask1, + const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f, + const std::vector& iterCounts=std::vector(), + const std::vector& minGradientMagnitudes=std::vector(), + int transformType=RIGID_BODY_MOTION ); + +/** +*Bilinear interpolation technique. 
+* +*The value of a desired cortical pixel is obtained through a bilinear interpolation of the values +*of the four nearest neighbouring Cartesian pixels to the center of the RF. +*The same principle is applied to the inverse transformation. +* +*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 +*/ +class CV_EXPORTS LogPolar_Interp +{ +public: - CvMat** Yj; //length of array is i = num_points + LogPolar_Interp() {} - CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0, + int interp=INTER_LINEAR, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. + *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Interp(); - CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation +protected: - CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j + Mat Rsri; + Mat Csri; - int num_cams; - int num_points; - int num_err_param; - int num_cam_param; - int num_point_param; + int S, R, M, N; + int top, bottom,left,right; + double ro0, romax, a, q; + int interp; - //target function and jacobian pointers, which needs to be initialized - void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data); - void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data); + Mat ETAyx; + Mat CSIyx; - void* data; + void create_map(int M, int N, int R, int S, double ro0); +}; - BundleAdjustCallback cb; - void* user_data; - }; +/** +*Overlapping circular receptive fields technique +* +*The Cartesian plane is divided in two regions: the fovea and the periphery. +*The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in +*the periphery we use the overlapping Gaussian circular RFs. 
+* +*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 +*/ +class CV_EXPORTS LogPolar_Overlapping +{ +public: + LogPolar_Overlapping() {} - CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ, - CV_OUT std::vector >& results, CV_OUT std::vector& cost, - double templScale=1, int maxMatches = 20, - double minMatchDistance = 1.0, int padX = 3, - int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6, - double orientationWeight = 0.5, double truncate = 20); + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Overlapping(int w, int h, Point2i center, int R=70, + double ro0=3.0, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. + *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Overlapping(); +protected: - class CV_EXPORTS_W StereoVar - { - public: - // Flags - enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16}; - enum {CYCLE_O, CYCLE_V}; - enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK}; - - //! the default constructor - CV_WRAP StereoVar(); - - //! the full constructor taking all the necessary algorithm parameters - CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags); - - //! the destructor - virtual ~StereoVar(); - - //! 
the stereo correspondence operator that computes disparity map for the specified rectified stereo pair - CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, CV_OUT Mat& disp); - - CV_PROP_RW int levels; - CV_PROP_RW double pyrScale; - CV_PROP_RW int nIt; - CV_PROP_RW int minDisp; - CV_PROP_RW int maxDisp; - CV_PROP_RW int poly_n; - CV_PROP_RW double poly_sigma; - CV_PROP_RW float fi; - CV_PROP_RW float lambda; - CV_PROP_RW int penalization; - CV_PROP_RW int cycle; - CV_PROP_RW int flags; - - private: - void autoParams(); - void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level); - void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); - void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); - }; + Mat Rsri; + Mat Csri; + std::vector Rsr; + std::vector Csr; + std::vector Wsr; - CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order); + int S, R, M, N, ind1; + int top, bottom,left,right; + double ro0, romax, a, q; - class CV_EXPORTS Directory + struct kernel { - public: - static std::vector GetListFiles ( const String& path, const String & exten = "*", bool addPath = true ); - static std::vector GetListFilesR ( const String& path, const String & exten = "*", bool addPath = true ); - static std::vector GetListFolders( const String& path, const String & exten = "*", bool addPath = true ); + kernel() { w = 0; } + std::vector weights; + int w; }; - /* - * Generation of a set of different colors by the following way: - * 1) generate more then need colors (in "factor" times) in RGB, - * 2) convert them to Lab, - * 3) choose the needed count of colors from the set that are more different from - * each other, - * 4) convert the colors back to RGB - */ - CV_EXPORTS void generateColors( std::vector& colors, size_t count, size_t factor=100 ); + Mat ETAyx; + Mat CSIyx; + std::vector w_ker_2D; + void create_map(int M, int N, int R, int S, double ro0); +}; - /* - * Estimate the rigid body motion from frame0 to frame1. The method is based on the paper - * "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011. - */ - enum { ROTATION = 1, - TRANSLATION = 2, - RIGID_BODY_MOTION = 4 - }; - CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt, - const Mat& image0, const Mat& depth0, const Mat& mask0, - const Mat& image1, const Mat& depth1, const Mat& mask1, - const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f, - const std::vector& iterCounts=std::vector(), - const std::vector& minGradientMagnitudes=std::vector(), - int transformType=RIGID_BODY_MOTION ); +/** +* Adjacent receptive fields technique +* +*All the Cartesian pixels, whose coordinates in the cortical domain share the same integer part, are assigned to the same RF. +*The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF. +*This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements +*to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370 +* +*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 +*/ +class CV_EXPORTS LogPolar_Adjacent +{ +public: + LogPolar_Adjacent() {} /** - *Bilinear interpolation technique. - * - *The value of a desired cortical pixel is obtained through a bilinear interpolation of the values - *of the four nearest neighbouring Cartesian pixels to the center of the RF. 
- *The same principle is applied to the inverse transformation. - * - *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 - */ - class CV_EXPORTS LogPolar_Interp - { - public: - - LogPolar_Interp() {} - - /** - *Constructor - *\param w the width of the input image - *\param h the height of the input image - *\param center the transformation center: where the output precision is maximal - *\param R the number of rings of the cortical image (default value 70 pixel) - *\param ro0 the radius of the blind spot (default value 3 pixel) - *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. - * \a 0 means that the retinal image is computed within the inscribed circle. - *\param S the number of sectors of the cortical image (default value 70 pixel). - * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. - *\param sp \a 1 (default value) means that the parameter \a S is internally computed. - * \a 0 means that the parameter \a S is provided by the user. - */ - LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0, - int interp=INTER_LINEAR, int full=1, int S=117, int sp=1); - /** - *Transformation from Cartesian image to cortical (log-polar) image. - *\param source the Cartesian image - *\return the transformed image (cortical image) - */ - const Mat to_cortical(const Mat &source); - /** - *Transformation from cortical image to retinal (inverse log-polar) image. - *\param source the cortical image - *\return the transformed image (retinal image) - */ - const Mat to_cartesian(const Mat &source); - /** - *Destructor - */ - ~LogPolar_Interp(); - - protected: - - Mat Rsri; - Mat Csri; - - int S, R, M, N; - int top, bottom,left,right; - double ro0, romax, a, q; - int interp; - - Mat ETAyx; - Mat CSIyx; - - void create_map(int M, int N, int R, int S, double ro0); - }; - + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param smin the size of the subpixel (default value 0.25 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1); /** - *Overlapping circular receptive fields technique - * - *The Cartesian plane is divided in two regions: the fovea and the periphery. - *The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in - *the periphery we use the overlapping Gaussian circular RFs. 
- * - *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 - */ - class CV_EXPORTS LogPolar_Overlapping - { - public: - LogPolar_Overlapping() {} - - /** - *Constructor - *\param w the width of the input image - *\param h the height of the input image - *\param center the transformation center: where the output precision is maximal - *\param R the number of rings of the cortical image (default value 70 pixel) - *\param ro0 the radius of the blind spot (default value 3 pixel) - *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. - * \a 0 means that the retinal image is computed within the inscribed circle. - *\param S the number of sectors of the cortical image (default value 70 pixel). - * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. - *\param sp \a 1 (default value) means that the parameter \a S is internally computed. - * \a 0 means that the parameter \a S is provided by the user. - */ - LogPolar_Overlapping(int w, int h, Point2i center, int R=70, - double ro0=3.0, int full=1, int S=117, int sp=1); - /** - *Transformation from Cartesian image to cortical (log-polar) image. - *\param source the Cartesian image - *\return the transformed image (cortical image) - */ - const Mat to_cortical(const Mat &source); - /** - *Transformation from cortical image to retinal (inverse log-polar) image. - *\param source the cortical image - *\return the transformed image (retinal image) - */ - const Mat to_cartesian(const Mat &source); - /** - *Destructor - */ - ~LogPolar_Overlapping(); - - protected: - - Mat Rsri; - Mat Csri; - std::vector Rsr; - std::vector Csr; - std::vector Wsr; - - int S, R, M, N, ind1; - int top, bottom,left,right; - double ro0, romax, a, q; - - struct kernel - { - kernel() { w = 0; } - std::vector weights; - int w; - }; - - Mat ETAyx; - Mat CSIyx; - std::vector w_ker_2D; - - void create_map(int M, int N, int R, int S, double ro0); - }; - + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); /** - * Adjacent receptive fields technique - * - *All the Cartesian pixels, whose coordinates in the cortical domain share the same integer part, are assigned to the same RF. - *The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF. - *This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements - *to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370 - * - *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 - */ - class CV_EXPORTS LogPolar_Adjacent + *Transformation from cortical image to retinal (inverse log-polar) image. 
+ *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Adjacent(); + +protected: + struct pixel { - public: - LogPolar_Adjacent() {} - - /** - *Constructor - *\param w the width of the input image - *\param h the height of the input image - *\param center the transformation center: where the output precision is maximal - *\param R the number of rings of the cortical image (default value 70 pixel) - *\param ro0 the radius of the blind spot (default value 3 pixel) - *\param smin the size of the subpixel (default value 0.25 pixel) - *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. - * \a 0 means that the retinal image is computed within the inscribed circle. - *\param S the number of sectors of the cortical image (default value 70 pixel). - * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. - *\param sp \a 1 (default value) means that the parameter \a S is internally computed. - * \a 0 means that the parameter \a S is provided by the user. - */ - LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1); - /** - *Transformation from Cartesian image to cortical (log-polar) image. - *\param source the Cartesian image - *\return the transformed image (cortical image) - */ - const Mat to_cortical(const Mat &source); - /** - *Transformation from cortical image to retinal (inverse log-polar) image. - *\param source the cortical image - *\return the transformed image (retinal image) - */ - const Mat to_cartesian(const Mat &source); - /** - *Destructor - */ - ~LogPolar_Adjacent(); - - protected: - struct pixel - { - pixel() { u = v = 0; a = 0.; } - int u; - int v; - double a; - }; - int S, R, M, N; - int top, bottom,left,right; - double ro0, romax, a, q; - std::vector > L; - std::vector A; - - void subdivide_recursively(double x, double y, int i, int j, double length, double smin); - bool get_uv(double x, double y, int&u, int&v); - void create_map(int M, int N, int R, int S, double ro0, double smin); + pixel() { u = v = 0; a = 0.; } + int u; + int v; + double a; }; + int S, R, M, N; + int top, bottom,left,right; + double ro0, romax, a, q; + std::vector > L; + std::vector A; + + void subdivide_recursively(double x, double y, int i, int j, double length, double smin); + bool get_uv(double x, double y, int&u, int&v); + void create_map(int M, int N, int R, int S, double ro0, double smin); +}; - CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src); - CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src); +CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src); +CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src); - class CV_EXPORTS LDA +class CV_EXPORTS LDA +{ +public: + // Initializes a LDA with num_components (default 0) and specifies how + // samples are aligned (default dataAsRow=true). + LDA(int num_components = 0) : + _num_components(num_components) {}; + + // Initializes and performs a Discriminant Analysis with Fisher's + // Optimization Criterion on given data in src and corresponding labels + // in labels. If 0 (or less) number of components are given, they are + // automatically determined for given data in computation. 
+ LDA(InputArrayOfArrays src, InputArray labels, + int num_components = 0) : + _num_components(num_components) { - public: - // Initializes a LDA with num_components (default 0) and specifies how - // samples are aligned (default dataAsRow=true). - LDA(int num_components = 0) : - _num_components(num_components) {}; + this->compute(src, labels); //! compute eigenvectors and eigenvalues + } - // Initializes and performs a Discriminant Analysis with Fisher's - // Optimization Criterion on given data in src and corresponding labels - // in labels. If 0 (or less) number of components are given, they are - // automatically determined for given data in computation. - LDA(InputArrayOfArrays src, InputArray labels, - int num_components = 0) : - _num_components(num_components) - { - this->compute(src, labels); //! compute eigenvectors and eigenvalues - } + // Serializes this object to a given filename. + void save(const String& filename) const; - // Serializes this object to a given filename. - void save(const String& filename) const; + // Deserializes this object from a given filename. + void load(const String& filename); - // Deserializes this object from a given filename. - void load(const String& filename); + // Serializes this object to a given cv::FileStorage. + void save(FileStorage& fs) const; - // Serializes this object to a given cv::FileStorage. - void save(FileStorage& fs) const; - - // Deserializes this object from a given cv::FileStorage. - void load(const FileStorage& node); + // Deserializes this object from a given cv::FileStorage. + void load(const FileStorage& node); - // Destructor. - ~LDA() {} + // Destructor. + ~LDA() {} - //! Compute the discriminants for data in src and labels. - void compute(InputArrayOfArrays src, InputArray labels); + //! Compute the discriminants for data in src and labels. + void compute(InputArrayOfArrays src, InputArray labels); - // Projects samples into the LDA subspace. - Mat project(InputArray src); + // Projects samples into the LDA subspace. + Mat project(InputArray src); - // Reconstructs projections from the LDA subspace. - Mat reconstruct(InputArray src); + // Reconstructs projections from the LDA subspace. + Mat reconstruct(InputArray src); - // Returns the eigenvectors of this LDA. - Mat eigenvectors() const { return _eigenvectors; }; + // Returns the eigenvectors of this LDA. + Mat eigenvectors() const { return _eigenvectors; }; - // Returns the eigenvalues of this LDA. - Mat eigenvalues() const { return _eigenvalues; } + // Returns the eigenvalues of this LDA. + Mat eigenvalues() const { return _eigenvalues; } - protected: - bool _dataAsRow; - int _num_components; - Mat _eigenvectors; - Mat _eigenvalues; +protected: + bool _dataAsRow; + int _num_components; + Mat _eigenvectors; + Mat _eigenvalues; - void lda(InputArrayOfArrays src, InputArray labels); - }; + void lda(InputArrayOfArrays src, InputArray labels); +}; - class CV_EXPORTS_W FaceRecognizer : public Algorithm - { - public: - //! virtual destructor - virtual ~FaceRecognizer() {} +class CV_EXPORTS_W FaceRecognizer : public Algorithm +{ +public: + //! virtual destructor + virtual ~FaceRecognizer() {} - // Trains a FaceRecognizer. - CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0; + // Trains a FaceRecognizer. + CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0; - // Updates a FaceRecognizer. - CV_WRAP virtual void update(InputArrayOfArrays src, InputArray labels); + // Updates a FaceRecognizer. 
+ CV_WRAP virtual void update(InputArrayOfArrays src, InputArray labels); - // Gets a prediction from a FaceRecognizer. - virtual int predict(InputArray src) const = 0; + // Gets a prediction from a FaceRecognizer. + virtual int predict(InputArray src) const = 0; - // Predicts the label and confidence for a given sample. - CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0; + // Predicts the label and confidence for a given sample. + CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0; - // Serializes this object to a given filename. - CV_WRAP virtual void save(const String& filename) const; + // Serializes this object to a given filename. + CV_WRAP virtual void save(const String& filename) const; - // Deserializes this object from a given filename. - CV_WRAP virtual void load(const String& filename); + // Deserializes this object from a given filename. + CV_WRAP virtual void load(const String& filename); - // Serializes this object to a given cv::FileStorage. - virtual void save(FileStorage& fs) const = 0; + // Serializes this object to a given cv::FileStorage. + virtual void save(FileStorage& fs) const = 0; - // Deserializes this object from a given cv::FileStorage. - virtual void load(const FileStorage& fs) = 0; + // Deserializes this object from a given cv::FileStorage. + virtual void load(const FileStorage& fs) = 0; - }; +}; - CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX); - CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX); - CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8, - int grid_x=8, int grid_y=8, double threshold = DBL_MAX); +CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX); +CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX); +CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8, + int grid_x=8, int grid_y=8, double threshold = DBL_MAX); - enum - { - COLORMAP_AUTUMN = 0, - COLORMAP_BONE = 1, - COLORMAP_JET = 2, - COLORMAP_WINTER = 3, - COLORMAP_RAINBOW = 4, - COLORMAP_OCEAN = 5, - COLORMAP_SUMMER = 6, - COLORMAP_SPRING = 7, - COLORMAP_COOL = 8, - COLORMAP_HSV = 9, - COLORMAP_PINK = 10, - COLORMAP_HOT = 11 - }; +enum +{ + COLORMAP_AUTUMN = 0, + COLORMAP_BONE = 1, + COLORMAP_JET = 2, + COLORMAP_WINTER = 3, + COLORMAP_RAINBOW = 4, + COLORMAP_OCEAN = 5, + COLORMAP_SUMMER = 6, + COLORMAP_SPRING = 7, + COLORMAP_COOL = 8, + COLORMAP_HSV = 9, + COLORMAP_PINK = 10, + COLORMAP_HOT = 11 +}; - CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap); +CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap); - CV_EXPORTS bool initModule_contrib(); +CV_EXPORTS bool initModule_contrib(); } #include "opencv2/contrib/retina.hpp" - #include "opencv2/contrib/openfabmap.hpp" #endif - -#endif diff --git a/modules/contrib/include/opencv2/contrib/compat.hpp b/modules/contrib/include/opencv2/contrib/compat.hpp new file mode 100644 index 0000000000..ba758c235f --- /dev/null +++ b/modules/contrib/include/opencv2/contrib/compat.hpp @@ -0,0 +1,384 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CONTRIB_COMPAT_HPP__ +#define __OPENCV_CONTRIB_COMPAT_HPP__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus + +/****************************************************************************************\ +* Adaptive Skin Detector * +\****************************************************************************************/ + +class CV_EXPORTS CvAdaptiveSkinDetector +{ +private: + enum { + GSD_HUE_LT = 3, + GSD_HUE_UT = 33, + GSD_INTENSITY_LT = 15, + GSD_INTENSITY_UT = 250 + }; + + class CV_EXPORTS Histogram + { + private: + enum { + HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1) + }; + + protected: + int findCoverageIndex(double surfaceToCover, int defaultValue = 0); + + public: + CvHistogram *fHistogram; + Histogram(); + virtual ~Histogram(); + + void findCurveThresholds(int &x1, int &x2, double percent = 0.05); + void mergeWith(Histogram *source, double weight); + }; + + int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider; + double fHistogramMergeFactor, fHuePercentCovered; + Histogram histogramHueMotion, skinHueHistogram; + IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame; + IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame; + +protected: + void initData(IplImage *src, int widthDivider, int heightDivider); + void adaptiveFilter(); + +public: + + enum { + MORPHING_METHOD_NONE = 0, + MORPHING_METHOD_ERODE = 1, + MORPHING_METHOD_ERODE_ERODE = 2, + MORPHING_METHOD_ERODE_DILATE = 3 + }; + + CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE); + virtual ~CvAdaptiveSkinDetector(); + + virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask); +}; + + +/****************************************************************************************\ + * Fuzzy MeanShift Tracker * + \****************************************************************************************/ + +class CV_EXPORTS CvFuzzyPoint { +public: + double x, y, value; + + CvFuzzyPoint(double _x, double _y); +}; + +class CV_EXPORTS CvFuzzyCurve { +private: + std::vector<CvFuzzyPoint> points; + double value, centre; + + bool between(double x, double x1, double x2); + +public: + CvFuzzyCurve(); + ~CvFuzzyCurve(); + + void setCentre(double _centre); + double getCentre(); + void clear(); + void addPoint(double x, double y); + double calcValue(double param); + double getValue(); + void setValue(double _value); +}; + +class CV_EXPORTS CvFuzzyFunction { +public: + std::vector<CvFuzzyCurve> curves; + + CvFuzzyFunction(); + ~CvFuzzyFunction(); + void addCurve(CvFuzzyCurve *curve, double value = 0); + void resetValues(); + double calcValue(); + CvFuzzyCurve *newCurve(); +}; + +class CV_EXPORTS CvFuzzyRule { +private: + CvFuzzyCurve *fuzzyInput1, *fuzzyInput2; + CvFuzzyCurve *fuzzyOutput; +public: + CvFuzzyRule(); + ~CvFuzzyRule(); + void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); + double calcValue(double param1, double param2); + CvFuzzyCurve *getOutputCurve(); +}; + +class CV_EXPORTS CvFuzzyController { +private: + std::vector<CvFuzzyRule*> rules; +public: + CvFuzzyController(); + ~CvFuzzyController(); + void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); + double calcOutput(double param1, double param2); +}; + +class CV_EXPORTS CvFuzzyMeanShiftTracker +{ +private: + class FuzzyResizer + { + private: + CvFuzzyFunction iInput, iOutput; + CvFuzzyController fuzzyController; + public: + FuzzyResizer(); + int calcOutput(double edgeDensity, double density);
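+        // Illustrative sketch (hypothetical values, not from the original
+        // header): the resizer maps two fuzzy inputs to a suggested window
+        // size change in pixels, e.g.
+        //
+        //     FuzzyResizer fr;
+        //     int delta = fr.calcOutput(0.8, 0.5); // edge density, inner density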
+ }; + + class SearchWindow + { + public: + FuzzyResizer *fuzzyResizer; + int x, y; + int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth; + int ldx, ldy, ldw, ldh, numShifts, numIters; + int xGc, yGc; + long m00, m01, m10, m11, m02, m20; + double ellipseAngle; + double density; + unsigned int depthLow, depthHigh; + int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom; + + SearchWindow(); + ~SearchWindow(); + void setSize(int _x, int _y, int _width, int _height); + void initDepthValues(IplImage *maskImage, IplImage *depthMap); + bool shift(); + void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth); + void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth); + }; + +public: + enum TrackingState + { + tsNone = 0, + tsSearching = 1, + tsTracking = 2, + tsSetWindow = 3, + tsDisabled = 10 + }; + + enum ResizeMethod { + rmEdgeDensityLinear = 0, + rmEdgeDensityFuzzy = 1, + rmInnerDensity = 2 + }; + + enum { + MinKernelMass = 1000 + }; + + SearchWindow kernel; + int searchMode; + +private: + enum + { + MaxMeanShiftIteration = 5, + MaxSetSizeIteration = 5 + }; + + void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth); + +public: + CvFuzzyMeanShiftTracker(); + ~CvFuzzyMeanShiftTracker(); + + void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass); +}; + + +namespace cv +{ + +typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data); + +class CV_EXPORTS LevMarqSparse { +public: + LevMarqSparse(); + LevMarqSparse(int npoints, // number of points + int ncameras, // number of cameras + int nPointParams, // number of params per one point (3 in case of 3D points) + int nCameraParams, // number of parameters per one camera + int nErrParams, // number of parameters in measurement vector + // for 1 point at one camera (2 in case of 2D projections) + Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras + // 1 - point is visible for the camera, 0 - invisible + Mat& P0, // starting vector of parameters, first cameras then points + Mat& X, // measurements, in order of visibility. non visible cases are skipped + TermCriteria criteria, // termination criteria + + // callback for estimation of Jacobian matrices + void (*fjac)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& A, Mat& B, void* data), + // callback for estimation of backprojection errors + void (*func)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& estim, void* data), + void* data, // user-specific data passed to the callbacks + BundleAdjustCallback cb, void* user_data + ); + + virtual ~LevMarqSparse(); + + virtual void run( int npoints, // number of points + int ncameras, // number of cameras + int nPointParams, // number of params per one point (3 in case of 3D points) + int nCameraParams, // number of parameters per one camera + int nErrParams, // number of parameters in measurement vector + // for 1 point at one camera (2 in case of 2D projections) + Mat& visibility, // visibility matrix. 
rows correspond to points, columns correspond to cameras + // 1 - point is visible for the camera, 0 - invisible + Mat& P0, // starting vector of parameters, first cameras then points + Mat& X, // measurements, in order of visibility. non visible cases are skipped + TermCriteria criteria, // termination criteria + + // callback for estimation of Jacobian matrices + void (CV_CDECL * fjac)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& A, Mat& B, void* data), + // callback for estimation of backprojection errors + void (CV_CDECL * func)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& estim, void* data), + void* data // user-specific data passed to the callbacks + ); + + virtual void clear(); + + // useful function to do simple bundle adjustment tasks + static void bundleAdjust(std::vector<Point3d>& points, // positions of points in global coordinate system (input and output) + const std::vector<std::vector<Point2d> >& imagePoints, // projections of 3d points for every camera + const std::vector<std::vector<int> >& visibility, // visibility of 3d points for every camera + std::vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output) + std::vector<Mat>& R, // rotation matrices of all cameras (input and output) + std::vector<Mat>& T, // translation vector of all cameras (input and output) + std::vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output) + const TermCriteria& criteria= + TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON), + BundleAdjustCallback cb = 0, void* user_data = 0); + +public: + virtual void optimize(CvMat &_vis); //main function that runs minimization + + //iteratively asks for measurement for visible camera-point pairs + void ask_for_proj(CvMat &_vis,bool once=false); + //iteratively asks for Jacobians for every camera_point pair + void ask_for_projac(CvMat &_vis); + + CvMat* err; //error X-hX + double prevErrNorm, errNorm; + double lambda; + CvTermCriteria criteria; + int iters; + + CvMat** U; //size of array is equal to number of cameras + CvMat** V; //size of array is equal to number of points + CvMat** inv_V_star; //inverse of V* + + CvMat** A; + CvMat** B; + CvMat** W; + + CvMat* X; //measurement + CvMat* hX; //current measurement estimation given new parameter vector + + CvMat* prevP; //current already accepted parameter.
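+    // Illustrative call sketch (hypothetical, caller-provided containers
+    // matching the bundleAdjust() declaration above):
+    //
+    //     LevMarqSparse::bundleAdjust(points3d, imagePoints, visibility,
+    //                                 cameraMatrix, R, T, distCoeffs,
+    //                                 TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON));
+    //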
+ CvMat* P; // parameters used to evaluate function with new params + // this parameters may be rejected + + CvMat* deltaP; //computed increase of parameters (result of normal system solution ) + + CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation + // length of array is j = number of cameras + CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation + // length of array is i = number of points + + CvMat** Yj; //length of array is i = num_points + + CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params + + CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation + + CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j + + int num_cams; + int num_points; + int num_err_param; + int num_cam_param; + int num_point_param; + + //target function and jacobian pointers, which needs to be initialized + void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data); + void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data); + + void* data; + + BundleAdjustCallback cb; + void* user_data; +}; + +} // cv + +#endif /* __cplusplus */ + +#endif /* __OPENCV_CONTRIB_COMPAT_HPP__ */ diff --git a/modules/contrib/src/adaptiveskindetector.cpp b/modules/contrib/src/adaptiveskindetector.cpp index 35d945837e..1448e63842 100644 --- a/modules/contrib/src/adaptiveskindetector.cpp +++ b/modules/contrib/src/adaptiveskindetector.cpp @@ -36,6 +36,7 @@ #include "precomp.hpp" #include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/contrib/compat.hpp" #define ASD_INTENSITY_SET_PIXEL(pointer, qq) {(*pointer) = (unsigned char)qq;} diff --git a/modules/contrib/src/ba.cpp b/modules/contrib/src/ba.cpp index 8e4faf2216..ff58073fc9 100644 --- a/modules/contrib/src/ba.cpp +++ b/modules/contrib/src/ba.cpp @@ -41,6 +41,7 @@ #include "precomp.hpp" #include "opencv2/calib3d.hpp" +#include "opencv2/contrib/compat.hpp" #include "opencv2/calib3d/calib3d_c.h" #include <iostream> diff --git a/modules/contrib/src/chamfermatching.cpp b/modules/contrib/src/chamfermatching.cpp index 16ac947d63..fd2899c5f0 100644 --- a/modules/contrib/src/chamfermatching.cpp +++ b/modules/contrib/src/chamfermatching.cpp @@ -142,7 +142,7 @@ private: LocationScaleImageRange(const std::vector<Point>& locations, const std::vector<float>& _scales) : locations_(locations), scales_(_scales) { - assert(locations.size()==_scales.size()); + CV_Assert(locations.size()==_scales.size()); } ImageIterator* iterator() const @@ -393,7 +393,7 @@ private: LocationScaleImageIterator(const std::vector<Point>& locations, const std::vector<float>& _scales) : locations_(locations), scales_(_scales) { - assert(locations.size()==_scales.size()); + CV_Assert(locations.size()==_scales.size()); reset(); } @@ -622,7 +622,7 @@ void ChamferMatcher::Matching::followContour(Mat& templ_img, template_coords_t& coordinate_t next; unsigned char ptr; - assert (direction==-1 || !coords.empty()); + CV_Assert (direction==-1 || !coords.empty()); coordinate_t crt = coords.back(); @@ -903,18 +903,18 @@ void ChamferMatcher::Template::show() const p2.x = x + pad*(int)(sin(orientations[i])*100)/100; p2.y = y + pad*(int)(cos(orientations[i])*100)/100; - line(templ_color, p1,p2, CV_RGB(255,0,0)); + line(templ_color, p1,p2, Scalar(255,0,0)); } } - circle(templ_color,Point(center.x + pad, center.y + pad),1,CV_RGB(0,255,0)); + circle(templ_color,Point(center.x + pad, center.y + pad),1,Scalar(0,255,0)); #ifdef HAVE_OPENCV_HIGHGUI
namedWindow("templ",1); imshow("templ",templ_color); waitKey(); #else - CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without GUI support"); + CV_Error(Error::StsNotImplemented, "OpenCV has been compiled without GUI support"); #endif templ_color.release(); @@ -1059,7 +1059,7 @@ void ChamferMatcher::Matching::fillNonContourOrientations(Mat& annotated_img, Ma int cols = annotated_img.cols; int rows = annotated_img.rows; - assert(orientation_img.cols==cols && orientation_img.rows==rows); + CV_Assert(orientation_img.cols==cols && orientation_img.rows==rows); for (int y=0;ycoords; for (size_t i=0;i indices = _indices.getMat(); _dst.create(src.rows, src.cols, src.type()); @@ -64,8 +64,8 @@ static Mat argsort(InputArray _src, bool ascending=true) { Mat src = _src.getMat(); if (src.rows != 1 && src.cols != 1) - CV_Error(CV_StsBadArg, "cv::argsort only sorts 1D matrices."); - int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING); + CV_Error(Error::StsBadArg, "cv::argsort only sorts 1D matrices."); + int flags = SORT_EVERY_ROW | (ascending ? SORT_ASCENDING : SORT_DESCENDING); Mat sorted_indices; sortIdx(src.reshape(1,1),sorted_indices,flags); return sorted_indices; @@ -116,8 +116,8 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi) Mat Y = _Y.getMat(); Mat xi = _xi.getMat(); // check types & alignment - assert((x.type() == Y.type()) && (Y.type() == xi.type())); - assert((x.cols == 1) && (x.rows == Y.rows) && (x.cols == Y.cols)); + CV_Assert((x.type() == Y.type()) && (Y.type() == xi.type())); + CV_Assert((x.cols == 1) && (x.rows == Y.rows) && (x.cols == Y.cols)); // call templated interp1 switch(x.type()) { case CV_8SC1: return interp1_(x,Y,xi); break; @@ -127,7 +127,7 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi) case CV_32SC1: return interp1_(x,Y,xi); break; case CV_32FC1: return interp1_(x,Y,xi); break; case CV_64FC1: return interp1_(x,Y,xi); break; - default: CV_Error(CV_StsUnsupportedFormat, ""); break; + default: CV_Error(Error::StsUnsupportedFormat, ""); break; } return Mat(); } @@ -473,7 +473,7 @@ namespace colormap void ColorMap::operator()(InputArray _src, OutputArray _dst) const { if(_lut.total() != 256) - CV_Error(CV_StsAssert, "cv::LUT only supports tables of size 256."); + CV_Error(Error::StsAssert, "cv::LUT only supports tables of size 256."); Mat src = _src.getMat(); // Return original matrix if wrong type is given (is fail loud better here?) if(src.type() != CV_8UC1 && src.type() != CV_8UC3) @@ -521,7 +521,7 @@ namespace colormap colormap == COLORMAP_WINTER ? 
(colormap::ColorMap*)(new colormap::Winter) : 0; if( !cm ) - CV_Error( CV_StsBadArg, "Unknown colormap id; use one of COLORMAP_*"); + CV_Error( Error::StsBadArg, "Unknown colormap id; use one of COLORMAP_*"); (*cm)(src, dst); diff --git a/modules/contrib/src/facerec.cpp b/modules/contrib/src/facerec.cpp index bb1d21e96a..8fc401d559 100644 --- a/modules/contrib/src/facerec.cpp +++ b/modules/contrib/src/facerec.cpp @@ -51,7 +51,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double // make sure the input data is a vector of matrices or vector of vector if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) { String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<cv::Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >)."; - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // number of samples size_t n = src.total(); @@ -67,7 +67,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double // make sure data can be reshaped, throw exception if not! if(src.getMat(i).total() != d) { String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, d, src.getMat(i).total()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // get a hold of the current row Mat xi = data.row(i); @@ -306,13 +306,13 @@ void FaceRecognizer::update(InputArrayOfArrays src, InputArray labels ) { } String error_msg = format("This FaceRecognizer (%s) does not support updating, you have to use FaceRecognizer::train to update it.", this->name().c_str()); - CV_Error(CV_StsNotImplemented, error_msg); + CV_Error(Error::StsNotImplemented, error_msg); } void FaceRecognizer::save(const String& filename) const { FileStorage fs(filename, FileStorage::WRITE); if (!fs.isOpened()) - CV_Error(CV_StsError, "File can't be opened for writing!"); + CV_Error(Error::StsError, "File can't be opened for writing!"); this->save(fs); fs.release(); } @@ -320,7 +320,7 @@ void FaceRecognizer::save(const String& filename) const { void FaceRecognizer::load(const String& filename) { FileStorage fs(filename, FileStorage::READ); if (!fs.isOpened()) - CV_Error(CV_StsError, "File can't be opened for writing!"); + CV_Error(Error::StsError, "File can't be opened for writing!"); this->load(fs); fs.release(); } @@ -331,17 +331,17 @@ void FaceRecognizer::load(const String& filename) { void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) { if(_src.total() == 0) { String error_message = format("Empty training data was given. You'll need more than one sample to learn a model."); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } else if(_local_labels.getMat().type() != CV_32SC1) { String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _local_labels.type()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // make sure data has correct size if(_src.total() > 1) { for(int i = 1; i < static_cast<int>(_src.total()); i++) { if(_src.getMat(i-1).total() != _src.getMat(i).total()) { String error_message = format("In the Eigenfaces method all input samples (training images) must be of equal size!
Expected %d pixels, but was %d pixels.", _src.getMat(i-1).total(), _src.getMat(i).total()); - CV_Error(CV_StsUnsupportedFormat, error_message); + CV_Error(Error::StsUnsupportedFormat, error_message); } } } @@ -355,7 +355,7 @@ void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) { // assert there are as much samples as labels if(static_cast<int>(labels.total()) != n) { String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", n, labels.total()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // clear existing model data _labels.release(); @@ -365,7 +365,7 @@ void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) { _num_components = n; // perform the PCA - PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, _num_components); + PCA pca(data, Mat(), PCA::DATA_AS_ROW, _num_components); // copy the PCA results _mean = pca.mean.reshape(1,1); // store the mean vector _eigenvalues = pca.eigenvalues.clone(); // eigenvalues by row @@ -386,11 +386,11 @@ void Eigenfaces::predict(InputArray _src, int &minClass, double &minDist) const if(_projections.empty()) { // throw error if no data (or simply return -1?) String error_message = "This Eigenfaces model is not computed yet. Did you call Eigenfaces::train?"; - CV_Error(CV_StsError, error_message); + CV_Error(Error::StsError, error_message); } else if(_eigenvectors.rows != static_cast<int>(src.total())) { // check data alignment just for clearer exception messages String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // project into PCA subspace Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1)); @@ -440,17 +440,17 @@ void Eigenfaces::save(FileStorage& fs) const { void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) { if(src.total() == 0) { String error_message = format("Empty training data was given. You'll need more than one sample to learn a model."); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } else if(_lbls.getMat().type() != CV_32SC1) { String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // make sure data has correct size if(src.total() > 1) { for(int i = 1; i < static_cast<int>(src.total()); i++) { if(src.getMat(i-1).total() != src.getMat(i).total()) { String error_message = format("In the Fisherfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", src.getMat(i-1).total(), src.getMat(i).total()); - CV_Error(CV_StsUnsupportedFormat, error_message); + CV_Error(Error::StsUnsupportedFormat, error_message); } } } @@ -462,10 +462,10 @@ void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) { // make sure labels are passed in correct shape if(labels.total() != (size_t) N) { String error_message = format("The number of samples (src) must equal the number of labels (labels)!
len(src)=%d, len(labels)=%d.", N, labels.total()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } else if(labels.rows != 1 && labels.cols != 1) { String error_message = format("Expected the labels in a matrix with one row or column! Given dimensions are rows=%s, cols=%d.", labels.rows, labels.cols); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // clear existing model data _labels.release(); @@ -481,7 +481,7 @@ void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) { if((_num_components <= 0) || (_num_components > (C-1))) _num_components = (C-1); // perform a PCA and keep (N-C) components - PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, (N-C)); + PCA pca(data, Mat(), PCA::DATA_AS_ROW, (N-C)); // project the data and perform a LDA on it LDA lda(pca.project(data),labels, _num_components); // store the total mean vector @@ -506,10 +506,10 @@ void Fisherfaces::predict(InputArray _src, int &minClass, double &minDist) const if(_projections.empty()) { // throw error if no data (or simply return -1?) String error_message = "This Fisherfaces model is not computed yet. Did you call Fisherfaces::train?"; - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } else if(src.total() != (size_t) _eigenvectors.rows) { String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // project into LDA subspace Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1)); @@ -641,7 +641,7 @@ static void elbp(InputArray src, OutputArray dst, int radius, int neighbors) case CV_64FC1: elbp_<double>(src,dst, radius, neighbors); break; default: String error_msg = format("Using Original Local Binary Patterns for feature extraction only works on single-channel images (given %d). Please pass the image data as a grayscale image!", type); - CV_Error(CV_StsNotImplemented, error_msg); + CV_Error(Error::StsNotImplemented, error_msg); break; } } @@ -687,7 +687,7 @@ static Mat histc(InputArray _src, int minVal, int maxVal, bool normed) return histc_(src, minVal, maxVal, normed); break; default: - CV_Error(CV_StsUnmatchedFormats, "This type is not implemented yet."); break; + CV_Error(Error::StsUnmatchedFormats, "This type is not implemented yet."); break; } return Mat(); } @@ -769,14 +769,14 @@ void LBPH::update(InputArrayOfArrays _in_src, InputArray _in_labels) { void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserveData) { if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR) { String error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >)."; - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } if(_in_src.total() == 0) { String error_message = format("Empty training data was given. You'll need more than one sample to learn a model."); - CV_Error(CV_StsUnsupportedFormat, error_message); + CV_Error(Error::StsUnsupportedFormat, error_message); } else if(_in_labels.getMat().type() != CV_32SC1) { String error_message = format("Labels must be given as integer (CV_32SC1).
Expected %d, but was %d.", CV_32SC1, _in_labels.type()); - CV_Error(CV_StsUnsupportedFormat, error_message); + CV_Error(Error::StsUnsupportedFormat, error_message); } // get the vector of matrices std::vector<Mat> src; @@ -786,7 +786,7 @@ void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserv // check if data is well- aligned if(labels.total() != src.size()) { String error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", src.size(), _labels.total()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // if this model should be trained without preserving old data, delete old model data if(!preserveData) { @@ -817,7 +817,7 @@ void LBPH::predict(InputArray _src, int &minClass, double &minDist) const { if(_histograms.empty()) { // throw error if no data (or simply return -1?) String error_message = "This LBPH model is not computed yet. Did you call the train method?"; - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } Mat src = _src.getMat(); // get the spatial histogram from input image diff --git a/modules/contrib/src/fuzzymeanshifttracker.cpp b/modules/contrib/src/fuzzymeanshifttracker.cpp index c83f915b03..7ad8cd838e 100644 --- a/modules/contrib/src/fuzzymeanshifttracker.cpp +++ b/modules/contrib/src/fuzzymeanshifttracker.cpp @@ -35,6 +35,7 @@ //M*/ #include "precomp.hpp" +#include "opencv2/contrib/compat.hpp" CvFuzzyPoint::CvFuzzyPoint(double _x, double _y) { diff --git a/modules/contrib/src/gencolors.cpp b/modules/contrib/src/gencolors.cpp index 688c98d9ff..24796ec03d 100644 --- a/modules/contrib/src/gencolors.cpp +++ b/modules/contrib/src/gencolors.cpp @@ -85,7 +85,7 @@ static void downsamplePoints( const Mat& src, Mat& dst, size_t count ) candidatePointsMask.at<uchar>(0, maxLoc.x) = 0; Mat minDists; - reduce( activedDists, minDists, 0, CV_REDUCE_MIN ); + reduce( activedDists, minDists, 0, REDUCE_MIN ); minMaxLoc( minDists, 0, &maxVal, 0, &maxLoc, candidatePointsMask ); dst.at<Point3_<uchar> >((int)i) = src.at<Point3_<uchar> >(maxLoc.x); } diff --git a/modules/contrib/src/lda.cpp b/modules/contrib/src/lda.cpp index e643529ae0..60693fc772 100644 --- a/modules/contrib/src/lda.cpp +++ b/modules/contrib/src/lda.cpp @@ -43,9 +43,9 @@ static Mat argsort(InputArray _src, bool ascending=true) Mat src = _src.getMat(); if (src.rows != 1 && src.cols != 1) { String error_message = "Wrong shape of input matrix! Expected a matrix with one row or column."; - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } - int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING); + int flags = SORT_EVERY_ROW | (ascending ?
SORT_ASCENDING : SORT_DESCENDING); Mat sorted_indices; sortIdx(src.reshape(1,1),sorted_indices,flags); return sorted_indices; @@ -55,7 +55,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double // make sure the input data is a vector of matrices or vector of vector if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) { String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<cv::Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >)."; - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // number of samples size_t n = src.total(); @@ -71,7 +71,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double // make sure data can be reshaped, throw exception if not! if(src.getMat(i).total() != d) { String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, (int)d, (int)src.getMat(i).total()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // get a hold of the current row Mat xi = data.row(i); @@ -87,7 +87,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) { if(_indices.getMat().type() != CV_32SC1) { - CV_Error(CV_StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!"); + CV_Error(Error::StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!"); } Mat src = _src.getMat(); std::vector<int> indices = _indices.getMat(); @@ -179,12 +179,12 @@ Mat subspaceProject(InputArray _W, InputArray _mean, InputArray _src) { // make sure the data has the correct shape if(W.rows != d) { String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // make sure mean is correct if not empty if(!mean.empty() && (mean.total() != (size_t) d)) { String error_message = format("Wrong mean shape for the given data matrix. Expected %d, but was %d.", d, mean.total()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // create temporary matrices Mat X, Y; @@ -217,12 +217,12 @@ Mat subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src) // make sure the data has the correct shape if(W.cols != d) { String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // make sure mean is correct if not empty if(!mean.empty() && (mean.total() != (size_t) W.rows)) { String error_message = format("Wrong mean shape for the given eigenvector matrix.
Expected %d, but was %d.", W.cols, mean.total()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // initialize temporary matrices Mat X, Y; @@ -939,7 +939,7 @@ public: void LDA::save(const String& filename) const { FileStorage fs(filename, FileStorage::WRITE); if (!fs.isOpened()) { - CV_Error(CV_StsError, "File can't be opened for writing!"); + CV_Error(Error::StsError, "File can't be opened for writing!"); } this->save(fs); fs.release(); @@ -949,7 +949,7 @@ void LDA::save(const String& filename) const { void LDA::load(const String& filename) { FileStorage fs(filename, FileStorage::READ); if (!fs.isOpened()) - CV_Error(CV_StsError, "File can't be opened for writing!"); + CV_Error(Error::StsError, "File can't be opened for writing!"); this->load(fs); fs.release(); } @@ -1002,12 +1002,12 @@ void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) { // want to separate from each other then? if(C == 1) { String error_message = "At least two classes are needed to perform a LDA. Reason: Only one class was given!"; - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // throw error if less labels, than samples if (labels.size() != static_cast<size_t>(N)) { String error_message = format("The number of samples must equal the number of labels. Given %d labels, %d samples. ", labels.size(), N); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } // warn if within-classes scatter matrix becomes singular if (N < D) { @@ -1090,7 +1090,7 @@ void LDA::compute(InputArrayOfArrays _src, InputArray _lbls) { break; default: String error_message= format("InputArray Datatype %d is not supported.", _src.kind()); - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); break; } } diff --git a/modules/contrib/src/octree.cpp b/modules/contrib/src/octree.cpp index 80d2564945..b612372753 100644 --- a/modules/contrib/src/octree.cpp +++ b/modules/contrib/src/octree.cpp @@ -258,7 +258,7 @@ namespace cv void Octree::buildTree(const std::vector<Point3f>& points3d, int maxLevels, int _minPoints) { - assert((size_t)maxLevels * 8 < MAX_STACK_SIZE); + CV_Assert((size_t)maxLevels * 8 < MAX_STACK_SIZE); points.resize(points3d.size()); std::copy(points3d.begin(), points3d.end(), points.begin()); minPoints = _minPoints; diff --git a/modules/contrib/src/retina.cpp b/modules/contrib/src/retina.cpp index 5dae919bff..5175480fea 100644 --- a/modules/contrib/src/retina.cpp +++ b/modules/contrib/src/retina.cpp @@ -450,7 +450,7 @@ bool Retina::_convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std:: inputMatToConvert.convertTo(dst, dsttype); } else - CV_Error(CV_StsUnsupportedFormat, "input image must be single channel (gray levels), bgr format (color) or bgra (color with transparency which won't be considered"); + CV_Error(Error::StsUnsupportedFormat, "input image must be single channel (gray levels), bgr format (color) or bgra (color with transparency which won't be considered"); return imageNumberOfChannels>1; // return bool : false for gray level image processing, true for color mode } diff --git a/modules/contrib/src/rgbdodometry.cpp b/modules/contrib/src/rgbdodometry.cpp index 6e1f217d09..6f86f17ec4 100644 --- a/modules/contrib/src/rgbdodometry.cpp +++ b/modules/contrib/src/rgbdodometry.cpp @@ -422,7 +422,7 @@ bool computeKsi( int transformType, computeCFuncPtr = computeC_Translation; } else - CV_Error( CV_StsBadFlag, "Unsupported value of transformation type flag."); + CV_Error(Error::StsBadFlag,
"Unsupported value of transformation type flag."); Mat C( correspsCount, Cwidth, CV_64FC1 ); Mat dI_dt( correspsCount, 1, CV_64FC1 ); diff --git a/modules/contrib/src/spinimages.cpp b/modules/contrib/src/spinimages.cpp index 54eaf825c8..4e58472bfb 100644 --- a/modules/contrib/src/spinimages.cpp +++ b/modules/contrib/src/spinimages.cpp @@ -56,24 +56,24 @@ namespace { const static Scalar colors[] = { - CV_RGB(255, 0, 0), - CV_RGB( 0, 255, 0), - CV_RGB( 0, 0, 255), - CV_RGB(255, 255, 0), - CV_RGB(255, 0, 255), - CV_RGB( 0, 255, 255), - CV_RGB(255, 127, 127), - CV_RGB(127, 127, 255), - CV_RGB(127, 255, 127), - CV_RGB(255, 255, 127), - CV_RGB(127, 255, 255), - CV_RGB(255, 127, 255), - CV_RGB(127, 0, 0), - CV_RGB( 0, 127, 0), - CV_RGB( 0, 0, 127), - CV_RGB(127, 127, 0), - CV_RGB(127, 0, 127), - CV_RGB( 0, 127, 127) + Scalar(255, 0, 0), + Scalar( 0, 255, 0), + Scalar( 0, 0, 255), + Scalar(255, 255, 0), + Scalar(255, 0, 255), + Scalar( 0, 255, 255), + Scalar(255, 127, 127), + Scalar(127, 127, 255), + Scalar(127, 255, 127), + Scalar(255, 255, 127), + Scalar(127, 255, 255), + Scalar(255, 127, 255), + Scalar(127, 0, 0), + Scalar( 0, 127, 0), + Scalar( 0, 0, 127), + Scalar(127, 127, 0), + Scalar(127, 0, 127), + Scalar( 0, 127, 127) }; size_t colors_mum = sizeof(colors)/sizeof(colors[0]); @@ -199,7 +199,7 @@ void convertTransformMatrix(const float* matrix, float* sseMatrix) inline __m128 transformSSE(const __m128* matrix, const __m128& in) { - assert(((size_t)matrix & 15) == 0); + CV_DbgAssert(((size_t)matrix & 15) == 0); __m128 a0 = _mm_mul_ps(_mm_load_ps((float*)(matrix+0)), _mm_shuffle_ps(in,in,_MM_SHUFFLE(0,0,0,0))); __m128 a1 = _mm_mul_ps(_mm_load_ps((float*)(matrix+1)), _mm_shuffle_ps(in,in,_MM_SHUFFLE(1,1,1,1))); __m128 a2 = _mm_mul_ps(_mm_load_ps((float*)(matrix+2)), _mm_shuffle_ps(in,in,_MM_SHUFFLE(2,2,2,2))); @@ -221,8 +221,8 @@ void computeSpinImages( const Octree& Octree, const std::vector& points float pixelsPerMeter = 1.f / binSize; float support = imageWidth * binSize; - assert(normals.size() == points.size()); - assert(mask.size() == points.size()); + CV_Assert(normals.size() == points.size()); + CV_Assert(mask.size() == points.size()); size_t points_size = points.size(); mask.resize(points_size); @@ -250,7 +250,7 @@ void computeSpinImages( const Octree& Octree, const std::vector& points if (mask[i] == 0) continue; - int t = cvGetThreadNum(); + int t = getThreadNum(); std::vector& pointsInSphere = pointsInSpherePool[t]; const Point3f& center = points[i]; @@ -289,7 +289,7 @@ void computeSpinImages( const Octree& Octree, const std::vector& points __m128 ppm4 = _mm_set1_ps(pixelsPerMeter); __m128i height4m1 = _mm_set1_epi32(spinImage.rows-1); __m128i width4m1 = _mm_set1_epi32(spinImage.cols-1); - assert( spinImage.step <= 0xffff ); + CV_Assert( spinImage.step <= 0xffff ); __m128i step4 = _mm_set1_epi16((short)step); __m128i zero4 = _mm_setzero_si128(); __m128i one4i = _mm_set1_epi32(1); @@ -472,7 +472,7 @@ float cv::Mesh3D::estimateResolution(float /*tryRatio*/) return resolution = (float)dist[ dist.size() / 2 ]; #else - CV_Error(CV_StsNotImplemented, ""); + CV_Error(Error::StsNotImplemented, ""); return 1.f; #endif } @@ -686,16 +686,15 @@ inline float cv::SpinImageModel::groupingCreteria(const Point3f& pointScene1, co } -cv::SpinImageModel::SpinImageModel(const Mesh3D& _mesh) : mesh(_mesh) , out(0) +cv::SpinImageModel::SpinImageModel(const Mesh3D& _mesh) : mesh(_mesh) { if (mesh.vtx.empty()) throw Mesh3D::EmptyMeshException(); defaultParams(); } -cv::SpinImageModel::SpinImageModel() : 
out(0) { defaultParams(); } -cv::SpinImageModel::~SpinImageModel() {} -void cv::SpinImageModel::setLogger(std::ostream* log) { out = log; } +cv::SpinImageModel::SpinImageModel() { defaultParams(); } +cv::SpinImageModel::~SpinImageModel() {} void cv::SpinImageModel::defaultParams() { @@ -756,7 +755,7 @@ Mat cv::SpinImageModel::packRandomScaledSpins(bool separateScale, size_t xCount, int sz = spins.front().cols; Mat result((int)(yCount * sz + (yCount - 1)), (int)(xCount * sz + (xCount - 1)), CV_8UC3); - result = colors[(static_cast<int64>(cvGetTickCount()/cvGetTickFrequency())/1000) % colors_mum]; + result = colors[(static_cast<int64>(getTickCount()/getTickFrequency())/1000) % colors_mum]; int pos = 0; for(int y = 0; y < (int)yCount; ++y) @@ -1030,12 +1029,8 @@ private: matchSpinToModel(scene.spinImages.row(i), indeces, coeffs); for(size_t t = 0; t < indeces.size(); ++t) allMatches.push_back(Match(i, indeces[t], coeffs[t])); - - if (out) if (i % 100 == 0) *out << "Comparing scene spinimage " << i << " of " << scene.spinImages.rows << std::endl; } corr_timer.stop(); - if (out) *out << "Spin correlation time = " << corr_timer << std::endl; - if (out) *out << "Matches number = " << allMatches.size() << std::endl; if(allMatches.empty()) return; @@ -1046,7 +1041,6 @@ private: allMatches.erase( remove_if(allMatches.begin(), allMatches.end(), bind2nd(std::less<float>(), maxMeasure * fraction)), allMatches.end()); - if (out) *out << "Matches number [filtered by similarity measure] = " << allMatches.size() << std::endl; int matchesSize = (int)allMatches.size(); if(matchesSize == 0) return; @@ -1095,15 +1089,12 @@ private: allMatches.erase( std::remove_if(allMatches.begin(), allMatches.end(), std::bind2nd(std::equal_to<float>(), infinity)), allMatches.end()); - if (out) *out << "Matches number [filtered by geometric consistency] = " << allMatches.size() << std::endl; matchesSize = (int)allMatches.size(); if(matchesSize == 0) return; - if (out) *out << "grouping ..."
<< std::endl; - Mat groupingMat((int)matchesSize, (int)matchesSize, CV_32F); groupingMat = Scalar(0); @@ -1151,8 +1142,6 @@ private: for(int g = 0; g < matchesSize; ++g) { - if (out) if (g % 100 == 0) *out << "G = " << g << std::endl; - group_t left = allMatchesInds; group_t group; @@ -1201,16 +1190,16 @@ private: cv::TickMeter::TickMeter() { reset(); } int64 cv::TickMeter::getTimeTicks() const { return sumTime; } -double cv::TickMeter::getTimeMicro() const { return (double)getTimeTicks()/cvGetTickFrequency(); } +double cv::TickMeter::getTimeMicro() const { return (double)getTimeTicks()/getTickFrequency(); } double cv::TickMeter::getTimeMilli() const { return getTimeMicro()*1e-3; } double cv::TickMeter::getTimeSec() const { return getTimeMilli()*1e-3; } int64 cv::TickMeter::getCounter() const { return counter; } void cv::TickMeter::reset() {startTime = 0; sumTime = 0; counter = 0; } -void cv::TickMeter::start(){ startTime = cvGetTickCount(); } +void cv::TickMeter::start(){ startTime = getTickCount(); } void cv::TickMeter::stop() { - int64 time = cvGetTickCount(); + int64 time = getTickCount(); if ( startTime == 0 ) return; @@ -1220,4 +1209,4 @@ void cv::TickMeter::stop() startTime = 0; } -std::ostream& cv::operator<<(std::ostream& out, const TickMeter& tm){ return out << tm.getTimeSec() << "sec"; } +//std::ostream& cv::operator<<(std::ostream& out, const TickMeter& tm){ return out << tm.getTimeSec() << "sec"; } diff --git a/modules/contrib/src/stereovar.cpp b/modules/contrib/src/stereovar.cpp index 30cd12d4c1..54dd82ac9f 100644 --- a/modules/contrib/src/stereovar.cpp +++ b/modules/contrib/src/stereovar.cpp @@ -239,8 +239,8 @@ void StereoVar::VariationalSolver(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level) void StereoVar::VCycle_MyFAS(Mat &I1, Mat &I2, Mat &I2x, Mat &_u, int level) { - CvSize imgSize = _u.size(); - CvSize frmSize = cvSize((int) (imgSize.width * pyrScale + 0.5), (int) (imgSize.height * pyrScale + 0.5)); + Size imgSize = _u.size(); + Size frmSize = Size((int) (imgSize.width * pyrScale + 0.5), (int) (imgSize.height * pyrScale + 0.5)); Mat I1_h, I2_h, I2x_h, u_h, U, U_h; //PRE relaxation @@ -285,7 +285,7 @@ void StereoVar::VCycle_MyFAS(Mat &I1, Mat &I2, Mat &I2x, Mat &_u, int level) void StereoVar::FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level) { double scale = std::pow(pyrScale, (double) level); - CvSize frmSize = cvSize((int) (u.cols * scale + 0.5), (int) (u.rows * scale + 0.5)); + Size frmSize = Size((int) (u.cols * scale + 0.5), (int) (u.rows * scale + 0.5)); Mat I1_h, I2_h, I2x_h, u_h; //scaling DOWN @@ -350,7 +350,7 @@ void StereoVar::autoParams() void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp ) { CV_Assert(left.size() == right.size() && left.type() == right.type()); - CvSize imgSize = left.size(); + Size imgSize = left.size(); int MaxD = MAX(labs(minDisp), labs(maxDisp)); int SignD = 1; if (MIN(minDisp, maxDisp) < 0) SignD = -1; if (minDisp >= maxDisp) {MaxD = 256; SignD = 1;} @@ -378,8 +378,8 @@ void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp ) equalizeHist(rightgray, rightgray); } if (poly_sigma > 0.0001) { - GaussianBlur(leftgray, leftgray, cvSize(poly_n, poly_n), poly_sigma); - GaussianBlur(rightgray, rightgray, cvSize(poly_n, poly_n), poly_sigma); + GaussianBlur(leftgray, leftgray, Size(poly_n, poly_n), poly_sigma); + GaussianBlur(rightgray, rightgray, Size(poly_n, poly_n), poly_sigma); } if (flags & USE_AUTO_PARAMS) { diff --git a/modules/core/doc/clustering.rst b/modules/core/doc/clustering.rst index 
60fb189285..557e92eb44 100644 --- a/modules/core/doc/clustering.rst +++ b/modules/core/doc/clustering.rst @@ -13,8 +13,6 @@ Finds centers of clusters and groups input samples around the clusters. .. ocv:cfunction:: int cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, CvTermCriteria termcrit, int attempts=1, CvRNG* rng=0, int flags=0, CvArr* _centers=0, double* compactness=0 ) -.. ocv:pyoldfunction:: cv.KMeans2(samples, nclusters, labels, termcrit, attempts=1, flags=0, centers=None) -> float - :param samples: Floating-point matrix of input samples, one row per sample. :param cluster_count: Number of clusters to split the set by. diff --git a/modules/core/doc/drawing_functions.rst b/modules/core/doc/drawing_functions.rst index 4f816c075a..451470a7b6 100644 --- a/modules/core/doc/drawing_functions.rst +++ b/modules/core/doc/drawing_functions.rst @@ -36,8 +36,6 @@ Draws a circle. .. ocv:cfunction:: void cvCircle( CvArr* img, CvPoint center, int radius, CvScalar color, int thickness=1, int line_type=8, int shift=0 ) -.. ocv:pyoldfunction:: cv.Circle(img, center, radius, color, thickness=1, lineType=8, shift=0)-> None - :param img: Image where the circle is drawn. :param center: Center of the circle. @@ -66,8 +64,6 @@ Clips the line against the image rectangle. .. ocv:cfunction:: int cvClipLine( CvSize img_size, CvPoint* pt1, CvPoint* pt2 ) -.. ocv:pyoldfunction:: cv.ClipLine(imgSize, pt1, pt2) -> (point1, point2) - :param imgSize: Image size. The image rectangle is ``Rect(0, 0, imgSize.width, imgSize.height)`` . :param imgRect: Image rectangle. @@ -93,12 +89,8 @@ Draws a simple or thick elliptic arc or fills an ellipse sector. .. ocv:cfunction:: void cvEllipse( CvArr* img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color, int thickness=1, int line_type=8, int shift=0 ) -.. ocv:pyoldfunction:: cv.Ellipse(img, center, axes, angle, start_angle, end_angle, color, thickness=1, lineType=8, shift=0)-> None - .. ocv:cfunction:: void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, int thickness=1, int line_type=8, int shift=0 ) -.. ocv:pyoldfunction:: cv.EllipseBox(img, box, color, thickness=1, lineType=8, shift=0)-> None - :param img: Image. :param center: Center of the ellipse. @@ -170,8 +162,6 @@ Fills a convex polygon. .. ocv:cfunction:: void cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, int line_type=8, int shift=0 ) -.. ocv:pyoldfunction:: cv.FillConvexPoly(img, pn, color, lineType=8, shift=0)-> None - :param img: Image. :param pts: Polygon vertices. @@ -202,8 +192,6 @@ Fills the area bounded by one or more polygons. .. ocv:cfunction:: void cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, int contours, CvScalar color, int line_type=8, int shift=0 ) -.. ocv:pyoldfunction:: cv.FillPoly(img, polys, color, lineType=8, shift=0)-> None - :param img: Image. :param pts: Array of polygons where each polygon is represented as an array of points. @@ -235,8 +223,6 @@ Calculates the width and height of a text string. .. ocv:cfunction:: void cvGetTextSize( const char* text_string, const CvFont* font, CvSize* text_size, int* baseline ) -.. ocv:pyoldfunction:: cv.GetTextSize(textString, font)-> (textSize, baseline) - :param text: Input text string. :param fontFace: Font to use. See the :ocv:func:`putText` for details. @@ -340,8 +326,6 @@ Draws a line segment connecting two points. .. 
ocv:cfunction:: void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 ) -.. ocv:pyoldfunction:: cv.Line(img, pt1, pt2, color, thickness=1, lineType=8, shift=0)-> None - :param img: Image. :param pt1: First point of the line segment. @@ -430,8 +414,6 @@ Draws a simple, thick, or filled up-right rectangle. .. ocv:cfunction:: void cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 ) -.. ocv:pyoldfunction:: cv.Rectangle(img, pt1, pt2, color, thickness=1, lineType=8, shift=0)-> None - :param img: Image. :param pt1: Vertex of the rectangle. @@ -464,8 +446,6 @@ Draws several polygonal curves. .. ocv:cfunction:: void cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, int is_closed, CvScalar color, int thickness=1, int line_type=8, int shift=0 ) -.. ocv:pyoldfunction:: cv.PolyLine(img, polys, is_closed, color, thickness=1, lineType=8, shift=0) -> None - :param img: Image. :param pts: Array of polygonal curves. @@ -497,8 +477,6 @@ Draws contours outlines or filled contours. .. ocv:cfunction:: void cvDrawContours( CvArr * img, CvSeq* contour, CvScalar external_color, CvScalar hole_color, int max_level, int thickness=1, int line_type=8, CvPoint offset=cvPoint(0,0) ) -.. ocv:pyoldfunction:: cv.DrawContours(img, contour, external_color, hole_color, max_level, thickness=1, lineType=8, offset=(0, 0))-> None - :param image: Destination image. :param contours: All the input contours. Each contour is stored as a point vector. @@ -580,8 +558,6 @@ Draws a text string. .. ocv:cfunction:: void cvPutText( CvArr* img, const char* text, CvPoint org, const CvFont* font, CvScalar color ) -.. ocv:pyoldfunction:: cv.PutText(img, text, org, font, color)-> None - :param img: Image. :param text: Text string to be drawn. diff --git a/modules/core/doc/dynamic_structures.rst b/modules/core/doc/dynamic_structures.rst index e333971c06..1f2abd480d 100644 --- a/modules/core/doc/dynamic_structures.rst +++ b/modules/core/doc/dynamic_structures.rst @@ -279,7 +279,6 @@ CloneSeq Creates a copy of a sequence. .. ocv:cfunction:: CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage=NULL ) -.. ocv:pyoldfunction:: cv.CloneSeq(seq, storage)-> None :param seq: Sequence @@ -388,9 +387,6 @@ Creates memory storage. .. ocv:cfunction:: CvMemStorage* cvCreateMemStorage( int block_size=0 ) -.. ocv:pyoldfunction:: cv.CreateMemStorage(blockSize=0) -> memstorage - - :param block_size: Size of the storage blocks in bytes. If it is 0, the block size is set to a default value - currently it is about 64K. The function creates an empty memory storage. See diff --git a/modules/core/doc/old_basic_structures.rst b/modules/core/doc/old_basic_structures.rst index 0596e04bff..d86daa4a51 100644 --- a/modules/core/doc/old_basic_structures.rst +++ b/modules/core/doc/old_basic_structures.rst @@ -478,8 +478,6 @@ Clears a specific array element. .. ocv:cfunction:: void cvClearND( CvArr* arr, const int* idx ) -.. ocv:pyoldfunction:: cv.ClearND(arr, idx)-> None - :param arr: Input array :param idx: Array of the element indices @@ -490,7 +488,6 @@ CloneImage Makes a full copy of an image, including the header, data, and ROI. .. ocv:cfunction:: IplImage* cvCloneImage(const IplImage* image) -.. ocv:pyoldfunction:: cv.CloneImage(image) -> image :param image: The original image @@ -499,7 +496,6 @@ CloneMat Creates a full matrix copy. .. ocv:cfunction:: CvMat* cvCloneMat(const CvMat* mat) -.. 
ocv:pyoldfunction:: cv.CloneMat(mat) -> mat :param mat: Matrix to be copied @@ -510,7 +506,6 @@ CloneMatND Creates full copy of a multi-dimensional array and returns a pointer to the copy. .. ocv:cfunction:: CvMatND* cvCloneMatND(const CvMatND* mat) -.. ocv:pyoldfunction:: cv.CloneMatND(mat) -> matND :param mat: Input array @@ -530,8 +525,6 @@ ConvertScale Converts one array to another with optional linear transformation. .. ocv:cfunction:: void cvConvertScale(const CvArr* src, CvArr* dst, double scale=1, double shift=0) -.. ocv:pyoldfunction:: cv.ConvertScale(src, dst, scale=1.0, shift=0.0)-> None -.. ocv:pyoldfunction:: cv.Convert(src, dst)-> None :: @@ -569,7 +562,6 @@ Copy Copies one array to another. .. ocv:cfunction:: void cvCopy(const CvArr* src, CvArr* dst, const CvArr* mask=NULL) -.. ocv:pyoldfunction:: cv.Copy(src, dst, mask=None)-> None :param src: The source array @@ -591,7 +583,6 @@ CreateData Allocates array data .. ocv:cfunction:: void cvCreateData(CvArr* arr) -.. ocv:pyoldfunction:: cv.CreateData(arr) -> None :param arr: Array header @@ -603,7 +594,6 @@ CreateImage Creates an image header and allocates the image data. .. ocv:cfunction:: IplImage* cvCreateImage(CvSize size, int depth, int channels) -.. ocv:pyoldfunction:: cv.CreateImage(size, depth, channels)->image :param size: Image width and height @@ -621,7 +611,6 @@ CreateImageHeader Creates an image header but does not allocate the image data. .. ocv:cfunction:: IplImage* cvCreateImageHeader(CvSize size, int depth, int channels) -.. ocv:pyoldfunction:: cv.CreateImageHeader(size, depth, channels) -> image :param size: Image width and height @@ -634,7 +623,6 @@ CreateMat Creates a matrix header and allocates the matrix data. .. ocv:cfunction:: CvMat* cvCreateMat( int rows, int cols, int type) -.. ocv:pyoldfunction:: cv.CreateMat(rows, cols, type) -> mat :param rows: Number of rows in the matrix @@ -652,7 +640,6 @@ CreateMatHeader Creates a matrix header but does not allocate the matrix data. .. ocv:cfunction:: CvMat* cvCreateMatHeader( int rows, int cols, int type) -.. ocv:pyoldfunction:: cv.CreateMatHeader(rows, cols, type) -> mat :param rows: Number of rows in the matrix @@ -667,7 +654,6 @@ CreateMatND Creates the header and allocates the data for a multi-dimensional dense array. .. ocv:cfunction:: CvMatND* cvCreateMatND( int dims, const int* sizes, int type) -.. ocv:pyoldfunction:: cv.CreateMatND(dims, type) -> matND :param dims: Number of array dimensions. This must not exceed CV_MAX_DIM (32 by default, but can be changed at build time). @@ -685,7 +671,6 @@ CreateMatNDHeader Creates a new matrix header but does not allocate the matrix data. .. ocv:cfunction:: CvMatND* cvCreateMatNDHeader( int dims, const int* sizes, int type) -.. ocv:pyoldfunction:: cv.CreateMatNDHeader(dims, type) -> matND :param dims: Number of array dimensions @@ -716,7 +701,6 @@ CrossProduct Calculates the cross product of two 3D vectors. .. ocv:cfunction:: void cvCrossProduct(const CvArr* src1, const CvArr* src2, CvArr* dst) -.. ocv:pyoldfunction:: cv.CrossProduct(src1, src2, dst)-> None :param src1: The first source vector @@ -742,7 +726,6 @@ DotProduct Calculates the dot product of two arrays in Euclidean metrics. .. ocv:cfunction:: double cvDotProduct(const CvArr* src1, const CvArr* src2) -.. ocv:pyoldfunction:: cv.DotProduct(src1, src2) -> float :param src1: The first source array @@ -767,11 +750,6 @@ Get?D .. ocv:cfunction:: CvScalar cvGet3D(const CvArr* arr, int idx0, int idx1, int idx2) .. 
ocv:cfunction:: CvScalar cvGetND( const CvArr* arr, const int* idx ) -.. ocv:pyoldfunction:: cv.Get1D(arr, idx) -> scalar -.. ocv:pyoldfunction:: cv.Get2D(arr, idx0, idx1) -> scalar -.. ocv:pyoldfunction:: cv.Get3D(arr, idx0, idx1, idx2) -> scalar -.. ocv:pyoldfunction:: cv.GetND(arr, indices) -> scalar - Return a specific array element. :param arr: Input array @@ -794,10 +772,6 @@ Returns one of more array columns. .. ocv:cfunction:: CvMat* cvGetCols( const CvArr* arr, CvMat* submat, int start_col, int end_col ) -.. ocv:pyoldfunction:: cv.GetCol(arr, col)-> submat - -.. ocv:pyoldfunction:: cv.GetCols(arr, startCol, endCol)-> submat - :param arr: Input array :param submat: Pointer to the resulting sub-array header @@ -815,7 +789,6 @@ GetDiag Returns one of array diagonals. .. ocv:cfunction:: CvMat* cvGetDiag(const CvArr* arr, CvMat* submat, int diag=0) -.. ocv:pyoldfunction:: cv.GetDiag(arr, diag=0)-> submat :param arr: Input array @@ -830,7 +803,6 @@ GetDims Return number of array dimensions .. ocv:cfunction:: int cvGetDims(const CvArr* arr, int* sizes=NULL) -.. ocv:pyoldfunction:: cv.GetDims(arr) -> (dim1, dim2, ...) :param arr: Input array @@ -861,7 +833,6 @@ GetElemType Returns type of array elements. .. ocv:cfunction:: int cvGetElemType(const CvArr* arr) -.. ocv:pyoldfunction:: cv.GetElemType(arr)-> int :param arr: Input array @@ -877,8 +848,6 @@ Returns image header for arbitrary array. .. ocv:cfunction:: IplImage* cvGetImage( const CvArr* arr, IplImage* image_header ) -.. ocv:pyoldfunction:: cv.GetImage(arr) -> iplimage - :param arr: Input array :param image_header: Pointer to ``IplImage`` structure used as a temporary buffer @@ -890,7 +859,6 @@ GetImageCOI Returns the index of the channel of interest. .. ocv:cfunction:: int cvGetImageCOI(const IplImage* image) -.. ocv:pyoldfunction:: cv.GetImageCOI(image) -> int :param image: A pointer to the image header @@ -902,7 +870,6 @@ GetImageROI Returns the image ROI. .. ocv:cfunction:: CvRect cvGetImageROI(const IplImage* image) -.. ocv:pyoldfunction:: cv.GetImageROI(image)-> CvRect :param image: A pointer to the image header @@ -913,7 +880,6 @@ GetMat Returns matrix header for arbitrary array. .. ocv:cfunction:: CvMat* cvGetMat(const CvArr* arr, CvMat* header, int* coi=NULL, int allowND=0) -.. ocv:pyoldfunction:: cv.GetMat(arr, allowND=0) -> mat :param arr: Input array @@ -1002,11 +968,6 @@ Return a specific element of single-channel 1D, 2D, 3D or nD array. .. ocv:cfunction:: double cvGetReal3D(const CvArr* arr, int idx0, int idx1, int idx2) .. ocv:cfunction:: double cvGetRealND( const CvArr* arr, const int* idx ) -.. ocv:pyoldfunction:: cv.GetReal1D(arr, idx0)->float -.. ocv:pyoldfunction:: cv.GetReal2D(arr, idx0, idx1)->float -.. ocv:pyoldfunction:: cv.GetReal3D(arr, idx0, idx1, idx2)->float -.. ocv:pyoldfunction:: cv.GetRealND(arr, idx)->float - :param arr: Input array. Must have a single channel. :param idx0: The first zero-based component of the element index @@ -1030,9 +991,6 @@ Returns array row or row span. .. ocv:cfunction:: CvMat* cvGetRows( const CvArr* arr, CvMat* submat, int start_row, int end_row, int delta_row=1 ) -.. ocv:pyoldfunction:: cv.GetRow(arr, row)-> submat -.. ocv:pyoldfunction:: cv.GetRows(arr, startRow, endRow, deltaRow=1)-> submat - :param arr: Input array :param submat: Pointer to the resulting sub-array header @@ -1053,7 +1011,6 @@ GetSize Returns size of matrix or image ROI. .. ocv:cfunction:: CvSize cvGetSize(const CvArr* arr) -.. 
ocv:pyoldfunction:: cv.GetSize(arr)-> (width, height) :param arr: array header @@ -1064,7 +1021,6 @@ GetSubRect Returns matrix header corresponding to the rectangular sub-array of input image or matrix. .. ocv:cfunction:: CvMat* cvGetSubRect(const CvArr* arr, CvMat* submat, CvRect rect) -.. ocv:pyoldfunction:: cv.GetSubRect(arr, rect) -> submat :param arr: Input array @@ -1357,7 +1313,6 @@ ResetImageROI Resets the image ROI to include the entire image and releases the ROI structure. .. ocv:cfunction:: void cvResetImageROI(IplImage* image) -.. ocv:pyoldfunction:: cv.ResetImageROI(image)-> None :param image: A pointer to the image header @@ -1374,8 +1329,6 @@ Changes shape of matrix/image without copying data. .. ocv:cfunction:: CvMat* cvReshape( const CvArr* arr, CvMat* header, int new_cn, int new_rows=0 ) -.. ocv:pyoldfunction:: cv.Reshape(arr, newCn, newRows=0) -> mat - :param arr: Input array :param header: Output header to be filled @@ -1412,8 +1365,6 @@ Changes the shape of a multi-dimensional array without copying the data. .. ocv:cfunction:: CvArr* cvReshapeMatND( const CvArr* arr, int sizeof_header, CvArr* header, int new_cn, int new_dims, int* new_sizes ) -.. ocv:pyoldfunction:: cv.ReshapeMatND(arr, newCn, newDims) -> mat - :param arr: Input array :param sizeof_header: Size of output header to distinguish between IplImage, CvMat and CvMatND output headers @@ -1453,7 +1404,6 @@ Set Sets every element of an array to a given value. .. ocv:cfunction:: void cvSet(CvArr* arr, CvScalar value, const CvArr* mask=NULL) -.. ocv:pyoldfunction:: cv.Set(arr, value, mask=None)-> None :param arr: The destination array @@ -1481,12 +1431,6 @@ Change the particular array element. .. ocv:cfunction:: void cvSetND( CvArr* arr, const int* idx, CvScalar value ) -.. ocv:pyoldfunction:: cv.Set1D(arr, idx, value) -> None -.. ocv:pyoldfunction:: cv.Set2D(arr, idx0, idx1, value) -> None -.. ocv:pyoldfunction:: cv.Set3D(arr, idx0, idx1, idx2, value) -> None -.. ocv:pyoldfunction:: cv.SetND(arr, indices, value) -> None - - :param arr: Input array :param idx0: The first zero-based component of the element index @@ -1506,7 +1450,6 @@ SetData Assigns user data to the array header. .. ocv:cfunction:: void cvSetData(CvArr* arr, void* data, int step) -.. ocv:pyoldfunction:: cv.SetData(arr, data, step)-> None :param arr: Array header @@ -1525,7 +1468,6 @@ SetImageCOI Sets the channel of interest in an IplImage. .. ocv:cfunction:: void cvSetImageCOI( IplImage* image, int coi) -.. ocv:pyoldfunction:: cv.SetImageCOI(image, coi)-> None :param image: A pointer to the image header @@ -1539,7 +1481,6 @@ SetImageROI Sets an image Region Of Interest (ROI) for a given rectangle. .. ocv:cfunction:: void cvSetImageROI( IplImage* image, CvRect rect) -.. ocv:pyoldfunction:: cv.SetImageROI(image, rect)-> None :param image: A pointer to the image header @@ -1562,11 +1503,6 @@ Change a specific array element. .. ocv:cfunction:: void cvSetRealND( CvArr* arr, const int* idx, double value ) -.. ocv:pyoldfunction:: cv.SetReal1D(arr, idx, value) -> None -.. ocv:pyoldfunction:: cv.SetReal2D(arr, idx0, idx1, value) -> None -.. ocv:pyoldfunction:: cv.SetReal3D(arr, idx0, idx1, idx2, value) -> None -.. ocv:pyoldfunction:: cv.SetRealND(arr, indices, value) -> None - :param arr: Input array :param idx0: The first zero-based component of the element index @@ -1588,7 +1524,6 @@ SetZero Clears the array. .. ocv:cfunction:: void cvSetZero(CvArr* arr) -.. 
ocv:pyoldfunction:: cv.SetZero(arr) -> None :param arr: Array to be cleared @@ -1599,7 +1534,6 @@ mGet Returns the particular element of single-channel floating-point matrix. .. ocv:cfunction:: double cvmGet(const CvMat* mat, int row, int col) -.. ocv:pyoldfunction:: cv.mGet(mat, row, col) -> float :param mat: Input matrix @@ -1614,7 +1548,6 @@ mSet Sets a specific element of a single-channel floating-point matrix. .. ocv:cfunction:: void cvmSet(CvMat* mat, int row, int col, double value) -.. ocv:pyoldfunction:: cv.mSet(mat, row, col, value)-> None :param mat: The matrix @@ -1655,7 +1588,6 @@ RNG Initializes a random number generator state. .. ocv:cfunction:: CvRNG cvRNG(int64 seed=-1) -.. ocv:pyoldfunction:: cv.RNG(seed=-1LL)-> CvRNG :param seed: 64-bit value used to initiate a random sequence @@ -1670,8 +1602,6 @@ Fills an array with random numbers and updates the RNG state. .. ocv:cfunction:: void cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, CvScalar param1, CvScalar param2 ) -.. ocv:pyoldfunction:: cv.RandArr(rng, arr, distType, param1, param2)-> None - :param rng: CvRNG state initialized by :ocv:cfunc:`RNG` :param arr: The destination array @@ -1695,7 +1625,6 @@ RandInt Returns a 32-bit unsigned integer and updates RNG. .. ocv:cfunction:: unsigned cvRandInt(CvRNG* rng) -.. ocv:pyoldfunction:: cv.RandInt(rng)-> unsigned :param rng: CvRNG state initialized by :ocv:cfunc:`RNG`. @@ -1707,7 +1636,6 @@ RandReal Returns a floating-point random number and updates RNG. .. ocv:cfunction:: double cvRandReal(CvRNG* rng) -.. ocv:pyoldfunction:: cv.RandReal(rng) -> float :param rng: RNG state initialized by :ocv:cfunc:`RNG` @@ -1718,8 +1646,6 @@ fromarray --------- Create a CvMat from an object that supports the array interface. -.. ocv:pyoldfunction:: cv.fromarray(array, allowND=False) -> mat - :param object: Any object that supports the array interface :param allowND: If true, will return a CvMatND diff --git a/modules/core/doc/old_xml_yaml_persistence.rst b/modules/core/doc/old_xml_yaml_persistence.rst index bdd4c2ded4..7492213b6c 100644 --- a/modules/core/doc/old_xml_yaml_persistence.rst +++ b/modules/core/doc/old_xml_yaml_persistence.rst @@ -327,8 +327,6 @@ Loads an object from a file. .. ocv:cfunction:: void* cvLoad( const char* filename, CvMemStorage* memstorage=NULL, const char* name=NULL, const char** real_name=NULL ) -.. ocv:pyoldfunction:: cv.Load(filename, storage=None, name=None)-> generic - :param filename: File name :param memstorage: Memory storage for dynamic structures, such as :ocv:struct:`CvSeq` or :ocv:struct:`CvGraph` . It is not used for matrices or images. @@ -596,8 +594,6 @@ Saves an object to a file. .. ocv:cfunction:: void cvSave( const char* filename, const void* struct_ptr, const char* name=NULL, const char* comment=NULL, CvAttrList attributes=cvAttrList() ) -.. ocv:pyoldfunction:: cv.Save(filename, structPtr, name=None, comment=None)-> None - :param filename: File name :param struct_ptr: Object to save diff --git a/modules/core/doc/operations_on_arrays.rst b/modules/core/doc/operations_on_arrays.rst index d42f2890b5..2df1461e20 100644 --- a/modules/core/doc/operations_on_arrays.rst +++ b/modules/core/doc/operations_on_arrays.rst @@ -36,8 +36,6 @@ Calculates the per-element absolute difference between two arrays or between an .. ocv:cfunction:: void cvAbsDiff(const CvArr* src1, const CvArr* src2, CvArr* dst) .. ocv:cfunction:: void cvAbsDiffS(const CvArr* src, CvArr* dst, CvScalar value) -.. ocv:pyoldfunction:: cv.AbsDiff(src1, src2, dst)-> None -.. 
ocv:pyoldfunction:: cv.AbsDiffS(src, dst, value)-> None :param src1: first input array or a scalar. @@ -90,8 +88,6 @@ Calculates the per-element sum of two arrays or an array and a scalar. .. ocv:cfunction:: void cvAdd(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL) .. ocv:cfunction:: void cvAddS(const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL) -.. ocv:pyoldfunction:: cv.Add(src1, src2, dst, mask=None)-> None -.. ocv:pyoldfunction:: cv.AddS(src, value, dst, mask=None)-> None :param src1: first input array or a scalar. @@ -160,7 +156,6 @@ Calculates the weighted sum of two arrays. .. ocv:pyfunction:: cv2.addWeighted(src1, alpha, src2, beta, gamma[, dst[, dtype]]) -> dst .. ocv:cfunction:: void cvAddWeighted(const CvArr* src1, double alpha, const CvArr* src2, double beta, double gamma, CvArr* dst) -.. ocv:pyoldfunction:: cv.AddWeighted(src1, alpha, src2, beta, gamma, dst)-> None :param src1: first input array. @@ -210,8 +205,6 @@ Calculates the per-element bit-wise conjunction of two arrays or an array and a .. ocv:cfunction:: void cvAnd(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL) .. ocv:cfunction:: void cvAndS(const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL) -.. ocv:pyoldfunction:: cv.And(src1, src2, dst, mask=None)-> None -.. ocv:pyoldfunction:: cv.AndS(src, value, dst, mask=None)-> None :param src1: first input array or a scalar. @@ -262,7 +255,6 @@ Inverts every bit of an array. .. ocv:pyfunction:: cv2.bitwise_not(src[, dst[, mask]]) -> dst .. ocv:cfunction:: void cvNot(const CvArr* src, CvArr* dst) -.. ocv:pyoldfunction:: cv.Not(src, dst)-> None :param src: input array. @@ -290,8 +282,6 @@ Calculates the per-element bit-wise disjunction of two arrays or an array and a .. ocv:cfunction:: void cvOr(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL) .. ocv:cfunction:: void cvOrS(const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL) -.. ocv:pyoldfunction:: cv.Or(src1, src2, dst, mask=None)-> None -.. ocv:pyoldfunction:: cv.OrS(src, value, dst, mask=None)-> None :param src1: first input array or a scalar. @@ -342,8 +332,6 @@ Calculates the per-element bit-wise "exclusive or" operation on two arrays or an .. ocv:cfunction:: void cvXor(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL) .. ocv:cfunction:: void cvXorS(const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL) -.. ocv:pyoldfunction:: cv.Xor(src1, src2, dst, mask=None)-> None -.. ocv:pyoldfunction:: cv.XorS(src, value, dst, mask=None)-> None :param src1: first input array or a scalar. @@ -396,8 +384,6 @@ Calculates the covariance matrix of a set of vectors. .. ocv:cfunction:: void cvCalcCovarMatrix( const CvArr** vects, int count, CvArr* cov_mat, CvArr* avg, int flags ) -.. ocv:pyoldfunction:: cv.CalcCovarMatrix(vects, covMat, avg, flags)-> None - :param samples: samples stored either as separate matrices or as rows/columns of a single matrix. :param nsamples: number of samples when they are stored separately. @@ -456,8 +442,6 @@ Calculates the magnitude and angle of 2D vectors. .. ocv:cfunction:: void cvCartToPolar( const CvArr* x, const CvArr* y, CvArr* magnitude, CvArr* angle=NULL, int angle_in_degrees=0 ) -.. ocv:pyoldfunction:: cv.CartToPolar(x, y, magnitude, angle=None, angleInDegrees=0)-> None - :param x: array of x-coordinates; this must be a single-precision or double-precision floating-point array. 
:param y: array of y-coordinates, that must have the same size and same type as ``x``. @@ -518,12 +502,8 @@ Performs the per-element comparison of two arrays or an array and scalar value. .. ocv:cfunction:: void cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op ) -.. ocv:pyoldfunction:: cv.Cmp(src1, src2, dst, cmpOp)-> None - .. ocv:cfunction:: void cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op ) -.. ocv:pyoldfunction:: cv.CmpS(src, value, dst, cmpOp)-> None - :param src1: first input array or a scalar (in the case of ``cvCmp``, ``cv.Cmp``, ``cvCmpS``, ``cv.CmpS`` it is always an array); when it is an array, it must have a single channel. :param src2: second input array or a scalar (in the case of ``cvCmp`` and ``cv.Cmp`` it is always an array; in the case of ``cvCmpS``, ``cv.CmpS`` it is always a scalar); when it is an array, it must have a single channel. @@ -624,7 +604,6 @@ Scales, calculates absolute values, and converts the result to 8-bit. .. ocv:pyfunction:: cv2.convertScaleAbs(src[, dst[, alpha[, beta]]]) -> dst .. ocv:cfunction:: void cvConvertScaleAbs(const CvArr* src, CvArr* dst, double scale=1, double shift=0) -.. ocv:pyoldfunction:: cv.ConvertScaleAbs(src, dst, scale=1.0, shift=0.0)-> None :param src: input array. @@ -668,8 +647,6 @@ Counts non-zero array elements. .. ocv:cfunction:: int cvCountNonZero(const CvArr* arr) -.. ocv:pyoldfunction:: cv.CountNonZero(arr)-> int - :param src: single-channel array. The function returns the number of non-zero elements in ``src`` : @@ -760,7 +737,6 @@ Performs a forward or inverse discrete Cosine transform of 1D or 2D array. .. ocv:pyfunction:: cv2.dct(src[, dst[, flags]]) -> dst .. ocv:cfunction:: void cvDCT(const CvArr* src, CvArr* dst, int flags) -.. ocv:pyoldfunction:: cv.DCT(src, dst, flags)-> None :param src: input floating-point array. @@ -855,8 +831,6 @@ Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating- .. ocv:cfunction:: void cvDFT( const CvArr* src, CvArr* dst, int flags, int nonzero_rows=0 ) -.. ocv:pyoldfunction:: cv.DFT(src, dst, flags, nonzeroRows=0)-> None - :param src: input array that could be real or complex. :param dst: output array whose size and type depends on the ``flags`` . @@ -1011,7 +985,6 @@ Performs per-element division of two arrays or a scalar by an array. .. ocv:pyfunction:: cv2.divide(scale, src2[, dst[, dtype]]) -> dst .. ocv:cfunction:: void cvDiv(const CvArr* src1, const CvArr* src2, CvArr* dst, double scale=1) -.. ocv:pyoldfunction:: cv.Div(src1, src2, dst, scale=1) -> None :param src1: first input array. @@ -1058,8 +1031,6 @@ Returns the determinant of a square floating-point matrix. .. ocv:cfunction:: double cvDet( const CvArr* mat ) -.. ocv:pyoldfunction:: cv.Det(mat) -> float - :param mtx: input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type and square size. The function ``determinant`` calculates and returns the determinant of the specified matrix. For small matrices ( ``mtx.cols=mtx.rows<=3`` ), @@ -1087,8 +1058,6 @@ Calculates eigenvalues and eigenvectors of a symmetric matrix. .. ocv:cfunction:: void cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, double eps=0, int lowindex=-1, int highindex=-1 ) -.. ocv:pyoldfunction:: cv.EigenVV(mat, evects, evals, eps, lowindex=-1, highindex=-1)-> None - :param src: input matrix that must have ``CV_32FC1`` or ``CV_64FC1`` type, square size and be symmetrical (``src`` :sup:`T` == ``src``). 
:param eigenvalues: output vector of eigenvalues of the same type as ``src``; the eigenvalues are stored in the descending order. @@ -1118,7 +1087,6 @@ Calculates the exponent of every array element. .. ocv:pyfunction:: cv2.exp(src[, dst]) -> dst .. ocv:cfunction:: void cvExp(const CvArr* src, CvArr* dst) -.. ocv:pyoldfunction:: cv.Exp(src, dst)-> None :param src: input array. @@ -1199,8 +1167,6 @@ Flips a 2D array around vertical, horizontal, or both axes. .. ocv:cfunction:: void cvFlip( const CvArr* src, CvArr* dst=NULL, int flip_mode=0 ) -.. ocv:pyoldfunction:: cv.Flip(src, dst=None, flipMode=0)-> None - :param src: input array. :param dst: output array of the same size and type as ``src``. @@ -1247,7 +1213,6 @@ Performs generalized matrix multiplication. .. ocv:pyfunction:: cv2.gemm(src1, src2, alpha, src3, gamma[, dst[, flags]]) -> dst .. ocv:cfunction:: void cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, const CvArr* src3, double beta, CvArr* dst, int tABC=0) -.. ocv:pyoldfunction:: cv.GEMM(src1, src2, alpha, src3, beta, dst, tABC=0)-> None :param src1: first multiplied input matrix that should have ``CV_32FC1``, ``CV_64FC1``, ``CV_32FC2``, or ``CV_64FC2`` type. @@ -1291,7 +1256,6 @@ Returns the optimal DFT size for a given vector size. .. ocv:pyfunction:: cv2.getOptimalDFTSize(vecsize) -> retval .. ocv:cfunction:: int cvGetOptimalDFTSize(int size0) -.. ocv:pyoldfunction:: cv.GetOptimalDFTSize(size0)-> int :param vecsize: vector size. @@ -1375,8 +1339,6 @@ Checks if array elements lie between the elements of two other arrays. .. ocv:cfunction:: void cvInRange(const CvArr* src, const CvArr* lower, const CvArr* upper, CvArr* dst) .. ocv:cfunction:: void cvInRangeS(const CvArr* src, CvScalar lower, CvScalar upper, CvArr* dst) -.. ocv:pyoldfunction:: cv.InRange(src, lower, upper, dst)-> None -.. ocv:pyoldfunction:: cv.InRangeS(src, lower, upper, dst)-> None :param src: first input array. @@ -1417,8 +1379,6 @@ Finds the inverse or pseudo-inverse of a matrix. .. ocv:cfunction:: double cvInvert( const CvArr* src, CvArr* dst, int method=CV_LU ) -.. ocv:pyoldfunction:: cv.Invert(src, dst, method=CV_LU) -> float - :param src: input floating-point ``M x N`` matrix. :param dst: output matrix of ``N x M`` size and the same type as ``src``. @@ -1456,7 +1416,6 @@ Calculates the natural logarithm of every array element. .. ocv:pyfunction:: cv2.log(src[, dst]) -> dst .. ocv:cfunction:: void cvLog(const CvArr* src, CvArr* dst) -.. ocv:pyoldfunction:: cv.Log(src, dst)-> None :param src: input array. @@ -1492,7 +1451,6 @@ Performs a look-up table transform of an array. .. ocv:pyfunction:: cv2.LUT(src, lut[, dst]) -> dst .. ocv:cfunction:: void cvLUT(const CvArr* src, CvArr* dst, const CvArr* lut) -.. ocv:pyoldfunction:: cv.LUT(src, dst, lut)-> None :param src: input array of 8-bit elements. @@ -1558,8 +1516,6 @@ Calculates the Mahalanobis distance between two vectors. .. ocv:cfunction:: double cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat ) -.. ocv:pyoldfunction:: cv.Mahalonobis(vec1, vec2, mat) -> None - :param vec1: first 1D input vector. :param vec2: second 1D input vector. @@ -1596,8 +1552,6 @@ Calculates per-element maximum of two arrays or an array and a scalar. .. ocv:cfunction:: void cvMax(const CvArr* src1, const CvArr* src2, CvArr* dst) .. ocv:cfunction:: void cvMaxS(const CvArr* src, double value, CvArr* dst) -.. ocv:pyoldfunction:: cv.Max(src1, src2, dst)-> None -.. ocv:pyoldfunction:: cv.MaxS(src, value, dst)-> None :param src1: first input array. 
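The hunks above delete the legacy ``cv.*`` signatures for the per-element array operations; the ``cv2`` entries stay. As a minimal illustrative sketch only, not part of the diff itself, and assuming NumPy plus the ``cv2`` bindings built from this tree, the removed ``cv.Max``/``cv.InRange`` calls map onto return-value-style functions::

    import numpy as np
    import cv2

    a = np.arange(16, dtype=np.uint8).reshape(4, 4)
    b = np.full((4, 4), 7, dtype=np.uint8)

    # cv.Max(src1, src2, dst) filled a preallocated dst;
    # cv2.max allocates and returns the result instead.
    m = cv2.max(a, b)

    # cv.InRange(src, lower, upper, dst) likewise becomes a call
    # that returns the 8-bit mask directly.
    mask = cv2.inRange(a, np.array([3]), np.array([12]))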
@@ -1643,8 +1597,6 @@ Calculates an average (mean) of array elements. .. ocv:cfunction:: CvScalar cvAvg( const CvArr* arr, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.Avg(arr, mask=None) -> scalar - :param src: input array that should have from 1 to 4 channels so that the result can be stored in :ocv:class:`Scalar_` . :param mask: optional operation mask. @@ -1676,8 +1628,6 @@ Calculates a mean and standard deviation of array elements. .. ocv:cfunction:: void cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.AvgSdv(arr, mask=None) -> (mean, stdDev) - :param src: input array that should have from 1 to 4 channels so that the results can be stored in :ocv:class:`Scalar_` 's. :param mean: output parameter: calculated mean value. @@ -1717,7 +1667,6 @@ Creates one multichannel array out of several single-channel ones. .. ocv:pyfunction:: cv2.merge(mv[, dst]) -> dst .. ocv:cfunction:: void cvMerge(const CvArr* src0, const CvArr* src1, const CvArr* src2, const CvArr* src3, CvArr* dst) -.. ocv:pyoldfunction:: cv.Merge(src0, src1, src2, src3, dst)-> None :param mv: input array or vector of matrices to be merged; all the matrices in ``mv`` must have the same size and the same depth. @@ -1757,8 +1706,6 @@ Calculates per-element minimum of two arrays or an array and a scalar. .. ocv:cfunction:: void cvMin(const CvArr* src1, const CvArr* src2, CvArr* dst) .. ocv:cfunction:: void cvMinS(const CvArr* src, double value, CvArr* dst) -.. ocv:pyoldfunction:: cv.Min(src1, src2, dst)-> None -.. ocv:pyoldfunction:: cv.MinS(src, value, dst)-> None :param src1: first input array. @@ -1838,8 +1785,6 @@ Finds the global minimum and maximum in an array. .. ocv:cfunction:: void cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, CvPoint* min_loc=NULL, CvPoint* max_loc=NULL, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.MinMaxLoc(arr, mask=None)-> (minVal, maxVal, minLoc, maxLoc) - :param src: input single-channel array. :param minVal: pointer to the returned minimum value; ``NULL`` is used if not required. @@ -1888,8 +1833,6 @@ Copies specified channels from input arrays to the specified channels of output .. ocv:cfunction:: void cvMixChannels( const CvArr** src, int src_count, CvArr** dst, int dst_count, const int* from_to, int pair_count ) -.. ocv:pyoldfunction:: cv.MixChannels(src, dst, fromTo) -> None - :param src: input array or vector of matrices; all of the matrices must have the same size and the same depth. :param nsrcs: number of matrices in ``src``. @@ -1942,7 +1885,6 @@ Performs the per-element multiplication of two Fourier spectrums. .. ocv:pyfunction:: cv2.mulSpectrums(a, b, flags[, c[, conjB]]) -> c .. ocv:cfunction:: void cvMulSpectrums( const CvArr* src1, const CvArr* src2, CvArr* dst, int flags) -.. ocv:pyoldfunction:: cv.MulSpectrums(src1, src2, dst, flags)-> None :param src1: first input array. @@ -1972,7 +1914,6 @@ Calculates the per-element scaled product of two arrays. .. ocv:pyfunction:: cv2.multiply(src1, src2[, dst[, scale[, dtype]]]) -> dst .. ocv:cfunction:: void cvMul(const CvArr* src1, const CvArr* src2, CvArr* dst, double scale=1) -.. ocv:pyoldfunction:: cv.Mul(src1, src2, dst, scale=1) -> None :param src1: first input array. @@ -2022,8 +1963,6 @@ Calculates the product of a matrix and its transposition. .. ocv:cfunction:: void cvMulTransposed( const CvArr* src, CvArr* dst, int order, const CvArr* delta=NULL, double scale=1. ) -..
ocv:pyoldfunction:: cv.MulTransposed(src, dst, order, delta=None, scale=1.0) -> None - :param src: input single-channel matrix. Note that unlike :ocv:func:`gemm`, the function can multiply not only floating-point matrices. :param dst: output square matrix. @@ -2074,8 +2013,6 @@ Calculates an absolute array norm, an absolute difference norm, or a relative di .. ocv:cfunction:: double cvNorm( const CvArr* arr1, const CvArr* arr2=NULL, int norm_type=CV_L2, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.Norm(arr1, arr2, normType=CV_L2, mask=None) -> float - :param src1: first input array. :param src2: second input array of the same size and the same type as ``src1``. @@ -2331,7 +2268,6 @@ Performs the perspective matrix transformation of vectors. .. ocv:pyfunction:: cv2.perspectiveTransform(src, m[, dst]) -> dst .. ocv:cfunction:: void cvPerspectiveTransform(const CvArr* src, CvArr* dst, const CvMat* mat) -.. ocv:pyoldfunction:: cv.PerspectiveTransform(src, dst, mat)-> None :param src: input two-channel or three-channel floating-point array; each element is a 2D/3D vector to be transformed. @@ -2405,8 +2341,6 @@ Calculates x and y coordinates of 2D vectors from their magnitude and angle. .. ocv:cfunction:: void cvPolarToCart( const CvArr* magnitude, const CvArr* angle, CvArr* x, CvArr* y, int angle_in_degrees=0 ) -.. ocv:pyoldfunction:: cv.PolarToCart(magnitude, angle, x, y, angleInDegrees=0)-> None - :param magnitude: input floating-point array of magnitudes of 2D vectors; it can be an empty matrix (``=Mat()``), in this case, the function assumes that all the magnitudes are =1; if it is not empty, it must have the same size and type as ``angle``. :param angle: input floating-point array of angles of 2D vectors. @@ -2446,7 +2380,6 @@ Raises every array element to a power. .. ocv:pyfunction:: cv2.pow(src, power[, dst]) -> dst .. ocv:cfunction:: void cvPow( const CvArr* src, CvArr* dst, double power) -.. ocv:pyoldfunction:: cv.Pow(src, dst, power)-> None :param src: input array. @@ -2714,7 +2647,6 @@ Reduces a matrix to a vector. .. ocv:pyfunction:: cv2.reduce(src, dim, rtype[, dst[, dtype]]) -> dst .. ocv:cfunction:: void cvReduce(const CvArr* src, CvArr* dst, int dim=-1, int op=CV_REDUCE_SUM) -.. ocv:pyoldfunction:: cv.Reduce(src, dst, dim=-1, op=CV_REDUCE_SUM)-> None :param src: input 2D matrix. @@ -2752,8 +2684,6 @@ Fills the output array with repeated copies of the input array. .. ocv:cfunction:: void cvRepeat(const CvArr* src, CvArr* dst) -.. ocv:pyoldfunction:: cv.Repeat(src, dst)-> None - :param src: input array to replicate. :param dst: output array of the same type as ``src``. @@ -2788,7 +2718,6 @@ Calculates the sum of a scaled array and another array. .. ocv:pyfunction:: cv2.scaleAdd(src1, alpha, src2[, dst]) -> dst .. ocv:cfunction:: void cvScaleAdd(const CvArr* src1, CvScalar scale, const CvArr* src2, CvArr* dst) -.. ocv:pyoldfunction:: cv.ScaleAdd(src1, scale, src2, dst)-> None :param src1: first input array. @@ -2832,8 +2761,6 @@ Initializes a scaled identity matrix. .. ocv:cfunction:: void cvSetIdentity(CvArr* mat, CvScalar value=cvRealScalar(1)) -.. ocv:pyoldfunction:: cv.SetIdentity(mat, value=1)-> None - :param mtx: matrix to initialize (not necessarily square). :param value: value to assign to diagonal elements. @@ -2870,7 +2797,6 @@ Solves one or more linear systems or least-squares problems. .. ocv:pyfunction:: cv2.solve(src1, src2[, dst[, flags]]) -> retval, dst .. ocv:cfunction:: int cvSolve(const CvArr* src1, const CvArr* src2, CvArr* dst, int method=CV_LU) -.. 
ocv:pyoldfunction:: cv.Solve(A, B, X, method=CV_LU)-> None :param src1: input matrix on the left-hand side of the system. @@ -2921,8 +2847,6 @@ Finds the real roots of a cubic equation. .. ocv:cfunction:: int cvSolveCubic( const CvMat* coeffs, CvMat* roots ) -.. ocv:pyoldfunction:: cv.SolveCubic(coeffs, roots)-> None - :param coeffs: equation coefficients, an array of 3 or 4 elements. :param roots: output array of real roots that has 1 or 3 elements. @@ -3048,8 +2972,6 @@ Divides a multi-channel array into several single-channel arrays. .. ocv:cfunction:: void cvSplit(const CvArr* src, CvArr* dst0, CvArr* dst1, CvArr* dst2, CvArr* dst3) -.. ocv:pyoldfunction:: cv.Split(src, dst0, dst1, dst2, dst3)-> None - :param src: input multi-channel array. :param mv: output array or vector of arrays; in the first variant of the function the number of arrays must match ``src.channels()``; the arrays themselves are reallocated, if needed. @@ -3080,7 +3002,6 @@ Calculates a square root of array elements. .. ocv:pyfunction:: cv2.sqrt(src[, dst]) -> dst .. ocv:cfunction:: float cvSqrt(float value) -.. ocv:pyoldfunction:: cv.Sqrt(value)-> float :param src: input floating-point array. @@ -3107,10 +3028,6 @@ Calculates the per-element difference between two arrays or array and a scalar. .. ocv:cfunction:: void cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL ) .. ocv:cfunction:: void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.Sub(src1, src2, dst, mask=None) -> None -.. ocv:pyoldfunction:: cv.SubRS(src, value, dst, mask=None) -> None -.. ocv:pyoldfunction:: cv.SubS(src, value, dst, mask=None) -> None - :param src1: first input array or a scalar. :param src2: second input array or a scalar. @@ -3243,8 +3160,6 @@ Performs SVD of a matrix .. ocv:cfunction:: void cvSVD( CvArr* A, CvArr* W, CvArr* U=NULL, CvArr* V=NULL, int flags=0 ) -.. ocv:pyoldfunction:: cv.SVD(A, W, U=None, V=None, flags=0) -> None - :param src: decomposed matrix :param w: calculated singular values @@ -3293,8 +3208,6 @@ Performs a singular value back substitution. .. ocv:cfunction:: void cvSVBkSb( const CvArr* W, const CvArr* U, const CvArr* V, const CvArr* B, CvArr* X, int flags ) -.. ocv:pyoldfunction:: cv.SVBkSb(W, U, V, B, X, flags) -> None - :param w: singular values :param u: left singular vectors @@ -3329,8 +3242,6 @@ Calculates the sum of array elements. .. ocv:cfunction:: CvScalar cvSum(const CvArr* arr) -.. ocv:pyoldfunction:: cv.Sum(arr) -> scalar - :param arr: input array that must have from 1 to 4 channels. The functions ``sum`` calculate and return the sum of array elements, independently for each channel. @@ -3374,8 +3285,6 @@ Returns the trace of a matrix. .. ocv:cfunction:: CvScalar cvTrace(const CvArr* mat) -.. ocv:pyoldfunction:: cv.Trace(mat) -> scalar - :param mat: input matrix. The function ``trace`` returns the sum of the diagonal elements of the matrix ``mtx`` . @@ -3396,8 +3305,6 @@ Performs the matrix transformation of every array element. .. ocv:cfunction:: void cvTransform( const CvArr* src, CvArr* dst, const CvMat* transmat, const CvMat* shiftvec=NULL ) -.. ocv:pyoldfunction:: cv.Transform(src, dst, transmat, shiftvec=None)-> None - :param src: input array that must have as many channels (1 to 4) as ``m.cols`` or ``m.cols-1``. :param dst: output array of the same size and depth as ``src``; it has as many channels as ``m.rows``. @@ -3447,7 +3354,6 @@ Transposes a matrix. .. ocv:pyfunction:: cv2.transpose(src[, dst]) -> dst .. 
ocv:cfunction:: void cvTranspose(const CvArr* src, CvArr* dst) -.. ocv:pyoldfunction:: cv.Transpose(src, dst)-> None :param src: input array. diff --git a/modules/core/doc/utility_and_system_functions_and_macros.rst b/modules/core/doc/utility_and_system_functions_and_macros.rst index 16038e0a47..9cecb11a0f 100644 --- a/modules/core/doc/utility_and_system_functions_and_macros.rst +++ b/modules/core/doc/utility_and_system_functions_and_macros.rst @@ -76,7 +76,6 @@ Calculates the angle of a 2D vector in degrees. .. ocv:pyfunction:: cv2.fastAtan2(y, x) -> retval .. ocv:cfunction:: float cvFastArctan(float y, float x) -.. ocv:pyoldfunction:: cv.FastArctan(y, x)-> float :param x: x-coordinate of the vector. @@ -95,8 +94,6 @@ Computes the cube root of an argument. .. ocv:cfunction:: float cvCbrt( float value ) -.. ocv:pyoldfunction:: cv.Cbrt(value)-> float - :param val: A function argument. The function ``cubeRoot`` computes :math:`\sqrt[3]{\texttt{val}}`. Negative arguments are handled correctly. NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for single-precision data. @@ -107,7 +104,6 @@ Ceil Rounds floating-point number to the nearest integer not smaller than the original. .. ocv:cfunction:: int cvCeil(double value) -.. ocv:pyoldfunction:: cv.Ceil(value) -> int :param value: floating-point number. If the value is outside of ``INT_MIN`` ... ``INT_MAX`` range, the result is not defined. @@ -123,7 +119,6 @@ Floor Rounds floating-point number to the nearest integer not larger than the original. .. ocv:cfunction:: int cvFloor(double value) -.. ocv:pyoldfunction:: cv.Floor(value) -> int :param value: floating-point number. If the value is outside of ``INT_MIN`` ... ``INT_MAX`` range, the result is not defined. @@ -139,7 +134,6 @@ Round Rounds floating-point number to the nearest integer .. ocv:cfunction:: int cvRound(double value) -.. ocv:pyoldfunction:: cv.Round(value) -> int :param value: floating-point number. If the value is outside of ``INT_MIN`` ... ``INT_MAX`` range, the result is not defined. @@ -149,7 +143,6 @@ IsInf Determines if the argument is Infinity. .. ocv:cfunction:: int cvIsInf(double value) -.. ocv:pyoldfunction:: cv.IsInf(value)-> int :param value: The input floating-point value @@ -160,7 +153,6 @@ IsNaN Determines if the argument is Not A Number. .. ocv:cfunction:: int cvIsNaN(double value) -.. ocv:pyoldfunction:: cv.IsNaN(value)-> int :param value: The input floating-point value diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp index 8a54e7f8e4..e0b943676a 100644 --- a/modules/core/include/opencv2/core/mat.hpp +++ b/modules/core/include/opencv2/core/mat.hpp @@ -717,8 +717,8 @@ public: template MatConstIterator_<_Tp> begin() const; template MatConstIterator_<_Tp> end() const; - enum { MAGIC_VAL=0x42FF0000, AUTO_STEP=0, CONTINUOUS_FLAG=CV_MAT_CONT_FLAG, SUBMATRIX_FLAG=CV_SUBMAT_FLAG }; - enum { MAGIC_MASK=0xFFFF0000, TYPE_MASK=0x00000FFF, DEPTH_MASK=7 }; + enum { MAGIC_VAL = 0x42FF0000, AUTO_STEP = 0, CONTINUOUS_FLAG = CV_MAT_CONT_FLAG, SUBMATRIX_FLAG = CV_SUBMAT_FLAG }; + enum { MAGIC_MASK = 0xFFFF0000, TYPE_MASK = 0x00000FFF, DEPTH_MASK = 7 }; /*! 
includes several bit-fields: - the magic signature diff --git a/modules/highgui/doc/reading_and_writing_images_and_video.rst b/modules/highgui/doc/reading_and_writing_images_and_video.rst index 914856dc3a..144ba222fd 100644 --- a/modules/highgui/doc/reading_and_writing_images_and_video.rst +++ b/modules/highgui/doc/reading_and_writing_images_and_video.rst @@ -67,10 +67,6 @@ Loads an image from a file. .. ocv:cfunction:: CvMat* cvLoadImageM( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR ) -.. ocv:pyoldfunction:: cv.LoadImage(filename, iscolor=CV_LOAD_IMAGE_COLOR) -> None - -.. ocv:pyoldfunction:: cv.LoadImageM(filename, iscolor=CV_LOAD_IMAGE_COLOR) -> None - :param filename: Name of file to be loaded. :param flags: Flags specifying the color type of a loaded image: @@ -126,8 +122,6 @@ Saves an image to a specified file. .. ocv:cfunction:: int cvSaveImage( const char* filename, const CvArr* image, const int* params=0 ) -.. ocv:pyoldfunction:: cv.SaveImage(filename, image)-> None - :param filename: Name of the file. :param image: Image to be saved. @@ -245,9 +239,7 @@ VideoCapture constructors. .. ocv:pyfunction:: cv2.VideoCapture(device) -> .. ocv:cfunction:: CvCapture* cvCaptureFromCAM( int device ) -.. ocv:pyoldfunction:: cv.CaptureFromCAM(index) -> CvCapture .. ocv:cfunction:: CvCapture* cvCaptureFromFile( const char* filename ) -.. ocv:pyoldfunction:: cv.CaptureFromFile(filename) -> CvCapture :param filename: name of the opened video file @@ -308,8 +300,6 @@ Grabs the next frame from video file or capturing device. .. ocv:cfunction:: int cvGrabFrame(CvCapture* capture) -.. ocv:pyoldfunction:: cv.GrabFrame(capture) -> int - The methods/functions grab the next frame from a video file or camera and return true (non-zero) in the case of success. The primary use of the function is in multi-camera environments, especially when the cameras do not have hardware synchronization. That is, you call ``VideoCapture::grab()`` for each camera and after that call the slower method ``VideoCapture::retrieve()`` to decode and get a frame from each camera. This way the overhead of demosaicing or motion JPEG decompression, etc., is eliminated and the retrieved frames from different cameras will be closer in time. @@ -327,8 +317,6 @@ Decodes and returns the grabbed video frame. .. ocv:cfunction:: IplImage* cvRetrieveFrame( CvCapture* capture, int streamIdx=0 ) -.. ocv:pyoldfunction:: cv.RetrieveFrame(capture) -> image - The methods/functions decode and return the just grabbed frame. If no frames have been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer. .. note:: OpenCV 1.x functions ``cvRetrieveFrame`` and ``cv.RetrieveFrame`` return the image stored inside the video capturing structure. It is not allowed to modify or release the image! You can copy the frame using :ocv:cfunc:`cvCloneImage` and then do whatever you want with the copy. @@ -346,8 +334,6 @@ Grabs, decodes and returns the next video frame. .. ocv:cfunction:: IplImage* cvQueryFrame(CvCapture* capture) -.. ocv:pyoldfunction:: cv.QueryFrame(capture) -> image - The methods/functions combine :ocv:func:`VideoCapture::grab` and :ocv:func:`VideoCapture::retrieve` in one call. This is the most convenient method for reading video files or capturing data from cameras; the methods decode and return the just grabbed frame.
If no frames have been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer. .. note:: OpenCV 1.x functions ``cvRetrieveFrame`` and ``cv.RetrieveFrame`` return the image stored inside the video capturing structure. It is not allowed to modify or release the image! You can copy the frame using :ocv:cfunc:`cvCloneImage` and then do whatever you want with the copy. @@ -363,9 +349,6 @@ Returns the specified ``VideoCapture`` property .. ocv:cfunction:: double cvGetCaptureProperty( CvCapture* capture, int property_id ) -.. ocv:pyoldfunction:: cv.GetCaptureProperty(capture, property_id) -> float - - :param propId: Property identifier. It can be one of the following: * **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds or video capture timestamp. @@ -419,8 +402,6 @@ Sets a property in the ``VideoCapture``. .. ocv:cfunction:: int cvSetCaptureProperty( CvCapture* capture, int property_id, double value ) -.. ocv:pyoldfunction:: cv.SetCaptureProperty(capture, property_id, value) -> retval - :param propId: Property identifier. It can be one of the following: * **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds. @@ -484,7 +465,6 @@ VideoWriter constructors .. ocv:pyfunction:: cv2.VideoWriter([filename, fourcc, fps, frameSize[, isColor]]) -> .. ocv:cfunction:: CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc, double fps, CvSize frame_size, int is_color=1 ) -.. ocv:pyoldfunction:: cv.CreateVideoWriter(filename, fourcc, fps, frame_size, is_color=true) -> CvVideoWriter .. ocv:pyfunction:: cv2.VideoWriter.isOpened() -> retval .. ocv:pyfunction:: cv2.VideoWriter.open(filename, fourcc, fps, frameSize[, isColor]) -> retval @@ -544,7 +524,6 @@ Writes the next video frame .. ocv:pyfunction:: cv2.VideoWriter.write(image) -> None .. ocv:cfunction:: int cvWriteFrame( CvVideoWriter* writer, const IplImage* image ) -.. ocv:pyoldfunction:: cv.WriteFrame(writer, image)->int :param writer: Video writer structure (OpenCV 1.x API) diff --git a/modules/highgui/doc/user_interface.rst b/modules/highgui/doc/user_interface.rst index ad960100cb..f84a04c21e 100644 --- a/modules/highgui/doc/user_interface.rst +++ b/modules/highgui/doc/user_interface.rst @@ -11,8 +11,6 @@ Creates a trackbar and attaches it to the specified window. .. ocv:cfunction:: int cvCreateTrackbar( const char* trackbar_name, const char* window_name, int* value, int count, CvTrackbarCallback on_change=NULL ) -.. ocv:pyoldfunction:: cv.CreateTrackbar(trackbarName, windowName, value, count, onChange) -> None - :param trackbarname: Name of the created trackbar. :param winname: Name of the window that will be used as a parent of the created trackbar. @@ -43,8 +41,6 @@ Returns the trackbar position. .. ocv:cfunction:: int cvGetTrackbarPos( const char* trackbar_name, const char* window_name ) -.. ocv:pyoldfunction:: cv.GetTrackbarPos(trackbarName, windowName) -> retval - :param trackbarname: Name of the trackbar. :param winname: Name of the window that is the parent of the trackbar. @@ -65,8 +61,6 @@ Displays an image in the specified window. .. ocv:cfunction:: void cvShowImage( const char* name, const CvArr* image ) -.. ocv:pyoldfunction:: cv.ShowImage(name, image) -> None - :param winname: Name of the window. :param image: Image to be shown. @@ -90,8 +84,6 @@ Creates a window. .. ocv:cfunction:: int cvNamedWindow( const char* name, int flags=CV_WINDOW_AUTOSIZE ) -..
ocv:pyoldfunction:: cv.NamedWindow(name, flags=CV_WINDOW_AUTOSIZE)-> None - :param name: Name of the window in the window caption that may be used as a window identifier. :param flags: Flags of the window. Currently the only supported flag is ``CV_WINDOW_AUTOSIZE`` . If this is set, the window size is automatically adjusted to fit the displayed image (see :ocv:func:`imshow` ), and you cannot change the window size manually. @@ -125,8 +117,6 @@ Destroys a window. .. ocv:cfunction:: void cvDestroyWindow( const char* name ) -.. ocv:pyoldfunction:: cv.DestroyWindow(name)-> None - :param winname: Name of the window to be destroyed. The function ``destroyWindow`` destroys the window with the given name. @@ -142,8 +132,6 @@ Destroys all of the HighGUI windows. .. ocv:cfunction:: void cvDestroyAllWindows() -.. ocv:pyoldfunction:: cv.DestroyAllWindows()-> None - The function ``destroyAllWindows`` destroys all of the opened HighGUI windows. @@ -157,8 +145,6 @@ Moves window to the specified position .. ocv:cfunction:: void cvMoveWindow( const char* name, int x, int y ) -.. ocv:pyoldfunction:: cv.MoveWindow(name, x, y)-> None - :param winname: Window name :param x: The new x-coordinate of the window @@ -176,8 +162,6 @@ Resizes window to the specified size .. ocv:cfunction:: void cvResizeWindow( const char* name, int width, int height ) -.. ocv:pyoldfunction:: cv.ResizeWindow(name, width, height)-> None - :param winname: Window name :param width: The new window width @@ -199,8 +183,6 @@ Sets mouse handler for the specified window .. ocv:cfunction:: void cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, void* param=NULL ) -.. ocv:pyoldfunction:: cv.SetMouseCallback(windowName, onMouse, param=None) -> None - :param winname: Window name :param onMouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/ffilldemo.cpp, on how to specify and use the callback. @@ -218,8 +200,6 @@ Sets the trackbar position. .. ocv:cfunction:: void cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos ) -.. ocv:pyoldfunction:: cv.SetTrackbarPos(trackbarName, windowName, pos)-> None - :param trackbarname: Name of the trackbar. :param winname: Name of the window that is the parent of trackbar. @@ -242,8 +222,6 @@ Waits for a pressed key. .. ocv:cfunction:: int cvWaitKey( int delay=0 ) -.. ocv:pyoldfunction:: cv.WaitKey(delay=0)-> int - :param delay: Delay in milliseconds. 0 is the special value that means "forever". The function ``waitKey`` waits for a key event infinitely (when diff --git a/modules/imgproc/doc/feature_detection.rst b/modules/imgproc/doc/feature_detection.rst index 5098ba1352..090848724c 100644 --- a/modules/imgproc/doc/feature_detection.rst +++ b/modules/imgproc/doc/feature_detection.rst @@ -15,8 +15,6 @@ Finds edges in an image using the [Canny86]_ algorithm. .. ocv:cfunction:: void cvCanny( const CvArr* image, CvArr* edges, double threshold1, double threshold2, int aperture_size=3 ) -.. ocv:pyoldfunction:: cv.Canny(image, edges, threshold1, threshold2, aperture_size=3) -> None - :param image: single-channel 8-bit input image. :param edges: output edge map; it has the same size and type as ``image`` . @@ -44,8 +42,6 @@ Calculates eigenvalues and eigenvectors of image blocks for corner detection. .. ocv:cfunction:: void cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, int block_size, int aperture_size=3 ) -.. 
ocv:pyoldfunction:: cv.CornerEigenValsAndVecs(image, eigenvv, blockSize, aperture_size=3) -> None - :param src: Input single-channel 8-bit or floating-point image. :param dst: Image to store the results. It has the same size as ``src`` and the type ``CV_32FC(6)`` . @@ -97,8 +93,6 @@ Harris edge detector. .. ocv:cfunction:: void cvCornerHarris( const CvArr* image, CvArr* harris_responce, int block_size, int aperture_size=3, double k=0.04 ) -.. ocv:pyoldfunction:: cv.CornerHarris(image, harris_dst, blockSize, aperture_size=3, k=0.04) -> None - :param src: Input single-channel 8-bit or floating-point image. :param dst: Image to store the Harris detector responses. It has the type ``CV_32FC1`` and the same size as ``src`` . @@ -137,8 +131,6 @@ Calculates the minimal eigenvalue of gradient matrices for corner detection. .. ocv:cfunction:: void cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, int block_size, int aperture_size=3 ) -.. ocv:pyoldfunction:: cv.CornerMinEigenVal(image, eigenval, blockSize, aperture_size=3) -> None - :param src: Input single-channel 8-bit or floating-point image. :param dst: Image to store the minimal eigenvalues. It has the type ``CV_32FC1`` and the same size as ``src`` . @@ -166,8 +158,6 @@ Refines the corner locations. .. ocv:cfunction:: void cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, int count, CvSize win, CvSize zero_zone, CvTermCriteria criteria ) -.. ocv:pyoldfunction:: cv.FindCornerSubPix(image, corners, win, zero_zone, criteria) -> corners - :param image: Input image. :param corners: Initial coordinates of the input corners and refined coordinates provided for output. @@ -228,8 +218,6 @@ Determines strong corners on an image. .. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, CvArr* temp_image, CvPoint2D32f* corners, int* corner_count, double quality_level, double min_distance, const CvArr* mask=NULL, int block_size=3, int use_harris=0, double k=0.04 ) -.. ocv:pyoldfunction:: cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=None, blockSize=3, useHarris=0, k=0.04) -> cornerCount - :param image: Input 8-bit or floating-point 32-bit, single-channel image. :param eig_image: The parameter is ignored. @@ -367,8 +355,6 @@ Finds lines in a binary image using the standard Hough transform. .. ocv:cfunction:: CvSeq* cvHoughLines2( CvArr* image, void* line_storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0 ) -.. ocv:pyoldfunction:: cv.HoughLines2(image, storage, method, rho, theta, threshold, param1=0, param2=0)-> lines - :param image: 8-bit, single-channel binary source image. The image may be modified by the function. :param lines: Output vector of lines. Each line is represented by a two-element vector :math:`(\rho, \theta)` . :math:`\rho` is the distance from the coordinate origin :math:`(0,0)` (top-left corner of the image). :math:`\theta` is the line rotation angle in radians ( :math:`0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}` ). @@ -510,8 +496,6 @@ Calculates a feature map for corner detection. .. ocv:cfunction:: void cvPreCornerDetect( const CvArr* image, CvArr* corners, int aperture_size=3 ) -.. ocv:pyoldfunction:: cv.PreCornerDetect(image, corners, apertureSize=3)-> None - :param src: Source single-channel 8-bit or floating-point image. :param dst: Output image that has the type ``CV_32F`` and the same size as ``src`` .
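The feature-detection hunks above follow the same pattern: only the legacy directives go, while the ``cv2`` documentation stays. A hedged sketch of the surviving calls, under the same assumptions as the earlier example::

    import numpy as np
    import cv2

    # Synthetic test image: a white square on a black background.
    img = np.zeros((128, 128), dtype=np.uint8)
    cv2.rectangle(img, (32, 32), (96, 96), 255, -1)

    # cv.Canny(image, edges, t1, t2) wrote into a preallocated
    # edge map; cv2.Canny returns it.
    edges = cv2.Canny(img, 50, 150)

    # cv.GoodFeaturesToTrack needed eigImage/tempImage scratch
    # buffers; cv2.goodFeaturesToTrack drops them and returns the
    # corners (maxCorners=4, qualityLevel=0.01, minDistance=10).
    corners = cv2.goodFeaturesToTrack(img, 4, 0.01, 10)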
diff --git a/modules/imgproc/doc/filtering.rst b/modules/imgproc/doc/filtering.rst index 3e0bbb3a9a..2465ac9a45 100755 --- a/modules/imgproc/doc/filtering.rst +++ b/modules/imgproc/doc/filtering.rst @@ -750,7 +750,6 @@ Dilates an image by using a specific structuring element. .. ocv:pyfunction:: cv2.dilate(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst .. ocv:cfunction:: void cvDilate( const CvArr* src, CvArr* dst, IplConvKernel* element=NULL, int iterations=1 ) -.. ocv:pyoldfunction:: cv.Dilate(src, dst, element=None, iterations=1)-> None :param src: input image; the number of channels can be arbitrary, but the depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``. @@ -790,7 +789,6 @@ Erodes an image by using a specific structuring element. .. ocv:pyfunction:: cv2.erode(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst .. ocv:cfunction:: void cvErode( const CvArr* src, CvArr* dst, IplConvKernel* element=NULL, int iterations=1) -.. ocv:pyoldfunction:: cv.Erode(src, dst, element=None, iterations=1)-> None :param src: input image; the number of channels can be arbitrary, but the depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``. @@ -832,8 +830,6 @@ Convolves an image with the kernel. .. ocv:cfunction:: void cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1,-1) ) -.. ocv:pyoldfunction:: cv.Filter2D(src, dst, kernel, anchor=(-1, -1))-> None - :param src: input image. :param dst: output image of the same size and the same number of channels as ``src``. @@ -1013,8 +1009,6 @@ Returns a structuring element of the specified size and shape for morphological .. ocv:cfunction:: IplConvKernel* cvCreateStructuringElementEx( int cols, int rows, int anchor_x, int anchor_y, int shape, int* values=NULL ) -.. ocv:pyoldfunction:: cv.CreateStructuringElementEx(cols, rows, anchorX, anchorY, shape, values=None)-> kernel - :param shape: Element shape that could be one of the following: * **MORPH_RECT** - a rectangular structuring element: @@ -1091,7 +1085,6 @@ Performs advanced morphological transformations. .. ocv:pyfunction:: cv2.morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst .. ocv:cfunction:: void cvMorphologyEx( const CvArr* src, CvArr* dst, CvArr* temp, IplConvKernel* element, int operation, int iterations=1 ) -.. ocv:pyoldfunction:: cv.MorphologyEx(src, dst, temp, element, operation, iterations=1)-> None :param src: Source image. The number of channels can be arbitrary. The depth should be one of ``CV_8U``, ``CV_16U``, ``CV_16S``, ``CV_32F`` or ``CV_64F``. @@ -1168,8 +1161,6 @@ Calculates the Laplacian of an image. .. ocv:cfunction:: void cvLaplace( const CvArr* src, CvArr* dst, int aperture_size=3 ) -.. ocv:pyoldfunction:: cv.Laplace(src, dst, apertureSize=3) -> None - :param src: Source image. :param dst: Destination image of the same size and the same number of channels as ``src`` . @@ -1214,8 +1205,6 @@ Blurs an image and downsamples it. .. ocv:cfunction:: void cvPyrDown( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 ) -.. ocv:pyoldfunction:: cv.PyrDown(src, dst, filter=CV_GAUSSIAN_5X5) -> None - :param src: input image. :param dst: output image; it has the specified size and the same type as ``src``. @@ -1247,8 +1236,6 @@ Upsamples an image and then blurs it. .. ocv:cfunction:: void cvPyrUp( const CvArr* src, CvArr* dst, int filter=CV_GAUSSIAN_5x5 ) -..
ocv:pyoldfunction:: cv.PyrUp(src, dst, filter=CV_GAUSSIAN_5X5) -> None - :param src: input image. :param dst: output image. It has the specified size and the same type as ``src`` . @@ -1274,8 +1261,6 @@ Performs initial step of meanshift segmentation of an image. .. ocv:cfunction:: void cvPyrMeanShiftFiltering( const CvArr* src, CvArr* dst, double sp, double sr, int max_level=1, CvTermCriteria termcrit= cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1)) -.. ocv:pyoldfunction:: cv.PyrMeanShiftFiltering(src, dst, sp, sr, max_level=1, termcrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1)) -> None - :param src: The source 8-bit, 3-channel image. :param dst: The destination image of the same format and the same size as the source. @@ -1361,8 +1346,6 @@ Smooths the image in one of several ways. .. ocv:cfunction:: void cvSmooth( const CvArr* src, CvArr* dst, int smoothtype=CV_GAUSSIAN, int size1=3, int size2=0, double sigma1=0, double sigma2=0 ) -.. ocv:pyoldfunction:: cv.Smooth(src, dst, smoothtype=CV_GAUSSIAN, param1=3, param2=0, param3=0, param4=0)-> None - :param src: The source image :param dst: The destination image @@ -1417,8 +1400,6 @@ Calculates the first, second, third, or mixed image derivatives using an extende .. ocv:cfunction:: void cvSobel( const CvArr* src, CvArr* dst, int xorder, int yorder, int aperture_size=3 ) -.. ocv:pyoldfunction:: cv.Sobel(src, dst, xorder, yorder, apertureSize=3)-> None - :param src: input image. :param dst: output image of the same size and the same number of channels as ``src`` . diff --git a/modules/imgproc/doc/geometric_transformations.rst b/modules/imgproc/doc/geometric_transformations.rst index c48da01d7e..2eac1fb29f 100644 --- a/modules/imgproc/doc/geometric_transformations.rst +++ b/modules/imgproc/doc/geometric_transformations.rst @@ -85,8 +85,6 @@ Calculates an affine transform from three pairs of the corresponding points. .. ocv:cfunction:: CvMat* cvGetAffineTransform( const CvPoint2D32f * src, const CvPoint2D32f * dst, CvMat * map_matrix ) -.. ocv:pyoldfunction:: cv.GetAffineTransform(src, dst, mapMatrix)-> None - :param src: Coordinates of triangle vertices in the source image. :param dst: Coordinates of the corresponding triangle vertices in the destination image. @@ -124,8 +122,6 @@ Calculates a perspective transform from four pairs of the corresponding points. .. ocv:cfunction:: CvMat* cvGetPerspectiveTransform( const CvPoint2D32f* src, const CvPoint2D32f* dst, CvMat* map_matrix ) -.. ocv:pyoldfunction:: cv.GetPerspectiveTransform(src, dst, mapMatrix)-> None - :param src: Coordinates of quadrangle vertices in the source image. :param dst: Coordinates of the corresponding quadrangle vertices in the destination image. @@ -160,7 +156,6 @@ Retrieves a pixel rectangle from an image with sub-pixel accuracy. .. ocv:pyfunction:: cv2.getRectSubPix(image, patchSize, center[, patch[, patchType]]) -> patch .. ocv:cfunction:: void cvGetRectSubPix( const CvArr* src, CvArr* dst, CvPoint2D32f center ) -.. ocv:pyoldfunction:: cv.GetRectSubPix(src, dst, center)-> None :param src: Source image. @@ -202,8 +197,6 @@ Calculates an affine matrix of 2D rotation. .. ocv:cfunction:: CvMat* cv2DRotationMatrix( CvPoint2D32f center, double angle, double scale, CvMat* map_matrix ) -.. ocv:pyoldfunction:: cv.GetRotationMatrix2D(center, angle, scale, mapMatrix)-> None - :param center: Center of the rotation in the source image. :param angle: Rotation angle in degrees. 
Positive values mean counter-clockwise rotation (the coordinate origin is assumed to be the top-left corner). @@ -264,8 +257,6 @@ Remaps an image to log-polar space. .. ocv:cfunction:: void cvLogPolar( const CvArr* src, CvArr* dst, CvPoint2D32f center, double M, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS ) -.. ocv:pyoldfunction:: cv.LogPolar(src, dst, center, M, flags=CV_INNER_LINEAR+CV_WARP_FILL_OUTLIERS)-> None - :param src: Source image :param dst: Destination image @@ -317,7 +308,6 @@ Applies a generic geometrical transformation to an image. .. ocv:pyfunction:: cv2.remap(src, map1, map2, interpolation[, dst[, borderMode[, borderValue]]]) -> dst .. ocv:cfunction:: void cvRemap( const CvArr* src, CvArr* dst, const CvArr* mapx, const CvArr* mapy, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) ) -.. ocv:pyoldfunction:: cv.Remap(src, dst, mapx, mapy, flags=CV_INNER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None :param src: Source image. @@ -364,7 +354,6 @@ Resizes an image. .. ocv:pyfunction:: cv2.resize(src, dsize[, dst[, fx[, fy[, interpolation]]]]) -> dst .. ocv:cfunction:: void cvResize( const CvArr* src, CvArr* dst, int interpolation=CV_INTER_LINEAR ) -.. ocv:pyoldfunction:: cv.Resize(src, dst, interpolation=CV_INTER_LINEAR)-> None :param src: input image. @@ -434,12 +423,8 @@ Applies an affine transformation to an image. .. ocv:cfunction:: void cvWarpAffine( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) ) -.. ocv:pyoldfunction:: cv.WarpAffine(src, dst, mapMatrix, flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None - .. ocv:cfunction:: void cvGetQuadrangleSubPix( const CvArr* src, CvArr* dst, const CvMat* map_matrix ) -.. ocv:pyoldfunction:: cv.GetQuadrangleSubPix(src, dst, mapMatrix)-> None - :param src: input image. :param dst: output image that has the size ``dsize`` and the same type as ``src`` . @@ -485,8 +470,6 @@ Applies a perspective transformation to an image. .. ocv:cfunction:: void cvWarpPerspective( const CvArr* src, CvArr* dst, const CvMat* map_matrix, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, CvScalar fillval=cvScalarAll(0) ) -.. ocv:pyoldfunction:: cv.WarpPerspective(src, dst, mapMatrix, flags=CV_INNER_LINEAR+CV_WARP_FILL_OUTLIERS, fillval=(0, 0, 0, 0))-> None - :param src: input image. :param dst: output image that has the size ``dsize`` and the same type as ``src`` . @@ -534,9 +517,6 @@ Computes the undistortion and rectification transformation map. .. ocv:cfunction:: void cvInitUndistortRectifyMap( const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat * R, const CvMat* new_camera_matrix, CvArr* mapx, CvArr* mapy ) .. ocv:cfunction:: void cvInitUndistortMap( const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvArr* mapx, CvArr* mapy ) -.. ocv:pyoldfunction:: cv.InitUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, map1, map2)-> None -.. ocv:pyoldfunction:: cv.InitUndistortMap(cameraMatrix, distCoeffs, map1, map2)-> None - :param cameraMatrix: Input camera matrix :math:`A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` . :param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. @@ -630,8 +610,6 @@ Transforms an image to compensate for lens distortion. .. 
ocv:cfunction:: void cvUndistort2( const CvArr* src, CvArr* dst, const CvMat* camera_matrix, const CvMat* distortion_coeffs, const CvMat* new_camera_matrix=0 ) -.. ocv:pyoldfunction:: cv.Undistort2(src, dst, cameraMatrix, distCoeffs)-> None - :param src: Input (distorted) image. :param dst: Output (corrected) image that has the same size and type as ``src`` . @@ -668,7 +646,6 @@ Computes the ideal point coordinates from the observed point coordinates. .. ocv:function:: void undistortPoints( InputArray src, OutputArray dst, InputArray cameraMatrix, InputArray distCoeffs, InputArray R=noArray(), InputArray P=noArray()) .. ocv:cfunction:: void cvUndistortPoints( const CvMat* src, CvMat* dst, const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat* R=0, const CvMat* P=0 ) -.. ocv:pyoldfunction:: cv.UndistortPoints(src, dst, cameraMatrix, distCoeffs, R=None, P=None)-> None :param src: Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2). diff --git a/modules/imgproc/doc/histograms.rst b/modules/imgproc/doc/histograms.rst index f478566f73..b6c390402e 100644 --- a/modules/imgproc/doc/histograms.rst +++ b/modules/imgproc/doc/histograms.rst @@ -16,7 +16,6 @@ Calculates a histogram of a set of arrays. .. ocv:pyfunction:: cv2.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]]) -> hist .. ocv:cfunction:: void cvCalcHist( IplImage** image, CvHistogram* hist, int accumulate=0, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.CalcHist(image, hist, accumulate=0, mask=None)-> None :param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels. @@ -113,7 +112,6 @@ Calculates the back projection of a histogram. .. ocv:pyfunction:: cv2.calcBackProject(images, channels, hist, ranges, scale[, dst]) -> dst .. ocv:cfunction:: void cvCalcBackProject( IplImage** image, CvArr* backProject, const CvHistogram* hist ) -.. ocv:pyoldfunction:: cv.CalcBackProject(image, back_project, hist) -> None :param images: Source arrays. They all should have the same depth, ``CV_8U`` or ``CV_32F`` , and the same size. Each of them can have an arbitrary number of channels. @@ -159,7 +157,6 @@ Compares two histograms. .. ocv:pyfunction:: cv2.compareHist(H1, H2, method) -> retval .. ocv:cfunction:: double cvCompareHist( const CvHistogram* hist1, const CvHistogram* hist2, int method ) -.. ocv:pyoldfunction:: cv.CompareHist(hist1, hist2, method)->float :param H1: First compared histogram. @@ -229,8 +226,6 @@ Computes the "minimal work" distance between two weighted point configurations. .. ocv:cfunction:: float cvCalcEMD2( const CvArr* signature1, const CvArr* signature2, int distance_type, CvDistanceFunction distance_func=NULL, const CvArr* cost_matrix=NULL, CvArr* flow=NULL, float* lower_bound=NULL, void* userdata=NULL ) -.. ocv:pyoldfunction:: cv.CalcEMD2(signature1, signature2, distance_type, distance_func=None, cost_matrix=None, flow=None, lower_bound=None, userdata=None) -> float - :param signature1: First signature, a :math:`\texttt{size1}\times \texttt{dims}+1` floating-point matrix. Each row stores the point weight followed by the point coordinates. The matrix is allowed to have a single column (weights only) if the user-defined cost matrix is used. :param signature2: Second signature of the same format as ``signature1`` , though the number of rows may be different. The total weights may be different. 
In this case an extra "dummy" point is added to either ``signature1`` or ``signature2`` . @@ -304,8 +299,6 @@ Locates a template within an image by using a histogram comparison. .. ocv:cfunction:: void cvCalcBackProjectPatch( IplImage** images, CvArr* dst, CvSize patch_size, CvHistogram* hist, int method, double factor ) -.. ocv:pyoldfunction:: cv.CalcBackProjectPatch(images, dst, patch_size, hist, method, factor)-> None - :param images: Source images (though, you may pass CvMat** as well). :param dst: Destination image. @@ -329,8 +322,6 @@ Divides one histogram by another. .. ocv:cfunction:: void cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, CvHistogram* dst_hist, double scale=255 ) -.. ocv:pyoldfunction:: cv.CalcProbDensity(hist1, hist2, dst_hist, scale=255) -> None - :param hist1: First histogram (the divisor). :param hist2: Second histogram. @@ -351,7 +342,6 @@ ClearHist Clears the histogram. .. ocv:cfunction:: void cvClearHist( CvHistogram* hist ) -.. ocv:pyoldfunction:: cv.ClearHist(hist)-> None :param hist: Histogram. @@ -378,8 +368,6 @@ Creates a histogram. .. ocv:cfunction:: CvHistogram* cvCreateHist( int dims, int* sizes, int type, float** ranges=NULL, int uniform=1 ) -.. ocv:pyoldfunction:: cv.CreateHist(dims, type, ranges=None, uniform=1) -> hist - :param dims: Number of histogram dimensions. :param sizes: Array of the histogram dimension sizes. @@ -414,8 +402,6 @@ Finds the minimum and maximum histogram bins. .. ocv:cfunction:: void cvGetMinMaxHistValue( const CvHistogram* hist, float* min_value, float* max_value, int* min_idx=NULL, int* max_idx=NULL ) -.. ocv:pyoldfunction:: cv.GetMinMaxHistValue(hist)-> (min_value, max_value, min_idx, max_idx) - :param hist: Histogram. :param min_value: Pointer to the minimum value of the histogram. @@ -454,7 +440,6 @@ NormalizeHist Normalizes the histogram. .. ocv:cfunction:: void cvNormalizeHist( CvHistogram* hist, double factor ) -.. ocv:pyoldfunction:: cv.NormalizeHist(hist, factor)-> None :param hist: Pointer to the histogram. @@ -494,7 +479,6 @@ ThreshHist Thresholds the histogram. .. ocv:cfunction:: void cvThreshHist( CvHistogram* hist, double threshold ) -.. ocv:pyoldfunction:: cv.ThreshHist(hist, threshold) -> None :param hist: Pointer to the histogram. diff --git a/modules/imgproc/doc/miscellaneous_transformations.rst b/modules/imgproc/doc/miscellaneous_transformations.rst index a82923fab6..6b7f442d4c 100644 --- a/modules/imgproc/doc/miscellaneous_transformations.rst +++ b/modules/imgproc/doc/miscellaneous_transformations.rst @@ -14,8 +14,6 @@ Applies an adaptive threshold to an array. .. ocv:cfunction:: void cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, int adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, int threshold_type=CV_THRESH_BINARY, int block_size=3, double param1=5 ) -.. ocv:pyoldfunction:: cv.AdaptiveThreshold(src, dst, maxValue, adaptive_method=CV_ADAPTIVE_THRESH_MEAN_C, thresholdType=CV_THRESH_BINARY, blockSize=3, param1=5)-> None - :param src: Source 8-bit single-channel image. :param dst: Destination image of the same size and the same type as ``src`` . @@ -79,7 +77,6 @@ Converts an image from one color space to another. .. ocv:pyfunction:: cv2.cvtColor(src, code[, dst[, dstCn]]) -> dst .. ocv:cfunction:: void cvCvtColor( const CvArr* src, CvArr* dst, int code ) -.. ocv:pyoldfunction:: cv.CvtColor(src, dst, code)-> None :param src: input image: 8-bit unsigned, 16-bit unsigned ( ``CV_16UC...`` ), or single-precision floating-point. 
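Same pattern here: only the ``cv2`` entry points remain documented. A minimal sketch of the ``cvtColor`` and ``adaptiveThreshold`` calls from the hunks above, reusing the old defaults (``blockSize=3``, ``param1=5``); the input file name is a placeholder::

    import cv2

    bgr = cv2.imread("input.png")                 # placeholder path
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)  # stands in for cv.CvtColor

    # Stands in for cv.AdaptiveThreshold with the same defaults:
    # mean-C method, binary thresholding, 3x3 neighbourhood, constant C = 5.
    binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, 3, 5)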
@@ -419,8 +416,6 @@ Calculates the distance to the closest zero pixel for each pixel of the source i .. ocv:cfunction:: void cvDistTransform( const CvArr* src, CvArr* dst, int distance_type=CV_DIST_L2, int mask_size=3, const float* mask=NULL, CvArr* labels=NULL, int labelType=CV_DIST_LABEL_CCOMP ) -.. ocv:pyoldfunction:: cv.DistTransform(src, dst, distance_type=CV_DIST_L2, mask_size=3, mask=None, labels=None) -> None - :param src: 8-bit, single-channel (binary) source image. :param dst: Output image with calculated distances. It is a 32-bit floating-point, single-channel image of the same size as ``src`` . @@ -491,7 +486,6 @@ Fills a connected component with the given color. .. ocv:pyfunction:: cv2.floodFill(image, mask, seedPoint, newVal[, loDiff[, upDiff[, flags]]]) -> retval, image, mask, rect .. ocv:cfunction:: void cvFloodFill( CvArr* image, CvPoint seed_point, CvScalar new_val, CvScalar lo_diff=cvScalarAll(0), CvScalar up_diff=cvScalarAll(0), CvConnectedComp* comp=NULL, int flags=4, CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.FloodFill(image, seed_point, new_val, lo_diff=(0, 0, 0, 0), up_diff=(0, 0, 0, 0), flags=4, mask=None)-> comp :param image: Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the function unless the ``FLOODFILL_MASK_ONLY`` flag is set in the second variant of the function. See the details below. @@ -603,8 +597,6 @@ Calculates the integral of an image. .. ocv:cfunction:: void cvIntegral( const CvArr* image, CvArr* sum, CvArr* sqsum=NULL, CvArr* tilted_sum=NULL ) -.. ocv:pyoldfunction:: cv.Integral(image, sum, sqsum=None, tiltedSum=None)-> None - :param image: input image as :math:`W \times H`, 8-bit or floating-point (32f or 64f). :param sum: integral image as :math:`(W+1)\times (H+1)` , 32-bit integer or floating-point (32f or 64f). @@ -655,8 +647,6 @@ Applies a fixed-level threshold to each array element. .. ocv:cfunction:: double cvThreshold( const CvArr* src, CvArr* dst, double threshold, double max_value, int threshold_type ) -.. ocv:pyoldfunction:: cv.Threshold(src, dst, threshold, maxValue, thresholdType)-> None - :param src: input array (single-channel, 8-bit or 32-bit floating point). :param dst: output array of the same size and type as ``src``. diff --git a/modules/imgproc/doc/motion_analysis_and_object_tracking.rst b/modules/imgproc/doc/motion_analysis_and_object_tracking.rst index bcc372a6a2..c02125e618 100644 --- a/modules/imgproc/doc/motion_analysis_and_object_tracking.rst +++ b/modules/imgproc/doc/motion_analysis_and_object_tracking.rst @@ -13,8 +13,6 @@ Adds an image to the accumulator. .. ocv:cfunction:: void cvAcc( const CvArr* image, CvArr* sum, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.Acc(image, sum, mask=None) -> None - :param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point. :param dst: Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point. @@ -49,8 +47,6 @@ Adds the square of a source image to the accumulator. .. ocv:cfunction:: void cvSquareAcc( const CvArr* image, CvArr* sqsum, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.SquareAcc(image, sqsum, mask=None) -> None - :param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point. :param dst: Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point. @@ -83,8 +79,6 @@ Adds the per-element product of two input images to the accumulator. .. 
ocv:cfunction:: void cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.MultiplyAcc(image1, image2, acc, mask=None)-> None - :param src1: First input image, 1- or 3-channel, 8-bit or 32-bit floating point. :param src2: Second input image of the same type and the same size as ``src1`` . @@ -118,7 +112,6 @@ Updates a running average. .. ocv:pyfunction:: cv2.accumulateWeighted(src, dst, alpha[, mask]) -> dst .. ocv:cfunction:: void cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, const CvArr* mask=NULL ) -.. ocv:pyoldfunction:: cv.RunningAvg(image, acc, alpha, mask=None)-> None :param src: Input image as 1- or 3-channel, 8-bit or 32-bit floating point. diff --git a/modules/imgproc/doc/object_detection.rst b/modules/imgproc/doc/object_detection.rst index c6231a05e0..811ea8ff1c 100644 --- a/modules/imgproc/doc/object_detection.rst +++ b/modules/imgproc/doc/object_detection.rst @@ -12,7 +12,6 @@ Compares a template against overlapped image regions. .. ocv:pyfunction:: cv2.matchTemplate(image, templ, method[, result]) -> result .. ocv:cfunction:: void cvMatchTemplate( const CvArr* image, const CvArr* templ, CvArr* result, int method ) -.. ocv:pyoldfunction:: cv.MatchTemplate(image, templ, result, method)-> None :param image: Image where the search is running. It must be 8-bit or 32-bit floating-point. diff --git a/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst b/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst index 4c1911a77d..04e29ad9ac 100644 --- a/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst +++ b/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst @@ -13,8 +13,6 @@ Calculates all of the moments up to the third order of a polygon or rasterized s .. ocv:cfunction:: void cvMoments( const CvArr* arr, CvMoments* moments, int binary=0 ) -.. ocv:pyoldfunction:: cv.Moments(arr, binary=0) -> moments - :param array: Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( :math:`1 \times N` or :math:`N \times 1` ) of 2D points (``Point`` or ``Point2f`` ). :param binaryImage: If it is true, all non-zero image pixels are treated as 1's. The parameter is used for images only. @@ -98,8 +96,6 @@ Calculates seven Hu invariants. .. ocv:cfunction:: void cvGetHuMoments( CvMoments* moments, CvHuMoments* hu_moments ) -.. ocv:pyoldfunction:: cv.GetHuMoments(moments) -> hu - :param moments: Input moments computed with :ocv:func:`moments` . :param hu: Output Hu invariants. @@ -163,8 +159,6 @@ Finds contours in a binary image. .. ocv:cfunction:: int cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** first_contour, int header_size=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0,0) ) -.. ocv:pyoldfunction:: cv.FindContours(image, storage, mode=CV_RETR_LIST, method=CV_CHAIN_APPROX_SIMPLE, offset=(0, 0)) -> contours - :param image: Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as ``binary`` . You can use :ocv:func:`compare` , :ocv:func:`inRange` , :ocv:func:`threshold` , :ocv:func:`adaptiveThreshold` , :ocv:func:`Canny` , and others to create a binary image out of a grayscale or color one. The function modifies the ``image`` while extracting the contours. :param contours: Detected contours. Each contour is stored as a vector of points. @@ -243,8 +237,6 @@ Approximates Freeman chain(s) with a polygonal curve. .. 
ocv:cfunction:: CvSeq* cvApproxChains( CvSeq* src_seq, CvMemStorage* storage, int method=CV_CHAIN_APPROX_SIMPLE, double parameter=0, int minimal_perimeter=0, int recursive=0 ) -.. ocv:pyoldfunction:: cv.ApproxChains(src_seq, storage, method=CV_CHAIN_APPROX_SIMPLE, parameter=0, minimal_perimeter=0, recursive=0)-> contours - :param src_seq: Pointer to the approximated Freeman chain that can refer to other chains. :param storage: Storage location for the resulting polylines. @@ -270,8 +262,6 @@ Calculates a contour perimeter or a curve length. .. ocv:cfunction:: double cvArcLength( const void* curve, CvSlice slice=CV_WHOLE_SEQ, int is_closed=-1 ) -.. ocv:pyoldfunction:: cv.ArcLength(curve, slice=CV_WHOLE_SEQ, isClosed=-1) -> float - :param curve: Input vector of 2D points, stored in ``std::vector`` or ``Mat``. :param closed: Flag indicating whether the curve is closed or not. @@ -289,7 +279,6 @@ Calculates the up-right bounding rectangle of a point set. .. ocv:pyfunction:: cv2.boundingRect(points) -> retval .. ocv:cfunction:: CvRect cvBoundingRect( CvArr* points, int update=0 ) -.. ocv:pyoldfunction:: cv.BoundingRect(points, update=0)-> CvRect :param points: Input 2D point set, stored in ``std::vector`` or ``Mat``. @@ -308,8 +297,6 @@ Calculates a contour area. .. ocv:cfunction:: double cvContourArea( const CvArr* contour, CvSlice slice=CV_WHOLE_SEQ, int oriented=0 ) -.. ocv:pyoldfunction:: cv.ContourArea(contour, slice=CV_WHOLE_SEQ) -> float - :param contour: Input vector of 2D points (contour vertices), stored in ``std::vector`` or ``Mat``. :param oriented: Oriented area flag. If it is true, the function returns a signed area value, depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can determine orientation of a contour by taking the sign of an area. By default, the parameter is ``false``, which means that the absolute value is returned. @@ -349,8 +336,6 @@ Finds the convex hull of a point set. .. ocv:cfunction:: CvSeq* cvConvexHull2( const CvArr* input, void* hull_storage=NULL, int orientation=CV_CLOCKWISE, int return_points=0 ) -.. ocv:pyoldfunction:: cv.ConvexHull2(points, storage, orientation=CV_CLOCKWISE, return_points=0) -> convexHull - :param points: Input 2D point set, stored in ``std::vector`` or ``Mat``. :param hull: Output convex hull. It is either an integer vector of indices or vector of points. In the first case, the ``hull`` elements are 0-based indices of the convex hull points in the original array (since the set of convex hull points is a subset of the original point set). In the second case, ``hull`` elements are the convex hull points themselves. @@ -379,8 +364,6 @@ Finds the convexity defects of a contour. .. ocv:cfunction:: CvSeq* cvConvexityDefects( const CvArr* contour, const CvArr* convexhull, CvMemStorage* storage=NULL ) -.. ocv:pyoldfunction:: cv.ConvexityDefects(contour, convexhull, storage)-> convexityDefects - :param contour: Input contour. :param convexhull: Convex hull obtained using :ocv:func:`convexHull` that should contain indices of the contour points that make the hull. @@ -412,7 +395,6 @@ Fits an ellipse around a set of 2D points. .. ocv:pyfunction:: cv2.fitEllipse(points) -> retval .. ocv:cfunction:: CvBox2D cvFitEllipse2( const CvArr* points ) -.. ocv:pyoldfunction:: cv.FitEllipse2(points)-> Box2D :param points: Input 2D point set, stored in: @@ -434,8 +416,6 @@ Fits a line to a 2D or 3D point set. .. 
ocv:cfunction:: void cvFitLine( const CvArr* points, int dist_type, double param, double reps, double aeps, float* line ) -.. ocv:pyoldfunction:: cv.FitLine(points, dist_type, param, reps, aeps) -> line - :param points: Input vector of 2D or 3D points, stored in ``std::vector<>`` or ``Mat``. :param line: Output line parameters. In case of 2D fitting, it should be a vector of 4 elements (like ``Vec4f``) - ``(vx, vy, x0, y0)``, where ``(vx, vy)`` is a normalized vector collinear to the line and ``(x0, y0)`` is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like ``Vec6f``) - ``(vx, vy, vz, x0, y0, z0)``, where ``(vx, vy, vz)`` is a normalized vector collinear to the line and ``(x0, y0, z0)`` is a point on the line. @@ -507,7 +487,6 @@ Tests a contour convexity. .. ocv:pyfunction:: cv2.isContourConvex(contour) -> retval .. ocv:cfunction:: int cvCheckContourConvexity( const CvArr* contour ) -.. ocv:pyoldfunction:: cv.CheckContourConvexity(contour)-> int :param contour: Input vector of 2D points, stored in: @@ -531,8 +510,6 @@ Finds a rotated rectangle of the minimum area enclosing the input 2D point set. .. ocv:cfunction:: CvBox2D cvMinAreaRect2( const CvArr* points, CvMemStorage* storage=NULL ) -.. ocv:pyoldfunction:: cv.MinAreaRect2(points, storage=None) -> Box2D - :param points: Input vector of 2D points, stored in: * ``std::vector<>`` or ``Mat`` (C++ interface) @@ -555,8 +532,6 @@ Finds a circle of the minimum area enclosing a 2D point set. .. ocv:cfunction:: int cvMinEnclosingCircle( const CvArr* points, CvPoint2D32f* center, float* radius ) -.. ocv:pyoldfunction:: cv.MinEnclosingCircle(points)-> (int, center, radius) - :param points: Input vector of 2D points, stored in: * ``std::vector<>`` or ``Mat`` (C++ interface) @@ -582,7 +557,6 @@ Compares two shapes. .. ocv:pyfunction:: cv2.matchShapes(contour1, contour2, method, parameter) -> retval .. ocv:cfunction:: double cvMatchShapes( const void* object1, const void* object2, int method, double parameter=0 ) -.. ocv:pyoldfunction:: cv.MatchShapes(object1, object2, method, parameter=0) -> float :param object1: First contour or grayscale image. @@ -637,7 +611,6 @@ Performs a point-in-contour test. .. ocv:pyfunction:: cv2.pointPolygonTest(contour, pt, measureDist) -> retval .. ocv:cfunction:: double cvPointPolygonTest( const CvArr* contour, CvPoint2D32f pt, int measure_dist ) -.. ocv:pyoldfunction:: cv.PointPolygonTest(contour, pt, measure_dist) -> float :param contour: Input contour. diff --git a/modules/java/generator/rst_parser.py b/modules/java/generator/rst_parser.py index ad8358542c..86625f396c 100755 --- a/modules/java/generator/rst_parser.py +++ b/modules/java/generator/rst_parser.py @@ -79,8 +79,6 @@ class DeclarationParser(object): return "C" if line.startswith(".. ocv:pyfunction::"): return "Python2" - if line.startswith(".. ocv:pyoldfunction::"): - return "Python1" if line.startswith(".. ocv:jfunction::"): return "Java" return None diff --git a/modules/legacy/doc/histograms.rst b/modules/legacy/doc/histograms.rst index 0405bf2fe0..c4359e9e8d 100644 --- a/modules/legacy/doc/histograms.rst +++ b/modules/legacy/doc/histograms.rst @@ -39,11 +39,6 @@ Queries the value of the histogram bin. .. ocv:cfunction:: float cvQueryHistValue_3D(CvHistogram hist, int idx0, int idx1, int idx2) .. ocv:cfunction:: float cvQueryHistValue_nD(CvHistogram hist, const int* idx) -.. ocv:pyoldfunction:: cv.QueryHistValue_1D(hist, idx0) -> float -.. ocv:pyoldfunction:: cv.QueryHistValue_2D(hist, idx0, idx1) -> float -.. 
ocv:pyoldfunction:: cv.QueryHistValue_3D(hist, idx0, idx1, idx2) -> float -.. ocv:pyoldfunction:: cv.QueryHistValue_nD(hist, idx) -> float - :param hist: Histogram. :param idx0: 0-th index. diff --git a/modules/legacy/doc/motion_analysis.rst b/modules/legacy/doc/motion_analysis.rst index 005e186214..cd419b8001 100644 --- a/modules/legacy/doc/motion_analysis.rst +++ b/modules/legacy/doc/motion_analysis.rst @@ -10,8 +10,6 @@ Calculates the optical flow for two images by using the block matching method. .. ocv:cfunction:: void cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr, CvSize block_size, CvSize shift_size, CvSize max_range, int use_previous, CvArr* velx, CvArr* vely ) -.. ocv:pyoldfunction:: cv.CalcOpticalFlowBM(prev, curr, blockSize, shiftSize, max_range, usePrevious, velx, vely)-> None - :param prev: First image, 8-bit, single-channel :param curr: Second image, 8-bit, single-channel @@ -45,8 +43,6 @@ Calculates the optical flow for two images using Horn-Schunck algorithm. .. ocv:cfunction:: void cvCalcOpticalFlowHS(const CvArr* prev, const CvArr* curr, int use_previous, CvArr* velx, CvArr* vely, double lambda, CvTermCriteria criteria) -.. ocv:pyoldfunction:: cv.CalcOpticalFlowHS(prev, curr, usePrevious, velx, vely, lambda, criteria)-> None - :param prev: First image, 8-bit, single-channel :param curr: Second image, 8-bit, single-channel @@ -71,8 +67,6 @@ Calculates the optical flow for two images using Lucas-Kanade algorithm. .. ocv:cfunction:: void cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr, CvSize win_size, CvArr* velx, CvArr* vely ) -.. ocv:pyoldfunction:: cv.CalcOpticalFlowLK(prev, curr, winSize, velx, vely)-> None - :param prev: First image, 8-bit, single-channel :param curr: Second image, 8-bit, single-channel diff --git a/modules/legacy/doc/planar_subdivisions.rst b/modules/legacy/doc/planar_subdivisions.rst index f01e11ae8d..096309e934 100644 --- a/modules/legacy/doc/planar_subdivisions.rst +++ b/modules/legacy/doc/planar_subdivisions.rst @@ -116,7 +116,6 @@ CalcSubdivVoronoi2D Calculates the coordinates of the Voronoi diagram cells. .. ocv:cfunction:: void cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv ) -.. ocv:pyoldfunction:: cv.CalcSubdivVoronoi2D(subdiv)-> None :param subdiv: Delaunay subdivision, in which all the points are already added. @@ -130,7 +129,6 @@ ClearSubdivVoronoi2D Removes all virtual points. .. ocv:cfunction:: void cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv ) -.. ocv:pyoldfunction:: cv.ClearSubdivVoronoi2D(subdiv)-> None :param subdiv: Delaunay subdivision. @@ -145,7 +143,6 @@ CreateSubdivDelaunay2D Creates an empty Delaunay triangulation. .. ocv:cfunction:: CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage ) -.. ocv:pyoldfunction:: cv.CreateSubdivDelaunay2D(rect, storage) -> CvSubdiv2D :param rect: Rectangle that includes all of the 2D points that are to be added to the subdivision. @@ -166,7 +163,6 @@ FindNearestPoint2D Finds the subdivision vertex closest to the given point. .. ocv:cfunction:: CvSubdiv2DPoint* cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt ) -.. ocv:pyoldfunction:: cv.FindNearestPoint2D(subdiv, pt)-> point :param subdiv: Delaunay or another subdivision. @@ -185,7 +181,6 @@ Subdiv2DEdgeDst Returns the edge destination. .. ocv:cfunction:: CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge ) -.. ocv:pyoldfunction:: cv.Subdiv2DEdgeDst(edge)-> point :param edge: Subdivision edge (not a quad-edge). @@ -200,7 +195,6 @@ Subdiv2DGetEdge Returns one of the edges related to the given edge. 
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type ) -.. ocv:pyoldfunction:: cv.Subdiv2DGetEdge(edge, type)-> CvSubdiv2DEdge :param edge: Subdivision edge (not a quad-edge). @@ -231,7 +225,6 @@ Subdiv2DNextEdge Returns next edge around the edge origin. .. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DNextEdge( CvSubdiv2DEdge edge ) -.. ocv:pyoldfunction:: cv.Subdiv2DNextEdge(edge)-> CvSubdiv2DEdge :param edge: Subdivision edge (not a quad-edge). @@ -246,7 +239,6 @@ Subdiv2DLocate Returns the location of a point within a Delaunay triangulation. .. ocv:cfunction:: CvSubdiv2DPointLocation cvSubdiv2DLocate( CvSubdiv2D* subdiv, CvPoint2D32f pt, CvSubdiv2DEdge* edge, CvSubdiv2DPoint** vertex=NULL ) -.. ocv:pyoldfunction:: cv.Subdiv2DLocate(subdiv, pt) -> (loc, where) :param subdiv: Delaunay or another subdivision. @@ -294,7 +286,6 @@ Subdiv2DRotateEdge Returns another edge of the same quad-edge. .. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate ) -.. ocv:pyoldfunction:: cv.Subdiv2DRotateEdge(edge, rotate)-> CvSubdiv2DEdge :param edge: Subdivision edge (not a quad-edge). @@ -315,7 +306,6 @@ SubdivDelaunay2DInsert Inserts a single point into a Delaunay triangulation. .. ocv:cfunction:: CvSubdiv2DPoint* cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt) -.. ocv:pyoldfunction:: cv.SubdivDelaunay2DInsert(subdiv, pt)-> point :param subdiv: Delaunay subdivision created by the function :ocv:cfunc:`CreateSubdivDelaunay2D`. diff --git a/modules/nonfree/doc/feature_detection.rst b/modules/nonfree/doc/feature_detection.rst index c7ccb74932..0b79560808 100644 --- a/modules/nonfree/doc/feature_detection.rst +++ b/modules/nonfree/doc/feature_detection.rst @@ -108,8 +108,6 @@ Detects keypoints and computes SURF descriptors for them. .. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params ) -.. ocv:pyoldfunction:: cv.ExtractSURF(image, mask, storage, params)-> (keypoints, descriptors) - :param image: Input 8-bit grayscale image :param mask: Optional input mask that marks the regions where we should detect features. diff --git a/modules/objdetect/doc/cascade_classification.rst b/modules/objdetect/doc/cascade_classification.rst index 8428079641..961cf0aa57 100644 --- a/modules/objdetect/doc/cascade_classification.rst +++ b/modules/objdetect/doc/cascade_classification.rst @@ -195,8 +195,6 @@ Detects objects of different sizes in the input image. The detected objects are .. ocv:cfunction:: CvSeq* cvHaarDetectObjects( const CvArr* image, CvHaarClassifierCascade* cascade, CvMemStorage* storage, double scale_factor=1.1, int min_neighbors=3, int flags=0, CvSize min_size=cvSize(0,0), CvSize max_size=cvSize(0,0) ) -.. ocv:pyoldfunction:: cv.HaarDetectObjects(image, cascade, storage, scale_factor=1.1, min_neighbors=3, flags=0, min_size=(0, 0)) -> detectedObjects - :param cascade: Haar classifier cascade (OpenCV 1.x API only). It can be loaded from XML or YAML file using :ocv:cfunc:`Load`. When the cascade is not needed anymore, release it using ``cvReleaseHaarClassifierCascade(&cascade)``. :param image: Matrix of the type ``CV_8U`` containing an image where objects are detected. 
diff --git a/modules/photo/doc/inpainting.rst b/modules/photo/doc/inpainting.rst index ef69ebc708..9b66266136 100644 --- a/modules/photo/doc/inpainting.rst +++ b/modules/photo/doc/inpainting.rst @@ -12,7 +12,6 @@ Restores the selected region in an image using the region neighborhood. .. ocv:pyfunction:: cv2.inpaint(src, inpaintMask, inpaintRadius, flags[, dst]) -> dst .. ocv:cfunction:: void cvInpaint( const CvArr* src, const CvArr* inpaint_mask, CvArr* dst, double inpaintRange, int flags ) -.. ocv:pyoldfunction:: cv.Inpaint(src, mask, dst, inpaintRadius, flags) -> None :param src: Input 8-bit 1-channel or 3-channel image. diff --git a/modules/python/CMakeLists.txt b/modules/python/CMakeLists.txt index a29858afcb..389ce3f9d8 100644 --- a/modules/python/CMakeLists.txt +++ b/modules/python/CMakeLists.txt @@ -5,23 +5,20 @@ if(WIN32 AND CMAKE_BUILD_TYPE STREQUAL "Debug") ocv_module_disable(python) endif() -if(ANDROID OR IOS OR NOT PYTHONLIBS_FOUND OR NOT PYTHON_USE_NUMPY) + +if(ANDROID OR IOS OR NOT PYTHONLIBS_FOUND OR NOT PYTHON_NUMPY_INCLUDE_DIR) ocv_module_disable(python) endif() set(the_description "The python bindings") ocv_add_module(python BINDINGS opencv_core opencv_flann opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_photo opencv_objdetect opencv_contrib opencv_legacy opencv_softcascade OPTIONAL opencv_nonfree) -add_definitions(-DPYTHON_USE_NUMPY=1) - ocv_module_include_directories( "${PYTHON_INCLUDE_PATH}" "${PYTHON_NUMPY_INCLUDE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/src2" ) - - set(opencv_hdrs "${OPENCV_MODULE_opencv_core_LOCATION}/include/opencv2/core.hpp" "${OPENCV_MODULE_opencv_core_LOCATION}/include/opencv2/core/base.hpp" @@ -53,13 +50,6 @@ set(cv2_generated_hdrs "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_type_reg.h" "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_const_reg.h") -add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/generated0.i - COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src2/gen.py" "${CMAKE_CURRENT_SOURCE_DIR}/src2" - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/api - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/defs - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/gen.py) - add_custom_command( OUTPUT ${cv2_generated_hdrs} COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src2/gen2.py" ${CMAKE_CURRENT_BINARY_DIR} ${opencv_hdrs} @@ -67,7 +57,7 @@ add_custom_command( DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/hdr_parser.py DEPENDS ${opencv_hdrs}) -add_library(${the_module} SHARED src2/cv2.cpp ${CMAKE_CURRENT_BINARY_DIR}/generated0.i ${cv2_generated_hdrs} src2/cv2.cv.hpp) +add_library(${the_module} SHARED src2/cv2.cpp ${cv2_generated_hdrs}) set_target_properties(${the_module} PROPERTIES COMPILE_DEFINITIONS OPENCV_NOSTL) if(PYTHON_DEBUG_LIBRARIES AND NOT PYTHON_LIBRARIES MATCHES "optimized.*debug") diff --git a/modules/python/src2/api b/modules/python/src2/api deleted file mode 100644 index dc02e4ab04..0000000000 --- a/modules/python/src2/api +++ /dev/null @@ -1,1805 +0,0 @@ -# Macros -CV_RGB CvScalar - double red - double grn - double blu -CV_MAT_CN int - int i -CV_MAT_DEPTH int - int i -Scalar CvScalar - double val0 - double val1 0 - double val2 0 - double val3 0 -ScalarAll CvScalar - double val0123 -RealScalar CvScalar - double val0 -CV_IABS int - int a -CV_CMP int - int a - int b -CV_SIGN int - int a -CV_FOURCC int - char c1 - char c2 - char c3 - char c4 -CV_MAKETYPE int - int depth - int cn -CV_8UC int - int n -CV_8SC int - int n -CV_16UC int - int n -CV_16SC int - int n -CV_32SC int - int n -CV_32FC 
int - int n -CV_64FC int - int n - -# Initialization -CloneImage IplImage* - IplImage image -SetImageCOI - IplImage image - int coi -GetImageCOI int - IplImage image -SetImageROI - IplImage image - CvRect rect -ResetImageROI - IplImage image -GetImageROI CvRect - IplImage image -CloneMat CvMat* - CvMat mat -CloneMatND CvMatND* - CvMatND mat - -# Accessing Elements and sub-Arrays - -Get1D CvScalar - CvArr arr - int idx -Get2D CvScalar - CvArr arr - int idx0 - int idx1 -Get3D CvScalar - CvArr arr - int idx0 - int idx1 - int idx2 -GetND CvScalar - CvArr arr - ints indices -GetReal1D double - CvArr arr - int idx0 -GetReal2D double - CvArr arr - int idx0 - int idx1 -GetReal3D double - CvArr arr - int idx0 - int idx1 - int idx2 -GetRealND double - CvArr arr - ints idx -mGet double - CvMat mat - int row - int col -Set1D - CvArr arr - int idx - CvScalar value -Set2D - CvArr arr - int idx0 - int idx1 - CvScalar value -Set3D - CvArr arr - int idx0 - int idx1 - int idx2 - CvScalar value -SetND - CvArr arr - ints indices - CvScalar value -SetReal1D - CvArr arr - int idx - double value -SetReal2D - CvArr arr - int idx0 - int idx1 - double value -SetReal3D - CvArr arr - int idx0 - int idx1 - int idx2 - double value -SetRealND - CvArr arr - ints indices - double value -mSet - CvMat mat - int row - int col - double value -ClearND - CvArr arr - ints idx - -# Sequences -CV_IS_SEQ_INDEX int - CvSeq s -CV_IS_SEQ_CURVE int - CvSeq s -CV_IS_SEQ_CLOSED int - CvSeq s -CV_IS_SEQ_CONVEX int - CvSeq s -CV_IS_SEQ_HOLE int - CvSeq s -CV_IS_SEQ_SIMPLE int - CvSeq s - - -# Curves and Shapes -Line - CvArr img - CvPoint pt1 - CvPoint pt2 - CvScalar color - int thickness 1 - int lineType 8 - int shift 0 -Rectangle - CvArr img - CvPoint pt1 - CvPoint pt2 - CvScalar color - int thickness 1 - int lineType 8 - int shift 0 -Circle - CvArr img - CvPoint center - int radius - CvScalar color - int thickness 1 - int lineType 8 - int shift 0 -Ellipse - CvArr img - CvPoint center - CvSize axes - double angle - double start_angle - double end_angle - CvScalar color - int thickness 1 - int lineType 8 - int shift 0 -EllipseBox - CvArr img - CvBox2D box - CvScalar color - int thickness 1 - int lineType 8 - int shift 0 -FillPoly - CvArr img - pts_npts_contours polys - CvScalar color - int lineType 8 - int shift 0 -FillConvexPoly - CvArr img - CvPoints pn - CvScalar color - int lineType 8 - int shift 0 -PolyLine - CvArr img - pts_npts_contours polys - int is_closed - CvScalar color - int thickness 1 - int lineType 8 - int shift 0 - -#Text -InitFont font - CvFont font /O - int fontFace - double hscale - double vscale - double shear 0 - int thickness 1 - int lineType 8 -PutText - CvArr img - char* text - CvPoint org - CvFont* font - CvScalar color -GetTextSize textSize,baseline - char* textString - CvFont* font - CvSize textSize /O - int baseline /O - -# Point Sets and Contours -DrawContours - CvArr img - CvSeq contour - CvScalar external_color - CvScalar hole_color - int max_level - int thickness 1 - int lineType 8 - CvPoint offset cvPoint(0,0) - -# RTTI and Generic Functions -Save - char* filename - generic structPtr - char* name NULL - char* comment NULL -Load generic - char* filename - CvMemStorage storage NULL - char* name NULL - -# Accessing Elements and sub-Arrays -GetRow submat - CvArr arr - CvMat submat /J:arr,O,A - int row -GetRows submat - CvArr arr - CvMat submat /J:arr,O,A - int startRow - int endRow - int deltaRow 1 -GetCol submat - CvArr arr - CvMat submat /J:arr,O,A - int col -GetCols submat - CvArr arr - CvMat submat 
/J:arr,O,A - int startCol - int endCol -GetDiag submat - CvArr arr - CvMat submat /J:arr,O,A - int diag 0 -GetSubRect submat - CvArr arr - CvMat submat /J:arr,O,A - CvRect rect -GetSize CvSize - CvArr arr -GetElemType int - CvArr arr - -# Copying and Filling -Copy - CvArr src - CvArr dst - CvArr mask NULL -Set - CvArr arr - CvScalar value - CvArr mask NULL -SetZero - CvArr arr -Zero - CvArr arr -SetIdentity - CvArr mat - CvScalar value cvRealScalar(1) -Range - CvArr mat - double start - double end - -# Transforms and Permutations -# Reshape, ReshapeND - requires special data refcount code -Repeat - CvArr src - CvArr dst -Flip - CvArr src - CvArr dst NULL - int flipMode 0 -Split - CvArr src - CvArr dst0 - CvArr dst1 - CvArr dst2 - CvArr dst3 -CvtPixToPlane - CvArr src - CvArr dst0 - CvArr dst1 - CvArr dst2 - CvArr dst3 -Merge - CvArr src0 - CvArr src1 - CvArr src2 - CvArr src3 - CvArr dst -MixChannels - cvarr_count src /K - cvarr_count dst - intpair fromTo -RandShuffle - CvArr mat - CvRNG* rng - double iter_factor 1.0 -Sort - CvArr src - CvArr dst - CvArr idxmat - int flags 0 - -# Arithmetic, Logic and Comparison -LUT - CvArr src - CvArr dst - CvArr lut -ConvertScale - CvArr src - CvArr dst - double scale 1.0 - double shift 0.0 -CvtScale - CvArr src - CvArr dst - double scale 1.0 - double shift 0.0 -Scale - CvArr src - CvArr dst - double scale 1.0 - double shift 0.0 -Convert - CvArr src - CvArr dst -ConvertScaleAbs - CvArr src - CvArr dst - double scale 1.0 - double shift 0.0 -Add - CvArr src1 - CvArr src2 - CvArr dst - CvArr mask NULL -AddS - CvArr src - CvScalar value - CvArr dst - CvArr mask NULL -AddWeighted - CvArr src1 - double alpha - CvArr src2 - double beta - double gamma - CvArr dst -Sub - CvArr src1 - CvArr src2 - CvArr dst - CvArr mask NULL -SubS - CvArr src - CvScalar value - CvArr dst - CvArr mask NULL -SubRS - CvArr src - CvScalar value - CvArr dst - CvArr mask NULL -Mul - CvArr src1 - CvArr src2 - CvArr dst - double scale 1.0 -Div - CvArr src1 - CvArr src2 - CvArr dst - double scale 1.0 -And - CvArr src1 - CvArr src2 - CvArr dst - CvArr mask NULL -AndS - CvArr src - CvScalar value - CvArr dst - CvArr mask NULL -Or - CvArr src1 - CvArr src2 - CvArr dst - CvArr mask NULL -OrS - CvArr src - CvScalar value - CvArr dst - CvArr mask NULL -Xor - CvArr src1 - CvArr src2 - CvArr dst - CvArr mask NULL -XorS - CvArr src - CvScalar value - CvArr dst - CvArr mask NULL -Not - CvArr src - CvArr dst -Cmp - CvArr src1 - CvArr src2 - CvArr dst - int cmpOp -CmpS - CvArr src - double value - CvArr dst - int cmpOp -InRange - CvArr src - CvArr lower - CvArr upper - CvArr dst -InRangeS - CvArr src - CvScalar lower - CvScalar upper - CvArr dst -Max - CvArr src1 - CvArr src2 - CvArr dst -MaxS - CvArr src - double value - CvArr dst -Min - CvArr src1 - CvArr src2 - CvArr dst -MinS - CvArr src - double value - CvArr dst -AbsDiff - CvArr src1 - CvArr src2 - CvArr dst -AbsDiffS - CvArr src - CvArr dst - CvScalar value -Abs - CvArr src - CvArr dst - -# Statistics -CountNonZero int - CvArr arr -Sum CvScalar - CvArr arr -Avg CvScalar - CvArr arr - CvArr mask NULL -AvgSdv mean,stdDev - CvArr arr - CvScalar mean /O - CvScalar stdDev /O - CvArr mask NULL -MinMaxLoc minVal,maxVal,minLoc,maxLoc - CvArr arr - double minVal /O - double maxVal /O - CvPoint minLoc /O - CvPoint maxLoc /O - CvArr mask NULL -Norm double - CvArr arr1 - CvArr arr2 - int normType CV_L2 - CvArr mask NULL -Reduce - CvArr src - CvArr dst - int dim -1 - int op CV_REDUCE_SUM - -# Linear Algebra -DotProduct double - CvArr src1 - CvArr src2 
-Normalize - CvArr src - CvArr dst - double a 1.0 - double b 0.0 - int norm_type CV_L2 - CvArr mask NULL -CrossProduct - CvArr src1 - CvArr src2 - CvArr dst -ScaleAdd - CvArr src1 - CvScalar scale - CvArr src2 - CvArr dst -GEMM - CvArr src1 - CvArr src2 - double alpha - CvArr src3 - double beta - CvArr dst - int tABC 0 -MatMulAdd - CvArr src1 - CvArr src2 - CvArr src3 - CvArr dst -MatMul - CvArr src1 - CvArr src2 - CvArr dst -Transform - CvArr src - CvArr dst - CvMat transmat - CvMat shiftvec NULL -PerspectiveTransform - CvArr src - CvArr dst - CvMat mat -MulTransposed - CvArr src - CvArr dst - int order - CvArr delta NULL - double scale 1.0 -Trace CvScalar - CvArr mat -Transpose - CvArr src - CvArr dst -Det double - CvArr mat -Invert double - CvArr src - CvArr dst - int method CV_LU -Solve - CvArr A - CvArr B - CvArr X - int method CV_LU -SVD - CvArr A - CvArr W - CvArr U NULL - CvArr V NULL - int flags 0 -SVBkSb - CvArr W - CvArr U - CvArr V - CvArr B - CvArr X - int flags -EigenVV - CvArr mat - CvArr evects - CvArr evals - double eps - int lowindex 0 - int highindex 0 -CalcCovarMatrix - cvarr_count vects /K - CvArr covMat - CvArr avg - int flags -Mahalonobis - CvArr vec1 - CvArr vec2 - CvArr mat -CalcPCA - CvArr data - CvArr avg - CvArr eigenvalues - CvArr eigenvectors - int flags -ProjectPCA - CvArr data - CvArr avg - CvArr eigenvectors - CvArr result -BackProjectPCA - CvArr proj - CvArr avg - CvArr eigenvects - CvArr result - -# Math Functions -Round int - double value -Floor int - double value -Ceil int - double value -Sqrt float - float value -InvSqrt float - float value -Cbrt float - float value -FastArctan float - float y - float x -IsNaN int - double value -IsInf int - double value -CartToPolar - CvArr x - CvArr y - CvArr magnitude - CvArr angle NULL - int angleInDegrees 0 -PolarToCart - CvArr magnitude - CvArr angle - CvArr x - CvArr y - int angleInDegrees 0 -Pow - CvArr src - CvArr dst - double power -Exp - CvArr src - CvArr dst -Log - CvArr src - CvArr dst -SolveCubic - CvMat coeffs - CvMat roots -SolvePoly - CvMat coeffs - CvMat roots - int maxiter 10 - int fig 10 - -# Random Number Generation -RNG CvRNG - int64 seed -1LL -RandArr - CvRNG* rng - CvArr arr - int distType - CvScalar param1 - CvScalar param2 -RandInt unsigned - CvRNG* rng -RandReal double - CvRNG* rng - -# Discrete Transforms -DFT - CvArr src - CvArr dst - int flags - int nonzeroRows 0 -GetOptimalDFTSize int - int size0 -MulSpectrums - CvArr src1 - CvArr src2 - CvArr dst - int flags -DCT - CvArr src - CvArr dst - int flags - -# Sequences -SeqRemove - CvSeq seq - int index -ClearSeq - CvSeq seq -CloneSeq - CvSeq seq - CvMemStorage storage -SeqRemoveSlice - CvSeq seq - CvSlice slice -SeqInvert - CvSeq seq - -# Miscellaneous Functions -CheckArr int - CvArr arr - int flags 0 - double min_val 0 - double max_val 0 -KMeans2 double - CvArr samples - int nclusters - CvArr labels - CvTermCriteria termcrit - int attempts 1 - int flags 0 - CvArr centers NULL - -# Gradients, Edges, Corners and Features -Sobel - CvArr src - CvArr dst - int xorder - int yorder - int apertureSize 3 -Laplace - CvArr src - CvArr dst - int apertureSize 3 -Canny - CvArr image - CvArr edges - double threshold1 - double threshold2 - int aperture_size 3 -PreCornerDetect - CvArr image - CvArr corners - int apertureSize 3 -CornerEigenValsAndVecs - CvArr image - CvArr eigenvv - int blockSize - int aperture_size 3 -CornerMinEigenVal - CvArr image - CvArr eigenval - int blockSize - int aperture_size 3 -CornerHarris - CvArr image - CvArr harris_dst - int 
blockSize - int aperture_size 3 - double k 0.04 -FindCornerSubPix corners - CvArr image - CvPoint2D32fs corners - CvSize win - CvSize zero_zone - CvTermCriteria criteria -GoodFeaturesToTrack cornerCount - CvArr image - CvArr eigImage - CvArr tempImage - cvpoint2d32f_count cornerCount - double qualityLevel - double minDistance - CvArr mask NULL - int blockSize 3 - int useHarris 0 - double k 0.04 -ExtractSURF keypoints,descriptors - CvArr image - CvArr mask - CvSeqOfCvSURFPoint* keypoints /O - CvSeqOfCvSURFDescriptor* descriptors /O - CvMemStorage storage - CvSURFParams params -GetStarKeypoints CvSeqOfCvStarKeypoint* - CvArr image - CvMemStorage storage - CvStarDetectorParams params cvStarDetectorParams() - -# Sampling, Interpolation and Geometrical Transforms -GetRectSubPix - CvArr src - CvArr dst - CvPoint2D32f center -GetQuadrangleSubPix - CvArr src - CvArr dst - CvMat mapMatrix -Resize - CvArr src - CvArr dst - int interpolation CV_INTER_LINEAR -WarpAffine - CvArr src - CvArr dst - CvMat mapMatrix - int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS - CvScalar fillval cvScalarAll(0) -GetAffineTransform - CvPoint2D32f* src - CvPoint2D32f* dst - CvMat mapMatrix -GetRotationMatrix2D - CvPoint2D32f center - double angle - double scale - CvMat mapMatrix -WarpPerspective - CvArr src - CvArr dst - CvMat mapMatrix - int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS - CvScalar fillval cvScalarAll(0) -GetPerspectiveTransform - CvPoint2D32f* src - CvPoint2D32f* dst - CvMat mapMatrix -Remap - CvArr src - CvArr dst - CvArr mapx - CvArr mapy - int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS - CvScalar fillval cvScalarAll(0) -ConvertMaps - CvArr mapx - CvArr mapy - CvArr mapxy - CvArr mapalpha -LogPolar - CvArr src - CvArr dst - CvPoint2D32f center - double M - int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS - -# Morphological Operations -CreateStructuringElementEx IplConvKernel* - int cols - int rows - int anchorX - int anchorY - int shape - ints values {NULL,0} -Erode - CvArr src - CvArr dst - IplConvKernel* element NULL - int iterations 1 -Dilate - CvArr src - CvArr dst - IplConvKernel* element NULL - int iterations 1 -MorphologyEx - CvArr src - CvArr dst - CvArr temp - IplConvKernel* element - int operation - int iterations 1 - -# Filters and Color Conversion -Smooth - CvArr src - CvArr dst - int smoothtype CV_GAUSSIAN - int param1 3 - int param2 0 - double param3 0 - double param4 0 -Filter2D - CvArr src - CvArr dst - CvMat kernel - CvPoint anchor cvPoint(-1,-1) -CopyMakeBorder - CvArr src - CvArr dst - CvPoint offset - int bordertype - CvScalar value cvScalarAll(0) -Integral - CvArr image - CvArr sum - CvArr sqsum NULL - CvArr tiltedSum NULL -CvtColor - CvArr src - CvArr dst - int code -Threshold - CvArr src - CvArr dst - double threshold - double maxValue - int thresholdType -AdaptiveThreshold - CvArr src - CvArr dst - double maxValue - int adaptive_method CV_ADAPTIVE_THRESH_MEAN_C /ch_adaptive_method - int thresholdType CV_THRESH_BINARY /ch_threshold_type - int blockSize 3 - double param1 5 - -# Pyramids and the Applications -PyrDown - CvArr src - CvArr dst - int filter CV_GAUSSIAN_5x5 -PyrUp - CvArr src - CvArr dst - int filter CV_GAUSSIAN_5x5 -PyrSegmentation comp - IplImage src - IplImage dst - CvMemStorage storage - CvSeq* comp /O - int level - double threshold1 - double threshold2 -PyrMeanShiftFiltering - CvArr src - CvArr dst - double sp - double sr - int max_level 1 - CvTermCriteria termcrit cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1) - -# Image Segmentation, Connected Components and 
Contour Retrieval -FloodFill comp - CvArr image - CvPoint seed_point - CvScalar new_val - CvScalar lo_diff cvScalarAll(0) - CvScalar up_diff cvScalarAll(0) - CvConnectedComp comp /O - int flags 4 - CvArr mask NULL -Watershed - CvArr image - CvArr markers - -# Image and Contour Moments -Moments moments - cvarrseq arr - CvMoments moments /O - int binary 0 -GetSpatialMoment double - CvMoments* moments - int x_order - int y_order -GetCentralMoment double - CvMoments* moments - int x_order - int y_order -GetNormalizedCentralMoment double - CvMoments* moments - int x_order - int y_order - -# Special Image Transforms -HoughLines2 CvSeq* - CvArr image - CvMemStorage storage - int method - double rho - double theta - int threshold - double param1 0 - double param2 0 -HoughCircles - CvArr image - CvMat circle_storage - int method - double dp - double min_dist - double param1 100 - double param2 100 - int min_radius 0 - int max_radius 0 -DistTransform - CvArr src - CvArr dst - int distance_type CV_DIST_L2 - int mask_size 3 - floats mask {NULL,0} - CvArr labels NULL -Inpaint - CvArr src - CvArr mask - CvArr dst - double inpaintRadius - int flags - -# Histograms -ClearHist - CvHistogram hist -CalcArrHist - CvArrs image - CvHistogram hist - int accumulate 0 - CvArr mask NULL -CalcHist - IplImages image - CvHistogram hist - int accumulate 0 - CvArr mask NULL -NormalizeHist - CvHistogram hist - double factor -ThreshHist - CvHistogram hist - double threshold -CompareHist double - CvHistogram hist1 - CvHistogram hist2 - int method -# CopyHist -CalcBackProject - IplImages image - CvArr back_project - CvHistogram hist -CalcArrBackProject - CvArrs image - CvArr back_project - CvHistogram hist -CalcBackProjectPatch - IplImages images - CvArr dst - CvSize patch_size - CvHistogram hist - int method - float factor -CalcProbDensity - CvHistogram hist1 - CvHistogram hist2 - CvHistogram dst_hist - double scale 255 -EqualizeHist - CvArr src - CvArr dst -QueryHistValue_1D double - CvHistogram hist - int idx0 -QueryHistValue_2D double - CvHistogram hist - int idx0 - int idx1 -QueryHistValue_3D double - CvHistogram hist - int idx0 - int idx1 - int idx2 -QueryHistValue_nD double - CvHistogram hist - ints idx - -# Matching -MatchTemplate - CvArr image - CvArr templ - CvArr result - int method -MatchShapes double - CvSeq object1 - CvSeq object2 - int method - double parameter 0 - -# Contour Processing Functions -ApproxChains CvSeq* - CvSeq src_seq - CvMemStorage storage - int method CV_CHAIN_APPROX_SIMPLE - double parameter 0 - int minimal_perimeter 0 - int recursive 0 -BoundingRect CvRect - cvarrseq points - int update 0 -ContourArea double - cvarrseq contour - CvSlice slice CV_WHOLE_SEQ -ArcLength double - cvarrseq curve - CvSlice slice CV_WHOLE_SEQ - int isClosed -1 - -# Computational Geometry -MaxRect CvRect - CvRect* rect1 - CvRect* rect2 -# TODO PointSeqFromMat -BoxPoints points - CvBox2D box - CvPoint2D32f_4 points /O,A -FitEllipse2 CvBox2D - CvArr points -ConvexHull2 CvSeq* - cvarrseq points - CvMemStorage storage - int orientation CV_CLOCKWISE - int return_points 0 -CheckContourConvexity int - cvarrseq contour -ConvexityDefects CvSeqOfCvConvexityDefect* - cvarrseq contour - CvSeq convexhull - CvMemStorage storage -PointPolygonTest double - cvarrseq contour - CvPoint2D32f pt - int measure_dist -MinAreaRect2 CvBox2D - cvarrseq points - CvMemStorage storage NULL -MinEnclosingCircle int,center,radius - cvarrseq points - CvPoint2D32f center /O - float radius /O - -# Planar Subdivisions - -Subdiv2DGetEdge CvSubdiv2DEdge 
- CvSubdiv2DEdge edge - CvNextEdgeType type -Subdiv2DNextEdge CvSubdiv2DEdge - CvSubdiv2DEdge edge -Subdiv2DRotateEdge CvSubdiv2DEdge - CvSubdiv2DEdge edge - int rotate -Subdiv2DEdgeOrg CvSubdiv2DPoint* - CvSubdiv2DEdge edge -Subdiv2DEdgeDst CvSubdiv2DPoint* - CvSubdiv2DEdge edge -CreateSubdivDelaunay2D CvSubdiv2D* - CvRect rect - CvMemStorage storage -SubdivDelaunay2DInsert CvSubdiv2DPoint* - CvSubdiv2D* subdiv - CvPoint2D32f pt -CalcSubdivVoronoi2D - CvSubdiv2D* subdiv -ClearSubdivVoronoi2D - CvSubdiv2D* subdiv -FindNearestPoint2D CvSubdiv2DPoint* - CvSubdiv2D* subdiv - CvPoint2D32f pt - -# Object Detection -HaarDetectObjects CvSeqOfCvAvgComp* - CvArr image - CvHaarClassifierCascade* cascade - CvMemStorage storage - double scale_factor 1.1 /ch_doubleAbove1 - int min_neighbors 3 - int flags 0 - CvSize min_size cvSize(0,0) - -ComputeCorrespondEpilines - CvMat points - int whichImage - CvMat F - CvMat lines -ConvertPointsHomogeneous - CvMat src - CvMat dst -ProjectPoints2 - CvMat objectPoints - CvMat rvec - CvMat tvec - CvMat cameraMatrix - CvMat distCoeffs - CvMat imagePoints - CvMat dpdrot NULL - CvMat dpdt NULL - CvMat dpdf NULL - CvMat dpdc NULL - CvMat dpddist NULL -ReprojectImageTo3D - CvArr disparity - CvArr _3dImage - CvMat Q - int handleMissingValues 0 -RQDecomp3x3 eulerAngles - CvMat M - CvMat R - CvMat Q - CvMat Qx NULL - CvMat Qy NULL - CvMat Qz NULL - CvPoint3D64f eulerAngles /O -FindHomography - CvMat srcPoints - CvMat dstPoints - CvMat H - int method 0 - double ransacReprojThreshold 3.0 - CvMat status NULL -CreateStereoBMState CvStereoBMState* - int preset CV_STEREO_BM_BASIC - int numberOfDisparities 0 -CreateStereoGCState CvStereoGCState* - int numberOfDisparities - int maxIters -FindStereoCorrespondenceBM - CvArr left - CvArr right - CvArr disparity - CvStereoBMState* state -FindStereoCorrespondenceGC - CvArr left - CvArr right - CvArr dispLeft - CvArr dispRight - CvStereoGCState* state - int useDisparityGuess 0 -CalibrateCamera2 - CvMat objectPoints - CvMat imagePoints - CvMat pointCounts - CvSize imageSize - CvMat cameraMatrix - CvMat distCoeffs - CvMat rvecs - CvMat tvecs - int flags 0 -CalibrationMatrixValues fovx,fovy,focalLength,principalPoint,pixelAspectRatio - CvMat calibMatr - CvSize image_size - double apertureWidth 0 - double apertureHeight 0 - double fovx /O - double fovy /O - double focalLength /O - CvPoint2D64f principalPoint /O - double pixelAspectRatio /O -FindExtrinsicCameraParams2 - CvMat objectPoints - CvMat imagePoints - CvMat cameraMatrix - CvMat distCoeffs - CvMat rvec - CvMat tvec - int useExtrinsicGuess 0 -FindFundamentalMat int - CvMat points1 - CvMat points2 - CvMat fundamentalMatrix - int method CV_FM_RANSAC - double param1 1. - double param2 0.99 - CvMat status NULL -StereoCalibrate - CvMat objectPoints - CvMat imagePoints1 - CvMat imagePoints2 - CvMat pointCounts - CvMat cameraMatrix1 - CvMat distCoeffs1 - CvMat cameraMatrix2 - CvMat distCoeffs2 - CvSize imageSize - CvMat R - CvMat T - CvMat E NULL - CvMat F NULL - CvTermCriteria term_crit cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6) - int flags CV_CALIB_FIX_INTRINSIC -GetOptimalNewCameraMatrix - CvMat cameraMatrix - CvMat distCoeffs - CvSize imageSize - double alpha - CvMat newCameraMatrix - CvSize newImageSize cvSize(0,0) - CvRect* validPixROI NULL - int centerPrincipalPoint 0 -InitIntrinsicParams2D - CvMat objectPoints - CvMat imagePoints - CvMat npoints - CvSize imageSize - CvMat cameraMatrix - double aspectRatio 1. 
-StereoRectify roi1,roi2
-  CvMat cameraMatrix1
-  CvMat cameraMatrix2
-  CvMat distCoeffs1
-  CvMat distCoeffs2
-  CvSize imageSize
-  CvMat R
-  CvMat T
-  CvMat R1
-  CvMat R2
-  CvMat P1
-  CvMat P2
-  CvMat Q NULL
-  int flags CV_CALIB_ZERO_DISPARITY
-  double alpha -1
-  CvSize newImageSize cvSize(0,0)
-  CvRect roi1 /O
-  CvRect roi2 /O
-StereoRectifyUncalibrated
-  CvMat points1
-  CvMat points2
-  CvMat F
-  CvSize imageSize
-  CvMat H1
-  CvMat H2
-  double threshold 5
-Rodrigues2
-  CvMat src
-  CvMat dst
-  CvMat jacobian 0
-Undistort2
-  CvArr src
-  CvArr dst
-  CvMat cameraMatrix
-  CvMat distCoeffs
-InitUndistortMap
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvArr map1
-  CvArr map2
-InitUndistortRectifyMap
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvMat R
-  CvMat newCameraMatrix
-  CvArr map1
-  CvArr map2
-UndistortPoints
-  CvMat src
-  CvMat dst
-  CvMat cameraMatrix
-  CvMat distCoeffs
-  CvMat R NULL
-  CvMat P NULL
-DecomposeProjectionMatrix eulerAngles
-  CvMat projMatrix
-  CvMat cameraMatrix
-  CvMat rotMatrix
-  CvMat transVect
-  CvMat rotMatrX NULL
-  CvMat rotMatrY NULL
-  CvMat rotMatrZ NULL
-  CvPoint3D64f eulerAngles /O
-DrawChessboardCorners
-  CvArr image
-  CvSize patternSize
-  CvPoint2D32fs corners
-  int patternWasFound
-
-CreatePOSITObject CvPOSITObject*
-  CvPoint3D32fs points
-POSIT rotationMatrix,translation_vector
-  CvPOSITObject* posit_object
-  CvPoint2D32f* imagePoints
-  double focal_length
-  CvTermCriteria criteria
-  CvMatr32f_i rotationMatrix /O,A
-  CvVect32f_i translation_vector /O,A
-
-EstimateRigidTransform
-  CvArr A
-  CvArr B
-  CvMat M
-  int full_affine
-
-# Accumulation of Background Statistics
-Acc
-  CvArr image
-  CvArr sum
-  CvArr mask NULL
-SquareAcc
-  CvArr image
-  CvArr sqsum
-  CvArr mask NULL
-MultiplyAcc
-  CvArr image1
-  CvArr image2
-  CvArr acc
-  CvArr mask NULL
-RunningAvg
-  CvArr image
-  CvArr acc
-  double alpha
-  CvArr mask NULL
-
-# Motion Templates
-UpdateMotionHistory
-  CvArr silhouette
-  CvArr mhi
-  double timestamp
-  double duration
-CalcMotionGradient
-  CvArr mhi /ch_matF
-  CvArr mask
-  CvArr orientation /ch_matF
-  double delta1
-  double delta2
-  int apertureSize 3 /ch_aperture
-CalcGlobalOrientation double
-  CvArr orientation
-  CvArr mask
-  CvArr mhi
-  double timestamp
-  double duration
-SegmentMotion CvSeq*
-  CvArr mhi
-  CvArr seg_mask
-  CvMemStorage storage
-  double timestamp
-  double seg_thresh
-
-# Object Tracking
-MeanShift comp
-  CvArr prob_image
-  CvRect window
-  CvTermCriteria criteria
-  CvConnectedComp comp /O
-CamShift int,comp,box
-  CvArr prob_image
-  CvRect window
-  CvTermCriteria criteria
-  CvConnectedComp comp /O
-  CvBox2D box /O
-CreateKalman CvKalman*
-  int dynam_params
-  int measure_params
-  int control_params 0
-KalmanCorrect ROCvMat*
-  CvKalman* kalman
-  CvMat measurement
-KalmanPredict ROCvMat*
-  CvKalman* kalman
-  CvMat control NULL
-SnakeImage points
-  IplImage image
-  CvPoints points
-  floats alpha
-  floats beta
-  floats gamma
-  CvSize win
-  CvTermCriteria criteria
-  int calc_gradient 1
-
-# Optical Flow
-CalcOpticalFlowLK
-  CvArr prev
-  CvArr curr
-  CvSize winSize
-  CvArr velx
-  CvArr vely
-CalcOpticalFlowBM
-  CvArr prev /ch_image8
-  CvArr curr /ch_image8
-  CvSize blockSize
-  CvSize shiftSize
-  CvSize max_range
-  int usePrevious
-  CvArr velx /ch_vel
-  CvArr vely /ch_vel
-CalcOpticalFlowHS
-  CvArr prev /ch_image8
-  CvArr curr /ch_image8
-  int usePrevious
-  CvArr velx /ch_vel_64
-  CvArr vely /ch_vel_64
-  double lambda
-  CvTermCriteria criteria
-CalcOpticalFlowFarneback
-  CvArr prev /ch_image8
-  CvArr curr /ch_image8
-  CvArr flow
-  double pyr_scale 0.5
-  int levels 3
-  int winsize 15
-  int iterations 3
-  int poly_n 7
-  double poly_sigma 1.5
-  int flags 0
-
-# Highgui
-ConvertImage
-  CvArr src
-  CvArr dst
-  int flags 0
-NamedWindow
-  char* name
-  int flags CV_WINDOW_AUTOSIZE
-DestroyWindow
-  char* name
-DestroyAllWindows
-ResizeWindow
-  char* name
-  int width
-  int height
-MoveWindow
-  char* name
-  int x
-  int y
-ShowImage
-  char* name
-  CvArr image
-GetTrackbarPos int
-  char* trackbarName
-  char* windowName
-SetTrackbarPos
-  char* trackbarName
-  char* windowName
-  int pos
-#WaitKey int
-#  int delay 0
-SaveImage
-  char* filename
-  CvArr image
-CaptureFromFile CvCapture*
-  char* filename
-CreateFileCapture CvCapture*
-  char* filename
-CaptureFromCAM CvCapture*
-  int index
-CreateCameraCapture CvCapture*
-  int index
-GrabFrame int
-  CvCapture* capture
-RetrieveFrame ROIplImage*
-  CvCapture* capture
-QueryFrame ROIplImage*
-  CvCapture* capture
-GetCaptureProperty double
-  CvCapture* capture
-  int property_id
-SetCaptureProperty int
-  CvCapture* capture
-  int property_id
-  double value
-CreateVideoWriter CvVideoWriter*
-  char* filename
-  int fourcc
-  double fps
-  CvSize frame_size
-  int is_color 1
-WriteFrame int
-  CvVideoWriter* writer
-  IplImage image
-EncodeImage CvMat*
-  char* ext
-  CvArr image
-  ints0 params {&zero,1}
-DecodeImage IplImage*
-  CvMat buf
-  int iscolor CV_LOAD_IMAGE_COLOR
-DecodeImageM CvMat*
-  CvMat buf
-  int iscolor CV_LOAD_IMAGE_COLOR
-StartWindowThread
-SetWindowProperty
-  char* name
-  int prop_id
-  double prop_value
-GetWindowProperty double
-  char* name
-  int prop_id
-
-GetTickCount int64
-GetTickFrequency int64
-
-# cvaux stuff
-HOGDetectMultiScale CvSeq*
-  CvArr image
-  CvMemStorage storage
-  CvArr svm_classifier NULL
-  CvSize win_stride cvSize(0,0)
-  double hit_threshold 0
-  double scale 1.05
-  int group_threshold 2
-  CvSize padding cvSize(0,0)
-  CvSize win_size cvSize(64,128)
-  CvSize block_size cvSize(16,16)
-  CvSize block_stride cvSize(8,8)
-  CvSize cell_size cvSize(8,8)
-  int nbins 9
-  int gammaCorrection 1
-
-GrabCut
-  CvArr image
-  CvArr mask
-  CvRect rect
-  CvArr bgdModel
-  CvArr fgdModel
-  int iterCount
-  int mode
-
-# These functions are handwritten in cv.cpp; they appear here as 'doconly' declarations
-# so that their documentation can be auto-generated
-ApproxPoly /doconly
-  cvarrseq src_seq
-  CvMemStorage storage
-  int method
-  double parameter 0.0
-  int parameter2 0
-CalcEMD2 float /doconly
-  CvArr signature1
-  CvArr signature2
-  int distance_type
-  PyCallableObject* distance_func NULL
-  CvArr cost_matrix NULL
-  CvArr flow NULL
-  float lower_bound 0.0
-  PyObject* userdata NULL
-CalcOpticalFlowPyrLK currFeatures,status,track_error /doconly
-  CvArr prev
-  CvArr curr
-  CvArr prevPyr
-  CvArr currPyr
-  CvPoint2D32f* prevFeatures
-  CvSize winSize
-  int level
-  CvTermCriteria criteria
-  int flags
-  CvPoint2D32f* guesses NULL
-  CvPoint2D32f currFeatures /O
-  char status /O
-  float track_error /O
-ClipLine point1,point2 /doconly
-  CvSize imgSize
-  CvPoint pt1
-  CvPoint pt2
-CreateData /doconly
-  CvArr arr
-CreateHist CvHistogram /doconly
-  ints dims
-  int type
-  ranges ranges None
-  int uniform 1
-CreateImageHeader IplImage* /doconly
-  CvSize size
-  int depth
-  int channels
-CreateImage IplImage* /doconly
-  CvSize size
-  int depth
-  int channels
-CreateMatHeader CvMat /doconly
-  int rows
-  int cols
-  int type
-CreateMat CvMat /doconly
-  int rows
-  int cols
-  int type
-CreateMatNDHeader CvMatND /doconly
-  ints dims
-  int type
-CreateMatND CvMatND /doconly
-  ints dims
-  int type
-CreateMemStorage CvMemStorage /doconly
-  int blockSize 0
-CreateTrackbar /doconly
-  char* trackbarName
-  char* windowName
-  int value
-  int count
-  PyCallableObject* onChange
-FindChessboardCorners corners /doconly
-  CvArr image
-  CvSize patternSize
-  CvPoint2D32fs corners /O
-  int flags CV_CALIB_CB_ADAPTIVE_THRESH
-FindContours CvSeq /doconly
-  CvArr image
-  CvMemStorage storage
-  int mode CV_RETR_LIST
-  int method CV_CHAIN_APPROX_SIMPLE
-  CvPoint offset (0,0)
-FitLine line /doconly
-  CvArr points
-  int dist_type
-  double param
-  double reps
-  double aeps
-  PyObject* line /O
-GetDims dim1,dim2,... /doconly
-  CvArr arr
-GetHuMoments hu /doconly
-  CvMoments moments
-  PyObject* hu /O
-GetImage iplimage /doconly
-  CvMat arr
-GetMat CvMat /doconly
-  IplImage arr
-  int allowND 0
-GetMinMaxHistValue min_value,max_value,min_idx,max_idx /doconly
-  CvHistogram hist
-  CvScalar min_value /O
-  CvScalar max_value /O
-  ints min_idx /O
-  ints max_idx /O
-InitLineIterator line_iterator /doconly
-  CvArr image
-  CvPoint pt1
-  CvPoint pt2
-  iter line_iterator /O
-  int connectivity 8
-  int left_to_right 0
-LoadImageM /doconly
-  char* filename
-  int iscolor CV_LOAD_IMAGE_COLOR
-LoadImage /doconly
-  char* filename
-  int iscolor CV_LOAD_IMAGE_COLOR
-ReshapeMatND CvMat /doconly
-  CvMat arr
-  int newCn
-  ints newDims
-Reshape CvMat /doconly
-  CvArr arr
-  int newCn
-  int newRows 0
-SetData /doconly
-  CvArr arr
-  PyObject* data
-  int step
-SetMouseCallback /doconly
-  char* windowName
-  PyCallableObject* onMouse
-  PyObject* param None
-Subdiv2DLocate loc,where /doconly
-  CvSubdiv2D* subdiv
-  CvPoint2D32f pt
-  int loc /O
-  edgeorpoint where /O
-WaitKey int /doconly
-  int delay 0
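The `api` listing deleted above is the interface-definition file the legacy wrapper generator consumed: each entry names a wrapped C function, optionally a return type or a comma-separated output list, followed by indented parameters with optional defaults and markers such as `/O` (output) or `/doconly`. As a rough, hypothetical sketch only — the real generator output (presumably `gen.py` in this directory) used its own naming and error plumbing — an entry like `WaitKey int` with `int delay 0` corresponds to a CPython stub of this shape:

```cpp
// Hypothetical sketch of the wrapper shape implied by the api entry
// "WaitKey int / int delay 0"; not the actual generated code.
#include <Python.h>
#include "opencv2/highgui/highgui_c.h"

static PyObject* pycvWaitKey(PyObject*, PyObject* args, PyObject* kw)
{
    static const char* keywords[] = { "delay", NULL };
    int delay = 0;                     // default value taken from the api entry
    if (!PyArg_ParseTupleAndKeywords(args, kw, "|i", (char**)keywords, &delay))
        return NULL;
    int r = cvWaitKey(delay);          // the wrapped C call
    return PyInt_FromLong(r);          // "int" return type from the entry
}
```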
diff --git a/modules/python/src2/cv.py b/modules/python/src2/cv.py
deleted file mode 100644
index 2d4daf08b0..0000000000
--- a/modules/python/src2/cv.py
+++ /dev/null
@@ -1 +0,0 @@
-from cv2.cv import *
diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp
index 7d5fbaee96..af015eb8da 100644
--- a/modules/python/src2/cv2.cpp
+++ b/modules/python/src2/cv2.cpp
@@ -1,12 +1,7 @@
 #include <Python.h>
 
-#if !PYTHON_USE_NUMPY
-#error "The module can only be built if NumPy is available"
-#endif
-
 #define MODULESTR "cv2"
-
-#include "numpy/ndarrayobject.h"
+#include <numpy/ndarrayobject.h>
 
 #include "opencv2/core.hpp"
 #include "opencv2/core/utility.hpp"
@@ -14,7 +9,6 @@
 #include "opencv2/flann/miniflann.hpp"
 #include "opencv2/imgproc.hpp"
 #include "opencv2/calib3d.hpp"
-#include "opencv2/ml.hpp"
 #include "opencv2/features2d.hpp"
 #include "opencv2/objdetect.hpp"
 #include "opencv2/softcascade.hpp"
@@ -22,13 +16,9 @@
 #include "opencv2/photo.hpp"
 #include "opencv2/highgui.hpp"
 
-#include "opencv2/highgui/highgui_c.h"
-#include "opencv2/photo/photo_c.h"
-#include "opencv2/video/tracking_c.h"
-#include "opencv2/objdetect/objdetect_c.h"
+#include "opencv2/ml.hpp"
 
 #include "opencv2/opencv_modules.hpp"
-
 #ifdef HAVE_OPENCV_NONFREE
 # include "opencv2/nonfree.hpp"
 #endif
@@ -196,19 +186,14 @@ public:
             depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
             depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT;
         int i;
-        npy_intp _sizes[CV_MAX_DIM+1];
+        cv::AutoBuffer<npy_intp> _sizes(dims + 1);
         for( i = 0; i < dims; i++ )
             _sizes[i] = sizes[i];
         if( cn > 1 )
-        {
-            /*if( _sizes[dims-1] == 1 )
-                _sizes[dims-1] = cn;
-            else*/
-                _sizes[dims++] = cn;
-        }
+            _sizes[dims++] = cn;
         PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
         if(!o)
-            CV_Error_(CV_StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
+            CV_Error_(Error::StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
         refcount = refcountFromPyObject(o);
         npy_intp* _strides = PyArray_STRIDES(o);
         for( i = 0; i < dims - (cn > 1); i++ )
@@ -229,11 +214,19 @@ public:
 
 NumpyAllocator g_numpyAllocator;
 
+
+template<typename T> static
+bool pyopencv_to(PyObject* obj, T& p, const char* name = "<unknown>");
+
+template<typename T> static
+PyObject* pyopencv_from(const T& src);
+
 enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
 
 // special case, when the convertor needs full ArgInfo structure
-static int pyopencv_to(const PyObject* o, Mat& m, const ArgInfo info, bool allowND=true)
+static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
 {
+    bool allowND = true;
     if(!o || o == Py_None)
     {
         if( !m.data )
@@ -306,6 +299,10 @@ static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
         }
     }
 
+#ifndef CV_MAX_DIM
+    const int CV_MAX_DIM = 32;
+#endif
+
     int ndims = PyArray_NDIM(o);
     if(ndims >= CV_MAX_DIM)
     {
@@ -314,7 +311,8 @@ static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
     }
 
     int size[CV_MAX_DIM+1];
-    size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
+    size_t step[CV_MAX_DIM+1];
+    size_t elemsize = CV_ELEM_SIZE1(type);
     const npy_intp* _sizes = PyArray_DIMS(o);
     const npy_intp* _strides = PyArray_STRIDES(o);
     bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
@@ -388,7 +386,8 @@ static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
     return true;
 }
 
-static PyObject* pyopencv_from(const Mat& m)
+template<>
+PyObject* pyopencv_from(const Mat& m)
 {
     if( !m.data )
         Py_RETURN_NONE;
@@ -403,7 +402,8 @@ static PyObject* pyopencv_from(const Mat& m)
     return pyObjectFromRefcount(p->refcount);
 }
 
-static bool pyopencv_to(PyObject *o, Scalar& s, const char *name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject *o, Scalar& s, const char *name)
 {
     if(!o || o == Py_None)
         return true;
@@ -437,17 +437,20 @@ static bool pyopencv_to(PyObject *o, Scalar& s, const char *name = "<unknown>")
     return true;
 }
 
-static inline PyObject* pyopencv_from(const Scalar& src)
+template<>
+PyObject* pyopencv_from(const Scalar& src)
 {
     return Py_BuildValue("(dddd)", src[0], src[1], src[2], src[3]);
 }
 
-static PyObject* pyopencv_from(bool value)
+template<>
+PyObject* pyopencv_from(const bool& value)
 {
     return PyBool_FromLong(value);
 }
 
-static bool pyopencv_to(PyObject* obj, bool& value, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, bool& value, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -459,12 +462,14 @@ static bool pyopencv_to(PyObject* obj, bool& value, const char* name = "<unknown>")
     return true;
 }
 
-static PyObject* pyopencv_from(size_t value)
+template<>
+PyObject* pyopencv_from(const size_t& value)
 {
     return PyLong_FromSize_t(value);
 }
 
-static bool pyopencv_to(PyObject* obj, size_t& value, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, size_t& value, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -473,22 +478,33 @@ static bool pyopencv_to(PyObject* obj, size_t& value, const char* name = "<unknown>")
     return true;
 }
 
-static PyObject* pyopencv_from(int value)
+template<>
+PyObject* pyopencv_from(const int& value)
 {
     return PyInt_FromLong(value);
 }
 
-static PyObject* pyopencv_from(cvflann_flann_algorithm_t value)
+template<>
+PyObject* pyopencv_from(const cvflann_flann_algorithm_t& value)
 {
     return PyInt_FromLong(int(value));
 }
 
-static PyObject* pyopencv_from(cvflann_flann_distance_t value)
+template<>
+PyObject* pyopencv_from(const cvflann_flann_distance_t& value)
 {
     return PyInt_FromLong(int(value));
 }
 
-static bool pyopencv_to(PyObject* obj, int& value, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject*, cv::flann::SearchParams &, const char *)
+{
+    CV_Assert(!"not implemented");
+    return false;
+}
+
+template<>
+bool pyopencv_to(PyObject* obj, int& value, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -502,12 +518,14 @@ static bool pyopencv_to(PyObject* obj, int& value, const char* name = "<unknown>")
     return value != -1 || !PyErr_Occurred();
 }
 
-static PyObject* pyopencv_from(uchar value)
+template<>
+PyObject* pyopencv_from(const uchar& value)
 {
     return PyInt_FromLong(value);
 }
 
-static bool pyopencv_to(PyObject* obj, uchar& value, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, uchar& value, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -517,12 +535,14 @@ static bool pyopencv_to(PyObject* obj, uchar& value, const char* name = "<unknown>")
     return true;
 }
 
-static PyObject* pyopencv_from(double value)
+template<>
+PyObject* pyopencv_from(const double& value)
 {
     return PyFloat_FromDouble(value);
 }
 
-static bool pyopencv_to(PyObject* obj, double& value, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, double& value, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -534,12 +554,14 @@ static bool pyopencv_to(PyObject* obj, double& value, const char* name = "<unknown>")
     return true;
 }
 
-static PyObject* pyopencv_from(float value)
+template<>
+PyObject* pyopencv_from(const float& value)
 {
     return PyFloat_FromDouble(value);
 }
 
-static bool pyopencv_to(PyObject* obj, float& value, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, float& value, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -551,17 +573,20 @@ static bool pyopencv_to(PyObject* obj, float& value, const char* name = "<unknown>")
     return true;
 }
 
-static PyObject* pyopencv_from(int64 value)
+template<>
+PyObject* pyopencv_from(const int64& value)
 {
     return PyLong_FromLongLong(value);
 }
 
-static PyObject* pyopencv_from(const String& value)
+template<>
+PyObject* pyopencv_from(const String& value)
 {
     return PyString_FromString(value.empty() ? "" : value.c_str());
 }
 
-static bool pyopencv_to(PyObject* obj, String& value, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, String& value, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -573,7 +598,8 @@ static bool pyopencv_to(PyObject* obj, String& value, const char* name = "<unknown>")
     return true;
 }
 
-static inline bool pyopencv_to(PyObject* obj, Size& sz, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, Size& sz, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -581,12 +607,14 @@ static inline bool pyopencv_to(PyObject* obj, Size& sz, const char* name = "<unknown>")
         return true;
     return PyArg_ParseTuple(obj, "ii", &sz.width, &sz.height) > 0;
 }
 
-static inline PyObject* pyopencv_from(const Size& sz)
+template<>
+PyObject* pyopencv_from(const Size& sz)
 {
     return Py_BuildValue("(ii)", sz.width, sz.height);
 }
 
-static inline bool pyopencv_to(PyObject* obj, Rect& r, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, Rect& r, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -594,12 +622,14 @@ static inline bool pyopencv_to(PyObject* obj, Rect& r, const char* name = "<unknown>")
         return true;
     return PyArg_ParseTuple(obj, "iiii", &r.x, &r.y, &r.width, &r.height) > 0;
 }
 
-static inline PyObject* pyopencv_from(const Rect& r)
+template<>
+PyObject* pyopencv_from(const Rect& r)
 {
     return Py_BuildValue("(iiii)", r.x, r.y, r.width, r.height);
 }
 
-static inline bool pyopencv_to(PyObject* obj, Range& r, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, Range& r, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -612,30 +642,14 @@ static inline bool pyopencv_to(PyObject* obj, Range& r, const char* name = "<unknown>")
        return true;
    return PyArg_ParseTuple(obj, "ii", &r.start, &r.end) > 0;
 }
 
-static inline PyObject* pyopencv_from(const Range& r)
+template<>
+PyObject* pyopencv_from(const Range& r)
 {
     return Py_BuildValue("(ii)", r.start, r.end);
 }
 
-static inline bool pyopencv_to(PyObject* obj, CvSlice& r, const char* name = "<unknown>")
-{
-    (void)name;
-    if(!obj || obj == Py_None)
-        return true;
-    if(PyObject_Size(obj) == 0)
-    {
-        r = CV_WHOLE_SEQ;
-        return true;
-    }
-    return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0;
-}
-
-static inline PyObject* pyopencv_from(const CvSlice& r)
-{
-    return Py_BuildValue("(ii)", r.start_index, r.end_index);
-}
-
-static inline bool pyopencv_to(PyObject* obj, Point& p, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, Point& p, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -650,7 +664,8 @@ static inline bool pyopencv_to(PyObject* obj, Point& p, const char* name = "<unknown>")
     return PyArg_ParseTuple(obj, "ii", &p.x, &p.y) > 0;
 }
 
-static inline bool pyopencv_to(PyObject* obj, Point2f& p, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, Point2f& p, const char* name)
 {
     (void)name;
     if(!obj || obj == Py_None)
@@ -665,17 +680,20 @@ static inline bool pyopencv_to(PyObject* obj, Point2f& p, const char* name = "<unknown>")
     return PyArg_ParseTuple(obj, "ff", &p.x, &p.y) > 0;
 }
 
-static inline PyObject* pyopencv_from(const Point& p)
+template<>
+PyObject* pyopencv_from(const Point& p)
 {
     return Py_BuildValue("(ii)", p.x, p.y);
 }
 
-static inline PyObject* pyopencv_from(const Point2f& p)
+template<>
+PyObject* pyopencv_from(const Point2f& p)
 {
     return Py_BuildValue("(dd)", p.x, p.y);
 }
 
-static inline bool pyopencv_to(PyObject* obj, Vec3d& v, const char* name = "<unknown>")
+template<>
+bool pyopencv_to(PyObject* obj, Vec3d& v, const char* name)
 {
     (void)name;
     if(!obj)
@@ -683,17 +701,20 @@ static inline bool pyopencv_to(PyObject* obj, Vec3d& v, const char* name = "<unknown>")
        return true;
    return PyArg_ParseTuple(obj, "ddd", &v[0], &v[1], &v[2]) > 0;
 }
 
-static inline PyObject* pyopencv_from(const Vec3d& v)
+template<>
+PyObject* pyopencv_from(const Vec3d& v)
 {
     return Py_BuildValue("(ddd)", v[0], v[1], v[2]);
 }
 
-static inline PyObject* pyopencv_from(const Vec2d& v)
+template<>
+PyObject* pyopencv_from(const Vec2d& v)
 {
     return Py_BuildValue("(dd)", v[0], v[1]);
 }
 
-static inline PyObject* pyopencv_from(const Point2d& p)
+template<>
+PyObject* pyopencv_from(const Point2d& p)
 {
     return Py_BuildValue("(dd)", p.x, p.y);
 }
 
@@ -807,20 +828,18 @@ template<typename _Tp> struct pyopencvVecConverter
     }
 };
 
-
-template<typename _Tp> static inline bool pyopencv_to(PyObject* obj, std::vector<_Tp>& value, const ArgInfo info)
+template<typename _Tp>
+bool pyopencv_to(PyObject* obj, std::vector<_Tp>& value, const ArgInfo info)
 {
     return pyopencvVecConverter<_Tp>::to(obj, value, info);
 }
 
-template<typename _Tp> static inline PyObject* pyopencv_from(const std::vector<_Tp>& value)
+template<typename _Tp>
+PyObject* pyopencv_from(const std::vector<_Tp>& value)
 {
     return pyopencvVecConverter<_Tp>::from(value);
 }
 
-static PyObject* pyopencv_from(const KeyPoint&);
-static PyObject* pyopencv_from(const DMatch&);
-
 template<typename _Tp> static inline bool pyopencv_to_generic_vec(PyObject* obj, std::vector<_Tp>& value, const ArgInfo info)
 {
     if(!obj || obj == Py_None)
@@ -930,21 +949,8 @@ template<> struct pyopencvVecConverter
     }
 };
 
-
-static inline bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const char *name="<unknown>")
-{
-    (void)name;
-    if(!obj)
-        return true;
-    return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.max_iter, &dst.epsilon) > 0;
-}
-
-static inline PyObject* pyopencv_from(const CvTermCriteria& src)
-{
-    return Py_BuildValue("(iid)", src.type, src.max_iter, src.epsilon);
-}
-
-static inline bool pyopencv_to(PyObject *obj, TermCriteria& dst, const char *name="<unknown>")
+template<>
+bool pyopencv_to(PyObject *obj, TermCriteria& dst, const char *name)
 {
     (void)name;
     if(!obj)
@@ -952,12 +958,14 @@ static inline bool pyopencv_to(PyObject *obj, TermCriteria& dst, const char *name="<unknown>")
         return true;
     return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.maxCount, &dst.epsilon) > 0;
 }
 
-static inline PyObject* pyopencv_from(const TermCriteria& src)
+template<>
+PyObject* pyopencv_from(const TermCriteria& src)
 {
     return Py_BuildValue("(iid)", src.type, src.maxCount, src.epsilon);
 }
 
-static inline bool pyopencv_to(PyObject *obj, RotatedRect& dst, const char *name="<unknown>")
+template<>
+bool pyopencv_to(PyObject *obj, RotatedRect& dst, const char *name)
 {
     (void)name;
     if(!obj)
@@ -965,12 +973,14 @@ static inline bool pyopencv_to(PyObject *obj, RotatedRect& dst, const char *name="<unknown>")
         return true;
     return PyArg_ParseTuple(obj, "(ff)(ff)f", &dst.center.x, &dst.center.y, &dst.size.width, &dst.size.height, &dst.angle) > 0;
 }
 
-static inline PyObject* pyopencv_from(const RotatedRect& src)
+template<>
+PyObject* pyopencv_from(const RotatedRect& src)
 {
     return Py_BuildValue("((ff)(ff)f)", src.center.x, src.center.y, src.size.width, src.size.height, src.angle);
 }
 
-static inline PyObject* pyopencv_from(const Moments& m)
+template<>
+PyObject* pyopencv_from(const Moments& m)
 {
     return Py_BuildValue("{s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d,s:d}",
                          "m00", m.m00, "m10", m.m10, "m01", m.m01,
@@ -982,14 +992,8 @@ static inline PyObject* pyopencv_from(const Moments& m)
                          "nu30", m.nu30, "nu21", m.nu21, "nu12", m.nu12, "nu03", m.nu03);
 }
 
-static inline PyObject* pyopencv_from(const CvDTreeNode* node)
-{
-    double value = node->value;
-    int ivalue = cvRound(value);
-    return value == ivalue ? PyInt_FromLong(ivalue) : PyFloat_FromDouble(value);
-}
-
-static bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name="<unknown>")
+template<>
+bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name)
 {
     (void)name;
     bool ok = false;
@@ -1037,15 +1041,15 @@ static bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name="<unknown>")
     return ok;
 }
 
-template<typename T>
-static bool pyopencv_to(PyObject *o, Ptr<T>& p, const char *name="<unknown>")
+template<typename T>
+bool pyopencv_to(PyObject *o, Ptr<T>& p, const char *name)
 {
     p = new T();
     return pyopencv_to(o, *p, name);
 }
 
-
-static bool pyopencv_to(PyObject *o, cvflann::flann_distance_t& dist, const char *name="<unknown>")
+template<>
+bool pyopencv_to(PyObject *o, cvflann::flann_distance_t& dist, const char *name)
 {
     int d = (int)dist;
     bool ok = pyopencv_to(o, d, name);
@@ -1053,6 +1057,41 @@ static bool pyopencv_to(PyObject *o, cvflann::flann_distance_t& dist, const char *name="<unknown>")
     return ok;
 }
 
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// TODO: REMOVE used only by ml wrapper
+
+template<>
+bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const char *name)
+{
+    (void)name;
+    if(!obj)
+        return true;
+    return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.max_iter, &dst.epsilon) > 0;
+}
+
+template<>
+bool pyopencv_to(PyObject* obj, CvSlice& r, const char* name)
+{
+    (void)name;
+    if(!obj || obj == Py_None)
+        return true;
+    if(PyObject_Size(obj) == 0)
+    {
+        r = CV_WHOLE_SEQ;
+        return true;
+    }
+    return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0;
+}
+
+template<>
+PyObject* pyopencv_from(CvDTreeNode* const & node)
+{
+    double value = node->value;
+    int ivalue = cvRound(value);
+    return value == ivalue ? PyInt_FromLong(ivalue) : PyFloat_FromDouble(value);
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 
 static void OnMouse(int event, int x, int y, int flags, void* param)
 {
@@ -1088,7 +1127,7 @@ static PyObject *pycvSetMouseCallback(PyObject*, PyObject *args, PyObject *kw)
     if (param == NULL) {
         param = Py_None;
     }
-    ERRWRAP2(cvSetMouseCallback(name, OnMouse, Py_BuildValue("OO", on_mouse, param)));
+    ERRWRAP2(setMouseCallback(name, OnMouse, Py_BuildValue("OO", on_mouse, param)));
     Py_RETURN_NONE;
 }
 
@@ -1120,12 +1159,23 @@ static PyObject *pycvCreateTrackbar(PyObject*, PyObject *args)
         PyErr_SetString(PyExc_TypeError, "on_change must be callable");
         return NULL;
     }
-    ERRWRAP2(cvCreateTrackbar2(trackbar_name, window_name, value, count, OnChange, Py_BuildValue("OO", on_change, Py_None)));
+    ERRWRAP2(createTrackbar(trackbar_name, window_name, value, count, OnChange, Py_BuildValue("OO", on_change, Py_None)));
     Py_RETURN_NONE;
 }
 
 ///////////////////////////////////////////////////////////////////////////////////////
 
+static int convert_to_char(PyObject *o, char *dst, const char *name = "no_name")
+{
+    if (PyString_Check(o) && PyString_Size(o) == 1) {
+        *dst = PyString_AsString(o)[0];
+        return 1;
+    } else {
+        (*dst) = 0;
+        return failmsg("Expected single character string for argument '%s'", name);
+    }
+}
+
 #define MKTYPE2(NAME) pyopencv_##NAME##_specials(); if (!to_ok(&pyopencv_##NAME##_Type)) return
 
 #ifdef __GNUC__
@@ -1155,8 +1205,6 @@ static int to_ok(PyTypeObject *to)
     return (PyType_Ready(to) == 0);
 }
 
-#include "cv2.cv.hpp"
-
 extern "C"
 #if defined WIN32 || defined _WIN32
 __declspec(dllexport)
 #endif
 void initcv2();
 
@@ -1165,13 +1213,9 @@ void initcv2()
 {
-#if PYTHON_USE_NUMPY
-    import_array();
-#endif
+    import_array();
 
-#if PYTHON_USE_NUMPY
 #include "pyopencv_generated_type_reg.h"
-#endif
 
     PyObject* m = Py_InitModule(MODULESTR, methods);
     PyObject* d = PyModule_GetDict(m);
@@ -1181,27 +1225,10 @@ void initcv2()
     opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, NULL);
     PyDict_SetItemString(d, "error", opencv_error);
 
-    PyObject* cv_m = init_cv();
-
-    PyDict_SetItemString(d, "cv", cv_m);
-
 #define PUBLISH(I) PyDict_SetItemString(d, #I, PyInt_FromLong(I))
-#define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I))
+//#define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I))
 #define PUBLISH2(I, value) PyDict_SetItemString(d, #I, PyLong_FromLong(value))
 
-    PUBLISHU(IPL_DEPTH_8U);
-    PUBLISHU(IPL_DEPTH_8S);
-    PUBLISHU(IPL_DEPTH_16U);
-    PUBLISHU(IPL_DEPTH_16S);
-    PUBLISHU(IPL_DEPTH_32S);
-    PUBLISHU(IPL_DEPTH_32F);
-    PUBLISHU(IPL_DEPTH_64F);
-
-    PUBLISH(CV_LOAD_IMAGE_COLOR);
-    PUBLISH(CV_LOAD_IMAGE_GRAYSCALE);
-    PUBLISH(CV_LOAD_IMAGE_UNCHANGED);
-    PUBLISH(CV_HIST_ARRAY);
-    PUBLISH(CV_HIST_SPARSE);
     PUBLISH(CV_8U);
     PUBLISH(CV_8UC1);
     PUBLISH(CV_8UC2);
@@ -1237,37 +1264,7 @@ void initcv2()
     PUBLISH(CV_64FC2);
     PUBLISH(CV_64FC3);
     PUBLISH(CV_64FC4);
-    PUBLISH(CV_NEXT_AROUND_ORG);
-    PUBLISH(CV_NEXT_AROUND_DST);
-    PUBLISH(CV_PREV_AROUND_ORG);
-    PUBLISH(CV_PREV_AROUND_DST);
-    PUBLISH(CV_NEXT_AROUND_LEFT);
-    PUBLISH(CV_NEXT_AROUND_RIGHT);
-    PUBLISH(CV_PREV_AROUND_LEFT);
-    PUBLISH(CV_PREV_AROUND_RIGHT);
-
-    PUBLISH(CV_WINDOW_AUTOSIZE);
-
-    PUBLISH(CV_PTLOC_INSIDE);
-    PUBLISH(CV_PTLOC_ON_EDGE);
-    PUBLISH(CV_PTLOC_VERTEX);
-    PUBLISH(CV_PTLOC_OUTSIDE_RECT);
-
-    PUBLISH(GC_BGD);
-    PUBLISH(GC_FGD);
-    PUBLISH(GC_PR_BGD);
-    PUBLISH(GC_PR_FGD);
-    PUBLISH(GC_INIT_WITH_RECT);
-    PUBLISH(GC_INIT_WITH_MASK);
-    PUBLISH(GC_EVAL);
-
-    PUBLISH(CV_ROW_SAMPLE);
-    PUBLISH(CV_VAR_NUMERICAL);
-    PUBLISH(CV_VAR_ORDERED);
-    PUBLISH(CV_VAR_CATEGORICAL);
-
-    PUBLISH(CV_AA);
-
 #include "pyopencv_generated_const_reg.h"
-}
+}
diff --git a/modules/python/src2/cv2.cv.hpp b/modules/python/src2/cv2.cv.hpp
deleted file mode 100644
index eb406af62b..0000000000
--- a/modules/python/src2/cv2.cv.hpp
+++ /dev/null
@@ -1,4054 +0,0 @@
-#include "opencv2/legacy.hpp"
-#include "opencv2/legacy/compat.hpp"
-#include "opencv2/highgui/highgui_c.h"
-
-#define OLD_MODULESTR "cv2.cv"
-
-struct memtrack_t {
-  PyObject_HEAD
-  int owner;
-  void *ptr;
-  int freeptr;
-  Py_ssize_t size;
-  PyObject *backing;
-  CvArr *backingmat;
-};
-
-struct iplimage_t {
-  PyObject_HEAD
-  IplImage *a;
-  PyObject *data;
-  size_t offset;
-};
-
-struct cvmat_t {
-  PyObject_HEAD
-  CvMat *a;
-  PyObject *data;
-  size_t offset;
-};
-
-struct cvmatnd_t {
-  PyObject_HEAD
-  CvMatND *a;
-  PyObject *data;
-  size_t offset;
-};
-
-struct cvhistogram_t {
-  PyObject_HEAD
-  CvHistogram h;
-  PyObject *bins;
-};
-
-struct cvmemstorage_t {
-  PyObject_HEAD
-  CvMemStorage *a;
-};
-
-struct cvseq_t {
-  PyObject_HEAD
-  CvSeq *a;
-  PyObject *container;  // Containing cvmemstorage_t
-};
-
-struct cvset_t {
-  PyObject_HEAD
-  CvSet *a;
-  PyObject *container;  // Containing cvmemstorage_t
-  int i;
-};
-
-struct cvsubdiv2d_t {
-  PyObject_HEAD
-  CvSubdiv2D *a;
-  PyObject *container;  // Containing cvmemstorage_t
-};
-
-struct cvsubdiv2dpoint_t {
-  PyObject_HEAD
-  CvSubdiv2DPoint *a;
-  PyObject *container;  // Containing cvmemstorage_t
-};
-
-struct cvsubdiv2dedge_t {
-  PyObject_HEAD
-  CvSubdiv2DEdge a;
-  PyObject *container;  // Containing cvmemstorage_t
-};
-
-struct cvlineiterator_t {
-  PyObject_HEAD
-  CvLineIterator iter;
-  int count;
-  int type;
-};
-
-typedef IplImage ROIplImage;
-typedef const CvMat ROCvMat;
-typedef PyObject PyCallableObject;
-
-struct cvfont_t {
-  PyObject_HEAD
-  CvFont a;
-};
-
-struct cvcontourtree_t {
-  PyObject_HEAD
-  CvContourTree *a;
-};
-
-struct cvrng_t {
-  PyObject_HEAD
-  CvRNG a;
-};
-
-static int is_iplimage(PyObject *o);
-static int is_cvmat(PyObject *o);
-static int is_cvmatnd(PyObject *o);
-static int convert_to_CvArr(PyObject *o, CvArr **dst, const char *name = "no_name");
-static int convert_to_IplImage(PyObject *o, IplImage **dst, const char *name = "no_name");
-static int convert_to_CvMat(PyObject *o, CvMat **dst, const char *name = "no_name");
-static int convert_to_CvMatND(PyObject *o, CvMatND **dst, const char *name = "no_name");
-static PyObject *what_data(PyObject *o);
-static PyObject *FROM_CvMat(CvMat *r);
-static PyObject *FROM_ROCvMatPTR(ROCvMat *r);
-static PyObject *shareDataND(PyObject *donor, CvMatND *pdonor, CvMatND *precipient);
-
-#define FROM_double(r)  PyFloat_FromDouble(r)
-#define FROM_float(r)  PyFloat_FromDouble(r)
-#define FROM_int(r)  PyInt_FromLong(r)
-#define FROM_int64(r)  PyLong_FromLongLong(r)
-#define FROM_unsigned(r)  PyLong_FromUnsignedLong(r)
-#define FROM_CvBox2D(r) Py_BuildValue("(ff)(ff)f", r.center.x, r.center.y, r.size.width, r.size.height, r.angle)
-#define FROM_CvScalar(r)  Py_BuildValue("(ffff)", r.val[0], r.val[1], r.val[2], r.val[3])
-#define FROM_CvPoint(r)  Py_BuildValue("(ii)", r.x, r.y)
-#define FROM_CvPoint2D32f(r) Py_BuildValue("(ff)", r.x, r.y)
-#define FROM_CvPoint3D64f(r) Py_BuildValue("(fff)", r.x, r.y, r.z)
-#define FROM_CvSize(r) Py_BuildValue("(ii)", r.width, r.height)
-#define FROM_CvRect(r) Py_BuildValue("(iiii)", r.x, r.y, r.width, r.height)
-#define FROM_CvSeqPTR(r) _FROM_CvSeqPTR(r, pyobj_storage)
-#define FROM_CvSubdiv2DPTR(r) _FROM_CvSubdiv2DPTR(r, pyobj_storage)
-#define FROM_CvPoint2D64f(r) Py_BuildValue("(ff)", r.x, r.y)
-#define FROM_CvConnectedComp(r) Py_BuildValue("(fNN)", (r).area, FROM_CvScalar((r).value), FROM_CvRect((r).rect))
-
-#if PYTHON_USE_NUMPY
-static PyObject *fromarray(PyObject *o, int allowND);
-#endif
-
-static void translate_error_to_exception(void)
-{
-  PyErr_SetString(opencv_error, cvErrorStr(cvGetErrStatus()));
-  cvSetErrStatus(0);
-}
-
-#define ERRCHK do { if (cvGetErrStatus() != 0) { translate_error_to_exception(); return 0; } } while (0)
-#define ERRWRAPN(F, N) \
-    do { \
-        try \
-        { \
-            F; \
-        } \
-        catch (const cv::Exception &e) \
-        { \
-           PyErr_SetString(opencv_error, e.err.c_str()); \
-           return N; \
-        } \
-        ERRCHK; \
-    } while(0)
-#define ERRWRAP(F) ERRWRAPN(F, NULL) // for most functions, exception -> NULL return
-
-/************************************************************************/
-
-/* These get/setters are polymorphic, used in both iplimage and cvmat */
-
-static PyObject *PyObject_FromCvScalar(CvScalar s, int type)
-{
-  int i, spe = CV_MAT_CN(type);
-  PyObject *r;
-  if (spe > 1) {
-    r = PyTuple_New(spe);
-    for (i = 0; i < spe; i++)
-      PyTuple_SET_ITEM(r, i, PyFloat_FromDouble(s.val[i]));
-  } else {
-    r = PyFloat_FromDouble(s.val[0]);
-  }
-  return r;
-}
-
-static PyObject *cvarr_GetItem(PyObject *o, PyObject *key);
-static int cvarr_SetItem(PyObject *o, PyObject *key, PyObject *v);
-
-// o is a Python string or buffer object.  Return its size.
-
-static Py_ssize_t what_size(PyObject *o)
-{
-  void *buffer;
-  Py_ssize_t buffer_len;
-
-  if (PyString_Check(o)) {
-    return PyString_Size(o);
-  } else if (PyObject_AsWriteBuffer(o, &buffer, &buffer_len) == 0) {
-    return buffer_len;
-  } else {
-    assert(0);  // argument must be string or buffer.
-    return 0;
-  }
-}
-
-
-/************************************************************************/
-
-static CvMat *PyCvMat_AsCvMat(PyObject *o)
-{
-  assert(0); // not yet implemented: reference counting for CvMat in Kalman is unclear...
-  return NULL;
-}
-
-#define cvReleaseIplConvKernel(x) cvReleaseStructuringElement(x)
-
-#if defined _MSC_VER && _MSC_VER >= 1200
-    #pragma warning( push )
-    #pragma warning( disable : 4244 )
-#endif
-
-#include "generated3.i"
-
-#if defined _MSC_VER && _MSC_VER >= 1200
-    #pragma warning( pop )
-#endif
-
-/* iplimage */
-
-static void iplimage_dealloc(PyObject *self)
-{
-  iplimage_t *pc = (iplimage_t*)self;
-  cvReleaseImageHeader((IplImage**)&pc->a);
-  Py_DECREF(pc->data);
-  PyObject_Del(self);
-}
-
-static PyObject *iplimage_repr(PyObject *self)
-{
-  iplimage_t *cva = (iplimage_t*)self;
-  IplImage* ipl = (IplImage*)(cva->a);
-  char str[1000];
-  sprintf(str, "<iplimage(");
-  char *d = str + strlen(str);
-  sprintf(d, "nChannels=%d ", ipl->nChannels);
-  d += strlen(d);
-  sprintf(d, "width=%d ", ipl->width);
-  d += strlen(d);
-  sprintf(d, "height=%d ", ipl->height);
-  d += strlen(d);
-  sprintf(d, "widthStep=%d ", ipl->widthStep);
-  d += strlen(d);
-  sprintf(d, ")>");
-  return PyString_FromString(str);
-}
-
-static PyObject *iplimage_tostring(PyObject *self, PyObject *args)
-{
-  IplImage *i=0;
-  if (!convert_to_IplImage(self, &i, "self"))
-    return NULL;
-  if (i == NULL)
-    return NULL;
-  cv::Mat img = cvarrToMat(i);
-  size_t esz = img.elemSize();
-  int nrows = img.rows, ncols = img.cols;
-
-  if( !img.isContinuous() )
-    img = img.clone();
-  return PyString_FromStringAndSize((char*)img.data, (Py_ssize_t)(esz*nrows*ncols));
-}
-
-static struct PyMethodDef iplimage_methods[] =
-{
-  {"tostring", iplimage_tostring, METH_VARARGS},
-  {NULL, NULL}
-};
-
-static PyObject *iplimage_getnChannels(iplimage_t *cva)
-{
-  return PyInt_FromLong(((IplImage*)(cva->a))->nChannels);
-}
-static PyObject *iplimage_getwidth(iplimage_t *cva)
-{
-  return PyInt_FromLong(((IplImage*)(cva->a))->width);
-}
-static PyObject *iplimage_getheight(iplimage_t *cva)
-{
-  return PyInt_FromLong(((IplImage*)(cva->a))->height);
-}
-static PyObject *iplimage_getdepth(iplimage_t *cva)
-{
-  return PyLong_FromUnsignedLong((unsigned)((IplImage*)(cva->a))->depth);
-}
-static PyObject *iplimage_getorigin(iplimage_t *cva)
-{
-  return PyInt_FromLong(((IplImage*)(cva->a))->origin);
-}
-static void iplimage_setorigin(iplimage_t *cva, PyObject *v)
-{
-  ((IplImage*)(cva->a))->origin = PyInt_AsLong(v);
-}
-
-static PyGetSetDef iplimage_getseters[] = {
-  {(char*)"nChannels", (getter)iplimage_getnChannels, (setter)NULL, (char*)"nChannels", NULL},
-  {(char*)"channels", (getter)iplimage_getnChannels, (setter)NULL, (char*)"nChannels", NULL},
-  {(char*)"width", (getter)iplimage_getwidth, (setter)NULL, (char*)"width", NULL},
-  {(char*)"height", (getter)iplimage_getheight, (setter)NULL, (char*)"height", NULL},
-  {(char*)"depth", (getter)iplimage_getdepth, (setter)NULL, (char*)"depth", NULL},
-  {(char*)"origin", (getter)iplimage_getorigin, (setter)iplimage_setorigin, (char*)"origin", NULL},
-  {NULL}  /* Sentinel */
-};
-
-static PyMappingMethods iplimage_as_map = {
-  NULL,
-  &cvarr_GetItem,
-  &cvarr_SetItem,
-};
-
-static PyTypeObject iplimage_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                          /*size*/
-  OLD_MODULESTR".iplimage",   /*name*/
-  sizeof(iplimage_t),         /*basicsize*/
-};
-
-static void iplimage_specials(void)
-{
-  iplimage_Type.tp_dealloc = iplimage_dealloc;
-  iplimage_Type.tp_as_mapping = &iplimage_as_map;
-  iplimage_Type.tp_repr = iplimage_repr;
-  iplimage_Type.tp_methods = iplimage_methods;
-  iplimage_Type.tp_getset = iplimage_getseters;
-}
-
-static int is_iplimage(PyObject *o)
-{
-  return PyType_IsSubtype(o->ob_type, &iplimage_Type);
-}
-
-/************************************************************************/
-
-/* cvmat */
-
-static void cvmat_dealloc(PyObject *self)
-{
-  cvmat_t *pc = (cvmat_t*)self;
-  Py_XDECREF(pc->data);
-  //cvDecRefData(pc->a);
-  cvFree(&pc->a);
-  PyObject_Del(self);
-}
-
-static PyObject *cvmat_repr(PyObject *self)
-{
-  CvMat *m = ((cvmat_t*)self)->a;
-  char str[1000];
-  sprintf(str, "<cvmat(");
-  char *d = str + strlen(str);
-  sprintf(d, "type=%08x ", m->type);
-  d += strlen(d);
-  switch (CV_MAT_DEPTH(m->type)) {
-  case CV_8U: strcpy(d, "8U"); break;
-  case CV_8S: strcpy(d, "8S"); break;
-  case CV_16U: strcpy(d, "16U"); break;
-  case CV_16S: strcpy(d, "16S"); break;
-  case CV_32S: strcpy(d, "32S"); break;
-  case CV_32F: strcpy(d, "32F"); break;
-  case CV_64F: strcpy(d, "64F"); break;
-  }
-  d += strlen(d);
-  sprintf(d, "C%d ", CV_MAT_CN(m->type));
-  d += strlen(d);
-  sprintf(d, "rows=%d ", m->rows);
-  d += strlen(d);
-  sprintf(d, "cols=%d ", m->cols);
-  d += strlen(d);
-  sprintf(d, "step=%d ", m->step);
-  d += strlen(d);
-  sprintf(d, ")>");
-  return PyString_FromString(str);
-}
-
-static PyObject *cvmat_tostring(PyObject *self, PyObject *args)
-{
-  CvMat *m;
-  if (!convert_to_CvMat(self, &m, "self"))
-    return NULL;
-
-  int bps;                     // bytes per sample
-
-  switch (CV_MAT_DEPTH(m->type)) {
-  case CV_8U:
-  case CV_8S:
-    bps = CV_MAT_CN(m->type) * 1;
-    break;
-  case CV_16U:
-  case CV_16S:
-    bps = CV_MAT_CN(m->type) * 2;
-    break;
-  case CV_32S:
-  case CV_32F:
-    bps = CV_MAT_CN(m->type) * 4;
-    break;
-  case CV_64F:
-    bps = CV_MAT_CN(m->type) * 8;
-    break;
-  default:
-    failmsg("Unrecognized depth %d", CV_MAT_DEPTH(m->type));
-    return (PyObject*)0;
-  }
-
-  int bpl = m->cols * bps; // bytes per line
-  cvmat_t *pc = (cvmat_t*)self;
-  if (PyString_Check(pc->data) && bpl == m->step && pc->offset == 0 && ((bpl * m->rows) == what_size(pc->data))) {
-    Py_INCREF(pc->data);
-    return pc->data;
-  } else {
-    int l = bpl * m->rows;
-    char *s = new char[l];
-    int y;
-    for (y = 0; y < m->rows; y++) {
-      memcpy(s + y * bpl, m->data.ptr + y * m->step, bpl);
-    }
-    PyObject *r = PyString_FromStringAndSize(s, l);
-    delete[] s;
-    return r;
-  }
-}
-
-static struct PyMethodDef cvmat_methods[] =
-{
-  {"tostring", cvmat_tostring, METH_VARARGS},
-  {NULL, NULL}
-};
-
-static PyObject *cvmat_gettype(cvmat_t *cva)
-{
-  return PyInt_FromLong(cvGetElemType(cva->a));
-}
-
-static PyObject *cvmat_getstep(cvmat_t *cva)
-{
-  return PyInt_FromLong(cva->a->step);
-}
-
-static PyObject *cvmat_getrows(cvmat_t *cva)
-{
-  return PyInt_FromLong(cva->a->rows);
-}
-
-static PyObject *cvmat_getcols(cvmat_t *cva)
-{
-  return PyInt_FromLong(cva->a->cols);
-}
-
-static PyObject *cvmat_getchannels(cvmat_t *cva)
-{
-  return PyInt_FromLong(CV_MAT_CN(cva->a->type));
-}
-
-#if PYTHON_USE_NUMPY
-#include "numpy/ndarrayobject.h"
-
-// A PyArrayInterface, with an associated python object that should be DECREF'ed on release
-struct arrayTrack {
-  PyArrayInterface s;
-  PyObject *o;
-};
-
-static void arrayTrackDtor(void *p)
-{
-  struct arrayTrack *at = (struct arrayTrack *)p;
-  delete[] at->s.shape;
-  delete[] at->s.strides;
-  if (at->s.descr)
-    Py_DECREF(at->s.descr);
-  Py_DECREF(at->o);
-}
-
-// Fill in fields of PyArrayInterface s using mtype.  This code is common
-// to cvmat and cvmatnd
-
-static void arrayinterface_common(PyArrayInterface *s, int mtype)
-{
-  s->two = 2;
-
-  switch (CV_MAT_DEPTH(mtype)) {
-  case CV_8U:
-    s->typekind = 'u';
-    s->itemsize = 1;
-    break;
-  case CV_8S:
-    s->typekind = 'i';
-    s->itemsize = 1;
-    break;
-  case CV_16U:
-    s->typekind = 'u';
-    s->itemsize = 2;
-    break;
-  case CV_16S:
-    s->typekind = 'i';
-    s->itemsize = 2;
-    break;
-  case CV_32S:
-    s->typekind = 'i';
-    s->itemsize = 4;
-    break;
-  case CV_32F:
-    s->typekind = 'f';
-    s->itemsize = 4;
-    break;
-  case CV_64F:
-    s->typekind = 'f';
-    s->itemsize = 8;
-    break;
-  default:
-    assert(0);
-  }
-
-  s->flags = NPY_WRITEABLE | NPY_NOTSWAPPED;
-}
-
-static PyObject *cvmat_array_struct(cvmat_t *cva)
-{
-  CvMat *m;
-  convert_to_CvMat((PyObject *)cva, &m, "");
-
-  arrayTrack *at = new arrayTrack;
-  PyArrayInterface *s = &at->s;
-
-  at->o = cva->data;
-  Py_INCREF(at->o);
-
-  arrayinterface_common(s, m->type);
-
-  if (CV_MAT_CN(m->type) == 1) {
-    s->nd = 2;
-    s->shape = new npy_intp[2];
-    s->shape[0] = m->rows;
-    s->shape[1] = m->cols;
-    s->strides = new npy_intp[2];
-    s->strides[0] = m->step;
-    s->strides[1] = s->itemsize;
-  } else {
-    s->nd = 3;
-    s->shape = new npy_intp[3];
-    s->shape[0] = m->rows;
-    s->shape[1] = m->cols;
-    s->shape[2] = CV_MAT_CN(m->type);
-    s->strides = new npy_intp[3];
-    s->strides[0] = m->step;
-    s->strides[1] = s->itemsize * CV_MAT_CN(m->type);
-    s->strides[2] = s->itemsize;
-  }
-  s->data = (void*)(m->data.ptr);
-  s->descr = PyList_New(1);
-  char typestr[10];
-  sprintf(typestr, "<%c%d", s->typekind, s->itemsize);
-  PyList_SetItem(s->descr, 0, Py_BuildValue("(ss)", "x", typestr));
-
-  return PyCObject_FromVoidPtr(s, arrayTrackDtor);
-}
-
-static PyObject *cvmatnd_array_struct(cvmatnd_t *cva)
-{
-  CvMatND *m = 0;
-  convert_to_CvMatND((PyObject *)cva, &m, "");
-
-  arrayTrack *at = new arrayTrack;
-  PyArrayInterface *s = &at->s;
-
-  at->o = cva->data;
-  Py_INCREF(at->o);
-
-  arrayinterface_common(s, m->type);
-
-  int i;
-  if (CV_MAT_CN(m->type) == 1) {
-    s->nd = m->dims;
-    s->shape = new npy_intp[s->nd];
-    for (i = 0; i < s->nd; i++)
-      s->shape[i] = m->dim[i].size;
-    s->strides = new npy_intp[s->nd];
-    for (i = 0; i < (s->nd - 1); i++)
-      s->strides[i] = m->dim[i].step;
-    s->strides[s->nd - 1] = s->itemsize;
-  } else {
-    s->nd = m->dims + 1;
-    s->shape = new npy_intp[s->nd];
-    for (i = 0; i < (s->nd - 1); i++)
-      s->shape[i] = m->dim[i].size;
-    s->shape[s->nd - 1] = CV_MAT_CN(m->type);
-
-    s->strides = new npy_intp[s->nd];
-    for (i = 0; i < (s->nd - 2); i++)
-      s->strides[i] = m->dim[i].step;
-    s->strides[s->nd - 2] = s->itemsize * CV_MAT_CN(m->type);
-    s->strides[s->nd - 1] = s->itemsize;
-  }
-  s->data = (void*)(m->data.ptr);
-  s->descr = PyList_New(1);
-  char typestr[10];
-  sprintf(typestr, "<%c%d", s->typekind, s->itemsize);
-  PyList_SetItem(s->descr, 0, Py_BuildValue("(ss)", "x", typestr));
-
-  return PyCObject_FromVoidPtr(s, arrayTrackDtor);
-}
-#endif
-
-static PyGetSetDef cvmat_getseters[] = {
-  {(char*)"type", (getter)cvmat_gettype, (setter)NULL, (char*)"type", NULL},
-  {(char*)"step", (getter)cvmat_getstep, (setter)NULL, (char*)"step", NULL},
-  {(char*)"rows", (getter)cvmat_getrows, (setter)NULL, (char*)"rows", NULL},
-  {(char*)"cols", (getter)cvmat_getcols, (setter)NULL, (char*)"cols", NULL},
-  {(char*)"channels",(getter)cvmat_getchannels, (setter)NULL, (char*)"channels", NULL},
-  {(char*)"width", (getter)cvmat_getcols, (setter)NULL, (char*)"width", NULL},
-  {(char*)"height", (getter)cvmat_getrows, (setter)NULL, (char*)"height", NULL},
-#if PYTHON_USE_NUMPY
-  {(char*)"__array_struct__", (getter)cvmat_array_struct, (setter)NULL, (char*)"__array_struct__", NULL},
-#endif
-  {NULL}  /* Sentinel */
-};
-
-static PyMappingMethods cvmat_as_map = {
-  NULL,
-  &cvarr_GetItem,
-  &cvarr_SetItem,
-};
-
-static PyTypeObject cvmat_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                          /*size*/
-  OLD_MODULESTR".cvmat",      /*name*/
-  sizeof(cvmat_t),            /*basicsize*/
-};
-
-static int illegal_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-  PyErr_SetString(opencv_error, "Cannot create cvmat directly; use CreateMat() instead");
-  return -1;
-}
-
-static void cvmat_specials(void)
-{
-  cvmat_Type.tp_dealloc = cvmat_dealloc;
-  cvmat_Type.tp_as_mapping = &cvmat_as_map;
-  cvmat_Type.tp_repr = cvmat_repr;
-  cvmat_Type.tp_methods = cvmat_methods;
-  cvmat_Type.tp_getset = cvmat_getseters;
-  cvmat_Type.tp_init = illegal_init;
-}
-
-static int is_cvmat(PyObject *o)
-{
-  return PyType_IsSubtype(o->ob_type, &cvmat_Type);
-}
-
-/************************************************************************/
-
-/* cvmatnd */
-
-static void cvmatnd_dealloc(PyObject *self)
-{
-  cvmatnd_t *pc = (cvmatnd_t*)self;
-  Py_XDECREF(pc->data);
-  cvDecRefData(pc->a);
-  cvFree(&pc->a);
-  PyObject_Del(self);
-}
-
-static PyObject *cvmatnd_repr(PyObject *self)
-{
-  CvMatND *m = ((cvmatnd_t*)self)->a;
-  char str[1000];
-  sprintf(str, "<cvmatnd(");
-  char *d = str + strlen(str);
-  sprintf(d, "type=%08x ", m->type);
-  d += strlen(d);
-  sprintf(d, ")>");
-  return PyString_FromString(str);
-}
-
-static size_t cvmatnd_size(CvMatND *m)
-{
-  int bps = 1;
-  switch (CV_MAT_DEPTH(m->type)) {
-  case CV_8U:
-  case CV_8S:
-    bps = CV_MAT_CN(m->type) * 1;
-    break;
-  case CV_16U:
-  case CV_16S:
-    bps = CV_MAT_CN(m->type) * 2;
-    break;
-  case CV_32S:
-  case CV_32F:
-    bps = CV_MAT_CN(m->type) * 4;
-    break;
-  case CV_64F:
-    bps = CV_MAT_CN(m->type) * 8;
-    break;
-  default:
-    assert(0);
-  }
-  size_t l = bps;
-  for (int d = 0; d < m->dims; d++) {
-    l *= m->dim[d].size;
-  }
-  return l;
-}
-
-static PyObject *cvmatnd_tostring(PyObject *self, PyObject *args)
-{
-  CvMatND *m = 0;
-  if (!convert_to_CvMatND(self, &m, "self"))
-    return NULL;
-
-  int bps;
-  switch (CV_MAT_DEPTH(m->type)) {
-  case CV_8U:
-  case CV_8S:
-    bps = CV_MAT_CN(m->type) * 1;
-    break;
-  case CV_16U:
-  case CV_16S:
-    bps = CV_MAT_CN(m->type) * 2;
-    break;
-  case CV_32S:
-  case CV_32F:
-    bps = CV_MAT_CN(m->type) * 4;
-    break;
-  case CV_64F:
-    bps = CV_MAT_CN(m->type) * 8;
-    break;
-  default:
-    return failmsg("Unrecognised depth %d", CV_MAT_DEPTH(m->type)), (PyObject*)0;
-  }
-
-  int d, l = bps;
-  for (d = 0; d < m->dims; d++) {
-    l *= m->dim[d].size;
-  }
-  int i[CV_MAX_DIM];
-  for (d = 0; d < m->dims; d++) {
-    i[d] = 0;
-  }
-  int rowsize = m->dim[m->dims-1].size * bps;
-  char *s = new char[l];
-  char *ps = s;
-
-  int finished = 0;
-  while (!finished) {
-    memcpy(ps, cvPtrND(m, i), rowsize);
-    ps += rowsize;
-    for (d = m->dims - 2; 0 <= d; d--) {
-      if (++i[d] < cvGetDimSize(m, d)) {
-        break;
-      } else {
-        i[d] = 0;
-      }
-    }
-    if (d < 0)
-      finished = 1;
-  }
-
-  return PyString_FromStringAndSize(s, ps - s);
-}
-
-static struct PyMethodDef cvmatnd_methods[] =
-{
-  {"tostring", cvmatnd_tostring, METH_VARARGS},
-  {NULL, NULL}
-};
-
-static PyObject *cvmatnd_getchannels(cvmatnd_t *cva)
-{
-  return PyInt_FromLong(CV_MAT_CN(cva->a->type));
-}
-
-static PyGetSetDef cvmatnd_getseters[] = {
-#if PYTHON_USE_NUMPY
-  {(char*)"__array_struct__", (getter)cvmatnd_array_struct, (setter)NULL, (char*)"__array_struct__", NULL},
-#endif
-  {(char*)"channels",(getter)cvmatnd_getchannels, (setter)NULL, (char*)"channels", NULL},
-  {NULL}  /* Sentinel */
-};
-
-static PyMappingMethods cvmatnd_as_map = {
-  NULL,
-  &cvarr_GetItem,
-  &cvarr_SetItem,
-};
-
-static PyTypeObject cvmatnd_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                          /*size*/
-  OLD_MODULESTR".cvmatnd",    /*name*/
-  sizeof(cvmatnd_t),          /*basicsize*/
-};
-
-static void cvmatnd_specials(void)
-{
-  cvmatnd_Type.tp_dealloc = cvmatnd_dealloc;
-  cvmatnd_Type.tp_as_mapping = &cvmatnd_as_map;
-  cvmatnd_Type.tp_repr = cvmatnd_repr;
-  cvmatnd_Type.tp_methods = cvmatnd_methods;
-  cvmatnd_Type.tp_getset = cvmatnd_getseters;
-}
-
-static int is_cvmatnd(PyObject *o)
-{
-  return PyType_IsSubtype(o->ob_type, &cvmatnd_Type);
-}
-
-/************************************************************************/
-
-/* cvhistogram */
-
-static void cvhistogram_dealloc(PyObject *self)
-{
-  cvhistogram_t *cvh = (cvhistogram_t*)self;
-  Py_DECREF(cvh->bins);
-  PyObject_Del(self);
-}
-
-static PyTypeObject cvhistogram_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                            /*size*/
-  OLD_MODULESTR".cvhistogram",  /*name*/
-  sizeof(cvhistogram_t),        /*basicsize*/
-};
-
-static PyObject *cvhistogram_getbins(cvhistogram_t *cvh)
-{
-  Py_INCREF(cvh->bins);
-  return cvh->bins;
-}
-
-static PyGetSetDef cvhistogram_getseters[] = {
-  {(char*)"bins", (getter)cvhistogram_getbins, (setter)NULL, (char*)"bins", NULL},
-  {NULL}  /* Sentinel */
-};
-
-static void cvhistogram_specials(void)
-{
-  cvhistogram_Type.tp_dealloc = cvhistogram_dealloc;
-  cvhistogram_Type.tp_getset = cvhistogram_getseters;
-}
-
-/************************************************************************/
-
-/* cvlineiterator */
-
-static PyObject *cvlineiterator_iter(PyObject *o)
-{
-  Py_INCREF(o);
-  return o;
-}
-
-static PyObject *cvlineiterator_next(PyObject *o)
-{
-  cvlineiterator_t *pi = (cvlineiterator_t*)o;
-
-  if (pi->count) {
-    pi->count--;
-
-    CvScalar r;
-    cvRawDataToScalar( (void*)(pi->iter.ptr), pi->type, &r);
-    PyObject *pr = PyObject_FromCvScalar(r, pi->type);
-
-    CV_NEXT_LINE_POINT(pi->iter);
-
-    return pr;
-  } else {
-    return NULL;
-  }
-}
-
-static PyTypeObject cvlineiterator_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                              /*size*/
-  OLD_MODULESTR".cvlineiterator", /*name*/
-  sizeof(cvlineiterator_t),       /*basicsize*/
-};
-
-static void cvlineiterator_specials(void)
-{
-  cvlineiterator_Type.tp_iter = cvlineiterator_iter;
-  cvlineiterator_Type.tp_iternext = cvlineiterator_next;
-}
-
-/************************************************************************/
-
-/* memtrack */
-
-/* Motivation for memtrack is when the storage for a Mat is an array or buffer
-object. By setting 'data' to be a memtrack, can deallocate the storage at
-object destruction.
-
-For array objects, 'backing' is the actual storage object. memtrack holds the reference,
-then DECREF's it at dealloc.
-
-For MatND's, we need to cvDecRefData() on release, and this is what field 'backingmat' is for.
-
-If freeptr is true, then a straight cvFree() of ptr happens.
-
-*/
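The comment above describes the ownership trick behind memtrack_t: tie the lifetime of whatever backs a CvArr's data to a Python object's destructor. A tiny RAII sketch of the same idea in isolation (Holder is an illustrative name; the real type additionally DECREFs a backing PyObject and calls cvDecRefData on a backing CvMatND):

```cpp
#include <cstdlib>
#include <cstdio>

// Illustrative stand-in for the memtrack_t dealloc logic: release whichever
// backing resource this holder was configured to own.
struct Holder {
    void* ptr;       // raw buffer, freed only if 'freeptr' is set
    bool  freeptr;   // mirrors memtrack_t::freeptr
    ~Holder() {
        if (freeptr) {
            std::free(ptr);
            std::puts("backing buffer released");
        }
    }
};

int main() {
    Holder h = { std::malloc(64), true };  // released when h goes out of scope
    (void)h;
    return 0;
}
```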
-
-static void memtrack_dealloc(PyObject *self)
-{
-  memtrack_t *pi = (memtrack_t*)self;
-  if (pi->backing)
-    Py_DECREF(pi->backing);
-  if (pi->backingmat)
-    cvDecRefData(pi->backingmat);
-  if (pi->freeptr)
-    cvFree(&pi->ptr);
-  PyObject_Del(self);
-}
-
-static PyTypeObject memtrack_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                          /*size*/
-  OLD_MODULESTR".memtrack",   /*name*/
-  sizeof(memtrack_t),         /*basicsize*/
-};
-
-static Py_ssize_t memtrack_getreadbuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
-{
-  *ptrptr = &((memtrack_t*)self)->ptr;
-  return ((memtrack_t*)self)->size;
-}
-
-static Py_ssize_t memtrack_getwritebuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
-{
-  *ptrptr = ((memtrack_t*)self)->ptr;
-  return ((memtrack_t*)self)->size;
-}
-
-static Py_ssize_t memtrack_getsegcount(PyObject *self, Py_ssize_t *lenp)
-{
-  return (Py_ssize_t)1;
-}
-
-PyBufferProcs memtrack_as_buffer = {
-  memtrack_getreadbuffer,
-  memtrack_getwritebuffer,
-  memtrack_getsegcount
-};
-
-static void memtrack_specials(void)
-{
-  memtrack_Type.tp_dealloc = memtrack_dealloc;
-  memtrack_Type.tp_as_buffer = &memtrack_as_buffer;
-}
-
-/************************************************************************/
-
-/* cvmemstorage */
-
-static void cvmemstorage_dealloc(PyObject *self)
-{
-  cvmemstorage_t *ps = (cvmemstorage_t*)self;
-  cvReleaseMemStorage(&(ps->a));
-  PyObject_Del(self);
-}
-
-static PyTypeObject cvmemstorage_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                            /*size*/
-  OLD_MODULESTR".cvmemstorage", /*name*/
-  sizeof(cvmemstorage_t),       /*basicsize*/
-};
-
-static void cvmemstorage_specials(void)
-{
-  cvmemstorage_Type.tp_dealloc = cvmemstorage_dealloc;
-}
-
-/************************************************************************/
-
-/* cvfont */
-
-static PyTypeObject cvfont_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                          /*size*/
-  OLD_MODULESTR".cvfont",     /*name*/
-  sizeof(cvfont_t),           /*basicsize*/
-};
-
-static void cvfont_specials(void) { }
-
-/************************************************************************/
-
-/* cvrng */
-
-static PyTypeObject cvrng_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                          /*size*/
-  OLD_MODULESTR".cvrng",      /*name*/
-  sizeof(cvrng_t),            /*basicsize*/
-};
-
-static void cvrng_specials(void)
-{
-}
-
-/************************************************************************/
-
-/* cvcontourtree */
-
-static PyTypeObject cvcontourtree_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                              /*size*/
-  OLD_MODULESTR".cvcontourtree",  /*name*/
-  sizeof(cvcontourtree_t),        /*basicsize*/
-};
-
-static void cvcontourtree_specials(void) { }
-
-
-/************************************************************************/
-
-/* cvsubdiv2dedge */
-
-static PyTypeObject cvsubdiv2dedge_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                              /*size*/
-  OLD_MODULESTR".cvsubdiv2dedge", /*name*/
-  sizeof(cvsubdiv2dedge_t),       /*basicsize*/
-};
-
-static int cvsubdiv2dedge_compare(PyObject *o1, PyObject *o2)
-{
-  cvsubdiv2dedge_t *e1 = (cvsubdiv2dedge_t*)o1;
-  cvsubdiv2dedge_t *e2 = (cvsubdiv2dedge_t*)o2;
-  if (e1->a < e2->a)
-    return -1;
-  else if (e1->a > e2->a)
-    return 1;
-  else
-    return 0;
-}
-
-static PyObject *cvquadedge_repr(PyObject *self)
-{
-  CvSubdiv2DEdge m = ((cvsubdiv2dedge_t*)self)->a;
-  char str[1000];
-  sprintf(str, "");
-  return PyString_FromString(str);
-}
-
-static void cvsubdiv2dedge_specials(void) {
-  cvsubdiv2dedge_Type.tp_compare = cvsubdiv2dedge_compare;
-  cvsubdiv2dedge_Type.tp_repr = cvquadedge_repr;
-}
-
-/************************************************************************/
-
-/* cvseq */
-
-static void cvseq_dealloc(PyObject *self)
-{
-  cvseq_t *ps = (cvseq_t*)self;
-  Py_DECREF(ps->container);
-  PyObject_Del(self);
-}
-
-static PyObject *cvseq_h_next(PyObject *self, PyObject *args);
-static PyObject *cvseq_h_prev(PyObject *self, PyObject *args);
-static PyObject *cvseq_v_next(PyObject *self, PyObject *args);
-static PyObject *cvseq_v_prev(PyObject *self, PyObject *args);
-
-static struct PyMethodDef cvseq_methods[] =
-{
-  {"h_next", cvseq_h_next, METH_VARARGS},
-  {"h_prev", cvseq_h_prev, METH_VARARGS},
-  {"v_next", cvseq_v_next, METH_VARARGS},
-  {"v_prev", cvseq_v_prev, METH_VARARGS},
-  {NULL, NULL}
-};
-
-static Py_ssize_t cvseq_seq_length(PyObject *o)
-{
-  cvseq_t *ps = (cvseq_t*)o;
-  if (ps->a == NULL)
-    return (Py_ssize_t)0;
-  else
-    return (Py_ssize_t)(ps->a->total);
-}
-
-static PyObject* cvseq_seq_getitem(PyObject *o, Py_ssize_t i)
-{
-  cvseq_t *ps = (cvseq_t*)o;
-  CvPoint *pt;
-  struct pointpair{
-    CvPoint a, b;
-  } *pp;
-  CvPoint2D32f *pt2;
-  CvPoint3D32f *pt3;
-
-  if (i < cvseq_seq_length(o)) {
-    switch (CV_SEQ_ELTYPE(ps->a)) {
-
-    case CV_SEQ_ELTYPE_POINT:
-      pt = CV_GET_SEQ_ELEM(CvPoint, ps->a, (int)i);
-      return Py_BuildValue("ii", pt->x, pt->y);
-
-    case CV_SEQ_ELTYPE_GENERIC:
-      switch (ps->a->elem_size) {
-      case sizeof(CvQuadEdge2D):
-        {
-          cvsubdiv2dedge_t *r = PyObject_NEW(cvsubdiv2dedge_t, &cvsubdiv2dedge_Type);
-          r->a = (CvSubdiv2DEdge)CV_GET_SEQ_ELEM(CvQuadEdge2D, ps->a, (int)i);
-          r->container = ps->container;
-          Py_INCREF(r->container);
-          return (PyObject*)r;
-        }
-      case sizeof(CvConnectedComp):
-        {
-          CvConnectedComp *cc = CV_GET_SEQ_ELEM(CvConnectedComp, ps->a, (int)i);
-          return FROM_CvConnectedComp(*cc);
-        }
-      default:
-        printf("seq elem size is %d\n", ps->a->elem_size);
-        printf("KIND %d\n", CV_SEQ_KIND(ps->a));
-        assert(0);
-      }
-      return PyInt_FromLong(*CV_GET_SEQ_ELEM(unsigned char, ps->a, (int)i));
-
-    case CV_SEQ_ELTYPE_PTR:
-    case CV_SEQ_ELTYPE_INDEX:
-      return PyInt_FromLong(*CV_GET_SEQ_ELEM(int, ps->a, (int)i));
-
-    case CV_32SC4:
-      pp = CV_GET_SEQ_ELEM(pointpair, ps->a, (int)i);
-      return Py_BuildValue("(ii),(ii)", pp->a.x, pp->a.y, pp->b.x, pp->b.y);
-
-    case CV_32FC2:
-      pt2 = CV_GET_SEQ_ELEM(CvPoint2D32f, ps->a, (int)i);
-      return Py_BuildValue("ff", pt2->x, pt2->y);
-
-    case CV_SEQ_ELTYPE_POINT3D:
-      pt3 = CV_GET_SEQ_ELEM(CvPoint3D32f, ps->a, (int)i);
-      return Py_BuildValue("fff", pt3->x, pt3->y, pt3->z);
-
-    default:
-      printf("Unknown element type %08x\n", CV_SEQ_ELTYPE(ps->a));
-      assert(0);
-      return NULL;
-    }
-  } else
-    return NULL;
-}
-
-static PyObject* cvseq_map_getitem(PyObject *o, PyObject *item)
-{
-  if (PyInt_Check(item)) {
-    int i = (int)PyInt_AS_LONG(item);
-    if (i < 0)
-      i += (int)cvseq_seq_length(o);
-    return cvseq_seq_getitem(o, i);
-  } else if (!!PySlice_Check(item)) {
-    Py_ssize_t start, stop, step, slicelength, cur, i;
-    PyObject* result;
-
-    if (PySlice_GetIndicesEx((PySliceObject*)item, cvseq_seq_length(o),
-                 &start, &stop, &step, &slicelength) < 0) {
-      return NULL;
-    }
-
-    if (slicelength <= 0) {
-      return PyList_New(0);
-    } else {
-      result = PyList_New(slicelength);
-      if (!result) return NULL;
-
-      for (cur = start, i = 0; i < slicelength;
-           cur += step, i++) {
-        PyList_SET_ITEM(result, i, cvseq_seq_getitem(o, cur));
-      }
-
-      return result;
-    }
-  } else {
-    PyErr_SetString(PyExc_TypeError, "CvSeq indices must be integers");
-    return NULL;
-  }
-}
-
-static
-PySequenceMethods cvseq_sequence = {
-  cvseq_seq_length,
-  NULL,
-  NULL,
-  cvseq_seq_getitem
-};
-
-static PyMappingMethods cvseq_mapping = {
-  cvseq_seq_length,
-  cvseq_map_getitem,
-  NULL,
-};
-
-static PyTypeObject cvseq_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                          /*size*/
-  OLD_MODULESTR".cvseq",      /*name*/
-  sizeof(cvseq_t),            /*basicsize*/
-};
-
-static void cvseq_specials(void)
-{
-  cvseq_Type.tp_dealloc = cvseq_dealloc;
-  cvseq_Type.tp_as_sequence = &cvseq_sequence;
-  cvseq_Type.tp_as_mapping = &cvseq_mapping;
-  cvseq_Type.tp_methods = cvseq_methods;
-}
-
-#define MK_ACCESSOR(FIELD) \
-static PyObject *cvseq_##FIELD(PyObject *self, PyObject *args) \
-{ \
-  cvseq_t *ps = (cvseq_t*)self; \
-  CvSeq *s = ps->a; \
-  if (s->FIELD == NULL) { \
-    Py_RETURN_NONE; \
-  } else { \
-    cvseq_t *r = PyObject_NEW(cvseq_t, &cvseq_Type); \
-    r->a = s->FIELD; \
-    r->container = ps->container; \
-    Py_INCREF(r->container); \
-    return (PyObject*)r; \
-  } \
-}
-
-MK_ACCESSOR(h_next)
-MK_ACCESSOR(h_prev)
-MK_ACCESSOR(v_next)
-MK_ACCESSOR(v_prev)
-#undef MK_ACCESSOR
-
-/************************************************************************/
-
-/* cvset */
-
-static void cvset_dealloc(PyObject *self)
-{
-  cvset_t *ps = (cvset_t*)self;
-  Py_DECREF(ps->container);
-  PyObject_Del(self);
-}
-
-static PyTypeObject cvset_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                          /*size*/
-  OLD_MODULESTR".cvset",      /*name*/
-  sizeof(cvset_t),            /*basicsize*/
-};
-
-static PyObject *cvset_iter(PyObject *o)
-{
-  Py_INCREF(o);
-  cvset_t *ps = (cvset_t*)o;
-  ps->i = 0;
-  return o;
-}
-
-static PyObject *cvset_next(PyObject *o)
-{
-  cvset_t *ps = (cvset_t*)o;
-
-  while (ps->i < ps->a->total) {
-    CvSetElem *e = cvGetSetElem(ps->a, ps->i);
-    int prev_i = ps->i++;
-    if (e != NULL) {
-      return cvseq_seq_getitem(o, prev_i);
-    }
-  }
-  return NULL;
-}
-
-static void cvset_specials(void)
-{
-  cvset_Type.tp_dealloc = cvset_dealloc;
-  cvset_Type.tp_iter = cvset_iter;
-  cvset_Type.tp_iternext = cvset_next;
-}
-
-/************************************************************************/
-
-/* cvsubdiv2d */
-
-static PyTypeObject cvsubdiv2d_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                          /*size*/
-  OLD_MODULESTR".cvsubdiv2d", /*name*/
-  sizeof(cvsubdiv2d_t),       /*basicsize*/
-};
-
-static PyObject *cvsubdiv2d_getattro(PyObject *o, PyObject *name)
-{
-  cvsubdiv2d_t *p = (cvsubdiv2d_t*)o;
-  if (strcmp(PyString_AsString(name), "edges") == 0) {
-    cvset_t *r = PyObject_NEW(cvset_t, &cvset_Type);
-    r->a = p->a->edges;
-    r->container = p->container;
-    Py_INCREF(r->container);
-    return (PyObject*)r;
-  } else {
-    PyErr_SetString(PyExc_TypeError, "cvsubdiv2d has no such attribute");
-    return NULL;
-  }
-}
-
-static void cvsubdiv2d_specials(void)
-{
-  cvsubdiv2d_Type.tp_getattro = cvsubdiv2d_getattro;
-}
-
-/************************************************************************/
-
-/* cvsubdiv2dpoint */
-
-static PyTypeObject cvsubdiv2dpoint_Type = {
-  PyObject_HEAD_INIT(&PyType_Type)
-  0,                                /*size*/
-  OLD_MODULESTR".cvsubdiv2dpoint",  /*name*/
-  sizeof(cvsubdiv2dpoint_t),        /*basicsize*/
-};
-
-static PyObject *cvsubdiv2dpoint_getattro(PyObject *o, PyObject *name)
-{
-  cvsubdiv2dpoint_t *p = (cvsubdiv2dpoint_t*)o;
-  if (strcmp(PyString_AsString(name), "first") == 0) {
-    cvsubdiv2dedge_t *r = PyObject_NEW(cvsubdiv2dedge_t, &cvsubdiv2dedge_Type);
-    r->a = p->a->first;
-    r->container = p->container;
-    Py_INCREF(r->container);
-    return (PyObject*)r;
-  } else if (strcmp(PyString_AsString(name), "pt") == 0) {
-    return Py_BuildValue("(ff)", p->a->pt.x, p->a->pt.y);
-  } else {
-    PyErr_SetString(PyExc_TypeError, "cvsubdiv2dpoint has no such attribute");
-    return NULL;
-  }
-}
-
-static void cvsubdiv2dpoint_specials(void)
-{
-  cvsubdiv2dpoint_Type.tp_getattro = cvsubdiv2dpoint_getattro;
-}
-
-/************************************************************************/
-/* convert_to_X: used after PyArg_ParseTuple in the generated code  */
-
-/*static int convert_to_PyObjectPTR(PyObject *o, PyObject **dst, const char *name = "no_name")
-{
-  *dst = o;
-  return 1;
-}
-
-static int convert_to_PyCallableObjectPTR(PyObject *o, PyObject **dst, const char *name = "no_name")
-{
-  *dst = o;
-  return 1;
-}*/
-
-static int convert_to_char(PyObject *o, char *dst, const char *name = "no_name")
-{
-  if (PyString_Check(o) && PyString_Size(o) == 1) {
-    *dst = PyString_AsString(o)[0];
-    return 1;
-  } else {
-    (*dst) = 0;
-    return failmsg("Expected single character string for argument '%s'", name);
-  }
-}
-
-static int convert_to_CvMemStorage(PyObject *o, CvMemStorage **dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvmemstorage_Type)) {
-    (*dst) = (((cvmemstorage_t*)o)->a);
-    return 1;
-  } else {
-    (*dst) = (CvMemStorage*)NULL;
-    return failmsg("Expected CvMemStorage for argument '%s'", name);
-  }
-}
-
-static int convert_to_CvSeq(PyObject *o, CvSeq **dst, const char *name = "no_name")
-{
-  if (PyType_IsSubtype(o->ob_type, &cvseq_Type)) {
-    (*dst) = (((cvseq_t*)o)->a);
-    return 1;
-  } else {
-    (*dst) = (CvSeq*)NULL;
-    return failmsg("Expected CvSeq for argument '%s'", name);
-  }
-}
-
-static int convert_to_CvSize(PyObject *o, CvSize *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "ii", &dst->width, &dst->height))
-    return failmsg("CvSize argument '%s' expects two integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvScalar(PyObject *o, CvScalar *s, const char *name = "no_name")
-{
-  if (PySequence_Check(o)) {
-    PyObject *fi = PySequence_Fast(o, name);
-    if (fi == NULL)
-      return 0;
-    if (4 < PySequence_Fast_GET_SIZE(fi))
-        return failmsg("CvScalar value for argument '%s' is longer than 4", name);
-    for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-      PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-      if (PyFloat_Check(item) || PyInt_Check(item)) {
-        s->val[i] = PyFloat_AsDouble(item);
-      } else {
-        return failmsg("CvScalar value for argument '%s' is not numeric", name);
-      }
-    }
-    Py_DECREF(fi);
-  } else {
-    if (PyFloat_Check(o) || PyInt_Check(o)) {
-      s->val[0] = PyFloat_AsDouble(o);
-    } else {
-      return failmsg("CvScalar value for argument '%s' is not numeric", name);
-    }
-  }
-  return 1;
-}
-
-static int convert_to_CvPointPTR(PyObject *o, CvPoint **p, const char *name = "no_name")
-{
-  if (!PySequence_Check(o))
-    return failmsg("Expected sequence for point list argument '%s'", name);
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  *p = new CvPoint[PySequence_Fast_GET_SIZE(fi)];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    if (!PyTuple_Check(item))
-      return failmsg("Expected tuple for element in point list argument '%s'", name);
-    if (!PyArg_ParseTuple(item, "ii", &((*p)[i].x), &((*p)[i].y))) {
-      return 0;
-    }
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-static int convert_to_CvPoint2D32fPTR(PyObject *o, CvPoint2D32f **p, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  *p = new CvPoint2D32f[PySequence_Fast_GET_SIZE(fi)];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    if (!PyTuple_Check(item))
-      return failmsg("Expected tuple for CvPoint2D32f argument '%s'", name);
-    if (!PyArg_ParseTuple(item, "ff", &((*p)[i].x), &((*p)[i].y))) {
-      return 0;
-    }
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-
-#if 0 // not used
-static int convert_to_CvPoint3D32fPTR(PyObject *o, CvPoint3D32f **p, const char *name = "no_name")
-{
-  PyObject *fi = PySequence_Fast(o, name);
-  if (fi == NULL)
-    return 0;
-  *p = new CvPoint3D32f[PySequence_Fast_GET_SIZE(fi)];
-  for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) {
-    PyObject *item = PySequence_Fast_GET_ITEM(fi, i);
-    if (!PyTuple_Check(item))
-      return failmsg("Expected tuple for CvPoint3D32f argument '%s'", name);
-    if (!PyArg_ParseTuple(item, "fff", &((*p)[i].x), &((*p)[i].y), &((*p)[i].z))) {
-      return 0;
-    }
-  }
-  Py_DECREF(fi);
-  return 1;
-}
-#endif
-
-static int convert_to_CvStarDetectorParams(PyObject *o, CvStarDetectorParams *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o,
-                        "iiiii",
-                        &dst->maxSize,
-                        &dst->responseThreshold,
-                        &dst->lineThresholdProjected,
-                        &dst->lineThresholdBinarized,
-                        &dst->suppressNonmaxSize))
-    return failmsg("CvRect argument '%s' expects four integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvRect(PyObject *o, CvRect *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "iiii", &dst->x, &dst->y, &dst->width, &dst->height))
-    return failmsg("CvRect argument '%s' expects four integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvRectPTR(PyObject *o, CvRect **dst, const char *name = "no_name")
-{
-  *dst = new CvRect;
-  if (!PyArg_ParseTuple(o, "iiii", &(*dst)->x, &(*dst)->y, &(*dst)->width, &(*dst)->height))
-    return failmsg("CvRect argument '%s' expects four integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvSlice(PyObject *o, CvSlice *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "ii", &dst->start_index, &dst->end_index))
-    return failmsg("CvSlice argument '%s' expects two integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvPoint(PyObject *o, CvPoint *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "ii", &dst->x, &dst->y))
-    return failmsg("CvPoint argument '%s' expects two integers", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvPoint2D32f(PyObject *o, CvPoint2D32f *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "ff", &dst->x, &dst->y))
-    return failmsg("CvPoint2D32f argument '%s' expects two floats", name);
-  else
-    return 1;
-}
-
-static int convert_to_CvPoint3D32f(PyObject *o, CvPoint3D32f *dst, const char *name = "no_name")
-{
-  if (!PyArg_ParseTuple(o, "fff", &dst->x, &dst->y, &dst->z))
-    return failmsg("CvPoint3D32f argument '%s' expects three floats", name);
-  else
-    return 1;
-}
-
-static int convert_to_IplImage(PyObject *o, IplImage **dst, const char *name)
-{
-  iplimage_t *ipl = (iplimage_t*)o;
-  void *buffer;
-  Py_ssize_t buffer_len;
-
-  if (!is_iplimage(o)) {
-    return failmsg("Argument '%s' must be IplImage", name);
-  } else if (PyString_Check(ipl->data)) {
-    cvSetData(ipl->a, PyString_AsString(ipl->data) + ipl->offset, ipl->a->widthStep);
-    assert(cvGetErrStatus() == 0);
-    *dst = ipl->a;
-    return 1;
-  } else if (ipl->data && PyObject_AsWriteBuffer(ipl->data, &buffer, &buffer_len) == 0) {
-    cvSetData(ipl->a, (void*)((char*)buffer + ipl->offset), ipl->a->widthStep);
-    assert(cvGetErrStatus() == 0);
-    *dst = ipl->a;
-    return 1;
-  } else {
-    return failmsg("IplImage argument '%s' has no data",
name); - } -} - -static int convert_to_CvMat(PyObject *o, CvMat **dst, const char *name) -{ - cvmat_t *m = (cvmat_t*)o; - void *buffer; - Py_ssize_t buffer_len; - - if (!is_cvmat(o)) { -//#if !PYTHON_USE_NUMPY - return failmsg("Argument '%s' must be CvMat. Use fromarray() to convert numpy arrays to CvMat", name); -/*#else - PyObject *asmat = fromarray(o, 0); - if (asmat == NULL) - return failmsg("Argument '%s' must be CvMat", name); - // now have the array obect as a cvmat, can use regular conversion - return convert_to_CvMat(asmat, dst, name); -#endif*/ - } else { - m->a->refcount = NULL; - if (m->data && PyString_Check(m->data)) { - assert(cvGetErrStatus() == 0); - char *ptr = PyString_AsString(m->data) + m->offset; - cvSetData(m->a, ptr, m->a->step); - assert(cvGetErrStatus() == 0); - *dst = m->a; - return 1; - } else if (m->data && PyObject_AsWriteBuffer(m->data, &buffer, &buffer_len) == 0) { - cvSetData(m->a, (void*)((char*)buffer + m->offset), m->a->step); - assert(cvGetErrStatus() == 0); - *dst = m->a; - return 1; - } else if (m->data && m->a->data.ptr){ - *dst = m->a; - return 1; - } - else { - return failmsg("CvMat argument '%s' has no data", name); - } - } -} - -static int convert_to_CvMatND(PyObject *o, CvMatND **dst, const char *name) -{ - cvmatnd_t *m = (cvmatnd_t*)o; - void *buffer; - Py_ssize_t buffer_len; - - if (!is_cvmatnd(o)) { - return failmsg("Argument '%s' must be CvMatND", name); - } else if (m->data && PyString_Check(m->data)) { - m->a->data.ptr = ((uchar*)PyString_AsString(m->data)) + m->offset; - *dst = m->a; - return 1; - } else if (m->data && PyObject_AsWriteBuffer(m->data, &buffer, &buffer_len) == 0) { - m->a->data.ptr = ((uchar*)buffer + m->offset); - *dst = m->a; - return 1; - } else { - return failmsg("CvMatND argument '%s' has no data", name); - } -} - -static int convert_to_CvArr(PyObject *o, CvArr **dst, const char *name) -{ - if (o == Py_None) { - *dst = (void*)NULL; - return 1; - } else if (is_iplimage(o)) { - return convert_to_IplImage(o, (IplImage**)dst, name); - } else if (is_cvmat(o)) { - return convert_to_CvMat(o, (CvMat**)dst, name); - } else if (is_cvmatnd(o)) { - return convert_to_CvMatND(o, (CvMatND**)dst, name); - } else { -//#if !PYTHON_USE_NUMPY - return failmsg("CvArr argument '%s' must be IplImage, CvMat or CvMatND. 
Use fromarray() to convert numpy arrays to CvMat or cvMatND", name); -/*#else - PyObject *asmat = fromarray(o, 0); - if (asmat == NULL) - return failmsg("CvArr argument '%s' must be IplImage, CvMat, CvMatND, or support the array interface", name); - // now have the array obect as a cvmat, can use regular conversion - return convert_to_CvArr(asmat, dst, name); -#endif*/ - } -} - -static int convert_to_CvHistogram(PyObject *o, CvHistogram **dst, const char *name = "no_name") -{ - if (PyType_IsSubtype(o->ob_type, &cvhistogram_Type)) { - cvhistogram_t *ht = (cvhistogram_t*)o; - *dst = &ht->h; - return convert_to_CvArr(ht->bins, &(ht->h.bins), "bins"); - } else { - *dst = (CvHistogram *)NULL; - return failmsg("Expected CvHistogram for argument '%s'", name); - } -} - -// Used by FillPoly, FillConvexPoly, PolyLine -struct pts_npts_contours { - CvPoint** pts; - int* npts; - int contours; -}; - -static int convert_to_pts_npts_contours(PyObject *o, pts_npts_contours *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->contours = (int)PySequence_Fast_GET_SIZE(fi); - dst->pts = new CvPoint*[dst->contours]; - dst->npts = new int[dst->contours]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - if (!convert_to_CvPointPTR(PySequence_Fast_GET_ITEM(fi, i), &dst->pts[i], name)) - return 0; - dst->npts[i] = (int)PySequence_Size(PySequence_Fast_GET_ITEM(fi, i)); // safe because convert_ just succeeded - } - Py_DECREF(fi); - return 1; -} - -class cvarrseq { -public: - union { - CvSeq *seq; - CvArr *mat; - }; - int freemat; - cvarrseq() { - freemat = false; - } - ~cvarrseq() { - if (freemat) { - cvReleaseMat((CvMat**)&mat); - } - } -}; - -static int is_convertible_to_mat(PyObject *o) -{ -#if PYTHON_USE_NUMPY - if (PyObject_HasAttrString(o, "__array_struct__")) { - PyObject *ao = PyObject_GetAttrString(o, "__array_struct__"); - if (ao != NULL && - PyCObject_Check(ao) && - ((PyArrayInterface*)PyCObject_AsVoidPtr(ao))->two == 2) { - return 1; - } - } -#endif - return is_iplimage(o) && is_cvmat(o) && is_cvmatnd(o); -} - -static int convert_to_cvarrseq(PyObject *o, cvarrseq *dst, const char *name = "no_name") -{ - if (PyType_IsSubtype(o->ob_type, &cvseq_Type)) { - return convert_to_CvSeq(o, &(dst->seq), name); - } else if (is_convertible_to_mat(o)) { - int r = convert_to_CvArr(o, &(dst->mat), name); - return r; - } else if (PySequence_Check(o)) { - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - Py_ssize_t size = -1; - // Make a pass through the sequence, checking that each element is - // a sequence and that they are all the same size - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *e = PySequence_Fast_GET_ITEM(fi, i); - - if (!PySequence_Check(e)) - return failmsg("Sequence '%s' must contain sequences", name); - if (i == 0) - size = (int)PySequence_Size(e); - else if (size != PySequence_Size(e)) - return failmsg("All elements of sequence '%s' must be same size", name); - } - assert(size != -1); - CvMat *mt = cvCreateMat((int)PySequence_Fast_GET_SIZE(fi), 1, CV_32SC((int)size)); - dst->freemat = true; // dealloc this mat when done - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *e = PySequence_Fast_GET_ITEM(fi, i); - PyObject *fe = PySequence_Fast(e, name); - assert(fe != NULL); - int *pdst = (int*)cvPtr2D(mt, (int)i, 0); - for (Py_ssize_t j = 0; j < size; j++) { - PyObject *num = PySequence_Fast_GET_ITEM(fe, j); - if (!PyNumber_Check(num)) { - return 
failmsg("Sequence must contain numbers", name); - } - *pdst++ = PyInt_AsLong(num); - } - Py_DECREF(fe); - } - Py_DECREF(fi); - dst->mat = mt; - return 1; - } else { - return failmsg("Argument '%s' must be CvSeq, CvArr, or a sequence of numbers"); - } -} - -struct cvarr_count { - CvArr **cvarr; - int count; -}; - -static int convert_to_cvarr_count(PyObject *o, cvarr_count *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->cvarr = new CvArr*[dst->count]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - if (!convert_to_CvArr(PySequence_Fast_GET_ITEM(fi, i), &dst->cvarr[i], name)) - return 0; - } - Py_DECREF(fi); - return 1; -} - -struct intpair -{ - int *pairs; - int count; -}; - -static int convert_to_intpair(PyObject *o, intpair *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->pairs = new int[2 * dst->count]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - if (!PyArg_ParseTuple(item, "ii", &dst->pairs[2 * i], &dst->pairs[2 * i + 1])) { - return 0; - } - } - Py_DECREF(fi); - return 1; -} - -struct cvpoint2d32f_count { - CvPoint2D32f* points; - int count; -}; - -static int convert_to_cvpoint2d32f_count(PyObject *o, cvpoint2d32f_count *dst, const char *name = "no_name") -{ - if (PyInt_Check(o)) { - dst->count = PyInt_AsLong(o); - dst->points = new CvPoint2D32f[dst->count]; - return 1; - } else { - return failmsg("Expected integer for CvPoint2D32f count"); - } -} - -struct floats { - float *f; - int count; -}; -static int convert_to_floats(PyObject *o, floats *dst, const char *name = "no_name") -{ - if (PySequence_Check(o)) { - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->f = new float[dst->count]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - dst->f[i] = (float)PyFloat_AsDouble(item); - } - Py_DECREF(fi); - } else if (PyNumber_Check(o)) { - dst->count = 1; - dst->f = new float[1]; - dst->f[0] = (float)PyFloat_AsDouble(o); - } else { - return failmsg("Expected list of floats, or float for argument '%s'", name); - } - return 1; -} - -struct chars { - char *f; - int count; -}; -/// convert_to_chars not used - -struct CvPoints { - CvPoint *p; - int count; -}; -static int convert_to_CvPoints(PyObject *o, CvPoints *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->p = new CvPoint[dst->count]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - convert_to_CvPoint(item, &dst->p[i], name); - } - Py_DECREF(fi); - return 1; -} - -struct CvPoint3D32fs { - CvPoint3D32f *p; - int count; -}; -static int convert_to_CvPoint3D32fs(PyObject *o, CvPoint3D32fs *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->p = new CvPoint3D32f[dst->count]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - convert_to_CvPoint3D32f(item, &dst->p[i], name); - } - Py_DECREF(fi); - return 
1; -} - -struct CvPoint2D32fs { - CvPoint2D32f *p; - int count; -}; -static int convert_to_CvPoint2D32fs(PyObject *o, CvPoint2D32fs *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->p = new CvPoint2D32f[dst->count]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - convert_to_CvPoint2D32f(item, &dst->p[i], name); - } - Py_DECREF(fi); - return 1; -} - -struct ints { - int *i; - int count; -}; -static int convert_to_ints(PyObject *o, ints *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->i = new int[dst->count]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - dst->i[i] = PyInt_AsLong(item); - } - Py_DECREF(fi); - return 1; -} - -struct ints0 { - int *i; - int count; -}; -static int convert_to_ints0(PyObject *o, ints0 *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->i = new int[dst->count + 1]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - dst->i[i] = PyInt_AsLong(item); - } - dst->i[dst->count] = 0; - Py_DECREF(fi); - return 1; -} - -struct dims -{ - int count; - int i[CV_MAX_DIM]; - int step[CV_MAX_DIM]; - int length[CV_MAX_DIM]; -}; - -static int convert_to_dim(PyObject *item, int i, dims *dst, CvArr *cva, const char *name = "no_name") -{ - if (!!PySlice_Check(item)) { - Py_ssize_t start, stop, step, slicelength; - PySlice_GetIndicesEx((PySliceObject*)item, cvGetDimSize(cva, i), &start, &stop, &step, &slicelength); - dst->i[i] = (int)start; - dst->step[i] = (int)step; - dst->length[i] = (int)slicelength; - } else { - int index = PyInt_AsLong(item); - if (0 <= index) - dst->i[i] = index; - else - dst->i[i] = cvGetDimSize(cva, i) + index; - dst->step[i] = 0; - dst->length[i] = 1; - } - return 1; -} - -static int convert_to_dims(PyObject *o, dims *dst, CvArr *cva, const char *name = "no_name") -{ - if (!PyTuple_Check(o)) { - dst->count = 1; - return convert_to_dim(o, 0, dst, cva, name); - } else { - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) { - PyErr_SetString(PyExc_TypeError, "Expected tuple for index"); - return 0; - } - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - if (i >= cvGetDims(cva)) { - return failmsg("Access specifies %d dimensions, but array only has %d", PySequence_Fast_GET_SIZE(fi), cvGetDims(cva)); - } - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - if (!convert_to_dim(item, (int)i, dst, cva, name)) - return 0; - } - Py_DECREF(fi); - return 1; - } -} - -struct IplImages { - IplImage **ims; - int count; -}; -static int convert_to_IplImages(PyObject *o, IplImages *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->ims = new IplImage*[dst->count]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - if (!convert_to_IplImage(item, &dst->ims[i])) - return 0; - } - Py_DECREF(fi); - return 1; -} - -struct CvArrs { - CvArr **ims; - int count; -}; 
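
Illustration (not part of the patch): every convert_to_X helper above follows the same contract, returning nonzero on success and, on failure, setting a Python exception through failmsg() and returning 0. The handwritten wrappers later in this file therefore parse raw PyObject pointers first and run the converters second. A minimal sketch of a wrapper in that style; pycvExampleGet2D itself is hypothetical, while the helpers and the Python 2 C API calls are the ones used in this file:

static PyObject *pycvExampleGet2D(PyObject *self, PyObject *args)
{
  PyObject *pyobj_arr = NULL, *pyobj_pt = NULL;
  CvArr *arr;
  CvPoint pt;

  /* Step 1: pull the arguments out as raw PyObject pointers. */
  if (!PyArg_ParseTuple(args, "OO", &pyobj_arr, &pyobj_pt))
    return NULL;
  /* Step 2: run the convert_to_X helpers; on failure they set a Python
     exception via failmsg() and return 0. */
  if (!convert_to_CvArr(pyobj_arr, &arr, "arr")) return NULL;
  if (!convert_to_CvPoint(pyobj_pt, &pt, "pt")) return NULL;

  CvScalar s = cvGet2D(arr, pt.y, pt.x);   /* row index comes first */
  return Py_BuildValue("(dddd)", s.val[0], s.val[1], s.val[2], s.val[3]);
}
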
-static int convert_to_CvArrs(PyObject *o, CvArrs *dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - dst->count = (int)PySequence_Fast_GET_SIZE(fi); - dst->ims = new CvArr*[dst->count]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - if (!convert_to_CvArr(item, &dst->ims[i])) - return 0; - } - Py_DECREF(fi); - return 1; -} - -/*static int convert_to_floatPTRPTR(PyObject *o, float*** dst, const char *name = "no_name") -{ - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - Py_ssize_t sz = (int)PySequence_Fast_GET_SIZE(fi); - float **r = new float*[sz]; - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - floats ff; - if (!convert_to_floats(item, &ff)) - return 0; - r[i] = ff.f; - } - *dst = r; - return 1; -}*/ - -static int convert_to_CvFontPTR(PyObject *o, CvFont** dst, const char *name = "no_name") -{ - if (PyType_IsSubtype(o->ob_type, &cvfont_Type)) { - (*dst) = &(((cvfont_t*)o)->a); - return 1; - } else { - (*dst) = (CvFont*)NULL; - return failmsg("Expected CvFont for argument '%s'", name); - } -} - -/*static int convert_to_CvContourTreePTR(PyObject *o, CvContourTree** dst, const char *name = "no_name") -{ - if (PyType_IsSubtype(o->ob_type, &cvcontourtree_Type)) { - (*dst) = ((cvcontourtree_t*)o)->a; - return 1; - } else { - (*dst) = NULL; - return failmsg("Expected CvContourTree for argument '%s'", name); - } -}*/ - -static int convert_to_CvRNGPTR(PyObject *o, CvRNG** dst, const char *name = "no_name") -{ - if (PyType_IsSubtype(o->ob_type, &cvrng_Type)) { - (*dst) = &(((cvrng_t*)o)->a); - return 1; - } else { - (*dst) = (CvRNG*)NULL; - return failmsg("Expected CvRNG for argument '%s'", name); - } -} - -typedef void* generic; -static int convert_to_generic(PyObject *o, generic *dst, const char *name = "no_name") -{ - if (PyType_IsSubtype(o->ob_type, &iplimage_Type)) - return convert_to_IplImage(o, (IplImage**)dst, name); - else if (PyType_IsSubtype(o->ob_type, &cvmat_Type)) - return convert_to_CvMat(o, (CvMat**)dst, name); - else if (PyType_IsSubtype(o->ob_type, &cvmatnd_Type)) - return convert_to_CvMatND(o, (CvMatND**)dst, name); - else { - return failmsg("Cannot identify type of '%s'", name); - } -} - -static int convert_to_CvTermCriteria(PyObject *o, CvTermCriteria* dst, const char *name = "no_name") -{ - if (!PyArg_ParseTuple(o, "iid", &dst->type, &dst->max_iter, &dst->epsilon)) - return 0; - return 1; -} - -static int convert_to_CvBox2D(PyObject *o, CvBox2D* dst, const char *name = "no_name") -{ - if (!PyArg_ParseTuple(o, "(ff)(ff)f", &dst->center.x, &dst->center.y, &dst->size.width, &dst->size.height, &dst->angle)) - return 0; - return 1; -} - -static int convert_to_CvSubdiv2DPTR(PyObject *o, CvSubdiv2D** dst, const char *name = "no_name") -{ - if (PyType_IsSubtype(o->ob_type, &cvsubdiv2d_Type)) { - (*dst) = (((cvsubdiv2d_t*)o)->a); - return 1; - } else { - (*dst) = (CvSubdiv2D*)NULL; - return failmsg("Expected CvSubdiv2D for argument '%s'", name); - } -} - -static int convert_to_CvNextEdgeType(PyObject *o, CvNextEdgeType *dst, const char *name = "no_name") -{ - if (!PyInt_Check(o)) { - *dst = (CvNextEdgeType)NULL; - return failmsg("Expected number for CvNextEdgeType argument '%s'", name); - } else { - *dst = (CvNextEdgeType)PyInt_AsLong(o); - return 1; - } -} - -static int convert_to_CvSubdiv2DEdge(PyObject *o, CvSubdiv2DEdge *dst, const char *name = 
"no_name") -{ - if (PyType_IsSubtype(o->ob_type, &cvsubdiv2dedge_Type)) { - (*dst) = (((cvsubdiv2dedge_t*)o)->a); - return 1; - } else { - *dst = 0L; - return failmsg("Expected CvSubdiv2DEdge for argument '%s'", name); - } -} - -/************************************************************************/ - -static PyObject *pythonize_CvMat(cvmat_t *m) -{ - // Need to make this CvMat look like any other, with a Python - // buffer object as its data. - CvMat *mat = m->a; - assert(mat->step != 0); -#if 0 - PyObject *data = PyString_FromStringAndSize((char*)(mat->data.ptr), mat->rows * mat->step); -#else - memtrack_t *o = PyObject_NEW(memtrack_t, &memtrack_Type); - size_t gap = mat->data.ptr - (uchar*)mat->refcount; - o->ptr = mat->refcount; - o->owner = __LINE__; - o->freeptr = true; - o->size = gap + mat->rows * mat->step; - o->backing = NULL; - o->backingmat = NULL; - PyObject *data = PyBuffer_FromReadWriteObject((PyObject*)o, (size_t)gap, mat->rows * mat->step); - if (data == NULL) - return NULL; -#endif - m->data = data; - m->offset = 0; - Py_DECREF(o); - - // Now m has a reference to data, which has a reference to o. - - return (PyObject*)m; -} - -static PyObject *pythonize_IplImage(iplimage_t *cva) -{ - // Need to make this iplimage look like any other, with a Python - // string as its data. - // So copy the image data into a Python string object, then release - // it. - - IplImage *ipl = (IplImage*)(cva->a); - // PyObject *data = PyString_FromStringAndSize(ipl->imageData, ipl->imageSize); - - memtrack_t *o = PyObject_NEW(memtrack_t, &memtrack_Type); - assert(ipl->imageDataOrigin == ipl->imageData); - o->ptr = ipl->imageDataOrigin; - o->owner = __LINE__; - o->freeptr = true; - o->size = ipl->height * ipl->widthStep; - o->backing = NULL; - o->backingmat = NULL; - PyObject *data = PyBuffer_FromReadWriteObject((PyObject*)o, (size_t)0, o->size); - if (data == NULL) - return NULL; - Py_DECREF(o); - cva->data = data; - cva->offset = 0; - - return (PyObject*)cva; -} - -static PyObject *pythonize_CvMatND(cvmatnd_t *m, PyObject *backing = NULL) -{ - // - // Need to make this CvMatND look like any other, with a Python - // buffer object as its data. - // - - CvMatND *mat = m->a; - assert(mat->dim[0].step != 0); -#if 0 - PyObject *data = PyString_FromStringAndSize((char*)(mat->data.ptr), mat->dim[0].size * mat->dim[0].step); -#else - memtrack_t *o = PyObject_NEW(memtrack_t, &memtrack_Type); - o->ptr = mat->data.ptr; - o->owner = __LINE__; - o->freeptr = false; - o->size = cvmatnd_size(mat); - Py_XINCREF(backing); - o->backing = backing; - o->backingmat = mat; - PyObject *data = PyBuffer_FromReadWriteObject((PyObject*)o, (size_t)0, o->size); - Py_DECREF(o); // Now 'data' holds the only reference to 'o' - if (data == NULL) - return NULL; -#endif - m->data = data; - m->offset = 0; - - return (PyObject*)m; -} - -/************************************************************************/ -/* FROM_xxx: C -> Python converters. - * - * Turn various OpenCV types (and some aggregate types above) - * into Python objects. Used by the generated code. - * - * All these functions and macros return a new reference. 
- */ - - -static PyObject *_FROM_CvSeqPTR(CvSeq *s, PyObject *storage) -{ - cvseq_t *ps = PyObject_NEW(cvseq_t, &cvseq_Type); - ps->a = s; - ps->container = storage; - Py_INCREF(ps->container); - return (PyObject*)ps; -} - -static PyObject *_FROM_CvSubdiv2DPTR(CvSubdiv2D *s, PyObject *storage) -{ - cvsubdiv2d_t *ps = PyObject_NEW(cvsubdiv2d_t, &cvsubdiv2d_Type); - ps->a = s; - ps->container = storage; - Py_INCREF(ps->container); - return (PyObject*)ps; -} - -static PyObject *FROM_floats(floats r) -{ - PyObject *pr; - - pr = PyList_New(r.count); - for (Py_ssize_t i = 0; i < (Py_ssize_t)r.count; i++) { - PyList_SetItem(pr, i, PyFloat_FromDouble(r.f[i])); - } - return pr; -} - -static PyObject *FROM_chars(chars r) -{ - PyObject *pr; - - pr = PyList_New(r.count); - for (Py_ssize_t i = 0; i < (Py_ssize_t)r.count; i++) { - PyList_SetItem(pr, i, PyInt_FromLong(r.f[i])); - } - return pr; -} - -static PyObject *FROM_cvpoint2d32f_count(cvpoint2d32f_count r) -{ - PyObject *pr; - - pr = PyList_New(r.count); - for (Py_ssize_t i = 0; i < (Py_ssize_t)r.count; i++) { - PyList_SetItem(pr, i, FROM_CvPoint2D32f(r.points[i])); - } - return pr; -} - -static PyObject *FROM_CvPoint2D32fs(CvPoint2D32fs r) -{ - PyObject *pr; - - pr = PyList_New(r.count); - for (Py_ssize_t i = 0; i < (Py_ssize_t)r.count; i++) { - PyList_SetItem(pr, i, FROM_CvPoint2D32f(r.p[i])); - } - return pr; -} - -typedef CvSeq CvSeqOfCvConvexityDefect; -static PyObject *FROM_CvSeqOfCvConvexityDefectPTR(CvSeqOfCvConvexityDefect *r) -{ - PyObject *pr; - pr = PyList_New(r->total); - for (int i = 0; i < r->total; i++) { - CvConvexityDefect *pd = CV_GET_SEQ_ELEM(CvConvexityDefect, r, i); - PyList_SetItem(pr, i, Py_BuildValue("(ii)(ii)(ii)f", - pd->start->x, pd->start->y, - pd->end->x, pd->end->y, - pd->depth_point->x, pd->depth_point->y, - pd->depth)); - } - // This function has copied the CvSeq data into a list. Hence the - // CvSeq is not being returned to the caller. Hence, no reference - // count increase for the storage, unlike _FROM_CvSeqPTR. - return pr; -} - -typedef CvSeq CvSeqOfCvAvgComp; -static PyObject *FROM_CvSeqOfCvAvgCompPTR(CvSeqOfCvAvgComp *r) -{ - PyObject *pr; - pr = PyList_New(r->total); - for (int i = 0; i < r->total; i++) { - CvAvgComp *pd = CV_GET_SEQ_ELEM(CvAvgComp, r, i); - PyList_SetItem(pr, i, Py_BuildValue("(iiii)i", - pd->rect.x, pd->rect.y, - pd->rect.width, pd->rect.height, - pd->neighbors)); - } - // This function has copied the CvSeq data into a list. Hence the - // CvSeq is not being returned to the caller. Hence, no reference - // count increase for the storage, unlike _FROM_CvSeqPTR. - return pr; -} - -typedef CvSeq CvSeqOfCvStarKeypoint; -static PyObject *FROM_CvSeqOfCvStarKeypointPTR(CvSeqOfCvStarKeypoint *r) -{ - PyObject *pr; - pr = PyList_New(r->total); - for (int i = 0; i < r->total; i++) { - CvStarKeypoint *pd = CV_GET_SEQ_ELEM(CvStarKeypoint, r, i); - PyList_SetItem(pr, i, Py_BuildValue("(ii)if", - pd->pt.x, pd->pt.y, - pd->size, - pd->response)); - } - // This function has copied the CvSeq data into a list. Hence the - // CvSeq is not being returned to the caller. Hence, no reference - // count increase for the storage, unlike _FROM_CvSeqPTR. 
- return pr; -} - -typedef CvSeq CvSeqOfCvSURFPoint; -static PyObject *FROM_CvSeqOfCvSURFPointPTR(CvSeqOfCvSURFPoint *r) -{ - PyObject *pr; - pr = PyList_New(r->total); - for (int i = 0; i < r->total; i++) { - CvSURFPoint *pd = CV_GET_SEQ_ELEM(CvSURFPoint, r, i); - PyList_SetItem(pr, i, Py_BuildValue("(ff)iiff", - pd->pt.x, pd->pt.y, - pd->laplacian, - pd->size, - pd->dir, - pd->hessian)); - } - // This function has copied the CvSeq data into a list. Hence the - // CvSeq is not being returned to the caller. Hence, no reference - // count increase for the storage, unlike _FROM_CvSeqPTR. - return pr; -} - -typedef CvSeq CvSeqOfCvSURFDescriptor; -static PyObject *FROM_CvSeqOfCvSURFDescriptorPTR(CvSeqOfCvSURFDescriptor *r) -{ - PyObject *pr; - pr = PyList_New(r->total); - for (int i = 0; i < r->total; i++) { - float *pd = (float*)cvGetSeqElem(r, i); - int count = r->elem_size / sizeof(float); - PyObject *oi = PyList_New(count); - for (int j = 0; j < count; j++) { - PyList_SetItem(oi, j, PyFloat_FromDouble(pd[j])); - } - PyList_SetItem(pr, i, oi); - } - // This function has copied the CvSeq data into a list. Hence the - // CvSeq is not being returned to the caller. Hence, no reference - // count increase for the storage, unlike _FROM_CvSeqPTR. - return pr; -} - -typedef CvPoint2D32f CvPoint2D32f_4[4]; -static PyObject *FROM_CvPoint2D32f_4(CvPoint2D32f* r) -{ - return Py_BuildValue("(ff)(ff)(ff)(ff)", - r[0].x, r[0].y, - r[1].x, r[1].y, - r[2].x, r[2].y, - r[3].x, r[3].y); -} - -typedef float CvMatr32f_i[9]; - -static PyObject *FROM_CvMatr32f_i(CvMatr32f_i r) -{ - return Py_BuildValue("(fff)(fff)(fff)", - r[0], r[1], r[2], - r[3], r[4], r[5], - r[6], r[7], r[8]); -} - -typedef float CvVect32f_i[3]; -static PyObject *FROM_CvVect32f_i(CvVect32f_i r) -{ - return Py_BuildValue("fff", - r[0], r[1], r[2]); -} - -static PyObject *FROM_CvFont(CvFont r) -{ - cvfont_t *cf = PyObject_NEW(cvfont_t, &cvfont_Type); - cf->a = r; - return (PyObject*)cf; -} - -static PyObject *FROM_CvSubdiv2DPointPTR(CvSubdiv2DPoint* r) -{ - if (r != NULL) { - cvsubdiv2dpoint_t *cf = PyObject_NEW(cvsubdiv2dpoint_t, &cvsubdiv2dpoint_Type); - cf->a = r; - return (PyObject*)cf; - } else { - Py_INCREF(Py_None); - return Py_None; - } -} - -static PyObject *FROM_IplImagePTR(IplImage *r) -{ - iplimage_t *cva = PyObject_NEW(iplimage_t, &iplimage_Type); - cva->a = r; - return pythonize_IplImage(cva); -} - -static PyObject *FROM_ROIplImagePTR(ROIplImage *r) -{ - if (r != NULL) { - iplimage_t *cva = PyObject_NEW(iplimage_t, &iplimage_Type); - cva->a = cvCreateImageHeader(cvSize(100,100), 8, 1); - *(cva->a) = *r; - cva->data = PyBuffer_FromReadWriteMemory(r->imageData, r->height * r->widthStep); - cva->offset = 0; - return (PyObject*)cva; - } else { - Py_RETURN_NONE; - } -} - -static PyObject *FROM_ROCvMatPTR(ROCvMat *r) -{ - if (r != NULL) { - cvmat_t *cva = PyObject_NEW(cvmat_t, &cvmat_Type); - cva->a = cvCreateMatHeader(100, 100, CV_8U); - *(cva->a) = *r; - cva->data = PyBuffer_FromReadWriteMemory(r->data.ptr, r->rows * r->step); - cva->offset = 0; - return (PyObject*)cva; - } else { - Py_RETURN_NONE; - } -} - -static PyObject *FROM_CvMatPTR(CvMat *r) -{ - cvmat_t *cvm = PyObject_NEW(cvmat_t, &cvmat_Type); - cvm->a = r; - - return pythonize_CvMat(cvm); -} - -static PyObject *FROM_CvMat(CvMat *r) -{ - cvmat_t *m = PyObject_NEW(cvmat_t, &cvmat_Type); - m->a = r; - return pythonize_CvMat(m); -} - -static PyObject *FROM_CvMatNDPTR(CvMatND *r) -{ - cvmatnd_t *m = PyObject_NEW(cvmatnd_t, &cvmatnd_Type); - m->a = r; - return 
pythonize_CvMatND(m); -} - -static PyObject *FROM_CvRNG(CvRNG r) -{ - cvrng_t *m = PyObject_NEW(cvrng_t, &cvrng_Type); - m->a = r; - return (PyObject*)m; -} - -/*static PyObject *FROM_CvContourTreePTR(CvContourTree *r) -{ - cvcontourtree_t *m = PyObject_NEW(cvcontourtree_t, &cvcontourtree_Type); - m->a = r; - return (PyObject*)m; -}*/ - -static PyObject *FROM_generic(generic r) -{ - if (r == NULL) { - failmsg("OpenCV returned NULL"); - return NULL; - } - CvTypeInfo* t = cvTypeOf(r); - if (strcmp(t->type_name, "opencv-image") == 0) - return FROM_IplImagePTR((IplImage*)r); - else if (strcmp(t->type_name, "opencv-matrix") == 0) - return FROM_CvMat((CvMat*)r); - else if (strcmp(t->type_name, "opencv-nd-matrix") == 0) - return FROM_CvMatNDPTR((CvMatND*)r); - else if (strcmp(t->type_name, "opencv-haar-classifier") == 0) - return FROM_CvHaarClassifierCascadePTR((CvHaarClassifierCascade*)r); - else { - failmsg("Unknown OpenCV type '%s'", t->type_name); - return NULL; - } -} - -static PyObject *FROM_CvSubdiv2DEdge(CvSubdiv2DEdge r) -{ - cvsubdiv2dedge_t *m = PyObject_NEW(cvsubdiv2dedge_t, &cvsubdiv2dedge_Type); - m->a = r; - m->container = Py_None; // XXX - Py_INCREF(m->container); - return (PyObject*)m; -} - -static PyObject *FROM_CvPoints(CvPoints src) -{ - PyObject *pr; - pr = PyList_New(src.count); - for (int i = 0; i < src.count; i++) { - PyList_SetItem(pr, i, FROM_CvPoint(src.p[i])); - } - return pr; -} - -/************************************************************************/ - -/* A few functions are too odd to be generated, - * so are handwritten here */ - -static PyObject *pycvWaitKey(PyObject *self, PyObject *args, PyObject *kw) -{ - int delay = 0; - - const char *keywords[] = { "delay", NULL }; - if (!PyArg_ParseTupleAndKeywords(args, kw, "|i", (char**)keywords, &delay)) - return NULL; - int r; - Py_BEGIN_ALLOW_THREADS - r = cvWaitKey(delay); - Py_END_ALLOW_THREADS - return FROM_int(r); -} - -static PyObject *pycvLoadImage(PyObject *self, PyObject *args, PyObject *kw) -{ - const char *keywords[] = { "filename", "iscolor", NULL }; - char *filename; - int iscolor = CV_LOAD_IMAGE_COLOR; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "s|i", (char**)keywords, &filename, &iscolor)) - return NULL; - - // Inside ALLOW_THREADS, must not reference 'filename' because it might move. - // So make a local copy 'filename_copy'. - char filename_copy[2048]; - strncpy(filename_copy, filename, sizeof(filename_copy)); - - IplImage *r; - Py_BEGIN_ALLOW_THREADS - r = cvLoadImage(filename_copy, iscolor); - Py_END_ALLOW_THREADS - - if (r == NULL) { - PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename); - return NULL; - } else { - return FROM_IplImagePTR(r); - } -} - -static PyObject *pycvLoadImageM(PyObject *self, PyObject *args, PyObject *kw) -{ - const char *keywords[] = { "filename", "iscolor", NULL }; - char *filename; - int iscolor = CV_LOAD_IMAGE_COLOR; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "s|i", (char**)keywords, &filename, &iscolor)) - return NULL; - - // Inside ALLOW_THREADS, must not reference 'filename' because it might move. - // So make a local copy 'filename_copy'. 
- char filename_copy[2048]; - strncpy(filename_copy, filename, sizeof(filename_copy)); - - CvMat *r; - Py_BEGIN_ALLOW_THREADS - r = cvLoadImageM(filename_copy, iscolor); - Py_END_ALLOW_THREADS - - if (r == NULL) { - PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename); - return NULL; - } else { - return FROM_CvMatPTR(r); - } -} - -static PyObject *pycvCreateImageHeader(PyObject *self, PyObject *args) -{ - int w, h, depth, channels; - if (!PyArg_ParseTuple(args, "(ii)Ii", &w, &h, &depth, &channels)) - return NULL; - iplimage_t *cva = PyObject_NEW(iplimage_t, &iplimage_Type); - cva->a = cvCreateImageHeader(cvSize(w, h), depth, channels); - if (cva->a == NULL) { - PyErr_SetString(PyExc_TypeError, "CreateImage failed"); - return NULL; - } else { - cva->data = Py_None; - Py_INCREF(cva->data); - cva->offset = 0; - - return (PyObject*)cva; - } -} - -static PyObject *pycvCreateImage(PyObject *self, PyObject *args) -{ - int w, h, depth, channels; - if (!PyArg_ParseTuple(args, "(ii)Ii:CreateImage", &w, &h, &depth, &channels)) - return NULL; - iplimage_t *cva = PyObject_NEW(iplimage_t, &iplimage_Type); - ERRWRAP(cva->a = cvCreateImage(cvSize(w, h), depth, channels)); - if (cva->a == NULL) { - PyErr_SetString(PyExc_TypeError, "CreateImage failed"); - return NULL; - } else { - return pythonize_IplImage(cva); - } -} - -static PyObject *pycvCreateMatHeader(PyObject *self, PyObject *args) -{ - int rows, cols, type; - if (!PyArg_ParseTuple(args, "iii", &rows, &cols, &type)) - return NULL; - cvmat_t *m = PyObject_NEW(cvmat_t, &cvmat_Type); - ERRWRAP(m->a = cvCreateMatHeader(rows, cols, type)); - if (m->a == NULL) { - PyErr_SetString(PyExc_TypeError, "CreateMat failed"); - return NULL; - } else { - m->data = Py_None; - Py_INCREF(m->data); - m->offset = 0; - return (PyObject*)m; - } -} - -static PyObject *pycvCreateMat(PyObject *self, PyObject *args) -{ - int rows, cols, type; - if (!PyArg_ParseTuple(args, "iii", &rows, &cols, &type)) - return NULL; - cvmat_t *m = PyObject_NEW(cvmat_t, &cvmat_Type); - ERRWRAP(m->a = cvCreateMat(rows, cols, type)); - if (m->a == NULL) { - PyErr_SetString(PyExc_TypeError, "CreateMat failed"); - return NULL; - } else { - return pythonize_CvMat(m); - } -} - -static PyObject *pycvCreateMatNDHeader(PyObject *self, PyObject *args) -{ - ints dims; - int type; - - if (!PyArg_ParseTuple(args, "O&i", convert_to_ints, (void*)&dims, &type)) - return NULL; - cvmatnd_t *m = PyObject_NEW(cvmatnd_t, &cvmatnd_Type); - ERRWRAP(m->a = cvCreateMatNDHeader(dims.count, dims.i, type)); - - m->data = Py_None; - Py_INCREF(m->data); - delete [] dims.i; - return (PyObject*)m; -} - - -static PyObject *pycvCreateMatND(PyObject *self, PyObject *args) -{ - ints dims; - int type; - - if (!PyArg_ParseTuple(args, "O&i", convert_to_ints, (void*)&dims, &type)) - return NULL; - cvmatnd_t *m = PyObject_NEW(cvmatnd_t, &cvmatnd_Type); - ERRWRAP(m->a = cvCreateMatND(dims.count, dims.i, type)); - delete [] dims.i; - return pythonize_CvMatND(m); -} - -#if PYTHON_USE_NUMPY -static PyObject *pycvfromarray(PyObject *self, PyObject *args, PyObject *kw) -{ - const char *keywords[] = { "arr", "allowND", NULL }; - PyObject *o; - int allowND = 0; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "O|i", (char**)keywords, &o, &allowND)) - return NULL; - return fromarray(o, allowND); -} - -static PyObject *fromarray(PyObject *o, int allowND) -{ - PyObject *ao = PyObject_GetAttrString(o, "__array_struct__"); - PyObject *retval; - - if ((ao == NULL) || !PyCObject_Check(ao)) { - PyErr_SetString(PyExc_TypeError, "object does not have 
array interface"); - return NULL; - } - PyArrayInterface *pai = (PyArrayInterface*)PyCObject_AsVoidPtr(ao); - if (pai->two != 2) { - PyErr_SetString(PyExc_TypeError, "object does not have array interface"); - Py_DECREF(ao); - return NULL; - } - - int type = -1; - - switch (pai->typekind) { - case 'i': - if (pai->itemsize == 1) - type = CV_8SC1; - else if (pai->itemsize == 2) - type = CV_16SC1; - else if (pai->itemsize == 4) - type = CV_32SC1; - break; - - case 'u': - if (pai->itemsize == 1) - type = CV_8UC1; - else if (pai->itemsize == 2) - type = CV_16UC1; - break; - - case 'f': - if (pai->itemsize == 4) - type = CV_32FC1; - else if (pai->itemsize == 8) - type = CV_64FC1; - break; - - } - if (type == -1) { - PyErr_SetString(PyExc_TypeError, "the array type is not supported by OpenCV"); - Py_DECREF(ao); - return NULL; - } - - if (!allowND) { - cvmat_t *m = PyObject_NEW(cvmat_t, &cvmat_Type); - if (pai->nd == 2) { - if (pai->strides[1] != pai->itemsize) { - return failmsg("cv.fromarray array can only accept arrays with contiguous data"), (PyObject*)0; - } - ERRWRAP(m->a = cvCreateMatHeader((int)pai->shape[0], (int)pai->shape[1], type)); - m->a->step = (int)pai->strides[0]; - } else if (pai->nd == 3) { - if (pai->shape[2] > CV_CN_MAX) { - Py_DECREF(ao); - return failmsg("cv.fromarray too many channels, see allowND argument"), (PyObject*)0; - } - ERRWRAP(m->a = cvCreateMatHeader((int)pai->shape[0], (int)pai->shape[1], type + ((int)(pai->shape[2] - 1) << CV_CN_SHIFT))); - m->a->step = (int)pai->strides[0]; - } else { - Py_DECREF(ao); - return failmsg("cv.fromarray array can be 2D or 3D only, see allowND argument"), (PyObject*)0; - } - m->a->data.ptr = (uchar*)pai->data; - //retval = pythonize_foreign_CvMat(m); - m->data = o; - m->offset = 0; - retval = (PyObject*)m; - } else { - int dims[CV_MAX_DIM]; - int i; - for (i = 0; i < pai->nd; i++) - dims[i] = (int)pai->shape[i]; - cvmatnd_t *m = PyObject_NEW(cvmatnd_t, &cvmatnd_Type); - ERRWRAP(m->a = cvCreateMatNDHeader(pai->nd, dims, type)); - m->a->data.ptr = (uchar*)pai->data; - m->data = o; - m->offset = 0; - retval = (PyObject*)m; - //retval = pythonize_CvMatND(m, ao); - } - Py_DECREF(ao); - Py_INCREF(o); - return retval; -} -#endif - -class ranges { -public: - Py_ssize_t len; - float **rr; - ranges() { - len = 0; - rr = NULL; - } - int fromobj(PyObject *o, const char *name = "no_name") { - PyObject *fi = PySequence_Fast(o, name); - if (fi == NULL) - return 0; - len = (int)PySequence_Fast_GET_SIZE(fi); - rr = new float*[len]; - for (Py_ssize_t i = 0; i < len; i++) { - PyObject *item = PySequence_Fast_GET_ITEM(fi, i); - floats ff; ff.f = 0; - if (!convert_to_floats(item, &ff)) - return 0; - rr[i] = ff.f; - } - Py_DECREF(fi); - return 1; - } - ~ranges() { - for (Py_ssize_t i = 0; i < len; i++) - delete rr[i]; - delete[] rr; - } -}; - -static int ranges_converter(PyObject *o, ranges* dst) -{ - return dst->fromobj(o); -} - -static PyObject *pycvCreateHist(PyObject *self, PyObject *args, PyObject *kw) -{ - const char *keywords[] = { "dims", "type", "ranges", "uniform", NULL }; - PyObject *dims; - int type; - int uniform = 1; - ranges r; - if (!PyArg_ParseTupleAndKeywords(args, kw, "Oi|O&i", (char**)keywords, &dims, &type, ranges_converter, (void*)&r, &uniform)) { - return NULL; - } - cvhistogram_t *h = PyObject_NEW(cvhistogram_t, &cvhistogram_Type); - args = Py_BuildValue("Oi", dims, CV_32FC1); - memset(&h->h, 0, sizeof(h->h)); - h->bins = pycvCreateMatND(self, args); - Py_DECREF(args); - if (h->bins == NULL) { - return NULL; - } - h->h.type = 
CV_HIST_MAGIC_VAL + CV_HIST_UNIFORM_FLAG; - if (!convert_to_CvArr(h->bins, &(h->h.bins), "bins")) - return NULL; - - if(r.rr) - { - ERRWRAP(cvSetHistBinRanges(&(h->h), r.rr, uniform)); - } - - return (PyObject*)h; -} - -static PyObject *pycvInitLineIterator(PyObject *self, PyObject *args, PyObject *kw) -{ - const char *keywords[] = { "image", "pt1", "pt2", "connectivity", "left_to_right", NULL }; - CvArr *image; - CvPoint pt1; - CvPoint pt2; - int connectivity = 8; - int left_to_right = 0; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "O&O&O&|ii", (char**)keywords, - convert_to_CvArr, &image, - convert_to_CvPoint, &pt1, - convert_to_CvPoint, &pt2, - &connectivity, - &left_to_right)) - return NULL; - - cvlineiterator_t *pi = PyObject_NEW(cvlineiterator_t, &cvlineiterator_Type); - pi->count = cvInitLineIterator(image, pt1, pt2, &pi->iter, connectivity, left_to_right); - ERRWRAP(pi->type = cvGetElemType(image)); - return (PyObject*)pi; -} - -static PyObject *pycvCreateMemStorage(PyObject *self, PyObject *args) -{ - int block_size = 0; - if (!PyArg_ParseTuple(args, "|i", &block_size)) - return NULL; - cvmemstorage_t *pm = PyObject_NEW(cvmemstorage_t, &cvmemstorage_Type); - pm->a = cvCreateMemStorage(block_size); - return (PyObject*)pm; -} - -// single index: return row -// 2 indices: row, column -// both row and column can be slices. column slice must have a step of 1. -// -// returns a scalar when all dimensions are specified and all are integers. Otherwise returns a CvMat. -// -static PyObject *cvarr_GetItem(PyObject *o, PyObject *key) -{ - dims dd; - - CvArr *cva; - if (!convert_to_CvArr(o, &cva, "src")) - return NULL; - - if (!convert_to_dims(key, &dd, cva, "key")) { - return NULL; - } - - // Figure out if all supplied indices have a stride of zero - means they are not slices - // and if all indices are positive - int all0 = 1; - for (int i = 0; i < dd.count; i++) { - all0 &= (dd.step[i] == 0) && (0 <= dd.i[i]); - } - - // if every dimension supplied, and none are slices, return the scalar - if ((cvGetDims(cva) == dd.count) && all0) { - CvScalar s; - ERRWRAP(s = cvGetND(cva, dd.i)); - return PyObject_FromCvScalar(s, cvGetElemType(cva)); - } else { - // pad missing dimensions - for (int i = dd.count; i < cvGetDims(cva); i++) { - dd.i[i] = 0; - dd.step[i] = 1; - dd.length[i] = cvGetDimSize(cva, i); - } - dd.count = cvGetDims(cva); - - // negative steps are illegal for OpenCV - for (int i = 0; i < dd.count; i++) { - if (dd.step[i] < 0) - return failmsg("Negative step is illegal"), (PyObject*)0; - } - - // zero length illegal for OpenCV - for (int i = 0; i < dd.count; i++) { - if (dd.length[i] == 0) - return failmsg("Zero sized dimension is illegal"), (PyObject*)0; - } - - // column step can only be 0 or 1 - if ((dd.step[dd.count-1] != 0) && (dd.step[dd.count-1] != 1)) - return failmsg("Column step is illegal"), (PyObject*)0; - - if (is_cvmat(o) || is_iplimage(o)) { - cvmat_t *sub = PyObject_NEW(cvmat_t, &cvmat_Type); - sub->a = cvCreateMatHeader(dd.length[0], dd.length[1], cvGetElemType(cva)); - uchar *old0; // pointer to first element in old mat - int oldstep; - cvGetRawData(cva, &old0, &oldstep); - uchar *new0; // pointer to first element in new mat - ERRWRAP(new0 = cvPtrND(cva, dd.i)); - - sub->a->step = oldstep * dd.step[0]; - sub->data = what_data(o); - Py_INCREF(sub->data); - sub->offset = new0 - old0; - return (PyObject*)sub; - } else { - cvmatnd_t *sub = PyObject_NEW(cvmatnd_t, &cvmatnd_Type); - sub->a = cvCreateMatNDHeader(dd.count, dd.length, cvGetElemType(cva)); - uchar *old0; // 
pointer to first element in old mat - cvGetRawData(cva, &old0); - uchar *new0; // pointer to first element in new mat - ERRWRAP(new0 = cvPtrND(cva, dd.i)); - - for (int d = 0; d < dd.count; d++) { - int stp = dd.step[d]; - sub->a->dim[d].step = ((CvMatND*)cva)->dim[d].step * ((stp == 0) ? 1 : stp); - sub->a->dim[d].size = dd.length[d]; - } - sub->data = what_data(o); - Py_INCREF(sub->data); - sub->offset = new0 - old0; - return (PyObject*)sub; - } - } -} - -static int cvarr_SetItem(PyObject *o, PyObject *key, PyObject *v) -{ - dims dd; - - CvArr *cva; - if (!convert_to_CvArr(o, &cva, "src")) - return -1; - - if (!convert_to_dims(key, &dd, cva, "key")) { - return -1; - } - - if (cvGetDims(cva) != dd.count) { - PyErr_SetString(PyExc_TypeError, "key length does not match array dimension"); - return -1; - } - - CvScalar s; - if (PySequence_Check(v)) { - PyObject *fi = PySequence_Fast(v, "v"); - if (fi == NULL) - return -1; - if (PySequence_Fast_GET_SIZE(fi) != CV_MAT_CN(cvGetElemType(cva))) { - PyErr_SetString(PyExc_TypeError, "sequence size must be same as channel count"); - return -1; - } - for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fi); i++) - s.val[i] = PyFloat_AsDouble(PySequence_Fast_GET_ITEM(fi, i)); - Py_DECREF(fi); - } else { - if (1 != CV_MAT_CN(cvGetElemType(cva))) { - PyErr_SetString(PyExc_TypeError, "scalar supplied but channel count does not equal 1"); - return -1; - } - s.val[0] = PyFloat_AsDouble(v); - } - switch (dd.count) { - case 1: - ERRWRAPN(cvSet1D(cva, dd.i[0], s), -1); - break; - case 2: - ERRWRAPN(cvSet2D(cva, dd.i[0], dd.i[1], s), -1); - break; - case 3: - ERRWRAPN(cvSet3D(cva, dd.i[0], dd.i[1], dd.i[2], s), -1); - break; - default: - ERRWRAPN(cvSetND(cva, dd.i, s), -1); - // XXX - OpenCV bug? - seems as if an error in cvSetND does not set error status? 
- break; - } - if (cvGetErrStatus() != 0) { - translate_error_to_exception(); - return -1; - } - - return 0; -} - - -static PyObject *pycvSetData(PyObject *self, PyObject *args) -{ - PyObject *o, *s; - int step = CV_AUTO_STEP; - - if (!PyArg_ParseTuple(args, "OO|i", &o, &s, &step)) - return NULL; - if (is_iplimage(o)) { - iplimage_t *ipl = (iplimage_t*)o; - ipl->a->widthStep = step; - Py_DECREF(ipl->data); - ipl->data = s; - Py_INCREF(ipl->data); - } else if (is_cvmat(o)) { - cvmat_t *m = (cvmat_t*)o; - m->a->step = step; - Py_DECREF(m->data); - m->data = s; - Py_INCREF(m->data); - } else if (is_cvmatnd(o)) { - cvmatnd_t *m = (cvmatnd_t*)o; - Py_DECREF(m->data); - m->data = s; - Py_INCREF(m->data); - } else { - PyErr_SetString(PyExc_TypeError, "SetData argument must be either IplImage, CvMat or CvMatND"); - return NULL; - } - - Py_RETURN_NONE; -} - -static PyObject *what_data(PyObject *o) -{ - if (is_iplimage(o)) { - iplimage_t *ipl = (iplimage_t*)o; - return ipl->data; - } else if (is_cvmat(o)) { - cvmat_t *m = (cvmat_t*)o; - return m->data; - } else if (is_cvmatnd(o)) { - cvmatnd_t *m = (cvmatnd_t*)o; - return m->data; - } else { - assert(0); - return NULL; - } -} - -static PyObject *pycvCreateData(PyObject *self, PyObject *args) -{ - PyObject *o; - - if (!PyArg_ParseTuple(args, "O", &o)) - return NULL; - - CvArr *a; - if (!convert_to_CvArr(o, &a, "arr")) - return NULL; - ERRWRAP(cvCreateData(a)); - - Py_DECREF(what_data(o)); - if (is_iplimage(o)) { - iplimage_t *ipl = (iplimage_t*)o; - pythonize_IplImage(ipl); - } else if (is_cvmat(o)) { - cvmat_t *m = (cvmat_t*)o; - pythonize_CvMat(m); - } else if (is_cvmatnd(o)) { - cvmatnd_t *m = (cvmatnd_t*)o; - pythonize_CvMatND(m); - } else { - PyErr_SetString(PyExc_TypeError, "CreateData argument must be either IplImage, CvMat or CvMatND"); - return NULL; - } - - Py_RETURN_NONE; -} - -static PyObject *pycvGetDims(PyObject *self, PyObject *args) -{ - PyObject *o; - - if (!PyArg_ParseTuple(args, "O", &o)) - return NULL; - CvArr *cva; - if (!convert_to_CvArr(o, &cva, "src")) - return NULL; - - int i, nd; - ERRWRAP(nd = cvGetDims(cva)); - PyObject *r = PyTuple_New(nd); - for (i = 0; i < nd; i++) - PyTuple_SetItem(r, i, PyInt_FromLong(cvGetDimSize(cva, i))); - return r; -} - -static PyObject *pycvGetImage(PyObject *self, PyObject *args) -{ - PyObject *o, *r; - - if (!PyArg_ParseTuple(args, "O", &o)) - return NULL; - if (is_iplimage(o)) { - r = o; - Py_INCREF(o); - } else { - IplImage *ipl = cvCreateImageHeader(cvSize(100,100), 8, 1); // these args do not matter, because overwritten - CvArr *cva; - if (!convert_to_CvArr(o, &cva, "src")) - return NULL; - ERRWRAP(cvGetImage(cva, ipl)); - - iplimage_t *oipl = PyObject_NEW(iplimage_t, &iplimage_Type); - oipl->a = ipl; - oipl->data = what_data(o); - Py_INCREF(oipl->data); - oipl->offset = 0; - - r = (PyObject*)oipl; - } - return r; -} - -static PyObject *pycvGetMat(PyObject *self, PyObject *args, PyObject *kw) -{ - const char *keywords[] = { "arr", "allowND", NULL }; - PyObject *o, *r; - int allowND = 0; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "O|i", (char**)keywords, &o, &allowND)) - return NULL; - if (is_cvmat(o)) { - r = o; - Py_INCREF(o); - } else { - CvMat *m = cvCreateMatHeader(100,100, 1); // these args do not matter, because overwritten - CvArr *cva; - if (!convert_to_CvArr(o, &cva, "src")) - return NULL; - ERRWRAP(cvGetMat(cva, m, NULL, allowND)); - - cvmat_t *om = PyObject_NEW(cvmat_t, &cvmat_Type); - om->a = m; - om->data = what_data(o); - Py_INCREF(om->data); - om->offset = 0; - - r = 
(PyObject*)om;
-  }
-  return r;
-}
-
-static PyObject *pycvReshape(PyObject *self, PyObject *args)
-{
-  PyObject *o;
-  int new_cn;
-  int new_rows = 0;
-
-  if (!PyArg_ParseTuple(args, "Oi|i", &o, &new_cn, &new_rows))
-    return NULL;
-
-  CvMat *m = cvCreateMatHeader(100,100, 1); // these args do not matter, because overwritten
-  CvArr *cva;
-  if (!convert_to_CvArr(o, &cva, "src"))
-    return NULL;
-  ERRWRAP(cvReshape(cva, m, new_cn, new_rows));
-
-  cvmat_t *om = PyObject_NEW(cvmat_t, &cvmat_Type);
-  om->a = m;
-  om->data = what_data(o);
-  Py_INCREF(om->data);
-  om->offset = 0;
-
-  return (PyObject*)om;
-}
-
-static PyObject *pycvReshapeMatND(PyObject *self, PyObject *args)
-{
-  PyObject *o;
-  int new_cn = 0;
-  PyObject *new_dims = NULL;
-
-  if (!PyArg_ParseTuple(args, "OiO", &o, &new_cn, &new_dims))
-    return NULL;
-
-  CvMatND *cva;
-  if (!convert_to_CvMatND(o, &cva, "src"))
-    return NULL;
-  ints dims={0,0};
-  if (new_dims != NULL) {
-    if (!convert_to_ints(new_dims, &dims, "new_dims"))
-      return NULL;
-  }
-
-  if (new_cn == 0)
-    new_cn = CV_MAT_CN(cvGetElemType(cva));
-
-  int i;
-  int count = CV_MAT_CN(cvGetElemType(cva));
-  for (i = 0; i < cva->dims; i++)
-    count *= cva->dim[i].size;
-
-  int newcount = new_cn;
-  for (i = 0; i < dims.count; i++)
-    newcount *= dims.i[i];
-
-  if (count != newcount) {
-    PyErr_SetString(PyExc_TypeError, "Total number of elements must be unchanged");
-    return NULL;
-  }
-
-  CvMatND *pn = cvCreateMatNDHeader(dims.count, dims.i, CV_MAKETYPE(CV_MAT_TYPE(cva->type), new_cn));
-  return shareDataND(o, cva, pn);
-}
-
-static PyObject *pycvFindContours(PyObject *self, PyObject *args, PyObject *kw)
-{
-  CvArr* image;
-  PyObject *pyobj_image = NULL;
-  CvMemStorage* storage;
-  PyObject *pyobj_storage = NULL;
-  CvSeq* first_contour;
-  int header_size = sizeof(CvContour);
-  int mode = CV_RETR_LIST;
-  int method = CV_CHAIN_APPROX_SIMPLE;
-  CvPoint offset = cvPoint(0,0);
-  PyObject *pyobj_offset = NULL;
-
-  const char *keywords[] = { "image", "storage", "mode", "method", "offset", NULL };
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "OO|iiO", (char**)keywords, &pyobj_image, &pyobj_storage, &mode, &method, &pyobj_offset))
-    return NULL;
-  if (!convert_to_CvArr(pyobj_image, &image, "image")) return NULL;
-  if (!convert_to_CvMemStorage(pyobj_storage, &storage, "storage")) return NULL;
-  if ((pyobj_offset != NULL) && !convert_to_CvPoint(pyobj_offset, &offset, "offset")) return NULL;
-  ERRWRAP(cvFindContours(image, storage, &first_contour, header_size, mode, method, offset));
-  cvseq_t *ps = PyObject_NEW(cvseq_t, &cvseq_Type);
-  ps->a = first_contour;
-  ps->container = PyTuple_GetItem(args, 1); // storage
-  Py_INCREF(ps->container);
-  return (PyObject*)ps;
-}
-
-static PyObject *pycvApproxPoly(PyObject *self, PyObject *args, PyObject *kw)
-{
-  cvarrseq src_seq;
-  PyObject *pyobj_src_seq = NULL;
-  int header_size = sizeof(CvContour);
-  CvMemStorage* storage;
-  PyObject *pyobj_storage = NULL;
-  int method;
-  double parameter = 0;
-  int parameter2 = 0;
-
-  const char *keywords[] = { "src_seq", "storage", "method", "parameter", "parameter2", NULL };
-  if (!PyArg_ParseTupleAndKeywords(args, kw, "OOi|di", (char**)keywords, &pyobj_src_seq, &pyobj_storage, &method, &parameter, &parameter2))
-    return NULL;
-  if (!convert_to_cvarrseq(pyobj_src_seq, &src_seq, "src_seq")) return NULL;
-  if (!convert_to_CvMemStorage(pyobj_storage, &storage, "storage")) return NULL;
-  CvSeq* r;
-  ERRWRAP(r = cvApproxPoly(src_seq.mat, header_size, storage, method, parameter, parameter2));
-  return FROM_CvSeqPTR(r);
-}
-
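Illustration (not part of the patch): the distance_function_glue below is the trampoline pycvCalcEMD2 uses to route cvCalcEMD2's C distance callback into a Python callable. The (callable, userdata) pair is packed into a tuple and smuggled through the void* user_param. The original glue never DECREFs the call result and ignores a raised exception; this sketch shows the same trampoline with that handling added, where example_distance_glue is a hypothetical name:

static float example_distance_glue(const float *a, const float *b, void *user_param)
{
  PyObject *state = (PyObject*)user_param;   /* ("callable", userdata) tuple */
  PyObject *cbargs = Py_BuildValue("(ff)(ff)O", a[0], a[1], b[0], b[1],
                                   PyTuple_GetItem(state, 1));
  if (cbargs == NULL)
    return 0.0f;                             /* Python exception is already set */
  PyObject *r = PyObject_Call(PyTuple_GetItem(state, 0), cbargs, NULL);
  Py_DECREF(cbargs);
  if (r == NULL)
    return 0.0f;                             /* Python exception is already set */
  float d = (float)PyFloat_AsDouble(r);
  Py_DECREF(r);                              /* the original glue leaked this */
  return d;
}

-static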
float distance_function_glue( const float* a, const float* b, void* user_param ) -{ - PyObject *o = (PyObject*)user_param; - PyObject *args = Py_BuildValue("(ff)(ff)O", a[0], a[1], b[0], b[1], PyTuple_GetItem(o, 1)); - PyObject *r = PyObject_Call(PyTuple_GetItem(o, 0), args, NULL); - Py_DECREF(args); - return (float)PyFloat_AsDouble(r); -} - -static PyObject *pycvCalcEMD2(PyObject *self, PyObject *args, PyObject *kw) -{ - const char *keywords[] = { "signature1", "signature2", "distance_type", "distance_func", "cost_matrix", "flow", "lower_bound", "userdata", NULL }; - CvArr* signature1; - PyObject *pyobj_signature1; - CvArr* signature2; - PyObject *pyobj_signature2; - int distance_type; - PyObject *distance_func = NULL; - CvArr* cost_matrix=NULL; - PyObject *pyobj_cost_matrix = NULL; - CvArr* flow=NULL; - PyObject *pyobj_flow = NULL; - float lower_bound = FLT_MAX; - PyObject *userdata = NULL; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "OOi|OOOfO", (char**)keywords, - &pyobj_signature1, - &pyobj_signature2, - &distance_type, - &distance_func, - &pyobj_cost_matrix, - &pyobj_flow, - &lower_bound, - &userdata)) - return NULL; - if (!convert_to_CvArr(pyobj_signature1, &signature1, "signature1")) return NULL; - if (!convert_to_CvArr(pyobj_signature2, &signature2, "signature2")) return NULL; - if (pyobj_cost_matrix && !convert_to_CvArr(pyobj_cost_matrix, &cost_matrix, "cost_matrix")) return NULL; - if (pyobj_flow && !convert_to_CvArr(pyobj_flow, &flow, "flow")) return NULL; - - if (distance_func == NULL) { - distance_func = Py_None; - } - if (userdata == NULL) { - userdata = Py_None; - } - - PyObject *ud = Py_BuildValue("OO", distance_func, userdata); - float r; - ERRWRAP(r = cvCalcEMD2(signature1, signature2, distance_type, distance_function_glue, cost_matrix, flow, &lower_bound, (void*)ud)); - Py_DECREF(ud); - - return PyFloat_FromDouble(r); -} - -static PyObject *pycvSubdiv2DLocate(PyObject *self, PyObject *args) -{ - PyObject *pyobj_subdiv; - PyObject *pyobj_pt; - CvSubdiv2D *subdiv; - CvPoint2D32f pt; - CvSubdiv2DEdge edge; - CvSubdiv2DPoint* vertex; - - if (!PyArg_ParseTuple(args, "OO", &pyobj_subdiv, &pyobj_pt)) - return NULL; - if (!convert_to_CvSubdiv2DPTR(pyobj_subdiv, &subdiv, "subdiv")) - return NULL; - if (!convert_to_CvPoint2D32f(pyobj_pt, &pt, "pt")) - return NULL; - - CvSubdiv2DPointLocation loc = cvSubdiv2DLocate(subdiv, pt, &edge, &vertex); - PyObject *r; - switch (loc) { - case CV_PTLOC_INSIDE: - case CV_PTLOC_ON_EDGE: - r = FROM_CvSubdiv2DEdge(edge); - break; - case CV_PTLOC_VERTEX: - r = FROM_CvSubdiv2DPointPTR(vertex); - break; - case CV_PTLOC_OUTSIDE_RECT: - r = Py_None; - Py_INCREF(Py_None); - break; - default: - return failmsg("Unexpected loc from cvSubdiv2DLocate"), (PyObject*)0; - } - return Py_BuildValue("iO", (int)loc, r); -} - -static PyObject *pycvCalcOpticalFlowPyrLK(PyObject *self, PyObject *args) -{ - CvArr* prev; - PyObject *pyobj_prev = NULL; - CvArr* curr; - PyObject *pyobj_curr = NULL; - CvArr* prev_pyr; - PyObject *pyobj_prev_pyr = NULL; - CvArr* curr_pyr; - PyObject *pyobj_curr_pyr = NULL; - CvPoint2D32f* prev_features; - PyObject *pyobj_prev_features = NULL; - PyObject *pyobj_curr_features = NULL; - CvPoint2D32f* curr_features; - CvSize win_size; - int level; - CvTermCriteria criteria; - int flags; - - if (!PyArg_ParseTuple(args, "OOOOO(ii)i(iif)i|O", - &pyobj_prev, &pyobj_curr, &pyobj_prev_pyr, &pyobj_curr_pyr, - &pyobj_prev_features, - &win_size.width, &win_size.height, &level, - &criteria.type, &criteria.max_iter, &criteria.epsilon, - &flags, - 
&pyobj_curr_features)) - return NULL; - if (!convert_to_CvArr(pyobj_prev, &prev, "prev")) return NULL; - if (!convert_to_CvArr(pyobj_curr, &curr, "curr")) return NULL; - if (!convert_to_CvArr(pyobj_prev_pyr, &prev_pyr, "prev_pyr")) return NULL; - if (!convert_to_CvArr(pyobj_curr_pyr, &curr_pyr, "curr_pyr")) return NULL; - if (!convert_to_CvPoint2D32fPTR(pyobj_prev_features, &prev_features, "prev_features")) return NULL; - int count = (int)PySequence_Length(pyobj_prev_features); - if (flags & CV_LKFLOW_INITIAL_GUESSES) { - failmsg("flag CV_LKFLOW_INITIAL_GUESSES is determined automatically from function arguments - it is not required"); - return NULL; - } - if (!pyobj_curr_features) { - curr_features = new CvPoint2D32f[count]; - } else { - if (PySequence_Length(pyobj_curr_features) != count) { - failmsg("curr_features must have same length as prev_features"); - return NULL; - } - if (!convert_to_CvPoint2D32fPTR(pyobj_curr_features, &curr_features, "curr_features")) return NULL; - flags |= CV_LKFLOW_INITIAL_GUESSES; - } - float *track_error = new float[count]; - char* status = new char[count]; - ERRWRAP(cvCalcOpticalFlowPyrLK(prev, curr, prev_pyr, curr_pyr, prev_features, curr_features, count, win_size, level, status, track_error, criteria, flags)); - - cvpoint2d32f_count r0; - r0.points = curr_features; - r0.count = count; - - chars r1; - r1.f = status; - r1.count = count; - - floats r2; - r2.f = track_error; - r2.count = count; - - return Py_BuildValue("NNN", FROM_cvpoint2d32f_count(r0), FROM_chars(r1), FROM_floats(r2)); -} - -// pt1,pt2 are input and output arguments here - -static PyObject *pycvClipLine(PyObject *self, PyObject *args) -{ - CvSize img_size; - PyObject *pyobj_img_size = NULL; - CvPoint pt1; - PyObject *pyobj_pt1 = NULL; - CvPoint pt2; - PyObject *pyobj_pt2 = NULL; - - if (!PyArg_ParseTuple(args, "OOO", &pyobj_img_size, &pyobj_pt1, &pyobj_pt2)) - return NULL; - if (!convert_to_CvSize(pyobj_img_size, &img_size, "img_size")) return NULL; - if (!convert_to_CvPoint(pyobj_pt1, &pt1, "pt1")) return NULL; - if (!convert_to_CvPoint(pyobj_pt2, &pt2, "pt2")) return NULL; - int r; - ERRWRAP(r = cvClipLine(img_size, &pt1, &pt2)); - if (r == 0) { - Py_RETURN_NONE; - } else { - return Py_BuildValue("NN", FROM_CvPoint(pt1), FROM_CvPoint(pt2)); - } -} - -static PyObject *pyfinddatamatrix(PyObject *self, PyObject *args) -{ - PyObject *pyim; - if (!PyArg_ParseTuple(args, "O", &pyim)) - return NULL; - - CvMat *image; - if (!convert_to_CvMat(pyim, &image, "image")) return NULL; - - std::deque<CvDataMatrixCode> codes; - ERRWRAP(codes = cvFindDataMatrix(image)); - - PyObject *pycodes = PyList_New(codes.size()); - for (size_t i = 0; i < codes.size(); i++) { - CvDataMatrixCode *pc = &codes[i]; - PyList_SetItem(pycodes, i, Py_BuildValue("(sOO)", pc->msg, FROM_CvMat(pc->corners), FROM_CvMat(pc->original))); - } - - return pycodes; -} - -static PyObject *temp_test(PyObject *self, PyObject *args) -{ -#if 0 - CvArr *im = cvLoadImage("../samples/c/lena.jpg", 0); - printf("im=%p\n", im); - CvMat *m = cvEncodeImage(".jpeg", im); -#endif -#if 0 - CvArr *im = cvLoadImage("lena.jpg", 0); - float r0[] = { 0, 255 }; - float *ranges[] = { r0 }; - int hist_size[] = { 256 }; - CvHistogram *hist = cvCreateHist(1, hist_size, CV_HIST_ARRAY, ranges, 1); - cvCalcHist(im, hist, 0, 0); -#endif - -#if 0 - CvMat* mat = cvCreateMat( 3, 3, CV_32F ); - CvMat row_header, *row; - row = cvReshape( mat, &row_header, 0, 1 ); - printf("%d,%d\n", row_header.rows, row_header.cols); - printf("ge %08x\n", cvGetElemType(mat)); -#endif - -#if 0 - CvMat
*m = cvCreateMat(1, 10, CV_8UC1); - printf("CvMat stride ===> %d\n", m->step); -#endif - -#if 0 - CvPoint2D32f src[3] = { { 0,0 }, { 1,0 }, { 0,1 } }; - CvPoint2D32f dst[3] = { { 0,0 }, { 17,0 }, { 0,17 } }; - - CvMat* mapping = cvCreateMat(2, 3, CV_32FC1); - cvGetAffineTransform(src, dst, mapping); - printf("===> %f\n", cvGetReal2D(mapping, 0, 0)); -#endif - -#if 0 - CvArr *im = cvLoadImage("checker77.png"); - CvPoint2D32f corners[49]; - int count; - cvFindChessboardCorners(im, cvSize(7,7), corners, &count, 0); - printf("count=%d\n", count); -#endif - -#if 0 - CvMat *src = cvCreateMat(512, 512, CV_8UC3); - CvMat *dst = cvCreateMat(512, 512, CV_8UC3); - cvPyrMeanShiftFiltering(src, dst, 5, 5); - return FROM_CvMat(src); -#endif - - return PyFloat_FromDouble(0.0); -} - -static PyObject *pycvFindChessboardCorners(PyObject *self, PyObject *args, PyObject *kw) -{ - CvArr* image; - PyObject *pyobj_image = NULL; - CvSize pattern_size; - PyObject *pyobj_pattern_size = NULL; - cvpoint2d32f_count corners; - int flags = CV_CALIB_CB_ADAPTIVE_THRESH; - - const char *keywords[] = { "image", "pattern_size", "flags", NULL }; - if (!PyArg_ParseTupleAndKeywords(args, kw, "OO|i", (char**)keywords, &pyobj_image, &pyobj_pattern_size, &flags)) - return NULL; - if (!convert_to_CvArr(pyobj_image, &image, "image")) return NULL; - if (!convert_to_CvSize(pyobj_pattern_size, &pattern_size, "pattern_size")) return NULL; - int r; - corners.points = new CvPoint2D32f[pattern_size.width * pattern_size.height]; - ERRWRAP(r = cvFindChessboardCorners(image, pattern_size, corners.points,&corners.count, flags)); - return Py_BuildValue("NN", FROM_int(r), FROM_cvpoint2d32f_count(corners)); -} - -// For functions GetSubRect, GetRow, GetCol. -// recipient has a view into donor's data, and needs to share it. -// make recipient use the donor's data, compute the offset, -// and manage reference counts. 
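The comment above summarizes the sharing scheme implemented by preShareData/shareData below: the recipient becomes a header into the donor's buffer, and the byte offset between their first elements is recorded so that a single reference-counted data object backs both. A minimal sketch of the observable behaviour from Python, using the old cv API that this patch deletes (illustrative only, not part of the diff):

    import cv  # old-style bindings removed by this patch

    m = cv.CreateMat(8, 8, cv.CV_8UC1)
    cv.SetZero(m)
    sub = cv.GetSubRect(m, (2, 2, 4, 4))  # a view into m, not a copy
    sub[0, 0] = 255                       # write through the view...
    print m[2, 2]                         # ...and the donor sees 255.0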
- -static void preShareData(CvArr *donor, CvMat **recipient) -{ - *recipient = cvCreateMatHeader(4, 4, cvGetElemType(donor)); -} - -static PyObject *shareData(PyObject *donor, CvArr *pdonor, CvMat *precipient) -{ - PyObject *recipient = (PyObject*)PyObject_NEW(cvmat_t, &cvmat_Type); - ((cvmat_t*)recipient)->a = precipient; - ((cvmat_t*)recipient)->offset = cvPtr1D(precipient, 0) - cvPtr1D(pdonor, 0); - - PyObject *arr_data; - if (is_cvmat(donor)) { - arr_data = ((cvmat_t*)donor)->data; - ((cvmat_t*)recipient)->offset += ((cvmat_t*)donor)->offset; - } else if (is_iplimage(donor)) { - arr_data = ((iplimage_t*)donor)->data; - ((cvmat_t*)recipient)->offset += ((iplimage_t*)donor)->offset; - } else { - return failmsg("Argument 'mat' must be either IplImage or CvMat"), (PyObject*)0; - } - ((cvmat_t*)recipient)->data = arr_data; - Py_INCREF(arr_data); - return recipient; -} - -static PyObject *shareDataND(PyObject *donor, CvMatND *pdonor, CvMatND *precipient) -{ - PyObject *recipient = (PyObject*)PyObject_NEW(cvmatnd_t, &cvmatnd_Type); - ((cvmatnd_t*)recipient)->a = precipient; - ((cvmatnd_t*)recipient)->offset = 0; - - PyObject *arr_data; - arr_data = ((cvmatnd_t*)donor)->data; - ((cvmatnd_t*)recipient)->data = arr_data; - Py_INCREF(arr_data); - return recipient; -} - -static PyObject *pycvGetHuMoments(PyObject *self, PyObject *args) -{ - CvMoments* moments; - PyObject *pyobj_moments = NULL; - - if (!PyArg_ParseTuple(args, "O", &pyobj_moments)) - return NULL; - if (!convert_to_CvMomentsPTR(pyobj_moments, &moments, "moments")) return NULL; - CvHuMoments r; - ERRWRAP(cvGetHuMoments(moments, &r)); - return Py_BuildValue("ddddddd", r.hu1, r.hu2, r.hu3, r.hu4, r.hu5, r.hu6, r.hu7); -} - -static PyObject *pycvFitLine(PyObject *self, PyObject *args) -{ - cvarrseq points; - PyObject *pyobj_points = NULL; - int dist_type; - float param; - float reps; - float aeps; - float r[6]; - - if (!PyArg_ParseTuple(args, "Oifff", &pyobj_points, &dist_type, &param, &reps, &aeps)) - return NULL; - if (!convert_to_cvarrseq(pyobj_points, &points, "points")) return NULL; - ERRWRAP(cvFitLine(points.mat, dist_type, param, reps, aeps, r)); - int dimension; - if (strcmp("opencv-matrix", cvTypeOf(points.mat)->type_name) == 0) - dimension = CV_MAT_CN(cvGetElemType(points.mat)); - else { - // sequence case...
don't think there is a sequence of 3d points, - // so assume 2D - dimension = 2; - } - if (dimension == 2) - return Py_BuildValue("dddd", r[0], r[1], r[2], r[3]); - else - return Py_BuildValue("dddddd", r[0], r[1], r[2], r[3], r[4], r[5]); -} - -static PyObject *pycvGetMinMaxHistValue(PyObject *self, PyObject *args) -{ - CvHistogram* hist; - PyObject *pyobj_hist = NULL; - float min_val; - float max_val; - int min_loc[CV_MAX_DIM]; - int max_loc[CV_MAX_DIM]; - - if (!PyArg_ParseTuple(args, "O", &pyobj_hist)) - return NULL; - if (!convert_to_CvHistogram(pyobj_hist, &hist, "hist")) return NULL; - ERRWRAP(cvGetMinMaxHistValue(hist, &min_val, &max_val, min_loc, max_loc)); - int d = cvGetDims(hist->bins); - PyObject *pminloc = PyTuple_New(d), *pmaxloc = PyTuple_New(d); - for (int i = 0; i < d; i++) { - PyTuple_SetItem(pminloc, i, PyInt_FromLong(min_loc[i])); - PyTuple_SetItem(pmaxloc, i, PyInt_FromLong(max_loc[i])); - } - return Py_BuildValue("ffNN", min_val, max_val, pminloc, pmaxloc); -} - -static CvSeq* cvHOGDetectMultiScale( const CvArr* image, CvMemStorage* storage, - const CvArr* svm_classifier=NULL, CvSize win_stride=cvSize(0,0), - double hit_threshold=0, double scale=1.05, - int group_threshold=2, CvSize padding=cvSize(0,0), - CvSize win_size=cvSize(64,128), CvSize block_size=cvSize(16,16), - CvSize block_stride=cvSize(8,8), CvSize cell_size=cvSize(8,8), - int nbins=9, int gammaCorrection=1 ) -{ - cv::HOGDescriptor hog(win_size, block_size, block_stride, cell_size, nbins, 1, -1, cv::HOGDescriptor::L2Hys, 0.2, gammaCorrection!=0); - if(win_stride.width == 0 && win_stride.height == 0) - win_stride = block_stride; - cv::Mat img = cv::cvarrToMat(image); - std::vector<cv::Rect> found; - if(svm_classifier) - { - CvMat stub, *m = cvGetMat(svm_classifier, &stub); - int sz = m->cols*m->rows; - CV_Assert(CV_IS_MAT_CONT(m->type) && (m->cols == 1 || m->rows == 1) && CV_MAT_TYPE(m->type) == CV_32FC1); - std::vector<float> w(sz); - std::copy(m->data.fl, m->data.fl + sz, w.begin()); - hog.setSVMDetector(w); - } - else - hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector()); - hog.detectMultiScale(img, found, hit_threshold, win_stride, padding, scale, group_threshold); - CvSeq* seq = cvCreateSeq(cv::DataType<cv::Rect>::type, sizeof(CvSeq), sizeof(cv::Rect), storage); - if(found.size()) - cvSeqPushMulti(seq, &found[0], (int)found.size()); - return seq; -} - -static void cvGrabCut(CvArr *image, - CvArr *mask, - CvRect rect, - CvArr *bgdModel, - CvArr *fgdModel, - int iterCount, - int mode) -{ - cv::Mat _image = cv::cvarrToMat(image); - cv::Mat _mask = cv::cvarrToMat(mask); - cv::Mat _bgdModel = cv::cvarrToMat(bgdModel); - cv::Mat _fgdModel = cv::cvarrToMat(fgdModel); - grabCut(_image, _mask, rect, _bgdModel, _fgdModel, iterCount, mode); -} - -static int zero = 0; - -/************************************************************************/ -/* Custom Validators */ - -#define CVPY_VALIDATE_DrawChessboardCorners() do { \ - if ((patternSize.width * patternSize.height) != corners.count) \ - return (PyObject*)0; \ - } while (0) - -#define cvGetRotationMatrix2D cv2DRotationMatrix - -/************************************************************************/ -/* Generated functions */ - -#define constCvMat const CvMat -#define FROM_constCvMatPTR(x) FROM_CvMatPTR((CvMat*)x) - -#define cvSnakeImage(image, points, length, a, b, g, win, criteria, calc_gradient) \ - do { \ - int coeff_usage; \ - if ((alpha.count == 1) && (beta.count == 1) && (gamma.count == 1)) \ - coeff_usage = CV_VALUE; \ - else if ((length == alpha.count) &&
(alpha.count == beta.count) && (beta.count == gamma.count)) \ - coeff_usage = CV_ARRAY; \ - else { \ - failmsg("SnakeImage weights invalid"); \ - return (PyObject*)0; } \ - cvSnakeImage(image, points, length, a, b, g, coeff_usage, win, criteria, calc_gradient); \ - } while (0) - -static double cppKMeans(const CvArr* _samples, int cluster_count, CvArr* _labels, - CvTermCriteria termcrit, int attempts, int flags, CvArr* _centers) -{ - cv::Mat data = cv::cvarrToMat(_samples), labels = cv::cvarrToMat(_labels), centers; - if( _centers ) - centers = cv::cvarrToMat(_centers); - CV_Assert( labels.isContinuous() && labels.type() == CV_32S && - (labels.cols == 1 || labels.rows == 1) && - labels.cols + labels.rows - 1 == data.rows ); - return cv::kmeans(data, cluster_count, labels, termcrit, attempts, - flags, _centers ? cv::_OutputArray(centers) : cv::_OutputArray() ); -} - -#define cvKMeans2(samples, nclusters, labels, termcrit, attempts, flags, centers) \ - cppKMeans(samples, nclusters, labels, termcrit, attempts, flags, centers) - -#if defined _MSC_VER && _MSC_VER >= 1200 - #pragma warning( push ) - #pragma warning( disable : 4244 ) -#endif - -#include "generated0.i" - -#if defined _MSC_VER && _MSC_VER >= 1200 - #pragma warning( pop ) -#endif - -static PyMethodDef old_methods[] = { - -#if PYTHON_USE_NUMPY - {"fromarray", (PyCFunction)pycvfromarray, METH_KEYWORDS, "fromarray(array [, allowND]) -> CvMat"}, -#endif - - {"FindDataMatrix", pyfinddatamatrix, METH_VARARGS}, - {"temp_test", temp_test, METH_VARARGS}, - -#include "generated1.i" - - {NULL, NULL}, -}; - -/************************************************************************/ -/* Module init */ - -#include "opencv2/opencv_modules.hpp" - -#ifdef HAVE_OPENCV_NONFREE -# include "opencv2/nonfree.hpp" -#endif - -#ifdef HAVE_OPENCV_FEATURES2D -# include "opencv2/features2d.hpp" -#endif - -#ifdef HAVE_OPENCV_VIDEO -# include "opencv2/video.hpp" -#endif - -#ifdef HAVE_OPENCV_ML -# include "opencv2/ml.hpp" -#endif - -#ifdef HAVE_OPENCV_CONTRIB -# include "opencv2/contrib.hpp" -#endif - -static PyObject* init_cv() -{ - PyObject *m, *d; - cvSetErrMode(CV_ErrModeParent); - - bool init = true; -#ifdef HAVE_OPENCV_NONFREE - init &= cv::initModule_nonfree(); -#endif -#ifdef HAVE_OPENCV_FEATURES2D - init &= cv::initModule_features2d(); -#endif -#ifdef HAVE_OPENCV_VIDEO - init &= cv::initModule_video(); -#endif -#ifdef HAVE_OPENCV_ML - init &= cv::initModule_ml(); -#endif - #ifdef HAVE_OPENCV_CONTRIB - init &= cv::initModule_contrib(); -#endif - if(!init) - return 0; - - #define MKTYPE(NAME) NAME##_specials(); to_ok(&NAME##_Type) - - MKTYPE(cvcontourtree); - MKTYPE(cvfont); - MKTYPE(cvhistogram); - MKTYPE(cvlineiterator); - MKTYPE(cvmat); - MKTYPE(cvmatnd); - MKTYPE(cvmemstorage); - MKTYPE(cvsubdiv2dedge); - MKTYPE(cvrng); - MKTYPE(cvseq); - MKTYPE(cvset); - MKTYPE(cvsubdiv2d); - MKTYPE(cvsubdiv2dpoint); - MKTYPE(iplimage); - MKTYPE(memtrack); - -#include "generated4.i" - - #undef MKTYPE - - m = Py_InitModule(OLD_MODULESTR, old_methods); - d = PyModule_GetDict(m); - - PyDict_SetItemString(d, "__version__", PyString_FromString(CV_VERSION)); - PyDict_SetItemString(d, "error", opencv_error); - - // Couple of warnings about strict aliasing here. Not clear how to fix. 
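Once init_cv has run, the type objects and constants registered below are plain attributes of the old cv module. Roughly, from Python (a sketch under that assumption, not part of the diff):

    import cv

    img = cv.CreateImage((64, 48), cv.IPL_DEPTH_8U, 3)
    assert type(img) == cv.iplimage    # type object published via the union trick below
    assert type(cv.CV_PI) == float     # the lone floating-point constant
    assert cv.CV_8UC3 == cv.CV_8UC(3)  # integer constants come from PUBLISH()/generated2.i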
- union { - PyObject *o; - PyTypeObject *to; - } convert; - convert.to = &iplimage_Type; - PyDict_SetItemString(d, "iplimage", convert.o); - convert.to = &cvmat_Type; - PyDict_SetItemString(d, "cvmat", convert.o); - - // AFAIK the only floating-point constant - PyDict_SetItemString(d, "CV_PI", PyFloat_FromDouble(CV_PI)); - -#define PUBLISH(I) PyDict_SetItemString(d, #I, PyInt_FromLong(I)) -#define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I)) -#define PUBLISH2(I, value) PyDict_SetItemString(d, #I, PyLong_FromLong(value)) - - PUBLISHU(IPL_DEPTH_8U); - PUBLISHU(IPL_DEPTH_8S); - PUBLISHU(IPL_DEPTH_16U); - PUBLISHU(IPL_DEPTH_16S); - PUBLISHU(IPL_DEPTH_32S); - PUBLISHU(IPL_DEPTH_32F); - PUBLISHU(IPL_DEPTH_64F); - - PUBLISH(CV_LOAD_IMAGE_COLOR); - PUBLISH(CV_LOAD_IMAGE_GRAYSCALE); - PUBLISH(CV_LOAD_IMAGE_UNCHANGED); - PUBLISH(CV_HIST_ARRAY); - PUBLISH(CV_HIST_SPARSE); - PUBLISH(CV_8U); - PUBLISH(CV_8UC1); - PUBLISH(CV_8UC2); - PUBLISH(CV_8UC3); - PUBLISH(CV_8UC4); - PUBLISH(CV_8S); - PUBLISH(CV_8SC1); - PUBLISH(CV_8SC2); - PUBLISH(CV_8SC3); - PUBLISH(CV_8SC4); - PUBLISH(CV_16U); - PUBLISH(CV_16UC1); - PUBLISH(CV_16UC2); - PUBLISH(CV_16UC3); - PUBLISH(CV_16UC4); - PUBLISH(CV_16S); - PUBLISH(CV_16SC1); - PUBLISH(CV_16SC2); - PUBLISH(CV_16SC3); - PUBLISH(CV_16SC4); - PUBLISH(CV_32S); - PUBLISH(CV_32SC1); - PUBLISH(CV_32SC2); - PUBLISH(CV_32SC3); - PUBLISH(CV_32SC4); - PUBLISH(CV_32F); - PUBLISH(CV_32FC1); - PUBLISH(CV_32FC2); - PUBLISH(CV_32FC3); - PUBLISH(CV_32FC4); - PUBLISH(CV_64F); - PUBLISH(CV_64FC1); - PUBLISH(CV_64FC2); - PUBLISH(CV_64FC3); - PUBLISH(CV_64FC4); - PUBLISH(CV_NEXT_AROUND_ORG); - PUBLISH(CV_NEXT_AROUND_DST); - PUBLISH(CV_PREV_AROUND_ORG); - PUBLISH(CV_PREV_AROUND_DST); - PUBLISH(CV_NEXT_AROUND_LEFT); - PUBLISH(CV_NEXT_AROUND_RIGHT); - PUBLISH(CV_PREV_AROUND_LEFT); - PUBLISH(CV_PREV_AROUND_RIGHT); - - PUBLISH(CV_WINDOW_AUTOSIZE); - - PUBLISH(CV_PTLOC_INSIDE); - PUBLISH(CV_PTLOC_ON_EDGE); - PUBLISH(CV_PTLOC_VERTEX); - PUBLISH(CV_PTLOC_OUTSIDE_RECT); - - PUBLISH(GC_BGD); - PUBLISH(GC_FGD); - PUBLISH(GC_PR_BGD); - PUBLISH(GC_PR_FGD); - PUBLISH(GC_INIT_WITH_RECT); - PUBLISH(GC_INIT_WITH_MASK); - PUBLISH(GC_EVAL); - -#include "generated2.i" - -#undef PUBLISH -#undef PUBLISHU -#undef PUBLISH2 - - return m; -} - diff --git a/modules/python/src2/defs b/modules/python/src2/defs deleted file mode 100644 index 0298e5be0a..0000000000 --- a/modules/python/src2/defs +++ /dev/null @@ -1,360 +0,0 @@ -#define CV_BLUR_NO_SCALE 0 -#define CV_BLUR 1 -#define CV_GAUSSIAN 2 -#define CV_MEDIAN 3 -#define CV_BILATERAL 4 -#define CV_INPAINT_NS 0 -#define CV_INPAINT_TELEA 1 -#define CV_SCHARR -1 -#define CV_MAX_SOBEL_KSIZE 7 -#define CV_BGR2BGRA 0 -#define CV_RGB2RGBA CV_BGR2BGRA -#define CV_BGRA2BGR 1 -#define CV_RGBA2RGB CV_BGRA2BGR -#define CV_BGR2RGBA 2 -#define CV_RGB2BGRA CV_BGR2RGBA -#define CV_RGBA2BGR 3 -#define CV_BGRA2RGB CV_RGBA2BGR -#define CV_BGR2RGB 4 -#define CV_RGB2BGR CV_BGR2RGB -#define CV_BGRA2RGBA 5 -#define CV_RGBA2BGRA CV_BGRA2RGBA -#define CV_BGR2GRAY 6 -#define CV_RGB2GRAY 7 -#define CV_GRAY2BGR 8 -#define CV_GRAY2RGB CV_GRAY2BGR -#define CV_GRAY2BGRA 9 -#define CV_GRAY2RGBA CV_GRAY2BGRA -#define CV_BGRA2GRAY 10 -#define CV_RGBA2GRAY 11 -#define CV_BGR2BGR565 12 -#define CV_RGB2BGR565 13 -#define CV_BGR5652BGR 14 -#define CV_BGR5652RGB 15 -#define CV_BGRA2BGR565 16 -#define CV_RGBA2BGR565 17 -#define CV_BGR5652BGRA 18 -#define CV_BGR5652RGBA 19 -#define CV_GRAY2BGR565 20 -#define CV_BGR5652GRAY 21 -#define CV_BGR2BGR555 22 -#define CV_RGB2BGR555 23 -#define 
CV_BGR5552BGR 24 -#define CV_BGR5552RGB 25 -#define CV_BGRA2BGR555 26 -#define CV_RGBA2BGR555 27 -#define CV_BGR5552BGRA 28 -#define CV_BGR5552RGBA 29 -#define CV_GRAY2BGR555 30 -#define CV_BGR5552GRAY 31 -#define CV_BGR2XYZ 32 -#define CV_RGB2XYZ 33 -#define CV_XYZ2BGR 34 -#define CV_XYZ2RGB 35 -#define CV_BGR2YCrCb 36 -#define CV_RGB2YCrCb 37 -#define CV_YCrCb2BGR 38 -#define CV_YCrCb2RGB 39 -#define CV_BGR2HSV 40 -#define CV_RGB2HSV 41 -#define CV_BGR2Lab 44 -#define CV_RGB2Lab 45 -#define CV_BayerBG2BGR 46 -#define CV_BayerGB2BGR 47 -#define CV_BayerRG2BGR 48 -#define CV_BayerGR2BGR 49 -#define CV_BayerBG2RGB CV_BayerRG2BGR -#define CV_BayerGB2RGB CV_BayerGR2BGR -#define CV_BayerRG2RGB CV_BayerBG2BGR -#define CV_BayerGR2RGB CV_BayerGB2BGR -#define CV_BayerBG2BGR_VNG 62 -#define CV_BayerGB2BGR_VNG 63 -#define CV_BayerRG2BGR_VNG 64 -#define CV_BayerGR2BGR_VNG 65 -#define CV_BGR2Luv 50 -#define CV_RGB2Luv 51 -#define CV_BGR2HLS 52 -#define CV_RGB2HLS 53 -#define CV_HSV2BGR 54 -#define CV_HSV2RGB 55 -#define CV_Lab2BGR 56 -#define CV_Lab2RGB 57 -#define CV_Luv2BGR 58 -#define CV_Luv2RGB 59 -#define CV_HLS2BGR 60 -#define CV_HLS2RGB 61 -#define CV_COLORCVT_MAX 100 -#define CV_INTER_NN 0 -#define CV_INTER_LINEAR 1 -#define CV_INTER_CUBIC 2 -#define CV_INTER_AREA 3 -#define CV_WARP_FILL_OUTLIERS 8 -#define CV_WARP_INVERSE_MAP 16 -#define CV_SHAPE_RECT 0 -#define CV_SHAPE_CROSS 1 -#define CV_SHAPE_ELLIPSE 2 -#define CV_SHAPE_CUSTOM 100 -#define CV_MOP_OPEN 2 -#define CV_MOP_CLOSE 3 -#define CV_MOP_GRADIENT 4 -#define CV_MOP_TOPHAT 5 -#define CV_MOP_BLACKHAT 6 -#define CV_TM_SQDIFF 0 -#define CV_TM_SQDIFF_NORMED 1 -#define CV_TM_CCORR 2 -#define CV_TM_CCORR_NORMED 3 -#define CV_TM_CCOEFF 4 -#define CV_TM_CCOEFF_NORMED 5 -#define CV_LKFLOW_PYR_A_READY 1 -#define CV_LKFLOW_PYR_B_READY 2 -#define CV_LKFLOW_INITIAL_GUESSES 4 -#define CV_LKFLOW_GET_MIN_EIGENVALS 8 -#define CV_POLY_APPROX_DP 0 -#define CV_CONTOURS_MATCH_I1 1 -#define CV_CONTOURS_MATCH_I2 2 -#define CV_CONTOURS_MATCH_I3 3 -#define CV_CLOCKWISE 1 -#define CV_COUNTER_CLOCKWISE 2 -#define CV_COMP_CORREL 0 -#define CV_COMP_CHISQR 1 -#define CV_COMP_INTERSECT 2 -#define CV_COMP_BHATTACHARYYA 3 -#define CV_DIST_MASK_3 3 -#define CV_DIST_MASK_5 5 -#define CV_DIST_MASK_PRECISE 0 -#define CV_THRESH_BINARY 0 /* value = value > threshold ? max_value : 0 */ -#define CV_THRESH_BINARY_INV 1 /* value = value > threshold ? 0 : max_value */ -#define CV_THRESH_TRUNC 2 /* value = value > threshold ? threshold : value */ -#define CV_THRESH_TOZERO 3 /* value = value > threshold ? value : 0 */ -#define CV_THRESH_TOZERO_INV 4 /* value = value > threshold ? 
0 : value */ -#define CV_THRESH_MASK 7 -#define CV_THRESH_OTSU 8 /* use Otsu algorithm to choose the optimal threshold value; -#define CV_ADAPTIVE_THRESH_MEAN_C 0 -#define CV_ADAPTIVE_THRESH_GAUSSIAN_C 1 -#define CV_FLOODFILL_FIXED_RANGE (1 << 16) -#define CV_FLOODFILL_MASK_ONLY (1 << 17) -#define CV_CANNY_L2_GRADIENT (1 << 31) -#define CV_HOUGH_STANDARD 0 -#define CV_HOUGH_PROBABILISTIC 1 -#define CV_HOUGH_MULTI_SCALE 2 -#define CV_HOUGH_GRADIENT 3 -#define CV_HAAR_DO_CANNY_PRUNING 1 -#define CV_HAAR_SCALE_IMAGE 2 -#define CV_HAAR_FIND_BIGGEST_OBJECT 4 -#define CV_HAAR_DO_ROUGH_SEARCH 8 -#define CV_LMEDS 4 -#define CV_RANSAC 8 -#define CV_CALIB_CB_ADAPTIVE_THRESH 1 -#define CV_CALIB_CB_NORMALIZE_IMAGE 2 -#define CV_CALIB_CB_FILTER_QUADS 4 -#define CV_CALIB_USE_INTRINSIC_GUESS 1 -#define CV_CALIB_FIX_ASPECT_RATIO 2 -#define CV_CALIB_FIX_PRINCIPAL_POINT 4 -#define CV_CALIB_ZERO_TANGENT_DIST 8 -#define CV_CALIB_FIX_FOCAL_LENGTH 16 -#define CV_CALIB_FIX_K1 32 -#define CV_CALIB_FIX_K2 64 -#define CV_CALIB_FIX_K3 128 -#define CV_CALIB_FIX_INTRINSIC 256 -#define CV_CALIB_SAME_FOCAL_LENGTH 512 -#define CV_CALIB_ZERO_DISPARITY 1024 -#define CV_FM_7POINT 1 -#define CV_FM_8POINT 2 -#define CV_FM_LMEDS_ONLY CV_LMEDS -#define CV_FM_RANSAC_ONLY CV_RANSAC -#define CV_FM_LMEDS CV_LMEDS -#define CV_FM_RANSAC CV_RANSAC -#define CV_STEREO_BM_NORMALIZED_RESPONSE 0 -#define CV_STEREO_BM_BASIC 0 -#define CV_STEREO_BM_FISH_EYE 1 -#define CV_STEREO_BM_NARROW 2 -#define CV_STEREO_GC_OCCLUDED SHRT_MAX -#define CV_AUTOSTEP 0x7fffffff -#define CV_MAX_ARR 10 -#define CV_NO_DEPTH_CHECK 1 -#define CV_NO_CN_CHECK 2 -#define CV_NO_SIZE_CHECK 4 -#define CV_CMP_EQ 0 -#define CV_CMP_GT 1 -#define CV_CMP_GE 2 -#define CV_CMP_LT 3 -#define CV_CMP_LE 4 -#define CV_CMP_NE 5 -#define CV_CHECK_RANGE 1 -#define CV_CHECK_QUIET 2 -#define CV_RAND_UNI 0 -#define CV_RAND_NORMAL 1 -#define CV_SORT_EVERY_ROW 0 -#define CV_SORT_EVERY_COLUMN 1 -#define CV_SORT_ASCENDING 0 -#define CV_SORT_DESCENDING 16 -#define CV_GEMM_A_T 1 -#define CV_GEMM_B_T 2 -#define CV_GEMM_C_T 4 -#define CV_SVD_MODIFY_A 1 -#define CV_SVD_U_T 2 -#define CV_SVD_V_T 4 -#define CV_LU 0 -#define CV_SVD 1 -#define CV_SVD_SYM 2 -#define CV_CHOLESKY 3 -#define CV_QR 4 -#define CV_NORMAL 16 -#define CV_COVAR_SCRAMBLED 0 -#define CV_COVAR_NORMAL 1 -#define CV_COVAR_USE_AVG 2 -#define CV_COVAR_SCALE 4 -#define CV_COVAR_ROWS 8 -#define CV_COVAR_COLS 16 -#define CV_PCA_DATA_AS_ROW 0 -#define CV_PCA_DATA_AS_COL 1 -#define CV_PCA_USE_AVG 2 -#define CV_C 1 -#define CV_L1 2 -#define CV_L2 4 -#define CV_NORM_MASK 7 -#define CV_RELATIVE 8 -#define CV_DIFF 16 -#define CV_MINMAX 32 -#define CV_DIFF_C (CV_DIFF | CV_C) -#define CV_DIFF_L1 (CV_DIFF | CV_L1) -#define CV_DIFF_L2 (CV_DIFF | CV_L2) -#define CV_RELATIVE_C (CV_RELATIVE | CV_C) -#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1) -#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2) -#define CV_REDUCE_SUM 0 -#define CV_REDUCE_AVG 1 -#define CV_REDUCE_MAX 2 -#define CV_REDUCE_MIN 3 -#define CV_DXT_FORWARD 0 -#define CV_DXT_INVERSE 1 -#define CV_DXT_SCALE 2 /* divide result by size of array */ -#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE) -#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE -#define CV_DXT_ROWS 4 /* transform each row individually */ -#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */ -#define CV_FRONT 1 -#define CV_BACK 0 -#define CV_GRAPH_VERTEX 1 -#define CV_GRAPH_TREE_EDGE 2 -#define CV_GRAPH_BACK_EDGE 4 -#define CV_GRAPH_FORWARD_EDGE 8 -#define CV_GRAPH_CROSS_EDGE 16 -#define 
CV_GRAPH_ANY_EDGE 30 -#define CV_GRAPH_NEW_TREE 32 -#define CV_GRAPH_BACKTRACKING 64 -#define CV_GRAPH_OVER -1 -#define CV_GRAPH_ALL_ITEMS -1 -#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30) -#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29) -#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28) -#define CV_FILLED -1 -#define CV_AA 16 -#define CV_FONT_HERSHEY_SIMPLEX 0 -#define CV_FONT_HERSHEY_PLAIN 1 -#define CV_FONT_HERSHEY_DUPLEX 2 -#define CV_FONT_HERSHEY_COMPLEX 3 -#define CV_FONT_HERSHEY_TRIPLEX 4 -#define CV_FONT_HERSHEY_COMPLEX_SMALL 5 -#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6 -#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7 -#define CV_FONT_ITALIC 16 -#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX -#define CV_KMEANS_USE_INITIAL_LABELS 1 -#define CV_ErrModeLeaf 0 /* Print error and exit program */ -#define CV_ErrModeParent 1 /* Print error and continue */ -#define CV_ErrModeSilent 2 /* Don't print and continue */ -#define CV_RETR_EXTERNAL 0 -#define CV_RETR_LIST 1 -#define CV_RETR_CCOMP 2 -#define CV_RETR_TREE 3 -#define CV_CHAIN_CODE 0 -#define CV_CHAIN_APPROX_NONE 1 -#define CV_CHAIN_APPROX_SIMPLE 2 -#define CV_CHAIN_APPROX_TC89_L1 3 -#define CV_CHAIN_APPROX_TC89_KCOS 4 -#define CV_LINK_RUNS 5 -#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30) -#define CV_DIST_USER -1 /* User defined distance */ -#define CV_DIST_L1 1 /* distance = |x1-x2| + |y1-y2| */ -#define CV_DIST_L2 2 /* the simple euclidean distance */ -#define CV_DIST_C 3 /* distance = max(|x1-x2|,|y1-y2|) */ -#define CV_DIST_L12 4 /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */ -#define CV_DIST_FAIR 5 /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */ -#define CV_DIST_WELSCH 6 /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */ -#define CV_DIST_HUBER 7 /* distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 */
diff --git a/modules/python/src2/gen.py b/modules/python/src2/gen.py deleted file mode 100755 --- a/modules/python/src2/gen.py +++ /dev/null -import sys -from string import Template - -class argument: - def __init__(self, fields): - self.ty = fields[0] - self.nm = fields[1] - self.flags = "" - self.init = None - - if len(fields) > 2: - if fields[2][0] == '/': - self.flags = fields[2][1:].split(",") - else: - self.init = fields[2] - -api = [] -for l in open("%s/api" % sys.argv[1]): - if l[0] == '#': - continue - l = l.rstrip() - if (not l.startswith(' ')) and ('/' in l): - (l, flags) = l.split('/') - else: - flags = "" - f = l.split() - if len(f) != 0: - if l[0] != ' ': - if len(f) > 1: - ty = f[1] - else: - ty = None - api.append((f[0], [], ty, flags)) - else: - api[-1][1].append(argument(f)) - -# Validation: check that any optional arguments are last -had_error = False -for (f, args, ty, flags) in api: - has_init = [(a.init != None) for a in args if not 'O' in a.flags] - if True in has_init and not all(has_init[has_init.index(True):]): - print 'Error in definition for "%s", optional arguments must be last' % f - had_error = True - -if had_error: - sys.exit(1) - -def cname(n): - if n.startswith("CV"): - return '_' + n - elif n[0].isdigit(): - return '_' + n - else: - return n - -# RHS is how the aggregate gets expanded in the C call -aggregate = { - 'pts_npts_contours' : '!.pts,!.npts,!.contours', - 'cvarr_count' : '!.cvarr,!.count', - 'cvarr_plane_count' : '!.cvarr,!.count', - 'floats' : '!.f', - 'ints' : '!.i', - 'ints0' : '!.i', - 'CvPoints' : '!.p,!.count', - 'CvPoint2D32fs' : '!.p,!.count', - 'CvPoint3D32fs' : '!.p,!.count', - 'cvarrseq' : '!.seq', - 'CvArrs' : '!.ims', - 'IplImages' : '!.ims', - 'intpair' : '!.pairs,!.count', - 'cvpoint2d32f_count' : '!.points,&!.count' -} -conversion_types = [ -'char', -'CvArr', -'CvArrSeq', -'CvBox2D', # '((ff)(ff)f)', -'CvBox2D*', -'CvCapture*', -'CvStereoBMState*', -'CvStereoGCState*', -'CvKalman*', -'CvVideoWriter*', -'CvContourTree*', -'CvFont', -'CvFont*', -'CvHaarClassifierCascade*', -'CvHistogram', -'CvMat', -'CvMatND', -'CvMemStorage',
-'CvMoments', -'CvMoments*', -'CvNextEdgeType', -'CvPoint', -'CvPoint*', -'CvPoint2D32f', # '(ff)', -'CvPoint2D32f*', -'CvPoint3D32f*', -'CvPoint2D64f', -'CvPOSITObject*', -'CvRect', -'CvRect*', -'CvRNG*', -'CvScalar', -'CvSeq', -'CvSeqOfCvConvexityDefect', -'CvSize', -'CvSlice', -'CvStarDetectorParams', -'CvSubdiv2D*', -'CvSubdiv2DEdge', -'CvTermCriteria', -'generic', -'IplConvKernel*', -'IplImage', -'PyObject*', -'PyCallableObject*' -] - -def safename(s): - return s.replace('*', 'PTR').replace('[', '_').replace(']', '_') - -def has_optional(al): - """ return true if any argument is optional """ - return any([a.init for a in al]) - -def gen(name, args, ty, flags): - yield "" - if has_optional(args): - yield "static PyObject *pycv%s(PyObject *self, PyObject *args, PyObject *kw)" % cname(name) - else: - yield "static PyObject *pycv%s(PyObject *self, PyObject *args)" % cname(name) - if 'doconly' in flags: - yield ";" - else: - yield "{" - - destinations = [] - for a in args: - remap = { - 'CvArr' : 'CvArr*', - 'CvMat' : 'CvMat*', - 'CvMatND' : 'CvMatND*', - 'IplImage' : 'IplImage*', - 'CvMemStorage' : 'CvMemStorage*', - 'CvHistogram':'CvHistogram*', - 'CvSeq':'CvSeq*', - 'CvHaarClassifierCascade' : 'CvHaarClassifierCascade*' - } - ctype = remap.get(a.ty, a.ty) - if a.init: - init = " = %s" % a.init - else: - init = '' - yield " %s %s%s;" % (ctype, a.nm, init) - if 'O' in a.flags: - continue - if a.ty in (conversion_types + aggregate.keys()): - yield ' PyObject *pyobj_%s = NULL;' % (a.nm) - destinations.append('&pyobj_%s' % (a.nm)) - elif a.ty in [ 'CvPoint2D32f' ]: - destinations.append('&%s.x, &%s.y' % (a.nm, a.nm)) - elif a.ty in [ 'CvTermCriteria' ]: - destinations.append('&%s.type, &%s.max_iter, &%s.epsilon' % ((a.nm,)*3)) - elif a.ty in [ 'CvSURFParams' ]: - destinations.append('&%s.extended, &%s.hessianThreshold, &%s.nOctaves, &%s.nOctaveLayers' % ((a.nm,)*4)) - elif a.nm in [ 'CvBox2D' ]: - s = ", ".join([('&' + a.nm +'.' 
+ fld) for fld in [ 'center.x', 'center.y', 'size.width', 'size.height', 'angle' ] ]) - destinations.append(s) - else: - destinations.append('&%s' % a.nm) - fmap = { - 'CvSURFParams' : '(idii)', - 'double' : 'd', - 'float' : 'f', - 'int' : 'i', - 'int64' : 'L', - 'char*' : 's', - } - for k in (conversion_types + aggregate.keys()): - fmap[k] = 'O' - in_args = [ a for a in args if not 'O' in a.flags ] - fmt0 = "".join([ fmap[a.ty] for a in in_args if not a.init]) - fmt1 = "".join([ fmap[a.ty] for a in in_args if a.init]) - - yield '' - if len(fmt0 + fmt1) > 0: - if len(fmt1) > 0: - yield ' const char *keywords[] = { %s };' % (", ".join([ '"%s"' % arg.nm for arg in args if not 'O' in arg.flags ] + ['NULL'])) - yield ' if (!PyArg_ParseTupleAndKeywords(args, kw, "%s|%s", %s))' % (fmt0, fmt1, ", ".join(['(char**)keywords'] + destinations)) - if '(' in (fmt0 + fmt1): - print "Tuple with kwargs is not allowed, function", name - sys.exit(1) - else: - yield ' if (!PyArg_ParseTuple(args, "%s", %s))' % (fmt0, ", ".join(destinations)) - yield ' return NULL;' - - # Do the conversions: - for a in args: - joinwith = [f[2:] for f in a.flags if f.startswith("J:")] - if len(joinwith) > 0: - yield 'preShareData(%s, &%s);' % (joinwith[0], a.nm) - if 'O' in a.flags: - continue - if a.ty in (conversion_types + aggregate.keys()): - if a.init: - pred = '(pyobj_%s != NULL) && ' % a.nm - else: - pred = '' - yield ' if (%s!convert_to_%s(pyobj_%s, &%s, "%s")) return NULL;' % (pred, safename(a.ty), a.nm, a.nm, a.nm) - - yield '#ifdef CVPY_VALIDATE_%s' % name - yield 'CVPY_VALIDATE_%s();' % name - yield '#endif' - - def invokename(a): - if 'K' in a.flags: - prefix = "(const CvArr **)" - elif 'O' in a.flags and not 'A' in a.flags: - prefix = "&" - else: - prefix = "" - if a.ty in aggregate: - return prefix + aggregate[a.ty].replace('!', a.nm) - else: - return prefix + a.nm - - def funcname(s): - # The name by which the function is called, in C - if s.startswith("CV"): - return s - else: - return "cv" + s - tocall = '%s(%s)' % (funcname(name), ", ".join(invokename(a) for a in args)) - if 'stub' in flags: - yield ' return stub%s(%s);' % (name, ", ".join(invokename(a) for a in args)) - elif ty == None: - yield ' ERRWRAP(%s);' % tocall - yield ' Py_RETURN_NONE;' - else: - Rtypes = [ - 'int', - 'int64', - 'double', - 'CvCapture*', - 'CvVideoWriter*', - 'CvPOSITObject*', - 'CvScalar', - 'CvSize', - 'CvRect', - 'CvSeq*', - 'CvBox2D', - 'CvSeqOfCvAvgComp*', - 'CvSeqOfCvConvexityDefect*', - 'CvSeqOfCvStarKeypoint*', - 'CvSeqOfCvSURFPoint*', - 'CvSeqOfCvSURFDescriptor*', - 'CvContourTree*', - 'IplConvKernel*', - 'IplImage*', - 'CvMat*', - 'constCvMat*', - 'ROCvMat*', - 'CvMatND*', - 'CvPoint2D32f_4', - 'CvRNG', - 'CvSubdiv2D*', - 'CvSubdiv2DPoint*', - 'CvSubdiv2DEdge', - 'ROIplImage*', - 'CvStereoBMState*', - 'CvStereoGCState*', - 'CvKalman*', - 'float', - 'generic', - 'unsigned' ] - - if ty in Rtypes: - yield ' %s r;' % (ty) - yield ' ERRWRAP(r = %s);' % (tocall) - yield ' return FROM_%s(r);' % safename(ty) - else: - all_returns = ty.split(",") - return_value_from_call = len(set(Rtypes) & set(all_returns)) != 0 - if return_value_from_call: - yield ' %s r;' % list(set(Rtypes) & set(all_returns))[0] - yield ' ERRWRAP(r = %s);' % (tocall) - else: - yield ' ERRWRAP(%s);' % (tocall) - typed = dict([ (a.nm,a.ty) for a in args]) - for i in range(len(all_returns)): - if all_returns[i] in Rtypes: - typed['r'] = all_returns[i] - all_returns[i] = "r" - if len(all_returns) == 1: - af = dict([ (a.nm,a.flags) for a in args]) - joinwith = 
[f[2:] for f in af.get(all_returns[0], []) if f.startswith("J:")] - if len(joinwith) > 0: - yield ' return shareData(pyobj_%s, %s, %s);' % (joinwith[0], joinwith[0], all_returns[0]) - else: - yield ' return FROM_%s(%s);' % (safename(typed[all_returns[0]]), all_returns[0]) - else: - yield ' return Py_BuildValue("%s", %s);' % ("N" * len(all_returns), ", ".join(["FROM_%s(%s)" % (safename(typed[n]), n) for n in all_returns])) - - yield '}' - -gen_c = [ open("generated%d.i" % i, "w") for i in range(5) ] - -print "Generated %d functions" % len(api) -for nm,args,ty,flags in sorted(api): - - # Figure out docstring into ds_* - ds_args = [] - mandatory = [a.nm for a in args if not ('O' in a.flags) and not a.init] - optional = [a.nm for a in args if not ('O' in a.flags) and a.init] - ds_args = ", ".join(mandatory) - def o2s(o): - if o == []: - return "" - else: - return ' [, %s%s]' % (o[0], o2s(o[1:])) - ds_args += o2s(optional) - - ds = "%s(%s) -> %s" % (nm, ds_args, str(ty)) - #print ds - - if has_optional(args): - entry = '{"%%s", (PyCFunction)pycv%s, METH_KEYWORDS, "%s"},' % (cname(nm), ds) - else: - entry = '{"%%s", pycv%s, METH_VARARGS, "%s"},' % (cname(nm), ds) - print >>gen_c[1], entry % (nm) - if nm.startswith('CV_'): - print >>gen_c[1], entry % (nm[3:]) - for l in gen(nm,args,ty,flags): - print >>gen_c[0], l - -for l in open("%s/defs" % sys.argv[1]): - print >>gen_c[2], "PUBLISH(%s);" % l.split()[1] - -######################################################################## -# Generated objects. -######################################################################## - -# gen_c[3] is the code, gen_c[4] initializers - -gensimple = Template(""" -/* - ${cvtype} is the OpenCV C struct - ${ourname}_t is the Python object -*/ - -struct ${ourname}_t { - PyObject_HEAD - ${cvtype} v; -}; - -static PyObject *${ourname}_repr(PyObject *self) -{ - ${ourname}_t *p = (${ourname}_t*)self; - char str[1000]; - sprintf(str, "<${ourname} %p>", p); - return PyString_FromString(str); -} - -${getset_funcs} - -static PyGetSetDef ${ourname}_getseters[] = { - - ${getset_inits} - {NULL} /* Sentinel */ -}; - -static PyTypeObject ${ourname}_Type = { - PyObject_HEAD_INIT(&PyType_Type) - 0, /*size*/ - MODULESTR".${ourname}", /*name*/ - sizeof(${ourname}_t), /*basicsize*/ -}; - -static void ${ourname}_specials(void) -{ - ${ourname}_Type.tp_repr = ${ourname}_repr; - ${ourname}_Type.tp_getset = ${ourname}_getseters; -} - -static PyObject *FROM_${cvtype}(${cvtype} r) -{ - ${ourname}_t *m = PyObject_NEW(${ourname}_t, &${ourname}_Type); - m->v = r; - return (PyObject*)m; -} - -static int convert_to_${cvtype}PTR(PyObject *o, ${cvtype}** dst, const char *name = "no_name") -{ - ${allownull} - if (PyType_IsSubtype(o->ob_type, &${ourname}_Type)) { - *dst = &(((${ourname}_t*)o)->v); - return 1; - } else { - (*dst) = (${cvtype}*)NULL; - return failmsg("Expected ${cvtype} for argument '%s'", name); - } -} - -""") - -genptr = Template(""" -/* - ${cvtype} is the OpenCV C struct - ${ourname}_t is the Python object -*/ - -struct ${ourname}_t { - PyObject_HEAD - ${cvtype} *v; -}; - -static void ${ourname}_dealloc(PyObject *self) -{ - ${ourname}_t *p = (${ourname}_t*)self; - cvRelease${ourname}(&p->v); - PyObject_Del(self); -} - -static PyObject *${ourname}_repr(PyObject *self) -{ - ${ourname}_t *p = (${ourname}_t*)self; - char str[1000]; - sprintf(str, "<${ourname} %p>", p); - return PyString_FromString(str); -} - -${getset_funcs} - -static PyGetSetDef ${ourname}_getseters[] = { - - ${getset_inits} - {NULL} /* Sentinel */ -}; - -static 
PyTypeObject ${ourname}_Type = { - PyObject_HEAD_INIT(&PyType_Type) - 0, /*size*/ - MODULESTR".${ourname}", /*name*/ - sizeof(${ourname}_t), /*basicsize*/ -}; - -static void ${ourname}_specials(void) -{ - ${ourname}_Type.tp_dealloc = ${ourname}_dealloc; - ${ourname}_Type.tp_repr = ${ourname}_repr; - ${ourname}_Type.tp_getset = ${ourname}_getseters; -} - -static PyObject *FROM_${cvtype}PTR(${cvtype} *r) -{ - ${ourname}_t *m = PyObject_NEW(${ourname}_t, &${ourname}_Type); - m->v = r; - return (PyObject*)m; -} - -static int convert_to_${cvtype}PTR(PyObject *o, ${cvtype}** dst, const char *name = "no_name") -{ - ${allownull} - if (PyType_IsSubtype(o->ob_type, &${ourname}_Type)) { - *dst = ((${ourname}_t*)o)->v; - return 1; - } else { - (*dst) = (${cvtype}*)NULL; - return failmsg("Expected ${cvtype} for argument '%s'", name); - } -} - -""") - -getset_func_template = Template(""" -static PyObject *${ourname}_get_${member}(${ourname}_t *p, void *closure) -{ - return ${rconverter}(p->v${accessor}${member}); -} - -static int ${ourname}_set_${member}(${ourname}_t *p, PyObject *value, void *closure) -{ - if (value == NULL) { - PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute"); - return -1; - } - - if (! ${checker}(value)) { - PyErr_SetString(PyExc_TypeError, "The ${member} attribute value must be a ${typename}"); - return -1; - } - - p->v${accessor}${member} = ${converter}(value); - return 0; -} - -""") - -getset_init_template = Template(""" - {(char*)"${member}", (getter)${ourname}_get_${member}, (setter)${ourname}_set_${member}, (char*)"${member}", NULL}, -""") - -objects = [ - ( 'IplConvKernel', ['allownull'], { - "nCols" : 'i', - "nRows" : 'i', - "anchorX" : 'i', - "anchorY" : 'i', - }), - ( 'CvCapture', [], {}), - ( 'CvHaarClassifierCascade', [], {}), - ( 'CvPOSITObject', [], {}), - ( 'CvVideoWriter', [], {}), - ( 'CvStereoBMState', [], { - "preFilterType" : 'i', - "preFilterSize" : 'i', - "preFilterCap" : 'i', - "SADWindowSize" : 'i', - "minDisparity" : 'i', - "numberOfDisparities" : 'i', - "textureThreshold" : 'i', - "uniquenessRatio" : 'i', - "speckleWindowSize" : 'i', - "speckleRange" : 'i', - }), - ( 'CvStereoGCState', [], { - "Ithreshold" : 'i', - "interactionRadius" : 'i', - "K" : 'f', - "lambda" : 'f', - "lambda1" : 'f', - "lambda2" : 'f', - "occlusionCost" : 'i', - "minDisparity" : 'i', - "numberOfDisparities" : 'i', - "maxIters" : 'i', - }), - ( 'CvKalman', [], { - "MP" : 'i', - "DP" : 'i', - "CP" : 'i', - "state_pre" : 'mr', - "state_post" : 'mr', - "transition_matrix" : 'mr', - "control_matrix" : 'mr', - "measurement_matrix" : 'mr', - "control_matrix" : 'mr', - "process_noise_cov" : 'mr', - "measurement_noise_cov" : 'mr', - "error_cov_pre" : 'mr', - "gain" : 'mr', - "error_cov_post" : 'mr', - }), - ( 'CvMoments', ['copy'], { - "m00" : 'f', - "m10" : 'f', - "m01" : 'f', - "m20" : 'f', - "m11" : 'f', - "m02" : 'f', - "m30" : 'f', - "m21" : 'f', - "m12" : 'f', - "m03" : 'f', - "mu20" : 'f', - "mu11" : 'f', - "mu02" : 'f', - "mu30" : 'f', - "mu21" : 'f', - "mu12" : 'f', - "mu03" : 'f', - "inv_sqrt_m00" : 'f', - }), -] - -checkers = { - 'i' : 'PyNumber_Check', - 'f' : 'PyNumber_Check', - 'm' : 'is_cvmat', - 'mr' : 'is_cvmat' -} -# Python -> C -converters = { - 'i' : 'PyInt_AsLong', - 'f' : 'PyFloat_AsDouble', - 'm' : 'PyCvMat_AsCvMat', - 'mr' : 'PyCvMat_AsCvMat' -} -# C -> Python -rconverters = { - 'i' : 'PyInt_FromLong', - 'f' : 'PyFloat_FromDouble', - 'm' : 'FROM_CvMat', - 'mr' : 'FROM_ROCvMatPTR' -} -# Human-readable type names -typenames = { - 'i' : 'integer', 
- 'f' : 'float', - 'm' : 'list of CvMat', - 'mr' : 'list of CvMat', -} - -for (t, flags, members) in objects: - map = {'cvtype' : t, - 'ourname' : t.replace('Cv', '')} - # gsf is all the generated code for the member accessors - if 'copy' in flags: - a = '.' - else: - a = '->' - gsf = "".join([getset_func_template.substitute(map, accessor = a, member = m, checker = checkers[t], converter = converters[t], rconverter = rconverters[t], typename = typenames[t]) for (m, t) in members.items()]) - # gsi is the generated code for the initializer for each accessor - gsi = "".join([getset_init_template.substitute(map, member = m) for (m, t) in members.items()]) - # s is the template that pulls everything together - if 'allownull' in flags: - nullcode = """if (o == Py_None) { *dst = (%s*)NULL; return 1; }""" % map['cvtype'] - else: - nullcode = "" - if 'copy' in flags: - print >>gen_c[3], gensimple.substitute(map, getset_funcs = gsf, getset_inits = gsi, allownull = nullcode) - else: - print >>gen_c[3], genptr.substitute(map, getset_funcs = gsf, getset_inits = gsi, allownull = nullcode) - print >>gen_c[4], "MKTYPE(%s);" % map['ourname'] - -for f in gen_c: - f.close() diff --git a/modules/python/src2/gen2.py b/modules/python/src2/gen2.py index d1b3358f67..0fed1838b0 100755 --- a/modules/python/src2/gen2.py +++ b/modules/python/src2/gen2.py @@ -53,14 +53,14 @@ static void pyopencv_${name}_dealloc(PyObject* self) PyObject_Del(self); } -static PyObject* pyopencv_from(const ${cname}& r) +template<> PyObject* pyopencv_from(const ${cname}& r) { pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type); m->v = r; return (PyObject*)m; } -static bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name="") +template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name) { if( src == NULL || src == Py_None ) return true; @@ -96,7 +96,7 @@ static void pyopencv_${name}_dealloc(PyObject* self) PyObject_Del(self); } -static PyObject* pyopencv_from(const Ptr<${cname}>& r) +template<> PyObject* pyopencv_from(const Ptr<${cname}>& r) { pyopencv_${name}_t *m = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type); new (&(m->v)) Ptr<$cname1>(); // init Ptr with placement new @@ -104,7 +104,7 @@ static PyObject* pyopencv_from(const Ptr<${cname}>& r) return (PyObject*)m; } -static bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name="") +template<> bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name) { if( src == NULL || src == Py_None ) return true; @@ -120,7 +120,7 @@ static bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name=" bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name); """) gen_template_set_prop_from_map = Template(""" @@ -213,7 +213,6 @@ gen_template_rw_prop_init = Template(""" simple_argtype_mapping = { "bool": ("bool", "b", "0"), - "char": ("char", "b", "0"), "int": ("int", "i", "0"), "float": ("float", "f", "0.f"), "double": ("double", "d", "0"), @@ -619,7 +618,10 @@ class FuncInfo(object): if amapping[1] == "O": code_decl += " PyObject* pyobj_%s = NULL;\n" % (a.name,) parse_name = "pyobj_" + a.name - code_cvt_list.append("pyopencv_to(pyobj_%s, %s, %s)" % (a.name, a.name, a.crepr())) + if a.tp == 'char': + code_cvt_list.append("convert_to_char(pyobj_%s, &%s, %s)"% (a.name, a.name, a.crepr())) + else: + code_cvt_list.append("pyopencv_to(pyobj_%s, %s, %s)" % (a.name, a.name, a.crepr())) all_cargs.append([amapping, parse_name]) diff --git a/modules/python/test/test.py b/modules/python/test/test.py old 
mode 100755 new mode 100644 index 0886834f35..2da740de9b --- a/modules/python/test/test.py +++ b/modules/python/test/test.py @@ -13,75 +13,12 @@ import os import getopt import operator import functools - import numpy as np import cv2 -import cv2.cv as cv - -from test2 import * - -class OpenCVTests(unittest.TestCase): - - depths = [ cv.IPL_DEPTH_8U, cv.IPL_DEPTH_8S, cv.IPL_DEPTH_16U, cv.IPL_DEPTH_16S, cv.IPL_DEPTH_32S, cv.IPL_DEPTH_32F, cv.IPL_DEPTH_64F ] - - mat_types = [ - cv.CV_8UC1, - cv.CV_8UC2, - cv.CV_8UC3, - cv.CV_8UC4, - cv.CV_8SC1, - cv.CV_8SC2, - cv.CV_8SC3, - cv.CV_8SC4, - cv.CV_16UC1, - cv.CV_16UC2, - cv.CV_16UC3, - cv.CV_16UC4, - cv.CV_16SC1, - cv.CV_16SC2, - cv.CV_16SC3, - cv.CV_16SC4, - cv.CV_32SC1, - cv.CV_32SC2, - cv.CV_32SC3, - cv.CV_32SC4, - cv.CV_32FC1, - cv.CV_32FC2, - cv.CV_32FC3, - cv.CV_32FC4, - cv.CV_64FC1, - cv.CV_64FC2, - cv.CV_64FC3, - cv.CV_64FC4, - ] - mat_types_single = [ - cv.CV_8UC1, - cv.CV_8SC1, - cv.CV_16UC1, - cv.CV_16SC1, - cv.CV_32SC1, - cv.CV_32FC1, - cv.CV_64FC1, - ] - def depthsize(self, d): - return { cv.IPL_DEPTH_8U : 1, - cv.IPL_DEPTH_8S : 1, - cv.IPL_DEPTH_16U : 2, - cv.IPL_DEPTH_16S : 2, - cv.IPL_DEPTH_32S : 4, - cv.IPL_DEPTH_32F : 4, - cv.IPL_DEPTH_64F : 8 }[d] +class NewOpenCVTests(unittest.TestCase): - def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR): - if not filename in self.image_cache: - filedata = urllib.urlopen("https://raw.github.com/Itseez/opencv/master/" + filename).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - self.image_cache[filename] = cv.DecodeImageM(imagefiledata, iscolor) - return self.image_cache[filename] - - def get_sample2(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR): + def get_sample(self, filename, iscolor = cv2.IMREAD_COLOR): if not filename in self.image_cache: filedata = urllib.urlopen("https://raw.github.com/Itseez/opencv/master/" + filename).read() self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor) @@ -90,2123 +27,106 @@ class OpenCVTests(unittest.TestCase): def setUp(self): self.image_cache = {} - def snap(self, img): - self.snapL([img]) - - def snapL(self, L): - for i,img in enumerate(L): - cv.NamedWindow("snap-%d" % i, 1) - cv.ShowImage("snap-%d" % i, img) - cv.WaitKey() - cv.DestroyAllWindows() - def hashimg(self, im): """ Compute a hash for an image, useful for image comparisons """ return hashlib.md5(im.tostring()).digest() -# Tests to run first; check the handful of basic operations that the later tests rely on - -class PreliminaryTests(OpenCVTests): - - def test_lena(self): - # Check that the lena jpg image has loaded correctly - # This test uses a 'golden' MD5 hash of the Lena image - # If the JPEG decompressor changes, it is possible that the MD5 hash will change, - # so the hash here will need to change. 
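Regilding such a golden hash against the new cv2 API is a one-liner; a sketch, assuming a local copy of the sample image (hypothetical path, not part of the diff):

    import hashlib
    import cv2

    im = cv2.imread("samples/c/lena.jpg")         # hypothetical local path
    print hashlib.md5(im.tostring()).hexdigest()  # paste this digest into the test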
- - im = self.get_sample("samples/c/lena.jpg") - # self.snap(im) # uncomment this line to view the image, when regilding - self.assertEqual(hashlib.md5(im.tostring()).hexdigest(), "9dcd9247f9811c6ce86675ba7b0297b6") - - def test_LoadImage(self): - self.assertRaises(TypeError, lambda: cv.LoadImage()) - self.assertRaises(TypeError, lambda: cv.LoadImage(4)) - self.assertRaises(TypeError, lambda: cv.LoadImage('foo.jpg', 1, 1)) - self.assertRaises(TypeError, lambda: cv.LoadImage('foo.jpg', xiscolor=cv.CV_LOAD_IMAGE_COLOR)) - - def test_types(self): - self.assert_(type(cv.CreateImage((7,5), cv.IPL_DEPTH_8U, 1)) == cv.iplimage) - self.assert_(type(cv.CreateMat(5, 7, cv.CV_32FC1)) == cv.cvmat) - for i,t in enumerate(self.mat_types): - basefunc = [ - cv.CV_8UC, - cv.CV_8SC, - cv.CV_16UC, - cv.CV_16SC, - cv.CV_32SC, - cv.CV_32FC, - cv.CV_64FC, - ][i / 4] - self.assertEqual(basefunc(1 + (i % 4)), t) - - def test_tostring(self): - - for w in [ 1, 4, 64, 512, 640]: - for h in [ 1, 4, 64, 480, 512]: - for c in [1, 2, 3, 4]: - for d in self.depths: - a = cv.CreateImage((w,h), d, c); - self.assert_(len(a.tostring()) == w * h * c * self.depthsize(d)) - - for w in [ 32, 96, 480 ]: - for h in [ 32, 96, 480 ]: - depth_size = { - cv.IPL_DEPTH_8U : 1, - cv.IPL_DEPTH_8S : 1, - cv.IPL_DEPTH_16U : 2, - cv.IPL_DEPTH_16S : 2, - cv.IPL_DEPTH_32S : 4, - cv.IPL_DEPTH_32F : 4, - cv.IPL_DEPTH_64F : 8 - } - for f in self.depths: - for channels in (1,2,3,4): - img = cv.CreateImage((w, h), f, channels) - esize = (w * h * channels * depth_size[f]) - self.assert_(len(img.tostring()) == esize) - cv.SetData(img, " " * esize, w * channels * depth_size[f]) - self.assert_(len(img.tostring()) == esize) - - mattype_size = { - cv.CV_8UC1 : 1, - cv.CV_8UC2 : 1, - cv.CV_8UC3 : 1, - cv.CV_8UC4 : 1, - cv.CV_8SC1 : 1, - cv.CV_8SC2 : 1, - cv.CV_8SC3 : 1, - cv.CV_8SC4 : 1, - cv.CV_16UC1 : 2, - cv.CV_16UC2 : 2, - cv.CV_16UC3 : 2, - cv.CV_16UC4 : 2, - cv.CV_16SC1 : 2, - cv.CV_16SC2 : 2, - cv.CV_16SC3 : 2, - cv.CV_16SC4 : 2, - cv.CV_32SC1 : 4, - cv.CV_32SC2 : 4, - cv.CV_32SC3 : 4, - cv.CV_32SC4 : 4, - cv.CV_32FC1 : 4, - cv.CV_32FC2 : 4, - cv.CV_32FC3 : 4, - cv.CV_32FC4 : 4, - cv.CV_64FC1 : 8, - cv.CV_64FC2 : 8, - cv.CV_64FC3 : 8, - cv.CV_64FC4 : 8 - } - - for t in self.mat_types: - for im in [cv.CreateMat(h, w, t), cv.CreateMatND([h, w], t)]: - elemsize = cv.CV_MAT_CN(cv.GetElemType(im)) * mattype_size[cv.GetElemType(im)] - cv.SetData(im, " " * (w * h * elemsize), (w * elemsize)) - esize = (w * h * elemsize) - self.assert_(len(im.tostring()) == esize) - cv.SetData(im, " " * esize, w * elemsize) - self.assert_(len(im.tostring()) == esize) - -# Tests for specific OpenCV functions - -class FunctionTests(OpenCVTests): - - def test_AvgSdv(self): - m = cv.CreateMat(1, 8, cv.CV_32FC1) - for i,v in enumerate([2, 4, 4, 4, 5, 5, 7, 9]): - m[0,i] = (v,) - self.assertAlmostEqual(cv.Avg(m)[0], 5.0, 3) - avg,sdv = cv.AvgSdv(m) - self.assertAlmostEqual(avg[0], 5.0, 3) - self.assertAlmostEqual(sdv[0], 2.0, 3) - - def test_CalcEMD2(self): - cc = {} - for r in [ 5, 10, 37, 38 ]: - scratch = cv.CreateImage((100,100), 8, 1) - cv.SetZero(scratch) - cv.Circle(scratch, (50,50), r, 255, -1) - storage = cv.CreateMemStorage() - seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE) - arr = cv.CreateMat(len(seq), 3, cv.CV_32FC1) - for i,e in enumerate(seq): - arr[i,0] = 1 - arr[i,1] = e[0] - arr[i,2] = e[1] - cc[r] = arr - def myL1(A, B, D): - return abs(A[0]-B[0]) + abs(A[1]-B[1]) - def myL2(A, B, D): - return math.sqrt((A[0]-B[0])**2 + 
(A[1]-B[1])**2) - def myC(A, B, D): - return max(abs(A[0]-B[0]), abs(A[1]-B[1])) - contours = set(cc.values()) - for c0 in contours: - for c1 in contours: - self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_L1) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myL1)) < 1e-3) - self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_L2) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myL2)) < 1e-3) - self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_C) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myC)) < 1e-3) - - def test_CalcOpticalFlowBM(self): - a = self.get_sample("samples/c/lena.jpg", 0) - b = self.get_sample("samples/c/lena.jpg", 0) - (w,h) = cv.GetSize(a) - vel_size = (w - 8 + 1, h - 8 + 1) - velx = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1) - vely = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1) - cv.CalcOpticalFlowBM(a, b, (8,8), (1,1), (8,8), 0, velx, vely) - - def test_CalcOpticalFlowPyrLK(self): - a = self.get_sample("samples/c/lena.jpg", 0) - map = cv.CreateMat(2, 3, cv.CV_32FC1) - cv.GetRotationMatrix2D((256, 256), 10, 1.0, map) - b = cv.CloneMat(a) - cv.WarpAffine(a, b, map) - - eig_image = cv.CreateMat(a.rows, a.cols, cv.CV_32FC1) - temp_image = cv.CreateMat(a.rows, a.cols, cv.CV_32FC1) - - prevPyr = cv.CreateMat(a.rows / 3, a.cols + 8, cv.CV_8UC1) - currPyr = cv.CreateMat(a.rows / 3, a.cols + 8, cv.CV_8UC1) - prevFeatures = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 400, 0.01, 0.01) - (currFeatures, status, track_error) = cv.CalcOpticalFlowPyrLK(a, - b, - prevPyr, - currPyr, - prevFeatures, - (10, 10), - 3, - (cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS,20, 0.03), - 0) - if 0: # enable visualization - print - print sum(status), "Points found in curr image" - for prev,this in zip(prevFeatures, currFeatures): - iprev = tuple([int(c) for c in prev]) - ithis = tuple([int(c) for c in this]) - cv.Circle(a, iprev, 3, 255) - cv.Circle(a, ithis, 3, 0) - cv.Line(a, iprev, ithis, 128) - - self.snapL([a, b]) - - def test_CartToPolar(self): - x = cv.CreateMat(5, 5, cv.CV_32F) - y = cv.CreateMat(5, 5, cv.CV_32F) - mag = cv.CreateMat(5, 5, cv.CV_32F) - angle = cv.CreateMat(5, 5, cv.CV_32F) - x2 = cv.CreateMat(5, 5, cv.CV_32F) - y2 = cv.CreateMat(5, 5, cv.CV_32F) - - for i in range(5): - for j in range(5): - x[i, j] = i - y[i, j] = j - - for in_degrees in [False, True]: - cv.CartToPolar(x, y, mag, angle, in_degrees) - cv.PolarToCart(mag, angle, x2, y2, in_degrees) - for i in range(5): - for j in range(5): - self.assertAlmostEqual(x[i, j], x2[i, j], 1) - self.assertAlmostEqual(y[i, j], y2[i, j], 1) - - def test_Circle(self): - for w,h in [(2,77), (77,2), (256, 256), (640,480)]: - img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1) - cv.SetZero(img) - tricky = [ -8000, -2, -1, 0, 1, h/2, h-1, h, h+1, w/2, w-1, w, w+1, 8000] - for x0 in tricky: - for y0 in tricky: - for r in [ 0, 1, 2, 3, 4, 5, w/2, w-1, w, w+1, h/2, h-1, h, h+1, 8000 ]: - for thick in [1, 2, 10]: - for t in [0, 8, 4, cv.CV_AA]: - cv.Circle(img, (x0,y0), r, 255, thick, t) - # just check that something was drawn - self.assert_(cv.Sum(img)[0] > 0) - - def test_ConvertImage(self): - i1 = cv.GetImage(self.get_sample("samples/c/lena.jpg", 1)) - i2 = cv.CloneImage(i1) - i3 = cv.CloneImage(i1) - cv.ConvertImage(i1, i2, cv.CV_CVTIMG_FLIP + cv.CV_CVTIMG_SWAP_RB) - self.assertNotEqual(self.hashimg(i1), self.hashimg(i2)) - cv.ConvertImage(i2, i3, cv.CV_CVTIMG_FLIP + cv.CV_CVTIMG_SWAP_RB) - self.assertEqual(self.hashimg(i1), self.hashimg(i3)) - - def test_ConvexHull2(self): - # Draw a series of N-pointed stars, find contours, assert the contour is not convex, - # assert the 
hull has N segments, assert that there are N convexity defects. - - def polar2xy(th, r): - return (int(400 + r * math.cos(th)), int(400 + r * math.sin(th))) - storage = cv.CreateMemStorage(0) - for way in ['CvSeq', 'CvMat', 'list']: - for points in range(3,20): - scratch = cv.CreateImage((800,800), 8, 1) - cv.SetZero(scratch) - sides = 2 * points - cv.FillPoly(scratch, [ [ polar2xy(i * 2 * math.pi / sides, [100,350][i&1]) for i in range(sides) ] ], 255) - - seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE) - - if way == 'CvSeq': - # pts is a CvSeq - pts = seq - elif way == 'CvMat': - # pts is a CvMat - arr = cv.CreateMat(len(seq), 1, cv.CV_32SC2) - for i,e in enumerate(seq): - arr[i,0] = e - pts = arr - elif way == 'list': - # pts is a list of 2-tuples - pts = list(seq) - else: - assert False - - self.assert_(cv.CheckContourConvexity(pts) == 0) - hull = cv.ConvexHull2(pts, storage, return_points = 1) - self.assert_(cv.CheckContourConvexity(hull) == 1) - self.assert_(len(hull) == points) - - if way in [ 'CvSeq', 'CvMat' ]: - defects = cv.ConvexityDefects(pts, cv.ConvexHull2(pts, storage), storage) - self.assert_(len([depth for (_,_,_,depth) in defects if (depth > 5)]) == points) - - def test_CreateImage(self): - for w in [ 1, 4, 64, 512, 640]: - for h in [ 1, 4, 64, 480, 512]: - for c in [1, 2, 3, 4]: - for d in self.depths: - a = cv.CreateImage((w,h), d, c); - self.assert_(a.width == w) - self.assert_(a.height == h) - self.assert_(a.nChannels == c) - self.assert_(a.depth == d) - self.assert_(cv.GetSize(a) == (w, h)) - # self.assert_(cv.GetElemType(a) == d) - self.assertRaises(cv.error, lambda: cv.CreateImage((100, 100), 9, 1)) - - def test_CreateMat(self): - for rows in [1, 2, 4, 16, 64, 512, 640]: - for cols in [1, 2, 4, 16, 64, 512, 640]: - for t in self.mat_types: - m = cv.CreateMat(rows, cols, t) - self.assertEqual(cv.GetElemType(m), t) - self.assertEqual(m.type, t) - self.assertRaises(cv.error, lambda: cv.CreateMat(-1, 100, cv.CV_8SC4)) - self.assertRaises(cv.error, lambda: cv.CreateMat(100, -1, cv.CV_8SC4)) - self.assertRaises(cv.error, lambda: cv.cvmat()) - - def test_DrawChessboardCorners(self): - im = cv.CreateImage((512,512), cv.IPL_DEPTH_8U, 3) - cv.SetZero(im) - cv.DrawChessboardCorners(im, (5, 5), [ ((i/5)*100+50,(i%5)*100+50) for i in range(5 * 5) ], 1) - - def test_ExtractSURF(self): - img = self.get_sample("samples/c/lena.jpg", 0) - w,h = cv.GetSize(img) - for hessthresh in [ 300,400,500]: - for dsize in [0,1]: - for layers in [1,3,10]: - kp,desc = cv.ExtractSURF(img, None, cv.CreateMemStorage(), (dsize, hessthresh, 3, layers)) - self.assert_(len(kp) == len(desc)) - for d in desc: - self.assert_(len(d) == {0:64, 1:128}[dsize]) - for pt,laplacian,size,dir,hessian in kp: - self.assert_((0 <= pt[0]) and (pt[0] <= w)) - self.assert_((0 <= pt[1]) and (pt[1] <= h)) - self.assert_(laplacian in [-1, 0, 1]) - self.assert_((0 <= dir) and (dir <= 360)) - self.assert_(hessian >= hessthresh) - - def test_FillPoly(self): - scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1) - random.seed(0) - for i in range(50): - cv.SetZero(scribble) - self.assert_(cv.CountNonZero(scribble) == 0) - cv.FillPoly(scribble, [ [ (random.randrange(640), random.randrange(480)) for i in range(100) ] ], (255,)) - self.assert_(cv.CountNonZero(scribble) != 0) - - def test_FindChessboardCorners(self): - im = cv.CreateImage((512,512), cv.IPL_DEPTH_8U, 1) - cv.Set(im, 128) - - # Empty image run - status,corners = cv.FindChessboardCorners( im, (7,7) ) - - # Perfect checkerboard - 
def xf(i,j, o): - return ((96 + o) + 40 * i, (96 + o) + 40 * j) - for i in range(8): - for j in range(8): - color = ((i ^ j) & 1) * 255 - cv.Rectangle(im, xf(i,j, 0), xf(i,j, 39), color, cv.CV_FILLED) - status,corners = cv.FindChessboardCorners( im, (7,7) ) - self.assert_(status) - self.assert_(len(corners) == (7 * 7)) - - # Exercise corner display - im3 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 3) - cv.Merge(im, im, im, None, im3) - cv.DrawChessboardCorners(im3, (7,7), corners, status) - - if 0: - self.snap(im3) - - # Run it with too many corners - cv.Set(im, 128) - for i in range(40): - for j in range(40): - color = ((i ^ j) & 1) * 255 - x = 30 + 6 * i - y = 30 + 4 * j - cv.Rectangle(im, (x, y), (x+4, y+4), color, cv.CV_FILLED) - status,corners = cv.FindChessboardCorners( im, (7,7) ) - - # XXX - this is very slow - if 0: - rng = cv.RNG(0) - cv.RandArr(rng, im, cv.CV_RAND_UNI, 0, 255.0) - self.snap(im) - status,corners = cv.FindChessboardCorners( im, (7,7) ) - - def test_FindContours(self): - random.seed(0) - - storage = cv.CreateMemStorage() - - # First run FindContours on a black image. - for mode in [cv.CV_RETR_EXTERNAL, cv.CV_RETR_LIST, cv.CV_RETR_CCOMP, cv.CV_RETR_TREE]: - for method in [cv.CV_CHAIN_CODE, cv.CV_CHAIN_APPROX_NONE, cv.CV_CHAIN_APPROX_SIMPLE, cv.CV_CHAIN_APPROX_TC89_L1, cv.CV_CHAIN_APPROX_TC89_KCOS, cv.CV_LINK_RUNS]: - scratch = cv.CreateImage((800,800), 8, 1) - cv.SetZero(scratch) - seq = cv.FindContours(scratch, storage, mode, method) - x = len(seq) - if seq: - pass - for s in seq: - pass - - for trial in range(10): - scratch = cv.CreateImage((800,800), 8, 1) - cv.SetZero(scratch) - def plot(center, radius, mode): - cv.Circle(scratch, center, radius, mode, -1) - if radius < 20: - return 0 - else: - newmode = 255 - mode - subs = random.choice([1,2,3]) - if subs == 1: - return [ plot(center, radius - 5, newmode) ] - else: - newradius = int({ 2: radius / 2, 3: radius / 2.3 }[subs] - 5) - r = radius / 2 - ret = [] - for i in range(subs): - th = i * (2 * math.pi) / subs - ret.append(plot((int(center[0] + r * math.cos(th)), int(center[1] + r * math.sin(th))), newradius, newmode)) - return sorted(ret) - - actual = plot((400,400), 390, 255 ) - - seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE) - - def traverse(s): - if s == None: - return 0 - else: - self.assert_(abs(cv.ContourArea(s)) > 0.0) - ((x,y),(w,h),th) = cv.MinAreaRect2(s, cv.CreateMemStorage()) - self.assert_(((w / h) - 1.0) < 0.01) - self.assert_(abs(cv.ContourArea(s)) > 0.0) - r = [] - while s: - r.append(traverse(s.v_next())) - s = s.h_next() - return sorted(r) - self.assert_(traverse(seq.v_next()) == actual) - - if 1: - original = cv.CreateImage((800,800), 8, 1) - cv.SetZero(original) - cv.Circle(original, (400, 400), 200, 255, -1) - cv.Circle(original, (100, 100), 20, 255, -1) - else: - original = self.get_sample("samples/c/lena.jpg", 0) - cv.Threshold(original, original, 128, 255, cv.CV_THRESH_BINARY); - - contours = cv.FindContours(original, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE) - - - def contour_iterator(contour): - while contour: - yield contour - contour = contour.h_next() - - # Should be 2 contours from the two circles above - self.assertEqual(len(list(contour_iterator(contours))), 2) - - # Smoke DrawContours - sketch = cv.CreateImage(cv.GetSize(original), 8, 3) - cv.SetZero(sketch) - red = cv.RGB(255, 0, 0) - green = cv.RGB(0, 255, 0) - for c in contour_iterator(contours): - cv.DrawContours(sketch, c, red, green, 0) - # self.snap(sketch) - - def 
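The synthetic-checkerboard trick used in the deleted test carries over directly; a sketch of the cv2 equivalent, with numpy rasterization in place of cv.Rectangle and all sizes taken from the test above:

    import numpy as np
    import cv2

    img = np.full((512, 512), 128, np.uint8)   # grey background, 8x8 board of 40px squares
    for i in range(8):
        for j in range(8):
            x, y = 96 + 40 * i, 96 + 40 * j
            img[y:y + 40, x:x + 40] = ((i ^ j) & 1) * 255

    found, corners = cv2.findChessboardCorners(img, (7, 7))
    assert found and len(corners) == 7 * 7

    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.drawChessboardCorners(vis, (7, 7), corners, found)   # exercise corner display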
test_GetAffineTransform(self): - mapping = cv.CreateMat(2, 3, cv.CV_32FC1) - cv.GetAffineTransform([ (0,0), (1,0), (0,1) ], [ (0,0), (17,0), (0,17) ], mapping) - self.assertAlmostEqual(mapping[0,0], 17, 2) - self.assertAlmostEqual(mapping[1,1], 17, 2) - - def test_GetRotationMatrix2D(self): - mapping = cv.CreateMat(2, 3, cv.CV_32FC1) - for scale in [0.0, 1.0, 2.0]: - for angle in [0.0, 360.0]: - cv.GetRotationMatrix2D((0,0), angle, scale, mapping) - for r in [0, 1]: - for c in [0, 1, 2]: - if r == c: - e = scale - else: - e = 0.0 - self.assertAlmostEqual(mapping[r, c], e, 2) - - def test_GetSize(self): - self.assert_(cv.GetSize(cv.CreateMat(5, 7, cv.CV_32FC1)) == (7,5)) - self.assert_(cv.GetSize(cv.CreateImage((7,5), cv.IPL_DEPTH_8U, 1)) == (7,5)) - - def test_GetStarKeypoints(self): - src = self.get_sample("samples/c/lena.jpg", 0) - storage = cv.CreateMemStorage() - kp = cv.GetStarKeypoints(src, storage) - self.assert_(len(kp) > 0) - for (x,y),scale,r in kp: - self.assert_(0 <= x) - self.assert_(x <= cv.GetSize(src)[0]) - self.assert_(0 <= y) - self.assert_(y <= cv.GetSize(src)[1]) - return - scribble = cv.CreateImage(cv.GetSize(src), 8, 3) - cv.CvtColor(src, scribble, cv.CV_GRAY2BGR) - for (x,y),scale,r in kp: - print x,y,scale,r - cv.Circle(scribble, (x,y), scale, cv.RGB(255,0,0)) - self.snap(scribble) - - def test_GetSubRect(self): - src = cv.CreateImage((100,100), 8, 1) - data = "z" * (100 * 100) - - cv.SetData(src, data, 100) - start_count = sys.getrefcount(data) - - iter = 77 - subs = [] - for i in range(iter): - sub = cv.GetSubRect(src, (0, 0, 10, 10)) - subs.append(sub) - self.assert_(sys.getrefcount(data) == (start_count + iter)) - - src = self.get_sample("samples/c/lena.jpg", 0) - made = cv.CreateImage(cv.GetSize(src), 8, 1) - sub = cv.CreateMat(32, 32, cv.CV_8UC1) - for x in range(0, 512, 32): - for y in range(0, 512, 32): - sub = cv.GetSubRect(src, (x, y, 32, 32)) - cv.SetImageROI(made, (x, y, 32, 32)) - cv.Copy(sub, made) - cv.ResetImageROI(made) - cv.AbsDiff(made, src, made) - self.assert_(cv.CountNonZero(made) == 0) - - for m1 in [cv.CreateMat(1, 10, cv.CV_8UC1), cv.CreateImage((10, 1), 8, 1)]: - for i in range(10): - m1[0, i] = i - def aslist(cvmat): return list(array.array('B', cvmat.tostring())) - m2 = cv.GetSubRect(m1, (5, 0, 4, 1)) - m3 = cv.GetSubRect(m2, (1, 0, 2, 1)) - self.assertEqual(aslist(m1), range(10)) - self.assertEqual(aslist(m2), range(5, 9)) - self.assertEqual(aslist(m3), range(6, 8)) - - def xtest_grabCut(self): - image = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_COLOR) - tmp1 = cv.CreateMat(1, 13 * 5, cv.CV_32FC1) - tmp2 = cv.CreateMat(1, 13 * 5, cv.CV_32FC1) - mask = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1) - cv.GrabCut(image, mask, (10,10,200,200), tmp1, tmp2, 10, cv.GC_INIT_WITH_RECT) - - def test_HoughLines2_PROBABILISTIC(self): - li = cv.HoughLines2(self.yield_line_image(), - cv.CreateMemStorage(), - cv.CV_HOUGH_PROBABILISTIC, - 1, - math.pi/180, - 50, - 50, - 10) - self.assert_(len(li) > 0) - self.assert_(li[0] != None) - - def test_HoughLines2_STANDARD(self): - li = cv.HoughLines2(self.yield_line_image(), - cv.CreateMemStorage(), - cv.CV_HOUGH_STANDARD, - 1, - math.pi/180, - 100, - 0, - 0) - self.assert_(len(li) > 0) - self.assert_(li[0] != None) - - def test_InPaint(self): - src = self.get_sample("samples/cpp/building.jpg") - msk = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1) - damaged = cv.CloneMat(src) - repaired = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 3) - difference = cv.CloneImage(repaired) - 
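The affine-transform assertions above have direct cv2 counterparts; a small sketch, with the point values copied from the deleted tests:

    import numpy as np
    import cv2

    src = np.float32([[0, 0], [1, 0], [0, 1]])
    M = cv2.getAffineTransform(src, 17 * src)              # 2x3 float64
    assert abs(M[0, 0] - 17) < 1e-6 and abs(M[1, 1] - 17) < 1e-6

    for scale in (0.0, 1.0, 2.0):
        R = cv2.getRotationMatrix2D((0, 0), 360.0, scale)  # full turn: scale * identity
        assert np.allclose(R[:2, :2], scale * np.eye(2), atol=1e-9)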
cv.SetZero(msk) - for method in [ cv.CV_INPAINT_NS, cv.CV_INPAINT_TELEA ]: - for (p0,p1) in [ ((10,10), (400,400)) ]: - cv.Line(damaged, p0, p1, cv.RGB(255, 0, 255), 2) - cv.Line(msk, p0, p1, 255, 2) - cv.Inpaint(damaged, msk, repaired, 10., cv.CV_INPAINT_NS) - cv.AbsDiff(src, repaired, difference) - #self.snapL([src, damaged, repaired, difference]) - - def test_InitLineIterator(self): - scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1) - self.assert_(len(list(cv.InitLineIterator(scribble, (20,10), (30,10)))) == 11) - - def test_InRange(self): - - sz = (256,256) - Igray1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1) - Ilow1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1) - Ihi1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1) - Igray2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1) - Ilow2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1) - Ihi2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1) - - Imask = cv.CreateImage(sz, cv.IPL_DEPTH_8U,1) - Imaskt = cv.CreateImage(sz,cv.IPL_DEPTH_8U,1) - - cv.InRange(Igray1, Ilow1, Ihi1, Imask); - cv.InRange(Igray2, Ilow2, Ihi2, Imaskt); - - cv.Or(Imask, Imaskt, Imask); - - def test_Line(self): - w,h = 640,480 - img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1) - cv.SetZero(img) - tricky = [ -8000, -2, -1, 0, 1, h/2, h-1, h, h+1, w/2, w-1, w, w+1, 8000] - for x0 in tricky: - for y0 in tricky: - for x1 in tricky: - for y1 in tricky: - for thickness in [ 0, 1, 8 ]: - for line_type in [0, 4, 8, cv.CV_AA ]: - cv.Line(img, (x0,y0), (x1,y1), 255, thickness, line_type) - # just check that something was drawn - self.assert_(cv.Sum(img)[0] > 0) - - def test_MinMaxLoc(self): - scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1) - los = [ (random.randrange(480), random.randrange(640)) for i in range(100) ] - his = [ (random.randrange(480), random.randrange(640)) for i in range(100) ] - for (lo,hi) in zip(los,his): - cv.Set(scribble, 128) - scribble[lo] = 0 - scribble[hi] = 255 - r = cv.MinMaxLoc(scribble) - self.assert_(r == (0, 255, tuple(reversed(lo)), tuple(reversed(hi)))) - - def xxx_test_PyrMeanShiftFiltering(self): # XXX - ticket #306 - if 0: - src = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_COLOR) - dst = cv.CloneMat(src) - cv.PyrMeanShiftFiltering(src, dst, 5, 5) - print src, dst - self.snap(src) - else: - r = cv.temp_test() - print r - print len(r.tostring()) - self.snap(r) - - def test_Reshape(self): - # 97 rows - # 12 cols - rows = 97 - cols = 12 - im = cv.CreateMat( rows, cols, cv.CV_32FC1 ) - elems = rows * cols * 1 - def crd(im): - return cv.GetSize(im) + (cv.CV_MAT_CN(cv.GetElemType(im)),) - - for c in (1, 2, 3, 4): - nc,nr,nd = crd(cv.Reshape(im, c)) - self.assert_(nd == c) - self.assert_((nc * nr * nd) == elems) - - nc,nr,nd = crd(cv.Reshape(im, 0, 97*2)) - self.assert_(nr == 97*2) - self.assert_((nc * nr * nd) == elems) - - nc,nr,nd = crd(cv.Reshape(im, 3, 97*2)) - self.assert_(nr == 97*2) - self.assert_(nd == 3) - self.assert_((nc * nr * nd) == elems) - - # Now test ReshapeMatND - mat = cv.CreateMatND([24], cv.CV_32FC1) - cv.Set(mat, 1.0) - self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 0, [24, 1])), (24, 1)) - self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 0, [6, 4])), (6, 4)) - self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 24, [1])), (1,)) - self.assertRaises(TypeError, lambda: cv.ReshapeMatND(mat, 12, [1])) - - def test_Save(self): - for o in [ cv.CreateImage((128,128), cv.IPL_DEPTH_8U, 1), cv.CreateMat(16, 16, cv.CV_32FC1), cv.CreateMatND([7,9,4], cv.CV_32FC1) ]: - cv.Save("test.save", o) - loaded = cv.Load("test.save", cv.CreateMemStorage()) - 
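test_MinMaxLoc above is really checking the (x, y) location convention; the same check in cv2, plus the InRange smoke, as a sketch:

    import numpy as np
    import cv2

    img = np.full((480, 640), 128, np.uint8)
    img[10, 20] = 0       # row 10, col 20
    img[30, 40] = 255

    minv, maxv, minloc, maxloc = cv2.minMaxLoc(img)
    assert (minv, maxv) == (0, 255)
    assert minloc == (20, 10) and maxloc == (40, 30)   # locations are (x, y), i.e. reversed

    mask = cv2.inRange(img, 100, 200)                  # 255 where 100 <= img <= 200
    assert cv2.countNonZero(mask) == 480 * 640 - 2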
self.assert_(type(o) == type(loaded)) - - def test_SetIdentity(self): - for r in range(1,16): - for c in range(1, 16): - for t in self.mat_types_single: - M = cv.CreateMat(r, c, t) - cv.SetIdentity(M) - for rj in range(r): - for cj in range(c): - if rj == cj: - expected = 1.0 - else: - expected = 0.0 - self.assertEqual(M[rj,cj], expected) - - def test_SnakeImage(self): - src = self.get_sample("samples/c/lena.jpg", 0) - pts = [ (512-i,i) for i in range(0, 512, 8) ] - - # Make sure that weight arguments get validated - self.assertRaises(TypeError, lambda: cv.SnakeImage(cv.GetImage(src), pts, [1,2], .01, .01, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1))) - - # Smoke by making sure that points are changed by call - r = cv.SnakeImage(cv.GetImage(src), pts, .01, .01, .01, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1)) - if 0: - cv.PolyLine(src, [ r ], 0, 255) - self.snap(src) - self.assertEqual(len(r), len(pts)) - self.assertNotEqual(r, pts) - - # Ensure that list of weights is same as scalar weight - w = [.01] * len(pts) - r2 = cv.SnakeImage(cv.GetImage(src), pts, w, w, w, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1)) - self.assertEqual(r, r2) - - def test_KMeans2(self): - size = 500 - samples = cv.CreateMat(size, 1, cv.CV_32FC3) - labels = cv.CreateMat(size, 1, cv.CV_32SC1) - centers = cv.CreateMat(2, 3, cv.CV_32FC1) - - cv.Zero(samples) - cv.Zero(labels) - cv.Zero(centers) - - cv.Set(cv.GetSubRect(samples, (0, 0, 1, size/2)), (255, 255, 255)) - - compact = cv.KMeans2(samples, 2, labels, (cv.CV_TERMCRIT_ITER, 100, 0.1), 1, 0, centers) - - self.assertEqual(int(compact), 0) - - random.seed(0) - for i in range(50): - index = random.randrange(size) - if index < size/2: - self.assertEqual(samples[index, 0], (255, 255, 255)) - self.assertEqual(labels[index, 0], 1) - else: - self.assertEqual(samples[index, 0], (0, 0, 0)) - self.assertEqual(labels[index, 0], 0) - - for cluster in (0, 1): - for channel in (0, 1, 2): - self.assertEqual(int(centers[cluster, channel]), cluster*255) - - def test_Sum(self): - for r in range(1,11): - for c in range(1, 11): - for t in self.mat_types_single: - M = cv.CreateMat(r, c, t) - cv.Set(M, 1) - self.assertEqual(cv.Sum(M)[0], r * c) - - def test_Threshold(self): - #""" directed test for bug 2790622 """ - src = self.get_sample("samples/c/lena.jpg", 0) - results = set() - for i in range(10): - dst = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1) - cv.Threshold(src, dst, 128, 128, cv.CV_THRESH_BINARY) - results.add(dst.tostring()) - # Should have produced the same answer every time, so results set should have size 1 - self.assert_(len(results) == 1) - - # ticket #71 repro attempt - image = self.get_sample("samples/c/lena.jpg", 0) - red = cv.CreateImage(cv.GetSize(image), 8, 1) - binary = cv.CreateImage(cv.GetSize(image), 8, 1) - cv.Split(image, red, None, None, None) - cv.Threshold(red, binary, 42, 255, cv.CV_THRESH_BINARY) - - ############################################################################## - - def yield_line_image(self): - """ Needed by HoughLines tests """ - src = self.get_sample("samples/cpp/building.jpg", 0) - dst = cv.CreateImage(cv.GetSize(src), 8, 1) - cv.Canny(src, dst, 50, 200, 3) - return dst - -# Tests for functional areas - -class AreaTests(OpenCVTests): - - def test_numpy(self): - if 'fromarray' in dir(cv): - import numpy - - def convert(numpydims): - """ Create a numpy array with specified dims, return the OpenCV CvMat """ - a1 = numpy.array([1] * reduce(operator.__mul__, numpydims)).reshape(*numpydims).astype(numpy.float32) - return cv.fromarray(a1) - def 
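test_Threshold above guards against the nondeterminism of bug 2790622; the regression check ports to cv2 in a few lines (a sketch, with seeded random input standing in for lena):

    import hashlib
    import numpy as np
    import cv2

    src = np.random.RandomState(0).randint(0, 256, (512, 512)).astype(np.uint8)
    hashes = set()
    for _ in range(10):
        _, dst = cv2.threshold(src, 128, 128, cv2.THRESH_BINARY)
        hashes.add(hashlib.md5(dst.tostring()).digest())
    assert len(hashes) == 1   # identical input must give bit-identical output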
row_col_chan(m): - col = m.cols - row = m.rows - chan = cv.CV_MAT_CN(cv.GetElemType(m)) - return (row, col, chan) - - self.assertEqual(row_col_chan(convert((2, 13))), (2, 13, 1)) - self.assertEqual(row_col_chan(convert((2, 13, 4))), (2, 13, 4)) - self.assertEqual(row_col_chan(convert((2, 13, cv.CV_CN_MAX))), (2, 13, cv.CV_CN_MAX)) - self.assertRaises(TypeError, lambda: convert((2,))) - self.assertRaises(TypeError, lambda: convert((11, 17, cv.CV_CN_MAX + 1))) - - for t in [cv.CV_16UC1, cv.CV_32SC1, cv.CV_32FC1]: - for d in [ (8,), (1,7), (2,3,4), (7,9,2,1,8), (1,2,3,4,5,6,7,8) ]: - total = reduce(operator.__mul__, d) - m = cv.CreateMatND(d, t) - for i in range(total): - cv.Set1D(m, i, i) - na = numpy.asarray(m).reshape((total,)) - self.assertEqual(list(na), range(total)) - - # now do numpy -> cvmat, and verify - m2 = cv.fromarray(na, True) - - # Check that new cvmat m2 contains same counting sequence - for i in range(total): - self.assertEqual(cv.Get1D(m, i)[0], i) - - # Verify round-trip for 2D arrays - for rows in [2, 3, 7, 13]: - for cols in [2, 3, 7, 13]: - for allowND in [False, True]: - im = cv.CreateMatND([rows, cols], cv.CV_16UC1) - cv.SetZero(im) - a = numpy.asarray(im) - self.assertEqual(a.shape, (rows, cols)) - cvmatnd = cv.fromarray(a, allowND) - self.assertEqual(cv.GetDims(cvmatnd), (rows, cols)) - - # im, a and cvmatnd all point to the same data, so... - for i,coord in enumerate([(0,0), (0,1), (1,0), (1,1)]): - v = 5 + i + 7 - a[coord] = v - self.assertEqual(im[coord], v) - self.assertEqual(cvmatnd[coord], v) - - # Cv -> Numpy 3 channel check - im = cv.CreateMatND([2, 13], cv.CV_16UC3) - self.assertEqual(numpy.asarray(im).shape, (2, 13, 3)) - - # multi-dimensional NumPy array - na = numpy.ones([7,9,2,1,8]) - cm = cv.fromarray(na, True) - self.assertEqual(cv.GetDims(cm), (7,9,2,1,8)) - - # Using an array object for a CvArr parameter - ones = numpy.ones((640, 480)) - r = cv.fromarray(numpy.ones((640, 480))) - cv.AddS(cv.fromarray(ones), 7, r) - self.assert_(numpy.alltrue(r == (8 * ones))) - - # create arrays, use them in OpenCV and replace the the array - # looking for leaks - def randdim(): - return [random.randrange(1,6) for i in range(random.randrange(1, 6))] - arrays = [numpy.ones(randdim()).astype(numpy.uint8) for i in range(10)] - cs = [cv.fromarray(a, True) for a in arrays] - for i in range(1000): - arrays[random.randrange(10)] = numpy.ones(randdim()).astype(numpy.uint8) - cs[random.randrange(10)] = cv.fromarray(arrays[random.randrange(10)], True) - for j in range(10): - self.assert_(all([c == chr(1) for c in cs[j].tostring()])) - - # - m = numpy.identity(4, dtype = numpy.float32) - m = cv.fromarray(m[:3, :3]) - rvec = cv.CreateMat(3, 1, cv.CV_32FC1) - rvec[0,0] = 1 - rvec[1,0] = 1 - rvec[2,0] = 1 - cv.Rodrigues2(rvec, m) - #print m - - else: - print "SKIPPING test_numpy - numpy support not built" - - def test_boundscatch(self): - l2 = cv.CreateMat(256, 1, cv.CV_8U) - l2[0,0] # should be OK - self.assertRaises(cv.error, lambda: l2[1,1]) - l2[0] # should be OK - self.assertRaises(cv.error, lambda: l2[299]) - for n in range(1, 8): - l = cv.CreateMatND([2] * n, cv.CV_8U) - l[0] # should be OK - self.assertRaises(cv.error, lambda: l[999]) - - tup0 = (0,) * n - l[tup0] # should be OK - tup2 = (2,) * n - self.assertRaises(cv.error, lambda: l[tup2]) - - def test_stereo(self): - left = self.get_sample2("samples/cpp/tsukuba_l.png", 0) - right = self.get_sample2("samples/cpp/tsukuba_r.png", 0) - bm = cv2.createStereoBM(32, 11) - disparity = bm.compute(left, right) - 
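Everything test_numpy verifies through cv.fromarray becomes trivial in cv2, whose functions consume and return numpy arrays directly. A sketch mirroring the Rodrigues round-trip at the end of the test:

    import numpy as np
    import cv2

    rvec = np.float32([1, 1, 1])
    R, jac = cv2.Rodrigues(rvec)        # numpy in, numpy out; no conversion layer
    assert R.shape == (3, 3)
    rvec2, _ = cv2.Rodrigues(R)         # and back again
    assert np.allclose(rvec2.ravel(), rvec, atol=1e-5)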
self.assertEqual(left.shape, disparity.shape) - sgbm = cv2.createStereoSGBM(0, 32, 5) - disparity2 = sgbm.compute(left, right) - self.assertEqual(left.shape, disparity2.shape) - - def test_kalman(self): - k = cv.CreateKalman(2, 1, 0) - - def failing_test_exception(self): - a = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1) - b = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1) - self.assertRaises(cv.error, lambda: cv.Laplace(a, b)) - - def test_cvmat_accessors(self): - cvm = cv.CreateMat(20, 10, cv.CV_32FC1) - - def test_depths(self): - #""" Make sure that the depth enums are unique """ - self.assert_(len(self.depths) == len(set(self.depths))) - - def test_leak(self): - #""" If CreateImage is not releasing image storage, then the loop below should use ~4GB of memory. """ - for i in range(64000): - a = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1) - for i in range(64000): - a = cv.CreateMat(1024, 1024, cv.CV_8UC1) - - def test_histograms(self): - def split(im): - nchans = cv.CV_MAT_CN(cv.GetElemType(im)) - c = [ cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1) for i in range(nchans) ] + [None] * (4 - nchans) - cv.Split(im, c[0], c[1], c[2], c[3]) - return c[:nchans] - def imh(im): - s = split(im) - hist = cv.CreateHist([256] * len(s), cv.CV_HIST_ARRAY, [ (0,255) ] * len(s), 1) - cv.CalcHist(s, hist, 0) - return hist - - dims = [180] - ranges = [(0,180)] - a = cv.CreateHist(dims, cv.CV_HIST_ARRAY , ranges, 1) - src = self.get_sample("samples/c/lena.jpg", 0) - h = imh(src) - (minv, maxv, minl, maxl) = cv.GetMinMaxHistValue(h) - self.assert_(cv.QueryHistValue_nD(h, minl) == minv) - self.assert_(cv.QueryHistValue_nD(h, maxl) == maxv) - bp = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1) - cv.CalcBackProject(split(src), bp, h) - bp = cv.CreateImage((cv.GetSize(src)[0]-2, cv.GetSize(src)[1]-2), cv.IPL_DEPTH_32F, 1) - cv.CalcBackProjectPatch(split(src), bp, (3,3), h, cv.CV_COMP_INTERSECT, 1) - - for meth,expected in [(cv.CV_COMP_CORREL, 1.0), (cv.CV_COMP_CHISQR, 0.0), (cv.CV_COMP_INTERSECT, 1.0), (cv.CV_COMP_BHATTACHARYYA, 0.0)]: - self.assertEqual(cv.CompareHist(h, h, meth), expected) - - def test_remap(self): - rng = cv.RNG(0) - maxError = 1e-6 - raw = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1) - for x in range(0, 640, 20): - cv.Line(raw, (x,0), (x,480), 255, 1) - for y in range(0, 480, 20): - cv.Line(raw, (0,y), (640,y), 255, 1) - intrinsic_mat = cv.CreateMat(3, 3, cv.CV_32FC1) - distortion_coeffs = cv.CreateMat(1, 4, cv.CV_32FC1) - - cv.SetZero(intrinsic_mat) - intrinsic_mat[0,2] = 320.0 - intrinsic_mat[1,2] = 240.0 - intrinsic_mat[0,0] = 320.0 - intrinsic_mat[1,1] = 320.0 - intrinsic_mat[2,2] = 1.0 - cv.SetZero(distortion_coeffs) - distortion_coeffs[0,0] = 1e-1 - mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) - mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) - cv.SetZero(mapx) - cv.SetZero(mapy) - cv.InitUndistortMap(intrinsic_mat, distortion_coeffs, mapx, mapy) - rect = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1) - - (w,h) = (640,480) - rMapxy = cv.CreateMat(h, w, cv.CV_16SC2) - rMapa = cv.CreateMat(h, w, cv.CV_16UC1) - cv.ConvertMaps(mapx,mapy,rMapxy,rMapa) - - cv.Remap(raw, rect, mapx, mapy) - cv.Remap(raw, rect, rMapxy, rMapa) - cv.Undistort2(raw, rect, intrinsic_mat, distortion_coeffs) - - for w in [1, 4, 4095, 4096, 4097, 4100]: - p = cv.CreateImage((w,256), 8, 1) - up = cv.CreateImage((w,256), 8, 1) - cv.Undistort2(p, up, intrinsic_mat, distortion_coeffs) - - fptypes = [cv.CV_32FC1, cv.CV_64FC1] - pointsCount = 7 - for t0 in fptypes: - for t1 in fptypes: - 
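The histogram plumbing in test_histograms (CreateHist, CalcHist, GetMinMaxHistValue) collapses to cv2.calcHist; a sketch with a synthetic image in place of lena:

    import numpy as np
    import cv2

    img = np.random.RandomState(7).randint(0, 256, (480, 640)).astype(np.uint8)
    hist = cv2.calcHist([img], [0], None, [256], [0, 256])   # 256x1 float32
    assert hist.shape == (256, 1)
    assert int(hist.sum()) == img.size        # every pixel lands in exactly one bin
    # comparing a histogram with itself; method 0 is correlation (CV_COMP_CORREL)
    assert abs(cv2.compareHist(hist, hist, 0) - 1.0) < 1e-6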
for t2 in fptypes: - for t3 in fptypes: - rotation_vector = cv.CreateMat(1, 3, t0) - translation_vector = cv.CreateMat(1, 3, t1) - cv.RandArr(rng, rotation_vector, cv.CV_RAND_UNI, -1.0, 1.0) - cv.RandArr(rng, translation_vector, cv.CV_RAND_UNI, -1.0, 1.0) - object_points = cv.CreateMat(pointsCount, 3, t2) - image_points = cv.CreateMat(pointsCount, 2, t3) - cv.RandArr(rng, object_points, cv.CV_RAND_UNI, -100.0, 100.0) - cv.ProjectPoints2(object_points, rotation_vector, translation_vector, intrinsic_mat, distortion_coeffs, image_points) - - reshaped_object_points = cv.Reshape(object_points, 1, 3) - reshaped_image_points = cv.CreateMat(2, pointsCount, t3) - cv.ProjectPoints2(object_points, rotation_vector, translation_vector, intrinsic_mat, distortion_coeffs, reshaped_image_points) - - error = cv.Norm(reshaped_image_points, cv.Reshape(image_points, 1, 2)) - self.assert_(error < maxError) - - def test_arithmetic(self): - a = cv.CreateMat(4, 4, cv.CV_8UC1) - a[0,0] = 50.0 - b = cv.CreateMat(4, 4, cv.CV_8UC1) - b[0,0] = 4.0 - d = cv.CreateMat(4, 4, cv.CV_8UC1) - cv.Add(a, b, d) - self.assertEqual(d[0,0], 54.0) - cv.Mul(a, b, d) - self.assertEqual(d[0,0], 200.0) - - - def failing_test_cvtcolor(self): - src3 = self.get_sample("samples/c/lena.jpg") - src1 = self.get_sample("samples/c/lena.jpg", 0) - dst8u = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_8U, c)) for c in (1,2,3,4)]) - dst16u = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_16U, c)) for c in (1,2,3,4)]) - dst32f = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_32F, c)) for c in (1,2,3,4)]) - - for srcf in ["BGR", "RGB"]: - for dstf in ["Luv"]: - cv.CvtColor(src3, dst8u[3], eval("cv.CV_%s2%s" % (srcf, dstf))) - cv.CvtColor(src3, dst32f[3], eval("cv.CV_%s2%s" % (srcf, dstf))) - cv.CvtColor(src3, dst8u[3], eval("cv.CV_%s2%s" % (dstf, srcf))) - - for srcf in ["BayerBG", "BayerGB", "BayerGR"]: - for dstf in ["RGB", "BGR"]: - cv.CvtColor(src1, dst8u[3], eval("cv.CV_%s2%s" % (srcf, dstf))) - - def test_voronoi(self): - w,h = 500,500 - - storage = cv.CreateMemStorage(0) - - def facet_edges(e0): - e = e0 - while True: - e = cv.Subdiv2DGetEdge(e, cv.CV_NEXT_AROUND_LEFT) - yield e - if e == e0: - break + if sys.version_info[:2] == (2, 6): + def assertLess(self, a, b, msg=None): + if not a < b: + self.fail('%s not less than %s' % (repr(a), repr(b))) - def areas(edges): - seen = [] - seensorted = [] - for edge in edges: - pts = [ cv.Subdiv2DEdgeOrg(e) for e in facet_edges(edge) ] - if not (None in pts): - l = [p.pt for p in pts] - ls = sorted(l) - if not(ls in seensorted): - seen.append(l) - seensorted.append(ls) - return seen + def assertLessEqual(self, a, b, msg=None): + if not a <= b: + self.fail('%s not less than or equal to %s' % (repr(a), repr(b))) - for npoints in range(1, 200): - points = [ (random.randrange(w), random.randrange(h)) for i in range(npoints) ] - subdiv = cv.CreateSubdivDelaunay2D( (0,0,w,h), storage ) - for p in points: - cv.SubdivDelaunay2DInsert( subdiv, p) - cv.CalcSubdivVoronoi2D(subdiv) - ars = areas([ cv.Subdiv2DRotateEdge(e, 1) for e in subdiv.edges ] + [ cv.Subdiv2DRotateEdge(e, 3) for e in subdiv.edges ]) - self.assert_(len(ars) == len(set(points))) + def assertGreater(self, a, b, msg=None): + if not a > b: + self.fail('%s not greater than %s' % (repr(a), repr(b))) - if False: - img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 3) - cv.SetZero(img) - def T(x): return int(x) # int(300+x/16) - for pts in ars: - cv.FillConvexPoly( img, [(T(x),T(y)) for (x,y) in pts], 
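The ProjectPoints2 consistency loop above maps onto cv2.projectPoints, which the new test_projectPoints further down also exercises. A pinhole sanity sketch with illustrative intrinsics:

    import numpy as np
    import cv2

    K = np.float64([[320, 0, 320], [0, 320, 240], [0, 0, 1]])
    obj = np.float64([[0, 0, 10], [1, 1, 10], [-1, 2, 10]])
    imgpts, jac = cv2.projectPoints(obj, np.zeros(3), np.zeros(3), K, np.zeros(4))
    assert imgpts.shape == (3, 1, 2)
    assert jac.shape[0] == 2 * len(obj)
    assert np.allclose(imgpts[1, 0], (352, 272))   # x/z * fx + cx, y/z * fy + cy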
cv.RGB(100+random.randrange(156),random.randrange(256),random.randrange(256)), cv.CV_AA, 0 ); - for x,y in points: - cv.Circle(img, (T(x), T(y)), 3, cv.RGB(0,0,0), -1) - - cv.ShowImage("snap", img) - if cv.WaitKey(10) > 0: - break - - def perf_test_pow(self): - mt = cv.CreateMat(1000, 1000, cv.CV_32FC1) - dst = cv.CreateMat(1000, 1000, cv.CV_32FC1) - rng = cv.RNG(0) - cv.RandArr(rng, mt, cv.CV_RAND_UNI, 0, 1000.0) - mt[0,0] = 10 - print - for a in [0.5, 2.0, 2.3, 2.4, 3.0, 37.1786] + [2.4]*10: - started = time.time() - for i in range(10): - cv.Pow(mt, dst, a) - took = (time.time() - started) / 1e7 - print "%4.1f took %f ns" % (a, took * 1e9) - print dst[0,0], 10 ** 2.4 - - def test_access_row_col(self): - src = cv.CreateImage((8,3), 8, 1) - # Put these words - # Achilles - # Benedict - # Congreve - # in an array (3 rows, 8 columns). - # Then extract the array in various ways. - - for r,w in enumerate(("Achilles", "Benedict", "Congreve")): - for c,v in enumerate(w): - src[r,c] = ord(v) - self.assertEqual(src.tostring(), "AchillesBenedictCongreve") - self.assertEqual(src[:,:].tostring(), "AchillesBenedictCongreve") - self.assertEqual(src[:,:4].tostring(), "AchiBeneCong") - self.assertEqual(src[:,0].tostring(), "ABC") - self.assertEqual(src[:,4:].tostring(), "llesdictreve") - self.assertEqual(src[::2,:].tostring(), "AchillesCongreve") - self.assertEqual(src[1:,:].tostring(), "BenedictCongreve") - self.assertEqual(src[1:2,:].tostring(), "Benedict") - self.assertEqual(src[::2,:4].tostring(), "AchiCong") - # The mats share the same storage, so updating one should update them all - lastword = src[2] - self.assertEqual(lastword.tostring(), "Congreve") - src[2,0] = ord('K') - self.assertEqual(lastword.tostring(), "Kongreve") - src[2,0] = ord('C') - - # ABCD - # EFGH - # IJKL - # - # MNOP - # QRST - # UVWX - - mt = cv.CreateMatND([2,3,4], cv.CV_8UC1) - for i in range(2): - for j in range(3): - for k in range(4): - mt[i,j,k] = ord('A') + k + 4 * (j + 3 * i) - self.assertEqual(mt[:,:,:1].tostring(), "AEIMQU") - self.assertEqual(mt[:,:1,:].tostring(), "ABCDMNOP") - self.assertEqual(mt[:1,:,:].tostring(), "ABCDEFGHIJKL") - self.assertEqual(mt[1,1].tostring(), "QRST") - self.assertEqual(mt[:,::2,:].tostring(), "ABCDIJKLMNOPUVWX") - - # Exercise explicit GetRows - self.assertEqual(cv.GetRows(src, 0, 3).tostring(), "AchillesBenedictCongreve") - self.assertEqual(cv.GetRows(src, 0, 3, 1).tostring(), "AchillesBenedictCongreve") - self.assertEqual(cv.GetRows(src, 0, 3, 2).tostring(), "AchillesCongreve") - - self.assertEqual(cv.GetRow(src, 0).tostring(), "Achilles") - - self.assertEqual(cv.GetCols(src, 0, 4).tostring(), "AchiBeneCong") - - self.assertEqual(cv.GetCol(src, 0).tostring(), "ABC") - self.assertEqual(cv.GetCol(src, 1).tostring(), "ceo") - - self.assertEqual(cv.GetDiag(src, 0).tostring(), "Aen") - - # Check that matrix type is preserved by the various operators - - for mt in self.mat_types: - m = cv.CreateMat(5, 3, mt) - self.assertEqual(mt, cv.GetElemType(cv.GetRows(m, 0, 2))) - self.assertEqual(mt, cv.GetElemType(cv.GetRow(m, 0))) - self.assertEqual(mt, cv.GetElemType(cv.GetCols(m, 0, 2))) - self.assertEqual(mt, cv.GetElemType(cv.GetCol(m, 0))) - self.assertEqual(mt, cv.GetElemType(cv.GetDiag(m, 0))) - self.assertEqual(mt, cv.GetElemType(m[0])) - self.assertEqual(mt, cv.GetElemType(m[::2])) - self.assertEqual(mt, cv.GetElemType(m[:,0])) - self.assertEqual(mt, cv.GetElemType(m[:,:])) - self.assertEqual(mt, cv.GetElemType(m[::2,:])) - - def test_addS_3D(self): - for dim in [ [1,1,4], [2,2,3], [7,4,3] 
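All of the slicing that test_access_row_col performs with GetRows/GetCols/GetDiag is plain numpy indexing once images are arrays; a sketch with the same "Achilles/Benedict/Congreve" matrix (tostring() kept for consistency with the era of this file):

    import numpy as np

    src = np.frombuffer(b"AchillesBenedictCongreve", np.uint8).reshape(3, 8)
    assert src[:, :4].tostring() == b"AchiBeneCong"       # cv.GetCols(src, 0, 4)
    assert src[::2, :].tostring() == b"AchillesCongreve"  # stride-2 rows
    assert src.diagonal().tostring() == b"Aen"            # cv.GetDiag(src, 0)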
]: - for ty,ac in [ (cv.CV_32FC1, 'f'), (cv.CV_64FC1, 'd')]: - mat = cv.CreateMatND(dim, ty) - mat2 = cv.CreateMatND(dim, ty) - for increment in [ 0, 3, -1 ]: - cv.SetData(mat, array.array(ac, range(dim[0] * dim[1] * dim[2])), 0) - cv.AddS(mat, increment, mat2) - for i in range(dim[0]): - for j in range(dim[1]): - for k in range(dim[2]): - self.assert_(mat2[i,j,k] == mat[i,j,k] + increment) - - def test_buffers(self): - ar = array.array('f', [7] * (360*640)) - - m = cv.CreateMat(360, 640, cv.CV_32FC1) - cv.SetData(m, ar, 4 * 640) - self.assert_(m[0,0] == 7.0) - - m = cv.CreateMatND((360, 640), cv.CV_32FC1) - cv.SetData(m, ar, 4 * 640) - self.assert_(m[0,0] == 7.0) - - m = cv.CreateImage((640, 360), cv.IPL_DEPTH_32F, 1) - cv.SetData(m, ar, 4 * 640) - self.assert_(m[0,0] == 7.0) - - def xxtest_Filters(self): - print - m = cv.CreateMat(360, 640, cv.CV_32FC1) - d = cv.CreateMat(360, 640, cv.CV_32FC1) - for k in range(3, 21, 2): - started = time.time() - for i in range(1000): - cv.Smooth(m, m, param1=k) - print k, "took", time.time() - started - - def assertSame(self, a, b): - w,h = cv.GetSize(a) - d = cv.CreateMat(h, w, cv.CV_8UC1) - cv.AbsDiff(a, b, d) - self.assert_(cv.CountNonZero(d) == 0) - - def test_text(self): - img = cv.CreateImage((640,40), cv.IPL_DEPTH_8U, 1) - cv.SetZero(img) - font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1) - message = "XgfooX" - cv.PutText(img, message, (320,30), font, 255) - ((w,h),bl) = cv.GetTextSize(message, font) - - # Find nonzero in X and Y - Xs = [] - for x in range(640): - cv.SetImageROI(img, (x, 0, 1, 40)) - Xs.append(cv.Sum(img)[0] > 0) - def firstlast(l): - return (l.index(True), len(l) - list(reversed(l)).index(True)) - - Ys = [] - for y in range(40): - cv.SetImageROI(img, (0, y, 640, 1)) - Ys.append(cv.Sum(img)[0] > 0) - - x0,x1 = firstlast(Xs) - y0,y1 = firstlast(Ys) - actual_width = x1 - x0 - actual_height = y1 - y0 - - # actual_width can be up to 8 pixels smaller than GetTextSize says - self.assert_(actual_width <= w) - self.assert_((w - actual_width) <= 8) - - # actual_height can be up to 4 pixels smaller than GetTextSize says - self.assert_(actual_height <= (h + bl)) - self.assert_(((h + bl) - actual_height) <= 4) - - cv.ResetImageROI(img) - self.assert_(w != 0) - self.assert_(h != 0) - - def test_sizes(self): - sizes = [ 1, 2, 3, 97, 255, 256, 257, 947 ] - for w in sizes: - for h in sizes: - # Create an IplImage - im = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1) - cv.Set(im, 1) - self.assert_(cv.Sum(im)[0] == (w * h)) - del im - # Create a CvMat - mt = cv.CreateMat(h, w, cv.CV_8UC1) - cv.Set(mt, 1) - self.assert_(cv.Sum(mt)[0] == (w * h)) - - random.seed(7) - for dim in range(1, cv.CV_MAX_DIM + 1): - for attempt in range(10): - dims = [ random.choice([1,1,1,1,2,3]) for i in range(dim) ] - mt = cv.CreateMatND(dims, cv.CV_8UC1) - cv.SetZero(mt) - self.assert_(cv.Sum(mt)[0] == 0) - # Set to all-ones, verify the sum - cv.Set(mt, 1) - expected = 1 - for d in dims: - expected *= d - self.assert_(cv.Sum(mt)[0] == expected) - - def test_random(self): - seeds = [ 0, 1, 2**48, 2**48 + 1 ] - sequences = set() - for s in seeds: - rng = cv.RNG(s) - sequences.add(str([cv.RandInt(rng) for i in range(10)])) - self.assert_(len(seeds) == len(sequences)) - - rng = cv.RNG(0) - im = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1) - cv.RandArr(rng, im, cv.CV_RAND_UNI, 0, 256) - cv.RandArr(rng, im, cv.CV_RAND_NORMAL, 128, 30) - if 1: - hist = cv.CreateHist([ 256 ], cv.CV_HIST_ARRAY, [ (0,255) ], 1) - cv.CalcHist([im], hist) - - rng = cv.RNG() - for i in 
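test_text above compares rendered ink against GetTextSize; the cv2 version is sketched below. The 8-pixel slack is the empirical tolerance from the deleted test, not a documented guarantee:

    import numpy as np
    import cv2

    img = np.zeros((40, 640), np.uint8)
    font = cv2.FONT_HERSHEY_SIMPLEX
    (w, h), baseline = cv2.getTextSize("XgfooX", font, 1, 1)
    cv2.putText(img, "XgfooX", (320, 30), font, 1, 255)

    ys, xs = np.nonzero(img)          # bounding box of the actual ink
    actual_w = xs.max() - xs.min()
    assert actual_w <= w and (w - actual_w) <= 8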
range(1000): - v = cv.RandReal(rng) - self.assert_(0 <= v) - self.assert_(v < 1) - - for mode in [ cv.CV_RAND_UNI, cv.CV_RAND_NORMAL ]: - for fmt in self.mat_types: - mat = cv.CreateMat(64, 64, fmt) - cv.RandArr(cv.RNG(), mat, mode, (0,0,0,0), (1,1,1,1)) - - def test_MixChannels(self): - - # First part - test the single case described in the documentation - rgba = cv.CreateMat(100, 100, cv.CV_8UC4) - bgr = cv.CreateMat(100, 100, cv.CV_8UC3) - alpha = cv.CreateMat(100, 100, cv.CV_8UC1) - cv.Set(rgba, (1,2,3,4)) - cv.MixChannels([rgba], [bgr, alpha], [ - (0, 2), # rgba[0] -> bgr[2] - (1, 1), # rgba[1] -> bgr[1] - (2, 0), # rgba[2] -> bgr[0] - (3, 3) # rgba[3] -> alpha[0] - ]) - self.assert_(bgr[0,0] == (3,2,1)) - self.assert_(alpha[0,0] == 4) - - # Second part. Choose random sets of sources and destinations, - # fill them with known values, choose random channel assignments, - # run cvMixChannels and check that the result is as expected. - - random.seed(1) - - for rows in [1,2,4,13,64,1000]: - for cols in [1,2,4,13,64,1000]: - for loop in range(5): - sources = [random.choice([1, 2, 3, 4]) for i in range(8)] - dests = [random.choice([1, 2, 3, 4]) for i in range(8)] - # make sure that fromTo does not have duplicates in dests, otherwise the result is not determined - while 1: - fromTo = [(random.randrange(-1, sum(sources)), random.randrange(sum(dests))) for i in range(random.randrange(1, 30))] - dests_set = list(set([j for (i, j) in fromTo])) - if len(dests_set) == len(dests): - break - - # print sources - # print dests - # print fromTo - - def CV_8UC(n): - return [cv.CV_8UC1, cv.CV_8UC2, cv.CV_8UC3, cv.CV_8UC4][n-1] - source_m = [cv.CreateMat(rows, cols, CV_8UC(c)) for c in sources] - dest_m = [cv.CreateMat(rows, cols, CV_8UC(c)) for c in dests] - - def m00(m): - # return the contents of the N channel mat m[0,0] as a N-length list - chans = cv.CV_MAT_CN(cv.GetElemType(m)) - if chans == 1: - return [m[0,0]] - else: - return list(m[0,0])[:chans] - - # Sources numbered from 50, destinations numbered from 100 - - for i in range(len(sources)): - s = sum(sources[:i]) + 50 - cv.Set(source_m[i], (s, s+1, s+2, s+3)) - self.assertEqual(m00(source_m[i]), [s, s+1, s+2, s+3][:sources[i]]) - - for i in range(len(dests)): - s = sum(dests[:i]) + 100 - cv.Set(dest_m[i], (s, s+1, s+2, s+3)) - self.assertEqual(m00(dest_m[i]), [s, s+1, s+2, s+3][:dests[i]]) - - # now run the sanity check - - for i in range(len(sources)): - s = sum(sources[:i]) + 50 - self.assertEqual(m00(source_m[i]), [s, s+1, s+2, s+3][:sources[i]]) - - for i in range(len(dests)): - s = sum(dests[:i]) + 100 - self.assertEqual(m00(dest_m[i]), [s, s+1, s+2, s+3][:dests[i]]) - - cv.MixChannels(source_m, dest_m, fromTo) - - expected = range(100, 100 + sum(dests)) - for (i, j) in fromTo: - if i == -1: - expected[j] = 0.0 - else: - expected[j] = 50 + i - - actual = sum([m00(m) for m in dest_m], []) - self.assertEqual(sum([m00(m) for m in dest_m], []), expected) - - def test_allocs(self): - mats = [ 0 for i in range(20) ] - for i in range(1000): - m = cv.CreateMat(random.randrange(10, 512), random.randrange(10, 512), cv.CV_8UC1) - j = random.randrange(len(mats)) - mats[j] = m - cv.SetZero(m) - - def test_access(self): - cnames = { 1:cv.CV_32FC1, 2:cv.CV_32FC2, 3:cv.CV_32FC3, 4:cv.CV_32FC4 } - - for w in range(1,11): - for h in range(2,11): - for c in [1,2]: - for o in [ cv.CreateMat(h, w, cnames[c]), cv.CreateImage((w,h), cv.IPL_DEPTH_32F, c) ][1:]: - pattern = [ (i,j) for i in range(w) for j in range(h) ] - random.shuffle(pattern) - for k,(i,j) in 
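cv2 does expose mixChannels, but its Python signature has shifted between releases; split/merge expresses the documented RGBA to BGR-plus-alpha case stably. A sketch:

    import numpy as np
    import cv2

    rgba = np.zeros((100, 100, 4), np.uint8)
    rgba[:] = (1, 2, 3, 4)

    r, g, b, a = cv2.split(rgba)      # channels in stored order
    bgr = cv2.merge([b, g, r])        # rgba[0]->bgr[2], rgba[1]->bgr[1], rgba[2]->bgr[0]
    assert tuple(bgr[0, 0]) == (3, 2, 1) and a[0, 0] == 4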
enumerate(pattern): - if c == 1: - o[j,i] = k - else: - o[j,i] = (k,) * c - for k,(i,j) in enumerate(pattern): - if c == 1: - self.assert_(o[j,i] == k) - else: - self.assert_(o[j,i] == (k,)*c) - - test_mat = cv.CreateMat(2, 3, cv.CV_32FC1) - cv.SetData(test_mat, array.array('f', range(6)), 12) - self.assertEqual(cv.GetDims(test_mat[0]), (1, 3)) - self.assertEqual(cv.GetDims(test_mat[1]), (1, 3)) - self.assertEqual(cv.GetDims(test_mat[0:1]), (1, 3)) - self.assertEqual(cv.GetDims(test_mat[1:2]), (1, 3)) - self.assertEqual(cv.GetDims(test_mat[-1:]), (1, 3)) - self.assertEqual(cv.GetDims(test_mat[-1]), (1, 3)) - - def xxxtest_corners(self): - a = cv.LoadImage("foo-mono.png", 0) - cv.AdaptiveThreshold(a, a, 255, param1=5) - scribble = cv.CreateImage(cv.GetSize(a), 8, 3) - cv.CvtColor(a, scribble, cv.CV_GRAY2BGR) - if 0: - eig_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1) - temp_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1) - pts = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 100, 0.04, 2, use_harris=1) - for p in pts: - cv.Circle( scribble, p, 1, cv.RGB(255,0,0), -1 ) - self.snap(scribble) - canny = cv.CreateImage(cv.GetSize(a), 8, 1) - cv.SubRS(a, 255, canny) - self.snap(canny) - li = cv.HoughLines2(canny, - cv.CreateMemStorage(), - cv.CV_HOUGH_STANDARD, - 1, - math.pi/180, - 60, - 0, - 0) - for (rho,theta) in li: - print rho,theta - c = math.cos(theta) - s = math.sin(theta) - x0 = c*rho - y0 = s*rho - cv.Line(scribble, - (x0 + 1000*(-s), y0 + 1000*c), - (x0 + -1000*(-s), y0 - 1000*c), - (0,255,0)) - self.snap(scribble) - - def test_calibration(self): - - def get_corners(mono, refine = False): - (ok, corners) = cv.FindChessboardCorners(mono, (num_x_ints, num_y_ints), cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE) - if refine and ok: - corners = cv.FindCornerSubPix(mono, corners, (5,5), (-1,-1), ( cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER, 30, 0.1 )) - return (ok, corners) - - def mk_object_points(nimages, squaresize = 1): - opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1) - for i in range(nimages): - for j in range(num_pts): - opts[i * num_pts + j, 0] = (j / num_x_ints) * squaresize - opts[i * num_pts + j, 1] = (j % num_x_ints) * squaresize - opts[i * num_pts + j, 2] = 0 - return opts - - def mk_image_points(goodcorners): - ipts = cv.CreateMat(len(goodcorners) * num_pts, 2, cv.CV_32FC1) - for (i, co) in enumerate(goodcorners): - for j in range(num_pts): - ipts[i * num_pts + j, 0] = co[j][0] - ipts[i * num_pts + j, 1] = co[j][1] - return ipts - - def mk_point_counts(nimages): - npts = cv.CreateMat(nimages, 1, cv.CV_32SC1) - for i in range(nimages): - npts[i, 0] = num_pts - return npts - - def cvmat_iterator(cvmat): - for i in range(cvmat.rows): - for j in range(cvmat.cols): - yield cvmat[i,j] - - def image_from_archive(tar, name): - member = tar.getmember(name) - filedata = tar.extractfile(member).read() - imagefiledata = cv.CreateMat(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - return cv.DecodeImageM(imagefiledata) - - urllib.urlretrieve("http://opencv.itseez.com/data/camera_calibration.tar.gz", "camera_calibration.tar.gz") - tf = tarfile.open("camera_calibration.tar.gz") - - num_x_ints = 8 - num_y_ints = 6 - num_pts = num_x_ints * num_y_ints - - leftimages = [image_from_archive(tf, "wide/left%04d.pgm" % i) for i in range(3, 15)] - size = cv.GetSize(leftimages[0]) - - # Monocular test - - if True: - corners = [get_corners(i) for i in leftimages] - goodcorners = [co for (im, (ok, co)) in zip(leftimages, 
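mk_object_points here and pattern_points in the new tests below build the same thing: one planar (x, y, 0) row per target corner. As a standalone numpy sketch (the grid ordering is illustrative; only consistency with the matching image points matters):

    import numpy as np

    num_x, num_y, square = 8, 6, 0.1
    opts = np.zeros((num_x * num_y, 3), np.float32)
    opts[:, :2] = np.indices((num_x, num_y)).T.reshape(-1, 2) * square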
corners) if ok] - - ipts = mk_image_points(goodcorners) - opts = mk_object_points(len(goodcorners), .1) - npts = mk_point_counts(len(goodcorners)) - - intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1) - distortion = cv.CreateMat(4, 1, cv.CV_64FC1) - cv.SetZero(intrinsics) - cv.SetZero(distortion) - # focal lengths have 1/1 ratio - intrinsics[0,0] = 1.0 - intrinsics[1,1] = 1.0 - cv.CalibrateCamera2(opts, ipts, npts, - cv.GetSize(leftimages[0]), - intrinsics, - distortion, - cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1), - cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1), - flags = 0) # cv.CV_CALIB_ZERO_TANGENT_DIST) - # print "D =", list(cvmat_iterator(distortion)) - # print "K =", list(cvmat_iterator(intrinsics)) - - newK = cv.CreateMat(3, 3, cv.CV_64FC1) - cv.GetOptimalNewCameraMatrix(intrinsics, distortion, size, 1.0, newK) - # print "newK =", list(cvmat_iterator(newK)) - - mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) - mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) - for K in [ intrinsics, newK ]: - cv.InitUndistortMap(K, distortion, mapx, mapy) - for img in leftimages[:1]: - r = cv.CloneMat(img) - cv.Remap(img, r, mapx, mapy) - # cv.ShowImage("snap", r) - # cv.WaitKey() - - rightimages = [image_from_archive(tf, "wide/right%04d.pgm" % i) for i in range(3, 15)] - - # Stereo test - - if True: - lcorners = [get_corners(i) for i in leftimages] - rcorners = [get_corners(i) for i in rightimages] - good = [(lco, rco) for ((lok, lco), (rok, rco)) in zip(lcorners, rcorners) if (lok and rok)] - - lipts = mk_image_points([l for (l, r) in good]) - ripts = mk_image_points([r for (l, r) in good]) - opts = mk_object_points(len(good), .108) - npts = mk_point_counts(len(good)) - - flags = cv.CV_CALIB_FIX_ASPECT_RATIO | cv.CV_CALIB_FIX_INTRINSIC - flags = cv.CV_CALIB_SAME_FOCAL_LENGTH + cv.CV_CALIB_FIX_PRINCIPAL_POINT + cv.CV_CALIB_ZERO_TANGENT_DIST - flags = 0 - - T = cv.CreateMat(3, 1, cv.CV_64FC1) - R = cv.CreateMat(3, 3, cv.CV_64FC1) - lintrinsics = cv.CreateMat(3, 3, cv.CV_64FC1) - ldistortion = cv.CreateMat(4, 1, cv.CV_64FC1) - rintrinsics = cv.CreateMat(3, 3, cv.CV_64FC1) - rdistortion = cv.CreateMat(4, 1, cv.CV_64FC1) - lR = cv.CreateMat(3, 3, cv.CV_64FC1) - rR = cv.CreateMat(3, 3, cv.CV_64FC1) - lP = cv.CreateMat(3, 4, cv.CV_64FC1) - rP = cv.CreateMat(3, 4, cv.CV_64FC1) - lmapx = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1) - lmapy = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1) - rmapx = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1) - rmapy = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1) - - cv.SetIdentity(lintrinsics) - cv.SetIdentity(rintrinsics) - lintrinsics[0,2] = size[0] * 0.5 - lintrinsics[1,2] = size[1] * 0.5 - rintrinsics[0,2] = size[0] * 0.5 - rintrinsics[1,2] = size[1] * 0.5 - cv.SetZero(ldistortion) - cv.SetZero(rdistortion) - - cv.StereoCalibrate(opts, lipts, ripts, npts, - lintrinsics, ldistortion, - rintrinsics, rdistortion, - size, - R, # R - T, # T - cv.CreateMat(3, 3, cv.CV_32FC1), # E - cv.CreateMat(3, 3, cv.CV_32FC1), # F - (cv.CV_TERMCRIT_ITER + cv.CV_TERMCRIT_EPS, 30, 1e-5), - flags) - - for a in [-1, 0, 1]: - cv.StereoRectify(lintrinsics, - rintrinsics, - ldistortion, - rdistortion, - size, - R, - T, - lR, rR, lP, rP, - alpha = a) - - cv.InitUndistortRectifyMap(lintrinsics, ldistortion, lR, lP, lmapx, lmapy) - cv.InitUndistortRectifyMap(rintrinsics, rdistortion, rR, rP, rmapx, rmapy) - - for l,r in zip(leftimages, rightimages)[:1]: - l_ = cv.CloneMat(l) - r_ = cv.CloneMat(r) - cv.Remap(l, l_, lmapx, lmapy) - cv.Remap(r, r_, rmapx, rmapy) - # cv.ShowImage("snap", l_) - # 
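The monocular branch above undistorts via InitUndistortMap + Remap; cv2.undistort rolls both steps into one call. A sketch using the same mild barrel distortion as the deleted test_remap:

    import numpy as np
    import cv2

    K = np.float64([[320, 0, 320], [0, 320, 240], [0, 0, 1]])
    dist = np.float64([0.1, 0, 0, 0])

    img = np.zeros((480, 640), np.uint8)
    img[::20, :] = 255                  # grid of horizontal and vertical lines
    img[:, ::20] = 255

    und = cv2.undistort(img, K, dist)   # InitUndistortMap + Remap in one step
    assert und.shape == img.shape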
cv.WaitKey() - - - def xxx_test_Disparity(self): - print - for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F" ]: - for c in [1,2,3,4]: - nm = "%sC%d" % (t, c) - print "int32 CV_%s=%d" % (nm, eval("cv.CV_%s" % nm)) - return - integral = cv.CreateImage((641,481), cv.IPL_DEPTH_32S, 1) - L = cv.LoadImage("f0-left.png", 0) - R = cv.LoadImage("f0-right.png", 0) - d = cv.CreateImage(cv.GetSize(L), cv.IPL_DEPTH_8U, 1) - Rn = cv.CreateImage(cv.GetSize(L), cv.IPL_DEPTH_8U, 1) - started = time.time() - for i in range(100): - cv.AbsDiff(L, R, d) - cv.Integral(d, integral) - cv.SetImageROI(R, (1, 1, 639, 479)) - cv.SetImageROI(Rn, (0, 0, 639, 479)) - cv.Copy(R, Rn) - R = Rn - cv.ResetImageROI(R) - print 1e3 * (time.time() - started) / 100, "ms" - # self.snap(d) - - def local_test_lk(self): - seq = [cv.LoadImage("track/%06d.png" % i, 0) for i in range(40)] - crit = (cv.CV_TERMCRIT_ITER, 100, 0.1) - crit = (cv.CV_TERMCRIT_EPS, 0, 0.001) - - for i in range(1,40): - r = cv.CalcOpticalFlowPyrLK(seq[0], seq[i], None, None, [(32,32)], (7,7), 0, crit, 0) - pos = r[0][0] - #print pos, r[2] - - a = cv.CreateImage((1024,1024), 8, 1) - b = cv.CreateImage((1024,1024), 8, 1) - cv.Resize(seq[0], a, cv.CV_INTER_NN) - cv.Resize(seq[i], b, cv.CV_INTER_NN) - cv.Line(a, (0, 512), (1024, 512), 255) - cv.Line(a, (512,0), (512,1024), 255) - x,y = [int(c) for c in pos] - cv.Line(b, (0, y*16), (1024, y*16), 255) - cv.Line(b, (x*16,0), (x*16,1024), 255) - #self.snapL([a,b]) - - - - def local_test_Haar(self): - import os - hcfile = os.environ['OPENCV_ROOT'] + '/share/opencv/haarcascades/haarcascade_frontalface_default.xml' - hc = cv.Load(hcfile) - img = cv.LoadImage('Stu.jpg', 0) - faces = cv.HaarDetectObjects(img, hc, cv.CreateMemStorage()) - self.assert_(len(faces) > 0) - for (x,y,w,h),n in faces: - cv.Rectangle(img, (x,y), (x+w,y+h), 255) - #self.snap(img) - - def test_create(self): - #""" CvCreateImage, CvCreateMat and the header-only form """ - for (w,h) in [ (320,400), (640,480), (1024, 768) ]: - data = "z" * (w * h) - - im = cv.CreateImage((w,h), 8, 1) - cv.SetData(im, data, w) - im2 = cv.CreateImageHeader((w,h), 8, 1) - cv.SetData(im2, data, w) - self.assertSame(im, im2) - - m = cv.CreateMat(h, w, cv.CV_8UC1) - cv.SetData(m, data, w) - m2 = cv.CreateMatHeader(h, w, cv.CV_8UC1) - cv.SetData(m2, data, w) - self.assertSame(m, m2) - - self.assertSame(im, m) - self.assertSame(im2, m2) - - - def test_casts(self): - im = cv.GetImage(self.get_sample("samples/c/lena.jpg", 0)) - data = im.tostring() - cv.SetData(im, data, cv.GetSize(im)[0]) - - start_count = sys.getrefcount(data) - - # Conversions should produce same data - self.assertSame(im, cv.GetImage(im)) - m = cv.GetMat(im) - self.assertSame(im, m) - self.assertSame(m, cv.GetImage(m)) - im2 = cv.GetImage(m) - self.assertSame(im, im2) - - self.assertEqual(sys.getrefcount(data), start_count + 2) - del im2 - self.assertEqual(sys.getrefcount(data), start_count + 1) - del m - self.assertEqual(sys.getrefcount(data), start_count) - del im - self.assertEqual(sys.getrefcount(data), start_count - 1) - - def test_morphological(self): - im = cv.CreateImage((128, 128), cv.IPL_DEPTH_8U, 1) - cv.Resize(cv.GetImage(self.get_sample("samples/c/lena.jpg", 0)), im) - dst = cv.CloneImage(im) - - # Check defaults by asserting that all these operations produce the same image - funs = [ - lambda: cv.Dilate(im, dst), - lambda: cv.Dilate(im, dst, None), - lambda: cv.Dilate(im, dst, iterations = 1), - lambda: cv.Dilate(im, dst, element = None), - lambda: cv.Dilate(im, dst, iterations = 1, element 
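The morphology checks here lean on Dilate's default arguments; the cv2 sketch below makes the same point, assuming the documented default of a 3x3 rectangular structuring element anchored at its centre when the kernel is None:

    import numpy as np
    import cv2

    img = np.random.RandomState(1).randint(0, 256, (128, 128)).astype(np.uint8)

    d1 = cv2.dilate(img, None)                        # default structuring element
    d2 = cv2.dilate(img, np.ones((3, 3), np.uint8))   # explicit 3x3 rectangle
    assert (d1 == d2).all()

    k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, k, iterations=2)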
= None), - lambda: cv.Dilate(im, dst, element = None, iterations = 1), - ] - src_h = self.hashimg(im) - hashes = set() - for f in funs: - f() - hashes.add(self.hashimg(dst)) - self.assertNotEqual(src_h, self.hashimg(dst)) - # Source image should be untouched - self.assertEqual(self.hashimg(im), src_h) - # All results should be same - self.assertEqual(len(hashes), 1) - - # self.snap(dst) - shapes = [eval("cv.CV_SHAPE_%s" % s) for s in ['RECT', 'CROSS', 'ELLIPSE']] - elements = [cv.CreateStructuringElementEx(sz, sz, sz / 2 + 1, sz / 2 + 1, shape) for sz in [3, 4, 7, 20] for shape in shapes] - elements += [cv.CreateStructuringElementEx(7, 7, 3, 3, cv.CV_SHAPE_CUSTOM, [1] * 49)] - for e in elements: - for iter in [1, 2]: - cv.Dilate(im, dst, e, iter) - cv.Erode(im, dst, e, iter) - temp = cv.CloneImage(im) - for op in ["OPEN", "CLOSE", "GRADIENT", "TOPHAT", "BLACKHAT"]: - cv.MorphologyEx(im, dst, temp, e, eval("cv.CV_MOP_%s" % op), iter) - - def test_getmat_nd(self): - # 1D CvMatND should yield (N,1) CvMat - matnd = cv.CreateMatND([13], cv.CV_8UC1) - self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (13, 1)) - - # 2D CvMatND should yield 2D CvMat - matnd = cv.CreateMatND([11, 12], cv.CV_8UC1) - self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (11, 12)) - - if 0: # XXX - ticket #149 - # 3D CvMatND should yield (N,1) CvMat - matnd = cv.CreateMatND([7, 8, 9], cv.CV_8UC1) - self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (7 * 8 * 9, 1)) - - def test_clipline(self): - self.assert_(cv.ClipLine((100,100), (-100,0), (500,0)) == ((0,0), (99,0))) - self.assert_(cv.ClipLine((100,100), (-100,0), (-200,0)) == None) - - def test_smoke_image_processing(self): - src = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE) - #dst = cv.CloneImage(src) - for aperture_size in [1, 3, 5, 7]: - dst_16s = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_16S, 1) - dst_32f = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_32F, 1) - - cv.Sobel(src, dst_16s, 1, 1, aperture_size) - cv.Laplace(src, dst_16s, aperture_size) - cv.PreCornerDetect(src, dst_32f) - eigendst = cv.CreateImage((6*cv.GetSize(src)[0], cv.GetSize(src)[1]), cv.IPL_DEPTH_32F, 1) - cv.CornerEigenValsAndVecs(src, eigendst, 8, aperture_size) - cv.CornerMinEigenVal(src, dst_32f, 8, aperture_size) - cv.CornerHarris(src, dst_32f, 8, aperture_size) - cv.CornerHarris(src, dst_32f, 8, aperture_size, 0.1) - - #self.snap(dst) - - def test_fitline(self): - cv.FitLine([ (1,1), (10,10) ], cv.CV_DIST_L2, 0, 0.01, 0.01) - cv.FitLine([ (1,1,1), (10,10,10) ], cv.CV_DIST_L2, 0, 0.01, 0.01) - a = self.get_sample("samples/c/lena.jpg", 0) - eig_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1) - temp_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1) - pts = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 100, 0.04, 2, useHarris=1) - hull = cv.ConvexHull2(pts, cv.CreateMemStorage(), return_points = 1) - cv.FitLine(hull, cv.CV_DIST_L2, 0, 0.01, 0.01) - - def test_moments(self): - im = self.get_sample("samples/c/lena.jpg", 0) - mo = cv.Moments(im) - for fld in ["m00", "m10", "m01", "m20", "m11", "m02", "m30", "m21", "m12", "m03", "mu20", "mu11", "mu02", "mu30", "mu21", "mu12", "mu03", "inv_sqrt_m00"]: - self.assert_(isinstance(getattr(mo, fld), float)) - x = getattr(mo, fld) - self.assert_(isinstance(x, float)) - - orders = [] - for x_order in range(4): - for y_order in range(4 - x_order): - orders.append((x_order, y_order)) - - # Just a smoke test for these three functions - [ cv.GetSpatialMoment(mo, xo, yo) for 
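The Hu-moment reflection property asserted in test_moments is easy to restate in cv2; a sketch with a synthetic asymmetric blob instead of lena:

    import numpy as np
    import cv2

    img = np.zeros((128, 128), np.uint8)
    cv2.circle(img, (40, 64), 20, 255, -1)
    cv2.rectangle(img, (70, 30), (110, 50), 255, -1)   # break the mirror symmetry

    hu0 = cv2.HuMoments(cv2.moments(img))
    hu1 = cv2.HuMoments(cv2.moments(cv2.flip(img, 1)))
    assert np.allclose(hu0[:6], hu1[:6], atol=1e-10)   # first six are reflection-invariant
    assert abs(hu0[6] + hu1[6]) < 1e-10                # the seventh flips sign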
(xo,yo) in orders ] - [ cv.GetCentralMoment(mo, xo, yo) for (xo,yo) in orders ] - [ cv.GetNormalizedCentralMoment(mo, xo, yo) for (xo,yo) in orders ] - - # Hu Moments we can do slightly better. Check that the first - # six are invariant wrt image reflection, and that the 7th - # is negated. - - hu0 = cv.GetHuMoments(cv.Moments(im)) - cv.Flip(im, im, 1) - hu1 = cv.GetHuMoments(cv.Moments(im)) - self.assert_(len(hu0) == 7) - self.assert_(len(hu1) == 7) - for i in range(5): - self.assert_(abs(hu0[i] - hu1[i]) < 1e-6) - self.assert_(abs(hu0[i] + hu1[i]) < 1e-6) - - def test_encode(self): - im = self.get_sample("samples/c/lena.jpg", 1) - jpeg = cv.EncodeImage(".jpeg", im) - - # Smoke jpeg compression at various qualities - sizes = dict([(qual, cv.EncodeImage(".jpeg", im, [cv.CV_IMWRITE_JPEG_QUALITY, qual]).cols) for qual in range(5, 100, 5)]) - - # Check that the default QUALITY is 95 - self.assertEqual(cv.EncodeImage(".jpeg", im).cols, sizes[95]) - - # Check that the 'round-trip' gives an image of the same size - round_trip = cv.DecodeImage(cv.EncodeImage(".jpeg", im, [cv.CV_IMWRITE_JPEG_QUALITY, 10])) - self.assert_(cv.GetSize(round_trip) == cv.GetSize(im)) - - def test_reduce(self): - srcmat = cv.CreateMat(2, 3, cv.CV_32FC1) - # 0 1 2 - # 3 4 5 - srcmat[0,0] = 0 - srcmat[0,1] = 1 - srcmat[0,2] = 2 - srcmat[1,0] = 3 - srcmat[1,1] = 4 - srcmat[1,2] = 5 - def doreduce(siz, rfunc): - dst = cv.CreateMat(siz[0], siz[1], cv.CV_32FC1) - rfunc(dst) - if siz[0] != 1: - return [dst[i,0] for i in range(siz[0])] - else: - return [dst[0,i] for i in range(siz[1])] - - # exercise dim - self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst)), [3, 5, 7]) - self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, -1)), [3, 5, 7]) - self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, 0)), [3, 5, 7]) - self.assertEqual(doreduce((2,1), lambda dst: cv.Reduce(srcmat, dst, 1)), [3, 12]) - - # exercise op - self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_SUM)), [3, 5, 7]) - self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_AVG)), [1.5, 2.5, 3.5]) - self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_MAX)), [3, 4, 5]) - self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_MIN)), [0, 1, 2]) - - # exercise both dim and op - self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, 0, cv.CV_REDUCE_MAX)), [3, 4, 5]) - self.assertEqual(doreduce((2,1), lambda dst: cv.Reduce(srcmat, dst, 1, cv.CV_REDUCE_MAX)), [2, 5]) - - def test_operations(self): - class Im: - - def __init__(self, data = None): - self.m = cv.CreateMat(1, 32, cv.CV_32FC1) - if data: - cv.SetData(self.m, array.array('f', data), 128) - - def __add__(self, other): - r = Im() - if isinstance(other, Im): - cv.Add(self.m, other.m, r.m) - else: - cv.AddS(self.m, (other,), r.m) - return r - - def __sub__(self, other): - r = Im() - if isinstance(other, Im): - cv.Sub(self.m, other.m, r.m) - else: - cv.SubS(self.m, (other,), r.m) - return r - - def __rsub__(self, other): - r = Im() - cv.SubRS(self.m, (other,), r.m) - return r - - def __mul__(self, other): - r = Im() - if isinstance(other, Im): - cv.Mul(self.m, other.m, r.m) - else: - cv.ConvertScale(self.m, r.m, other) - return r - - def __rmul__(self, other): - r = Im() - cv.ConvertScale(self.m, r.m, other) - return r - - def __div__(self, other): - r = Im() - if isinstance(other, Im): - cv.Div(self.m, other.m, r.m) - else: - 
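test_encode's round-trip idea survives unchanged in cv2 via imencode/imdecode, the same pair the new test_imencode below relies on. A sketch:

    import numpy as np
    import cv2

    im = np.random.RandomState(0).randint(0, 256, (128, 128, 3)).astype(np.uint8)

    ok, buf = cv2.imencode(".jpg", im, [cv2.IMWRITE_JPEG_QUALITY, 10])
    assert ok and buf.dtype == np.uint8

    back = cv2.imdecode(buf, 1)       # 1 = decode as 3-channel colour
    assert back.shape == im.shape     # lossy content, identical geometry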
cv.ConvertScale(self.m, r.m, 1.0 / other) - return r - - def __pow__(self, other): - r = Im() - cv.Pow(self.m, r.m, other) - return r - - def __abs__(self): - r = Im() - cv.Abs(self.m, r.m) - return r - - def __getitem__(self, i): - return self.m[0,i] - - def verify(op): - r = op(a, b) - for i in range(32): - expected = op(a[i], b[i]) - self.assertAlmostEqual(expected, r[i], 4) - - a = Im([random.randrange(1, 256) for i in range(32)]) - b = Im([random.randrange(1, 256) for i in range(32)]) - - # simple operations first - verify(lambda x, y: x + y) - verify(lambda x, y: x + 3) - verify(lambda x, y: x + 0) - verify(lambda x, y: x + -8) - - verify(lambda x, y: x - y) - verify(lambda x, y: x - 1) - verify(lambda x, y: 1 - x) - - verify(lambda x, y: abs(x)) - - verify(lambda x, y: x * y) - verify(lambda x, y: x * 3) - - verify(lambda x, y: x / y) - verify(lambda x, y: x / 2) - - for p in [-2, -1, -0.5, -0.1, 0, 0.1, 0.5, 1, 2 ]: - verify(lambda x, y: (x ** p) + (y ** p)) - - # Combinations... - verify(lambda x, y: x - 4 * abs(y)) - verify(lambda x, y: abs(y) / x) - - # a polynomial - verify(lambda x, y: 2 * x + 3 * (y ** 0.5)) - - def temp_test(self): - cv.temp_test() - - def failing_test_rand_GetStarKeypoints(self): - # GetStarKeypoints [, , (45, 0.73705234376883488, 0.64282591451367344, 0.1567738743689836, 3)] - print cv.CV_MAT_CN(0x4242400d) - mat = cv.CreateMat( 64, 64, cv.CV_32FC2) - cv.GetStarKeypoints(mat, cv.CreateMemStorage(), (45, 0.73705234376883488, 0.64282591451367344, 0.1567738743689836, 3)) - print mat - - def test_rand_PutText(self): - #""" Test for bug 2829336 """ - mat = cv.CreateMat( 64, 64, cv.CV_8UC1) - font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1) - cv.PutText(mat, chr(127), (20, 20), font, 255) - - def failing_test_rand_FindNearestPoint2D(self): - subdiv = cv.CreateSubdivDelaunay2D((0,0,100,100), cv.CreateMemStorage()) - cv.SubdivDelaunay2DInsert( subdiv, (50, 50)) - cv.CalcSubdivVoronoi2D(subdiv) - print - for e in subdiv.edges: - print e, - print " ", cv.Subdiv2DEdgeOrg(e) - print " ", cv.Subdiv2DEdgeOrg(cv.Subdiv2DRotateEdge(e, 1)), cv.Subdiv2DEdgeDst(cv.Subdiv2DRotateEdge(e, 1)) - print "nearest", cv.FindNearestPoint2D(subdiv, (1.0, 1.0)) - -class DocumentFragmentTests(OpenCVTests): - """ Test the fragments of code that are included in the documentation """ - def setUp(self): - OpenCVTests.setUp(self) - sys.path.append(".") - - def test_precornerdetect(self): - from precornerdetect import precornerdetect - im = self.get_sample("samples/cpp/right01.jpg", 0) - imf = cv.CreateMat(im.rows, im.cols, cv.CV_32FC1) - cv.ConvertScale(im, imf) - (r0,r1) = precornerdetect(imf) - for r in (r0, r1): - self.assertEqual(im.cols, r.cols) - self.assertEqual(im.rows, r.rows) - - def test_findstereocorrespondence(self): - from findstereocorrespondence import findstereocorrespondence - (l,r) = [self.get_sample("samples/cpp/tsukuba_%s.png" % c, cv.CV_LOAD_IMAGE_GRAYSCALE) for c in "lr"] - - (disparity_left, disparity_right) = findstereocorrespondence(l, r) - - disparity_left_visual = cv.CreateMat(l.rows, l.cols, cv.CV_8U) - cv.ConvertScale(disparity_left, disparity_left_visual, -16) - # self.snap(disparity_left_visual) +# Tests to run first; check the handful of basic operations that the later tests rely on - def test_calchist(self): - from calchist import hs_histogram - i1 = self.get_sample("samples/c/lena.jpg") - i2 = self.get_sample("samples/cpp/building.jpg") - i3 = cv.CloneMat(i1) - cv.Flip(i3, i3, 1) - h1 = hs_histogram(i1) - h2 = hs_histogram(i2) - h3 = hs_histogram(i3) - 
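The Im wrapper here validates old-cv arithmetic against Python operators. One behaviour worth spelling out for the cv2 era: OpenCV arithmetic saturates at the dtype limits, while numpy wraps. A sketch:

    import numpy as np
    import cv2

    x, y = np.uint8([250]), np.uint8([10])
    assert cv2.add(x, y)[0, 0] == 255   # saturating add
    assert (x + y)[0] == 4              # numpy wraps modulo 256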
self.assertEqual(self.hashimg(h1), self.hashimg(h3)) - self.assertNotEqual(self.hashimg(h1), self.hashimg(h2)) +class Hackathon244Tests(NewOpenCVTests): + + def test_int_array(self): + a = np.array([-1, 2, -3, 4, -5]) + absa0 = np.abs(a) + self.assert_(cv2.norm(a, cv2.NORM_L1) == 15) + absa1 = cv2.absdiff(a, 0) + self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0) + + def test_imencode(self): + a = np.zeros((480, 640), dtype=np.uint8) + flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90]) + self.assertEqual(flag, True) + self.assertEqual(ajpg.dtype, np.uint8) + self.assertGreater(ajpg.shape[0], 1) + self.assertEqual(ajpg.shape[1], 1) + + def test_projectPoints(self): + objpt = np.float64([[1,2,3]]) + imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([])) + imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None) + self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2)) + self.assertEqual(imgpt1.shape, imgpt0.shape) + self.assertEqual(jac0.shape, jac1.shape) + self.assertEqual(jac0.shape[0], 2*objpt.shape[0]) + + def test_estimateAffine3D(self): + pattern_size = (11, 8) + pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32) + pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2) + pattern_points *= 10 + (retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points) + self.assertEqual(retval, 1) + if cv2.norm(out[2,:]) < 1e-3: + out[2,2]=1 + self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3) + self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1]) + + def test_fast(self): + fd = cv2.FastFeatureDetector(30, True) + img = self.get_sample("samples/cpp/right02.jpg", 0) + img = cv2.medianBlur(img, 3) + imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + keypoints = fd.detect(img) + self.assert_(600 <= len(keypoints) <= 700) + for kpt in keypoints: + self.assertNotEqual(kpt.response, 0) + + def check_close_angles(self, a, b, angle_delta): + self.assert_(abs(a - b) <= angle_delta or + abs(360 - abs(a - b)) <= angle_delta) + + def check_close_pairs(self, a, b, delta): + self.assertLessEqual(abs(a[0] - b[0]), delta) + self.assertLessEqual(abs(a[1] - b[1]), delta) + + def check_close_boxes(self, a, b, delta, angle_delta): + self.check_close_pairs(a[0], b[0], delta) + self.check_close_pairs(a[1], b[1], delta) + self.check_close_angles(a[2], b[2], angle_delta) + + def test_geometry(self): + npt = 100 + np.random.seed(244) + a = np.random.randn(npt,2).astype('float32')*50 + 150 + + img = np.zeros((300, 300, 3), dtype='uint8') + be = cv2.fitEllipse(a) + br = cv2.minAreaRect(a) + mc, mr = cv2.minEnclosingCircle(a) + + be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742) + br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582) + mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977 + + self.check_close_boxes(be, be0, 5, 15) + self.check_close_boxes(br, br0, 5, 15) + self.check_close_pairs(mc, mc0, 5) + self.assertLessEqual(abs(mr - mr0), 5) if __name__ == '__main__': - print "testing", cv.__version__ + print "Testing OpenCV", cv2.__version__ random.seed(0) unittest.main() -# optlist, args = getopt.getopt(sys.argv[1:], 'l:rd') -# loops = 1 -# shuffle = 0 -# doc_frags = False -# for o,a in optlist: -# if o == '-l': -# loops = int(a) -# if o == '-r': -# shuffle = 1 -# if o == '-d': -# doc_frags = True -# -# 
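The new test_int_array relies on cv2.norm and cv2.absdiff accepting integer numpy vectors; a standalone sketch, with the dtype pinned to int32 since acceptance of 64-bit integer input can vary by build:

    import numpy as np
    import cv2

    a = np.int32([-1, 2, -3, 4, -5])
    assert cv2.norm(a, cv2.NORM_L1) == 15     # sum of absolute values
    assert cv2.norm(a, cv2.NORM_INF) == 5     # largest absolute value
    assert np.array_equal(cv2.absdiff(a, 0).ravel(), np.abs(a))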
cases = [PreliminaryTests, FunctionTests, AreaTests] -# if doc_frags: -# cases += [DocumentFragmentTests] -# everything = [(tc, t) for tc in cases for t in unittest.TestLoader().getTestCaseNames(tc) ] -# if len(args) == 0: -# # cases = [NewTests] -# args = everything -# else: -# args = [(tc, t) for (tc, t) in everything if t in args] -# -# suite = unittest.TestSuite() -# for l in range(loops): -# if shuffle: -# random.shuffle(args) -# for tc,t in args: -# suite.addTest(tc(t)) -# unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/modules/python/test/test2.py b/modules/python/test/test2.py deleted file mode 100644 index fc345f4b5b..0000000000 --- a/modules/python/test/test2.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python - -import unittest -import random -import time -import math -import sys -import array -import urllib -import tarfile -import hashlib -import os -import getopt -import operator -import functools -import numpy as np -import cv2 -import cv2.cv as cv - -class NewOpenCVTests(unittest.TestCase): - - def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR): - if not filename in self.image_cache: - filedata = urllib.urlopen("https://raw.github.com/Itseez/opencv/master/" + filename).read() - self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor) - return self.image_cache[filename] - - def setUp(self): - self.image_cache = {} - - def hashimg(self, im): - """ Compute a hash for an image, useful for image comparisons """ - return hashlib.md5(im.tostring()).digest() - - if sys.version_info[:2] == (2, 6): - def assertLess(self, a, b, msg=None): - if not a < b: - self.fail('%s not less than %s' % (repr(a), repr(b))) - - def assertLessEqual(self, a, b, msg=None): - if not a <= b: - self.fail('%s not less than or equal to %s' % (repr(a), repr(b))) - - def assertGreater(self, a, b, msg=None): - if not a > b: - self.fail('%s not greater than %s' % (repr(a), repr(b))) - -# Tests to run first; check the handful of basic operations that the later tests rely on - -class Hackathon244Tests(NewOpenCVTests): - - def test_int_array(self): - a = np.array([-1, 2, -3, 4, -5]) - absa0 = np.abs(a) - self.assert_(cv2.norm(a, cv2.NORM_L1) == 15) - absa1 = cv2.absdiff(a, 0) - self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0) - - def test_imencode(self): - a = np.zeros((480, 640), dtype=np.uint8) - flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90]) - self.assertEqual(flag, True) - self.assertEqual(ajpg.dtype, np.uint8) - self.assertGreater(ajpg.shape[0], 1) - self.assertEqual(ajpg.shape[1], 1) - - def test_projectPoints(self): - objpt = np.float64([[1,2,3]]) - imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([])) - imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None) - self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2)) - self.assertEqual(imgpt1.shape, imgpt0.shape) - self.assertEqual(jac0.shape, jac1.shape) - self.assertEqual(jac0.shape[0], 2*objpt.shape[0]) - - def test_estimateAffine3D(self): - pattern_size = (11, 8) - pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32) - pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2) - pattern_points *= 10 - (retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points) - self.assertEqual(retval, 1) - if cv2.norm(out[2,:]) < 1e-3: - out[2,2]=1 - self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3) - 
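A note on the helper the new tests rely on: `NewOpenCVTests.get_sample` in the deleted `test2.py` fetches test images over HTTP and decodes them entirely in memory with `cv2.imdecode`, so no temporary files are involved. A minimal standalone sketch of the same pattern, assuming the raw.github.com mirror used above is still reachable (`np.frombuffer` stands in for the long-deprecated `np.fromstring`):

    import urllib2  # on Python 3: from urllib.request import urlopen
    import numpy as np
    import cv2

    def fetch_sample(path, iscolor=cv2.IMREAD_COLOR):
        # download the raw bytes and decode them in memory, as get_sample() does
        data = urllib2.urlopen("https://raw.github.com/Itseez/opencv/master/" + path).read()
        return cv2.imdecode(np.frombuffer(data, dtype=np.uint8), iscolor)
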
self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1]) - - def test_fast(self): - fd = cv2.FastFeatureDetector(30, True) - img = self.get_sample("samples/cpp/right02.jpg", 0) - img = cv2.medianBlur(img, 3) - imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - keypoints = fd.detect(img) - self.assert_(600 <= len(keypoints) <= 700) - for kpt in keypoints: - self.assertNotEqual(kpt.response, 0) - - def check_close_angles(self, a, b, angle_delta): - self.assert_(abs(a - b) <= angle_delta or - abs(360 - abs(a - b)) <= angle_delta) - - def check_close_pairs(self, a, b, delta): - self.assertLessEqual(abs(a[0] - b[0]), delta) - self.assertLessEqual(abs(a[1] - b[1]), delta) - - def check_close_boxes(self, a, b, delta, angle_delta): - self.check_close_pairs(a[0], b[0], delta) - self.check_close_pairs(a[1], b[1], delta) - self.check_close_angles(a[2], b[2], angle_delta) - - def test_geometry(self): - npt = 100 - np.random.seed(244) - a = np.random.randn(npt,2).astype('float32')*50 + 150 - - img = np.zeros((300, 300, 3), dtype='uint8') - be = cv2.fitEllipse(a) - br = cv2.minAreaRect(a) - mc, mr = cv2.minEnclosingCircle(a) - - be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742) - br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582) - mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977 - - self.check_close_boxes(be, be0, 5, 15) - self.check_close_boxes(br, br0, 5, 15) - self.check_close_pairs(mc, mc0, 5) - self.assertLessEqual(abs(mr - mr0), 5) - -if __name__ == '__main__': - print "testing", cv2.__version__ - random.seed(0) - unittest.main() diff --git a/modules/video/doc/motion_analysis_and_object_tracking.rst b/modules/video/doc/motion_analysis_and_object_tracking.rst index 5b2ad82886..aef1ba4e94 100644 --- a/modules/video/doc/motion_analysis_and_object_tracking.rst +++ b/modules/video/doc/motion_analysis_and_object_tracking.rst @@ -13,7 +13,6 @@ Calculates an optical flow for a sparse feature set using the iterative Lucas-Ka .. ocv:pyfunction:: cv2.calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts[, status[, err[, winSize[, maxLevel[, criteria[, flags[, minEigThreshold]]]]]]]) -> nextPts, status, err .. ocv:cfunction:: void cvCalcOpticalFlowPyrLK( const CvArr* prev, const CvArr* curr, CvArr* prev_pyr, CvArr* curr_pyr, const CvPoint2D32f* prev_features, CvPoint2D32f* curr_features, int count, CvSize win_size, int level, char* status, float* track_error, CvTermCriteria criteria, int flags ) -.. ocv:pyoldfunction:: cv.CalcOpticalFlowPyrLK(prev, curr, prevPyr, currPyr, prevFeatures, winSize, level, criteria, flags, guesses=None) -> (currFeatures, status, track_error) :param prevImg: first 8-bit input image or pyramid constructed by :ocv:func:`buildOpticalFlowPyramid`. @@ -210,7 +209,6 @@ Updates the motion history image by a moving silhouette. .. ocv:pyfunction:: cv2.updateMotionHistory(silhouette, mhi, timestamp, duration) -> mhi .. ocv:cfunction:: void cvUpdateMotionHistory( const CvArr* silhouette, CvArr* mhi, double timestamp, double duration ) -.. ocv:pyoldfunction:: cv.UpdateMotionHistory(silhouette, mhi, timestamp, duration)-> None :param silhouette: Silhouette mask that has non-zero pixels where the motion occurs. @@ -244,7 +242,6 @@ Calculates a gradient orientation of a motion history image. .. ocv:pyfunction:: cv2.calcMotionGradient(mhi, delta1, delta2[, mask[, orientation[, apertureSize]]]) -> mask, orientation .. 
ocv:cfunction:: void cvCalcMotionGradient( const CvArr* mhi, CvArr* mask, CvArr* orientation, double delta1, double delta2, int aperture_size=3 ) -.. ocv:pyoldfunction:: cv.CalcMotionGradient(mhi, mask, orientation, delta1, delta2, apertureSize=3)-> None :param mhi: Motion history single-channel floating-point image. @@ -284,7 +281,6 @@ Calculates a global motion orientation in a selected region. .. ocv:pyfunction:: cv2.calcGlobalOrientation(orientation, mask, mhi, timestamp, duration) -> retval .. ocv:cfunction:: double cvCalcGlobalOrientation( const CvArr* orientation, const CvArr* mask, const CvArr* mhi, double timestamp, double duration ) -.. ocv:pyoldfunction:: cv.CalcGlobalOrientation(orientation, mask, mhi, timestamp, duration)-> float :param orientation: Motion gradient orientation image calculated by the function :ocv:func:`calcMotionGradient` . @@ -314,7 +310,6 @@ Splits a motion history image into a few parts corresponding to separate indepen .. ocv:pyfunction:: cv2.segmentMotion(mhi, timestamp, segThresh[, segmask]) -> segmask, boundingRects .. ocv:cfunction:: CvSeq* cvSegmentMotion( const CvArr* mhi, CvArr* seg_mask, CvMemStorage* storage, double timestamp, double seg_thresh ) -.. ocv:pyoldfunction:: cv.SegmentMotion(mhi, seg_mask, storage, timestamp, seg_thresh) -> boundingRects :param mhi: Motion history image. @@ -342,8 +337,6 @@ Finds an object center, size, and orientation. .. ocv:cfunction:: int cvCamShift( const CvArr* prob_image, CvRect window, CvTermCriteria criteria, CvConnectedComp* comp, CvBox2D* box=NULL ) -.. ocv:pyoldfunction:: cv.CamShift(prob_image, window, criteria) -> (int, comp, box) - :param probImage: Back projection of the object histogram. See :ocv:func:`calcBackProject` . :param window: Initial search window. @@ -370,7 +363,6 @@ Finds an object on a back projection image. .. ocv:pyfunction:: cv2.meanShift(probImage, window, criteria) -> retval, window .. ocv:cfunction:: int cvMeanShift( const CvArr* prob_image, CvRect window, CvTermCriteria criteria, CvConnectedComp* comp ) -.. ocv:pyoldfunction:: cv.MeanShift(prob_image, window, criteria) -> comp :param probImage: Back projection of the object histogram. See :ocv:func:`calcBackProject` for details. @@ -413,7 +405,6 @@ The constructors. .. ocv:pyfunction:: cv2.KalmanFilter([dynamParams, measureParams[, controlParams[, type]]]) -> .. ocv:cfunction:: CvKalman* cvCreateKalman( int dynam_params, int measure_params, int control_params=0 ) -.. ocv:pyoldfunction:: cv.CreateKalman(dynam_params, measure_params, control_params=0) -> CvKalman The full constructor. @@ -451,7 +442,6 @@ Computes a predicted state. .. ocv:pyfunction:: cv2.KalmanFilter.predict([control]) -> retval .. ocv:cfunction:: const CvMat* cvKalmanPredict( CvKalman* kalman, const CvMat* control=NULL) -.. ocv:pyoldfunction:: cv.KalmanPredict(kalman, control=None) -> mat :param control: The optional input control @@ -466,8 +456,6 @@ Updates the predicted state from the measurement. .. ocv:cfunction:: const CvMat* cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement ) -.. 
ocv:pyoldfunction:: cv.KalmanCorrect(kalman, measurement) -> mat - :param measurement: The measured system parameters diff --git a/samples/c/adaptiveskindetector.cpp b/samples/c/adaptiveskindetector.cpp index a561440af0..e1f777cc19 100644 --- a/samples/c/adaptiveskindetector.cpp +++ b/samples/c/adaptiveskindetector.cpp @@ -39,7 +39,7 @@ #include #include #include -#include "opencv2/contrib/contrib.hpp" +#include "opencv2/contrib/compat.hpp" #include "opencv2/highgui/highgui_c.h" static void help(char **argv) diff --git a/samples/cpp/facerec_demo.cpp b/samples/cpp/facerec_demo.cpp index 81953e3c80..3a104b86b3 100644 --- a/samples/cpp/facerec_demo.cpp +++ b/samples/cpp/facerec_demo.cpp @@ -32,7 +32,7 @@ static Mat toGrayscale(InputArray _src) { Mat src = _src.getMat(); // only allow one channel if(src.channels() != 1) { - CV_Error(CV_StsBadArg, "Only Matrices with one channel are supported"); + CV_Error(Error::StsBadArg, "Only Matrices with one channel are supported"); } // create and return normalized image Mat dst; @@ -44,7 +44,7 @@ static void read_csv(const string& filename, vector& images, vector& l std::ifstream file(filename.c_str(), ifstream::in); if (!file) { string error_message = "No valid input file was given, please check the given filename."; - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } string line, path, classlabel; while (getline(file, line)) { @@ -82,7 +82,7 @@ int main(int argc, const char *argv[]) { // Quit if there are not enough images for this demo. if(images.size() <= 1) { string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!"; - CV_Error(CV_StsError, error_message); + CV_Error(Error::StsError, error_message); } // Get the height from the first image. 
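The doc hunks above drop every `ocv:pyoldfunction::` signature while keeping the `ocv:pyfunction::` form, matching the removal of the legacy `cv2.cv` layer elsewhere in this patch. As a rough old-to-new mapping for two of the affected calls (a sketch; the placeholder inputs are assumptions, and the return layouts follow the signatures quoted above):

    import cv2
    import numpy as np

    probImage = np.zeros((240, 320), np.uint8)  # placeholder back projection
    window = (0, 0, 100, 100)
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    # old:  comp = cv.MeanShift(prob_image, window, criteria)
    retval, window = cv2.meanShift(probImage, window, criteria)
    # old:  (iters, comp, box) = cv.CamShift(prob_image, window, criteria)
    box, window = cv2.CamShift(probImage, window, criteria)
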
We'll need this // later in code to reshape the images to their original diff --git a/samples/cpp/stereo_match.cpp b/samples/cpp/stereo_match.cpp index 2d4bb5fec6..6f0b9f38b6 100644 --- a/samples/cpp/stereo_match.cpp +++ b/samples/cpp/stereo_match.cpp @@ -178,7 +178,7 @@ int main(int argc, char** argv) if( intrinsic_filename ) { // reading intrinsic parameters - FileStorage fs(intrinsic_filename, CV_STORAGE_READ); + FileStorage fs(intrinsic_filename, FileStorage::READ); if(!fs.isOpened()) { printf("Failed to open file %s\n", intrinsic_filename); @@ -194,7 +194,7 @@ int main(int argc, char** argv) M1 *= scale; M2 *= scale; - fs.open(extrinsic_filename, CV_STORAGE_READ); + fs.open(extrinsic_filename, FileStorage::READ); if(!fs.isOpened()) { printf("Failed to open file %s\n", extrinsic_filename); diff --git a/samples/gpu/cascadeclassifier.cpp b/samples/gpu/cascadeclassifier.cpp index c2e5a7af58..271e7dca85 100644 --- a/samples/gpu/cascadeclassifier.cpp +++ b/samples/gpu/cascadeclassifier.cpp @@ -59,15 +59,15 @@ static void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss Point org; org.x = 1; org.y = 3 * fontSize.height * (lineOffsY + 1) / 2; - putText(img, ss, org, fontFace, fontScale, CV_RGB(0,0,0), 5*fontThickness/2, 16); + putText(img, ss, org, fontFace, fontScale, Scalar(0,0,0), 5*fontThickness/2, 16); putText(img, ss, org, fontFace, fontScale, fontColor, fontThickness, 16); } static void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps) { - Scalar fontColorRed = CV_RGB(255,0,0); - Scalar fontColorNV = CV_RGB(118,185,0); + Scalar fontColorRed = Scalar(255,0,0); + Scalar fontColorNV = Scalar(118,185,0); ostringstream ss; ss << "FPS = " << setprecision(1) << fixed << fps; diff --git a/samples/python/camera.py b/samples/python/camera.py deleted file mode 100755 index 5d1c63b047..0000000000 --- a/samples/python/camera.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/python - -import cv2.cv as cv -import time - -cv.NamedWindow("camera", 1) - -capture = cv.CaptureFromCAM(0) - -while True: - img = cv.QueryFrame(capture) - cv.ShowImage("camera", img) - if cv.WaitKey(10) == 27: - break -cv.DestroyAllWindows() diff --git a/samples/python/camshift.py b/samples/python/camshift.py deleted file mode 100755 index 3a08b04d69..0000000000 --- a/samples/python/camshift.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python - -import cv2.cv as cv - -def is_rect_nonzero(r): - (_,_,w,h) = r - return (w > 0) and (h > 0) - -class CamShiftDemo: - - def __init__(self): - self.capture = cv.CaptureFromCAM(0) - cv.NamedWindow( "CamShiftDemo", 1 ) - cv.NamedWindow( "Histogram", 1 ) - cv.SetMouseCallback( "CamShiftDemo", self.on_mouse) - - self.drag_start = None # Set to (x,y) when mouse starts drag - self.track_window = None # Set to rect when the mouse drag finishes - - print( "Keys:\n" - " ESC - quit the program\n" - " b - switch to/from backprojection view\n" - "To initialize tracking, drag across the object with the mouse\n" ) - - def hue_histogram_as_image(self, hist): - """ Returns a nice representation of a hue histogram """ - - histimg_hsv = cv.CreateImage( (320,200), 8, 3) - - mybins = cv.CloneMatND(hist.bins) - cv.Log(mybins, mybins) - (_, hi, _, _) = cv.MinMaxLoc(mybins) - cv.ConvertScale(mybins, mybins, 255. 
/ hi) - - w,h = cv.GetSize(histimg_hsv) - hdims = cv.GetDims(mybins)[0] - for x in range(w): - xh = (180 * x) / (w - 1) # hue sweeps from 0-180 across the image - val = int(mybins[int(hdims * x / w)] * h / 255) - cv.Rectangle( histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1) - cv.Rectangle( histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1) - - histimg = cv.CreateImage( (320,200), 8, 3) - cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR) - return histimg - - def on_mouse(self, event, x, y, flags, param): - if event == cv.CV_EVENT_LBUTTONDOWN: - self.drag_start = (x, y) - if event == cv.CV_EVENT_LBUTTONUP: - self.drag_start = None - self.track_window = self.selection - if self.drag_start: - xmin = min(x, self.drag_start[0]) - ymin = min(y, self.drag_start[1]) - xmax = max(x, self.drag_start[0]) - ymax = max(y, self.drag_start[1]) - self.selection = (xmin, ymin, xmax - xmin, ymax - ymin) - - def run(self): - hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 ) - backproject_mode = False - while True: - frame = cv.QueryFrame( self.capture ) - - # Convert to HSV and keep the hue - hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) - cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) - self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1) - cv.Split(hsv, self.hue, None, None, None) - - # Compute back projection - backproject = cv.CreateImage(cv.GetSize(frame), 8, 1) - - # Run the cam-shift - cv.CalcArrBackProject( [self.hue], backproject, hist ) - if self.track_window and is_rect_nonzero(self.track_window): - crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1) - (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit) - self.track_window = rect - - # If mouse is pressed, highlight the current selected rectangle - # and recompute the histogram - - if self.drag_start and is_rect_nonzero(self.selection): - sub = cv.GetSubRect(frame, self.selection) - save = cv.CloneMat(sub) - cv.ConvertScale(frame, frame, 0.5) - cv.Copy(save, sub) - x,y,w,h = self.selection - cv.Rectangle(frame, (x,y), (x+w,y+h), (255,255,255)) - - sel = cv.GetSubRect(self.hue, self.selection ) - cv.CalcArrHist( [sel], hist, 0) - (_, max_val, _, _) = cv.GetMinMaxHistValue( hist) - if max_val != 0: - cv.ConvertScale(hist.bins, hist.bins, 255. 
/ max_val) - elif self.track_window and is_rect_nonzero(self.track_window): - cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 ) - - if not backproject_mode: - cv.ShowImage( "CamShiftDemo", frame ) - else: - cv.ShowImage( "CamShiftDemo", backproject) - cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist)) - - c = cv.WaitKey(7) % 0x100 - if c == 27: - break - elif c == ord("b"): - backproject_mode = not backproject_mode - -if __name__=="__main__": - demo = CamShiftDemo() - demo.run() - cv.DestroyAllWindows() diff --git a/samples/python/chessboard.py b/samples/python/chessboard.py deleted file mode 100755 index ce80431db5..0000000000 --- a/samples/python/chessboard.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/python -import cv2.cv as cv -import sys -import urllib2 - -if __name__ == "__main__": - cv.NamedWindow("win") - if len(sys.argv) > 1: - filename = sys.argv[1] - im = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE) - im3 = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_COLOR) - else: - try: # try opening local copy of image - fileName = '../cpp/left01.jpg' - im = cv.LoadImageM(fileName, False) - im3 = cv.LoadImageM(fileName, True) - except: # if local copy cannot be opened, try downloading it - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/left01.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - im = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE) - im3 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) - - chessboard_dim = ( 9, 6 ) - - found_all, corners = cv.FindChessboardCorners( im, chessboard_dim ) - print found_all, len(corners) - - cv.DrawChessboardCorners( im3, chessboard_dim, corners, found_all ) - - cv.ShowImage("win", im3); - cv.WaitKey() - cv.DestroyAllWindows() diff --git a/samples/python/contours.py b/samples/python/contours.py deleted file mode 100755 index d6058907e6..0000000000 --- a/samples/python/contours.py +++ /dev/null @@ -1,136 +0,0 @@ -#! 
/usr/bin/env python - -print "OpenCV Python version of contours" - -# import the necessary things for OpenCV -import cv2.cv as cv - -# some default constants -_SIZE = 500 -_DEFAULT_LEVEL = 3 - -# definition of some colors -_red = (0, 0, 255, 0); -_green = (0, 255, 0, 0); -_white = cv.RealScalar (255) -_black = cv.RealScalar (0) - -# the callback on the trackbar, to set the level of contours we want -# to display -def on_trackbar (position): - - # create the image for putting in it the founded contours - contours_image = cv.CreateImage ( (_SIZE, _SIZE), 8, 3) - - # compute the real level of display, given the current position - levels = position - 3 - - # initialisation - _contours = contours - - if levels <= 0: - # zero or negative value - # => get to the nearest face to make it look more funny - _contours = contours.h_next().h_next().h_next() - - # first, clear the image where we will draw contours - cv.SetZero (contours_image) - - # draw contours in red and green - cv.DrawContours (contours_image, _contours, - _red, _green, - levels, 3, cv.CV_AA, - (0, 0)) - - # finally, show the image - cv.ShowImage ("contours", contours_image) - -if __name__ == '__main__': - - # create the image where we want to display results - image = cv.CreateImage ( (_SIZE, _SIZE), 8, 1) - - # start with an empty image - cv.SetZero (image) - - # draw the original picture - for i in range (6): - dx = (i % 2) * 250 - 30 - dy = (i / 2) * 150 - - cv.Ellipse (image, - (dx + 150, dy + 100), - (100, 70), - 0, 0, 360, _white, -1, 8, 0) - cv.Ellipse (image, - (dx + 115, dy + 70), - (30, 20), - 0, 0, 360, _black, -1, 8, 0) - cv.Ellipse (image, - (dx + 185, dy + 70), - (30, 20), - 0, 0, 360, _black, -1, 8, 0) - cv.Ellipse (image, - (dx + 115, dy + 70), - (15, 15), - 0, 0, 360, _white, -1, 8, 0) - cv.Ellipse (image, - (dx + 185, dy + 70), - (15, 15), - 0, 0, 360, _white, -1, 8, 0) - cv.Ellipse (image, - (dx + 115, dy + 70), - (5, 5), - 0, 0, 360, _black, -1, 8, 0) - cv.Ellipse (image, - (dx + 185, dy + 70), - (5, 5), - 0, 0, 360, _black, -1, 8, 0) - cv.Ellipse (image, - (dx + 150, dy + 100), - (10, 5), - 0, 0, 360, _black, -1, 8, 0) - cv.Ellipse (image, - (dx + 150, dy + 150), - (40, 10), - 0, 0, 360, _black, -1, 8, 0) - cv.Ellipse (image, - (dx + 27, dy + 100), - (20, 35), - 0, 0, 360, _white, -1, 8, 0) - cv.Ellipse (image, - (dx + 273, dy + 100), - (20, 35), - 0, 0, 360, _white, -1, 8, 0) - - # create window and display the original picture in it - cv.NamedWindow ("image", 1) - cv.ShowImage ("image", image) - - # create the storage area - storage = cv.CreateMemStorage (0) - - # find the contours - contours = cv.FindContours(image, - storage, - cv.CV_RETR_TREE, - cv.CV_CHAIN_APPROX_SIMPLE, - (0,0)) - - # comment this out if you do not want approximation - contours = cv.ApproxPoly (contours, - storage, - cv.CV_POLY_APPROX_DP, 3, 1) - - # create the window for the contours - cv.NamedWindow ("contours", 1) - - # create the trackbar, to enable the change of the displayed level - cv.CreateTrackbar ("levels+3", "contours", 3, 7, on_trackbar) - - # call one time the callback, so we will have the 1st display done - on_trackbar (_DEFAULT_LEVEL) - - # wait a key pressed to end - cv.WaitKey (0) - cv.DestroyAllWindows() diff --git a/samples/python/convexhull.py b/samples/python/convexhull.py deleted file mode 100755 index 17423e053f..0000000000 --- a/samples/python/convexhull.py +++ /dev/null @@ -1,65 +0,0 @@ -#! 
/usr/bin/env python - -print "OpenCV Python version of convexhull" - -# import the necessary things for OpenCV -import cv2.cv as cv - -# to generate random values -import random - -# how many points we want at max -_MAX_POINTS = 100 - -if __name__ == '__main__': - - # main object to get random values from - my_random = random.Random () - - # create the image where we want to display results - image = cv.CreateImage ( (500, 500), 8, 3) - - # create the window to put the image in - cv.NamedWindow ('hull', cv.CV_WINDOW_AUTOSIZE) - - while True: - # do forever - - # get a random number of points - count = my_random.randrange (0, _MAX_POINTS) + 1 - - # initialisations - points = [] - - for i in range (count): - # generate a random point - points.append ( ( - my_random.randrange (0, image.width / 2) + image.width / 4, - my_random.randrange (0, image.width / 2) + image.width / 4 - )) - - # compute the convex hull - storage = cv.CreateMemStorage(0) - hull = cv.ConvexHull2 (points, storage, cv.CV_CLOCKWISE, 1) - - # start with an empty image - cv.SetZero (image) - - # draw all the points as circles in red - for i in range (count): - cv.Circle (image, points [i], 2, - (0, 0, 255, 0), - cv.CV_FILLED, cv.CV_AA, 0) - - # Draw the convex hull as a closed polyline in green - cv.PolyLine(image, [hull], 1, cv.RGB(0,255,0), 1, cv.CV_AA) - - # display the final image - cv.ShowImage ('hull', image) - - # handle events, and wait a key pressed - k = cv.WaitKey (0) % 0x100 - if k == 27: - # user has press the ESC key, so exit - break - cv.DestroyAllWindows() diff --git a/samples/python/cv20squares.py b/samples/python/cv20squares.py deleted file mode 100755 index e472fa1973..0000000000 --- a/samples/python/cv20squares.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/python - -""" -Find Squares in image by finding countours and filtering -""" -#Results slightly different from C version on same images, but is -#otherwise ok - -import math -import cv2.cv as cv - -def angle(pt1, pt2, pt0): - "calculate angle contained by 3 points(x, y)" - dx1 = pt1[0] - pt0[0] - dy1 = pt1[1] - pt0[1] - dx2 = pt2[0] - pt0[0] - dy2 = pt2[1] - pt0[1] - - nom = dx1*dx2 + dy1*dy2 - denom = math.sqrt( (dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10 ) - ang = nom / denom - return ang - -def is_square(contour): - """ - Squareness checker - - Square contours should: - -have 4 vertices after approximation, - -have relatively large area (to filter out noisy contours) - -be convex. 
- -have angles between sides close to 90deg (cos(ang) ~0 ) - Note: absolute value of an area is used because area may be - positive or negative - in accordance with the contour orientation - """ - - area = math.fabs( cv.ContourArea(contour) ) - isconvex = cv.CheckContourConvexity(contour) - s = 0 - if len(contour) == 4 and area > 1000 and isconvex: - for i in range(1, 4): - # find minimum angle between joint edges (maximum of cosine) - pt1 = contour[i] - pt2 = contour[i-1] - pt0 = contour[i-2] - - t = math.fabs(angle(pt0, pt1, pt2)) - if s <= t:s = t - - # if cosines of all angles are small (all angles are ~90 degree) - # then its a square - if s < 0.3:return True - - return False - -def find_squares_from_binary( gray ): - """ - use contour search to find squares in binary image - returns list of numpy arrays containing 4 points - """ - squares = [] - storage = cv.CreateMemStorage(0) - contours = cv.FindContours(gray, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE, (0,0)) - storage = cv.CreateMemStorage(0) - while contours: - #approximate contour with accuracy proportional to the contour perimeter - arclength = cv.ArcLength(contours) - polygon = cv.ApproxPoly( contours, storage, cv.CV_POLY_APPROX_DP, arclength * 0.02, 0) - if is_square(polygon): - squares.append(polygon[0:4]) - contours = contours.h_next() - - return squares - -def find_squares4(color_img): - """ - Finds multiple squares in image - - Steps: - -Use Canny edge to highlight contours, and dilation to connect - the edge segments. - -Threshold the result to binary edge tokens - -Use cv.FindContours: returns a cv.CvSequence of cv.CvContours - -Filter each candidate: use Approx poly, keep only contours with 4 vertices, - enough area, and ~90deg angles. - - Return all squares contours in one flat list of arrays, 4 x,y points each. - """ - #select even sizes only - width, height = (color_img.width & -2, color_img.height & -2 ) - timg = cv.CloneImage( color_img ) # make a copy of input image - gray = cv.CreateImage( (width,height), 8, 1 ) - - # select the maximum ROI in the image - cv.SetImageROI( timg, (0, 0, width, height) ) - - # down-scale and upscale the image to filter out the noise - pyr = cv.CreateImage( (width/2, height/2), 8, 3 ) - cv.PyrDown( timg, pyr, 7 ) - cv.PyrUp( pyr, timg, 7 ) - - tgray = cv.CreateImage( (width,height), 8, 1 ) - squares = [] - - # Find squares in every color plane of the image - # Two methods, we use both: - # 1. Canny to catch squares with gradient shading. Use upper threshold - # from slider, set the lower to 0 (which forces edges merging). Then - # dilate canny output to remove potential holes between edge segments. - # 2. Binary thresholding at multiple levels - N = 11 - for c in [0, 1, 2]: - #extract the c-th color plane - cv.SetImageCOI( timg, c+1 ); - cv.Copy( timg, tgray, None ); - cv.Canny( tgray, gray, 0, 50, 5 ) - cv.Dilate( gray, gray) - squares = squares + find_squares_from_binary( gray ) - - # Look for more squares at several threshold levels - for l in range(1, N): - cv.Threshold( tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY ) - squares = squares + find_squares_from_binary( gray ) - - return squares - - -RED = (0,0,255) -GREEN = (0,255,0) -def draw_squares( color_img, squares ): - """ - Squares is py list containing 4-pt numpy arrays. 
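The `is_square` filter deleted above (4 vertices after approximation, large area, convexity, near-90-degree corners) carries over almost mechanically to the array-based API. A minimal sketch, hedged on the `cv2.findContours` return signature, which differs across releases; the corner-angle test is omitted here for brevity:

    import cv2
    import numpy as np

    def find_squares_cv2(binary):
        # findContours returns 2 values on 2.4/4.x and 3 values on 3.x
        contours = cv2.findContours(binary, cv2.RETR_TREE,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        squares = []
        for cnt in contours:
            approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
            if (len(approx) == 4 and abs(cv2.contourArea(approx)) > 1000
                    and cv2.isContourConvex(approx)):
                squares.append(approx.reshape(-1, 2))
        return squares
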
Step through the list - and draw a polygon for each 4-group - """ - color, othercolor = RED, GREEN - for square in squares: - cv.PolyLine(color_img, [square], True, color, 3, cv.CV_AA, 0) - color, othercolor = othercolor, color - - cv.ShowImage(WNDNAME, color_img) - - -WNDNAME = "Squares Demo" -def main(): - """Open test color images, create display window, start the search""" - cv.NamedWindow(WNDNAME, 1) - for name in [ "../c/pic%d.png" % i for i in [1, 2, 3, 4, 5, 6] ]: - img0 = cv.LoadImage(name, 1) - try: - img0 - except ValueError: - print "Couldn't load %s\n" % name - continue - - # slider deleted from C version, same here and use fixed Canny param=50 - img = cv.CloneImage(img0) - - cv.ShowImage(WNDNAME, img) - - # force the image processing - draw_squares( img, find_squares4( img ) ) - - # wait for key. - if cv.WaitKey(-1) % 0x100 == 27: - break - -if __name__ == "__main__": - main() - cv.DestroyAllWindows() diff --git a/samples/python/cvutils.py b/samples/python/cvutils.py deleted file mode 100755 index a2ef8ae325..0000000000 --- a/samples/python/cvutils.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python - -import cv2.cv as cv -import urllib2 -from sys import argv - -def load_sample(name=None): - if len(argv) > 1: - img0 = cv.LoadImage(argv[1], cv.CV_LOAD_IMAGE_COLOR) - elif name is not None: - try: - img0 = cv.LoadImage(name, cv.CV_LOAD_IMAGE_COLOR) - except IOError: - urlbase = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/' - file = name.split('/')[-1] - filedata = urllib2.urlopen(urlbase+file).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) - return img0 diff --git a/samples/python/delaunay.py b/samples/python/delaunay.py deleted file mode 100755 index b26d4341c5..0000000000 --- a/samples/python/delaunay.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -""" -the script demostrates iterative construction of -delaunay triangulation and voronoi tesselation - -Original Author (C version): ? 
-Converted to Python by: Roman Stanchak -""" -import cv2.cv as cv -import random - -def draw_subdiv_point( img, fp, color ): - cv.Circle( img, (cv.Round(fp[0]), cv.Round(fp[1])), 3, color, cv.CV_FILLED, 8, 0 ); - -def draw_subdiv_edge( img, edge, color ): - org_pt = cv.Subdiv2DEdgeOrg(edge); - dst_pt = cv.Subdiv2DEdgeDst(edge); - - if org_pt and dst_pt : - - org = org_pt.pt; - dst = dst_pt.pt; - - iorg = ( cv.Round( org[0] ), cv.Round( org[1] )); - idst = ( cv.Round( dst[0] ), cv.Round( dst[1] )); - - cv.Line( img, iorg, idst, color, 1, cv.CV_AA, 0 ); - - -def draw_subdiv( img, subdiv, delaunay_color, voronoi_color ): - - for edge in subdiv.edges: - edge_rot = cv.Subdiv2DRotateEdge( edge, 1 ) - - draw_subdiv_edge( img, edge_rot, voronoi_color ); - draw_subdiv_edge( img, edge, delaunay_color ); - - -def locate_point( subdiv, fp, img, active_color ): - - (res, e0) = cv.Subdiv2DLocate( subdiv, fp ); - - if res in [ cv.CV_PTLOC_INSIDE, cv.CV_PTLOC_ON_EDGE ]: - e = e0 - while True: - draw_subdiv_edge( img, e, active_color ); - e = cv.Subdiv2DGetEdge(e, cv.CV_NEXT_AROUND_LEFT); - if e == e0: - break - - draw_subdiv_point( img, fp, active_color ); - - -def draw_subdiv_facet( img, edge ): - - t = edge; - count = 0; - - # count number of edges in facet - while count == 0 or t != edge: - count+=1 - t = cv.Subdiv2DGetEdge( t, cv.CV_NEXT_AROUND_LEFT ); - - buf = [] - - # gather points - t = edge; - for i in range(count): - assert t>4 - pt = cv.Subdiv2DEdgeOrg( t ); - if not pt: - break; - buf.append( ( cv.Round(pt.pt[0]), cv.Round(pt.pt[1]) ) ); - t = cv.Subdiv2DGetEdge( t, cv.CV_NEXT_AROUND_LEFT ); - - if( len(buf)==count ): - pt = cv.Subdiv2DEdgeDst( cv.Subdiv2DRotateEdge( edge, 1 )); - cv.FillConvexPoly( img, buf, cv.RGB(random.randrange(256),random.randrange(256),random.randrange(256)), cv.CV_AA, 0 ); - cv.PolyLine( img, [buf], 1, cv.RGB(0,0,0), 1, cv.CV_AA, 0); - draw_subdiv_point( img, pt.pt, cv.RGB(0,0,0)); - -def paint_voronoi( subdiv, img ): - - cv.CalcSubdivVoronoi2D( subdiv ); - - for edge in subdiv.edges: - - # left - draw_subdiv_facet( img, cv.Subdiv2DRotateEdge( edge, 1 )); - - # right - draw_subdiv_facet( img, cv.Subdiv2DRotateEdge( edge, 3 )); - -if __name__ == '__main__': - win = "source"; - rect = ( 0, 0, 600, 600 ); - - active_facet_color = cv.RGB( 255, 0, 0 ); - delaunay_color = cv.RGB( 0,0,0); - voronoi_color = cv.RGB(0, 180, 0); - bkgnd_color = cv.RGB(255,255,255); - - img = cv.CreateImage( (rect[2],rect[3]), 8, 3 ); - cv.Set( img, bkgnd_color ); - - cv.NamedWindow( win, 1 ); - - storage = cv.CreateMemStorage(0); - subdiv = cv.CreateSubdivDelaunay2D( rect, storage ); - - print "Delaunay triangulation will be build now interactively." 
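For reference, the interactive Delaunay/Voronoi walkthrough removed here maps onto the `cv2.Subdiv2D` class; a minimal non-interactive sketch, assuming a build that exposes `Subdiv2D` to Python (not every 2.4-era configuration does):

    import random
    import cv2

    subdiv = cv2.Subdiv2D((0, 0, 600, 600))
    for _ in range(200):
        subdiv.insert((random.uniform(5, 595), random.uniform(5, 595)))
    triangles = subdiv.getTriangleList()              # one Delaunay triangle per row
    facets, centers = subdiv.getVoronoiFacetList([])  # Voronoi polygons and sites
    print(len(triangles))
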
- print "To stop the process, press any key\n"; - - for i in range(200): - fp = ( random.random()*(rect[2]-10)+5, random.random()*(rect[3]-10)+5 ) - - locate_point( subdiv, fp, img, active_facet_color ); - cv.ShowImage( win, img ); - - if( cv.WaitKey( 100 ) >= 0 ): - break; - - cv.SubdivDelaunay2DInsert( subdiv, fp ); - cv.CalcSubdivVoronoi2D( subdiv ); - cv.Set( img, bkgnd_color ); - draw_subdiv( img, subdiv, delaunay_color, voronoi_color ); - cv.ShowImage( win, img ); - - if( cv.WaitKey( 100 ) >= 0 ): - break; - - - cv.Set( img, bkgnd_color ); - paint_voronoi( subdiv, img ); - cv.ShowImage( win, img ); - - cv.WaitKey(0); - - cv.DestroyWindow( win ); diff --git a/samples/python/demhist.py b/samples/python/demhist.py deleted file mode 100755 index 72565d6d74..0000000000 --- a/samples/python/demhist.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/python -import cv2.cv as cv -import sys -import urllib2 - -hist_size = 64 -range_0 = [0, 256] -ranges = [ range_0 ] - -class DemHist: - - def __init__(self, src_image): - self.src_image = src_image - self.dst_image = cv.CloneMat(src_image) - self.hist_image = cv.CreateImage((320, 200), 8, 1) - self.hist = cv.CreateHist([hist_size], cv.CV_HIST_ARRAY, ranges, 1) - - self.brightness = 0 - self.contrast = 0 - - cv.NamedWindow("image", 0) - cv.NamedWindow("histogram", 0) - cv.CreateTrackbar("brightness", "image", 100, 200, self.update_brightness) - cv.CreateTrackbar("contrast", "image", 100, 200, self.update_contrast) - - self.update_brightcont() - - def update_brightness(self, val): - self.brightness = val - 100 - self.update_brightcont() - - def update_contrast(self, val): - self.contrast = val - 100 - self.update_brightcont() - - def update_brightcont(self): - # The algorithm is by Werner D. Streidt - # (http://visca.com/ffactory/archives/5-99/msg00021.html) - - if self.contrast > 0: - delta = 127. * self.contrast / 100 - a = 255. / (255. - delta * 2) - b = a * (self.brightness - delta) - else: - delta = -128. * self.contrast / 100 - a = (256. - delta * 2) / 255. - b = a * self.brightness + delta - - cv.ConvertScale(self.src_image, self.dst_image, a, b) - cv.ShowImage("image", self.dst_image) - - cv.CalcArrHist([self.dst_image], self.hist) - (min_value, max_value, _, _) = cv.GetMinMaxHistValue(self.hist) - cv.Scale(self.hist.bins, self.hist.bins, float(self.hist_image.height) / max_value, 0) - - cv.Set(self.hist_image, cv.ScalarAll(255)) - bin_w = round(float(self.hist_image.width) / hist_size) - - for i in range(hist_size): - cv.Rectangle(self.hist_image, (int(i * bin_w), self.hist_image.height), - (int((i + 1) * bin_w), self.hist_image.height - cv.Round(self.hist.bins[i])), - cv.ScalarAll(0), -1, 8, 0) - - cv.ShowImage("histogram", self.hist_image) - -if __name__ == "__main__": - # Load the source image. 
- if len(sys.argv) > 1: - src_image = cv.GetMat(cv.LoadImage(sys.argv[1], 0)) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/baboon.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - src_image = cv.DecodeImageM(imagefiledata, 0) - - dh = DemHist(src_image) - - cv.WaitKey(0) - cv.DestroyAllWindows() diff --git a/samples/python/dft.py b/samples/python/dft.py deleted file mode 100755 index 0ecc50ac81..0000000000 --- a/samples/python/dft.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/python -import cv2.cv as cv -import sys -import urllib2 - -# Rearrange the quadrants of Fourier image so that the origin is at -# the image center -# src & dst arrays of equal size & type -def cvShiftDFT(src_arr, dst_arr ): - - size = cv.GetSize(src_arr) - dst_size = cv.GetSize(dst_arr) - - if dst_size != size: - cv.Error( cv.CV_StsUnmatchedSizes, "cv.ShiftDFT", "Source and Destination arrays must have equal sizes", __FILE__, __LINE__ ) - - if(src_arr is dst_arr): - tmp = cv.CreateMat(size[1]/2, size[0]/2, cv.GetElemType(src_arr)) - - cx = size[0] / 2 - cy = size[1] / 2 # image center - - q1 = cv.GetSubRect( src_arr, (0,0,cx, cy) ) - q2 = cv.GetSubRect( src_arr, (cx,0,cx,cy) ) - q3 = cv.GetSubRect( src_arr, (cx,cy,cx,cy) ) - q4 = cv.GetSubRect( src_arr, (0,cy,cx,cy) ) - d1 = cv.GetSubRect( src_arr, (0,0,cx,cy) ) - d2 = cv.GetSubRect( src_arr, (cx,0,cx,cy) ) - d3 = cv.GetSubRect( src_arr, (cx,cy,cx,cy) ) - d4 = cv.GetSubRect( src_arr, (0,cy,cx,cy) ) - - if(src_arr is not dst_arr): - if( not cv.CV_ARE_TYPES_EQ( q1, d1 )): - cv.Error( cv.CV_StsUnmatchedFormats, "cv.ShiftDFT", "Source and Destination arrays must have the same format", __FILE__, __LINE__ ) - - cv.Copy(q3, d1) - cv.Copy(q4, d2) - cv.Copy(q1, d3) - cv.Copy(q2, d4) - - else: - cv.Copy(q3, tmp) - cv.Copy(q1, q3) - cv.Copy(tmp, q1) - cv.Copy(q4, tmp) - cv.Copy(q2, q4) - cv.Copy(tmp, q2) - -if __name__ == "__main__": - - if len(sys.argv) > 1: - im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/baboon.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - im = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE) - - realInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 1) - imaginaryInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 1) - complexInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 2) - - cv.Scale(im, realInput, 1.0, 0.0) - cv.Zero(imaginaryInput) - cv.Merge(realInput, imaginaryInput, None, None, complexInput) - - dft_M = cv.GetOptimalDFTSize( im.height - 1 ) - dft_N = cv.GetOptimalDFTSize( im.width - 1 ) - - dft_A = cv.CreateMat( dft_M, dft_N, cv.CV_64FC2 ) - image_Re = cv.CreateImage( (dft_N, dft_M), cv.IPL_DEPTH_64F, 1) - image_Im = cv.CreateImage( (dft_N, dft_M), cv.IPL_DEPTH_64F, 1) - - # copy A to dft_A and pad dft_A with zeros - tmp = cv.GetSubRect( dft_A, (0,0, im.width, im.height)) - cv.Copy( complexInput, tmp, None ) - if(dft_A.width > im.width): - tmp = cv.GetSubRect( dft_A, (im.width,0, dft_N - im.width, im.height)) - cv.Zero( tmp ) - - # no need to pad bottom part of dft_A with zeros because of - # use nonzero_rows parameter in cv.FT() call below - - cv.DFT( dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height ) - - 
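The log-magnitude display that `dft.py` assembles from a dozen `cv.Pow`/`cv.Add`/`cv.Log` calls is a few lines against numpy arrays; a sketch assuming a local grayscale copy of the sample image (hypothetical filename):

    import cv2
    import numpy as np

    img = cv2.imread("baboon.jpg", 0).astype(np.float32)   # hypothetical local file
    dft = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)       # 2-channel Re/Im output
    dft = np.fft.fftshift(dft, axes=(0, 1))                # move the origin to the center
    mag = cv2.magnitude(dft[:, :, 0], dft[:, :, 1])        # sqrt(Re^2 + Im^2)
    logmag = np.log1p(mag)                                 # log(1 + Mag), as above
    disp = cv2.normalize(logmag, None, 0, 1, cv2.NORM_MINMAX)
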
cv.NamedWindow("win", 0) - cv.NamedWindow("magnitude", 0) - cv.ShowImage("win", im) - - # Split Fourier in real and imaginary parts - cv.Split( dft_A, image_Re, image_Im, None, None ) - - # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2) - cv.Pow( image_Re, image_Re, 2.0) - cv.Pow( image_Im, image_Im, 2.0) - cv.Add( image_Re, image_Im, image_Re, None) - cv.Pow( image_Re, image_Re, 0.5 ) - - # Compute log(1 + Mag) - cv.AddS( image_Re, cv.ScalarAll(1.0), image_Re, None ) # 1 + Mag - cv.Log( image_Re, image_Re ) # log(1 + Mag) - - - # Rearrange the quadrants of Fourier image so that the origin is at - # the image center - cvShiftDFT( image_Re, image_Re ) - - min, max, pt1, pt2 = cv.MinMaxLoc(image_Re) - cv.Scale(image_Re, image_Re, 1.0/(max-min), 1.0*(-min)/(max-min)) - cv.ShowImage("magnitude", image_Re) - - cv.WaitKey(0) - cv.DestroyAllWindows() diff --git a/samples/python/distrans.py b/samples/python/distrans.py deleted file mode 100755 index 38ace4419a..0000000000 --- a/samples/python/distrans.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python -import sys -import cv2.cv as cv -import urllib2 - -wndname = "Distance transform" -tbarname = "Threshold" - -# The output images -dist = 0 -dist8u1 = 0 -dist8u2 = 0 -dist8u = 0 -dist32s = 0 - -gray = 0 -edge = 0 - -# define a trackbar callback -def on_trackbar(edge_thresh): - - cv.Threshold(gray, edge, float(edge_thresh), float(edge_thresh), cv.CV_THRESH_BINARY) - #Distance transform - cv.DistTransform(edge, dist, cv.CV_DIST_L2, cv.CV_DIST_MASK_5) - - cv.ConvertScale(dist, dist, 5000.0, 0) - cv.Pow(dist, dist, 0.5) - - cv.ConvertScale(dist, dist32s, 1.0, 0.5) - cv.AndS(dist32s, cv.ScalarAll(255), dist32s, None) - cv.ConvertScale(dist32s, dist8u1, 1, 0) - cv.ConvertScale(dist32s, dist32s, -1, 0) - cv.AddS(dist32s, cv.ScalarAll(255), dist32s, None) - cv.ConvertScale(dist32s, dist8u2, 1, 0) - cv.Merge(dist8u1, dist8u2, dist8u2, None, dist8u) - cv.ShowImage(wndname, dist8u) - - -if __name__ == "__main__": - edge_thresh = 100 - - if len(sys.argv) > 1: - gray = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/stuff.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - gray = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE) - - # Create the output image - dist = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32F, 1) - dist8u1 = cv.CloneImage(gray) - dist8u2 = cv.CloneImage(gray) - dist8u = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_8U, 3) - dist32s = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32S, 1) - - # Convert to grayscale - edge = cv.CloneImage(gray) - - # Create a window - cv.NamedWindow(wndname, 1) - - # create a toolbar - cv.CreateTrackbar(tbarname, wndname, edge_thresh, 255, on_trackbar) - - # Show the image - on_trackbar(edge_thresh) - - # Wait for a key stroke; the same function arranges events processing - cv.WaitKey(0) - cv.DestroyAllWindows() diff --git a/samples/python/dmtx.py b/samples/python/dmtx.py deleted file mode 100755 index 0ff2b43da5..0000000000 --- a/samples/python/dmtx.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/python - -import cv2.cv as cv -import time -from pydmtx import DataMatrix -import numpy -import sys -import math - -''' -Find 2 D barcode based on up to 3 channel datamatrix -''' - -def absnorm8(im, im8): - """ im may be any single-channel image 
type. Return an 8-bit version, absolute value, normalized so that max is 255 """ - (minVal, maxVal, _, _) = cv.MinMaxLoc(im) - cv.ConvertScaleAbs(im, im8, 255 / max(abs(minVal), abs(maxVal)), 0) - return im8 - -font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, thickness = 2, lineType = cv.CV_AA) -if 0: - started = time.time() - print dm_write.decode(bg.width, bg.height, buffer(bg.tostring()), max_count = 1, min_edge = 12, max_edge = 13, shape = DataMatrix.DmtxSymbol10x10) # , timeout = 10) - print "took", time.time() - started - -class DmtxFinder: - def __init__(self): - self.cache = {} - self.dm = DataMatrix() - - def Cached(self, name, rows, cols, type): - key = (name, rows, cols) - if not key in self.cache: - self.cache[key] = cv.CreateMat(rows, cols, type) - return self.cache[key] - - def find0(self, img): - started = time.time() - self.dm.decode(img.width, - img.height, - buffer(img.tostring()), - max_count = 4, - #min_edge = 6, - #max_edge = 19 # Units of 2 pixels - ) - print "brute", time.time() - started - found = {} - for i in range(self.dm.count()): - stats = dm_read.stats(i + 1) - print stats - found[stats[0]] = stats[1] - return found - - def find(self, img): - started = time.time() - gray = self.Cached('gray', img.height, img.width, cv.CV_8UC1) - cv.CvtColor(img, gray, cv.CV_BGR2GRAY) - - sobel = self.Cached('sobel', img.height, img.width, cv.CV_16SC1) - sobely = self.Cached('sobely', img.height, img.width, cv.CV_16SC1) - - cv.Sobel(gray, sobel, 1, 0) - cv.Sobel(gray, sobely, 0, 1) - cv.Add(sobel, sobely, sobel) - - sobel8 = self.Cached('sobel8', sobel.height, sobel.width, cv.CV_8UC1) - absnorm8(sobel, sobel8) - cv.Threshold(sobel8, sobel8, 128.0, 255.0, cv.CV_THRESH_BINARY) - - sobel_integral = self.Cached('sobel_integral', img.height + 1, img.width + 1, cv.CV_32SC1) - cv.Integral(sobel8, sobel_integral) - - d = 16 - _x1y1 = cv.GetSubRect(sobel_integral, (0, 0, sobel_integral.cols - d, sobel_integral.rows - d)) - _x1y2 = cv.GetSubRect(sobel_integral, (0, d, sobel_integral.cols - d, sobel_integral.rows - d)) - _x2y1 = cv.GetSubRect(sobel_integral, (d, 0, sobel_integral.cols - d, sobel_integral.rows - d)) - _x2y2 = cv.GetSubRect(sobel_integral, (d, d, sobel_integral.cols - d, sobel_integral.rows - d)) - - summation = cv.CloneMat(_x2y2) - cv.Sub(summation, _x1y2, summation) - cv.Sub(summation, _x2y1, summation) - cv.Add(summation, _x1y1, summation) - sum8 = self.Cached('sum8', summation.height, summation.width, cv.CV_8UC1) - absnorm8(summation, sum8) - cv.Threshold(sum8, sum8, 32.0, 255.0, cv.CV_THRESH_BINARY) - - cv.ShowImage("sum8", sum8) - seq = cv.FindContours(sum8, cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL) - subimg = cv.GetSubRect(img, (d / 2, d / 2, sum8.cols, sum8.rows)) - t_cull = time.time() - started - - seqs = [] - while seq: - seqs.append(seq) - seq = seq.h_next() - - started = time.time() - found = {} - print 'seqs', len(seqs) - for seq in seqs: - area = cv.ContourArea(seq) - if area > 1000: - rect = cv.BoundingRect(seq) - edge = int((14 / 14.) 
* math.sqrt(area) / 2 + 0.5) - candidate = cv.GetSubRect(subimg, rect) - sym = self.dm.decode(candidate.width, - candidate.height, - buffer(candidate.tostring()), - max_count = 1, - #min_edge = 6, - #max_edge = int(edge) # Units of 2 pixels - ) - if sym: - onscreen = [(d / 2 + rect[0] + x, d / 2 + rect[1] + y) for (x, y) in self.dm.stats(1)[1]] - found[sym] = onscreen - else: - print "FAILED" - t_brute = time.time() - started - print "cull took", t_cull, "brute", t_brute - return found - -bg = cv.CreateMat(1024, 1024, cv.CV_8UC3) -cv.Set(bg, cv.RGB(0, 0, 0)) -df = DmtxFinder() - -cv.NamedWindow("camera", 1) - -def mkdmtx(msg): - dm_write = DataMatrix() - dm_write.encode(msg) - pi = dm_write.image # .resize((14, 14)) - cv_im = cv.CreateImageHeader(pi.size, cv.IPL_DEPTH_8U, 3) - cv.SetData(cv_im, pi.tostring()) - return cv_im - -# test = [('WIL', (100,100))]: # , ('LOW', (250,100)), ('GAR', (300, 300)), ('AGE', (500, 300))]: - -test = [] -y = 10 -for j in range(7): - r = 28 + j * 4 - mr = r * math.sqrt(2) - y += mr * 1.8 - test += [(str(deg) + "abcdefgh"[j], (50 + deg * 11, y), math.pi * deg / 180, r) for deg in range(0, 90, 10)] - -for (msg, (x, y), angle, r) in test: - map = cv.CreateMat(2, 3, cv.CV_32FC1) - corners = [(x + r * math.cos(angle + th), y + r * math.sin(angle + th)) for th in [0, math.pi / 2, math.pi, 3 * math.pi / 4]] - src = mkdmtx(msg) - (sx, sy) = cv.GetSize(src) - cv.GetAffineTransform([(0,0), (sx, 0), (sx, sy)], corners[:3], map) - temp = cv.CreateMat(bg.rows, bg.cols, cv.CV_8UC3) - cv.Set(temp, cv.RGB(0, 0, 0)) - cv.WarpAffine(src, temp, map) - cv.Or(temp, bg, bg) - - -cv.ShowImage("comp", bg) -scribble = cv.CloneMat(bg) - -if 0: - for i in range(10): - df.find(bg) - -for (sym, coords) in df.find(bg).items(): - print sym - cv.PolyLine(scribble, [coords], 1, cv.CV_RGB(255, 0,0), 1, lineType = cv.CV_AA) - Xs = [x for (x, y) in coords] - Ys = [y for (x, y) in coords] - where = ((min(Xs) + max(Xs)) / 2, max(Ys) - 50) - cv.PutText(scribble, sym, where, font, cv.RGB(0,255, 0)) - -cv.ShowImage("results", scribble) -cv.WaitKey() -cv.DestroyAllWindows() - -sys.exit(0) - -capture = cv.CaptureFromCAM(0) -while True: - img = cv.QueryFrame(capture) - cv.ShowImage("capture", img) - print df.find(img) - cv.WaitKey(6) diff --git a/samples/python/drawing.py b/samples/python/drawing.py deleted file mode 100755 index be2c244104..0000000000 --- a/samples/python/drawing.py +++ /dev/null @@ -1,162 +0,0 @@ -#! 
/usr/bin/env python -from random import Random -import colorsys - -print "OpenCV Python version of drawing" - -import cv2.cv as cv - -def random_color(random): - """ - Return a random color - """ - icolor = random.randint(0, 0xFFFFFF) - return cv.Scalar(icolor & 0xff, (icolor >> 8) & 0xff, (icolor >> 16) & 0xff) - -if __name__ == '__main__': - - # some "constants" - width = 1000 - height = 700 - window_name = "Drawing Demo" - number = 100 - delay = 5 - line_type = cv.CV_AA # change it to 8 to see non-antialiased graphics - - # create the source image - image = cv.CreateImage( (width, height), 8, 3) - - # create window and display the original picture in it - cv.NamedWindow(window_name, 1) - cv.SetZero(image) - cv.ShowImage(window_name, image) - - # create the random number - random = Random() - - # draw some lines - for i in range(number): - pt1 = (random.randrange(-width, 2 * width), - random.randrange(-height, 2 * height)) - pt2 = (random.randrange(-width, 2 * width), - random.randrange(-height, 2 * height)) - cv.Line(image, pt1, pt2, - random_color(random), - random.randrange(0, 10), - line_type, 0) - - cv.ShowImage(window_name, image) - cv.WaitKey(delay) - - # draw some rectangles - for i in range(number): - pt1 = (random.randrange(-width, 2 * width), - random.randrange(-height, 2 * height)) - pt2 = (random.randrange(-width, 2 * width), - random.randrange(-height, 2 * height)) - cv.Rectangle(image, pt1, pt2, - random_color(random), - random.randrange(-1, 9), - line_type, 0) - - cv.ShowImage(window_name, image) - cv.WaitKey(delay) - - # draw some ellipes - for i in range(number): - pt1 = (random.randrange(-width, 2 * width), - random.randrange(-height, 2 * height)) - sz = (random.randrange(0, 200), - random.randrange(0, 200)) - angle = random.randrange(0, 1000) * 0.180 - cv.Ellipse(image, pt1, sz, angle, angle - 100, angle + 200, - random_color(random), - random.randrange(-1, 9), - line_type, 0) - - cv.ShowImage(window_name, image) - cv.WaitKey(delay) - - # init the list of polylines - nb_polylines = 2 - polylines_size = 3 - pt = [0,] * nb_polylines - for a in range(nb_polylines): - pt [a] = [0,] * polylines_size - - # draw some polylines - for i in range(number): - for a in range(nb_polylines): - for b in range(polylines_size): - pt [a][b] = (random.randrange(-width, 2 * width), - random.randrange(-height, 2 * height)) - cv.PolyLine(image, pt, 1, - random_color(random), - random.randrange(1, 9), - line_type, 0) - - cv.ShowImage(window_name, image) - cv.WaitKey(delay) - - # draw some filled polylines - for i in range(number): - for a in range(nb_polylines): - for b in range(polylines_size): - pt [a][b] = (random.randrange(-width, 2 * width), - random.randrange(-height, 2 * height)) - cv.FillPoly(image, pt, - random_color(random), - line_type, 0) - - cv.ShowImage(window_name, image) - cv.WaitKey(delay) - - # draw some circles - for i in range(number): - pt1 = (random.randrange(-width, 2 * width), - random.randrange(-height, 2 * height)) - cv.Circle(image, pt1, random.randrange(0, 300), - random_color(random), - random.randrange(-1, 9), - line_type, 0) - - cv.ShowImage(window_name, image) - cv.WaitKey(delay) - - # draw some text - for i in range(number): - pt1 = (random.randrange(-width, 2 * width), - random.randrange(-height, 2 * height)) - font = cv.InitFont(random.randrange(0, 8), - random.randrange(0, 100) * 0.05 + 0.01, - random.randrange(0, 100) * 0.05 + 0.01, - random.randrange(0, 5) * 0.1, - random.randrange(0, 10), - line_type) - - cv.PutText(image, "Testing text rendering!", - 
pt1, font, - random_color(random)) - - cv.ShowImage(window_name, image) - cv.WaitKey(delay) - - # prepare a text, and get it's properties - font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, - 3, 3, 0.0, 5, line_type) - text_size, ymin = cv.GetTextSize("OpenCV forever!", font) - pt1 = ((width - text_size[0]) / 2, (height + text_size[1]) / 2) - image2 = cv.CloneImage(image) - - # now, draw some OpenCV pub ;-) - for i in range(0, 512, 2): - cv.SubS(image2, cv.ScalarAll(i), image) - (r, g, b) = colorsys.hsv_to_rgb((i % 100) / 100., 1, 1) - cv.PutText(image, "OpenCV forever!", - pt1, font, cv.RGB(255 * r, 255 * g, 255 * b)) - cv.ShowImage(window_name, image) - cv.WaitKey(delay) - - # wait some key to end - cv.WaitKey(0) - cv.DestroyAllWindows() diff --git a/samples/python/edge.py b/samples/python/edge.py deleted file mode 100755 index 2d3b8efb63..0000000000 --- a/samples/python/edge.py +++ /dev/null @@ -1,60 +0,0 @@ -#! /usr/bin/env python - -print "OpenCV Python version of edge" - -import sys -import urllib2 -import cv2.cv as cv - -# some definitions -win_name = "Edge" -trackbar_name = "Threshold" - -# the callback on the trackbar -def on_trackbar(position): - - cv.Smooth(gray, edge, cv.CV_BLUR, 3, 3, 0) - cv.Not(gray, edge) - - # run the edge dector on gray scale - cv.Canny(gray, edge, position, position * 3, 3) - - # reset - cv.SetZero(col_edge) - - # copy edge points - cv.Copy(im, col_edge, edge) - - # show the im - cv.ShowImage(win_name, col_edge) - -if __name__ == '__main__': - if len(sys.argv) > 1: - im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) - - # create the output im - col_edge = cv.CreateImage((im.width, im.height), 8, 3) - - # convert to grayscale - gray = cv.CreateImage((im.width, im.height), 8, 1) - edge = cv.CreateImage((im.width, im.height), 8, 1) - cv.CvtColor(im, gray, cv.CV_BGR2GRAY) - - # create the window - cv.NamedWindow(win_name, cv.CV_WINDOW_AUTOSIZE) - - # create the trackbar - cv.CreateTrackbar(trackbar_name, win_name, 1, 100, on_trackbar) - - # show the im - on_trackbar(0) - - # wait a key pressed to end - cv.WaitKey(0) - cv.DestroyAllWindows() diff --git a/samples/python/facedetect.py b/samples/python/facedetect.py deleted file mode 100755 index 925b73094e..0000000000 --- a/samples/python/facedetect.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/python -""" -This program is demonstration for face and object detection using haar-like features. -The program finds faces in a camera image or video stream and displays a red box around them. - -Original C implementation by: ? -Python implementation by: Roman Stanchak, James Bowman -""" -import sys -import cv2.cv as cv -from optparse import OptionParser - -# Parameters for haar detection -# From the API: -# The default parameters (scale_factor=2, min_neighbors=3, flags=0) are tuned -# for accurate yet slow object detection. 
For a faster operation on real video -# images the settings are: -# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, -# min_size== 0: - break - else: - image = cv.LoadImage(input_name, 1) - detect_and_draw(image, cascade) - cv.WaitKey(0) - - cv.DestroyWindow("result") diff --git a/samples/python/fback.py b/samples/python/fback.py deleted file mode 100755 index 6d90925378..0000000000 --- a/samples/python/fback.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python - -from cv import * - -class FBackDemo: - def __init__(self): - self.capture = CaptureFromCAM(0) - self.mv_step = 16 - self.mv_scale = 1.5 - self.mv_color = (0, 255, 0) - self.cflow = None - self.flow = None - - NamedWindow( "Optical Flow", 1 ) - - print( "Press ESC - quit the program\n" ) - - def draw_flow(self, flow, prevgray): - """ Returns a nice representation of a hue histogram """ - - CvtColor(prevgray, self.cflow, CV_GRAY2BGR) - for y in range(0, flow.height, self.mv_step): - for x in range(0, flow.width, self.mv_step): - fx, fy = flow[y, x] - Line(self.cflow, (x,y), (x+fx,y+fy), self.mv_color) - Circle(self.cflow, (x,y), 2, self.mv_color, -1) - ShowImage("Optical Flow", self.cflow) - - def run(self): - first_frame = True - - while True: - frame = QueryFrame( self.capture ) - - if first_frame: - gray = CreateImage(GetSize(frame), 8, 1) - prev_gray = CreateImage(GetSize(frame), 8, 1) - flow = CreateImage(GetSize(frame), 32, 2) - self.cflow = CreateImage(GetSize(frame), 8, 3) - - CvtColor(frame, gray, CV_BGR2GRAY) - if not first_frame: - CalcOpticalFlowFarneback(prev_gray, gray, flow, - pyr_scale=0.5, levels=3, winsize=15, - iterations=3, poly_n=5, poly_sigma=1.2, flags=0) - self.draw_flow(flow, prev_gray) - c = WaitKey(7) - if c in [27, ord('q'), ord('Q')]: - break - prev_gray, gray = gray, prev_gray - first_frame = False - -if __name__=="__main__": - demo = FBackDemo() - demo.run() - cv.DestroyAllWindows() diff --git a/samples/python/ffilldemo.py b/samples/python/ffilldemo.py deleted file mode 100755 index 2ecf5ffa72..0000000000 --- a/samples/python/ffilldemo.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/python -import sys -import random -import urllib2 -import cv2.cv as cv - -im=None; -mask=None; -color_img=None; -gray_img0 = None; -gray_img = None; -ffill_case = 1; -lo_diff = 20 -up_diff = 20; -connectivity = 4; -is_color = 1; -is_mask = 0; -new_mask_val = 255; - -def update_lo( pos ): - lo_diff = pos -def update_up( pos ): - up_diff = pos - -def on_mouse( event, x, y, flags, param ): - - if( not color_img ): - return; - - if event == cv.CV_EVENT_LBUTTONDOWN: - my_mask = None - seed = (x,y); - if ffill_case==0: - lo = up = 0 - flags = connectivity + (new_mask_val << 8) - else: - lo = lo_diff; - up = up_diff; - flags = connectivity + (new_mask_val << 8) + cv.CV_FLOODFILL_FIXED_RANGE - b = random.randint(0,255) - g = random.randint(0,255) - r = random.randint(0,255) - - if( is_mask ): - my_mask = mask - cv.Threshold( mask, mask, 1, 128, cv.CV_THRESH_BINARY ); - - if( is_color ): - - color = cv.CV_RGB( r, g, b ); - comp = cv.FloodFill( color_img, seed, color, cv.CV_RGB( lo, lo, lo ), - cv.CV_RGB( up, up, up ), flags, my_mask ); - cv.ShowImage( "image", color_img ); - - else: - - brightness = cv.RealScalar((r*2 + g*7 + b + 5)/10); - comp = cv.FloodFill( gray_img, seed, brightness, cv.RealScalar(lo), - cv.RealScalar(up), flags, my_mask ); - cv.ShowImage( "image", gray_img ); - - - print "%g pixels were repainted" % comp[0] - - if( is_mask ): - cv.ShowImage( "mask", mask ); - - - - -if __name__ == 
"__main__": - - if len(sys.argv) > 1: - im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) - - print "Hot keys:" - print "\tESC - quit the program" - print "\tc - switch color/grayscale mode" - print "\tm - switch mask mode" - print "\tr - restore the original image" - print "\ts - use null-range floodfill" - print "\tf - use gradient floodfill with fixed(absolute) range" - print "\tg - use gradient floodfill with floating(relative) range" - print "\t4 - use 4-connectivity mode" - print "\t8 - use 8-connectivity mode" - - color_img = cv.CloneImage( im ); - gray_img0 = cv.CreateImage( (color_img.width, color_img.height), 8, 1 ); - cv.CvtColor( color_img, gray_img0, cv.CV_BGR2GRAY ); - gray_img = cv.CloneImage( gray_img0 ); - mask = cv.CreateImage( (color_img.width + 2, color_img.height + 2), 8, 1 ); - - cv.NamedWindow( "image", 1 ); - cv.CreateTrackbar( "lo_diff", "image", lo_diff, 255, update_lo); - cv.CreateTrackbar( "up_diff", "image", up_diff, 255, update_up); - - cv.SetMouseCallback( "image", on_mouse ); - - while True: - if( is_color ): - cv.ShowImage( "image", color_img ); - else: - cv.ShowImage( "image", gray_img ); - - c = cv.WaitKey(0) % 0x100 - if c == 27: - print("Exiting ..."); - sys.exit(0) - elif c == ord('c'): - if( is_color ): - - print("Grayscale mode is set"); - cv.CvtColor( color_img, gray_img, cv.CV_BGR2GRAY ); - is_color = 0; - - else: - - print("Color mode is set"); - cv.Copy( im, color_img, None ); - cv.Zero( mask ); - is_color = 1; - - elif c == ord('m'): - if( is_mask ): - cv.DestroyWindow( "mask" ); - is_mask = 0; - - else: - cv.NamedWindow( "mask", 0 ); - cv.Zero( mask ); - cv.ShowImage( "mask", mask ); - is_mask = 1; - - elif c == ord('r'): - print("Original image is restored"); - cv.Copy( im, color_img, None ); - cv.Copy( gray_img0, gray_img, None ); - cv.Zero( mask ); - elif c == ord('s'): - print("Simple floodfill mode is set"); - ffill_case = 0; - elif c == ord('f'): - print("Fixed Range floodfill mode is set"); - ffill_case = 1; - elif c == ord('g'): - print("Gradient (floating range) floodfill mode is set"); - ffill_case = 2; - elif c == ord('4'): - print("4-connectivity mode is set"); - connectivity = 4; - elif c == ord('8'): - print("8-connectivity mode is set"); - connectivity = 8; - cv.DestroyAllWindows() diff --git a/samples/python/fitellipse.py b/samples/python/fitellipse.py deleted file mode 100755 index 88a927d451..0000000000 --- a/samples/python/fitellipse.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/python -""" -This program is a demonstration of ellipse fitting. - -Trackbar controls threshold parameter. - -Gray lines are contours. Colored lines are fit ellipses. - -Original C implementation by: Denis Burenkov. 
-Python implementation by: Roman Stanchak, James Bowman -""" - -import sys -import urllib2 -import random -import cv2.cv as cv - -def contour_iterator(contour): - while contour: - yield contour - contour = contour.h_next() - -class FitEllipse: - - def __init__(self, source_image, slider_pos): - self.source_image = source_image - cv.CreateTrackbar("Threshold", "Result", slider_pos, 255, self.process_image) - self.process_image(slider_pos) - - def process_image(self, slider_pos): - """ - This function finds contours, draws them and their approximation by ellipses. - """ - stor = cv.CreateMemStorage() - - # Create the destination images - image02 = cv.CloneImage(self.source_image) - cv.Zero(image02) - image04 = cv.CreateImage(cv.GetSize(self.source_image), cv.IPL_DEPTH_8U, 3) - cv.Zero(image04) - - # Threshold the source image. This is needed for cv.FindContours(). - cv.Threshold(self.source_image, image02, slider_pos, 255, cv.CV_THRESH_BINARY) - - # Find all contours. - cont = cv.FindContours(image02, - stor, - cv.CV_RETR_LIST, - cv.CV_CHAIN_APPROX_NONE, - (0, 0)) - - for c in contour_iterator(cont): - # Number of points must be more than or equal to 6 for cv.FitEllipse2 - if len(c) >= 6: - # Copy the contour into an array of (x,y)s - PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2) - for (i, (x, y)) in enumerate(c): - PointArray2D32f[0, i] = (x, y) - - # Draw the current contour in gray - gray = cv.CV_RGB(100, 100, 100) - cv.DrawContours(image04, c, gray, gray,0,1,8,(0,0)) - - # Fit an ellipse to the current contour. - (center, size, angle) = cv.FitEllipse2(PointArray2D32f) - - # Convert ellipse data from float to integer representation. - center = (cv.Round(center[0]), cv.Round(center[1])) - size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5)) - - # Draw ellipse in random color - color = cv.CV_RGB(random.randrange(256),random.randrange(256),random.randrange(256)) - cv.Ellipse(image04, center, size, - angle, 0, 360, - color, 2, cv.CV_AA, 0) - - # Show the image (HighGUI call). - cv.ShowImage( "Result", image04 ) - - -if __name__ == '__main__': - if len(sys.argv) > 1: - source_image = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/stuff.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - source_image = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE) - - # Create windows. - cv.NamedWindow("Source", 1) - cv.NamedWindow("Result", 1) - - # Show the image. - cv.ShowImage("Source", source_image) - - fe = FitEllipse(source_image, 70) - - print "Press any key to exit" - cv.WaitKey(0) - - cv.DestroyWindow("Source") - cv.DestroyWindow("Result") diff --git a/samples/python/houghlines.py b/samples/python/houghlines.py deleted file mode 100755 index a437bfe022..0000000000 --- a/samples/python/houghlines.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/python -# This is a standalone program. Pass an image name as the first parameter of the program.
- -import sys -from math import sin, cos, sqrt, pi -import cv2.cv as cv -import urllib2 - -# toggle between CV_HOUGH_STANDARD and CV_HOUGH_PROBABILISTIC -USE_STANDARD = True - -if __name__ == "__main__": - if len(sys.argv) > 1: - filename = sys.argv[1] - src = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/doc/pics/building.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - src = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE) - - - cv.NamedWindow("Source", 1) - cv.NamedWindow("Hough", 1) - - while True: - dst = cv.CreateImage(cv.GetSize(src), 8, 1) - color_dst = cv.CreateImage(cv.GetSize(src), 8, 3) - storage = cv.CreateMemStorage(0) - lines = 0 - cv.Canny(src, dst, 50, 200, 3) - cv.CvtColor(dst, color_dst, cv.CV_GRAY2BGR) - - if USE_STANDARD: - lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1, pi / 180, 100, 0, 0) - for (rho, theta) in lines[:100]: - a = cos(theta) - b = sin(theta) - x0 = a * rho - y0 = b * rho - pt1 = (cv.Round(x0 + 1000*(-b)), cv.Round(y0 + 1000*(a))) - pt2 = (cv.Round(x0 - 1000*(-b)), cv.Round(y0 - 1000*(a))) - cv.Line(color_dst, pt1, pt2, cv.RGB(255, 0, 0), 3, 8) - else: - lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, 1, pi / 180, 50, 50, 10) - for line in lines: - cv.Line(color_dst, line[0], line[1], cv.CV_RGB(255, 0, 0), 3, 8) - - cv.ShowImage("Source", src) - cv.ShowImage("Hough", color_dst) - - k = cv.WaitKey(0) % 0x100 - if k == ord(' '): - USE_STANDARD = not USE_STANDARD - if k == 27: - break - cv.DestroyAllWindows() diff --git a/samples/python/inpaint.py b/samples/python/inpaint.py deleted file mode 100755 index fa7e9a0925..0000000000 --- a/samples/python/inpaint.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/python -import urllib2 -import sys -import cv2.cv as cv - -class Sketcher: - def __init__(self, windowname, dests): - self.prev_pt = None - self.windowname = windowname - self.dests = dests - cv.SetMouseCallback(self.windowname, self.on_mouse) - - def on_mouse(self, event, x, y, flags, param): - pt = (x, y) - if event == cv.CV_EVENT_LBUTTONUP or not (flags & cv.CV_EVENT_FLAG_LBUTTON): - self.prev_pt = None - elif event == cv.CV_EVENT_LBUTTONDOWN: - self.prev_pt = pt - elif event == cv.CV_EVENT_MOUSEMOVE and (flags & cv.CV_EVENT_FLAG_LBUTTON) : - if self.prev_pt: - for dst in self.dests: - cv.Line(dst, self.prev_pt, pt, cv.ScalarAll(255), 5, 8, 0) - self.prev_pt = pt - cv.ShowImage(self.windowname, img) - -if __name__=="__main__": - if len(sys.argv) > 1: - img0 = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) - - print "Hot keys:" - print "\tESC - quit the program" - print "\tr - restore the original image" - print "\ti or ENTER - run inpainting algorithm" - print "\t\t(before running it, paint something on the image)" - - cv.NamedWindow("image", 1) - cv.NamedWindow("inpainted image", 1) - - img = cv.CloneImage(img0) - inpainted = cv.CloneImage(img0) - inpaint_mask = cv.CreateImage(cv.GetSize(img), 8, 1) - - cv.Zero(inpaint_mask) - cv.Zero(inpainted) - cv.ShowImage("image", img) -
cv.ShowImage("inpainted image", inpainted) - - sk = Sketcher("image", [img, inpaint_mask]) - while True: - c = cv.WaitKey(0) % 0x100 - - if c == 27 or c == ord('q'): - break - - if c == ord('r'): - cv.Zero(inpaint_mask) - cv.Copy(img0, img) - cv.ShowImage("image", img) - - if c == ord('i') or c == ord('\n'): - cv.Inpaint(img, inpaint_mask, inpainted, 3, cv.CV_INPAINT_TELEA) - cv.ShowImage("inpainted image", inpainted) - cv.DestroyAllWindows() diff --git a/samples/python/kalman.py b/samples/python/kalman.py deleted file mode 100755 index cf040da5cf..0000000000 --- a/samples/python/kalman.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/python -""" - Tracking of rotating point. - Rotation speed is constant. - Both state and measurements vectors are 1D (a point angle), - Measurement is the real point angle + gaussian noise. - The real and the estimated points are connected with yellow line segment, - the real and the measured points are connected with red line segment. - (if Kalman filter works correctly, - the yellow segment should be shorter than the red one). - Pressing any key (except ESC) will reset the tracking with a different speed. - Pressing ESC will stop the program. -""" -import urllib2 -import cv2.cv as cv -from math import cos, sin, sqrt -import sys - -if __name__ == "__main__": - A = [ [1, 1], [0, 1] ] - - img = cv.CreateImage((500, 500), 8, 3) - kalman = cv.CreateKalman(2, 1, 0) - state = cv.CreateMat(2, 1, cv.CV_32FC1) # (phi, delta_phi) - process_noise = cv.CreateMat(2, 1, cv.CV_32FC1) - measurement = cv.CreateMat(1, 1, cv.CV_32FC1) - rng = cv.RNG(-1) - code = -1L - - cv.Zero(measurement) - cv.NamedWindow("Kalman", 1) - - while True: - cv.RandArr(rng, state, cv.CV_RAND_NORMAL, cv.RealScalar(0), cv.RealScalar(0.1)) - - kalman.transition_matrix[0,0] = 1 - kalman.transition_matrix[0,1] = 1 - kalman.transition_matrix[1,0] = 0 - kalman.transition_matrix[1,1] = 1 - - cv.SetIdentity(kalman.measurement_matrix, cv.RealScalar(1)) - cv.SetIdentity(kalman.process_noise_cov, cv.RealScalar(1e-5)) - cv.SetIdentity(kalman.measurement_noise_cov, cv.RealScalar(1e-1)) - cv.SetIdentity(kalman.error_cov_post, cv.RealScalar(1)) - cv.RandArr(rng, kalman.state_post, cv.CV_RAND_NORMAL, cv.RealScalar(0), cv.RealScalar(0.1)) - - - while True: - def calc_point(angle): - return (cv.Round(img.width/2 + img.width/3*cos(angle)), - cv.Round(img.height/2 - img.width/3*sin(angle))) - - state_angle = state[0,0] - state_pt = calc_point(state_angle) - - prediction = cv.KalmanPredict(kalman) - predict_angle = prediction[0, 0] - predict_pt = calc_point(predict_angle) - - cv.RandArr(rng, measurement, cv.CV_RAND_NORMAL, cv.RealScalar(0), - cv.RealScalar(sqrt(kalman.measurement_noise_cov[0, 0]))) - - # generate measurement - cv.MatMulAdd(kalman.measurement_matrix, state, measurement, measurement) - - measurement_angle = measurement[0, 0] - measurement_pt = calc_point(measurement_angle) - - # plot points - def draw_cross(center, color, d): - cv.Line(img, (center[0] - d, center[1] - d), - (center[0] + d, center[1] + d), color, 1, cv.CV_AA, 0) - cv.Line(img, (center[0] + d, center[1] - d), - (center[0] - d, center[1] + d), color, 1, cv.CV_AA, 0) - - cv.Zero(img) - draw_cross(state_pt, cv.CV_RGB(255, 255, 255), 3) - draw_cross(measurement_pt, cv.CV_RGB(255, 0,0), 3) - draw_cross(predict_pt, cv.CV_RGB(0, 255, 0), 3) - cv.Line(img, state_pt, measurement_pt, cv.CV_RGB(255, 0,0), 3, cv. CV_AA, 0) - cv.Line(img, state_pt, predict_pt, cv.CV_RGB(255, 255, 0), 3, cv. 
CV_AA, 0) - - cv.KalmanCorrect(kalman, measurement) - - cv.RandArr(rng, process_noise, cv.CV_RAND_NORMAL, cv.RealScalar(0), - cv.RealScalar(sqrt(kalman.process_noise_cov[0, 0]))) - cv.MatMulAdd(kalman.transition_matrix, state, process_noise, state) - - cv.ShowImage("Kalman", img) - - code = cv.WaitKey(100) % 0x100 - if code != -1: - break - - if code in [27, ord('q'), ord('Q')]: - break - - cv.DestroyWindow("Kalman") diff --git a/samples/python/kmeans.py b/samples/python/kmeans.py deleted file mode 100755 index 80fe424b34..0000000000 --- a/samples/python/kmeans.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/python -import urllib2 -import cv2.cv as cv -from random import randint -MAX_CLUSTERS = 5 - -if __name__ == "__main__": - - color_tab = [ - cv.CV_RGB(255, 0,0), - cv.CV_RGB(0, 255, 0), - cv.CV_RGB(100, 100, 255), - cv.CV_RGB(255, 0,255), - cv.CV_RGB(255, 255, 0)] - img = cv.CreateImage((500, 500), 8, 3) - rng = cv.RNG(-1) - - cv.NamedWindow("clusters", 1) - - while True: - cluster_count = randint(2, MAX_CLUSTERS) - sample_count = randint(1, 1000) - points = cv.CreateMat(sample_count, 1, cv.CV_32FC2) - clusters = cv.CreateMat(sample_count, 1, cv.CV_32SC1) - - # generate random sample from multigaussian distribution - for k in range(cluster_count): - center = (cv.RandInt(rng)%img.width, cv.RandInt(rng)%img.height) - first = k*sample_count/cluster_count - last = sample_count - if k != cluster_count: - last = (k+1)*sample_count/cluster_count - - point_chunk = cv.GetRows(points, first, last) - - cv.RandArr(rng, point_chunk, cv.CV_RAND_NORMAL, - cv.Scalar(center[0], center[1], 0, 0), - cv.Scalar(img.width*0.1, img.height*0.1, 0, 0)) - - - # shuffle samples - cv.RandShuffle(points, rng) - - cv.KMeans2(points, cluster_count, clusters, - (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 10, 1.0)) - - cv.Zero(img) - - for i in range(sample_count): - cluster_idx = int(clusters[i, 0]) - pt = (cv.Round(points[i, 0][0]), cv.Round(points[i, 0][1])) - cv.Circle(img, pt, 2, color_tab[cluster_idx], cv.CV_FILLED, cv.CV_AA, 0) - - cv.ShowImage("clusters", img) - - key = cv.WaitKey(0) % 0x100 - if key in [27, ord('q'), ord('Q')]: - break - - cv.DestroyWindow("clusters") diff --git a/samples/python/laplace.py b/samples/python/laplace.py deleted file mode 100755 index df4f2430b6..0000000000 --- a/samples/python/laplace.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/python -import urllib2 -import cv2.cv as cv -import sys - -if __name__ == "__main__": - laplace = None - colorlaplace = None - planes = [ None, None, None ] - capture = None - - if len(sys.argv) == 1: - capture = cv.CreateCameraCapture(0) - elif len(sys.argv) == 2 and sys.argv[1].isdigit(): - capture = cv.CreateCameraCapture(int(sys.argv[1])) - elif len(sys.argv) == 2: - capture = cv.CreateFileCapture(sys.argv[1]) - - if not capture: - print "Could not initialize capturing..." 
- sys.exit(-1) - - cv.NamedWindow("Laplacian", 1) - - while True: - frame = cv.QueryFrame(capture) - if frame: - if not laplace: - planes = [cv.CreateImage((frame.width, frame.height), 8, 1) for i in range(3)] - laplace = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_16S, 1) - colorlaplace = cv.CreateImage((frame.width, frame.height), 8, 3) - - cv.Split(frame, planes[0], planes[1], planes[2], None) - for plane in planes: - cv.Laplace(plane, laplace, 3) - cv.ConvertScaleAbs(laplace, plane, 1, 0) - - cv.Merge(planes[0], planes[1], planes[2], None, colorlaplace) - - cv.ShowImage("Laplacian", colorlaplace) - - if cv.WaitKey(10) != -1: - break - - cv.DestroyWindow("Laplacian") diff --git a/samples/python/lkdemo.py b/samples/python/lkdemo.py deleted file mode 100755 index 51485fe0a3..0000000000 --- a/samples/python/lkdemo.py +++ /dev/null @@ -1,192 +0,0 @@ -#! /usr/bin/env python - -print "OpenCV Python version of lkdemo" - -import sys - -# import the necessary things for OpenCV -import cv2.cv as cv - -############################################################################# -# some "constants" - -win_size = 10 -MAX_COUNT = 500 - -############################################################################# -# some "global" variables - -image = None -pt = None -add_remove_pt = False -flags = 0 -night_mode = False -need_to_init = False - -############################################################################# -# the mouse callback - -# the callback on the trackbar -def on_mouse (event, x, y, flags, param): - - # we will use the global pt and add_remove_pt - global pt - global add_remove_pt - - if image is None: - # not initialized, so skip - return - - if image.origin != 0: - # different origin - y = image.height - y - - if event == cv.CV_EVENT_LBUTTONDOWN: - # user has click, so memorize it - pt = (x, y) - add_remove_pt = True - -############################################################################# -# so, here is the main part of the program - -if __name__ == '__main__': - - frames = sys.argv[1:] - if frames == []: - print "usage lkdemo.py " - sys.exit(1) - - # display a small howto use it - print "Hot keys: \n" \ - "\tESC - quit the program\n" \ - "\tr - auto-initialize tracking\n" \ - "\tc - delete all the points\n" \ - "\tn - switch the \"night\" mode on/off\n" \ - "\tSPACE - next frame\n" \ - "To add/remove a feature point click it\n" - - # first, create the necessary windows - cv.NamedWindow ('LkDemo', cv.CV_WINDOW_AUTOSIZE) - - # register the mouse callback - cv.SetMouseCallback ('LkDemo', on_mouse, None) - - fc = 0 - while 1: - # do forever - - frame = cv.LoadImage(frames[fc]) - - if image is None: - # create the images we need - image = cv.CreateImage (cv.GetSize (frame), 8, 3) - image.origin = frame.origin - grey = cv.CreateImage (cv.GetSize (frame), 8, 1) - prev_grey = cv.CreateImage (cv.GetSize (frame), 8, 1) - pyramid = cv.CreateImage (cv.GetSize (frame), 8, 1) - prev_pyramid = cv.CreateImage (cv.GetSize (frame), 8, 1) - features = [] - - # copy the frame, so we can draw on it - cv.Copy (frame, image) - - # create a grey version of the image - cv.CvtColor (image, grey, cv.CV_BGR2GRAY) - - if night_mode: - # night mode: only display the points - cv.SetZero (image) - - if need_to_init: - # we want to search all the good points - - # create the wanted images - eig = cv.CreateImage (cv.GetSize (grey), 32, 1) - temp = cv.CreateImage (cv.GetSize (grey), 32, 1) - - # the default parameters - quality = 0.01 - min_distance = 10 - - # search the good points - 
features = cv.GoodFeaturesToTrack ( - grey, eig, temp, - MAX_COUNT, - quality, min_distance, None, 3, 0, 0.04) - - # refine the corner locations - features = cv.FindCornerSubPix ( - grey, - features, - (win_size, win_size), (-1, -1), - (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03)) - - elif features != []: - # we have points, so display them - - # calculate the optical flow - features, status, track_error = cv.CalcOpticalFlowPyrLK ( - prev_grey, grey, prev_pyramid, pyramid, - features, - (win_size, win_size), 3, - (cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS, 20, 0.03), - flags) - - # set back the points we keep - features = [ p for (st,p) in zip(status, features) if st] - - if add_remove_pt: - # we have a point to add, so see if it is close to - # another one. If yes, don't use it - def ptptdist(p0, p1): - dx = p0[0] - p1[0] - dy = p0[1] - p1[1] - return dx**2 + dy**2 - if min([ ptptdist(pt, p) for p in features ]) < 25: - # too close - add_remove_pt = 0 - - # draw the points as green circles - for the_point in features: - cv.Circle (image, (int(the_point[0]), int(the_point[1])), 3, (0, 255, 0, 0), -1, 8, 0) - - if add_remove_pt: - # we want to add a point - # refine this corner location and append it to 'features' - - features += cv.FindCornerSubPix ( - grey, - [pt], - (win_size, win_size), (-1, -1), - (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, - 20, 0.03)) - # we are no longer in "add_remove_pt" mode - add_remove_pt = False - - # swapping - prev_grey, grey = grey, prev_grey - prev_pyramid, pyramid = pyramid, prev_pyramid - need_to_init = False - - # we can now display the image - cv.ShowImage ('LkDemo', image) - - # handle events - c = cv.WaitKey(10) % 0x100 - - if c == 27: - # user has press the ESC key, so exit - break - - # processing depending on the character - if 32 <= c and c < 128: - cc = chr(c).lower() - if cc == 'r': - need_to_init = True - elif cc == 'c': - features = [] - elif cc == 'n': - night_mode = not night_mode - elif cc == ' ': - fc = (fc + 1) % len(frames) - cv.DestroyAllWindows() diff --git a/samples/python/logpolar.py b/samples/python/logpolar.py deleted file mode 100755 index 338acc2ce8..0000000000 --- a/samples/python/logpolar.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/python -import sys -import urllib2 -import cv2.cv as cv - -src=None -dst=None -src2=None - -def on_mouse(event, x, y, flags, param): - - if not src: - return - - if event==cv.CV_EVENT_LBUTTONDOWN: - cv.LogPolar(src, dst, (x, y), 40, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS) - cv.LogPolar(dst, src2, (x, y), 40, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS + cv.CV_WARP_INVERSE_MAP) - cv.ShowImage("log-polar", dst) - cv.ShowImage("inverse log-polar", src2) - -if __name__ == "__main__": - - if len(sys.argv) > 1: - src = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - src = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) - - cv.NamedWindow("original", 1) - cv.NamedWindow("log-polar", 1) - cv.NamedWindow("inverse log-polar", 1) - - - dst = cv.CreateImage((256, 256), 8, 3) - src2 = cv.CreateImage(cv.GetSize(src), 8, 3) - - cv.SetMouseCallback("original", on_mouse) - on_mouse(cv.CV_EVENT_LBUTTONDOWN, src.width/2, src.height/2, None, None) - - cv.ShowImage("original", src) - cv.WaitKey() - cv.DestroyAllWindows() diff 
--git a/samples/python/minarea.py b/samples/python/minarea.py deleted file mode 100755 index 5fe03f72b9..0000000000 --- a/samples/python/minarea.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/python - -import urllib2 -import cv2.cv as cv -from random import randint - -def roundxy(pt): - return (cv.Round(pt[0]), cv.Round(pt[1])) - -def draw_common(points): - success, center, radius = cv.MinEnclosingCircle(points) - if success: - cv.Circle(img, roundxy(center), cv.Round(radius), cv.CV_RGB(255, 255, 0), 1, cv.CV_AA, 0) - - box = cv.MinAreaRect2(points) - box_vtx = [roundxy(p) for p in cv.BoxPoints(box)] - cv.PolyLine(img, [box_vtx], 1, cv.CV_RGB(0, 255, 255), 1, cv.CV_AA) - -def minarea_array(img, count): - pointMat = cv.CreateMat(count, 1, cv.CV_32SC2) - for i in range(count): - pointMat[i, 0] = (randint(img.width/4, img.width*3/4), - randint(img.height/4, img.height*3/4)) - - cv.Zero(img) - - for i in range(count): - cv.Circle(img, roundxy(pointMat[i, 0]), 2, cv.CV_RGB(255, 0, 0), cv.CV_FILLED, cv.CV_AA, 0) - - draw_common(pointMat) - -def minarea_seq(img, count, storage): - points = [(randint(img.width/4, img.width*3/4), randint(img.height/4, img.height*3/4)) for i in range(count)] - cv.Zero(img) - - for p in points: - cv.Circle(img, roundxy(p), 2, cv.CV_RGB(255, 0, 0), cv.CV_FILLED, cv.CV_AA, 0) - - draw_common(points) - -if __name__ == "__main__": - img = cv.CreateImage((500, 500), 8, 3) - storage = cv.CreateMemStorage() - - cv.NamedWindow("rect & circle", 1) - - use_seq = True - - while True: - count = randint(1, 100) - if use_seq: - minarea_seq(img, count, storage) - else: - minarea_array(img, count) - - cv.ShowImage("rect & circle", img) - key = cv.WaitKey() % 0x100 - if key in [27, ord('q'), ord('Q')]: - break - - use_seq = not use_seq - cv.DestroyAllWindows() diff --git a/samples/python/minidemo.py b/samples/python/minidemo.py deleted file mode 100755 index cbc754e933..0000000000 --- a/samples/python/minidemo.py +++ /dev/null @@ -1,13 +0,0 @@ -#!
/usr/bin/env python - -import cv2.cv as cv - -cap = cv.CreateFileCapture("../c/tree.avi") -img = cv.QueryFrame(cap) -print "Got frame of dimensions (", img.width, " x ", img.height, ")" - -cv.NamedWindow("win", cv.CV_WINDOW_AUTOSIZE) -cv.ShowImage("win", img) -cv.MoveWindow("win", 200, 200) -cv.WaitKey(0) -cv.DestroyAllWindows() diff --git a/samples/python/morphology.py b/samples/python/morphology.py deleted file mode 100755 index 6440272563..0000000000 --- a/samples/python/morphology.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/python -import sys -import urllib2 -import cv2.cv as cv - -src = 0 -image = 0 -dest = 0 -element_shape = cv.CV_SHAPE_RECT - -def Opening(pos): - element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, element_shape) - cv.Erode(src, image, element, 1) - cv.Dilate(image, dest, element, 1) - cv.ShowImage("Opening & Closing", dest) -def Closing(pos): - element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, element_shape) - cv.Dilate(src, image, element, 1) - cv.Erode(image, dest, element, 1) - cv.ShowImage("Opening & Closing", dest) -def Erosion(pos): - element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, element_shape) - cv.Erode(src, dest, element, 1) - cv.ShowImage("Erosion & Dilation", dest) -def Dilation(pos): - element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, element_shape) - cv.Dilate(src, dest, element, 1) - cv.ShowImage("Erosion & Dilation", dest) - -if __name__ == "__main__": - if len(sys.argv) > 1: - src = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - src = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) - - image = cv.CloneImage(src) - dest = cv.CloneImage(src) - cv.NamedWindow("Opening & Closing", 1) - cv.NamedWindow("Erosion & Dilation", 1) - cv.ShowImage("Opening & Closing", src) - cv.ShowImage("Erosion & Dilation", src) - cv.CreateTrackbar("Open", "Opening & Closing", 0, 10, Opening) - cv.CreateTrackbar("Close", "Opening & Closing", 0, 10, Closing) - cv.CreateTrackbar("Dilate", "Erosion & Dilation", 0, 10, Dilation) - cv.CreateTrackbar("Erode", "Erosion & Dilation", 0, 10, Erosion) - cv.WaitKey(0) - cv.DestroyWindow("Opening & Closing") - cv.DestroyWindow("Erosion & Dilation") diff --git a/samples/python/motempl.py b/samples/python/motempl.py deleted file mode 100755 index c2944c5c50..0000000000 --- a/samples/python/motempl.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/python -import urllib2 -import sys -import time -from math import cos, sin -import cv2.cv as cv - -CLOCKS_PER_SEC = 1.0 -MHI_DURATION = 1 -MAX_TIME_DELTA = 0.5 -MIN_TIME_DELTA = 0.05 -N = 4 -buf = range(10) -last = 0 -mhi = None # MHI -orient = None # orientation -mask = None # valid orientation mask -segmask = None # motion segmentation map -storage = None # temporary storage - -def update_mhi(img, dst, diff_threshold): - global last - global mhi - global storage - global mask - global orient - global segmask - timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds - size = cv.GetSize(img) # get current frame size - idx1 = last - if not mhi or cv.GetSize(mhi) != size: - for i in range(N): - buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1) - cv.Zero(buf[i]) - mhi = cv.CreateImage(size,cv. 
IPL_DEPTH_32F, 1) - cv.Zero(mhi) # clear MHI at the beginning - orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1) - segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1) - mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1) - - cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale - idx2 = (last + 1) % N # index of (last - (N-1))th frame - last = idx2 - silh = buf[idx2] - cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames - cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it - cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI - cv.CvtScale(mhi, mask, 255./MHI_DURATION, - (MHI_DURATION - timestamp)*255./MHI_DURATION) - cv.Zero(dst) - cv.Merge(mask, None, None, None, dst) - cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3) - if not storage: - storage = cv.CreateMemStorage(0) - seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA) - for (area, value, comp_rect) in seq: - if comp_rect[2] + comp_rect[3] > 100: # reject very small components - color = cv.CV_RGB(255, 0, 0) - silh_roi = cv.GetSubRect(silh, comp_rect) - mhi_roi = cv.GetSubRect(mhi, comp_rect) - orient_roi = cv.GetSubRect(orient, comp_rect) - mask_roi = cv.GetSubRect(mask, comp_rect) - angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION) - - count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI - if count < (comp_rect[2] * comp_rect[3] * 0.05): - continue - - magnitude = 30. - center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2)) - cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0) - cv.Line(dst, - center, - (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)), - cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))), - color, - 3, - cv.CV_AA, - 0) - -if __name__ == "__main__": - motion = 0 - capture = 0 - - if len(sys.argv)==1: - capture = cv.CreateCameraCapture(0) - elif len(sys.argv)==2 and sys.argv[1].isdigit(): - capture = cv.CreateCameraCapture(int(sys.argv[1])) - elif len(sys.argv)==2: - capture = cv.CreateFileCapture(sys.argv[1]) - - if not capture: - print "Could not initialize capturing..."
- sys.exit(-1) - - cv.NamedWindow("Motion", 1) - while True: - image = cv.QueryFrame(capture) - if(image): - if(not motion): - motion = cv.CreateImage((image.width, image.height), 8, 3) - cv.Zero(motion) - #motion.origin = image.origin - update_mhi(image, motion, 30) - cv.ShowImage("Motion", motion) - if(cv.WaitKey(10) != -1): - break - else: - break - cv.DestroyWindow("Motion") diff --git a/samples/python/numpy_array.py b/samples/python/numpy_array.py deleted file mode 100755 index a79eec7a95..0000000000 --- a/samples/python/numpy_array.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/python -import urllib2 -import sys -import cv2.cv as cv -import numpy - -# SRGB-linear conversions using NumPy - see http://en.wikipedia.org/wiki/SRGB - -def srgb2lin(x): - a = 0.055 - return numpy.where(x <= 0.04045, - x * (1.0 / 12.92), - numpy.power((x + a) * (1.0 / (1 + a)), 2.4)) - -def lin2srgb(x): - a = 0.055 - return numpy.where(x <= 0.0031308, - x * 12.92, - (1 + a) * numpy.power(x, 1 / 2.4) - a) - -if __name__ == "__main__": - if len(sys.argv) > 1: - img0 = cv.LoadImageM( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/lena.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - img0 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) - - cv.NamedWindow("original", 1) - cv.ShowImage("original", img0) - - # Image was originally bytes in range 0-255. Turn it into an array of floats in range 0.0 - 1.0 - n = numpy.asarray(img0) / 255.0 - - # Use NumPy to do some transformations on the image - - # Negate the image by subtracting it from 1.0 - cv.NamedWindow("negative") - cv.ShowImage("negative", cv.fromarray(1.0 - n)) - - # Assume the image was sRGB, and compute the linear version. 
- cv.NamedWindow("linear") - cv.ShowImage("linear", cv.fromarray(srgb2lin(n))) - - # Look at a subwindow - cv.NamedWindow("subwindow") - cv.ShowImage("subwindow", cv.fromarray(n[200:300,200:400])) - - # Compute the grayscale image - cv.NamedWindow("monochrome") - ln = srgb2lin(n) - red = ln[:,:,0] - grn = ln[:,:,1] - blu = ln[:,:,2] - linear_mono = 0.3 * red + 0.59 * grn + 0.11 * blu - cv.ShowImage("monochrome", cv.fromarray(lin2srgb(linear_mono))) - - # Apply a blur to the NumPy array using OpenCV - cv.NamedWindow("gaussian") - cv.Smooth(n, n, cv.CV_GAUSSIAN, 15, 15) - cv.ShowImage("gaussian", cv.fromarray(n)) - - cv.WaitKey(0) - cv.DestroyAllWindows() diff --git a/samples/python/numpy_warhol.py b/samples/python/numpy_warhol.py deleted file mode 100755 index f440abc037..0000000000 --- a/samples/python/numpy_warhol.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/python -import urllib2 -import sys -import cv2.cv as cv -import numpy - -if __name__ == "__main__": - cv.NamedWindow("camera", 1) - - capture = cv.CaptureFromCAM(0) - - paste = cv.CreateMat(960, 1280, cv.CV_8UC3) - topleft = numpy.asarray(cv.GetSubRect(paste, (0, 0, 640, 480))) - topright = numpy.asarray(cv.GetSubRect(paste, (640, 0, 640, 480))) - bottomleft = numpy.asarray(cv.GetSubRect(paste, (0, 480, 640, 480))) - bottomright = numpy.asarray(cv.GetSubRect(paste, (640, 480, 640, 480))) - - while True: - img = cv.GetMat(cv.QueryFrame(capture)) - - n = (numpy.asarray(img)).astype(numpy.uint8) - - red = n[:,:,0] - grn = n[:,:,1] - blu = n[:,:,2] - - topleft[:,:,0] = 255 - grn - topleft[:,:,1] = red - topleft[:,:,2] = blu - - topright[:,:,0] = blu - topright[:,:,1] = 255 - red - topright[:,:,2] = grn - - bottomright[:,:,0] = red - bottomright[:,:,1] = grn - bottomright[:,:,2] = 255 - blu - - fgrn = grn.astype(numpy.float32) - fred = red.astype(numpy.float32) - bottomleft[:,:,0] = blu - bottomleft[:,:,1] = (abs(fgrn - fred)).astype(numpy.uint8) - bottomleft[:,:,2] = red - - cv.ShowImage("camera", paste) - if cv.WaitKey(6) == 27: - break - cv.DestroyAllWindows() diff --git a/samples/python/peopledetect.py b/samples/python/peopledetect.py deleted file mode 100755 index 2824299791..0000000000 --- a/samples/python/peopledetect.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/python - -import sys -from cv import * - -def inside(r, q): - (rx, ry), (rw, rh) = r - (qx, qy), (qw, qh) = q - return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh - -try: - img = LoadImage(sys.argv[1]) -except: - try: - f = open(sys.argv[1], "rt") - except: - print "cannot read " + sys.argv[1] - sys.exit(-1) - imglist = list(f.readlines()) -else: - imglist = [sys.argv[1]] - -NamedWindow("people detection demo", 1) -storage = CreateMemStorage(0) - -for name in imglist: - n = name.strip() - print n - try: - img = LoadImage(n) - except: - continue - - #ClearMemStorage(storage) - found = list(HOGDetectMultiScale(img, storage, win_stride=(8,8), - padding=(32,32), scale=1.05, group_threshold=2)) - found_filtered = [] - for r in found: - insidef = False - for q in found: - if inside(r, q): - insidef = True - break - if not insidef: - found_filtered.append(r) - for r in found_filtered: - (rx, ry), (rw, rh) = r - tl = (rx + int(rw*0.1), ry + int(rh*0.07)) - br = (rx + int(rw*0.9), ry + int(rh*0.87)) - Rectangle(img, tl, br, (0, 255, 0), 3) - - ShowImage("people detection demo", img) - c = WaitKey(0) - if c == ord('q'): - break -cv.DestroyAllWindows() diff --git a/samples/python/pyramid_segmentation.py b/samples/python/pyramid_segmentation.py deleted file mode 
100755 index d9612529f6..0000000000 --- a/samples/python/pyramid_segmentation.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/python -import cv2.cv as cv - -class PyrSegmentation: - def __init__(self, img0): - self.thresh1 = 255 - self.thresh2 = 30 - self.level = 4 - self.storage = cv.CreateMemStorage() - cv.NamedWindow("Source", 0) - cv.ShowImage("Source", img0) - cv.NamedWindow("Segmentation", 0) - cv.CreateTrackbar("Thresh1", "Segmentation", self.thresh1, 255, self.set_thresh1) - cv.CreateTrackbar("Thresh2", "Segmentation", self.thresh2, 255, self.set_thresh2) - self.image0 = cv.CloneImage(img0) - self.image1 = cv.CloneImage(img0) - cv.ShowImage("Segmentation", self.image1) - - def set_thresh1(self, val): - self.thresh1 = val - self.on_segment() - - def set_thresh2(self, val): - self.thresh2 = val - self.on_segment() - - def on_segment(self): - comp = cv.PyrSegmentation(self.image0, self.image1, self.storage, \ - self.level, self.thresh1+1, self.thresh2+1) - cv.ShowImage("Segmentation", self.image1) - - def run(self): - self.on_segment() - cv.WaitKey(0) - -if __name__ == "__main__": - img0 = cv.LoadImage("../c/fruits.jpg", 1) - - # segmentation of the color image - PyrSegmentation(img0).run() - cv.DestroyAllWindows() diff --git a/samples/python/squares.py b/samples/python/squares.py deleted file mode 100755 index d5a2f20445..0000000000 --- a/samples/python/squares.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/python -# -# The full "Square Detector" program. -# It loads several images sequentially and tries to find squares in -# each image -# - -import urllib2 -from math import sqrt -import cv2.cv as cv - -thresh = 50 -img = None -img0 = None -storage = None -wndname = "Square Detection Demo" - -def angle(pt1, pt2, pt0): - dx1 = pt1.x - pt0.x - dy1 = pt1.y - pt0.y - dx2 = pt2.x - pt0.x - dy2 = pt2.y - pt0.y - return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10) - -def findSquares4(img, storage): - N = 11 - sz = (img.width & -2, img.height & -2) - timg = cv.CloneImage(img); # make a copy of input image - gray = cv.CreateImage(sz, 8, 1) - pyr = cv.CreateImage((sz.width/2, sz.height/2), 8, 3) - # create empty sequence that will contain points - - # 4 points per square (the square's vertices) - squares = cv.CreateSeq(0, sizeof_CvSeq, sizeof_CvPoint, storage) - squares = CvSeq_CvPoint.cast(squares) - - # select the maximum ROI in the image - # with the width and height divisible by 2 - subimage = cv.GetSubRect(timg, cv.Rect(0, 0, sz.width, sz.height)) - - # down-scale and upscale the image to filter out the noise - cv.PyrDown(subimage, pyr, 7) - cv.PyrUp(pyr, subimage, 7) - tgray = cv.CreateImage(sz, 8, 1) - # find squares in every color plane of the image - for c in range(3): - # extract the c-th color plane - channels = [None, None, None] - channels[c] = tgray - cv.Split(subimage, channels[0], channels[1], channels[2], None) - for l in range(N): - # hack: use Canny instead of zero threshold level. - # Canny helps to catch squares with gradient shading - if(l == 0): - # apply Canny. Take the upper threshold from slider - # and set the lower to 0 (which forces edges merging) - cv.Canny(tgray, gray, 0, thresh, 5) - # dilate canny output to remove potential - # holes between edge segments - cv.Dilate(gray, gray, None, 1) - else: - # apply threshold if l!=0: - # tgray(x, y) = gray(x, y) < (l+1)*255/N ?
255 : 0 - cv.Threshold(tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY) - - # find contours and store them all as a list - count, contours = cv.FindContours(gray, storage, sizeof_CvContour, - cv.CV_RETR_LIST, cv. CV_CHAIN_APPROX_SIMPLE, (0, 0)) - - if not contours: - continue - - # test each contour - for contour in contours.hrange(): - # approximate contour with accuracy proportional - # to the contour perimeter - result = cv.ApproxPoly(contour, sizeof_CvContour, storage, - cv.CV_POLY_APPROX_DP, cv.ContourPerimeter(contours)*0.02, 0) - # square contours should have 4 vertices after approximation - # relatively large area (to filter out noisy contours) - # and be convex. - # Note: absolute value of an area is used because - # area may be positive or negative - in accordance with the - # contour orientation - if(result.total == 4 and - abs(cv.ContourArea(result)) > 1000 and - cv.CheckContourConvexity(result)): - s = 0 - for i in range(5): - # find minimum angle between joint - # edges (maximum of cosine) - if(i >= 2): - t = abs(angle(result[i], result[i-2], result[i-1])) - if s 1: - img0 = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR) - else: - url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg' - filedata = urllib2.urlopen(url).read() - imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) - cv.SetData(imagefiledata, filedata, len(filedata)) - img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) - - rng = cv.RNG(-1) - - print "Hot keys:" - print "\tESC - quit the program" - print "\tr - restore the original image" - print "\tw - run watershed algorithm" - print "\t (before that, roughly outline several markers on the image)" - - cv.NamedWindow("image", 1) - cv.NamedWindow("watershed transform", 1) - - img = cv.CloneImage(img0) - img_gray = cv.CloneImage(img0) - wshed = cv.CloneImage(img0) - marker_mask = cv.CreateImage(cv.GetSize(img), 8, 1) - markers = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32S, 1) - - cv.CvtColor(img, marker_mask, cv.CV_BGR2GRAY) - cv.CvtColor(marker_mask, img_gray, cv.CV_GRAY2BGR) - - cv.Zero(marker_mask) - cv.Zero(wshed) - - cv.ShowImage("image", img) - cv.ShowImage("watershed transform", wshed) - - sk = Sketcher("image", [img, marker_mask]) - - while True: - c = cv.WaitKey(0) % 0x100 - if c == 27 or c == ord('q'): - break - if c == ord('r'): - cv.Zero(marker_mask) - cv.Copy(img0, img) - cv.ShowImage("image", img) - if c == ord('w'): - storage = cv.CreateMemStorage(0) - #cv.SaveImage("wshed_mask.png", marker_mask) - #marker_mask = cv.LoadImage("wshed_mask.png", 0) - contours = cv.FindContours(marker_mask, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE) - def contour_iterator(contour): - while contour: - yield contour - contour = contour.h_next() - - cv.Zero(markers) - comp_count = 0 - for c in contour_iterator(contours): - cv.DrawContours(markers, - c, - cv.ScalarAll(comp_count + 1), - cv.ScalarAll(comp_count + 1), - -1, - -1, - 8) - comp_count += 1 - - cv.Watershed(img0, markers) - - cv.Set(wshed, cv.ScalarAll(255)) - - # paint the watershed image - color_tab = [(cv.RandInt(rng) % 180 + 50, cv.RandInt(rng) % 180 + 50, cv.RandInt(rng) % 180 + 50) for i in range(comp_count)] - for j in range(markers.height): - for i in range(markers.width): - idx = markers[j, i] - if idx != -1: - wshed[j, i] = color_tab[int(idx - 1)] - - cv.AddWeighted(wshed, 0.5, img_gray, 0.5, 0, wshed) - cv.ShowImage("watershed transform", wshed) - cv.DestroyAllWindows() - diff --git a/samples/python2/_coverage.py 
b/samples/python2/_coverage.py index 80edffb997..4b23370b50 100755 --- a/samples/python2/_coverage.py +++ b/samples/python2/_coverage.py @@ -24,8 +24,3 @@ if __name__ == '__main__': r = 1.0 * len(cv2_used) / len(cv2_callable) print '\ncv2 api coverage: %d / %d (%.1f%%)' % ( len(cv2_used), len(cv2_callable), r*100 ) - - print '\nold (cv) symbols:' - for s in found: - if s.startswith('cv.'): - print s diff --git a/samples/python2/browse.py b/samples/python2/browse.py index 81ef33bada..462cd17626 100755 --- a/samples/python2/browse.py +++ b/samples/python2/browse.py @@ -37,7 +37,7 @@ if __name__ == '__main__': img = np.zeros((sz, sz), np.uint8) track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0) track = np.int32(track*10 + (sz/2, sz/2)) - cv2.polylines(img, [track], 0, 255, 1, cv2.CV_AA) + cv2.polylines(img, [track], 0, 255, 1, cv2.LINE_AA) small = img diff --git a/samples/python2/common.py b/samples/python2/common.py index 3988fe2a9e..5b13ca5c4b 100755 --- a/samples/python2/common.py +++ b/samples/python2/common.py @@ -71,8 +71,8 @@ def mtx2rvec(R): return axis * np.arctan2(s, c) def draw_str(dst, (x, y), s): - cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA) - cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA) + cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.LINE_AA) + cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.LINE_AA) class Sketcher: def __init__(self, windowname, dests, colors_func): diff --git a/samples/python2/contours.py b/samples/python2/contours.py index f81f21231c..075401b33b 100755 --- a/samples/python2/contours.py +++ b/samples/python2/contours.py @@ -53,7 +53,7 @@ if __name__ == '__main__': vis = np.zeros((h, w, 3), np.uint8) levels = levels - 3 cv2.drawContours( vis, contours, (-1, 3)[levels <= 0], (128,255,255), - 3, cv2.CV_AA, hierarchy, abs(levels) ) + 3, cv2.LINE_AA, hierarchy, abs(levels) ) cv2.imshow('contours', vis) update(3) cv2.createTrackbar( "levels+3", "contours", 3, 7, update ) diff --git a/samples/python2/deconvolution.py b/samples/python2/deconvolution.py index 8650e7b0a0..1cf5fb41ed 100755 --- a/samples/python2/deconvolution.py +++ b/samples/python2/deconvolution.py @@ -57,7 +57,7 @@ def motion_kernel(angle, d, sz=65): def defocus_kernel(d, sz=65): kern = np.zeros((sz, sz), np.uint8) - cv2.circle(kern, (sz, sz), d, 255, -1, cv2.CV_AA, shift=1) + cv2.circle(kern, (sz, sz), d, 255, -1, cv2.LINE_AA, shift=1) kern = np.float32(kern) / 255.0 return kern @@ -69,7 +69,7 @@ if __name__ == '__main__': opts = dict(opts) try: fn = args[0] - except: + except: fn = 'data/licenseplate_motion.jpg' win = 'deconvolution' @@ -78,7 +78,7 @@ if __name__ == '__main__': if img is None: print 'Failed to load fn1:', fn1 sys.exit(1) - + img = np.float32(img)/255.0 cv2.imshow('input', img) diff --git a/samples/python2/dft.py b/samples/python2/dft.py old mode 100644 new mode 100755 index 0a5ca650c1..792ebe1234 --- a/samples/python2/dft.py +++ b/samples/python2/dft.py @@ -93,7 +93,7 @@ if __name__ == "__main__": shift_dft(log_spectrum, log_spectrum) # normalize and display the results as rgb - cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.cv.CV_MINMAX) + cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX) cv2.imshow("magnitude", log_spectrum) cv2.waitKey(0) diff --git a/samples/python2/distrans.py b/samples/python2/distrans.py index 083c8d65b6..8e6fff2385 100755 
--- a/samples/python2/distrans.py +++ b/samples/python2/distrans.py @@ -14,7 +14,6 @@ Keys: import numpy as np import cv2 -import cv2.cv as cv from common import make_cmap @@ -30,7 +29,7 @@ if __name__ == '__main__': if img is None: print 'Failed to load fn:', fn sys.exit(1) - + cm = make_cmap('jet') need_update = True voronoi = False @@ -40,7 +39,7 @@ if __name__ == '__main__': need_update = False thrs = cv2.getTrackbarPos('threshold', 'distrans') mark = cv2.Canny(img, thrs, 3*thrs) - dist, labels = cv2.distanceTransformWithLabels(~mark, cv.CV_DIST_L2, 5) + dist, labels = cv2.distanceTransformWithLabels(~mark, cv2.DIST_L2, 5) if voronoi: vis = cm[np.uint8(labels)] else: diff --git a/samples/python2/facedetect.py b/samples/python2/facedetect.py index 1b6e9f1f79..a1b60d0a83 100755 --- a/samples/python2/facedetect.py +++ b/samples/python2/facedetect.py @@ -2,7 +2,6 @@ import numpy as np import cv2 -import cv2.cv as cv # local modules from video import create_capture @@ -13,7 +12,7 @@ USAGE: facedetect.py [--cascade ] [--nested-cascade ] [< ''' def detect(img, cascade): - rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags = cv.CV_HAAR_SCALE_IMAGE) + rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags = cv2.CASCADE_SCALE_IMAGE) if len(rects) == 0: return [] rects[:,2:] += rects[:,:2] diff --git a/samples/python2/fitline.py b/samples/python2/fitline.py index 876435c9c3..82b45b0232 100755 --- a/samples/python2/fitline.py +++ b/samples/python2/fitline.py @@ -42,7 +42,7 @@ def sample_line(p1, p2, n, noise=0.0): t = np.random.rand(n,1) return p1 + (p2-p1)*t + np.random.normal(size=(n, 2))*noise -dist_func_names = it.cycle('CV_DIST_L2 CV_DIST_L1 CV_DIST_L12 CV_DIST_FAIR CV_DIST_WELSCH CV_DIST_HUBER'.split()) +dist_func_names = it.cycle('DIST_L2 DIST_L1 DIST_L12 DIST_FAIR DIST_WELSCH DIST_HUBER'.split()) cur_func_name = dist_func_names.next() def update(_=None): @@ -63,7 +63,7 @@ def update(_=None): cv2.circle(img, toint(p), 2, (255, 255, 255), -1) for p in outliers: cv2.circle(img, toint(p), 2, (64, 64, 255), -1) - func = getattr(cv2.cv, cur_func_name) + func = getattr(cv2, cur_func_name) vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01) cv2.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255)) diff --git a/samples/python2/gaussian_mix.py b/samples/python2/gaussian_mix.py index 704d30cf6b..6b1b9c24d1 100755 --- a/samples/python2/gaussian_mix.py +++ b/samples/python2/gaussian_mix.py @@ -23,7 +23,7 @@ def draw_gaussain(img, mean, cov, color): w, u, vt = cv2.SVDecomp(cov) ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi) s1, s2 = np.sqrt(w)*3.0 - cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.CV_AA) + cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.LINE_AA) if __name__ == '__main__': diff --git a/samples/python2/houghcircles.py b/samples/python2/houghcircles.py index 45f4576dce..620118ccee 100755 --- a/samples/python2/houghcircles.py +++ b/samples/python2/houghcircles.py @@ -22,11 +22,11 @@ img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) img = cv2.medianBlur(img, 5) cimg = src.copy() # numpy function -circles = cv2.HoughCircles(img, cv2.cv.CV_HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30) +circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30) a, b, c = circles.shape for i in range(b): - cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv2.cv.CV_AA) - cv2.circle(cimg, 
(circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.cv.CV_AA) # draw center of circle + cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv2.LINE_AA) + cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.LINE_AA) # draw center of circle cv2.imshow("source", src) cv2.imshow("detected circles", cimg) diff --git a/samples/python2/houghlines.py b/samples/python2/houghlines.py index 9755d87223..cf67945b19 100755 --- a/samples/python2/houghlines.py +++ b/samples/python2/houghlines.py @@ -10,31 +10,31 @@ import sys import math try: - fn = sys.argv[1] + fn = sys.argv[1] except: - fn = "../cpp/pic1.png" + fn = "../cpp/pic1.png" print __doc__ src = cv2.imread(fn) dst = cv2.Canny(src, 50, 200) cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR) # HoughLines() -# lines = cv2.HoughLines(dst, 1, cv2.cv.CV_PI/180.0, 50, np.array([]), 0, 0) +# lines = cv2.HoughLines(dst, 1, math.pi/180.0, 50, np.array([]), 0, 0) # a,b,c = lines.shape # for i in range(b): -# rho = lines[0][i][0] -# theta = lines[0][i][1] -# a = math.cos(theta) -# b = math.sin(theta) -# x0, y0 = a*rho, b*rho -# pt1 = ( int(x0+1000*(-b)), int(y0+1000*(a)) ) -# pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) ) -# cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.cv.CV_AA) +# rho = lines[0][i][0] +# theta = lines[0][i][1] +# a = math.cos(theta) +# b = math.sin(theta) +# x0, y0 = a*rho, b*rho +# pt1 = ( int(x0+1000*(-b)), int(y0+1000*(a)) ) +# pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) ) +# cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA) -lines = cv2.HoughLinesP(dst, 1, cv2.cv.CV_PI/180.0, 50, np.array([]), 50, 10) +lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 50, np.array([]), 50, 10) a,b,c = lines.shape for i in range(b): - cv2.line(cdst, (lines[0][i][0], lines[0][i][1]), (lines[0][i][2], lines[0][i][3]), (0, 0, 255), 3, cv2.cv.CV_AA) + cv2.line(cdst, (lines[0][i][0], lines[0][i][1]), (lines[0][i][2], lines[0][i][3]), (0, 0, 255), 3, cv2.LINE_AA) cv2.imshow("source", src) cv2.imshow("detected lines", cdst) diff --git a/samples/python2/mouse_and_match.py b/samples/python2/mouse_and_match.py index 66c3b1af7a..f440033850 100755 --- a/samples/python2/mouse_and_match.py +++ b/samples/python2/mouse_and_match.py @@ -9,7 +9,6 @@ Demonstrate using a mouse to interact with an image: ESC to exit ''' import numpy as np -import cv2 as cv # built-in modules import os @@ -24,27 +23,27 @@ sel = (0,0,0,0) def onmouse(event, x, y, flags, param): global drag_start, sel - if event == cv.EVENT_LBUTTONDOWN: + if event == cv2.EVENT_LBUTTONDOWN: drag_start = x, y sel = 0,0,0,0 - elif event == cv.EVENT_LBUTTONUP: + elif event == cv2.EVENT_LBUTTONUP: if sel[2] > sel[0] and sel[3] > sel[1]: patch = gray[sel[1]:sel[3],sel[0]:sel[2]] - result = cv.matchTemplate(gray,patch,cv.TM_CCOEFF_NORMED) + result = cv2.matchTemplate(gray,patch,cv2.TM_CCOEFF_NORMED) result = np.abs(result)**3 - val, result = cv.threshold(result, 0.01, 0, cv.THRESH_TOZERO) - result8 = cv.normalize(result,None,0,255,cv.NORM_MINMAX,cv.CV_8U) - cv.imshow("result", result8) + val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO) + result8 = cv2.normalize(result,None,0,255,cv2.NORM_MINMAX,cv2.CV_8U) + cv2.imshow("result", result8) drag_start = None elif drag_start: #print flags - if flags & cv.EVENT_FLAG_LBUTTON: + if flags & cv2.EVENT_FLAG_LBUTTON: minpos = min(drag_start[0], x), min(drag_start[1], y) maxpos = max(drag_start[0], x), max(drag_start[1], y) sel = minpos[0], minpos[1], maxpos[0], maxpos[1] - img = cv.cvtColor(gray, 
cv.COLOR_GRAY2BGR) - cv.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1) - cv.imshow("gray", img) + img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR) + cv2.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1) + cv2.imshow("gray", img) else: print "selection is complete" drag_start = None @@ -55,21 +54,21 @@ if __name__ == '__main__': args = parser.parse_args() path = args.input - cv.namedWindow("gray",1) - cv.setMouseCallback("gray", onmouse) + cv2.namedWindow("gray",1) + cv2.setMouseCallback("gray", onmouse) '''Loop through all the images in the directory''' for infile in glob.glob( os.path.join(path, '*.*') ): ext = os.path.splitext(infile)[1][1:] #get the filename extenstion if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm": print infile - img=cv.imread(infile,1) + img=cv2.imread(infile,1) if img == None: continue sel = (0,0,0,0) drag_start = None - gray=cv.cvtColor(img, cv.COLOR_BGR2GRAY) - cv.imshow("gray",gray) - if (cv.waitKey() & 255) == 27: + gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + cv2.imshow("gray",gray) + if (cv2.waitKey() & 255) == 27: break - cv.destroyAllWindows() + cv2.destroyAllWindows() diff --git a/samples/python2/texture_flow.py b/samples/python2/texture_flow.py index 38767fae9b..0e1849ce3f 100755 --- a/samples/python2/texture_flow.py +++ b/samples/python2/texture_flow.py @@ -24,7 +24,7 @@ if __name__ == '__main__': if img is None: print 'Failed to load image file:', fn sys.exit(1) - + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) h, w = img.shape[:2] @@ -38,7 +38,7 @@ if __name__ == '__main__': points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2) for x, y in points: vx, vy = np.int32(flow[y, x]*d) - cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.CV_AA) + cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA) cv2.imshow('input', img) cv2.imshow('flow', vis) cv2.waitKey() diff --git a/samples/python2/turing.py b/samples/python2/turing.py index e847f014c1..94816e6c31 100755 --- a/samples/python2/turing.py +++ b/samples/python2/turing.py @@ -9,7 +9,6 @@ Inspired by http://www.jonathanmccabe.com/Cyclic_Symmetric_Multi-Scale_Turing_Pa import numpy as np import cv2 -import cv2.cv as cv from common import draw_str import getopt, sys from itertools import count @@ -30,7 +29,7 @@ if __name__ == '__main__': out = None if '-o' in args: fn = args['-o'] - out = cv2.VideoWriter(args['-o'], cv.CV_FOURCC(*'DIB '), 30.0, (w, h), False) + out = cv2.VideoWriter(args['-o'], cv2.VideoWriter_fourcc(*'DIB '), 30.0, (w, h), False) print 'writing %s ...' 
% fn a = np.zeros((h, w), np.float32) diff --git a/samples/python2/video.py b/samples/python2/video.py index 8cbc6f03ba..e40d4d18c8 100755 --- a/samples/python2/video.py +++ b/samples/python2/video.py @@ -105,7 +105,7 @@ class Chess(VideoSynthBase): img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0] img_quads.shape = quads.shape[:2] + (2,) for q in img_quads: - cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.CV_AA, shift=2) + cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.LINE_AA, shift=2) def render(self, dst): t = self.t @@ -159,8 +159,8 @@ def create_capture(source = 0, fallback = presets['chess']): cap = cv2.VideoCapture(source) if 'size' in params: w, h = map(int, params['size'].split('x')) - cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, w) - cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, h) + cap.set(cv2.CAP_PROP_FRAME_WIDTH, w) + cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h) if cap is None or not cap.isOpened(): print 'Warning: unable to open video source: ', source if fallback is not None:
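
Taken together, the samples/python2 hunks above apply one mechanical rename: constants that used to be reached through the removed cv2.cv compatibility layer (CV_AA, CV_MINMAX, CV_DIST_*, CV_HAAR_SCALE_IMAGE, CV_HOUGH_GRADIENT, CV_FOURCC, CV_CAP_PROP_*) become plain cv2 attributes without the CV_ prefix. Below is a minimal sketch of the new-style calls, assuming OpenCV 3.x-era cv2 bindings; the image, point set, capture device, and fourcc values are illustrative placeholders, not taken from the patch:

import numpy as np
import cv2

# placeholder image, only so the calls below have something to draw on
img = np.zeros((240, 320, 3), np.uint8)

# was: cv2.line(img, pt1, pt2, color, 1, cv2.cv.CV_AA)
cv2.line(img, (0, 0), (319, 239), (0, 0, 255), 1, cv2.LINE_AA)

# was: cv2.normalize(src, dst, 0, 255, cv2.cv.CV_MINMAX)
norm = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)

# was: cv2.fitLine(points, cv2.cv.CV_DIST_L2, 0, 0.01, 0.01)
points = np.float32([[0, 0], [10, 10], [20, 21]])
vx, vy, cx, cy = cv2.fitLine(points, cv2.DIST_L2, 0, 0.01, 0.01)

# was: cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, w)
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# was: cv2.VideoWriter(fn, cv2.cv.CV_FOURCC(*'DIB '), 30.0, (w, h), False)
fourcc = cv2.VideoWriter_fourcc(*'DIB ')

The one name in these hunks with no cv2 replacement is cv2.cv.CV_PI, which the houghlines.py hunk substitutes with math.pi.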