diff --git a/doc/tutorials/others/meanshift.markdown b/doc/tutorials/others/meanshift.markdown
index 858ad1b7c9..1fab48b34d 100644
--- a/doc/tutorials/others/meanshift.markdown
+++ b/doc/tutorials/others/meanshift.markdown
@@ -135,5 +135,5 @@ Additional Resources
 
 Exercises
 ---------
--# OpenCV comes with a Python [sample](https://github.com/opencv/opencv/blob/5.x/samples/python/camshift.py) for an interactive demo of camshift. Use it, hack it, understand
-   it.
+-# OpenCV comes with a Python [sample](https://github.com/opencv/opencv/blob/5.x/samples/python/snippets/camshift.py) for an interactive demo of camshift. Use it, hack it, understand
+   it.
\ No newline at end of file
diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp
index a1b1390f7f..4d4027e9ca 100644
--- a/modules/core/include/opencv2/core.hpp
+++ b/modules/core/include/opencv2/core.hpp
@@ -2160,6 +2160,10 @@ the invert function (preferably using the #DECOMP_SVD method, as the most accura
 */
 CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar);
 
+/** @example samples/python/snippets/dft.py
+An example on the Discrete Fourier transform (DFT) in Python.
+*/
+
 /** @brief Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.
 
 The function cv::dft performs one of the following:
@@ -3055,7 +3059,10 @@ private:
 //! @{
 
 /** @example samples/cpp/snippets/kmeans.cpp
-An example on K-means clustering
+An example on k-means clustering
+*/
+/** @example samples/python/snippets/kmeans.py
+An example on k-means clustering in Python
 */
 
 /** @brief Finds centers of clusters and groups input samples around the clusters.
diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp
index 827334d442..6d675ddd2b 100644
--- a/modules/features2d/include/opencv2/features2d.hpp
+++ b/modules/features2d/include/opencv2/features2d.hpp
@@ -482,6 +482,10 @@ public:
     CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
 };
 
+/** @example samples/python/snippets/mser.py
+An example using the Maximally stable extremal region (MSER) extractor in Python
+*/
+
 /** @brief Maximally stable extremal region extractor
 
 The class encapsulates all the parameters of the %MSER extraction algorithm (see [wiki
diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp
index c33b833b0f..f9eceb4e98 100644
--- a/modules/imgproc/include/opencv2/imgproc.hpp
+++ b/modules/imgproc/include/opencv2/imgproc.hpp
@@ -1847,7 +1847,10 @@ CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth,
                           int borderType = BORDER_DEFAULT );
 
 /** @example samples/cpp/snippets/laplace.cpp
-An example using Laplace transformations for edge detection
+An example using the Laplace filter for edge detection
+*/
+/** @example samples/python/snippets/laplace.py
+An example using the Laplace filter for edge detection in Python
 */
 
 /** @brief Calculates the Laplacian of an image.
@@ -1968,6 +1971,10 @@ CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize,
                                 int ksize, double k,
                                 int borderType = BORDER_DEFAULT );
 
+/** @example samples/python/snippets/texture_flow.py
+An example using cornerEigenValsAndVecs in Python
+*/
+
 /** @brief Calculates eigenvalues and eigenvectors of image blocks for corner detection.
 
 For every pixel \f$p\f$ , the function cornerEigenValsAndVecs considers a blockSize \f$\times\f$ blockSize
@@ -2158,6 +2165,9 @@ CV_EXPORTS CV_WRAP_AS(goodFeaturesToTrackWithQuality) void goodFeaturesToTrack(
 An example using the Hough line detector
 ![Sample input image](Hough_Lines_Tutorial_Original_Image.jpg) ![Output image](Hough_Lines_Tutorial_Result.jpg)
 */
+/** @example samples/python/snippets/houghlines.py
+An example using the Hough line detector in Python
+*/
 
 /** @brief Finds lines in a binary image using the standard Hough transform.
 
@@ -2248,6 +2258,9 @@ CV_EXPORTS_W void HoughLinesPointSet( InputArray point, OutputArray lines, int l
 /** @example samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp
 An example using the Hough circle detector
 */
+/** @example samples/python/snippets/houghcircles.py
+An example using the Hough circle detector in Python
+*/
 
 /** @brief Finds circles in a grayscale image using the Hough transform.
 
@@ -2689,6 +2702,9 @@ CV_EXPORTS_W void getRectSubPix( InputArray image, Size patchSize,
 /** @example samples/cpp/snippets/polar_transforms.cpp
 An example using the cv::linearPolar and cv::logPolar operations
 */
+/** @example samples/python/snippets/logpolar.py
+An example using the linearPolar and logPolar operations in Python
+*/
 
 /** @brief Remaps an image to semilog-polar coordinates space.
 
@@ -3423,6 +3439,9 @@ CV_EXPORTS_AS(EMD) float wrapperEMD( InputArray signature1, InputArray signature
 /** @example samples/cpp/snippets/watershed.cpp
 An example using the watershed algorithm
 */
+/** @example samples/python/snippets/watershed.py
+An example using the watershed algorithm in Python
+*/
 
 /** @brief Performs a marker-based image segmentation using the watershed algorithm.
 
@@ -3535,6 +3554,9 @@ CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
 /** @example samples/cpp/snippets/distrans.cpp
 An example on using the distance transform
 */
+/** @example samples/python/snippets/distrans.py
+An example on using the distance transform in Python
+*/
 
 /** @brief Calculates the distance to the closest zero pixel for each pixel of the source image.
 
@@ -4069,6 +4091,10 @@ CV_EXPORTS_W void findContoursLinkRuns(InputArray image, OutputArrayOfArrays con
 //! @overload
 CV_EXPORTS_W void findContoursLinkRuns(InputArray image, OutputArrayOfArrays contours);
 
+/** @example samples/python/snippets/squares.py
+An example using the approxPolyDP function in Python.
+*/
+
 /** @brief Approximates a polygonal curve(s) with the specified precision.
 
 The function cv::approxPolyDP approximates a curve or a polygon with another curve/polygon with less
@@ -4401,6 +4427,10 @@ CV_EXPORTS_W RotatedRect fitEllipseAMS( InputArray points );
 */
 CV_EXPORTS_W RotatedRect fitEllipseDirect( InputArray points );
 
+/** @example samples/python/snippets/fitline.py
+An example of fitting a line in Python
+*/
+
 /** @brief Fits a line to a 2D or 3D point set.
 
 The function fitLine fits a line to a 2D or 3D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
@@ -4762,6 +4792,9 @@ CV_EXPORTS void polylines(InputOutputArray img, const Point* const* pts, const i
                           int ncontours, bool isClosed, const Scalar& color,
                           int thickness = 1, int lineType = LINE_8, int shift = 0 );
 
+/** @example samples/python/snippets/contours.py
+An example program illustrating the use of findContours and drawContours in Python
+*/
 /** @example samples/cpp/snippets/segment_objects.cpp
 An example using drawContours to clean up a background segmentation result
 
diff --git a/modules/stitching/include/opencv2/stitching.hpp b/modules/stitching/include/opencv2/stitching.hpp
index bde1411bc0..cf4b150fb4 100644
--- a/modules/stitching/include/opencv2/stitching.hpp
+++ b/modules/stitching/include/opencv2/stitching.hpp
@@ -113,7 +113,7 @@ namespace cv {
 A basic example on image stitching
 */
 
-/** @example samples/python/stitching.py
+/** @example samples/python/snippets/stitching.py
 A basic example on image stitching in Python.
 */
 
diff --git a/modules/video/include/opencv2/video/tracking.hpp b/modules/video/include/opencv2/video/tracking.hpp
index 737abf435c..b1fc24f36c 100644
--- a/modules/video/include/opencv2/video/tracking.hpp
+++ b/modules/video/include/opencv2/video/tracking.hpp
@@ -74,7 +74,7 @@ See the OpenCV sample camshiftdemo.c that tracks colored objects.
 
 @note
 -   (Python) A sample explaining the camshift tracking algorithm can be found at
-    opencv_source_code/samples/python/camshift.py
+    opencv_source_code/samples/python/snippets/camshift.py
  */
 CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window,
                                    TermCriteria criteria );
@@ -129,6 +129,10 @@ CV_EXPORTS_W int buildOpticalFlowPyramid( InputArray img, OutputArrayOfArrays py
 An example using the Lucas-Kanade optical flow algorithm
 */
 
+/** @example samples/python/snippets/lk_track.py
+An example using the Lucas-Kanade optical flow algorithm in Python
+*/
+
 /** @brief Calculates an optical flow for a sparse feature set using the iterative
 Lucas-Kanade method with pyramids.
 
@@ -183,6 +187,10 @@ CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg,
                                         TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),
                                         int flags = 0, double minEigThreshold = 1e-4 );
 
+/** @example samples/python/snippets/opt_flow.py
+An example showing optical flow in Python
+*/
+
 /** @brief Computes a dense optical flow using the Gunnar Farneback's algorithm.
 
 @param prev first 8-bit single-channel input image.
@@ -350,6 +358,11 @@ double findTransformECC(InputArray templateImage, InputArray inputImage,
 An example using the standard Kalman filter
  */
 
+
+/** @example samples/python/snippets/kalman.py
+An example using the standard Kalman filter in Python.
+ */
+
 /** @brief Kalman filter class.
 
 The class implements a standard Kalman filter <http://en.wikipedia.org/wiki/Kalman_filter>,
@@ -431,9 +444,14 @@ CV_EXPORTS_W Mat readOpticalFlow( const String& path );
 */
 CV_EXPORTS_W bool writeOpticalFlow( const String& path, InputArray flow );
 
+/** @example samples/python/snippets/dis_opt_flow.py
+An example using the dense optical flow and DIS optical flow algorithms in Python
+*/
+
 /** @example samples/cpp/snippets/dis_opticalflow.cpp
 An example using the dense optical flow and DIS optical flow algorithms
 */
+
 /** Base class for dense optical flow algorithms
 */
 
diff --git a/samples/python/browse.py b/samples/python/browse.py
deleted file mode 100755
index b55f8d9373..0000000000
--- a/samples/python/browse.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-
-'''
-browse.py
-=========
-
-Sample shows how to implement a simple hi resolution image navigation
-
-Usage
------
-browse.py [image filename]
-
-'''
-
-import numpy as np
-import cv2 as cv
-
-# built-in modules
-import sys
-
-def main():
-    if len(sys.argv) > 1:
-        fn = cv.samples.findFile(sys.argv[1])
-        print('loading %s ...' % fn)
-        img = cv.imread(fn)
-        if img is None:
-            print('Failed to load fn:', fn)
-            sys.exit(1)
-
-    else:
-        sz = 4096
-        print('generating %dx%d procedural image ...' % (sz, sz))
-        img = np.zeros((sz, sz), np.uint8)
-        track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0)
-        track = np.int32(track*10 + (sz/2, sz/2))
-        cv.polylines(img, [track], 0, 255, 1, cv.LINE_AA)
-
-
-    small = img
-    for _i in range(3):
-        small = cv.pyrDown(small)
-
-    def onmouse(event, x, y, flags, param):
-        h, _w = img.shape[:2]
-        h1, _w1 = small.shape[:2]
-        x, y = 1.0*x*h/h1, 1.0*y*h/h1
-        zoom = cv.getRectSubPix(img, (800, 600), (x+0.5, y+0.5))
-        cv.imshow('zoom', zoom)
-
-    cv.imshow('preview', small)
-    cv.setMouseCallback('preview', onmouse)
-    cv.waitKey()
-    print('Done')
-
-
-if __name__ == '__main__':
-    print(__doc__)
-    main()
-    cv.destroyAllWindows()
diff --git a/samples/python/coherence.py b/samples/python/coherence.py
deleted file mode 100755
index 76d86527f3..0000000000
--- a/samples/python/coherence.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Coherence-enhancing filtering example
-=====================================
-
-inspired by
-  Joachim Weickert "Coherence-Enhancing Shock Filters"
-  http://www.mia.uni-saarland.de/Publications/weickert-dagm03.pdf
-'''
-
-import numpy as np
-import cv2 as cv
-
-def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
-    h, w = img.shape[:2]
-
-    for i in range(iter_n):
-        print(i)
-
-        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
-        eigen = cv.cornerEigenValsAndVecs(gray, str_sigma, 3)
-        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
-        x, y = eigen[:,:,1,0], eigen[:,:,1,1]
-
-        gxx = cv.Sobel(gray, cv.CV_32F, 2, 0, ksize=sigma)
-        gxy = cv.Sobel(gray, cv.CV_32F, 1, 1, ksize=sigma)
-        gyy = cv.Sobel(gray, cv.CV_32F, 0, 2, ksize=sigma)
-        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
-        m = gvv < 0
-
-        ero = cv.erode(img, None)
-        dil = cv.dilate(img, None)
-        img1 = ero
-        img1[m] = dil[m]
-        img = np.uint8(img*(1.0 - blend) + img1*blend)
-    print('done')
-    return img
-
-
-def main():
-    import sys
-    try:
-        fn = sys.argv[1]
-    except:
-        fn = 'baboon.jpg'
-
-    src = cv.imread(cv.samples.findFile(fn))
-
-    def nothing(*argv):
-        pass
-
-    def update():
-        sigma = cv.getTrackbarPos('sigma', 'control')*2+1
-        str_sigma = cv.getTrackbarPos('str_sigma', 'control')*2+1
-        blend = cv.getTrackbarPos('blend', 'control') / 10.0
-        print('sigma: %d str_sigma: %d blend_coef: %f' % (sigma, str_sigma, blend))
-        dst = coherence_filter(src, sigma=sigma, str_sigma = str_sigma, blend = blend)
-        cv.imshow('dst', dst)
-
-    cv.namedWindow('control', 0)
-    cv.createTrackbar('sigma', 'control', 9, 15, nothing)
-    cv.createTrackbar('blend', 'control', 7, 10, nothing)
-    cv.createTrackbar('str_sigma', 'control', 9, 15, nothing)
-
-
-    print('Press SPACE to update the image\n')
-
-    cv.imshow('src', src)
-    update()
-    while True:
-        ch = cv.waitKey()
-        if ch == ord(' '):
-            update()
-        if ch == 27:
-            break
-
-    print('Done')
-
-
-if __name__ == '__main__':
-    print(__doc__)
-    main()
-    cv.destroyAllWindows()
diff --git a/samples/python/color_histogram.py b/samples/python/color_histogram.py
deleted file mode 100755
index a1924bab8b..0000000000
--- a/samples/python/color_histogram.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Video histogram sample to show live histogram of video
-
-Keys:
-    ESC    - exit
-
-'''
-
-# Python 2/3 compatibility
-from __future__ import print_function
-
-import numpy as np
-import cv2 as cv
-
-# built-in modules
-import sys
-
-# local modules
-import video
-
-class App():
-
-    def set_scale(self, val):
-        self.hist_scale = val
-
-    def run(self):
-        hsv_map = np.zeros((180, 256, 3), np.uint8)
-        h, s = np.indices(hsv_map.shape[:2])
-        hsv_map[:,:,0] = h
-        hsv_map[:,:,1] = s
-        hsv_map[:,:,2] = 255
-        hsv_map = cv.cvtColor(hsv_map, cv.COLOR_HSV2BGR)
-        cv.imshow('hsv_map', hsv_map)
-
-        cv.namedWindow('hist', 0)
-        self.hist_scale = 10
-
-        cv.createTrackbar('scale', 'hist', self.hist_scale, 32, self.set_scale)
-
-        try:
-            fn = sys.argv[1]
-        except:
-            fn = 0
-        cam = video.create_capture(fn, fallback='synth:bg=baboon.jpg:class=chess:noise=0.05')
-
-        while True:
-            _flag, frame = cam.read()
-            cv.imshow('camera', frame)
-
-            small = cv.pyrDown(frame)
-
-            hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV)
-            dark = hsv[...,2] < 32
-            hsv[dark] = 0
-            h = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
-
-            h = np.clip(h*0.005*self.hist_scale, 0, 1)
-            vis = hsv_map*h[:,:,np.newaxis] / 255.0
-            cv.imshow('hist', vis)
-
-            ch = cv.waitKey(1)
-            if ch == 27:
-                break
-
-        print('Done')
-
-
-if __name__ == '__main__':
-    print(__doc__)
-    App().run()
-    cv.destroyAllWindows()
diff --git a/samples/python/deconvolution.py b/samples/python/deconvolution.py
deleted file mode 100755
index b276ca8cfb..0000000000
--- a/samples/python/deconvolution.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Wiener deconvolution.
-
-Sample shows how DFT can be used to perform Weiner deconvolution [1]
-of an image with user-defined point spread function (PSF)
-
-Usage:
-  deconvolution.py [--circle]
-      [--angle <degrees>]
-      [--d <diameter>]
-      [--snr <signal/noise ratio in db>]
-      [<input image>]
-
-  Use sliders to adjust PSF paramitiers.
-  Keys:
-    SPACE - switch btw linear/circular PSF
-    ESC   - exit
-
-Examples:
-  deconvolution.py --angle 135 --d 22 licenseplate_motion.jpg
-    (image source: http://www.topazlabs.com/infocus/_images/licenseplate_compare.jpg)
-
-  deconvolution.py --angle 86 --d 31 text_motion.jpg
-  deconvolution.py --circle --d 19 text_defocus.jpg
-    (image source: compact digital photo camera, no artificial distortion)
-
-
-[1] http://en.wikipedia.org/wiki/Wiener_deconvolution
-'''
-
-# Python 2/3 compatibility
-from __future__ import print_function
-
-import numpy as np
-import cv2 as cv
-
-# local module
-from common import nothing
-
-
-def blur_edge(img, d=31):
-    h, w = img.shape[:2]
-    img_pad = cv.copyMakeBorder(img, d, d, d, d, cv.BORDER_WRAP)
-    img_blur = cv.GaussianBlur(img_pad, (2*d+1, 2*d+1), -1)[d:-d,d:-d]
-    y, x = np.indices((h, w))
-    dist = np.dstack([x, w-x-1, y, h-y-1]).min(-1)
-    w = np.minimum(np.float32(dist)/d, 1.0)
-    return img*w + img_blur*(1-w)
-
-def motion_kernel(angle, d, sz=65):
-    kern = np.ones((1, d), np.float32)
-    c, s = np.cos(angle), np.sin(angle)
-    A = np.float32([[c, -s, 0], [s, c, 0]])
-    sz2 = sz // 2
-    A[:,2] = (sz2, sz2) - np.dot(A[:,:2], ((d-1)*0.5, 0))
-    kern = cv.warpAffine(kern, A, (sz, sz), flags=cv.INTER_CUBIC)
-    return kern
-
-def defocus_kernel(d, sz=65):
-    kern = np.zeros((sz, sz), np.uint8)
-    cv.circle(kern, (sz, sz), d, 255, -1, cv.LINE_AA, shift=1)
-    kern = np.float32(kern) / 255.0
-    return kern
-
-
-def main():
-    import sys, getopt
-    opts, args = getopt.getopt(sys.argv[1:], '', ['circle', 'angle=', 'd=', 'snr='])
-    opts = dict(opts)
-    try:
-        fn = args[0]
-    except:
-        fn = 'licenseplate_motion.jpg'
-
-    win = 'deconvolution'
-
-    img = cv.imread(cv.samples.findFile(fn), cv.IMREAD_GRAYSCALE)
-    if img is None:
-        print('Failed to load file:', fn)
-        sys.exit(1)
-
-    img = np.float32(img)/255.0
-    cv.imshow('input', img)
-
-    img = blur_edge(img)
-    IMG = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT)
-
-    defocus = '--circle' in opts
-
-    def update(_):
-        ang = np.deg2rad( cv.getTrackbarPos('angle', win) )
-        d = cv.getTrackbarPos('d', win)
-        noise = 10**(-0.1*cv.getTrackbarPos('SNR (db)', win))
-
-        if defocus:
-            psf = defocus_kernel(d)
-        else:
-            psf = motion_kernel(ang, d)
-        cv.imshow('psf', psf)
-
-        psf /= psf.sum()
-        psf_pad = np.zeros_like(img)
-        kh, kw = psf.shape
-        psf_pad[:kh, :kw] = psf
-        PSF = cv.dft(psf_pad, flags=cv.DFT_COMPLEX_OUTPUT, nonzeroRows = kh)
-        PSF2 = (PSF**2).sum(-1)
-        iPSF = PSF / (PSF2 + noise)[...,np.newaxis]
-        RES = cv.mulSpectrums(IMG, iPSF, 0)
-        res = cv.idft(RES, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT )
-        res = np.roll(res, -kh//2, 0)
-        res = np.roll(res, -kw//2, 1)
-        cv.imshow(win, res)
-
-    cv.namedWindow(win)
-    cv.namedWindow('psf', 0)
-    cv.createTrackbar('angle', win, int(opts.get('--angle', 135)), 180, update)
-    cv.createTrackbar('d', win, int(opts.get('--d', 22)), 50, update)
-    cv.createTrackbar('SNR (db)', win, int(opts.get('--snr', 25)), 50, update)
-    update(None)
-
-    while True:
-        ch = cv.waitKey()
-        if ch == 27:
-            break
-        if ch == ord(' '):
-            defocus = not defocus
-            update(None)
-
-    print('Done')
-
-
-if __name__ == '__main__':
-    print(__doc__)
-    main()
-    cv.destroyAllWindows()
diff --git a/samples/python/edge.py b/samples/python/edge.py
deleted file mode 100755
index e85c2f6288..0000000000
--- a/samples/python/edge.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python
-
-'''
-This sample demonstrates Canny edge detection.
-
-Usage:
-  edge.py [