diff --git a/samples/python2/calibrate.py b/samples/python2/calibrate.py
index 24e4aa7d0c..873dceb2f8 100755
--- a/samples/python2/calibrate.py
+++ b/samples/python2/calibrate.py
@@ -1,5 +1,21 @@
 #!/usr/bin/env python
 
+'''
+camera calibration for distorted images with chess board samples
+reads distorted images, calculates the calibration and writes undistorted images
+
+usage:
+    calibrate.py [--debug <output path>] [--square_size] [<image mask>]
+
+default values:
+    --debug:    ./output/
+    --square_size: 1.0
+    <image mask> defaults to ../data/left*.jpg
+
+read more:
+    http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
+'''
+
 # Python 2/3 compatibility
 from __future__ import print_function
 
@@ -12,64 +28,88 @@ from common import splitfn
 # built-in modules
 import os
 
-
-USAGE = '''
-USAGE: calib.py [--save <filename>] [--debug <output path>] [--square_size] [<image mask>]
-'''
-
-
-
 if __name__ == '__main__':
     import sys
     import getopt
     from glob import glob
 
-    args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
+    args, img_mask = getopt.getopt(sys.argv[1:], '', ['debug=', 'square_size='])
     args = dict(args)
-    try:
+    args.setdefault('--debug', './output/')
+    args.setdefault('--square_size', 1.0)
+    if not img_mask:
+        img_mask = '../data/left*.jpg'  # default
+    else:
         img_mask = img_mask[0]
-    except:
-        img_mask = '../data/left*.jpg'
 
     img_names = glob(img_mask)
     debug_dir = args.get('--debug')
-    square_size = float(args.get('--square_size', 1.0))
+    if not os.path.isdir(debug_dir):
+        os.mkdir(debug_dir)
+    square_size = float(args.get('--square_size'))
 
     pattern_size = (9, 6)
-    pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
-    pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
+    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
+    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
     pattern_points *= square_size
 
     obj_points = []
     img_points = []
     h, w = 0, 0
+    img_names_undistort = []
     for fn in img_names:
-        print('processing %s...' % fn,)
+        print('processing %s... ' % fn, end='')
         img = cv2.imread(fn, 0)
         if img is None:
-          print("Failed to load", fn)
-          continue
+            print("Failed to load", fn)
+            continue
 
         h, w = img.shape[:2]
         found, corners = cv2.findChessboardCorners(img, pattern_size)
         if found:
-            term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
+            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
             cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
+
         if debug_dir:
             vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
             cv2.drawChessboardCorners(vis, pattern_size, corners, found)
             path, name, ext = splitfn(fn)
-            cv2.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
+            outfile = debug_dir + name + '_chess.png'
+            cv2.imwrite(outfile, vis)
+            if found:
+                img_names_undistort.append(outfile)
+
         if not found:
             print('chessboard not found')
            continue
+
         img_points.append(corners.reshape(-1, 2))
         obj_points.append(pattern_points)
 
         print('ok')
 
+    # calculate camera distortion
     rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None)
-    print("RMS:", rms)
-    print("camera matrix:\n", camera_matrix)
-    print("distortion coefficients: ", dist_coefs.ravel())
+
+    # print("RMS:", rms)
+    # print("camera matrix:\n", camera_matrix)
+    # print("distortion coefficients: ", dist_coefs.ravel())
+
+    # undistort the image with the calibration
+    print('')
+    for img_found in img_names_undistort:
+        img = cv2.imread(img_found)
+
+        h, w = img.shape[:2]
+        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h))
+
+        dst = cv2.undistort(img, camera_matrix, dist_coefs, None, newcameramtx)
+
+        # crop and save the image
+        x, y, w, h = roi
+        dst = dst[y:y+h, x:x+w]
+        outfile = img_found + '_undistorted.png'
+        print('Undistorted image written to: %s' % outfile)
+        cv2.imwrite(outfile, dst)
+
     cv2.destroyAllWindows()
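Note on the new undistortion step in calibrate.py: stripped of the sample's option handling, it amounts to the short sketch below. The intrinsics and file names here are placeholder assumptions for illustration; in the sample they come from cv2.calibrateCamera and the chessboard images.

    import numpy as np
    import cv2

    # placeholder intrinsics; the sample obtains these from cv2.calibrateCamera
    camera_matrix = np.array([[1000.0,    0.0, 640.0],
                              [   0.0, 1000.0, 360.0],
                              [   0.0,    0.0,   1.0]])
    dist_coefs = np.array([-0.2, 0.1, 0.0, 0.0, 0.0])   # k1, k2, p1, p2, k3

    img = cv2.imread('left01.jpg')   # hypothetical distorted input
    h, w = img.shape[:2]

    # alpha=1 keeps all source pixels; roi marks the region containing only valid pixels
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h))
    dst = cv2.undistort(img, camera_matrix, dist_coefs, None, newcameramtx)

    # crop to the valid region and save
    x, y, rw, rh = roi
    cv2.imwrite('left01_undistorted.png', dst[y:y + rh, x:x + rw])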
diff --git a/samples/python2/color_histogram.py b/samples/python2/color_histogram.py
index 7e964d5c57..9e691b7efe 100755
--- a/samples/python2/color_histogram.py
+++ b/samples/python2/color_histogram.py
@@ -1,11 +1,18 @@
 #!/usr/bin/env python
 
+'''
+Video histogram sample to show live histogram of video
+
+Keys:
+    ESC    - exit
+
+'''
+
 import numpy as np
 import cv2
 
 # built-in modules
 import sys
-from time import clock
 
 # local modules
 import video
@@ -22,6 +29,7 @@ if __name__ == '__main__':
 
     cv2.namedWindow('hist', 0)
     hist_scale = 10
+
    def set_scale(val):
         global hist_scale
         hist_scale = val
@@ -42,8 +50,7 @@ if __name__ == '__main__':
 
         hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV)
         dark = hsv[...,2] < 32
         hsv[dark] = 0
-        h = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
-
+        h = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
         h = np.clip(h*0.005*hist_scale, 0, 1)
         vis = hsv_map*h[:,:,np.newaxis] / 255.0
diff --git a/samples/python2/dft.py b/samples/python2/dft.py
index d617438a88..4437aad646 100755
--- a/samples/python2/dft.py
+++ b/samples/python2/dft.py
@@ -1,5 +1,13 @@
 #!/usr/bin/env python
 
+'''
+sample for discrete fourier transform (dft)
+
+USAGE:
+    dft.py <image_file>
+'''
+
+
 # Python 2/3 compatibility
 from __future__ import print_function
 
@@ -56,9 +64,9 @@ def shift_dft(src, dst=None):
 
 
 if __name__ == "__main__":
 
-    if len(sys.argv)>1:
+    if len(sys.argv) > 1:
         im = cv2.imread(sys.argv[1])
-    else :
+    else:
         im = cv2.imread('../data/baboon.jpg')
         print("usage : python dft.py <image_file>")
diff --git a/samples/python2/facedetect.py b/samples/python2/facedetect.py
index 2349087dc8..c55407943e 100755
--- a/samples/python2/facedetect.py
+++ b/samples/python2/facedetect.py
@@ -1,5 +1,15 @@
 #!/usr/bin/env python
 
+'''
+face detection using haar cascades
+
+USAGE:
+    facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>]
+
+read more:
+    http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_objdetect/py_face_detection/py_face_detection.html
+'''
+
 # Python 2/3 compatibility
 from __future__ import print_function
 
@@ -10,12 +20,10 @@ import cv2
 from video import create_capture
 from common import clock, draw_str
 
-help_message = '''
-USAGE: facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>]
-'''
 
 def detect(img, cascade):
-    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags = cv2.CASCADE_SCALE_IMAGE)
+    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
+                                     flags=cv2.CASCADE_SCALE_IMAGE)
     if len(rects) == 0:
         return []
     rects[:,2:] += rects[:,:2]
@@ -27,7 +35,7 @@ def draw_rects(img, rects, color):
 
 if __name__ == '__main__':
     import sys, getopt
-    print(help_message)
+    print(__doc__)
 
     args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
     try:
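Note on the detectMultiScale call reformatted in facedetect.py: on a single still image, the same parameters can be exercised with a sketch like the one below. The cascade and image paths are assumptions; point them at wherever the OpenCV haarcascade files and a test image live on your system.

    import cv2

    # paths are assumptions; adjust to your OpenCV data checkout and a test image
    cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    img = cv2.imread('group_photo.jpg')

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    # same parameters as the sample: 30% scale steps, 4 neighbours, 30x30 minimum size
    rects = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=4,
                                     minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
    for x, y, w, h in rects:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imwrite('group_photo_faces.png', img)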
diff --git a/samples/python2/houghcircles.py b/samples/python2/houghcircles.py
index fe87d8f3e1..04491dd8e7 100755
--- a/samples/python2/houghcircles.py
+++ b/samples/python2/houghcircles.py
@@ -2,8 +2,13 @@
 '''
 This example illustrates how to use cv2.HoughCircles() function.
-Usage: ./houghcircles.py [<image_name>]
-image argument defaults to ../data/board.jpg
+
+Usage:
+    houghcircles.py [<image_name>]
+    image argument defaults to ../data/board.jpg
+
+read more:
+    http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html
 '''
 
 # Python 2/3 compatibility
 from __future__ import print_function
@@ -14,11 +19,11 @@ import numpy as np
 import sys
 
 if __name__ == '__main__':
-    print(__doc__)
+
 
     try:
         fn = sys.argv[1]
-    except:
+    except IndexError:
         fn = "../data/board.jpg"
 
     src = cv2.imread(fn, 1)
@@ -30,7 +35,7 @@ if __name__ == '__main__':
     a, b, c = circles.shape
     for i in range(b):
         cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv2.LINE_AA)
-        cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.LINE_AA) # draw center of circle
+        cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.LINE_AA)  # draw center of circle
 
     cv2.imshow("source", src)
     cv2.imshow("detected circles", cimg)
diff --git a/samples/python2/houghlines.py b/samples/python2/houghlines.py
index 674b26ec70..9a0f653f1f 100755
--- a/samples/python2/houghlines.py
+++ b/samples/python2/houghlines.py
@@ -1,9 +1,16 @@
 #!/usr/bin/python
+
 '''
 This example illustrates how to use Hough Transform to find lines
-Usage: ./houghlines.py [<image_name>]
-image argument defaults to ../data/pic1.png
+
+Usage:
+    houghlines.py [<image_name>]
+    image argument defaults to ../data/pic1.png
+
+read more:
+    http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
 '''
+
 # Python 2/3 compatibility
 from __future__ import print_function
 
@@ -13,12 +20,13 @@ import sys
 import math
 
 if __name__ == '__main__':
+    print(__doc__)
     try:
         fn = sys.argv[1]
-    except:
+    except IndexError:
         fn = "../data/pic1.png"
 
-    print(__doc__)
+
     src = cv2.imread(fn)
     dst = cv2.Canny(src, 50, 200)
     cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
@@ -42,7 +50,6 @@ if __name__ == '__main__':
             pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) )
             cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA)
 
-
     cv2.imshow("source", src)
     cv2.imshow("detected lines", cdst)
     cv2.waitKey(0)
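Note on houghcircles.py: the drawing loop touched above consumes the array returned by cv2.HoughCircles. A minimal detection sketch, with illustrative rather than tuned parameters, looks roughly like this:

    import cv2
    import numpy as np

    img = cv2.imread('../data/board.jpg', 0)    # grayscale, same default image as the sample
    img = cv2.medianBlur(img, 5)                # Hough circle detection is sensitive to noise
    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    # dp=1, minDist=10, Canny threshold 100, accumulator threshold 30, radii 1..30
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10,
                               np.array([]), 100, 30, 1, 30)
    if circles is not None:
        for x, y, r in circles[0]:
            cv2.circle(cimg, (int(x), int(y)), int(r), (0, 0, 255), 3, cv2.LINE_AA)
            cv2.circle(cimg, (int(x), int(y)), 2, (0, 255, 0), 3, cv2.LINE_AA)  # center
    cv2.imwrite('detected_circles.png', cimg)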
diff --git a/samples/python2/logpolar.py b/samples/python2/logpolar.py
index 60695bfd80..fdf03f3f80 100644
--- a/samples/python2/logpolar.py
+++ b/samples/python2/logpolar.py
@@ -1,15 +1,27 @@
 #!/usr/bin/env python
 
+'''
+plots image as logPolar and linearPolar
+
+Usage:
+    logpolar.py
+
+Keys:
+    ESC    - exit
+'''
+
 # Python 2/3 compatibility
 from __future__ import print_function
 
 import cv2
 
 if __name__ == '__main__':
+    print(__doc__)
+
     import sys
     try:
         fn = sys.argv[1]
-    except:
+    except IndexError:
         fn = '../data/fruits.jpg'
 
     img = cv2.imread(fn)
diff --git a/samples/python2/opencv_version.py b/samples/python2/opencv_version.py
index 44f1977362..b26b55c1dd 100644
--- a/samples/python2/opencv_version.py
+++ b/samples/python2/opencv_version.py
@@ -1,5 +1,15 @@
 #!/usr/bin/env python
 
+'''
+prints OpenCV version
+
+Usage:
+    opencv_version.py [<params>]
+    params:
+        --build: print complete build info
+        --help:  print this help
+'''
+
 # Python 2/3 compatibility
 from __future__ import print_function
 
@@ -7,14 +17,16 @@ import cv2
 
 if __name__ == '__main__':
     import sys
+    print(__doc__)
+
     try:
         param = sys.argv[1]
-    except:
+    except IndexError:
         param = ""
 
-    if ("--build" == param):
+    if "--build" == param:
         print(cv2.getBuildInformation())
-    elif ("--help" == param):
+    elif "--help" == param:
         print("\t--build\n\t\tprint complete build info")
         print("\t--help\n\t\tprint this help")
     else:
diff --git a/samples/python2/opt_flow.py b/samples/python2/opt_flow.py
index 63f65f2b78..be85262c91 100755
--- a/samples/python2/opt_flow.py
+++ b/samples/python2/opt_flow.py
@@ -1,21 +1,26 @@
 #!/usr/bin/env python
 
-# Python 2/3 compatibility
-from __future__ import print_function
-
-import numpy as np
-import cv2
-import video
+'''
+example to show optical flow
 
-help_message = '''
 USAGE: opt_flow.py [<video_source>]
 
 Keys:
  1 - toggle HSV flow visualization
 2 - toggle glitch
 
+Keys:
+    ESC    - exit
 '''
 
+# Python 2/3 compatibility
+from __future__ import print_function
+
+import numpy as np
+import cv2
+import video
+
+
 def draw_flow(img, flow, step=16):
     h, w = img.shape[:2]
     y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
@@ -28,6 +33,7 @@ def draw_flow(img, flow, step=16):
         cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
     return vis
 
+
 def draw_hsv(flow):
     h, w = flow.shape[:2]
     fx, fy = flow[:,:,0], flow[:,:,1]
@@ -40,6 +46,7 @@ def draw_hsv(flow):
     bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
     return bgr
 
+
 def warp_flow(img, flow):
     h, w = flow.shape[:2]
     flow = -flow
@@ -50,10 +57,10 @@ def warp_flow(img, flow):
 
 
 if __name__ == '__main__':
     import sys
-    print(help_message)
+    print(__doc__)
     try:
         fn = sys.argv[1]
-    except:
+    except IndexError:
         fn = 0
     cam = video.create_capture(fn)
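Note on opt_flow.py: the dense flow field that draw_flow() and draw_hsv() visualise comes from cv2.calcOpticalFlowFarneback. A stripped-down capture loop, assuming OpenCV 3.x bindings and a camera at index 0, would look like:

    import cv2

    cam = cv2.VideoCapture(0)      # assumed camera index
    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    while True:
        ret, img = cam.read()
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # flow[y, x] holds the (dx, dy) displacement estimated for every pixel
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)
        prevgray = gray
        cv2.imshow('flow x', flow[:, :, 0] / 20 + 0.5)   # crude visualisation
        if cv2.waitKey(5) == 27:                         # ESC quits
            break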
diff --git a/samples/python2/peopledetect.py b/samples/python2/peopledetect.py
index bd0fe73086..1cad95288b 100755
--- a/samples/python2/peopledetect.py
+++ b/samples/python2/peopledetect.py
@@ -1,22 +1,27 @@
 #!/usr/bin/env python
 
+'''
+example to detect upright people in images using HOG features
+
+Usage:
+    peopledetect.py <image_names>
+
+Press any key to continue, ESC to stop.
+'''
+
 # Python 2/3 compatibility
 from __future__ import print_function
 
 import numpy as np
 import cv2
 
-help_message = '''
-USAGE: peopledetect.py <image_names> ...
-
-Press any key to continue, ESC to stop.
-'''
 
 def inside(r, q):
     rx, ry, rw, rh = r
     qx, qy, qw, qh = q
     return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh
 
+
 def draw_detections(img, rects, thickness = 1):
     for x, y, w, h in rects:
         # the HOG detector returns slightly larger rectangles than the real objects.
@@ -30,13 +35,13 @@ if __name__ == '__main__':
     from glob import glob
     import itertools as it
 
-    print(help_message)
+    print(__doc__)
 
     hog = cv2.HOGDescriptor()
     hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() )
 
-    default = ['../data/basketball2.png '] if len(sys.argv[1:]) == 0 else []
+
     for fn in it.chain(*map(glob, default + sys.argv[1:])):
         print(fn, ' - ',)
         try: