diff --git a/apps/traincascade/imagestorage.cpp b/apps/traincascade/imagestorage.cpp index a133ccc3d4..f220e5c2b3 100644 --- a/apps/traincascade/imagestorage.cpp +++ b/apps/traincascade/imagestorage.cpp @@ -54,7 +54,7 @@ bool CvCascadeImageReader::NegReader::nextImg() size_t count = imgFilenames.size(); for( size_t i = 0; i < count; i++ ) { - src = imread( imgFilenames[last++], 0 ); + src = imread( imgFilenames[last++], IMREAD_GRAYSCALE ); if( src.empty() ){ last %= count; continue; diff --git a/doc/opencv.bib b/doc/opencv.bib index e1b1d8badc..64aa363202 100644 --- a/doc/opencv.bib +++ b/doc/opencv.bib @@ -1370,3 +1370,10 @@ journal = {IEEE transactions on pattern analysis and machine intelligence}, doi = {10.1109/TPAMI.2006.153} } +@article{Buades2005DenoisingIS, + title={Denoising image sequences does not require motion estimation}, + author={Antoni Buades and Bartomeu Coll and Jean-Michel Morel}, + journal={IEEE Conference on Advanced Video and Signal Based Surveillance, 2005.}, + year={2005}, + pages={70-74} +} diff --git a/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown b/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown index f0ea83122b..52d0c5933d 100644 --- a/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown +++ b/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown @@ -41,8 +41,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -imgL = cv.imread('tsukuba_l.png',0) -imgR = cv.imread('tsukuba_r.png',0) +imgL = cv.imread('tsukuba_l.png', cv.IMREAD_GRAYSCALE) +imgR = cv.imread('tsukuba_r.png', cv.IMREAD_GRAYSCALE) stereo = cv.StereoBM_create(numDisparities=16, blockSize=15) disparity = stereo.compute(imgL,imgR) diff --git a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown index 6b8d90882a..ada22222cb 100644 --- a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown +++ b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown @@ -76,8 +76,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img1 = cv.imread('myleft.jpg',0) #queryimage # left image -img2 = cv.imread('myright.jpg',0) #trainimage # right image +img1 = cv.imread('myleft.jpg', cv.IMREAD_GRAYSCALE) #queryimage # left image +img2 = cv.imread('myright.jpg', cv.IMREAD_GRAYSCALE) #trainimage # right image sift = cv.SIFT_create() diff --git a/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown b/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown index 4c6aa4bb92..1594f77200 100644 --- a/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown +++ b/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown @@ -25,6 +25,7 @@ Let's load a color image first: >>> import cv2 as cv >>> img = cv.imread('messi5.jpg') +>>> assert img is not None, "file could not be read, check with os.path.exists()" @endcode You can access a pixel value by its row and column coordinates. For BGR image, it returns an array of Blue, Green, Red values. For grayscale image, just corresponding intensity is returned. 
@@ -173,6 +174,7 @@ from matplotlib import pyplot as plt BLUE = [255,0,0] img1 = cv.imread('opencv-logo.png') +assert img1 is not None, "file could not be read, check with os.path.exists()" replicate = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REPLICATE) reflect = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT) diff --git a/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown b/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown index d08d974c2f..4b6e8bd3c1 100644 --- a/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown +++ b/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown @@ -50,6 +50,8 @@ Here \f$\gamma\f$ is taken as zero. @code{.py} img1 = cv.imread('ml.png') img2 = cv.imread('opencv-logo.png') +assert img1 is not None, "file could not be read, check with os.path.exists()" +assert img2 is not None, "file could not be read, check with os.path.exists()" dst = cv.addWeighted(img1,0.7,img2,0.3,0) @@ -76,6 +78,8 @@ bitwise operations as shown below: # Load two images img1 = cv.imread('messi5.jpg') img2 = cv.imread('opencv-logo-white.png') +assert img1 is not None, "file could not be read, check with os.path.exists()" +assert img2 is not None, "file could not be read, check with os.path.exists()" # I want to put logo on top-left corner, So I create a ROI rows,cols,channels = img2.shape diff --git a/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown b/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown index d24613a643..7d63ffadef 100644 --- a/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown +++ b/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown @@ -37,6 +37,7 @@ of odd sizes ranging from 5 to 49. 
(Don't worry about what the result will look goal): @code{.py} img1 = cv.imread('messi5.jpg') +assert img1 is not None, "file could not be read, check with os.path.exists()" e1 = cv.getTickCount() for i in range(5,49,2): diff --git a/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown b/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown index 4abcdc1bad..a34e7e7805 100644 --- a/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown +++ b/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown @@ -63,7 +63,7 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('simple.jpg',0) +img = cv.imread('simple.jpg', cv.IMREAD_GRAYSCALE) # Initiate FAST detector star = cv.xfeatures2d.StarDetector_create() diff --git a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown index b1b8a81ca8..1d7b3e3911 100644 --- a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown +++ b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown @@ -98,7 +98,7 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('blox.jpg',0) # `/samples/data/blox.jpg` +img = cv.imread('blox.jpg', cv.IMREAD_GRAYSCALE) # `/samples/data/blox.jpg` # Initiate FAST object with default values fast = cv.FastFeatureDetector_create() diff --git a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown index 6abac2c57b..4597c6bfcf 100644 --- a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown +++ b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown @@ -40,8 +40,8 @@ from matplotlib import pyplot as plt MIN_MATCH_COUNT = 10 -img1 = cv.imread('box.png',0) # queryImage -img2 = cv.imread('box_in_scene.png',0) # trainImage +img1 = cv.imread('box.png', cv.IMREAD_GRAYSCALE) # queryImage +img2 = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE) # trainImage # Initiate SIFT detector sift = cv.SIFT_create() diff --git a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown index 55bc0f2903..73d01aaaa1 100644 --- a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown +++ b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown @@ -67,7 +67,7 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('simple.jpg',0) +img = cv.imread('simple.jpg', cv.IMREAD_GRAYSCALE) # Initiate ORB detector orb = cv.ORB_create() diff --git a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown index fc980994c3..5bcd91cce8 100644 --- a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown +++ b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown @@ -76,7 +76,7 @@ and descriptors. First we will see a simple demo on how to find SURF keypoints and descriptors and draw it. All examples are shown in Python terminal since it is just same as SIFT only. @code{.py} ->>> img = cv.imread('fly.png',0) +>>> img = cv.imread('fly.png', cv.IMREAD_GRAYSCALE) # Create SURF object. You can specify params here or later. 
# Here I set Hessian Threshold to 400 diff --git a/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown b/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown index d36e5784eb..8c651afb35 100644 --- a/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown +++ b/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown @@ -83,7 +83,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" edges = cv.Canny(img,100,200) plt.subplot(121),plt.imshow(img,cmap = 'gray') diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown index f3c7f6fc31..e98b8a64b9 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown @@ -24,7 +24,8 @@ The function **cv.moments()** gives a dictionary of all moment values calculated import numpy as np import cv2 as cv -img = cv.imread('star.jpg',0) +img = cv.imread('star.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" ret,thresh = cv.threshold(img,127,255,0) contours,hierarchy = cv.findContours(thresh, 1, 2) diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown index 0049d3131d..e96598b11e 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown @@ -29,6 +29,7 @@ import numpy as np import cv2 as cv im = cv.imread('test.jpg') +assert im is not None, "file could not be read, check with os.path.exists()" imgray = cv.cvtColor(im, cv.COLOR_BGR2GRAY) ret, thresh = cv.threshold(imgray, 127, 255, 0) contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown index 65f5b75401..fc278669b0 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown @@ -41,6 +41,7 @@ import cv2 as cv import numpy as np img = cv.imread('star.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) ret,thresh = cv.threshold(img_gray, 127, 255,0) contours,hierarchy = cv.findContours(thresh,2,1) @@ -92,8 +93,10 @@ docs. 
import cv2 as cv import numpy as np -img1 = cv.imread('star.jpg',0) -img2 = cv.imread('star2.jpg',0) +img1 = cv.imread('star.jpg', cv.IMREAD_GRAYSCALE) +img2 = cv.imread('star2.jpg', cv.IMREAD_GRAYSCALE) +assert img1 is not None, "file could not be read, check with os.path.exists()" +assert img2 is not None, "file could not be read, check with os.path.exists()" ret, thresh = cv.threshold(img1, 127, 255,0) ret, thresh2 = cv.threshold(img2, 127, 255,0) diff --git a/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown b/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown index 1b626df94f..82ce0d45ab 100644 --- a/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown +++ b/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown @@ -29,6 +29,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('opencv_logo.png') +assert img is not None, "file could not be read, check with os.path.exists()" kernel = np.ones((5,5),np.float32)/25 dst = cv.filter2D(img,-1,kernel) @@ -70,6 +71,7 @@ import numpy as np from matplotlib import pyplot as plt img = cv.imread('opencv-logo-white.png') +assert img is not None, "file could not be read, check with os.path.exists()" blur = cv.blur(img,(5,5)) diff --git a/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown b/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown index add96f2962..6dd151fe96 100644 --- a/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown +++ b/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown @@ -28,6 +28,7 @@ import numpy as np import cv2 as cv img = cv.imread('messi5.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" res = cv.resize(img,None,fx=2, fy=2, interpolation = cv.INTER_CUBIC) @@ -49,7 +50,8 @@ function. See the below example for a shift of (100,50): import numpy as np import cv2 as cv -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" rows,cols = img.shape M = np.float32([[1,0,100],[0,1,50]]) @@ -87,7 +89,8 @@ where: To find this transformation matrix, OpenCV provides a function, **cv.getRotationMatrix2D**. Check out the below example which rotates the image by 90 degree with respect to center without any scaling. @code{.py} -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" rows,cols = img.shape # cols-1 and rows-1 are the coordinate limits. @@ -108,6 +111,7 @@ which is to be passed to **cv.warpAffine**. Check the below example, and also look at the points I selected (which are marked in green color): @code{.py} img = cv.imread('drawing.png') +assert img is not None, "file could not be read, check with os.path.exists()" rows,cols,ch = img.shape pts1 = np.float32([[50,50],[200,50],[50,200]]) @@ -137,6 +141,7 @@ matrix. 
See the code below: @code{.py} img = cv.imread('sudoku.png') +assert img is not None, "file could not be read, check with os.path.exists()" rows,cols,ch = img.shape pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]]) diff --git a/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown b/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown index 7dc22d37aa..349ebac031 100644 --- a/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown +++ b/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown @@ -93,6 +93,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('messi5.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" mask = np.zeros(img.shape[:2],np.uint8) bgdModel = np.zeros((1,65),np.float64) @@ -122,7 +123,8 @@ remaining background with gray. Then loaded that mask image in OpenCV, edited or got with corresponding values in newly added mask image. Check the code below:* @code{.py} # newmask is the mask image I manually labelled -newmask = cv.imread('newmask.png',0) +newmask = cv.imread('newmask.png', cv.IMREAD_GRAYSCALE) +assert newmask is not None, "file could not be read, check with os.path.exists()" # wherever it is marked white (sure foreground), change mask=1 # wherever it is marked black (sure background), change mask=0 diff --git a/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown b/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown index 0a52cd431c..0b9556f2bb 100644 --- a/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown +++ b/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown @@ -42,7 +42,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('dave.jpg',0) +img = cv.imread('dave.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" laplacian = cv.Laplacian(img,cv.CV_64F) sobelx = cv.Sobel(img,cv.CV_64F,1,0,ksize=5) @@ -79,7 +80,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('box.png',0) +img = cv.imread('box.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" # Output dtype = cv.CV_8U sobelx8u = cv.Sobel(img,cv.CV_8U,1,0,ksize=5) diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown index 492897255a..8e05a64080 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown @@ -38,6 +38,7 @@ import numpy as np import cv2 as cv img = cv.imread('home.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV) hist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256]) @@ -55,6 +56,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('home.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV) hist, xbins, ybins = np.histogram2d(h.ravel(),s.ravel(),[180,256],[[0,180],[0,256]]) @@ -89,6 +91,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('home.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV) hist = cv.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 
256] ) diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown index a235617914..dce31c376b 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown @@ -38,10 +38,12 @@ import cv2 as cvfrom matplotlib import pyplot as plt #roi is the object or region of object we need to find roi = cv.imread('rose_red.png') +assert roi is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV) #target is the image we search in target = cv.imread('rose.png') +assert target is not None, "file could not be read, check with os.path.exists()" hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV) # Find the histograms using calcHist. Can be done with np.histogram2d also @@ -85,9 +87,11 @@ import numpy as np import cv2 as cv roi = cv.imread('rose_red.png') +assert roi is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV) target = cv.imread('rose.png') +assert target is not None, "file could not be read, check with os.path.exists()" hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV) # calculating object histogram diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown index 8cb24139e8..5667cee36c 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown @@ -77,7 +77,8 @@ and its parameters : So let's start with a sample image. Simply load an image in grayscale mode and find its full histogram. @code{.py} -img = cv.imread('home.jpg',0) +img = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" hist = cv.calcHist([img],[0],None,[256],[0,256]) @endcode hist is a 256x1 array, each value corresponds to number of pixels in that image with its @@ -121,7 +122,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('home.jpg',0) +img = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" plt.hist(img.ravel(),256,[0,256]); plt.show() @endcode You will get a plot as below : @@ -136,6 +138,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('home.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" color = ('b','g','r') for i,col in enumerate(color): histr = cv.calcHist([img],[i],None,[256],[0,256]) @@ -164,7 +167,8 @@ We used cv.calcHist() to find the histogram of the full image. What if you want of some regions of an image? Just create a mask image with white color on the region you want to find histogram and black otherwise. Then pass this as the mask. 
@code{.py} -img = cv.imread('home.jpg',0) +img = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" # create a mask mask = np.zeros(img.shape[:2], np.uint8) diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown index 99ef285b08..bc9c69a714 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown @@ -30,7 +30,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('wiki.jpg',0) +img = cv.imread('wiki.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" hist,bins = np.histogram(img.flatten(),256,[0,256]) @@ -81,7 +82,8 @@ output is our histogram equalized image. Below is a simple code snippet showing its usage for same image we used : @code{.py} -img = cv.imread('wiki.jpg',0) +img = cv.imread('wiki.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" equ = cv.equalizeHist(img) res = np.hstack((img,equ)) #stacking images side-by-side cv.imwrite('res.png',res) @@ -124,7 +126,8 @@ Below code snippet shows how to apply CLAHE in OpenCV: import numpy as np import cv2 as cv -img = cv.imread('tsukuba_l.png',0) +img = cv.imread('tsukuba_l.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" # create a CLAHE object (Arguments are optional). clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) diff --git a/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown b/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown index 7a424e9daf..570ad9145c 100644 --- a/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown +++ b/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown @@ -23,7 +23,8 @@ explained in the documentation. So we directly go to the code. import numpy as np import cv2 as cv -img = cv.imread('opencv-logo-white.png',0) +img = cv.imread('opencv-logo-white.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" img = cv.medianBlur(img,5) cimg = cv.cvtColor(img,cv.COLOR_GRAY2BGR) diff --git a/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown b/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown index 84a62d14cd..f52a2ce411 100644 --- a/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown +++ b/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown @@ -38,7 +38,8 @@ Here, as an example, I would use a 5x5 kernel with full of ones. 
Let's see it ho import cv2 as cv import numpy as np -img = cv.imread('j.png',0) +img = cv.imread('j.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" kernel = np.ones((5,5),np.uint8) erosion = cv.erode(img,kernel,iterations = 1) @endcode diff --git a/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown b/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown index 602ab29ad6..0470211fd3 100644 --- a/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown +++ b/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown @@ -31,6 +31,7 @@ Similarly while expanding, area becomes 4 times in each level. We can find Gauss **cv.pyrDown()** and **cv.pyrUp()** functions. @code{.py} img = cv.imread('messi5.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" lower_reso = cv.pyrDown(higher_reso) @endcode Below is the 4 levels in an image pyramid. @@ -84,6 +85,8 @@ import numpy as np,sys A = cv.imread('apple.jpg') B = cv.imread('orange.jpg') +assert A is not None, "file could not be read, check with os.path.exists()" +assert B is not None, "file could not be read, check with os.path.exists()" # generate Gaussian pyramid for A G = A.copy() diff --git a/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown b/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown index 551f117879..1557118ab6 100644 --- a/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown +++ b/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown @@ -38,9 +38,11 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" img2 = img.copy() -template = cv.imread('template.jpg',0) +template = cv.imread('template.jpg', cv.IMREAD_GRAYSCALE) +assert template is not None, "file could not be read, check with os.path.exists()" w, h = template.shape[::-1] # All the 6 methods for comparison in a list @@ -113,8 +115,10 @@ import numpy as np from matplotlib import pyplot as plt img_rgb = cv.imread('mario.png') +assert img_rgb is not None, "file could not be read, check with os.path.exists()" img_gray = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY) -template = cv.imread('mario_coin.png',0) +template = cv.imread('mario_coin.png', cv.IMREAD_GRAYSCALE) +assert template is not None, "file could not be read, check with os.path.exists()" w, h = template.shape[::-1] res = cv.matchTemplate(img_gray,template,cv.TM_CCOEFF_NORMED) diff --git a/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown b/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown index f52e9c5db6..7a200725de 100644 --- a/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown +++ b/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown @@ -37,7 +37,8 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv.imread('gradient.png',0) +img = cv.imread('gradient.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" ret,thresh1 = cv.threshold(img,127,255,cv.THRESH_BINARY) ret,thresh2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV) ret,thresh3 = cv.threshold(img,127,255,cv.THRESH_TRUNC) @@ -85,7 +86,8 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = 
cv.imread('sudoku.png',0) +img = cv.imread('sudoku.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" img = cv.medianBlur(img,5) ret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY) @@ -133,7 +135,8 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv.imread('noisy2.png',0) +img = cv.imread('noisy2.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" # global thresholding ret1,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY) @@ -183,7 +186,8 @@ where It actually finds a value of t which lies in between two peaks such that variances to both classes are minimal. It can be simply implemented in Python as follows: @code{.py} -img = cv.imread('noisy2.png',0) +img = cv.imread('noisy2.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" blur = cv.GaussianBlur(img,(5,5),0) # find normalized_histogram, and its cumulative distribution function diff --git a/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown b/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown index 6c4533a1b0..59337b1355 100644 --- a/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown +++ b/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown @@ -54,7 +54,8 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" f = np.fft.fft2(img) fshift = np.fft.fftshift(f) magnitude_spectrum = 20*np.log(np.abs(fshift)) @@ -121,7 +122,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" dft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT) dft_shift = np.fft.fftshift(dft) @@ -184,7 +186,8 @@ So how do we find this optimal size ? OpenCV provides a function, **cv.getOptima this. It is applicable to both **cv.dft()** and **np.fft.fft2()**. Let's check their performance using IPython magic command %timeit. 
@code{.py} -In [16]: img = cv.imread('messi5.jpg',0) +In [15]: img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +In [16]: assert img is not None, "file could not be read, check with os.path.exists()" In [17]: rows,cols = img.shape In [18]: print("{} {}".format(rows,cols)) 342 548 diff --git a/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown b/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown index ad3c233f30..9536bf3e30 100644 --- a/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown +++ b/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown @@ -49,6 +49,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('coins.png') +assert img is not None, "file could not be read, check with os.path.exists()" gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU) @endcode diff --git a/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown b/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown index 64fabe4564..dce4cf2e5f 100644 --- a/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown +++ b/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown @@ -56,7 +56,7 @@ import numpy as np import cv2 as cv img = cv.imread('messi_2.jpg') -mask = cv.imread('mask2.png',0) +mask = cv.imread('mask2.png', cv.IMREAD_GRAYSCALE) dst = cv.inpaint(img,mask,3,cv.INPAINT_TELEA) diff --git a/doc/tutorials/introduction/linux_eclipse/linux_eclipse.markdown b/doc/tutorials/introduction/linux_eclipse/linux_eclipse.markdown index 76f1b2aecf..5d38298e0b 100644 --- a/doc/tutorials/introduction/linux_eclipse/linux_eclipse.markdown +++ b/doc/tutorials/introduction/linux_eclipse/linux_eclipse.markdown @@ -63,7 +63,7 @@ Making a project int main( int argc, char** argv ) { Mat image; - image = imread( argv[1], 1 ); + image = imread( argv[1], IMREAD_COLOR ); if( argc != 2 || !image.data ) { diff --git a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.markdown b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.markdown index ee3f1eb7f9..d08280db64 100644 --- a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.markdown +++ b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.markdown @@ -42,7 +42,7 @@ int main(int argc, char** argv ) } Mat image; - image = imread( argv[1], 1 ); + image = imread( argv[1], IMREAD_COLOR ); if ( !image.data ) { diff --git a/modules/calib3d/test/test_calibration_hand_eye.cpp b/modules/calib3d/test/test_calibration_hand_eye.cpp index 50bcbd7aff..aa8b34d6d9 100644 --- a/modules/calib3d/test/test_calibration_hand_eye.cpp +++ b/modules/calib3d/test/test_calibration_hand_eye.cpp @@ -427,9 +427,9 @@ public: eps_rvec_noise[CALIB_HAND_EYE_ANDREFF] = 1.0e-2; eps_rvec_noise[CALIB_HAND_EYE_DANIILIDIS] = 1.0e-2; - eps_tvec_noise[CALIB_HAND_EYE_TSAI] = 5.0e-2; - eps_tvec_noise[CALIB_HAND_EYE_PARK] = 5.0e-2; - eps_tvec_noise[CALIB_HAND_EYE_HORAUD] = 5.0e-2; + eps_tvec_noise[CALIB_HAND_EYE_TSAI] = 7.0e-2; + eps_tvec_noise[CALIB_HAND_EYE_PARK] = 7.0e-2; + eps_tvec_noise[CALIB_HAND_EYE_HORAUD] = 7.0e-2; if (eyeToHandConfig) { eps_tvec_noise[CALIB_HAND_EYE_ANDREFF] = 7.0e-2; @@ -454,7 +454,7 @@ void CV_CalibrateHandEyeTest::run(int) { ts->set_failed_test_info(cvtest::TS::OK); - RNG& rng = ts->get_rng(); + RNG& rng = cv::theRNG(); std::vector > vec_rvec_diff(5); std::vector > vec_tvec_diff(5); diff --git a/modules/calib3d/test/test_chesscorners.cpp b/modules/calib3d/test/test_chesscorners.cpp index b4d0628c87..7226da999a 
100644 --- a/modules/calib3d/test/test_chesscorners.cpp +++ b/modules/calib3d/test/test_chesscorners.cpp @@ -224,7 +224,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename ) /* read the image */ String img_file = board_list[idx * 2]; - Mat gray = imread( folder + img_file, 0); + Mat gray = imread( folder + img_file, IMREAD_GRAYSCALE); if( gray.empty() ) { diff --git a/modules/calib3d/test/test_homography.cpp b/modules/calib3d/test/test_homography.cpp index 09478dae03..41188a066d 100644 --- a/modules/calib3d/test/test_homography.cpp +++ b/modules/calib3d/test/test_homography.cpp @@ -73,60 +73,27 @@ int METHOD[METHODS_COUNT] = {0, cv::RANSAC, cv::LMEDS, cv::RHO}; using namespace cv; using namespace std; -class CV_HomographyTest: public cvtest::ArrayTest -{ -public: - CV_HomographyTest(); - ~CV_HomographyTest(); - - void run (int); - -protected: - - int method; - int image_size; - double reproj_threshold; - double sigma; - -private: - float max_diff, max_2diff; - bool check_matrix_size(const cv::Mat& H); - bool check_matrix_diff(const cv::Mat& original, const cv::Mat& found, const int norm_type, double &diff); - int check_ransac_mask_1(const Mat& src, const Mat& mask); - int check_ransac_mask_2(const Mat& original_mask, const Mat& found_mask); - - void print_information_1(int j, int N, int method, const Mat& H); - void print_information_2(int j, int N, int method, const Mat& H, const Mat& H_res, int k, double diff); - void print_information_3(int method, int j, int N, const Mat& mask); - void print_information_4(int method, int j, int N, int k, int l, double diff); - void print_information_5(int method, int j, int N, int l, double diff); - void print_information_6(int method, int j, int N, int k, double diff, bool value); - void print_information_7(int method, int j, int N, int k, double diff, bool original_value, bool found_value); - void print_information_8(int method, int j, int N, int k, int l, double diff); -}; - -CV_HomographyTest::CV_HomographyTest() : max_diff(1e-2f), max_2diff(2e-2f) -{ - method = 0; - image_size = 100; - reproj_threshold = 3.0; - sigma = 0.01; -} -CV_HomographyTest::~CV_HomographyTest() {} +namespace HomographyTestUtils { + +static const float max_diff = 0.032f; +static const float max_2diff = 0.020f; +static const int image_size = 100; +static const double reproj_threshold = 3.0; +static const double sigma = 0.01; -bool CV_HomographyTest::check_matrix_size(const cv::Mat& H) +static bool check_matrix_size(const cv::Mat& H) { return (H.rows == 3) && (H.cols == 3); } -bool CV_HomographyTest::check_matrix_diff(const cv::Mat& original, const cv::Mat& found, const int norm_type, double &diff) +static bool check_matrix_diff(const cv::Mat& original, const cv::Mat& found, const int norm_type, double &diff) { diff = cvtest::norm(original, found, norm_type); return diff <= max_diff; } -int CV_HomographyTest::check_ransac_mask_1(const Mat& src, const Mat& mask) +static int check_ransac_mask_1(const Mat& src, const Mat& mask) { if (!(mask.cols == 1) && (mask.rows == src.cols)) return 1; if (countNonZero(mask) < mask.rows) return 2; @@ -134,14 +101,14 @@ int CV_HomographyTest::check_ransac_mask_1(const Mat& src, const Mat& mask) return 0; } -int CV_HomographyTest::check_ransac_mask_2(const Mat& original_mask, const Mat& found_mask) +static int check_ransac_mask_2(const Mat& original_mask, const Mat& found_mask) { if (!(found_mask.cols == 1) && (found_mask.rows == original_mask.rows)) return 1; for (int i = 0; i < found_mask.rows; ++i) if (found_mask.at(i, 0) > 
1) return 2; return 0; } -void CV_HomographyTest::print_information_1(int j, int N, int _method, const Mat& H) +static void print_information_1(int j, int N, int _method, const Mat& H) { cout << endl; cout << "Checking for homography matrix sizes..." << endl; cout << endl; cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector "; @@ -153,7 +120,7 @@ void CV_HomographyTest::print_information_1(int j, int N, int _method, const Mat cout << "Number of rows: " << H.rows << " Number of cols: " << H.cols << endl; cout << endl; } -void CV_HomographyTest::print_information_2(int j, int N, int _method, const Mat& H, const Mat& H_res, int k, double diff) +static void print_information_2(int j, int N, int _method, const Mat& H, const Mat& H_res, int k, double diff) { cout << endl; cout << "Checking for accuracy of homography matrix computing..." << endl; cout << endl; cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector "; @@ -169,7 +136,7 @@ void CV_HomographyTest::print_information_2(int j, int N, int _method, const Mat cout << "Maximum allowed difference: " << max_diff << endl; cout << endl; } -void CV_HomographyTest::print_information_3(int _method, int j, int N, const Mat& mask) +static void print_information_3(int _method, int j, int N, const Mat& mask) { cout << endl; cout << "Checking for inliers/outliers mask..." << endl; cout << endl; cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector "; @@ -181,7 +148,7 @@ void CV_HomographyTest::print_information_3(int _method, int j, int N, const Mat cout << "Number of rows: " << mask.rows << " Number of cols: " << mask.cols << endl; cout << endl; } -void CV_HomographyTest::print_information_4(int _method, int j, int N, int k, int l, double diff) +static void print_information_4(int _method, int j, int N, int k, int l, double diff) { cout << endl; cout << "Checking for accuracy of reprojection error computing..." << endl; cout << endl; cout << "Method: "; if (_method == 0) cout << 0 << endl; else cout << "CV_LMEDS" << endl; @@ -195,7 +162,7 @@ void CV_HomographyTest::print_information_4(int _method, int j, int N, int k, in cout << "Maximum allowed difference: " << max_2diff << endl; cout << endl; } -void CV_HomographyTest::print_information_5(int _method, int j, int N, int l, double diff) +static void print_information_5(int _method, int j, int N, int l, double diff) { cout << endl; cout << "Checking for accuracy of reprojection error computing..." << endl; cout << endl; cout << "Method: "; if (_method == 0) cout << 0 << endl; else cout << "CV_LMEDS" << endl; @@ -208,7 +175,7 @@ void CV_HomographyTest::print_information_5(int _method, int j, int N, int l, do cout << "Maximum allowed difference: " << max_diff << endl; cout << endl; } -void CV_HomographyTest::print_information_6(int _method, int j, int N, int k, double diff, bool value) +static void print_information_6(int _method, int j, int N, int k, double diff, bool value) { cout << endl; cout << "Checking for inliers/outliers mask..." 
<< endl; cout << endl; cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl; @@ -221,7 +188,7 @@ void CV_HomographyTest::print_information_6(int _method, int j, int N, int k, do cout << "Value of found mask: "<< value << endl; cout << endl; } -void CV_HomographyTest::print_information_7(int _method, int j, int N, int k, double diff, bool original_value, bool found_value) +static void print_information_7(int _method, int j, int N, int k, double diff, bool original_value, bool found_value) { cout << endl; cout << "Checking for inliers/outliers mask..." << endl; cout << endl; cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl; @@ -234,7 +201,7 @@ void CV_HomographyTest::print_information_7(int _method, int j, int N, int k, do cout << "Value of original mask: "<< original_value << " Value of found mask: " << found_value << endl; cout << endl; } -void CV_HomographyTest::print_information_8(int _method, int j, int N, int k, int l, double diff) +static void print_information_8(int _method, int j, int N, int k, int l, double diff) { cout << endl; cout << "Checking for reprojection error of inlier..." << endl; cout << endl; cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl; @@ -248,11 +215,15 @@ void CV_HomographyTest::print_information_8(int _method, int j, int N, int k, in cout << "Maximum allowed difference: " << max_2diff << endl; cout << endl; } -void CV_HomographyTest::run(int) +} // HomographyTestUtils:: + + +TEST(Calib3d_Homography, accuracy) { + using namespace HomographyTestUtils; for (int N = MIN_COUNT_OF_POINTS; N <= MAX_COUNT_OF_POINTS; ++N) { - RNG& rng = ts->get_rng(); + RNG& rng = cv::theRNG(); float *src_data = new float [2*N]; @@ -308,7 +279,7 @@ void CV_HomographyTest::run(int) for (int i = 0; i < METHODS_COUNT; ++i) { - method = METHOD[i]; + const int method = METHOD[i]; switch (method) { case 0: @@ -411,7 +382,7 @@ void CV_HomographyTest::run(int) for (int i = 0; i < METHODS_COUNT; ++i) { - method = METHOD[i]; + const int method = METHOD[i]; switch (method) { case 0: @@ -573,8 +544,6 @@ void CV_HomographyTest::run(int) } } -TEST(Calib3d_Homography, accuracy) { CV_HomographyTest test; test.safe_run(); } - TEST(Calib3d_Homography, EKcase) { float pt1data[] = diff --git a/modules/calib3d/test/test_stereomatching.cpp b/modules/calib3d/test/test_stereomatching.cpp index 4ea23ebff3..02d1823d2d 100644 --- a/modules/calib3d/test/test_stereomatching.cpp +++ b/modules/calib3d/test/test_stereomatching.cpp @@ -456,8 +456,8 @@ void CV_StereoMatchingTest::run(int) string datasetFullDirName = dataPath + DATASETS_DIR + datasetName + "/"; Mat leftImg = imread(datasetFullDirName + LEFT_IMG_NAME); Mat rightImg = imread(datasetFullDirName + RIGHT_IMG_NAME); - Mat trueLeftDisp = imread(datasetFullDirName + TRUE_LEFT_DISP_NAME, 0); - Mat trueRightDisp = imread(datasetFullDirName + TRUE_RIGHT_DISP_NAME, 0); + Mat trueLeftDisp = imread(datasetFullDirName + TRUE_LEFT_DISP_NAME, IMREAD_GRAYSCALE); + Mat trueRightDisp = imread(datasetFullDirName + TRUE_RIGHT_DISP_NAME, IMREAD_GRAYSCALE); Rect calcROI; if( leftImg.empty() || rightImg.empty() || trueLeftDisp.empty() ) @@ -835,9 +835,9 @@ TEST_P(Calib3d_StereoBM_BufferBM, memAllocsTest) const int SADWindowSize = get<1>(get<1>(GetParam())); String path = 
cvtest::TS::ptr()->get_data_path() + "cv/stereomatching/datasets/teddy/"; - Mat leftImg = imread(path + "im2.png", 0); + Mat leftImg = imread(path + "im2.png", IMREAD_GRAYSCALE); ASSERT_FALSE(leftImg.empty()); - Mat rightImg = imread(path + "im6.png", 0); + Mat rightImg = imread(path + "im6.png", IMREAD_GRAYSCALE); ASSERT_FALSE(rightImg.empty()); Mat leftDisp; { @@ -923,9 +923,9 @@ TEST(Calib3d_StereoSGBM, regression) { CV_StereoSGBMTest test; test.safe_run(); TEST(Calib3d_StereoSGBM_HH4, regression) { String path = cvtest::TS::ptr()->get_data_path() + "cv/stereomatching/datasets/teddy/"; - Mat leftImg = imread(path + "im2.png", 0); + Mat leftImg = imread(path + "im2.png", IMREAD_GRAYSCALE); ASSERT_FALSE(leftImg.empty()); - Mat rightImg = imread(path + "im6.png", 0); + Mat rightImg = imread(path + "im6.png", IMREAD_GRAYSCALE); ASSERT_FALSE(rightImg.empty()); Mat testData = imread(path + "disp2_hh4.png",-1); ASSERT_FALSE(testData.empty()); diff --git a/modules/core/include/opencv2/core/fast_math.hpp b/modules/core/include/opencv2/core/fast_math.hpp index 9ee7dba672..47a2948222 100644 --- a/modules/core/include/opencv2/core/fast_math.hpp +++ b/modules/core/include/opencv2/core/fast_math.hpp @@ -306,7 +306,7 @@ CV_INLINE int cvIsInf( double value ) #elif defined(__x86_64__) || defined(_M_X64) || defined(__aarch64__) || defined(_M_ARM64) || defined(__PPC64__) || defined(__loongarch64) Cv64suf ieee754; ieee754.f = value; - return (ieee754.u & 0x7fffffff00000000) == + return (ieee754.u & 0x7fffffffffffffff) == 0x7ff0000000000000; #else Cv64suf ieee754; diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index 027072a5da..d4fdbf05ee 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -247,6 +247,7 @@ std::wstring GetTempFileNameWinRT(std::wstring prefix) #if defined __MACH__ && defined __APPLE__ #include <mach/mach.h> #include <mach/mach_time.h> +#include <sys/sysctl.h> #endif #endif @@ -635,6 +636,14 @@ struct HWFeatures #if (defined __ARM_FP && (((__ARM_FP & 0x2) != 0) && defined __ARM_NEON__)) have[CV_CPU_FP16] = true; #endif + #if (defined __ARM_FEATURE_DOTPROD) + int has_feat_dotprod = 0; + size_t has_feat_dotprod_size = sizeof(has_feat_dotprod); + sysctlbyname("hw.optional.arm.FEAT_DotProd", &has_feat_dotprod, &has_feat_dotprod_size, NULL, 0); + if (has_feat_dotprod) { + have[CV_CPU_NEON_DOTPROD] = true; + } + #endif #elif (defined __clang__) #if (defined __ARM_NEON__ || (defined __ARM_NEON && defined __aarch64__)) have[CV_CPU_NEON] = true; diff --git a/modules/core/test/test_math.cpp b/modules/core/test/test_math.cpp index 580b4dcb10..f15f984957 100644 --- a/modules/core/test/test_math.cpp +++ b/modules/core/test/test_math.cpp @@ -3992,6 +3992,13 @@ TEST(Core_FastMath, InlineNaN) EXPECT_EQ( cvIsNaN((double) NAN), 1); EXPECT_EQ( cvIsNaN((double) -NAN), 1); EXPECT_EQ( cvIsNaN(0.0), 0); + + // Regression: check the +/-Inf cases + Cv64suf suf; + suf.u = 0x7FF0000000000000UL; + EXPECT_EQ( cvIsNaN(suf.f), 0); + suf.u = 0xFFF0000000000000UL; + EXPECT_EQ( cvIsNaN(suf.f), 0); } TEST(Core_FastMath, InlineIsInf) @@ -4003,6 +4010,13 @@ TEST(Core_FastMath, InlineIsInf) EXPECT_EQ( cvIsInf((double) HUGE_VAL), 1); EXPECT_EQ( cvIsInf((double) -HUGE_VAL), 1); EXPECT_EQ( cvIsInf(0.0), 0); + + // Regression: check the cases of 0x7FF00000xxxxxxxx + Cv64suf suf; + suf.u = 0x7FF0000000000001UL; + EXPECT_EQ( cvIsInf(suf.f), 0); + suf.u = 0x7FF0000012345678UL; + EXPECT_EQ( cvIsInf(suf.f), 0); } }} // namespace diff --git a/modules/dnn/test/test_int8_layers.cpp b/modules/dnn/test/test_int8_layers.cpp index
3551ee239f..8b3cd01f29 100644 --- a/modules/dnn/test/test_int8_layers.cpp +++ b/modules/dnn/test/test_int8_layers.cpp @@ -974,6 +974,9 @@ TEST_P(Test_Int8_nets, opencv_face_detector) TEST_P(Test_Int8_nets, EfficientDet) { + if (cvtest::skipUnstableTests) + throw SkipTestException("Skip unstable test"); // detail: https://github.com/opencv/opencv/pull/23167 + applyTestTag(CV_TEST_TAG_DEBUG_VERYLONG); if (target == DNN_TARGET_OPENCL_FP16 && !ocl::Device::getDefault().isIntel()) applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16); diff --git a/modules/features2d/src/sift.simd.hpp b/modules/features2d/src/sift.simd.hpp index 3d809f67ed..674648da8b 100644 --- a/modules/features2d/src/sift.simd.hpp +++ b/modules/features2d/src/sift.simd.hpp @@ -960,11 +960,18 @@ if( dstMat.type() == CV_32F ) __dst = v_min(v_max(v_cvt_f32(v_round(__dst * __nrm2)), __min), __max); v_store(dst + k, __dst); } +#endif +#if defined(__GNUC__) && __GNUC__ >= 9 +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Waggressive-loop-optimizations" // iteration XX invokes undefined behavior #endif for( ; k < len; k++ ) { dst[k] = saturate_cast(rawDst[k]*nrm2); } +#if defined(__GNUC__) && __GNUC__ >= 9 +#pragma GCC diagnostic pop +#endif } else // CV_8U { @@ -984,9 +991,8 @@ else // CV_8U #endif #if defined(__GNUC__) && __GNUC__ >= 9 -// avoid warning "iteration 7 invokes undefined behavior" on Linux ARM64 #pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Waggressive-loop-optimizations" +#pragma GCC diagnostic ignored "-Waggressive-loop-optimizations" // iteration XX invokes undefined behavior #endif for( ; k < len; k++ ) { diff --git a/modules/features2d/test/test_descriptors_regression.cpp b/modules/features2d/test/test_descriptors_regression.cpp index 1a750feb8c..0258fea0f3 100644 --- a/modules/features2d/test/test_descriptors_regression.cpp +++ b/modules/features2d/test/test_descriptors_regression.cpp @@ -82,7 +82,7 @@ TEST( Features2d_DescriptorExtractor, batch_ORB ) for( i = 0; i < n; i++ ) { string imgname = format("%s/img%d.png", path.c_str(), i+1); - Mat img = imread(imgname, 0); + Mat img = imread(imgname, IMREAD_GRAYSCALE); imgs.push_back(img); } @@ -110,7 +110,7 @@ TEST( Features2d_DescriptorExtractor, batch_SIFT ) for( i = 0; i < n; i++ ) { string imgname = format("%s/img%d.png", path.c_str(), i+1); - Mat img = imread(imgname, 0); + Mat img = imread(imgname, IMREAD_GRAYSCALE); imgs.push_back(img); } diff --git a/modules/imgcodecs/misc/java/test/ImgcodecsTest.java b/modules/imgcodecs/misc/java/test/ImgcodecsTest.java index 8fe15d2536..ba22aac06e 100644 --- a/modules/imgcodecs/misc/java/test/ImgcodecsTest.java +++ b/modules/imgcodecs/misc/java/test/ImgcodecsTest.java @@ -45,7 +45,7 @@ public class ImgcodecsTest extends OpenCVTestCase { } public void testImreadStringInt() { - dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, 0); + dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, Imgcodecs.IMREAD_GRAYSCALE); assertFalse(dst.empty()); assertEquals(1, dst.channels()); assertTrue(512 == dst.cols()); diff --git a/modules/imgproc/test/test_color.cpp b/modules/imgproc/test/test_color.cpp index 318dae66db..2c89932adc 100644 --- a/modules/imgproc/test/test_color.cpp +++ b/modules/imgproc/test/test_color.cpp @@ -1077,7 +1077,7 @@ double CV_ColorLabTest::get_success_error_level( int /*test_case_idx*/, int i, i { int depth = test_mat[i][j].depth(); // j == 0 is for forward code, j == 1 is for inverse code - return (depth == CV_8U) ? (srgb ? 32 : 8) : + return (depth == CV_8U) ? (srgb ? 
37 : 8) : //(depth == CV_16U) ? 32 : // 16u is disabled srgb ? ((j == 0) ? 0.4 : 0.0055) : 1e-3; } @@ -1256,7 +1256,7 @@ double CV_ColorLuvTest::get_success_error_level( int /*test_case_idx*/, int i, i { int depth = test_mat[i][j].depth(); // j == 0 is for forward code, j == 1 is for inverse code - return (depth == CV_8U) ? (srgb ? 36 : 8) : + return (depth == CV_8U) ? (srgb ? 37 : 8) : //(depth == CV_16U) ? 32 : // 16u is disabled 5e-2; } diff --git a/modules/imgproc/test/test_connectedcomponents.cpp b/modules/imgproc/test/test_connectedcomponents.cpp index e1a6b761c7..8717217cdf 100644 --- a/modules/imgproc/test/test_connectedcomponents.cpp +++ b/modules/imgproc/test/test_connectedcomponents.cpp @@ -81,7 +81,7 @@ void CV_ConnectedComponentsTest::run(int /* start_from */) int ccltype[] = { cv::CCL_DEFAULT, cv::CCL_WU, cv::CCL_GRANA, cv::CCL_BOLELLI, cv::CCL_SAUF, cv::CCL_BBDT, cv::CCL_SPAGHETTI }; string exp_path = string(ts->get_data_path()) + "connectedcomponents/ccomp_exp.png"; - Mat exp = imread(exp_path, 0); + Mat exp = imread(exp_path, IMREAD_GRAYSCALE); Mat orig = imread(string(ts->get_data_path()) + "connectedcomponents/concentric_circles.png", 0); if (orig.empty()) diff --git a/modules/imgproc/test/test_convhull.cpp b/modules/imgproc/test/test_convhull.cpp index 14f560b97e..70251e3a25 100644 --- a/modules/imgproc/test/test_convhull.cpp +++ b/modules/imgproc/test/test_convhull.cpp @@ -180,7 +180,7 @@ cvTsIsPointOnLineSegment(const cv::Point2f &x, const cv::Point2f &a, const cv::P double d2 = cvTsDist(cvPoint2D32f(x.x, x.y), cvPoint2D32f(b.x, b.y)); double d3 = cvTsDist(cvPoint2D32f(a.x, a.y), cvPoint2D32f(b.x, b.y)); - return (abs(d1 + d2 - d3) <= (1E-5)); + return (abs(d1 + d2 - d3) <= (1E-4)); } diff --git a/modules/imgproc/test/test_imgproc_umat.cpp b/modules/imgproc/test/test_imgproc_umat.cpp index 08b85595cb..74bfdac621 100644 --- a/modules/imgproc/test/test_imgproc_umat.cpp +++ b/modules/imgproc/test/test_imgproc_umat.cpp @@ -53,7 +53,7 @@ protected: void run(int) { string imgpath = string(ts->get_data_path()) + "shared/lena.png"; - Mat img = imread(imgpath, 1), gray, smallimg, result; + Mat img = imread(imgpath, IMREAD_COLOR), gray, smallimg, result; UMat uimg = img.getUMat(ACCESS_READ), ugray, usmallimg, uresult; cvtColor(img, gray, COLOR_BGR2GRAY); diff --git a/modules/imgproc/test/test_imgwarp.cpp b/modules/imgproc/test/test_imgwarp.cpp index 1b4e0b2cf4..c01bccf71e 100644 --- a/modules/imgproc/test/test_imgwarp.cpp +++ b/modules/imgproc/test/test_imgwarp.cpp @@ -102,7 +102,6 @@ void CV_ImgWarpBaseTest::get_test_array_types_and_sizes( int test_case_idx, int cn = cvtest::randInt(rng) % 3 + 1; cvtest::ArrayTest::get_test_array_types_and_sizes( test_case_idx, sizes, types ); depth = depth == 0 ? CV_8U : depth == 1 ? 
CV_16U : CV_32F; - cn += cn == 2; types[INPUT][0] = types[INPUT_OUTPUT][0] = types[REF_INPUT_OUTPUT][0] = CV_MAKETYPE(depth, cn); if( test_array[INPUT].size() > 1 ) diff --git a/modules/imgproc/test/test_imgwarp_strict.cpp b/modules/imgproc/test/test_imgwarp_strict.cpp index da8a197b3f..83a5e23781 100644 --- a/modules/imgproc/test/test_imgwarp_strict.cpp +++ b/modules/imgproc/test/test_imgwarp_strict.cpp @@ -151,8 +151,6 @@ void CV_ImageWarpBaseTest::generate_test_data() depth = rng.uniform(0, CV_64F); int cn = rng.uniform(1, 4); - while (cn == 2) - cn = rng.uniform(1, 4); src.create(ssize, CV_MAKE_TYPE(depth, cn)); @@ -237,7 +235,7 @@ float CV_ImageWarpBaseTest::get_success_error_level(int _interpolation, int) con else if (_interpolation == INTER_LANCZOS4) return 1.0f; else if (_interpolation == INTER_NEAREST) - return 1.0f; + return 255.0f; // FIXIT: check is not reliable for Black/White (0/255) images else if (_interpolation == INTER_AREA) return 2.0f; else @@ -430,8 +428,6 @@ void CV_Resize_Test::generate_test_data() depth = rng.uniform(0, CV_64F); int cn = rng.uniform(1, 4); - while (cn == 2) - cn = rng.uniform(1, 4); src.create(ssize, CV_MAKE_TYPE(depth, cn)); diff --git a/modules/imgproc/test/test_watershed.cpp b/modules/imgproc/test/test_watershed.cpp index 90307ca30c..efe5aa2fae 100644 --- a/modules/imgproc/test/test_watershed.cpp +++ b/modules/imgproc/test/test_watershed.cpp @@ -60,7 +60,7 @@ CV_WatershedTest::~CV_WatershedTest() {} void CV_WatershedTest::run( int /* start_from */) { string exp_path = string(ts->get_data_path()) + "watershed/wshed_exp.png"; - Mat exp = imread(exp_path, 0); + Mat exp = imread(exp_path, IMREAD_GRAYSCALE); Mat orig = imread(string(ts->get_data_path()) + "inpaint/orig.png"); FileStorage fs(string(ts->get_data_path()) + "watershed/comp.xml", FileStorage::READ); diff --git a/modules/java/test/android_test/src/org/opencv/test/OpenCVTestCase.java b/modules/java/test/android_test/src/org/opencv/test/OpenCVTestCase.java index 802bb2daa4..0ebd0db538 100644 --- a/modules/java/test/android_test/src/org/opencv/test/OpenCVTestCase.java +++ b/modules/java/test/android_test/src/org/opencv/test/OpenCVTestCase.java @@ -149,7 +149,7 @@ public class OpenCVTestCase extends TestCase { rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128)); rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH); - grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0); + grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, Imgcodecs.IMREAD_GRAYSCALE); gray255_32f_3d = new Mat(new int[]{matSize, matSize, matSize}, CvType.CV_32F, new Scalar(255.0)); diff --git a/modules/java/test/pure_test/src/org/opencv/test/OpenCVTestCase.java b/modules/java/test/pure_test/src/org/opencv/test/OpenCVTestCase.java index 7ed8a41ba8..8fe2f269ed 100644 --- a/modules/java/test/pure_test/src/org/opencv/test/OpenCVTestCase.java +++ b/modules/java/test/pure_test/src/org/opencv/test/OpenCVTestCase.java @@ -175,7 +175,7 @@ public class OpenCVTestCase extends TestCase { rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128)); rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH); - grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0); + grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, Imgcodecs.IMREAD_GRAYSCALE); gray255_32f_3d = new Mat(new int[]{matSize, matSize, matSize}, CvType.CV_32F, new Scalar(255.0)); diff --git a/modules/objdetect/test/test_cascadeandhog.cpp b/modules/objdetect/test/test_cascadeandhog.cpp index e318f1a56d..4151b899e3 100644 --- 
diff --git a/modules/objdetect/test/test_cascadeandhog.cpp b/modules/objdetect/test/test_cascadeandhog.cpp
index e318f1a56d..4151b899e3 100644
--- a/modules/objdetect/test/test_cascadeandhog.cpp
+++ b/modules/objdetect/test/test_cascadeandhog.cpp
@@ -138,7 +138,7 @@ int CV_DetectorTest::prepareData( FileStorage& _fs )
             String filename;
             it >> filename;
             imageFilenames.push_back(filename);
-            Mat img = imread( dataPath+filename, 1 );
+            Mat img = imread( dataPath+filename, IMREAD_COLOR );
             images.push_back( img );
         }
     }
diff --git a/modules/photo/include/opencv2/photo.hpp b/modules/photo/include/opencv2/photo.hpp
index c2e89a3858..cef4e4da59 100644
--- a/modules/photo/include/opencv2/photo.hpp
+++ b/modules/photo/include/opencv2/photo.hpp
@@ -201,8 +201,8 @@ CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,
 /** @brief Modification of fastNlMeansDenoising function for images sequence where consecutive images have been
 captured in small period of time. For example video. This version of the function is for grayscale
-images or for manual manipulation with colorspaces. For more details see
-
+images or for manual manipulation with colorspaces. See @cite Buades2005DenoisingIS for more details
+(open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
 @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel images sequence. All images should
 have the same type and
@@ -228,8 +228,8 @@ CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputA
 /** @brief Modification of fastNlMeansDenoising function for images sequence where consecutive images have been
 captured in small period of time. For example video. This version of the function is for grayscale
-images or for manual manipulation with colorspaces. For more details see
-
+images or for manual manipulation with colorspaces. See @cite Buades2005DenoisingIS for more details
+(open access [here](https://static.aminer.org/pdf/PDF/000/317/196/spatio_temporal_wiener_filtering_of_image_sequences_using_a_parametric.pdf)).
 @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 2-channel, 3-channel or 4-channel images sequence. All images should
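The photo.hpp comment above documents fastNlMeansDenoisingMulti, which denoises one frame of a short sequence by using the temporally neighbouring frames as extra samples. A minimal sketch of such a call; the frame count, filter parameters and file names are illustrative assumptions, not values taken from the patch:

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/photo.hpp>
    #include <vector>

    int main()
    {
        // Load a short grayscale sequence; all frames must have the same size and type.
        std::vector<cv::Mat> frames;
        for (int i = 0; i < 5; i++)
            frames.push_back(cv::imread(cv::format("frame_%02d.png", i), cv::IMREAD_GRAYSCALE));

        // Denoise the middle frame (index 2) using a temporal window of 5 frames centred on it.
        cv::Mat denoised;
        cv::fastNlMeansDenoisingMulti(frames, denoised, 2, 5,
                                      /*h=*/4.0f, /*templateWindowSize=*/7, /*searchWindowSize=*/21);
        return denoised.empty() ? 1 : 0;
    }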
diff --git a/modules/photo/test/test_denoising.cpp b/modules/photo/test/test_denoising.cpp
index 2cd2e4be6c..fa330b85a0 100644
--- a/modules/photo/test/test_denoising.cpp
+++ b/modules/photo/test/test_denoising.cpp
@@ -157,7 +157,7 @@ TEST(Photo_White, issue_2646)
 TEST(Photo_Denoising, speed)
 {
     string imgname = string(cvtest::TS::ptr()->get_data_path()) + "shared/5MP.png";
-    Mat src = imread(imgname, 0), dst;
+    Mat src = imread(imgname, IMREAD_GRAYSCALE), dst;
     double t = (double)getTickCount();
     fastNlMeansDenoising(src, dst, 5, 7, 21);
diff --git a/modules/ts/include/opencv2/ts/ts_perf.hpp b/modules/ts/include/opencv2/ts/ts_perf.hpp
index b79d75d5a6..5ca22d2b1e 100644
--- a/modules/ts/include/opencv2/ts/ts_perf.hpp
+++ b/modules/ts/include/opencv2/ts/ts_perf.hpp
@@ -454,9 +454,6 @@ private:
     performance_metrics metrics;
     void validateMetrics();
-    static int64 _timeadjustment;
-    static int64 _calibrate();
-
     static void warmup_impl(cv::Mat m, WarmUpType wtype);
     static int getSizeInBytes(cv::InputArray a);
     static cv::Size getSize(cv::InputArray a);
diff --git a/modules/ts/src/cuda_test.cpp b/modules/ts/src/cuda_test.cpp
index a50f2cc3ce..6b8d19cccf 100644
--- a/modules/ts/src/cuda_test.cpp
+++ b/modules/ts/src/cuda_test.cpp
@@ -522,13 +522,35 @@ namespace cvtest
         int validCount = 0;
-        for (size_t i = 0; i < gold.size(); ++i)
+        if (actual.size() == gold.size())
         {
-            const cv::KeyPoint& p1 = gold[i];
-            const cv::KeyPoint& p2 = actual[i];
+            for (size_t i = 0; i < gold.size(); ++i)
+            {
+                const cv::KeyPoint& p1 = gold[i];
+                const cv::KeyPoint& p2 = actual[i];
-            if (keyPointsEquals(p1, p2))
-                ++validCount;
+                if (keyPointsEquals(p1, p2))
+                    ++validCount;
+            }
+        }
+        else
+        {
+            std::vector<cv::KeyPoint>& shorter = gold;
+            std::vector<cv::KeyPoint>& longer = actual;
+            if (actual.size() < gold.size())
+            {
+                shorter = actual;
+                longer = gold;
+            }
+            for (size_t i = 0; i < shorter.size(); ++i)
+            {
+                const cv::KeyPoint& p1 = shorter[i];
+                const cv::KeyPoint& p2 = longer[i];
+                const cv::KeyPoint& p3 = longer[i+1];
+
+                if (keyPointsEquals(p1, p2) || keyPointsEquals(p1, p3))
+                    ++validCount;
+            }
         }
         return validCount;
diff --git a/modules/ts/src/ts.cpp b/modules/ts/src/ts.cpp
index 3539f17646..e5d36b435b 100644
--- a/modules/ts/src/ts.cpp
+++ b/modules/ts/src/ts.cpp
@@ -623,20 +623,27 @@ void TS::set_gtest_status()
 void TS::update_context( BaseTest* test, int test_case_idx, bool update_ts_context )
 {
+    CV_UNUSED(update_ts_context);
+
     if( current_test_info.test != test )
     {
         for( int i = 0; i <= CONSOLE_IDX; i++ )
             output_buf[i] = string();
-        rng = RNG(params.rng_seed);
-        current_test_info.rng_seed0 = current_test_info.rng_seed = rng.state;
+    }
+
+    if (test_case_idx >= 0)
+    {
+        current_test_info.rng_seed = param_seed + test_case_idx;
+        current_test_info.rng_seed0 = current_test_info.rng_seed;
+
+        rng = RNG(current_test_info.rng_seed);
+        cv::theRNG() = rng;
     }
     current_test_info.test = test;
     current_test_info.test_case_idx = test_case_idx;
     current_test_info.code = 0;
     cvSetErrStatus( CV_StsOk );
-    if( update_ts_context )
-        current_test_info.rng_seed = rng.state;
 }
diff --git a/modules/ts/src/ts_perf.cpp b/modules/ts/src/ts_perf.cpp
index 683ba8afae..958a2e300d 100644
--- a/modules/ts/src/ts_perf.cpp
+++ b/modules/ts/src/ts_perf.cpp
@@ -26,7 +26,6 @@ using namespace perf;
 int64 TestBase::timeLimitDefault = 0;
 unsigned int TestBase::iterationsLimitDefault = UINT_MAX;
-int64 TestBase::_timeadjustment = 0;
 // Item [0] will be considered the default implementation.
 static std::vector<std::string> available_impls;
@@ -1159,7 +1158,6 @@ void TestBase::Init(const std::vector<std::string> & availableImpls,
     timeLimitDefault = param_time_limit == 0.0 ? 1 : (int64)(param_time_limit * cv::getTickFrequency());
     iterationsLimitDefault = param_force_samples == 0 ? UINT_MAX : param_force_samples;
-    _timeadjustment = _calibrate();
 }
 void TestBase::RecordRunParameters()
@@ -1193,66 +1191,6 @@ enum PERF_STRATEGY TestBase::getCurrentModulePerformanceStrategy()
     return strategyForce == PERF_STRATEGY_DEFAULT ? strategyModule : strategyForce;
 }
-
-int64 TestBase::_calibrate()
-{
-    CV_TRACE_FUNCTION();
-    if (iterationsLimitDefault <= 1)
-        return 0;
-
-    class _helper : public ::perf::TestBase
-    {
-    public:
-        _helper() { testStrategy = PERF_STRATEGY_BASE; }
-        performance_metrics& getMetrics() { return calcMetrics(); }
-        virtual void TestBody() {}
-        virtual void PerfTestBody()
-        {
-            //the whole system warmup
-            SetUp();
-            cv::Mat a(2048, 2048, CV_32S, cv::Scalar(1));
-            cv::Mat b(2048, 2048, CV_32S, cv::Scalar(2));
-            declare.time(30);
-            double s = 0;
-            declare.iterations(20);
-            minIters = nIters = 20;
-            for(; next() && startTimer(); stopTimer())
-                s+=a.dot(b);
-            declare.time(s);
-
-            //self calibration
-            SetUp();
-            declare.iterations(1000);
-            minIters = nIters = 1000;
-            for(int iters = 0; next() && startTimer(); iters++, stopTimer()) { /*std::cout << iters << nIters << std::endl;*/ }
-        }
-    };
-
-    // Initialize ThreadPool
-    class _dummyParallel : public ParallelLoopBody
-    {
-    public:
-        void operator()(const cv::Range& range) const
-        {
-            // nothing
-            CV_UNUSED(range);
-        }
-    };
-    parallel_for_(cv::Range(0, 1000), _dummyParallel());
-
-    _timeadjustment = 0;
-    _helper h;
-    h.PerfTestBody();
-    double compensation = h.getMetrics().min;
-    if (getCurrentModulePerformanceStrategy() == PERF_STRATEGY_SIMPLE)
-    {
-        CV_Assert(compensation < 0.01 * cv::getTickFrequency());
-        compensation = 0.0f; // simple strategy doesn't require any compensation
-    }
-    LOGD("Time compensation is %.0f", compensation);
-    return (int64)compensation;
-}
-
 #ifdef _MSC_VER
 # pragma warning(push)
 # pragma warning(disable:4355) // 'this' : used in base member initializer list
@@ -1561,9 +1499,8 @@ void TestBase::stopTimer()
     if (lastTime == 0)
         ADD_FAILURE() << " stopTimer() is called before startTimer()/next()";
     lastTime = time - lastTime;
+    CV_Assert(lastTime >= 0); // TODO: CV_Check* for int64
     totalTime += lastTime;
-    lastTime -= _timeadjustment;
-    if (lastTime < 0) lastTime = 0;
     times.push_back(lastTime);
     lastTime = 0;
diff --git a/modules/video/test/test_estimaterigid.cpp b/modules/video/test/test_estimaterigid.cpp
index 0b4179732f..6173c29c9d 100644
--- a/modules/video/test/test_estimaterigid.cpp
+++ b/modules/video/test/test_estimaterigid.cpp
@@ -83,7 +83,7 @@ struct WrapAff2D
 bool CV_RigidTransform_Test::testNPoints(int from)
 {
-    cv::RNG rng = ts->get_rng();
+    cv::RNG rng = cv::theRNG();
     int progress = 0;
     int k, ntests = 10000;
@@ -181,4 +181,4 @@ void CV_RigidTransform_Test::run( int start_from )
 TEST(Video_RigidFlow, accuracy) { CV_RigidTransform_Test test; test.safe_run(); }
-}} // namespace
\ No newline at end of file
+}} // namespace
diff --git a/samples/cpp/3calibration.cpp b/samples/cpp/3calibration.cpp
index 2495dbd041..115d6987b2 100644
--- a/samples/cpp/3calibration.cpp
+++ b/samples/cpp/3calibration.cpp
@@ -250,7 +250,7 @@ int main( int argc, char** argv )
         {
             int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
             printf("%s\n", imageList[i*3+k].c_str());
-            view = imread(imageList[i*3+k], 1);
+            view = imread(imageList[i*3+k], IMREAD_COLOR);
             if(!view.empty())
             {
@@ -338,7 +338,7 @@ int main( int argc, char** argv )
         {
             int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
             int k2 = k == 0 ? 1 : k == 1 ? 0 : 2;
-            view = imread(imageList[i*3+k], 1);
+            view = imread(imageList[i*3+k], IMREAD_COLOR);
             if(view.empty())
                 continue;
diff --git a/samples/cpp/calibration.cpp b/samples/cpp/calibration.cpp
index 1e8e149940..89b0564e74 100644
--- a/samples/cpp/calibration.cpp
+++ b/samples/cpp/calibration.cpp
@@ -496,7 +496,7 @@ int main( int argc, char** argv )
             view0.copyTo(view);
         }
         else if( i < (int)imageList.size() )
-            view = imread(imageList[i], 1);
+            view = imread(imageList[i], IMREAD_COLOR);
         if(view.empty())
         {
@@ -621,7 +621,7 @@ int main( int argc, char** argv )
         for( i = 0; i < (int)imageList.size(); i++ )
         {
-            view = imread(imageList[i], 1);
+            view = imread(imageList[i], IMREAD_COLOR);
             if(view.empty())
                 continue;
             remap(view, rview, map1, map2, INTER_LINEAR);
diff --git a/samples/cpp/facedetect.cpp b/samples/cpp/facedetect.cpp
index 9c846faf48..144306c20e 100644
--- a/samples/cpp/facedetect.cpp
+++ b/samples/cpp/facedetect.cpp
@@ -145,7 +145,7 @@ int main( int argc, const char** argv )
                 len--;
                 buf[len] = '\0';
                 cout << "file " << buf << endl;
-                image = imread( buf, 1 );
+                image = imread( buf, IMREAD_COLOR );
                 if( !image.empty() )
                 {
                     detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
diff --git a/samples/cpp/pca.cpp b/samples/cpp/pca.cpp
index a5a1c54a92..96fd1e25b1 100644
--- a/samples/cpp/pca.cpp
+++ b/samples/cpp/pca.cpp
@@ -59,7 +59,7 @@ static void read_imgList(const string& filename, vector<Mat>& images) {
     }
     string line;
     while (getline(file, line)) {
-        images.push_back(imread(line, 0));
+        images.push_back(imread(line, IMREAD_GRAYSCALE));
     }
 }
diff --git a/samples/cpp/stereo_calib.cpp b/samples/cpp/stereo_calib.cpp
index 9f5aa56ed6..bfc3b22d71 100644
--- a/samples/cpp/stereo_calib.cpp
+++ b/samples/cpp/stereo_calib.cpp
@@ -80,7 +80,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, float squareSize, b
         for( k = 0; k < 2; k++ )
         {
             const string& filename = imagelist[i*2+k];
-            Mat img = imread(filename, 0);
+            Mat img = imread(filename, IMREAD_GRAYSCALE);
             if(img.empty())
                 break;
             if( imageSize == Size() )
@@ -298,7 +298,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, float squareSize, b
     {
         for( k = 0; k < 2; k++ )
         {
-            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
+            Mat img = imread(goodImageList[i*2+k], IMREAD_GRAYSCALE), rimg, cimg;
             remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
             cvtColor(rimg, cimg, COLOR_GRAY2BGR);
             Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
diff --git a/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesCircles.cpp b/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesCircles.cpp
index 289484dca3..c343dd0a8f 100644
--- a/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesCircles.cpp
+++ b/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesCircles.cpp
@@ -8,7 +8,7 @@ using namespace std;
 int main(int argc, char** argv)
 {
     Mat img, gray;
-    if( argc != 2 || !(img=imread(argv[1], 1)).data)
+    if( argc != 2 || !(img=imread(argv[1], IMREAD_COLOR)).data)
         return -1;
     cvtColor(img, gray, COLOR_BGR2GRAY);
     // smooth it, otherwise a lot of false circles may be detected
diff --git a/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesP.cpp b/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesP.cpp
index e19d29abbb..986b1e79b5 100644
--- a/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesP.cpp
+++ b/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesP.cpp
@@ -7,7 +7,7 @@ using namespace std;
 int main(int argc, char** argv)
 {
     Mat src, dst, color_dst;
-    if( argc != 2 || !(src=imread(argv[1], 0)).data)
+    if( argc != 2 || !(src=imread(argv[1], IMREAD_GRAYSCALE)).data)
         return -1;
     Canny( src, dst, 50, 200, 3 );
diff --git a/samples/cpp/tutorial_code/snippets/imgproc_calcHist.cpp b/samples/cpp/tutorial_code/snippets/imgproc_calcHist.cpp
index 9d1ca46033..274df040b6 100644
--- a/samples/cpp/tutorial_code/snippets/imgproc_calcHist.cpp
+++ b/samples/cpp/tutorial_code/snippets/imgproc_calcHist.cpp
@@ -6,7 +6,7 @@ using namespace cv;
 int main( int argc, char** argv )
 {
     Mat src, hsv;
-    if( argc != 2 || !(src=imread(argv[1], 1)).data )
+    if( argc != 2 || !(src=imread(argv[1], IMREAD_COLOR)).data )
         return -1;
     cvtColor(src, hsv, COLOR_BGR2HSV);
diff --git a/samples/cpp/tutorial_code/snippets/imgproc_drawContours.cpp b/samples/cpp/tutorial_code/snippets/imgproc_drawContours.cpp
index 4dfcde668e..b90dfad840 100644
--- a/samples/cpp/tutorial_code/snippets/imgproc_drawContours.cpp
+++ b/samples/cpp/tutorial_code/snippets/imgproc_drawContours.cpp
@@ -9,7 +9,7 @@ int main( int argc, char** argv )
     Mat src;
     // the first command-line parameter must be a filename of the binary
     // (black-n-white) image
-    if( argc != 2 || !(src=imread(argv[1], 0)).data)
+    if( argc != 2 || !(src=imread(argv[1], IMREAD_GRAYSCALE)).data)
         return -1;
     Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);
diff --git a/samples/cpp/watershed.cpp b/samples/cpp/watershed.cpp
index 9c48ae0ee3..f5df8bebae 100644
--- a/samples/cpp/watershed.cpp
+++ b/samples/cpp/watershed.cpp
@@ -54,7 +54,7 @@ int main( int argc, char** argv )
         return 0;
     }
     string filename = samples::findFile(parser.get<string>("@input"));
-    Mat img0 = imread(filename, 1), imgGray;
+    Mat img0 = imread(filename, IMREAD_COLOR), imgGray;
     if( img0.empty() )
     {
diff --git a/samples/python/calibrate.py b/samples/python/calibrate.py
index bca430b5a5..991a531ede 100755
--- a/samples/python/calibrate.py
+++ b/samples/python/calibrate.py
@@ -57,7 +57,7 @@ def main():
     def processImage(fn):
         print('processing %s... ' % fn)
-        img = cv.imread(fn, 0)
+        img = cv.imread(fn, cv.IMREAD_GRAYSCALE)
         if img is None:
             print("Failed to load", fn)
             return None
diff --git a/samples/python/mouse_and_match.py b/samples/python/mouse_and_match.py
index 0bc2fce76e..9e33264df4 100755
--- a/samples/python/mouse_and_match.py
+++ b/samples/python/mouse_and_match.py
@@ -69,7 +69,7 @@ class App():
                 if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
                     print(infile)
-                    img = cv.imread(infile,1)
+                    img = cv.imread(infile, cv.IMREAD_COLOR)
                     if img is None:
                         continue
                     self.sel = (0,0,0,0)