From a64b51dd94d6e7e8940cd0dfa5a0c91509c65368 Mon Sep 17 00:00:00 2001
From: Christoph Rackwitz
Date: Mon, 9 Jan 2023 01:55:31 -0800
Subject: [PATCH] Merge pull request #23108 from crackwitz:issue-23107

Usage of imread(): magic number 0, unchecked result

* docs: rewrite 0/1 to IMREAD_GRAYSCALE/IMREAD_COLOR in imread()
* samples, apps: rewrite 0/1 to IMREAD_GRAYSCALE/IMREAD_COLOR in imread()
* tests: rewrite 0/1 to IMREAD_GRAYSCALE/IMREAD_COLOR in imread()
* doc/py_tutorials: check imread() result
---
 apps/traincascade/imagestorage.cpp | 2 +-
 .../py_calib3d/py_depthmap/py_depthmap.markdown | 4 ++--
 .../py_epipolar_geometry.markdown | 4 ++--
 .../py_core/py_basic_ops/py_basic_ops.markdown | 2 ++
 .../py_image_arithmetics.markdown | 4 ++++
 .../py_core/py_optimization/py_optimization.markdown | 1 +
 .../py_feature2d/py_brief/py_brief.markdown | 2 +-
 .../py_feature2d/py_fast/py_fast.markdown | 2 +-
 .../py_feature_homography.markdown | 4 ++--
 doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown | 2 +-
 .../py_surf_intro/py_surf_intro.markdown | 2 +-
 .../py_imgproc/py_canny/py_canny.markdown | 3 ++-
 .../py_contour_features/py_contour_features.markdown | 3 ++-
 .../py_contours_begin/py_contours_begin.markdown | 1 +
 .../py_contours_more_functions.markdown | 7 +++++--
 .../py_imgproc/py_filtering/py_filtering.markdown | 2 ++
 .../py_geometric_transformations.markdown | 9 +++++++--
 .../py_imgproc/py_grabcut/py_grabcut.markdown | 4 +++-
 .../py_imgproc/py_gradients/py_gradients.markdown | 6 ++++--
 .../py_2d_histogram/py_2d_histogram.markdown | 3 +++
 .../py_histogram_backprojection.markdown | 4 ++++
 .../py_histogram_begins/py_histogram_begins.markdown | 10 +++++++---
 .../py_histogram_equalization.markdown | 9 ++++++---
 .../py_houghcircles/py_houghcircles.markdown | 3 ++-
 .../py_morphological_ops.markdown | 3 ++-
 .../py_imgproc/py_pyramids/py_pyramids.markdown | 3 +++
 .../py_template_matching.markdown | 10 +++++++---
 .../py_thresholding/py_thresholding.markdown | 12 ++++++++----
 .../py_fourier_transform.markdown | 9 ++++++---
 .../py_imgproc/py_watershed/py_watershed.markdown | 1 +
 .../py_photo/py_inpainting/py_inpainting.markdown | 2 +-
 .../linux_eclipse/linux_eclipse.markdown | 2 +-
 .../linux_gcc_cmake/linux_gcc_cmake.markdown | 2 +-
 modules/calib3d/test/test_chesscorners.cpp | 2 +-
 modules/calib3d/test/test_stereomatching.cpp | 12 ++++++------
 .../features2d/test/test_descriptors_regression.cpp | 4 ++--
 modules/imgcodecs/misc/java/test/ImgcodecsTest.java | 2 +-
 modules/imgproc/test/test_connectedcomponents.cpp | 2 +-
 modules/imgproc/test/test_imgproc_umat.cpp | 2 +-
 modules/imgproc/test/test_watershed.cpp | 2 +-
 .../src/org/opencv/test/OpenCVTestCase.java | 2 +-
 .../src/org/opencv/test/OpenCVTestCase.java | 2 +-
 modules/objdetect/test/test_cascadeandhog.cpp | 2 +-
 modules/photo/test/test_denoising.cpp | 2 +-
 modules/videoio/test/test_ffmpeg.cpp | 2 +-
 samples/cpp/3calibration.cpp | 4 ++--
 samples/cpp/calibration.cpp | 4 ++--
 samples/cpp/facedetect.cpp | 2 +-
 samples/cpp/pca.cpp | 2 +-
 samples/cpp/stereo_calib.cpp | 4 ++--
 .../snippets/imgproc_HoughLinesCircles.cpp | 2 +-
 .../tutorial_code/snippets/imgproc_HoughLinesP.cpp | 2 +-
 .../cpp/tutorial_code/snippets/imgproc_calcHist.cpp | 2 +-
 .../tutorial_code/snippets/imgproc_drawContours.cpp | 2 +-
 samples/cpp/watershed.cpp | 2 +-
 samples/python/calibrate.py | 2 +-
 samples/python/mouse_and_match.py | 2 +-
 57 files changed, 129 insertions(+), 74 deletions(-)

diff --git a/apps/traincascade/imagestorage.cpp b/apps/traincascade/imagestorage.cpp index
a133ccc3d4..f220e5c2b3 100644 --- a/apps/traincascade/imagestorage.cpp +++ b/apps/traincascade/imagestorage.cpp @@ -54,7 +54,7 @@ bool CvCascadeImageReader::NegReader::nextImg() size_t count = imgFilenames.size(); for( size_t i = 0; i < count; i++ ) { - src = imread( imgFilenames[last++], 0 ); + src = imread( imgFilenames[last++], IMREAD_GRAYSCALE ); if( src.empty() ){ last %= count; continue; diff --git a/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown b/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown index f0ea83122b..52d0c5933d 100644 --- a/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown +++ b/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown @@ -41,8 +41,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -imgL = cv.imread('tsukuba_l.png',0) -imgR = cv.imread('tsukuba_r.png',0) +imgL = cv.imread('tsukuba_l.png', cv.IMREAD_GRAYSCALE) +imgR = cv.imread('tsukuba_r.png', cv.IMREAD_GRAYSCALE) stereo = cv.StereoBM_create(numDisparities=16, blockSize=15) disparity = stereo.compute(imgL,imgR) diff --git a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown index 6b8d90882a..ada22222cb 100644 --- a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown +++ b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown @@ -76,8 +76,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img1 = cv.imread('myleft.jpg',0) #queryimage # left image -img2 = cv.imread('myright.jpg',0) #trainimage # right image +img1 = cv.imread('myleft.jpg', cv.IMREAD_GRAYSCALE) #queryimage # left image +img2 = cv.imread('myright.jpg', cv.IMREAD_GRAYSCALE) #trainimage # right image sift = cv.SIFT_create() diff --git a/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown b/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown index 4c6aa4bb92..1594f77200 100644 --- a/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown +++ b/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown @@ -25,6 +25,7 @@ Let's load a color image first: >>> import cv2 as cv >>> img = cv.imread('messi5.jpg') +>>> assert img is not None, "file could not be read, check with os.path.exists()" @endcode You can access a pixel value by its row and column coordinates. For BGR image, it returns an array of Blue, Green, Red values. For grayscale image, just corresponding intensity is returned. @@ -173,6 +174,7 @@ from matplotlib import pyplot as plt BLUE = [255,0,0] img1 = cv.imread('opencv-logo.png') +assert img1 is not None, "file could not be read, check with os.path.exists()" replicate = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REPLICATE) reflect = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT) diff --git a/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown b/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown index d08d974c2f..4b6e8bd3c1 100644 --- a/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown +++ b/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown @@ -50,6 +50,8 @@ Here \f$\gamma\f$ is taken as zero. 
@code{.py} img1 = cv.imread('ml.png') img2 = cv.imread('opencv-logo.png') +assert img1 is not None, "file could not be read, check with os.path.exists()" +assert img2 is not None, "file could not be read, check with os.path.exists()" dst = cv.addWeighted(img1,0.7,img2,0.3,0) @@ -76,6 +78,8 @@ bitwise operations as shown below: # Load two images img1 = cv.imread('messi5.jpg') img2 = cv.imread('opencv-logo-white.png') +assert img1 is not None, "file could not be read, check with os.path.exists()" +assert img2 is not None, "file could not be read, check with os.path.exists()" # I want to put logo on top-left corner, So I create a ROI rows,cols,channels = img2.shape diff --git a/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown b/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown index d24613a643..7d63ffadef 100644 --- a/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown +++ b/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown @@ -37,6 +37,7 @@ of odd sizes ranging from 5 to 49. (Don't worry about what the result will look goal): @code{.py} img1 = cv.imread('messi5.jpg') +assert img1 is not None, "file could not be read, check with os.path.exists()" e1 = cv.getTickCount() for i in range(5,49,2): diff --git a/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown b/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown index 4abcdc1bad..a34e7e7805 100644 --- a/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown +++ b/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown @@ -63,7 +63,7 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('simple.jpg',0) +img = cv.imread('simple.jpg', cv.IMREAD_GRAYSCALE) # Initiate FAST detector star = cv.xfeatures2d.StarDetector_create() diff --git a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown index b1b8a81ca8..1d7b3e3911 100644 --- a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown +++ b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown @@ -98,7 +98,7 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('blox.jpg',0) # `/samples/data/blox.jpg` +img = cv.imread('blox.jpg', cv.IMREAD_GRAYSCALE) # `/samples/data/blox.jpg` # Initiate FAST object with default values fast = cv.FastFeatureDetector_create() diff --git a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown index 6abac2c57b..4597c6bfcf 100644 --- a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown +++ b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown @@ -40,8 +40,8 @@ from matplotlib import pyplot as plt MIN_MATCH_COUNT = 10 -img1 = cv.imread('box.png',0) # queryImage -img2 = cv.imread('box_in_scene.png',0) # trainImage +img1 = cv.imread('box.png', cv.IMREAD_GRAYSCALE) # queryImage +img2 = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE) # trainImage # Initiate SIFT detector sift = cv.SIFT_create() diff --git a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown index 55bc0f2903..73d01aaaa1 100644 --- a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown +++ b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown @@ -67,7 +67,7 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('simple.jpg',0) +img 
= cv.imread('simple.jpg', cv.IMREAD_GRAYSCALE) # Initiate ORB detector orb = cv.ORB_create() diff --git a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown index fc980994c3..5bcd91cce8 100644 --- a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown +++ b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown @@ -76,7 +76,7 @@ and descriptors. First we will see a simple demo on how to find SURF keypoints and descriptors and draw it. All examples are shown in Python terminal since it is just same as SIFT only. @code{.py} ->>> img = cv.imread('fly.png',0) +>>> img = cv.imread('fly.png', cv.IMREAD_GRAYSCALE) # Create SURF object. You can specify params here or later. # Here I set Hessian Threshold to 400 diff --git a/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown b/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown index d36e5784eb..8c651afb35 100644 --- a/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown +++ b/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown @@ -83,7 +83,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" edges = cv.Canny(img,100,200) plt.subplot(121),plt.imshow(img,cmap = 'gray') diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown index e8cfbd6597..91bab9461a 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown @@ -24,7 +24,8 @@ The function **cv.moments()** gives a dictionary of all moment values calculated import numpy as np import cv2 as cv -img = cv.imread('star.jpg',0) +img = cv.imread('star.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" ret,thresh = cv.threshold(img,127,255,0) im2,contours,hierarchy = cv.findContours(thresh, 1, 2) diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown index 74d7b252a5..c10faf2608 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown @@ -29,6 +29,7 @@ import numpy as np import cv2 as cv im = cv.imread('test.jpg') +assert im is not None, "file could not be read, check with os.path.exists()" imgray = cv.cvtColor(im, cv.COLOR_BGR2GRAY) ret, thresh = cv.threshold(imgray, 127, 255, 0) im2, contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown index 397a2a63a0..df4bac93d5 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown @@ -41,6 +41,7 @@ import cv2 as cv import numpy as np img = cv.imread('star.jpg') 
+assert img is not None, "file could not be read, check with os.path.exists()" img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) ret,thresh = cv.threshold(img_gray, 127, 255,0) im2,contours,hierarchy = cv.findContours(thresh,2,1) @@ -92,8 +93,10 @@ docs. import cv2 as cv import numpy as np -img1 = cv.imread('star.jpg',0) -img2 = cv.imread('star2.jpg',0) +img1 = cv.imread('star.jpg', cv.IMREAD_GRAYSCALE) +img2 = cv.imread('star2.jpg', cv.IMREAD_GRAYSCALE) +assert img1 is not None, "file could not be read, check with os.path.exists()" +assert img2 is not None, "file could not be read, check with os.path.exists()" ret, thresh = cv.threshold(img1, 127, 255,0) ret, thresh2 = cv.threshold(img2, 127, 255,0) diff --git a/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown b/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown index 1b626df94f..82ce0d45ab 100644 --- a/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown +++ b/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown @@ -29,6 +29,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('opencv_logo.png') +assert img is not None, "file could not be read, check with os.path.exists()" kernel = np.ones((5,5),np.float32)/25 dst = cv.filter2D(img,-1,kernel) @@ -70,6 +71,7 @@ import numpy as np from matplotlib import pyplot as plt img = cv.imread('opencv-logo-white.png') +assert img is not None, "file could not be read, check with os.path.exists()" blur = cv.blur(img,(5,5)) diff --git a/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown b/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown index add96f2962..6dd151fe96 100644 --- a/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown +++ b/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown @@ -28,6 +28,7 @@ import numpy as np import cv2 as cv img = cv.imread('messi5.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" res = cv.resize(img,None,fx=2, fy=2, interpolation = cv.INTER_CUBIC) @@ -49,7 +50,8 @@ function. See the below example for a shift of (100,50): import numpy as np import cv2 as cv -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" rows,cols = img.shape M = np.float32([[1,0,100],[0,1,50]]) @@ -87,7 +89,8 @@ where: To find this transformation matrix, OpenCV provides a function, **cv.getRotationMatrix2D**. Check out the below example which rotates the image by 90 degree with respect to center without any scaling. @code{.py} -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" rows,cols = img.shape # cols-1 and rows-1 are the coordinate limits. @@ -108,6 +111,7 @@ which is to be passed to **cv.warpAffine**. Check the below example, and also look at the points I selected (which are marked in green color): @code{.py} img = cv.imread('drawing.png') +assert img is not None, "file could not be read, check with os.path.exists()" rows,cols,ch = img.shape pts1 = np.float32([[50,50],[200,50],[50,200]]) @@ -137,6 +141,7 @@ matrix. 
See the code below: @code{.py} img = cv.imread('sudoku.png') +assert img is not None, "file could not be read, check with os.path.exists()" rows,cols,ch = img.shape pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]]) diff --git a/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown b/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown index 7dc22d37aa..349ebac031 100644 --- a/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown +++ b/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown @@ -93,6 +93,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('messi5.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" mask = np.zeros(img.shape[:2],np.uint8) bgdModel = np.zeros((1,65),np.float64) @@ -122,7 +123,8 @@ remaining background with gray. Then loaded that mask image in OpenCV, edited or got with corresponding values in newly added mask image. Check the code below:* @code{.py} # newmask is the mask image I manually labelled -newmask = cv.imread('newmask.png',0) +newmask = cv.imread('newmask.png', cv.IMREAD_GRAYSCALE) +assert newmask is not None, "file could not be read, check with os.path.exists()" # wherever it is marked white (sure foreground), change mask=1 # wherever it is marked black (sure background), change mask=0 diff --git a/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown b/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown index 0a52cd431c..0b9556f2bb 100644 --- a/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown +++ b/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown @@ -42,7 +42,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('dave.jpg',0) +img = cv.imread('dave.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" laplacian = cv.Laplacian(img,cv.CV_64F) sobelx = cv.Sobel(img,cv.CV_64F,1,0,ksize=5) @@ -79,7 +80,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('box.png',0) +img = cv.imread('box.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" # Output dtype = cv.CV_8U sobelx8u = cv.Sobel(img,cv.CV_8U,1,0,ksize=5) diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown index 492897255a..8e05a64080 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown @@ -38,6 +38,7 @@ import numpy as np import cv2 as cv img = cv.imread('home.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV) hist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256]) @@ -55,6 +56,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('home.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV) hist, xbins, ybins = np.histogram2d(h.ravel(),s.ravel(),[180,256],[[0,180],[0,256]]) @@ -89,6 +91,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('home.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV) hist = cv.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 
256] ) diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown index a235617914..dce31c376b 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown @@ -38,10 +38,12 @@ import cv2 as cvfrom matplotlib import pyplot as plt #roi is the object or region of object we need to find roi = cv.imread('rose_red.png') +assert roi is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV) #target is the image we search in target = cv.imread('rose.png') +assert target is not None, "file could not be read, check with os.path.exists()" hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV) # Find the histograms using calcHist. Can be done with np.histogram2d also @@ -85,9 +87,11 @@ import numpy as np import cv2 as cv roi = cv.imread('rose_red.png') +assert roi is not None, "file could not be read, check with os.path.exists()" hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV) target = cv.imread('rose.png') +assert target is not None, "file could not be read, check with os.path.exists()" hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV) # calculating object histogram diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown index 8cb24139e8..5667cee36c 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown @@ -77,7 +77,8 @@ and its parameters : So let's start with a sample image. Simply load an image in grayscale mode and find its full histogram. @code{.py} -img = cv.imread('home.jpg',0) +img = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" hist = cv.calcHist([img],[0],None,[256],[0,256]) @endcode hist is a 256x1 array, each value corresponds to number of pixels in that image with its @@ -121,7 +122,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('home.jpg',0) +img = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" plt.hist(img.ravel(),256,[0,256]); plt.show() @endcode You will get a plot as below : @@ -136,6 +138,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('home.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" color = ('b','g','r') for i,col in enumerate(color): histr = cv.calcHist([img],[i],None,[256],[0,256]) @@ -164,7 +167,8 @@ We used cv.calcHist() to find the histogram of the full image. What if you want of some regions of an image? Just create a mask image with white color on the region you want to find histogram and black otherwise. Then pass this as the mask. 
@code{.py} -img = cv.imread('home.jpg',0) +img = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" # create a mask mask = np.zeros(img.shape[:2], np.uint8) diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown index 99ef285b08..bc9c69a714 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown @@ -30,7 +30,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('wiki.jpg',0) +img = cv.imread('wiki.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" hist,bins = np.histogram(img.flatten(),256,[0,256]) @@ -81,7 +82,8 @@ output is our histogram equalized image. Below is a simple code snippet showing its usage for same image we used : @code{.py} -img = cv.imread('wiki.jpg',0) +img = cv.imread('wiki.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" equ = cv.equalizeHist(img) res = np.hstack((img,equ)) #stacking images side-by-side cv.imwrite('res.png',res) @@ -124,7 +126,8 @@ Below code snippet shows how to apply CLAHE in OpenCV: import numpy as np import cv2 as cv -img = cv.imread('tsukuba_l.png',0) +img = cv.imread('tsukuba_l.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" # create a CLAHE object (Arguments are optional). clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) diff --git a/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown b/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown index 7a424e9daf..570ad9145c 100644 --- a/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown +++ b/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown @@ -23,7 +23,8 @@ explained in the documentation. So we directly go to the code. import numpy as np import cv2 as cv -img = cv.imread('opencv-logo-white.png',0) +img = cv.imread('opencv-logo-white.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" img = cv.medianBlur(img,5) cimg = cv.cvtColor(img,cv.COLOR_GRAY2BGR) diff --git a/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown b/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown index 84a62d14cd..f52a2ce411 100644 --- a/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown +++ b/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown @@ -38,7 +38,8 @@ Here, as an example, I would use a 5x5 kernel with full of ones. 
Let's see it ho import cv2 as cv import numpy as np -img = cv.imread('j.png',0) +img = cv.imread('j.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" kernel = np.ones((5,5),np.uint8) erosion = cv.erode(img,kernel,iterations = 1) @endcode diff --git a/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown b/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown index 602ab29ad6..0470211fd3 100644 --- a/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown +++ b/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown @@ -31,6 +31,7 @@ Similarly while expanding, area becomes 4 times in each level. We can find Gauss **cv.pyrDown()** and **cv.pyrUp()** functions. @code{.py} img = cv.imread('messi5.jpg') +assert img is not None, "file could not be read, check with os.path.exists()" lower_reso = cv.pyrDown(higher_reso) @endcode Below is the 4 levels in an image pyramid. @@ -84,6 +85,8 @@ import numpy as np,sys A = cv.imread('apple.jpg') B = cv.imread('orange.jpg') +assert A is not None, "file could not be read, check with os.path.exists()" +assert B is not None, "file could not be read, check with os.path.exists()" # generate Gaussian pyramid for A G = A.copy() diff --git a/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown b/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown index 551f117879..1557118ab6 100644 --- a/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown +++ b/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown @@ -38,9 +38,11 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" img2 = img.copy() -template = cv.imread('template.jpg',0) +template = cv.imread('template.jpg', cv.IMREAD_GRAYSCALE) +assert template is not None, "file could not be read, check with os.path.exists()" w, h = template.shape[::-1] # All the 6 methods for comparison in a list @@ -113,8 +115,10 @@ import numpy as np from matplotlib import pyplot as plt img_rgb = cv.imread('mario.png') +assert img_rgb is not None, "file could not be read, check with os.path.exists()" img_gray = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY) -template = cv.imread('mario_coin.png',0) +template = cv.imread('mario_coin.png', cv.IMREAD_GRAYSCALE) +assert template is not None, "file could not be read, check with os.path.exists()" w, h = template.shape[::-1] res = cv.matchTemplate(img_gray,template,cv.TM_CCOEFF_NORMED) diff --git a/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown b/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown index f52e9c5db6..7a200725de 100644 --- a/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown +++ b/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown @@ -37,7 +37,8 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv.imread('gradient.png',0) +img = cv.imread('gradient.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" ret,thresh1 = cv.threshold(img,127,255,cv.THRESH_BINARY) ret,thresh2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV) ret,thresh3 = cv.threshold(img,127,255,cv.THRESH_TRUNC) @@ -85,7 +86,8 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = 
cv.imread('sudoku.png',0) +img = cv.imread('sudoku.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" img = cv.medianBlur(img,5) ret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY) @@ -133,7 +135,8 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv.imread('noisy2.png',0) +img = cv.imread('noisy2.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" # global thresholding ret1,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY) @@ -183,7 +186,8 @@ where It actually finds a value of t which lies in between two peaks such that variances to both classes are minimal. It can be simply implemented in Python as follows: @code{.py} -img = cv.imread('noisy2.png',0) +img = cv.imread('noisy2.png', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" blur = cv.GaussianBlur(img,(5,5),0) # find normalized_histogram, and its cumulative distribution function diff --git a/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown b/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown index 6c4533a1b0..59337b1355 100644 --- a/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown +++ b/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown @@ -54,7 +54,8 @@ import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" f = np.fft.fft2(img) fshift = np.fft.fftshift(f) magnitude_spectrum = 20*np.log(np.abs(fshift)) @@ -121,7 +122,8 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -img = cv.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +assert img is not None, "file could not be read, check with os.path.exists()" dft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT) dft_shift = np.fft.fftshift(dft) @@ -184,7 +186,8 @@ So how do we find this optimal size ? OpenCV provides a function, **cv.getOptima this. It is applicable to both **cv.dft()** and **np.fft.fft2()**. Let's check their performance using IPython magic command %timeit. 
@code{.py} -In [16]: img = cv.imread('messi5.jpg',0) +In [15]: img = cv.imread('messi5.jpg', cv.IMREAD_GRAYSCALE) +In [16]: assert img is not None, "file could not be read, check with os.path.exists()" In [17]: rows,cols = img.shape In [18]: print("{} {}".format(rows,cols)) 342 548 diff --git a/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown b/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown index ad3c233f30..9536bf3e30 100644 --- a/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown +++ b/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown @@ -49,6 +49,7 @@ import cv2 as cv from matplotlib import pyplot as plt img = cv.imread('coins.png') +assert img is not None, "file could not be read, check with os.path.exists()" gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU) @endcode diff --git a/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown b/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown index 64fabe4564..dce4cf2e5f 100644 --- a/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown +++ b/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown @@ -56,7 +56,7 @@ import numpy as np import cv2 as cv img = cv.imread('messi_2.jpg') -mask = cv.imread('mask2.png',0) +mask = cv.imread('mask2.png', cv.IMREAD_GRAYSCALE) dst = cv.inpaint(img,mask,3,cv.INPAINT_TELEA) diff --git a/doc/tutorials/introduction/linux_eclipse/linux_eclipse.markdown b/doc/tutorials/introduction/linux_eclipse/linux_eclipse.markdown index 66ca510efb..e20f50764b 100644 --- a/doc/tutorials/introduction/linux_eclipse/linux_eclipse.markdown +++ b/doc/tutorials/introduction/linux_eclipse/linux_eclipse.markdown @@ -55,7 +55,7 @@ Making a project int main( int argc, char** argv ) { Mat image; - image = imread( argv[1], 1 ); + image = imread( argv[1], IMREAD_COLOR ); if( argc != 2 || !image.data ) { diff --git a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.markdown b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.markdown index 0f4d1c8cce..f65540770a 100644 --- a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.markdown +++ b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.markdown @@ -35,7 +35,7 @@ int main(int argc, char** argv ) } Mat image; - image = imread( argv[1], 1 ); + image = imread( argv[1], IMREAD_COLOR ); if ( !image.data ) { diff --git a/modules/calib3d/test/test_chesscorners.cpp b/modules/calib3d/test/test_chesscorners.cpp index 3792ca1bc7..0e68dc4886 100644 --- a/modules/calib3d/test/test_chesscorners.cpp +++ b/modules/calib3d/test/test_chesscorners.cpp @@ -216,7 +216,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename ) /* read the image */ String img_file = board_list[idx * 2]; - Mat gray = imread( folder + img_file, 0); + Mat gray = imread( folder + img_file, IMREAD_GRAYSCALE); if( gray.empty() ) { diff --git a/modules/calib3d/test/test_stereomatching.cpp b/modules/calib3d/test/test_stereomatching.cpp index 4ea23ebff3..02d1823d2d 100644 --- a/modules/calib3d/test/test_stereomatching.cpp +++ b/modules/calib3d/test/test_stereomatching.cpp @@ -456,8 +456,8 @@ void CV_StereoMatchingTest::run(int) string datasetFullDirName = dataPath + DATASETS_DIR + datasetName + "/"; Mat leftImg = imread(datasetFullDirName + LEFT_IMG_NAME); Mat rightImg = imread(datasetFullDirName + RIGHT_IMG_NAME); - Mat trueLeftDisp = imread(datasetFullDirName + TRUE_LEFT_DISP_NAME, 0); - Mat trueRightDisp = 
imread(datasetFullDirName + TRUE_RIGHT_DISP_NAME, 0); + Mat trueLeftDisp = imread(datasetFullDirName + TRUE_LEFT_DISP_NAME, IMREAD_GRAYSCALE); + Mat trueRightDisp = imread(datasetFullDirName + TRUE_RIGHT_DISP_NAME, IMREAD_GRAYSCALE); Rect calcROI; if( leftImg.empty() || rightImg.empty() || trueLeftDisp.empty() ) @@ -835,9 +835,9 @@ TEST_P(Calib3d_StereoBM_BufferBM, memAllocsTest) const int SADWindowSize = get<1>(get<1>(GetParam())); String path = cvtest::TS::ptr()->get_data_path() + "cv/stereomatching/datasets/teddy/"; - Mat leftImg = imread(path + "im2.png", 0); + Mat leftImg = imread(path + "im2.png", IMREAD_GRAYSCALE); ASSERT_FALSE(leftImg.empty()); - Mat rightImg = imread(path + "im6.png", 0); + Mat rightImg = imread(path + "im6.png", IMREAD_GRAYSCALE); ASSERT_FALSE(rightImg.empty()); Mat leftDisp; { @@ -923,9 +923,9 @@ TEST(Calib3d_StereoSGBM, regression) { CV_StereoSGBMTest test; test.safe_run(); TEST(Calib3d_StereoSGBM_HH4, regression) { String path = cvtest::TS::ptr()->get_data_path() + "cv/stereomatching/datasets/teddy/"; - Mat leftImg = imread(path + "im2.png", 0); + Mat leftImg = imread(path + "im2.png", IMREAD_GRAYSCALE); ASSERT_FALSE(leftImg.empty()); - Mat rightImg = imread(path + "im6.png", 0); + Mat rightImg = imread(path + "im6.png", IMREAD_GRAYSCALE); ASSERT_FALSE(rightImg.empty()); Mat testData = imread(path + "disp2_hh4.png",-1); ASSERT_FALSE(testData.empty()); diff --git a/modules/features2d/test/test_descriptors_regression.cpp b/modules/features2d/test/test_descriptors_regression.cpp index 0de2b2bd55..b8d4a4755f 100644 --- a/modules/features2d/test/test_descriptors_regression.cpp +++ b/modules/features2d/test/test_descriptors_regression.cpp @@ -406,7 +406,7 @@ TEST( Features2d_DescriptorExtractor, batch_ORB ) for( i = 0; i < n; i++ ) { string imgname = format("%s/img%d.png", path.c_str(), i+1); - Mat img = imread(imgname, 0); + Mat img = imread(imgname, IMREAD_GRAYSCALE); imgs.push_back(img); } @@ -434,7 +434,7 @@ TEST( Features2d_DescriptorExtractor, batch_SIFT ) for( i = 0; i < n; i++ ) { string imgname = format("%s/img%d.png", path.c_str(), i+1); - Mat img = imread(imgname, 0); + Mat img = imread(imgname, IMREAD_GRAYSCALE); imgs.push_back(img); } diff --git a/modules/imgcodecs/misc/java/test/ImgcodecsTest.java b/modules/imgcodecs/misc/java/test/ImgcodecsTest.java index 8fe15d2536..ba22aac06e 100644 --- a/modules/imgcodecs/misc/java/test/ImgcodecsTest.java +++ b/modules/imgcodecs/misc/java/test/ImgcodecsTest.java @@ -45,7 +45,7 @@ public class ImgcodecsTest extends OpenCVTestCase { } public void testImreadStringInt() { - dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, 0); + dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, Imgcodecs.IMREAD_GRAYSCALE); assertFalse(dst.empty()); assertEquals(1, dst.channels()); assertTrue(512 == dst.cols()); diff --git a/modules/imgproc/test/test_connectedcomponents.cpp b/modules/imgproc/test/test_connectedcomponents.cpp index e1a6b761c7..8717217cdf 100644 --- a/modules/imgproc/test/test_connectedcomponents.cpp +++ b/modules/imgproc/test/test_connectedcomponents.cpp @@ -81,7 +81,7 @@ void CV_ConnectedComponentsTest::run(int /* start_from */) int ccltype[] = { cv::CCL_DEFAULT, cv::CCL_WU, cv::CCL_GRANA, cv::CCL_BOLELLI, cv::CCL_SAUF, cv::CCL_BBDT, cv::CCL_SPAGHETTI }; string exp_path = string(ts->get_data_path()) + "connectedcomponents/ccomp_exp.png"; - Mat exp = imread(exp_path, 0); + Mat exp = imread(exp_path, IMREAD_GRAYSCALE); Mat orig = imread(string(ts->get_data_path()) + "connectedcomponents/concentric_circles.png", 0); if 
(orig.empty()) diff --git a/modules/imgproc/test/test_imgproc_umat.cpp b/modules/imgproc/test/test_imgproc_umat.cpp index 08b85595cb..74bfdac621 100644 --- a/modules/imgproc/test/test_imgproc_umat.cpp +++ b/modules/imgproc/test/test_imgproc_umat.cpp @@ -53,7 +53,7 @@ protected: void run(int) { string imgpath = string(ts->get_data_path()) + "shared/lena.png"; - Mat img = imread(imgpath, 1), gray, smallimg, result; + Mat img = imread(imgpath, IMREAD_COLOR), gray, smallimg, result; UMat uimg = img.getUMat(ACCESS_READ), ugray, usmallimg, uresult; cvtColor(img, gray, COLOR_BGR2GRAY); diff --git a/modules/imgproc/test/test_watershed.cpp b/modules/imgproc/test/test_watershed.cpp index b9356f0eb9..42b6bbb6c7 100644 --- a/modules/imgproc/test/test_watershed.cpp +++ b/modules/imgproc/test/test_watershed.cpp @@ -59,7 +59,7 @@ CV_WatershedTest::~CV_WatershedTest() {} void CV_WatershedTest::run( int /* start_from */) { string exp_path = string(ts->get_data_path()) + "watershed/wshed_exp.png"; - Mat exp = imread(exp_path, 0); + Mat exp = imread(exp_path, IMREAD_GRAYSCALE); Mat orig = imread(string(ts->get_data_path()) + "inpaint/orig.png"); FileStorage fs(string(ts->get_data_path()) + "watershed/comp.xml", FileStorage::READ); diff --git a/modules/java/test/android_test/src/org/opencv/test/OpenCVTestCase.java b/modules/java/test/android_test/src/org/opencv/test/OpenCVTestCase.java index 802bb2daa4..0ebd0db538 100644 --- a/modules/java/test/android_test/src/org/opencv/test/OpenCVTestCase.java +++ b/modules/java/test/android_test/src/org/opencv/test/OpenCVTestCase.java @@ -149,7 +149,7 @@ public class OpenCVTestCase extends TestCase { rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128)); rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH); - grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0); + grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, Imgcodecs.IMREAD_GRAYSCALE); gray255_32f_3d = new Mat(new int[]{matSize, matSize, matSize}, CvType.CV_32F, new Scalar(255.0)); diff --git a/modules/java/test/pure_test/src/org/opencv/test/OpenCVTestCase.java b/modules/java/test/pure_test/src/org/opencv/test/OpenCVTestCase.java index 3fd918dbfe..bd5546188a 100644 --- a/modules/java/test/pure_test/src/org/opencv/test/OpenCVTestCase.java +++ b/modules/java/test/pure_test/src/org/opencv/test/OpenCVTestCase.java @@ -175,7 +175,7 @@ public class OpenCVTestCase extends TestCase { rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128)); rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH); - grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0); + grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, Imgcodecs.IMREAD_GRAYSCALE); gray255_32f_3d = new Mat(new int[]{matSize, matSize, matSize}, CvType.CV_32F, new Scalar(255.0)); diff --git a/modules/objdetect/test/test_cascadeandhog.cpp b/modules/objdetect/test/test_cascadeandhog.cpp index e6bf9a23ca..e9fde88464 100644 --- a/modules/objdetect/test/test_cascadeandhog.cpp +++ b/modules/objdetect/test/test_cascadeandhog.cpp @@ -137,7 +137,7 @@ int CV_DetectorTest::prepareData( FileStorage& _fs ) String filename; it >> filename; imageFilenames.push_back(filename); - Mat img = imread( dataPath+filename, 1 ); + Mat img = imread( dataPath+filename, IMREAD_COLOR ); images.push_back( img ); } } diff --git a/modules/photo/test/test_denoising.cpp b/modules/photo/test/test_denoising.cpp index 2cd2e4be6c..fa330b85a0 100644 --- a/modules/photo/test/test_denoising.cpp +++ b/modules/photo/test/test_denoising.cpp @@ -157,7 +157,7 @@ 
TEST(Photo_White, issue_2646) TEST(Photo_Denoising, speed) { string imgname = string(cvtest::TS::ptr()->get_data_path()) + "shared/5MP.png"; - Mat src = imread(imgname, 0), dst; + Mat src = imread(imgname, IMREAD_GRAYSCALE), dst; double t = (double)getTickCount(); fastNlMeansDenoising(src, dst, 5, 7, 21); diff --git a/modules/videoio/test/test_ffmpeg.cpp b/modules/videoio/test/test_ffmpeg.cpp index 6acc65f177..53dea40418 100644 --- a/modules/videoio/test/test_ffmpeg.cpp +++ b/modules/videoio/test/test_ffmpeg.cpp @@ -194,7 +194,7 @@ public: { string filename = ts->get_data_path() + "readwrite/ordinary.bmp"; VideoCapture cap(filename, CAP_FFMPEG); - Mat img0 = imread(filename, 1); + Mat img0 = imread(filename, IMREAD_COLOR); Mat img, img_next; cap >> img; cap >> img_next; diff --git a/samples/cpp/3calibration.cpp b/samples/cpp/3calibration.cpp index 2495dbd041..115d6987b2 100644 --- a/samples/cpp/3calibration.cpp +++ b/samples/cpp/3calibration.cpp @@ -250,7 +250,7 @@ int main( int argc, char** argv ) { int k1 = k == 0 ? 2 : k == 1 ? 0 : 1; printf("%s\n", imageList[i*3+k].c_str()); - view = imread(imageList[i*3+k], 1); + view = imread(imageList[i*3+k], IMREAD_COLOR); if(!view.empty()) { @@ -338,7 +338,7 @@ int main( int argc, char** argv ) { int k1 = k == 0 ? 2 : k == 1 ? 0 : 1; int k2 = k == 0 ? 1 : k == 1 ? 0 : 2; - view = imread(imageList[i*3+k], 1); + view = imread(imageList[i*3+k], IMREAD_COLOR); if(view.empty()) continue; diff --git a/samples/cpp/calibration.cpp b/samples/cpp/calibration.cpp index 5d67600239..24c9b4417b 100644 --- a/samples/cpp/calibration.cpp +++ b/samples/cpp/calibration.cpp @@ -456,7 +456,7 @@ int main( int argc, char** argv ) view0.copyTo(view); } else if( i < (int)imageList.size() ) - view = imread(imageList[i], 1); + view = imread(imageList[i], IMREAD_COLOR); if(view.empty()) { @@ -581,7 +581,7 @@ int main( int argc, char** argv ) for( i = 0; i < (int)imageList.size(); i++ ) { - view = imread(imageList[i], 1); + view = imread(imageList[i], IMREAD_COLOR); if(view.empty()) continue; remap(view, rview, map1, map2, INTER_LINEAR); diff --git a/samples/cpp/facedetect.cpp b/samples/cpp/facedetect.cpp index 9c846faf48..144306c20e 100644 --- a/samples/cpp/facedetect.cpp +++ b/samples/cpp/facedetect.cpp @@ -145,7 +145,7 @@ int main( int argc, const char** argv ) len--; buf[len] = '\0'; cout << "file " << buf << endl; - image = imread( buf, 1 ); + image = imread( buf, IMREAD_COLOR ); if( !image.empty() ) { detectAndDraw( image, cascade, nestedCascade, scale, tryflip ); diff --git a/samples/cpp/pca.cpp b/samples/cpp/pca.cpp index a5a1c54a92..96fd1e25b1 100644 --- a/samples/cpp/pca.cpp +++ b/samples/cpp/pca.cpp @@ -59,7 +59,7 @@ static void read_imgList(const string& filename, vector& images) { } string line; while (getline(file, line)) { - images.push_back(imread(line, 0)); + images.push_back(imread(line, IMREAD_GRAYSCALE)); } } diff --git a/samples/cpp/stereo_calib.cpp b/samples/cpp/stereo_calib.cpp index 9f5aa56ed6..bfc3b22d71 100644 --- a/samples/cpp/stereo_calib.cpp +++ b/samples/cpp/stereo_calib.cpp @@ -80,7 +80,7 @@ StereoCalib(const vector& imagelist, Size boardSize, float squareSize, b for( k = 0; k < 2; k++ ) { const string& filename = imagelist[i*2+k]; - Mat img = imread(filename, 0); + Mat img = imread(filename, IMREAD_GRAYSCALE); if(img.empty()) break; if( imageSize == Size() ) @@ -298,7 +298,7 @@ StereoCalib(const vector& imagelist, Size boardSize, float squareSize, b { for( k = 0; k < 2; k++ ) { - Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg; + Mat img 
= imread(goodImageList[i*2+k], IMREAD_GRAYSCALE), rimg, cimg; remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR); cvtColor(rimg, cimg, COLOR_GRAY2BGR); Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h)); diff --git a/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesCircles.cpp b/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesCircles.cpp index 289484dca3..c343dd0a8f 100644 --- a/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesCircles.cpp +++ b/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesCircles.cpp @@ -8,7 +8,7 @@ using namespace std; int main(int argc, char** argv) { Mat img, gray; - if( argc != 2 || !(img=imread(argv[1], 1)).data) + if( argc != 2 || !(img=imread(argv[1], IMREAD_COLOR)).data) return -1; cvtColor(img, gray, COLOR_BGR2GRAY); // smooth it, otherwise a lot of false circles may be detected diff --git a/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesP.cpp b/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesP.cpp index e19d29abbb..986b1e79b5 100644 --- a/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesP.cpp +++ b/samples/cpp/tutorial_code/snippets/imgproc_HoughLinesP.cpp @@ -7,7 +7,7 @@ using namespace std; int main(int argc, char** argv) { Mat src, dst, color_dst; - if( argc != 2 || !(src=imread(argv[1], 0)).data) + if( argc != 2 || !(src=imread(argv[1], IMREAD_GRAYSCALE)).data) return -1; Canny( src, dst, 50, 200, 3 ); diff --git a/samples/cpp/tutorial_code/snippets/imgproc_calcHist.cpp b/samples/cpp/tutorial_code/snippets/imgproc_calcHist.cpp index 9d1ca46033..274df040b6 100644 --- a/samples/cpp/tutorial_code/snippets/imgproc_calcHist.cpp +++ b/samples/cpp/tutorial_code/snippets/imgproc_calcHist.cpp @@ -6,7 +6,7 @@ using namespace cv; int main( int argc, char** argv ) { Mat src, hsv; - if( argc != 2 || !(src=imread(argv[1], 1)).data ) + if( argc != 2 || !(src=imread(argv[1], IMREAD_COLOR)).data ) return -1; cvtColor(src, hsv, COLOR_BGR2HSV); diff --git a/samples/cpp/tutorial_code/snippets/imgproc_drawContours.cpp b/samples/cpp/tutorial_code/snippets/imgproc_drawContours.cpp index 4dfcde668e..b90dfad840 100644 --- a/samples/cpp/tutorial_code/snippets/imgproc_drawContours.cpp +++ b/samples/cpp/tutorial_code/snippets/imgproc_drawContours.cpp @@ -9,7 +9,7 @@ int main( int argc, char** argv ) Mat src; // the first command-line parameter must be a filename of the binary // (black-n-white) image - if( argc != 2 || !(src=imread(argv[1], 0)).data) + if( argc != 2 || !(src=imread(argv[1], IMREAD_GRAYSCALE)).data) return -1; Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3); diff --git a/samples/cpp/watershed.cpp b/samples/cpp/watershed.cpp index 9c48ae0ee3..f5df8bebae 100644 --- a/samples/cpp/watershed.cpp +++ b/samples/cpp/watershed.cpp @@ -54,7 +54,7 @@ int main( int argc, char** argv ) return 0; } string filename = samples::findFile(parser.get("@input")); - Mat img0 = imread(filename, 1), imgGray; + Mat img0 = imread(filename, IMREAD_COLOR), imgGray; if( img0.empty() ) { diff --git a/samples/python/calibrate.py b/samples/python/calibrate.py index bca430b5a5..991a531ede 100755 --- a/samples/python/calibrate.py +++ b/samples/python/calibrate.py @@ -57,7 +57,7 @@ def main(): def processImage(fn): print('processing %s... 
' % fn) - img = cv.imread(fn, 0) + img = cv.imread(fn, cv.IMREAD_GRAYSCALE) if img is None: print("Failed to load", fn) return None diff --git a/samples/python/mouse_and_match.py b/samples/python/mouse_and_match.py index 0bc2fce76e..9e33264df4 100755 --- a/samples/python/mouse_and_match.py +++ b/samples/python/mouse_and_match.py @@ -69,7 +69,7 @@ class App(): if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm": print(infile) - img = cv.imread(infile,1) + img = cv.imread(infile, cv.IMREAD_COLOR) if img is None: continue self.sel = (0,0,0,0)
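
Every hunk above applies the same two-part pattern: pass a named ImreadModes constant to imread() instead of the magic numbers 0/1, and check the returned value before using it, because imread() does not raise an error on a missing or unreadable file; it returns None in Python (an empty Mat in C++/Java). A minimal sketch of that pattern follows; the file names are placeholders, not files touched by this patch.
@code{.py}
import cv2 as cv

# Named flags instead of magic numbers: 0 -> cv.IMREAD_GRAYSCALE, 1 -> cv.IMREAD_COLOR
gray  = cv.imread('some_gray_input.png', cv.IMREAD_GRAYSCALE)   # placeholder file name
color = cv.imread('some_color_input.png', cv.IMREAD_COLOR)      # placeholder file name

# imread() returns None on failure instead of raising, so check before use
assert gray is not None, "file could not be read, check with os.path.exists()"
assert color is not None, "file could not be read, check with os.path.exists()"

edges = cv.Canny(gray, 100, 200)
@endcode
The C++ and Java hunks use the equivalent constants (cv::IMREAD_GRAYSCALE / Imgcodecs.IMREAD_GRAYSCALE) and test the result with Mat::empty() or !image.data instead of an assert.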