Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/11609/head
Alexander Alekhin 7 years ago
commit 0f298a4203
  1. 3
      apps/interactive-calibration/frameProcessor.cpp
  2. 251
      doc/tutorials/imgproc/histograms/back_projection/back_projection.markdown
  3. 278
      doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.markdown
  4. 188
      doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.markdown
  5. 126
      doc/tutorials/imgproc/histograms/histogram_equalization/histogram_equalization.markdown
  6. 209
      doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.markdown
  7. 10
      doc/tutorials/imgproc/table_of_content_imgproc.markdown
  8. 44
      modules/calib3d/src/calibinit.cpp
  9. 2
      modules/calib3d/src/calibration.cpp
  10. 8
      modules/core/include/opencv2/core/cvdef.h
  11. 2
      modules/core/include/opencv2/core/private.cuda.hpp
  12. 2
      modules/dnn/include/opencv2/dnn/all_layers.hpp
  13. 4
      modules/dnn/misc/python/pyopencv_dnn.hpp
  14. 6
      modules/dnn/src/dnn.cpp
  15. 2
      modules/dnn/src/layers/batch_norm_layer.cpp
  16. 2
      modules/dnn/src/layers/convolution_layer.cpp
  17. 4
      modules/dnn/src/layers/fully_connected_layer.cpp
  18. 2
      modules/dnn/src/layers/normalize_bbox_layer.cpp
  19. 3
      modules/dnn/src/layers/recurrent_layers.cpp
  20. 89
      modules/dnn/src/layers/scale_layer.cpp
  21. 145
      modules/dnn/src/layers/shift_layer.cpp
  22. 2
      modules/dnn/src/ocl4dnn/src/math_functions.cpp
  23. 34
      modules/dnn/src/tensorflow/tf_importer.cpp
  24. 7
      modules/dnn/test/test_halide_layers.cpp
  25. 3
      modules/dnn/test/test_tf_importer.cpp
  26. 2
      modules/highgui/include/opencv2/highgui/highgui_c.h
  27. 9
      modules/imgproc/include/opencv2/imgproc.hpp
  28. 26
      modules/imgproc/perf/perf_houghcircles.cpp
  29. 43
      modules/imgproc/perf/perf_houghlines.cpp
  30. 3
      modules/imgproc/src/color.hpp
  31. 18
      modules/imgproc/src/filter.cpp
  32. 175
      modules/imgproc/src/hough.cpp
  33. 75
      modules/imgproc/test/test_filter.cpp
  34. 51
      modules/imgproc/test/test_houghcircles.cpp
  35. 90
      modules/imgproc/test/test_houghlines.cpp
  36. 16
      modules/imgproc/test/test_intersection.cpp
  37. 2
      modules/photo/src/merge.cpp
  38. 41
      modules/videoio/src/cap.cpp
  39. 3
      modules/videoio/src/cap_gstreamer.cpp
  40. 183
      modules/videoio/src/cap_msmf.cpp
  41. 10
      modules/videoio/src/cap_openni.cpp
  42. 8
      modules/videoio/src/precomp.hpp
  43. 62
      samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp
  44. 126
      samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp
  45. 99
      samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp
  46. 137
      samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp
  47. 70
      samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
  48. 130
      samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp
  49. 61
      samples/dnn/tf_text_graph_ssd.py
  50. 173
      samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java
  51. 189
      samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo2.java
  52. 99
      samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java
  53. 91
      samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java
  54. 49
      samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java
  55. 24
      samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java
  56. 24
      samples/java/tutorial_code/ImgProc/opening_closing_hats/MorphologyDemo2.java
  57. 128
      samples/java/tutorial_code/ImgProc/tutorial_template_matching/MatchTemplateDemo.java
  58. 80
      samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java
  59. 71
      samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py
  60. 79
      samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo2.py
  61. 71
      samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py
  62. 69
      samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py
  63. 31
      samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py
  64. 54
      samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py

@ -395,7 +395,8 @@ ShowProcessor::ShowProcessor(cv::Ptr<calibrationData> data, cv::Ptr<calibControl
cv::Mat ShowProcessor::processFrame(const cv::Mat &frame) cv::Mat ShowProcessor::processFrame(const cv::Mat &frame)
{ {
if(mCalibdata->cameraMatrix.size[0] && mCalibdata->distCoeffs.size[0]) { if (!mCalibdata->cameraMatrix.empty() && !mCalibdata->distCoeffs.empty())
{
mTextSize = VIDEO_TEXT_SIZE * (double) frame.cols / IMAGE_MAX_WIDTH; mTextSize = VIDEO_TEXT_SIZE * (double) frame.cols / IMAGE_MAX_WIDTH;
cv::Scalar textColor = cv::Scalar(0,0,255); cv::Scalar textColor = cv::Scalar(0,0,255);
cv::Mat frameCopy; cv::Mat frameCopy;

@ -67,46 +67,104 @@ Code
- Calculate the histogram (and update it if the bins change) and the backprojection of the - Calculate the histogram (and update it if the bins change) and the backprojection of the
same image. same image.
- Display the backprojection and the histogram in windows. - Display the backprojection and the histogram in windows.
- **Downloadable code**:
-# Click @add_toggle_cpp
- **Downloadable code**:
- Click
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp) [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp)
for the basic version (explained in this tutorial). for the basic version (explained in this tutorial).
-# For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the - For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
skin area) you can check the [improved skin area) you can check the [improved
demo](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp) demo](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp)
-# ...or you can always check out the classical - ...or you can always check out the classical
[camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp) [camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
in samples. in samples.
- **Code at glance:** - **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp @include samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**:
- Click
[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java)
for the basic version (explained in this tutorial).
- For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
skin area) you can check the [improved
demo](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo2.java)
- ...or you can always check out the classical
[camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
in samples.
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java
@end_toggle
@add_toggle_python
- **Downloadable code**:
- Click
[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py)
for the basic version (explained in this tutorial).
- For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
skin area) you can check the [improved
demo](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo2.py)
- ...or you can always check out the classical
[camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
in samples.
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py
@end_toggle
Explanation Explanation
----------- -----------
-# Declare the matrices to store our images and initialize the number of bins to be used by our - Read the input image:
histogram:
@code{.cpp} @add_toggle_cpp
Mat src; Mat hsv; Mat hue; @snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Read the image
int bins = 25; @end_toggle
@endcode
-# Read the input image and transform it to HSV format: @add_toggle_java
@code{.cpp} @snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Read the image
src = imread( argv[1], 1 ); @end_toggle
cvtColor( src, hsv, COLOR_BGR2HSV );
@endcode @add_toggle_python
-# For this tutorial, we will use only the Hue value for our 1-D histogram (check out the fancier @snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Read the image
@end_toggle
- Transform it to HSV format:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Transform it to HSV
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Transform it to HSV
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Transform it to HSV
@end_toggle
- For this tutorial, we will use only the Hue value for our 1-D histogram (check out the fancier
code in the links above if you want to use the more standard H-S histogram, which yields better code in the links above if you want to use the more standard H-S histogram, which yields better
results): results):
@code{.cpp}
hue.create( hsv.size(), hsv.depth() );
int ch[] = { 0, 0 };
mixChannels( &hsv, 1, &hue, 1, ch, 1 );
@endcode
as you see, we use the function @ref cv::mixChannels to get only the channel 0 (Hue) from
the hsv image. It gets the following parameters:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Use only the Hue value
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Use only the Hue value
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Use only the Hue value
@end_toggle
- as you see, we use the function @ref cv::mixChannels to get only the channel 0 (Hue) from
the hsv image. It gets the following parameters:
- **&hsv:** The source array from which the channels will be copied - **&hsv:** The source array from which the channels will be copied
- **1:** The number of source arrays - **1:** The number of source arrays
- **&hue:** The destination array of the copied channels - **&hue:** The destination array of the copied channels
@ -115,59 +173,108 @@ Explanation
case, the Hue(0) channel of &hsv is being copied to the 0 channel of &hue (1-channel) case, the Hue(0) channel of &hsv is being copied to the 0 channel of &hue (1-channel)
- **1:** Number of index pairs - **1:** Number of index pairs
-# Create a Trackbar for the user to enter the bin values. Any change on the Trackbar means a call - Create a Trackbar for the user to enter the bin values. Any change on the Trackbar means a call
to the **Hist_and_Backproj** callback function. to the **Hist_and_Backproj** callback function.
@code{.cpp}
char* window_image = "Source image"; @add_toggle_cpp
namedWindow( window_image, WINDOW_AUTOSIZE ); @snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Create Trackbar to enter the number of bins
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj ); @end_toggle
Hist_and_Backproj(0, 0);
@endcode @add_toggle_java
-# Show the image and wait for the user to exit the program: @snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Create Trackbar to enter the number of bins
@code{.cpp} @end_toggle
imshow( window_image, src );
@add_toggle_python
waitKey(0); @snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Create Trackbar to enter the number of bins
return 0; @end_toggle
@endcode
-# **Hist_and_Backproj function:** Initialize the arguments needed for @ref cv::calcHist . The - Show the image and wait for the user to exit the program:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Show the image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Show the image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Show the image
@end_toggle
- **Hist_and_Backproj function:** Initialize the arguments needed for @ref cv::calcHist . The
number of bins comes from the Trackbar: number of bins comes from the Trackbar:
@code{.cpp}
void Hist_and_Backproj(int, void* ) @add_toggle_cpp
{ @snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp initialize
MatND hist; @end_toggle
int histSize = MAX( bins, 2 );
float hue_range[] = { 0, 180 }; @add_toggle_java
const float* ranges = { hue_range }; @snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java initialize
@endcode @end_toggle
-# Calculate the Histogram and normalize it to the range \f$[0,255]\f$
@code{.cpp} @add_toggle_python
calcHist( &hue, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false ); @snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py initialize
normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() ); @end_toggle
@endcode
-# Get the Backprojection of the same image by calling the function @ref cv::calcBackProject - Calculate the Histogram and normalize it to the range \f$[0,255]\f$
@code{.cpp}
MatND backproj; @add_toggle_cpp
calcBackProject( &hue, 1, 0, hist, backproj, &ranges, 1, true ); @snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Get the Histogram and normalize it
@endcode @end_toggle
all the arguments are known (the same as used to calculate the histogram), only we add the
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Get the Histogram and normalize it
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Get the Histogram and normalize it
@end_toggle
- Get the Backprojection of the same image by calling the function @ref cv::calcBackProject
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Get Backprojection
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Get Backprojection
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Get Backprojection
@end_toggle
- all the arguments are known (the same as used to calculate the histogram), only we add the
backproj matrix, which will store the backprojection of the source image (&hue) backproj matrix, which will store the backprojection of the source image (&hue)
-# Display backproj: - Display backproj:
@code{.cpp}
imshow( "BackProj", backproj ); @add_toggle_cpp
@endcode @snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Draw the backproj
-# Draw the 1-D Hue histogram of the image: @end_toggle
@code{.cpp}
int w = 400; int h = 400; @add_toggle_java
int bin_w = cvRound( (double) w / histSize ); @snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Draw the backproj
Mat histImg = Mat::zeros( w, h, CV_8UC3 ); @end_toggle
for( int i = 0; i < bins; i ++ ) @add_toggle_python
{ rectangle( histImg, Point( i*bin_w, h ), Point( (i+1)*bin_w, h - cvRound( hist.at<float>(i)*h/255.0 ) ), Scalar( 0, 0, 255 ), -1 ); } @snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Draw the backproj
@end_toggle
imshow( "Histogram", histImg );
@endcode - Draw the 1-D Hue histogram of the image:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp Draw the histogram
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java Draw the histogram
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py Draw the histogram
@end_toggle
Results Results
------- -------

@ -17,7 +17,8 @@ histogram called *Image histogram*. Now we will considerate it in its more gener
- Histograms are collected *counts* of data organized into a set of predefined *bins* - Histograms are collected *counts* of data organized into a set of predefined *bins*
- When we say *data* we are not restricting it to be intensity values (as we saw in the previous - When we say *data* we are not restricting it to be intensity values (as we saw in the previous
Tutorial). The data collected can be whatever feature you find useful to describe your image. Tutorial @ref tutorial_histogram_equalization). The data collected can be whatever feature you find
useful to describe your image.
- Let's see an example. Imagine that a Matrix contains information of an image (i.e. intensity in - Let's see an example. Imagine that a Matrix contains information of an image (i.e. intensity in
the range \f$0-255\f$): the range \f$0-255\f$):
@ -65,122 +66,193 @@ Code
- Splits the image into its R, G and B planes using the function @ref cv::split - Splits the image into its R, G and B planes using the function @ref cv::split
- Calculate the Histogram of each 1-channel plane by calling the function @ref cv::calcHist - Calculate the Histogram of each 1-channel plane by calling the function @ref cv::calcHist
- Plot the three histograms in a window - Plot the three histograms in a window
@add_toggle_cpp
- **Downloadable code**: Click - **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp) [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp)
- **Code at glance:** - **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp @include samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java
@end_toggle
@add_toggle_python
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py)
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py
@end_toggle
Explanation Explanation
----------- -----------
-# Create the necessary matrices: - Load the source image
@code{.cpp}
Mat src, dst;
@endcode
-# Load the source image
@code{.cpp}
src = imread( argv[1], 1 );
if( !src.data ) @add_toggle_cpp
{ return -1; } @snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Load image
@endcode @end_toggle
-# Separate the source image in its three R,G and B planes. For this we use the OpenCV function
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Load image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Load image
@end_toggle
- Separate the source image in its three R,G and B planes. For this we use the OpenCV function
@ref cv::split : @ref cv::split :
@code{.cpp}
vector<Mat> bgr_planes; @add_toggle_cpp
split( src, bgr_planes ); @snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Separate the image in 3 places ( B, G and R )
@endcode @end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Separate the image in 3 places ( B, G and R )
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Separate the image in 3 places ( B, G and R )
@end_toggle
our input is the image to be divided (this case with three channels) and the output is a vector our input is the image to be divided (this case with three channels) and the output is a vector
of Mat ) of Mat )
-# Now we are ready to start configuring the **histograms** for each plane. Since we are working - Now we are ready to start configuring the **histograms** for each plane. Since we are working
with the B, G and R planes, we know that our values will range in the interval \f$[0,255]\f$ with the B, G and R planes, we know that our values will range in the interval \f$[0,255]\f$
-# Establish number of bins (5, 10...):
@code{.cpp}
int histSize = 256; //from 0 to 255
@endcode
-# Set the range of values (as we said, between 0 and 255 )
@code{.cpp}
/// Set the ranges ( for B,G,R) )
float range[] = { 0, 256 } ; //the upper boundary is exclusive
const float* histRange = { range };
@endcode
-# We want our bins to have the same size (uniform) and to clear the histograms in the
beginning, so:
@code{.cpp}
bool uniform = true; bool accumulate = false;
@endcode
-# Finally, we create the Mat objects to save our histograms. Creating 3 (one for each plane):
@code{.cpp}
Mat b_hist, g_hist, r_hist;
@endcode
-# We proceed to calculate the histograms by using the OpenCV function @ref cv::calcHist :
@code{.cpp}
/// Compute the histograms:
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
@endcode
where the arguments are:
- **&bgr_planes[0]:** The source array(s)
- **1**: The number of source arrays (in this case we are using 1. We can enter here also
a list of arrays )
- **0**: The channel (*dim*) to be measured. In this case it is just the intensity (each
array is single-channel) so we just write 0.
- **Mat()**: A mask to be used on the source array ( zeros indicating pixels to be ignored
). If not defined it is not used
- **b_hist**: The Mat object where the histogram will be stored
- **1**: The histogram dimensionality.
- **histSize:** The number of bins per each used dimension
- **histRange:** The range of values to be measured per each dimension
- **uniform** and **accumulate**: The bin sizes are the same and the histogram is cleared
at the beginning.
-# Create an image to display the histograms:
@code{.cpp}
// Draw the histograms for R, G and B
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound( (double) hist_w/histSize );
Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) ); - Establish the number of bins (5, 10...):
@endcode
-# Notice that before drawing, we first @ref cv::normalize the histogram so its values fall in the @add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Establish the number of bins
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Establish the number of bins
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Establish the number of bins
@end_toggle
- Set the range of values (as we said, between 0 and 255 )
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Set the ranges ( for B,G,R) )
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Set the ranges ( for B,G,R) )
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Set the ranges ( for B,G,R) )
@end_toggle
- We want our bins to have the same size (uniform) and to clear the histograms in the
beginning, so:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Set histogram param
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Set histogram param
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Set histogram param
@end_toggle
- We proceed to calculate the histograms by using the OpenCV function @ref cv::calcHist :
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Compute the histograms
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Compute the histograms
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Compute the histograms
@end_toggle
- where the arguments are (**C++ code**):
- **&bgr_planes[0]:** The source array(s)
- **1**: The number of source arrays (in this case we are using 1. We can enter here also
a list of arrays )
- **0**: The channel (*dim*) to be measured. In this case it is just the intensity (each
array is single-channel) so we just write 0.
- **Mat()**: A mask to be used on the source array ( zeros indicating pixels to be ignored
). If not defined it is not used
- **b_hist**: The Mat object where the histogram will be stored
- **1**: The histogram dimensionality.
- **histSize:** The number of bins per each used dimension
- **histRange:** The range of values to be measured per each dimension
- **uniform** and **accumulate**: The bin sizes are the same and the histogram is cleared
at the beginning.
- Create an image to display the histograms:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Draw the histograms for B, G and R
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Draw the histograms for B, G and R
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Draw the histograms for B, G and R
@end_toggle
- Notice that before drawing, we first @ref cv::normalize the histogram so its values fall in the
range indicated by the parameters entered: range indicated by the parameters entered:
@code{.cpp}
/// Normalize the result to [ 0, histImage.rows ]
normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
@endcode
this function receives these arguments:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Normalize the result to ( 0, histImage.rows )
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Normalize the result to ( 0, histImage.rows )
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Normalize the result to ( 0, histImage.rows )
@end_toggle
- this function receives these arguments (**C++ code**):
- **b_hist:** Input array - **b_hist:** Input array
- **b_hist:** Output normalized array (can be the same) - **b_hist:** Output normalized array (can be the same)
- **0** and\**histImage.rows: For this example, they are the lower and upper limits to - **0** and **histImage.rows**: For this example, they are the lower and upper limits to
normalize the values ofr_hist*\* normalize the values of **r_hist**
- **NORM_MINMAX:** Argument that indicates the type of normalization (as described above, it - **NORM_MINMAX:** Argument that indicates the type of normalization (as described above, it
adjusts the values between the two limits set before) adjusts the values between the two limits set before)
- **-1:** Implies that the output normalized array will be the same type as the input - **-1:** Implies that the output normalized array will be the same type as the input
- **Mat():** Optional mask - **Mat():** Optional mask
-# Finally, observe that to access the bin (in this case in this 1D-Histogram): - Observe that to access the bin (in this case in this 1D-Histogram):
@code{.cpp}
/// Draw for each channel @add_toggle_cpp
for( int i = 1; i < histSize; i++ ) @snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Draw for each channel
{ @end_toggle
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ) ,
Point( bin_w*(i), hist_h - cvRound(b_hist.at<float>(i)) ), @add_toggle_java
Scalar( 255, 0, 0), 2, 8, 0 ); @snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Draw for each channel
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(g_hist.at<float>(i-1)) ) , @end_toggle
Point( bin_w*(i), hist_h - cvRound(g_hist.at<float>(i)) ),
Scalar( 0, 255, 0), 2, 8, 0 ); @add_toggle_python
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(r_hist.at<float>(i-1)) ) , @snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Draw for each channel
Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ), @end_toggle
Scalar( 0, 0, 255), 2, 8, 0 ); we use the expression (**C++ code**):
}
@endcode
we use the expression:
@code{.cpp} @code{.cpp}
b_hist.at<float>(i) b_hist.at<float>(i)
@endcode @endcode
@ -189,20 +261,24 @@ Explanation
b_hist.at<float>( i, j ) b_hist.at<float>( i, j )
@endcode @endcode
-# Finally we display our histograms and wait for the user to exit: - Finally we display our histograms and wait for the user to exit:
@code{.cpp}
namedWindow("calcHist Demo", WINDOW_AUTOSIZE );
imshow("calcHist Demo", histImage );
waitKey(0); @add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp Display
@end_toggle
return 0; @add_toggle_java
@endcode @snippet samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java Display
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py Display
@end_toggle
Result Result
------ ------
-# Using as input argument an image like the shown below: -# Using as input argument an image like the one shown below:
![](images/Histogram_Calculation_Original_Image.jpg) ![](images/Histogram_Calculation_Original_Image.jpg)

@ -43,90 +43,118 @@ Code
- Compare the histogram of the *base image* with respect to the 2 test histograms, the - Compare the histogram of the *base image* with respect to the 2 test histograms, the
histogram of the lower half base image and with the same base image histogram. histogram of the lower half base image and with the same base image histogram.
- Display the numerical matching parameters obtained. - Display the numerical matching parameters obtained.
@add_toggle_cpp
- **Downloadable code**: Click - **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp) [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java)
- **Code at glance:** - **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java
@end_toggle
@add_toggle_python
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py)
@include cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp - **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py
@end_toggle
Explanation Explanation
----------- -----------
-# Declare variables such as the matrices to store the base image and the two other images to - Load the base image (src_base) and the other two test images:
compare ( BGR and HSV )
@code{.cpp} @add_toggle_cpp
Mat src_base, hsv_base; @snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Load three images with different environment settings
Mat src_test1, hsv_test1; @end_toggle
Mat src_test2, hsv_test2;
Mat hsv_half_down; @add_toggle_java
@endcode @snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Load three images with different environment settings
-# Load the base image (src_base) and the other two test images: @end_toggle
@code{.cpp}
if( argc < 4 ) @add_toggle_python
{ printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n"); @snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Load three images with different environment settings
return -1; @end_toggle
}
- Convert them to HSV format:
src_base = imread( argv[1], 1 );
src_test1 = imread( argv[2], 1 ); @add_toggle_cpp
src_test2 = imread( argv[3], 1 ); @snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Convert to HSV
@endcode @end_toggle
-# Convert them to HSV format:
@code{.cpp} @add_toggle_java
cvtColor( src_base, hsv_base, COLOR_BGR2HSV ); @snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Convert to HSV
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV ); @end_toggle
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
@endcode @add_toggle_python
-# Also, create an image of half the base image (in HSV format): @snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Convert to HSV
@code{.cpp} @end_toggle
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
@endcode - Also, create an image of half the base image (in HSV format):
-# Initialize the arguments to calculate the histograms (bins, ranges and channels H and S ).
@code{.cpp} @add_toggle_cpp
int h_bins = 50; int s_bins = 60; @snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Convert to HSV half
int histSize[] = { h_bins, s_bins }; @end_toggle
float h_ranges[] = { 0, 180 }; @add_toggle_java
float s_ranges[] = { 0, 256 }; @snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Convert to HSV half
@end_toggle
const float* ranges[] = { h_ranges, s_ranges };
@add_toggle_python
int channels[] = { 0, 1 }; @snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Convert to HSV half
@endcode @end_toggle
-# Create the MatND objects to store the histograms:
@code{.cpp} - Initialize the arguments to calculate the histograms (bins, ranges and channels H and S ).
MatND hist_base;
MatND hist_half_down; @add_toggle_cpp
MatND hist_test1; @snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Using 50 bins for hue and 60 for saturation
MatND hist_test2; @end_toggle
@endcode
-# Calculate the Histograms for the base image, the 2 test images and the half-down base image: @add_toggle_java
@code{.cpp} @snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Using 50 bins for hue and 60 for saturation
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false ); @end_toggle
normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
@add_toggle_python
calcHist( &hsv_half_down, 1, channels, Mat(), hist_half_down, 2, histSize, ranges, true, false ); @snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Using 50 bins for hue and 60 for saturation
normalize( hist_half_down, hist_half_down, 0, 1, NORM_MINMAX, -1, Mat() ); @end_toggle
calcHist( &hsv_test1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false ); - Calculate the Histograms for the base image, the 2 test images and the half-down base image:
normalize( hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat() );
@add_toggle_cpp
calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false ); @snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Calculate the histograms for the HSV images
normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() ); @end_toggle
@endcode
-# Apply sequentially the 4 comparison methods between the histogram of the base image (hist_base) @add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Calculate the histograms for the HSV images
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Calculate the histograms for the HSV images
@end_toggle
- Apply sequentially the 4 comparison methods between the histogram of the base image (hist_base)
and the other histograms: and the other histograms:
@code{.cpp}
for( int i = 0; i < 4; i++ ) @add_toggle_cpp
{ int compare_method = i; @snippet samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp Apply the histogram comparison methods
double base_base = compareHist( hist_base, hist_base, compare_method ); @end_toggle
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method ); @add_toggle_java
double base_test2 = compareHist( hist_base, hist_test2, compare_method ); @snippet samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java Apply the histogram comparison methods
@end_toggle
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
} @add_toggle_python
@endcode @snippet samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py Apply the histogram comparison methods
@end_toggle
Results Results
------- -------
@ -144,13 +172,13 @@ Results
are from the same source. For the other two test images, we can observe that they have very are from the same source. For the other two test images, we can observe that they have very
different lighting conditions, so the matching should not be very good: different lighting conditions, so the matching should not be very good:
-# Here the numeric results: -# Here the numeric results we got with OpenCV 3.4.1:
*Method* | Base - Base | Base - Half | Base - Test 1 | Base - Test 2 *Method* | Base - Base | Base - Half | Base - Test 1 | Base - Test 2
----------------- | ------------ | ------------ | -------------- | --------------- ----------------- | ------------ | ------------ | -------------- | ---------------
*Correlation* | 1.000000 | 0.930766 | 0.182073 | 0.120447 *Correlation* | 1.000000 | 0.880438 | 0.20457 | 0.0664547
*Chi-square* | 0.000000 | 4.940466 | 21.184536 | 49.273437 *Chi-square* | 0.000000 | 4.6834 | 2697.98 | 4763.8
*Intersection* | 24.391548 | 14.959809 | 3.889029 | 5.775088 *Intersection* | 18.8947 | 13.022 | 5.44085 | 2.58173
*Bhattacharyya* | 0.000000 | 0.222609 | 0.646576 | 0.801869 *Bhattacharyya* | 0.000000 | 0.237887 | 0.679826 | 0.874173
For the *Correlation* and *Intersection* methods, the higher the metric, the more accurate the For the *Correlation* and *Intersection* methods, the higher the metric, the more accurate the
match. As we can see, the match *base-base* is the highest of all as expected. Also we can observe match. As we can see, the match *base-base* is the highest of all as expected. Also we can observe
that the match *base-half* is the second best match (as we predicted). For the other two metrics, that the match *base-half* is the second best match (as we predicted). For the other two metrics,

@ -22,7 +22,7 @@ Theory
### What is Histogram Equalization? ### What is Histogram Equalization?
- It is a method that improves the contrast in an image, in order to stretch out the intensity - It is a method that improves the contrast in an image, in order to stretch out the intensity
range. range (see also the corresponding <a href="https://en.wikipedia.org/wiki/Histogram_equalization">Wikipedia entry</a>).
- To make it clearer, from the image above, you can see that the pixels seem clustered around the - To make it clearer, from the image above, you can see that the pixels seem clustered around the
middle of the available range of intensities. What Histogram Equalization does is to *stretch middle of the available range of intensities. What Histogram Equalization does is to *stretch
out* this range. Take a look at the figure below: The green circles indicate the out* this range. Take a look at the figure below: The green circles indicate the
@ -61,53 +61,105 @@ Code
- Convert the original image to grayscale - Convert the original image to grayscale
- Equalize the Histogram by using the OpenCV function @ref cv::equalizeHist - Equalize the Histogram by using the OpenCV function @ref cv::equalizeHist
- Display the source and equalized images in a window. - Display the source and equalized images in a window.
@add_toggle_cpp
- **Downloadable code**: Click - **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp) [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp)
- **Code at glance:** - **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp @include samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java
@end_toggle
@add_toggle_python
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py)
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py
@end_toggle
Explanation Explanation
----------- -----------
-# Declare the source and destination images as well as the windows names: - Load the source image:
@code{.cpp}
Mat src, dst; @add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Load image
char* source_window = "Source image"; @end_toggle
char* equalized_window = "Equalized Image";
@endcode @add_toggle_java
-# Load the source image: @snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Load image
@code{.cpp} @end_toggle
src = imread( argv[1], 1 );
@add_toggle_python
if( !src.data ) @snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Load image
{ cout<<"Usage: ./Histogram_Demo <path_to_image>"<<endl; @end_toggle
return -1;}
@endcode - Convert it to grayscale:
-# Convert it to grayscale:
@code{.cpp} @add_toggle_cpp
cvtColor( src, src, COLOR_BGR2GRAY ); @snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Convert to grayscale
@endcode @end_toggle
-# Apply histogram equalization with the function @ref cv::equalizeHist :
@code{.cpp} @add_toggle_java
equalizeHist( src, dst ); @snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Convert to grayscale
@endcode @end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Convert to grayscale
@end_toggle
- Apply histogram equalization with the function @ref cv::equalizeHist :
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Apply Histogram Equalization
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Apply Histogram Equalization
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Apply Histogram Equalization
@end_toggle
As it can be easily seen, the only arguments are the original image and the output (equalized) As it can be easily seen, the only arguments are the original image and the output (equalized)
image. image.
-# Display both images (original and equalized) : - Display both images (original and equalized):
@code{.cpp}
namedWindow( source_window, WINDOW_AUTOSIZE ); @add_toggle_cpp
namedWindow( equalized_window, WINDOW_AUTOSIZE ); @snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Display results
@end_toggle
imshow( source_window, src );
imshow( equalized_window, dst ); @add_toggle_java
@endcode @snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Display results
-# Wait until user exists the program @end_toggle
@code{.cpp}
waitKey(0); @add_toggle_python
return 0; @snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Display results
@endcode @end_toggle
- Wait until user exists the program
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp Wait until user exits the program
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java Wait until user exits the program
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py Wait until user exits the program
@end_toggle
Results Results
------- -------

@ -80,7 +80,7 @@ Theory
Code Code
---- ----
-# **What does this program do?** - **What does this program do?**
- Loads an image - Loads an image
- Applies an Affine Transform to the image. This transform is obtained from the relation - Applies an Affine Transform to the image. This transform is obtained from the relation
between three points. We use the function @ref cv::warpAffine for that purpose. between three points. We use the function @ref cv::warpAffine for that purpose.
@ -88,57 +88,88 @@ Code
the image center the image center
- Waits until the user exits the program - Waits until the user exits the program
-# The tutorial's code is shown below. You can also download it here @add_toggle_cpp
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp) - The tutorial's code is shown below. You can also download it
[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
@include samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp @include samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp
@end_toggle
@add_toggle_java
- The tutorial's code is shown below. You can also download it
[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
@include samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java
@end_toggle
@add_toggle_python
- The tutorial's code is shown below. You can also download it
[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py)
@include samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py
@end_toggle
Explanation Explanation
----------- -----------
-# Declare some variables we will use, such as the matrices to store our results and 2 arrays of - Load an image:
points to store the 2D points that define our Affine Transform.
@code{.cpp} @add_toggle_cpp
Point2f srcTri[3]; @snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Load the image
Point2f dstTri[3]; @end_toggle
Mat rot_mat( 2, 3, CV_32FC1 ); @add_toggle_java
Mat warp_mat( 2, 3, CV_32FC1 ); @snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Load the image
Mat src, warp_dst, warp_rotate_dst; @end_toggle
@endcode
-# Load an image: @add_toggle_python
@code{.cpp} @snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Load the image
src = imread( argv[1], 1 ); @end_toggle
@endcode
-# Initialize the destination image as having the same size and type as the source: - **Affine Transform:** As we explained in lines above, we need two sets of 3 points to derive the
@code{.cpp}
warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
@endcode
-# **Affine Transform:** As we explained in lines above, we need two sets of 3 points to derive the
affine transform relation. Have a look: affine transform relation. Have a look:
@code{.cpp}
srcTri[0] = Point2f( 0, 0 ); @add_toggle_cpp
srcTri[1] = Point2f( src.cols - 1, 0 ); @snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Set your 3 points to calculate the Affine Transform
srcTri[2] = Point2f( 0, src.rows - 1 ); @end_toggle
dstTri[0] = Point2f( src.cols*0.0, src.rows*0.33 ); @add_toggle_java
dstTri[1] = Point2f( src.cols*0.85, src.rows*0.25 ); @snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Set your 3 points to calculate the Affine Transform
dstTri[2] = Point2f( src.cols*0.15, src.rows*0.7 ); @end_toggle
@endcode
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Set your 3 points to calculate the Affine Transform
@end_toggle
You may want to draw these points to get a better idea on how they change. Their locations are You may want to draw these points to get a better idea on how they change. Their locations are
approximately the same as the ones depicted in the example figure (in the Theory section). You approximately the same as the ones depicted in the example figure (in the Theory section). You
may note that the size and orientation of the triangle defined by the 3 points change. may note that the size and orientation of the triangle defined by the 3 points change.
-# Armed with both sets of points, we calculate the Affine Transform by using OpenCV function @ref - Armed with both sets of points, we calculate the Affine Transform by using OpenCV function @ref
cv::getAffineTransform : cv::getAffineTransform :
@code{.cpp}
warp_mat = getAffineTransform( srcTri, dstTri ); @add_toggle_cpp
@endcode @snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Get the Affine Transform
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Get the Affine Transform
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Get the Affine Transform
@end_toggle
We get a \f$2 \times 3\f$ matrix as an output (in this case **warp_mat**) We get a \f$2 \times 3\f$ matrix as an output (in this case **warp_mat**)
-# We then apply the Affine Transform just found to the src image - We then apply the Affine Transform just found to the src image
@code{.cpp}
warpAffine( src, warp_dst, warp_mat, warp_dst.size() ); @add_toggle_cpp
@endcode @snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Apply the Affine Transform just found to the src image
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Apply the Affine Transform just found to the src image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Apply the Affine Transform just found to the src image
@end_toggle
with the following arguments: with the following arguments:
- **src**: Input image - **src**: Input image
@ -149,47 +180,87 @@ Explanation
We just got our first transformed image! We will display it in one bit. Before that, we also We just got our first transformed image! We will display it in one bit. Before that, we also
want to rotate it... want to rotate it...
-# **Rotate:** To rotate an image, we need to know two things: - **Rotate:** To rotate an image, we need to know two things:
-# The center with respect to which the image will rotate -# The center with respect to which the image will rotate
-# The angle to be rotated. In OpenCV a positive angle is counter-clockwise -# The angle to be rotated. In OpenCV a positive angle is counter-clockwise
-# *Optional:* A scale factor -# *Optional:* A scale factor
We define these parameters with the following snippet: We define these parameters with the following snippet:
@code{.cpp}
Point center = Point( warp_dst.cols/2, warp_dst.rows/2 ); @add_toggle_cpp
double angle = -50.0; @snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Compute a rotation matrix with respect to the center of the image
double scale = 0.6; @end_toggle
@endcode
-# We generate the rotation matrix with the OpenCV function @ref cv::getRotationMatrix2D , which @add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Compute a rotation matrix with respect to the center of the image
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Compute a rotation matrix with respect to the center of the image
@end_toggle
- We generate the rotation matrix with the OpenCV function @ref cv::getRotationMatrix2D , which
returns a \f$2 \times 3\f$ matrix (in this case *rot_mat*) returns a \f$2 \times 3\f$ matrix (in this case *rot_mat*)
@code{.cpp}
rot_mat = getRotationMatrix2D( center, angle, scale ); @add_toggle_cpp
@endcode @snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Get the rotation matrix with the specifications above
-# We now apply the found rotation to the output of our previous Transformation. @end_toggle
@code{.cpp}
warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() ); @add_toggle_java
@endcode @snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Get the rotation matrix with the specifications above
-# Finally, we display our results in two windows plus the original image for good measure: @end_toggle
@code{.cpp}
namedWindow( source_window, WINDOW_AUTOSIZE ); @add_toggle_python
imshow( source_window, src ); @snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Get the rotation matrix with the specifications above
@end_toggle
namedWindow( warp_window, WINDOW_AUTOSIZE );
imshow( warp_window, warp_dst ); - We now apply the found rotation to the output of our previous Transformation:
namedWindow( warp_rotate_window, WINDOW_AUTOSIZE ); @add_toggle_cpp
imshow( warp_rotate_window, warp_rotate_dst ); @snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Rotate the warped image
@endcode @end_toggle
-# We just have to wait until the user exits the program
@code{.cpp} @add_toggle_java
waitKey(0); @snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Rotate the warped image
@endcode @end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Rotate the warped image
@end_toggle
- Finally, we display our results in two windows plus the original image for good measure:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Show what you got
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Show what you got
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Show what you got
@end_toggle
- We just have to wait until the user exits the program
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp Wait until user exits the program
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java Wait until user exits the program
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py Wait until user exits the program
@end_toggle
Result Result
------ ------
-# After compiling the code above, we can give it the path of an image as argument. For instance, - After compiling the code above, we can give it the path of an image as argument. For instance,
for a picture like: for a picture like:
![](images/Warp_Affine_Tutorial_Original_Image.jpg) ![](images/Warp_Affine_Tutorial_Original_Image.jpg)

@ -165,6 +165,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_warp_affine - @subpage tutorial_warp_affine
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0 *Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán *Author:* Ana Huamán
@ -173,6 +175,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_histogram_equalization - @subpage tutorial_histogram_equalization
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0 *Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán *Author:* Ana Huamán
@ -181,6 +185,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_histogram_calculation - @subpage tutorial_histogram_calculation
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0 *Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán *Author:* Ana Huamán
@ -189,6 +195,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_histogram_comparison - @subpage tutorial_histogram_comparison
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0 *Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán *Author:* Ana Huamán
@ -197,6 +205,8 @@ In this section you will learn about the image processing (manipulation) functio
- @subpage tutorial_back_projection - @subpage tutorial_back_projection
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0 *Compatibility:* \> OpenCV 2.0
*Author:* Ana Huamán *Author:* Ana Huamán

@ -252,7 +252,7 @@ static int icvSmoothHistogram( const std::vector<int>& piHist, std::vector<int>&
for ( int ii=-iWidth; ii<=iWidth; ii++) for ( int ii=-iWidth; ii<=iWidth; ii++)
{ {
iIdx = i+ii; iIdx = i+ii;
if (iIdx > 0 && iIdx < 256) if (iIdx >= 0 && iIdx < 256)
{ {
iSmooth += piHist[iIdx]; iSmooth += piHist[iIdx];
} }
@ -293,7 +293,7 @@ static bool icvBinarizationHistogramBased( Mat & img )
std::vector<int> piHistSmooth(iNumBins, 0); std::vector<int> piHistSmooth(iNumBins, 0);
std::vector<int> piHistGrad(iNumBins, 0); std::vector<int> piHistGrad(iNumBins, 0);
std::vector<int> piAccumSum(iNumBins, 0); std::vector<int> piAccumSum(iNumBins, 0);
std::vector<int> piMaxPos(20, 0); std::vector<int> piMaxPos; piMaxPos.reserve(20);
int iThresh = 0; int iThresh = 0;
int iIdx; int iIdx;
int iWidth = 1; int iWidth = 1;
@ -319,7 +319,7 @@ static bool icvBinarizationHistogramBased( Mat & img )
{ {
if ( (piHistGrad[i-1] < 0) && (piHistGrad[i] > 0) ) if ( (piHistGrad[i-1] < 0) && (piHistGrad[i] > 0) )
{ {
piMaxPos[iCntMaxima] = i; piMaxPos.push_back(i);
iCntMaxima++; iCntMaxima++;
} }
} }
@ -332,15 +332,35 @@ static bool icvBinarizationHistogramBased( Mat & img )
iSumAroundMax = piHistSmooth[iIdx-1] + piHistSmooth[iIdx] + piHistSmooth[iIdx+1]; iSumAroundMax = piHistSmooth[iIdx-1] + piHistSmooth[iIdx] + piHistSmooth[iIdx+1];
if ( iSumAroundMax < iMaxPix1 && iIdx < 64 ) if ( iSumAroundMax < iMaxPix1 && iIdx < 64 )
{ {
for ( int j=i; j<iCntMaxima-1; j++ ) piMaxPos.erase(piMaxPos.begin() + i);
{
piMaxPos[j] = piMaxPos[j+1];
}
iCntMaxima--; iCntMaxima--;
i--; i--;
} }
} }
if ( iCntMaxima == 1)
CV_Assert((size_t)iCntMaxima == piMaxPos.size());
PRINTF("HIST: MAXIMA COUNT: %d (%d, %d, %d, ...)\n", iCntMaxima,
iCntMaxima > 0 ? piMaxPos[0] : -1,
iCntMaxima > 1 ? piMaxPos[1] : -1,
iCntMaxima > 2 ? piMaxPos[2] : -1);
if (iCntMaxima == 0)
{
// no any maxima inside (except 0 and 255 which are not handled above)
// Does image black-write already?
const int iMaxPix2 = iMaxPix / 2;
for (int sum = 0, i = 0; i < 256; ++i) // select mean intensity
{
sum += piHistIntensity[i];
if (sum > iMaxPix2)
{
iThresh = i;
break;
}
}
}
else if (iCntMaxima == 1)
{ {
iThresh = piMaxPos[0]/2; iThresh = piMaxPos[0]/2;
} }
@ -380,7 +400,7 @@ static bool icvBinarizationHistogramBased( Mat & img )
int iMaxVal = piHistIntensity[piMaxPos[iIdxBGMax]]; int iMaxVal = piHistIntensity[piMaxPos[iIdxBGMax]];
//IF TOO CLOSE TO 255, jump to next maximum //IF TOO CLOSE TO 255, jump to next maximum
if ( piMaxPos[iIdxBGMax] >= 250 && iIdxBGMax < iCntMaxima ) if ( piMaxPos[iIdxBGMax] >= 250 && iIdxBGMax + 1 < iCntMaxima )
{ {
iIdxBGMax++; iIdxBGMax++;
iMaxVal = piHistIntensity[piMaxPos[iIdxBGMax]]; iMaxVal = piHistIntensity[piMaxPos[iIdxBGMax]];
@ -497,7 +517,8 @@ int cvFindChessboardCorners( const void* arr, CvSize pattern_size,
int max_quad_buf_size = 0; int max_quad_buf_size = 0;
cvFree(&quads); cvFree(&quads);
cvFree(&corners); cvFree(&corners);
int quad_count = icvGenerateQuads( &quads, &corners, storage, thresh_img_new, flags, &max_quad_buf_size ); Mat binarized_img = thresh_img_new.clone(); // make clone because cvFindContours modifies the source image
int quad_count = icvGenerateQuads( &quads, &corners, storage, binarized_img, flags, &max_quad_buf_size );
PRINTF("Quad count: %d/%d\n", quad_count, (pattern_size.width/2+1)*(pattern_size.height/2+1)); PRINTF("Quad count: %d/%d\n", quad_count, (pattern_size.width/2+1)*(pattern_size.height/2+1));
SHOW_QUADS("New quads", thresh_img_new, quads, quad_count); SHOW_QUADS("New quads", thresh_img_new, quads, quad_count);
if (processQuads(quads, quad_count, pattern_size, max_quad_buf_size, storage, corners, out_corners, out_corner_count, prev_sqr_size)) if (processQuads(quads, quad_count, pattern_size, max_quad_buf_size, storage, corners, out_corners, out_corner_count, prev_sqr_size))
@ -562,7 +583,8 @@ int cvFindChessboardCorners( const void* arr, CvSize pattern_size,
int max_quad_buf_size = 0; int max_quad_buf_size = 0;
cvFree(&quads); cvFree(&quads);
cvFree(&corners); cvFree(&corners);
int quad_count = icvGenerateQuads( &quads, &corners, storage, thresh_img, flags, &max_quad_buf_size); Mat binarized_img = (useAdaptive) ? thresh_img : thresh_img.clone(); // make clone because cvFindContours modifies the source image
int quad_count = icvGenerateQuads( &quads, &corners, storage, binarized_img, flags, &max_quad_buf_size);
PRINTF("Quad count: %d/%d\n", quad_count, (pattern_size.width/2+1)*(pattern_size.height/2+1)); PRINTF("Quad count: %d/%d\n", quad_count, (pattern_size.width/2+1)*(pattern_size.height/2+1));
SHOW_QUADS("Old quads", thresh_img, quads, quad_count); SHOW_QUADS("Old quads", thresh_img, quads, quad_count);
if (processQuads(quads, quad_count, pattern_size, max_quad_buf_size, storage, corners, out_corners, out_corner_count, prev_sqr_size)) if (processQuads(quads, quad_count, pattern_size, max_quad_buf_size, storage, corners, out_corners, out_corner_count, prev_sqr_size))

@ -3889,7 +3889,7 @@ float cv::rectify3Collinear( InputArray _cameraMatrix1, InputArray _distCoeffs1,
P3.at<double>(0,3) *= P3.at<double>(0,0); P3.at<double>(0,3) *= P3.at<double>(0,0);
P3.at<double>(1,3) *= P3.at<double>(1,1); P3.at<double>(1,3) *= P3.at<double>(1,1);
if( !_imgpt1.empty() && _imgpt3.empty() ) if( !_imgpt1.empty() && !_imgpt3.empty() )
adjust3rdMatrix(_imgpt1, _imgpt3, _cameraMatrix1.getMat(), _distCoeffs1.getMat(), adjust3rdMatrix(_imgpt1, _imgpt3, _cameraMatrix1.getMat(), _distCoeffs1.getMat(),
_cameraMatrix3.getMat(), _distCoeffs3.getMat(), _Rmat1.getMat(), R3, P1, P3); _cameraMatrix3.getMat(), _distCoeffs3.getMat(), _Rmat1.getMat(), R3, P1, P3);

@ -435,7 +435,7 @@ Cv64suf;
// Integer types portatibility // Integer types portatibility
#ifdef OPENCV_STDINT_HEADER #ifdef OPENCV_STDINT_HEADER
#include OPENCV_STDINT_HEADER #include OPENCV_STDINT_HEADER
#else #elif defined(__cplusplus)
#if defined(_MSC_VER) && _MSC_VER < 1600 /* MSVS 2010 */ #if defined(_MSC_VER) && _MSC_VER < 1600 /* MSVS 2010 */
namespace cv { namespace cv {
typedef signed char int8_t; typedef signed char int8_t;
@ -472,9 +472,15 @@ typedef ::int64_t int64_t;
typedef ::uint64_t uint64_t; typedef ::uint64_t uint64_t;
} }
#endif #endif
#else // pure C
#include <stdint.h>
#endif #endif
//! @} //! @}
#ifndef __cplusplus
#include "opencv2/core/fast_math.hpp" // define cvRound(double)
#endif
#endif // OPENCV_CORE_CVDEF_H #endif // OPENCV_CORE_CVDEF_H

@ -152,7 +152,7 @@ namespace cv { namespace cuda
inline ~NppStreamHandler() inline ~NppStreamHandler()
{ {
cudaStreamSynchronize(oldStream); nppSafeSetStream(nppGetStream(), oldStream);
} }
private: private:

@ -503,7 +503,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
class CV_EXPORTS ShiftLayer : public Layer class CV_EXPORTS ShiftLayer : public Layer
{ {
public: public:
static Ptr<ShiftLayer> create(const LayerParams& params); static Ptr<Layer> create(const LayerParams& params);
}; };
class CV_EXPORTS PriorBoxLayer : public Layer class CV_EXPORTS PriorBoxLayer : public Layer

@ -142,7 +142,7 @@ public:
PyGILState_Release(gstate); PyGILState_Release(gstate);
if (!res) if (!res)
CV_Error(Error::StsNotImplemented, "Failed to call \"getMemoryShapes\" method"); CV_Error(Error::StsNotImplemented, "Failed to call \"getMemoryShapes\" method");
pyopencv_to_generic_vec(res, outputs, ArgInfo("", 0)); CV_Assert(pyopencv_to_generic_vec(res, outputs, ArgInfo("", 0)));
return false; return false;
} }
@ -163,7 +163,7 @@ public:
CV_Error(Error::StsNotImplemented, "Failed to call \"forward\" method"); CV_Error(Error::StsNotImplemented, "Failed to call \"forward\" method");
std::vector<Mat> pyOutputs; std::vector<Mat> pyOutputs;
pyopencv_to(res, pyOutputs, ArgInfo("", 0)); CV_Assert(pyopencv_to(res, pyOutputs, ArgInfo("", 0)));
CV_Assert(pyOutputs.size() == outputs.size()); CV_Assert(pyOutputs.size() == outputs.size());
for (size_t i = 0; i < outputs.size(); ++i) for (size_t i = 0; i < outputs.size(); ++i)

@ -1530,10 +1530,12 @@ struct Net::Impl
LayerData *eltwiseData = nextData; LayerData *eltwiseData = nextData;
// go down from the second input and find the first non-skipped layer. // go down from the second input and find the first non-skipped layer.
LayerData *downLayerData = &layers[eltwiseData->inputBlobsId[1].lid]; LayerData *downLayerData = &layers[eltwiseData->inputBlobsId[1].lid];
CV_Assert(downLayerData);
while (downLayerData->skip) while (downLayerData->skip)
{ {
downLayerData = &layers[downLayerData->inputBlobsId[0].lid]; downLayerData = &layers[downLayerData->inputBlobsId[0].lid];
} }
CV_Assert(downLayerData);
// second input layer is current layer. // second input layer is current layer.
if ( ld.id == downLayerData->id ) if ( ld.id == downLayerData->id )
@ -1548,9 +1550,7 @@ struct Net::Impl
downLayerData = &layers[downLayerData->inputBlobsId[0].lid]; downLayerData = &layers[downLayerData->inputBlobsId[0].lid];
} }
Ptr<ConvolutionLayer> convLayer; Ptr<ConvolutionLayer> convLayer = downLayerData->layerInstance.dynamicCast<ConvolutionLayer>();
if( downLayerData )
convLayer = downLayerData->layerInstance.dynamicCast<ConvolutionLayer>();
// first input layer is convolution layer // first input layer is convolution layer
if( !convLayer.empty() && eltwiseData->consumers.size() == 1 ) if( !convLayer.empty() && eltwiseData->consumers.size() == 1 )

@ -283,7 +283,7 @@ public:
lp.precision = InferenceEngine::Precision::FP32; lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp)); std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
const int numChannels = weights_.total(); const size_t numChannels = weights_.total();
ieLayer->_weights = wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C); ieLayer->_weights = wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C);
ieLayer->_biases = wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C); ieLayer->_biases = wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C);

@ -456,7 +456,7 @@ public:
if (hasBias() || fusedBias) if (hasBias() || fusedBias)
{ {
Mat biasesMat({outCn}, CV_32F, &biasvec[0]); Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
ieLayer->_biases = wrapToInfEngineBlob(biasesMat, {outCn}, InferenceEngine::Layout::C); ieLayer->_biases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
} }
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer)); return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE #endif // HAVE_INF_ENGINE

@ -427,9 +427,9 @@ public:
std::shared_ptr<InferenceEngine::FullyConnectedLayer> ieLayer(new InferenceEngine::FullyConnectedLayer(lp)); std::shared_ptr<InferenceEngine::FullyConnectedLayer> ieLayer(new InferenceEngine::FullyConnectedLayer(lp));
ieLayer->_out_num = blobs[0].size[0]; ieLayer->_out_num = blobs[0].size[0];
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {blobs[0].size[0], blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW); ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW);
if (blobs.size() > 1) if (blobs.size() > 1)
ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {ieLayer->_out_num}, InferenceEngine::Layout::C); ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)ieLayer->_out_num}, InferenceEngine::Layout::C);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer)); return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE #endif // HAVE_INF_ENGINE
return Ptr<BackendNode>(); return Ptr<BackendNode>();

@ -254,7 +254,7 @@ public:
ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0"; ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0";
ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0"; ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0";
const int numChannels = blobs[0].total(); const size_t numChannels = blobs[0].total();
ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C); ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer)); return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE #endif // HAVE_INF_ENGINE

@ -119,9 +119,10 @@ public:
if (blobs.size() > 3) if (blobs.size() > 3)
{ {
CV_Assert(blobs.size() == 6); CV_Assert(blobs.size() == 6);
const int N = Wh.cols;
for (int i = 3; i < 6; ++i) for (int i = 3; i < 6; ++i)
{ {
CV_Assert(blobs[i].rows == Wh.cols && blobs[i].cols == Wh.cols); CV_Assert(blobs[i].rows == N && blobs[i].cols == N);
CV_Assert(blobs[i].type() == bias.type()); CV_Assert(blobs[i].type() == bias.type());
} }
} }

@ -28,6 +28,7 @@ public:
setParamsFrom(params); setParamsFrom(params);
hasBias = params.get<bool>("bias_term", false); hasBias = params.get<bool>("bias_term", false);
axis = params.get<int>("axis", 1); axis = params.get<int>("axis", 1);
hasWeights = false;
} }
bool getMemoryShapes(const std::vector<MatShape> &inputs, bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -35,11 +36,16 @@ public:
std::vector<MatShape> &outputs, std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE std::vector<MatShape> &internals) const CV_OVERRIDE
{ {
CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == 1 + hasBias);
outputs.assign(1, inputs[0]); outputs.assign(1, inputs[0]);
return true; return true;
} }
virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == (int)hasWeights + (int)hasBias);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
{ {
return backendId == DNN_BACKEND_DEFAULT || return backendId == DNN_BACKEND_DEFAULT ||
@ -63,10 +69,15 @@ public:
Mat &inpBlob = *inputs[0]; Mat &inpBlob = *inputs[0];
Mat &outBlob = outputs[0]; Mat &outBlob = outputs[0];
Mat &weights = blobs.empty() ? *inputs[1] : blobs[0]; // There is a mode when we multiply a first blob by a second one
Mat bias = hasBias ? blobs.back() : Mat(); // instead of trainable weights.
Mat weights = blobs.empty() ? *inputs[1] : (hasWeights ? blobs[0] : Mat());
Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
if (!weights.empty())
weights = weights.reshape(1, 1);
MatShape inpShape = shape(inpBlob); MatShape inpShape = shape(inpBlob);
const int numWeights = weights.total(); const int numWeights = !weights.empty() ? weights.total() : bias.total();
CV_Assert(numWeights != 0, !hasWeights || !hasBias || weights.total() == bias.total());
int endAxis; int endAxis;
for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis) for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis)
@ -84,15 +95,15 @@ public:
if (endAxis != inpBlob.dims) if (endAxis != inpBlob.dims)
{ {
float* weightsData = (float*)weights.data; float* weightsData = !weights.empty() ? (float*)weights.data : 0;
float* biasesData = hasBias ? (float*)bias.data : 0; float* biasesData = hasBias ? (float*)bias.data : 0;
int spatialSize = total(inpShape, endAxis); // spatialSize != 1 int spatialSize = total(inpShape, endAxis); // spatialSize != 1
for (int i = 0; i < numSlices; ++i) for (int i = 0; i < numSlices; ++i)
{ {
for (int j = 0; j < numWeights; ++j) for (int j = 0; j < numWeights; ++j)
{ {
float w = weightsData[j]; float w = weightsData ? weightsData[j] : 1;
float b = hasBias ? biasesData[j] : 0; float b = biasesData ? biasesData[j] : 0;
Mat inpSlice(1, spatialSize, CV_32F, inpData); Mat inpSlice(1, spatialSize, CV_32F, inpData);
Mat outSlice(1, spatialSize, CV_32F, outData); Mat outSlice(1, spatialSize, CV_32F, outData);
inpSlice.convertTo(outSlice, CV_32F, w, b); inpSlice.convertTo(outSlice, CV_32F, w, b);
@ -105,12 +116,16 @@ public:
{ {
for (int i = 0; i < numSlices; ++i) for (int i = 0; i < numSlices; ++i)
{ {
Mat inpSlice(weights.dims, weights.size, CV_32F, inpData); Mat inpSlice(1, numWeights, CV_32F, inpData);
Mat outSlice(weights.dims, weights.size, CV_32F, outData); Mat outSlice(1, numWeights, CV_32F, outData);
multiply(inpSlice, weights, outSlice); if (!weights.empty())
if (hasBias) {
add(outSlice, bias, outSlice); multiply(inpSlice, weights, outSlice);
if (hasBias)
add(outSlice, bias, outSlice);
}
else if (hasBias)
add(inpSlice, bias, outSlice);
inpData += numWeights; inpData += numWeights;
outData += numWeights; outData += numWeights;
} }
@ -157,11 +172,15 @@ public:
const int numChannels = blobs[0].total(); const int numChannels = blobs[0].total();
auto weights = wrapToHalideBuffer(blobs[0], {numChannels}); Halide::Expr topExpr = input;
Halide::Expr topExpr = input * weights(c); if (hasWeights)
{
auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
topExpr *= weights(c);
}
if (hasBias) if (hasBias)
{ {
auto bias = wrapToHalideBuffer(blobs[1], {numChannels}); auto bias = wrapToHalideBuffer(blobs.back(), {numChannels});
topExpr += bias(c); topExpr += bias(c);
} }
top(x, y, c, n) = topExpr; top(x, y, c, n) = topExpr;
@ -178,10 +197,24 @@ public:
lp.precision = InferenceEngine::Precision::FP32; lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp)); std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
const int numChannels = blobs[0].total(); CV_Assert(!blobs.empty());
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C); const size_t numChannels = blobs[0].total();
if (hasWeights)
{
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
}
else
{
auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
{numChannels});
weights->allocate();
std::vector<float> ones(numChannels, 1);
weights->set(ones);
ieLayer->_weights = weights;
}
if (hasBias) if (hasBias)
ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {numChannels}, InferenceEngine::Layout::C); ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer)); return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE #endif // HAVE_INF_ENGINE
@ -190,8 +223,8 @@ public:
void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
{ {
scale = !blobs.empty() ? blobs[0] : Mat(); scale = hasWeights ? blobs[0] : Mat();
shift = hasBias ? blobs[1] : Mat(); shift = hasBias ? blobs.back() : Mat();
} }
virtual int64 getFLOPS(const std::vector<MatShape> &inputs, virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
@ -205,6 +238,9 @@ public:
} }
return flops; return flops;
} }
private:
bool hasWeights;
}; };
@ -213,5 +249,16 @@ Ptr<ScaleLayer> ScaleLayer::create(const LayerParams& params)
return Ptr<ScaleLayer>(new ScaleLayerImpl(params)); return Ptr<ScaleLayer>(new ScaleLayerImpl(params));
} }
Ptr<Layer> ShiftLayer::create(const LayerParams& params)
{
LayerParams scaleParams;
scaleParams.name = params.name;
scaleParams.type = "Scale";
scaleParams.blobs = params.blobs;
scaleParams.set("bias_term", true);
scaleParams.set("axis", 0);
return Ptr<ScaleLayer>(new ScaleLayerImpl(scaleParams));
}
} // namespace dnn } // namespace dnn
} // namespace cv } // namespace cv

@ -1,145 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
/*
Implementation of shift layer, which adds up const values to blob.
*/
#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
class ShiftLayerImpl CV_FINAL : public ShiftLayer
{
public:
ShiftLayerImpl(const LayerParams &params)
{
setParamsFrom(params);
CV_Assert(blobs.size() == 1);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
internals.assign(1, shape(1, total(inputs[0], 2)));
return true;
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_Assert(inputs.size() > 0);
CV_Assert(blobs.size() > 0);
if(inputs[0]->dims == blobs[0].dims)
{
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Mat &inpBlob = *inputs[ii];
Mat &outBlob = outputs[ii];
outBlob = inpBlob + blobs[0];
}
}
else
{
Mat biasOnesMat = internals[0];
biasOnesMat.setTo(1);
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Mat &inpBlob = *inputs[ii];
Mat &outBlob = outputs[ii];
inpBlob.copyTo(outBlob);
for (int n = 0; n < inpBlob.size[0]; n++)
{
Mat dstMat(inpBlob.size[1], inpBlob.size[2] * inpBlob.size[3],
outBlob.type(), outBlob.ptr(n));
gemm(blobs[0], biasOnesMat, 1, dstMat, 1, dstMat); //TODO: gemv
}
}
}
}
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
// Inference Engine has no layer just for biases. Create a linear
// transformation layer with ones weights.
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "ScaleShift";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
{blobs[0].total()});
weights->allocate();
std::vector<float> ones(blobs[0].total(), 1);
weights->set(ones);
ieLayer->_weights = weights;
ieLayer->_biases = wrapToInfEngineBlob(blobs[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
{
scale = Mat();
shift = blobs[0];
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
long flops = 0;
for(int i= 0; i < inputs.size(); i++)
{
flops += total(inputs[i]);
}
return flops;
}
};
Ptr<ShiftLayer> ShiftLayer::create(const LayerParams& params)
{
return Ptr<ShiftLayer>(new ShiftLayerImpl(params));
}
}
}

@ -504,7 +504,7 @@ static bool ocl4dnnFastBufferGEMM(const CBLAS_TRANSPOSE TransA,
oclk_gemm_float.set(arg_idx++, (float)alpha); oclk_gemm_float.set(arg_idx++, (float)alpha);
oclk_gemm_float.set(arg_idx++, (float)beta); oclk_gemm_float.set(arg_idx++, (float)beta);
bool ret; bool ret = true;
if (TransB == CblasNoTrans || TransA != CblasNoTrans) { if (TransB == CblasNoTrans || TransA != CblasNoTrans) {
int stride = 256; int stride = 256;
for (int start_index = 0; start_index < K; start_index += stride) { for (int start_index = 0; start_index < K; start_index += stride) {

@ -743,10 +743,20 @@ void TFImporter::populateNet(Net dstNet)
if (haveConst) if (haveConst)
{ {
layerParams.blobs.resize(1); Mat values = getTensorContent(getConstBlob(layer, value_id));
blobFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]); CV_Assert(values.type() == CV_32FC1);
int id = dstNet.addLayer(name, "Shift", layerParams); int id;
if (values.total() == 1) // is a scalar.
{
layerParams.set("shift", values.at<float>(0));
id = dstNet.addLayer(name, "Power", layerParams);
}
else // is a vector
{
layerParams.blobs.resize(1, values);
id = dstNet.addLayer(name, "Shift", layerParams);
}
layer_id[name] = id; layer_id[name] = id;
// one input only // one input only
@ -777,11 +787,21 @@ void TFImporter::populateNet(Net dstNet)
} }
CV_Assert(haveConst); CV_Assert(haveConst);
layerParams.blobs.resize(1); Mat values = getTensorContent(getConstBlob(layer, value_id));
blobFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]); CV_Assert(values.type() == CV_32FC1);
layerParams.blobs[0] *= -1; values *= -1.0f;
int id = dstNet.addLayer(name, "Shift", layerParams); int id;
if (values.total() == 1) // is a scalar.
{
layerParams.set("shift", values.at<float>(0));
id = dstNet.addLayer(name, "Power", layerParams);
}
else // is a vector
{
layerParams.blobs.resize(1, values);
id = dstNet.addLayer(name, "Shift", layerParams);
}
layer_id[name] = id; layer_id[name] = id;
// one input only // one input only

@ -40,17 +40,18 @@ TEST(Padding_Halide, Accuracy)
{ {
static const int kNumRuns = 10; static const int kNumRuns = 10;
std::vector<int> paddings(8); std::vector<int> paddings(8);
cv::RNG& rng = cv::theRNG();
for (int t = 0; t < kNumRuns; ++t) for (int t = 0; t < kNumRuns; ++t)
{ {
for (int i = 0; i < paddings.size(); ++i) for (int i = 0; i < paddings.size(); ++i)
paddings[i] = rand() % 5; paddings[i] = rng(5);
LayerParams lp; LayerParams lp;
lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size())); lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
lp.type = "Padding"; lp.type = "Padding";
lp.name = "testLayer"; lp.name = "testLayer";
Mat input({1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10}, CV_32F); Mat input({1 + rng(10), 1 + rng(10), 1 + rng(10), 1 + rng(10)}, CV_32F);
test(lp, input); test(lp, input);
} }
} }
@ -633,7 +634,7 @@ TEST_P(Eltwise, Accuracy)
eltwiseParam.set("operation", op); eltwiseParam.set("operation", op);
if (op == "sum" && weighted) if (op == "sum" && weighted)
{ {
RNG rng = cv::theRNG(); RNG& rng = cv::theRNG();
std::vector<float> coeff(1 + numConv); std::vector<float> coeff(1 + numConv);
for (int i = 0; i < coeff.size(); ++i) for (int i = 0; i < coeff.size(); ++i)
{ {

@ -376,7 +376,8 @@ TEST(Test_TensorFlow, memory_read)
class ResizeBilinearLayer CV_FINAL : public Layer class ResizeBilinearLayer CV_FINAL : public Layer
{ {
public: public:
ResizeBilinearLayer(const LayerParams &params) : Layer(params) ResizeBilinearLayer(const LayerParams &params) : Layer(params),
outWidth(0), outHeight(0), factorWidth(1), factorHeight(1)
{ {
CV_Assert(!params.get<bool>("align_corners", false)); CV_Assert(!params.get<bool>("align_corners", false));
CV_Assert(!blobs.empty()); CV_Assert(!blobs.empty());

@ -135,8 +135,10 @@ CVAPI(int) cvNamedWindow( const char* name, int flags CV_DEFAULT(CV_WINDOW_AUTOS
CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value); CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value);
CVAPI(double) cvGetWindowProperty(const char* name, int prop_id); CVAPI(double) cvGetWindowProperty(const char* name, int prop_id);
#ifdef __cplusplus // FIXIT remove in OpenCV 4.0
/* Get window image rectangle coordinates, width and height */ /* Get window image rectangle coordinates, width and height */
CVAPI(cv::Rect)cvGetWindowImageRect(const char* name); CVAPI(cv::Rect)cvGetWindowImageRect(const char* name);
#endif
/* display image within window (highgui windows remember their content) */ /* display image within window (highgui windows remember their content) */
CVAPI(void) cvShowImage( const char* name, const CvArr* image ); CVAPI(void) cvShowImage( const char* name, const CvArr* image );

@ -1977,10 +1977,11 @@ detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good ex
transform. transform.
@param image 8-bit, single-channel binary source image. The image may be modified by the function. @param image 8-bit, single-channel binary source image. The image may be modified by the function.
@param lines Output vector of lines. Each line is represented by a two-element vector @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
\f$(\rho, \theta)\f$ . \f$\rho\f$ is the distance from the coordinate origin \f$(0,0)\f$ (top-left corner of \f$(\rho, \theta)\f$ or \f$(\rho, \theta, \votes)\f$ . \f$\rho\f$ is the distance from the coordinate origin \f$(0,0)\f$ (top-left corner of
the image). \f$\theta\f$ is the line rotation angle in radians ( the image). \f$\theta\f$ is the line rotation angle in radians (
\f$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\f$ ). \f$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\f$ ).
\f$\votes\f$ is the value of accumulator.
@param rho Distance resolution of the accumulator in pixels. @param rho Distance resolution of the accumulator in pixels.
@param theta Angle resolution of the accumulator in radians. @param theta Angle resolution of the accumulator in radians.
@param threshold Accumulator threshold parameter. Only those lines are returned that get enough @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
@ -2155,8 +2156,8 @@ you know it. Or, you may set maxRadius to a negative number to return centers on
search, and find the correct radius using an additional procedure. search, and find the correct radius using an additional procedure.
@param image 8-bit, single-channel, grayscale input image. @param image 8-bit, single-channel, grayscale input image.
@param circles Output vector of found circles. Each vector is encoded as a 3-element @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element
floating-point vector \f$(x, y, radius)\f$ . floating-point vector \f$(x, y, radius)\f$ or \f$(x, y, radius, votes)\f$ .
@param method Detection method, see #HoughModes. Currently, the only implemented method is #HOUGH_GRADIENT @param method Detection method, see #HoughModes. Currently, the only implemented method is #HOUGH_GRADIENT
@param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has

@ -56,4 +56,30 @@ PERF_TEST(PerfHoughCircles2, ManySmallCircles)
SANITY_CHECK_NOTHING(); SANITY_CHECK_NOTHING();
} }
PERF_TEST(PerfHoughCircles4f, Basic)
{
string filename = getDataPath("cv/imgproc/stuff.jpg");
const double dp = 1.0;
double minDist = 20;
double edgeThreshold = 20;
double accumThreshold = 30;
int minRadius = 20;
int maxRadius = 200;
Mat img = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty()) << "Unable to load source image " << filename;
GaussianBlur(img, img, Size(9, 9), 2, 2);
vector<Vec4f> circles;
declare.in(img);
TEST_CYCLE()
{
HoughCircles(img, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
}
SANITY_CHECK_NOTHING();
}
} // namespace } // namespace

@ -69,4 +69,47 @@ PERF_TEST_P(Image_RhoStep_ThetaStep_Threshold, HoughLines,
SANITY_CHECK_NOTHING(); SANITY_CHECK_NOTHING();
} }
PERF_TEST_P(Image_RhoStep_ThetaStep_Threshold, HoughLines3f,
testing::Combine(
testing::Values( "cv/shared/pic5.png", "stitching/a1.png" ),
testing::Values( 1, 10 ),
testing::Values( 0.01, 0.1 ),
testing::Values( 0.5, 1.1 )
)
)
{
string filename = getDataPath(get<0>(GetParam()));
double rhoStep = get<1>(GetParam());
double thetaStep = get<2>(GetParam());
double threshold_ratio = get<3>(GetParam());
Mat image = imread(filename, IMREAD_GRAYSCALE);
if (image.empty())
FAIL() << "Unable to load source image" << filename;
Canny(image, image, 32, 128);
// add some syntetic lines:
line(image, Point(0, 0), Point(image.cols, image.rows), Scalar::all(255), 3);
line(image, Point(image.cols, 0), Point(image.cols/2, image.rows), Scalar::all(255), 3);
vector<Vec3f> lines;
declare.time(60);
int hough_threshold = (int)(std::min(image.cols, image.rows) * threshold_ratio);
TEST_CYCLE() HoughLines(image, lines, rhoStep, thetaStep, hough_threshold);
printf("%dx%d: %d lines\n", image.cols, image.rows, (int)lines.size());
if (threshold_ratio < 1.0)
{
EXPECT_GE(lines.size(), 2u);
}
EXPECT_LT(lines.size(), 3000u);
SANITY_CHECK_NOTHING();
}
} // namespace } // namespace

@ -285,7 +285,8 @@ struct CvtHelper
template< typename VScn, typename VDcn, typename VDepth, SizePolicy sizePolicy = NONE > template< typename VScn, typename VDcn, typename VDepth, SizePolicy sizePolicy = NONE >
struct OclHelper struct OclHelper
{ {
OclHelper( InputArray _src, OutputArray _dst, int dcn) OclHelper( InputArray _src, OutputArray _dst, int dcn) :
nArgs(0)
{ {
src = _src.getUMat(); src = _src.getUMat();
Size sz = src.size(), dstSz; Size sz = src.size(), dstSz;

@ -4643,7 +4643,8 @@ static bool ippFilter2D(int stype, int dtype, int kernel_type,
static bool dftFilter2D(int stype, int dtype, int kernel_type, static bool dftFilter2D(int stype, int dtype, int kernel_type,
uchar * src_data, size_t src_step, uchar * src_data, size_t src_step,
uchar * dst_data, size_t dst_step, uchar * dst_data, size_t dst_step,
int width, int height, int full_width, int full_height,
int offset_x, int offset_y,
uchar * kernel_data, size_t kernel_step, uchar * kernel_data, size_t kernel_step,
int kernel_width, int kernel_height, int kernel_width, int kernel_height,
int anchor_x, int anchor_y, int anchor_x, int anchor_y,
@ -4666,8 +4667,8 @@ static bool dftFilter2D(int stype, int dtype, int kernel_type,
Point anchor = Point(anchor_x, anchor_y); Point anchor = Point(anchor_x, anchor_y);
Mat kernel = Mat(Size(kernel_width, kernel_height), kernel_type, kernel_data, kernel_step); Mat kernel = Mat(Size(kernel_width, kernel_height), kernel_type, kernel_data, kernel_step);
Mat src(Size(width, height), stype, src_data, src_step); Mat src(Size(full_width-offset_x, full_height-offset_y), stype, src_data, src_step);
Mat dst(Size(width, height), dtype, dst_data, dst_step); Mat dst(Size(full_width, full_height), dtype, dst_data, dst_step);
Mat temp; Mat temp;
int src_channels = CV_MAT_CN(stype); int src_channels = CV_MAT_CN(stype);
int dst_channels = CV_MAT_CN(dtype); int dst_channels = CV_MAT_CN(dtype);
@ -4680,10 +4681,10 @@ static bool dftFilter2D(int stype, int dtype, int kernel_type,
// we just use that. // we just use that.
int corrDepth = ddepth; int corrDepth = ddepth;
if ((ddepth == CV_32F || ddepth == CV_64F) && src_data != dst_data) { if ((ddepth == CV_32F || ddepth == CV_64F) && src_data != dst_data) {
temp = Mat(Size(width, height), dtype, dst_data, dst_step); temp = Mat(Size(full_width, full_height), dtype, dst_data, dst_step);
} else { } else {
corrDepth = ddepth == CV_64F ? CV_64F : CV_32F; corrDepth = ddepth == CV_64F ? CV_64F : CV_32F;
temp.create(Size(width, height), CV_MAKETYPE(corrDepth, dst_channels)); temp.create(Size(full_width, full_height), CV_MAKETYPE(corrDepth, dst_channels));
} }
crossCorr(src, kernel, temp, src.size(), crossCorr(src, kernel, temp, src.size(),
CV_MAKETYPE(corrDepth, src_channels), CV_MAKETYPE(corrDepth, src_channels),
@ -4694,9 +4695,9 @@ static bool dftFilter2D(int stype, int dtype, int kernel_type,
} }
} else { } else {
if (src_data != dst_data) if (src_data != dst_data)
temp = Mat(Size(width, height), dtype, dst_data, dst_step); temp = Mat(Size(full_width, full_height), dtype, dst_data, dst_step);
else else
temp.create(Size(width, height), dtype); temp.create(Size(full_width, full_height), dtype);
crossCorr(src, kernel, temp, src.size(), crossCorr(src, kernel, temp, src.size(),
CV_MAKETYPE(ddepth, src_channels), CV_MAKETYPE(ddepth, src_channels),
anchor, delta, borderType); anchor, delta, borderType);
@ -4830,7 +4831,8 @@ void filter2D(int stype, int dtype, int kernel_type,
res = dftFilter2D(stype, dtype, kernel_type, res = dftFilter2D(stype, dtype, kernel_type,
src_data, src_step, src_data, src_step,
dst_data, dst_step, dst_data, dst_step,
width, height, full_width, full_height,
offset_x, offset_y,
kernel_data, kernel_step, kernel_data, kernel_step,
kernel_width, kernel_height, kernel_width, kernel_height,
anchor_x, anchor_y, anchor_x, anchor_y,

@ -105,48 +105,56 @@ array of (rho, theta) pairs. linesMax is the buffer size (number of pairs).
Functions return the actual number of found lines. Functions return the actual number of found lines.
*/ */
static void static void
HoughLinesStandard( const Mat& img, float rho, float theta, HoughLinesStandard( InputArray src, OutputArray lines, int type,
int threshold, std::vector<Vec2f>& lines, int linesMax, float rho, float theta,
int threshold, int linesMax,
double min_theta, double max_theta ) double min_theta, double max_theta )
{ {
CV_CheckType(type, type == CV_32FC2 || type == CV_32FC3, "Internal error");
Mat img = src.getMat();
int i, j; int i, j;
float irho = 1 / rho; float irho = 1 / rho;
CV_Assert( img.type() == CV_8UC1 ); CV_Assert( img.type() == CV_8UC1 );
CV_Assert( linesMax > 0 );
const uchar* image = img.ptr(); const uchar* image = img.ptr();
int step = (int)img.step; int step = (int)img.step;
int width = img.cols; int width = img.cols;
int height = img.rows; int height = img.rows;
if (max_theta < min_theta ) { int max_rho = width + height;
CV_Error( CV_StsBadArg, "max_theta must be greater than min_theta" ); int min_rho = -max_rho;
}
CV_CheckGE(max_theta, min_theta, "max_theta must be greater than min_theta");
int numangle = cvRound((max_theta - min_theta) / theta); int numangle = cvRound((max_theta - min_theta) / theta);
int numrho = cvRound(((width + height) * 2 + 1) / rho); int numrho = cvRound(((max_rho - min_rho) + 1) / rho);
#if defined HAVE_IPP && IPP_VERSION_X100 >= 810 && !IPP_DISABLE_HOUGH #if defined HAVE_IPP && IPP_VERSION_X100 >= 810 && !IPP_DISABLE_HOUGH
CV_IPP_CHECK() if (type == CV_32FC2 && CV_IPP_CHECK_COND)
{ {
IppiSize srcSize = { width, height }; IppiSize srcSize = { width, height };
IppPointPolar delta = { rho, theta }; IppPointPolar delta = { rho, theta };
IppPointPolar dstRoi[2] = {{(Ipp32f) -(width + height), (Ipp32f) min_theta},{(Ipp32f) (width + height), (Ipp32f) max_theta}}; IppPointPolar dstRoi[2] = {{(Ipp32f) min_rho, (Ipp32f) min_theta},{(Ipp32f) max_rho, (Ipp32f) max_theta}};
int bufferSize; int bufferSize;
int nz = countNonZero(img); int nz = countNonZero(img);
int ipp_linesMax = std::min(linesMax, nz*numangle/threshold); int ipp_linesMax = std::min(linesMax, nz*numangle/threshold);
int linesCount = 0; int linesCount = 0;
lines.resize(ipp_linesMax); std::vector<Vec2f> _lines(ipp_linesMax);
IppStatus ok = ippiHoughLineGetSize_8u_C1R(srcSize, delta, ipp_linesMax, &bufferSize); IppStatus ok = ippiHoughLineGetSize_8u_C1R(srcSize, delta, ipp_linesMax, &bufferSize);
Ipp8u* buffer = ippsMalloc_8u_L(bufferSize); Ipp8u* buffer = ippsMalloc_8u_L(bufferSize);
if (ok >= 0) {ok = CV_INSTRUMENT_FUN_IPP(ippiHoughLine_Region_8u32f_C1R, image, step, srcSize, (IppPointPolar*) &lines[0], dstRoi, ipp_linesMax, &linesCount, delta, threshold, buffer);}; if (ok >= 0) {ok = CV_INSTRUMENT_FUN_IPP(ippiHoughLine_Region_8u32f_C1R, image, step, srcSize, (IppPointPolar*) &_lines[0], dstRoi, ipp_linesMax, &linesCount, delta, threshold, buffer);};
ippsFree(buffer); ippsFree(buffer);
if (ok >= 0) if (ok >= 0)
{ {
lines.resize(linesCount); lines.create(linesCount, 1, CV_32FC2);
Mat(linesCount, 1, CV_32FC2, &_lines[0]).copyTo(lines);
CV_IMPL_ADD(CV_IMPL_IPP); CV_IMPL_ADD(CV_IMPL_IPP);
return; return;
} }
lines.clear();
setIppErrorStatus(); setIppErrorStatus();
} }
#endif #endif
@ -185,6 +193,9 @@ HoughLinesStandard( const Mat& img, float rho, float theta,
// stage 4. store the first min(total,linesMax) lines to the output buffer // stage 4. store the first min(total,linesMax) lines to the output buffer
linesMax = std::min(linesMax, (int)_sort_buf.size()); linesMax = std::min(linesMax, (int)_sort_buf.size());
double scale = 1./(numrho+2); double scale = 1./(numrho+2);
lines.create(linesMax, 1, type);
Mat _lines = lines.getMat();
for( i = 0; i < linesMax; i++ ) for( i = 0; i < linesMax; i++ )
{ {
LinePolar line; LinePolar line;
@ -193,7 +204,15 @@ HoughLinesStandard( const Mat& img, float rho, float theta,
int r = idx - (n+1)*(numrho+2) - 1; int r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho; line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = static_cast<float>(min_theta) + n * theta; line.angle = static_cast<float>(min_theta) + n * theta;
lines.push_back(Vec2f(line.rho, line.angle)); if (type == CV_32FC2)
{
_lines.at<Vec2f>(i) = Vec2f(line.rho, line.angle);
}
else
{
CV_DbgAssert(type == CV_32FC3);
_lines.at<Vec3f>(i) = Vec3f(line.rho, line.angle, (float)accum[idx]);
}
} }
} }
@ -212,15 +231,17 @@ struct hough_index
static void static void
HoughLinesSDiv( const Mat& img, HoughLinesSDiv( InputArray image, OutputArray lines, int type,
float rho, float theta, int threshold, float rho, float theta, int threshold,
int srn, int stn, int srn, int stn, int linesMax,
std::vector<Vec2f>& lines, int linesMax,
double min_theta, double max_theta ) double min_theta, double max_theta )
{ {
CV_CheckType(type, type == CV_32FC2 || type == CV_32FC3, "Internal error");
#define _POINT(row, column)\ #define _POINT(row, column)\
(image_src[(row)*step+(column)]) (image_src[(row)*step+(column)])
Mat img = image.getMat();
int index, i; int index, i;
int ri, ti, ti1, ti0; int ri, ti, ti1, ti0;
int row, col; int row, col;
@ -343,7 +364,7 @@ HoughLinesSDiv( const Mat& img,
if( count * 100 > rn * tn ) if( count * 100 > rn * tn )
{ {
HoughLinesStandard( img, rho, theta, threshold, lines, linesMax, min_theta, max_theta ); HoughLinesStandard( image, lines, type, rho, theta, threshold, linesMax, min_theta, max_theta );
return; return;
} }
@ -415,11 +436,21 @@ HoughLinesSDiv( const Mat& img,
} }
} }
lines.create((int)lst.size(), 1, type);
Mat _lines = lines.getMat();
for( size_t idx = 0; idx < lst.size(); idx++ ) for( size_t idx = 0; idx < lst.size(); idx++ )
{ {
if( lst[idx].rho < 0 ) if( lst[idx].rho < 0 )
continue; continue;
lines.push_back(Vec2f(lst[idx].rho, lst[idx].theta)); if (type == CV_32FC2)
{
_lines.at<Vec2f>((int)idx) = Vec2f(lst[idx].rho, lst[idx].theta);
}
else
{
CV_DbgAssert(type == CV_32FC3);
_lines.at<Vec3f>((int)idx) = Vec3f(lst[idx].rho, lst[idx].theta, (float)lst[idx].value);
}
} }
} }
@ -861,24 +892,26 @@ static bool ocl_HoughLinesP(InputArray _src, OutputArray _lines, double rho, dou
#endif /* HAVE_OPENCL */ #endif /* HAVE_OPENCL */
void HoughLines( InputArray _image, OutputArray _lines, void HoughLines( InputArray _image, OutputArray lines,
double rho, double theta, int threshold, double rho, double theta, int threshold,
double srn, double stn, double min_theta, double max_theta ) double srn, double stn, double min_theta, double max_theta )
{ {
CV_INSTRUMENT_REGION() CV_INSTRUMENT_REGION()
CV_OCL_RUN(srn == 0 && stn == 0 && _image.isUMat() && _lines.isUMat(), int type = CV_32FC2;
ocl_HoughLines(_image, _lines, rho, theta, threshold, min_theta, max_theta)); if (lines.fixedType())
{
type = lines.type();
CV_CheckType(type, type == CV_32FC2 || type == CV_32FC3, "Wrong type of output lines");
}
Mat image = _image.getMat(); CV_OCL_RUN(srn == 0 && stn == 0 && _image.isUMat() && lines.isUMat() && type == CV_32FC2,
std::vector<Vec2f> lines; ocl_HoughLines(_image, lines, rho, theta, threshold, min_theta, max_theta));
if( srn == 0 && stn == 0 ) if( srn == 0 && stn == 0 )
HoughLinesStandard(image, (float)rho, (float)theta, threshold, lines, INT_MAX, min_theta, max_theta ); HoughLinesStandard(_image, lines, type, (float)rho, (float)theta, threshold, INT_MAX, min_theta, max_theta );
else else
HoughLinesSDiv(image, (float)rho, (float)theta, threshold, cvRound(srn), cvRound(stn), lines, INT_MAX, min_theta, max_theta); HoughLinesSDiv(_image, lines, type, (float)rho, (float)theta, threshold, cvRound(srn), cvRound(stn), INT_MAX, min_theta, max_theta);
Mat(lines).copyTo(_lines);
} }
@ -1007,11 +1040,16 @@ static bool cmpAccum(const EstimatedCircle& left, const EstimatedCircle& right)
return false; return false;
} }
inline Vec3f GetCircle(const EstimatedCircle& est) static inline Vec3f GetCircle(const EstimatedCircle& est)
{ {
return est.c; return est.c;
} }
static inline Vec4f GetCircle4f(const EstimatedCircle& est)
{
return Vec4f(est.c[0], est.c[1], est.c[2], (float)est.accum);
}
class NZPointList : public std::vector<Point> class NZPointList : public std::vector<Point>
{ {
private: private:
@ -1264,12 +1302,13 @@ private:
Mutex& _lock; Mutex& _lock;
}; };
static bool CheckDistance(const std::vector<Vec3f> &circles, size_t endIdx, const Vec3f& circle, float minDist2) template<typename T>
static bool CheckDistance(const std::vector<T> &circles, size_t endIdx, const T& circle, float minDist2)
{ {
bool goodPoint = true; bool goodPoint = true;
for (uint j = 0; j < endIdx; ++j) for (uint j = 0; j < endIdx; ++j)
{ {
Vec3f pt = circles[j]; T pt = circles[j];
float distX = circle[0] - pt[0], distY = circle[1] - pt[1]; float distX = circle[0] - pt[0], distY = circle[1] - pt[1];
if (distX * distX + distY * distY < minDist2) if (distX * distX + distY * distY < minDist2)
{ {
@ -1297,13 +1336,31 @@ static void GetCircleCenters(const std::vector<int> &centers, std::vector<Vec3f>
} }
} }
static void RemoveOverlaps(std::vector<Vec3f>& circles, float minDist) static void GetCircleCenters(const std::vector<int> &centers, std::vector<Vec4f> &circles, int acols, float minDist, float dr)
{
size_t centerCnt = centers.size();
float minDist2 = minDist * minDist;
for (size_t i = 0; i < centerCnt; ++i)
{
int center = centers[i];
int y = center / acols;
int x = center - y * acols;
Vec4f circle = Vec4f((x + 0.5f) * dr, (y + 0.5f) * dr, 0, (float)center);
bool goodPoint = CheckDistance(circles, circles.size(), circle, minDist2);
if (goodPoint)
circles.push_back(circle);
}
}
template<typename T>
static void RemoveOverlaps(std::vector<T>& circles, float minDist)
{ {
float minDist2 = minDist * minDist; float minDist2 = minDist * minDist;
size_t endIdx = 1; size_t endIdx = 1;
for (size_t i = 1; i < circles.size(); ++i) for (size_t i = 1; i < circles.size(); ++i)
{ {
Vec3f circle = circles[i]; T circle = circles[i];
if (CheckDistance(circles, endIdx, circle, minDist2)) if (CheckDistance(circles, endIdx, circle, minDist2))
{ {
circles[endIdx] = circle; circles[endIdx] = circle;
@ -1313,6 +1370,16 @@ static void RemoveOverlaps(std::vector<Vec3f>& circles, float minDist)
circles.resize(endIdx); circles.resize(endIdx);
} }
static void CreateCircles(const std::vector<EstimatedCircle>& circlesEst, std::vector<Vec3f>& circles)
{
std::transform(circlesEst.begin(), circlesEst.end(), std::back_inserter(circles), GetCircle);
}
static void CreateCircles(const std::vector<EstimatedCircle>& circlesEst, std::vector<Vec4f>& circles)
{
std::transform(circlesEst.begin(), circlesEst.end(), std::back_inserter(circles), GetCircle4f);
}
template<class NZPoints> template<class NZPoints>
class HoughCircleEstimateRadiusInvoker : public ParallelLoopBody class HoughCircleEstimateRadiusInvoker : public ParallelLoopBody
{ {
@ -1556,11 +1623,14 @@ inline int HoughCircleEstimateRadiusInvoker<NZPointSet>::filterCircles(const Poi
return nzCount; return nzCount;
} }
static void HoughCirclesGradient(InputArray _image, OutputArray _circles, float dp, float minDist, template <typename CircleType>
static void HoughCirclesGradient(InputArray _image, OutputArray _circles,
float dp, float minDist,
int minRadius, int maxRadius, int cannyThreshold, int minRadius, int maxRadius, int cannyThreshold,
int accThreshold, int maxCircles, int kernelSize, bool centersOnly) int accThreshold, int maxCircles, int kernelSize, bool centersOnly)
{ {
CV_Assert(kernelSize == -1 || kernelSize == 3 || kernelSize == 5 || kernelSize == 7); CV_Assert(kernelSize == -1 || kernelSize == 3 || kernelSize == 5 || kernelSize == 7);
dp = max(dp, 1.f); dp = max(dp, 1.f);
float idp = 1.f/dp; float idp = 1.f/dp;
@ -1602,7 +1672,7 @@ static void HoughCirclesGradient(InputArray _image, OutputArray _circles, float
std::sort(centers.begin(), centers.end(), hough_cmp_gt(accum.ptr<int>())); std::sort(centers.begin(), centers.end(), hough_cmp_gt(accum.ptr<int>()));
std::vector<Vec3f> circles; std::vector<CircleType> circles;
circles.reserve(256); circles.reserve(256);
if (centersOnly) if (centersOnly)
{ {
@ -1635,15 +1705,16 @@ static void HoughCirclesGradient(InputArray _image, OutputArray _circles, float
// Sort by accumulator value // Sort by accumulator value
std::sort(circlesEst.begin(), circlesEst.end(), cmpAccum); std::sort(circlesEst.begin(), circlesEst.end(), cmpAccum);
std::transform(circlesEst.begin(), circlesEst.end(), std::back_inserter(circles), GetCircle);
// Create Circles
CreateCircles(circlesEst, circles);
RemoveOverlaps(circles, minDist); RemoveOverlaps(circles, minDist);
} }
if(circles.size() > 0) if (circles.size() > 0)
{ {
int numCircles = std::min(maxCircles, int(circles.size())); int numCircles = std::min(maxCircles, int(circles.size()));
_circles.create(1, numCircles, CV_32FC3); Mat(1, numCircles, cv::traits::Type<CircleType>::value, &circles[0]).copyTo(_circles);
Mat(1, numCircles, CV_32FC3, &circles[0]).copyTo(_circles.getMat());
return; return;
} }
} }
@ -1656,6 +1727,13 @@ static void HoughCircles( InputArray _image, OutputArray _circles,
{ {
CV_INSTRUMENT_REGION() CV_INSTRUMENT_REGION()
int type = CV_32FC3;
if( _circles.fixedType() )
{
type = _circles.type();
CV_CheckType(type, type == CV_32FC3 || type == CV_32FC4, "Wrong type of output circles");
}
CV_Assert(!_image.empty() && _image.type() == CV_8UC1 && (_image.isMat() || _image.isUMat())); CV_Assert(!_image.empty() && _image.type() == CV_8UC1 && (_image.isMat() || _image.isUMat()));
CV_Assert(_circles.isMat() || _circles.isVector()); CV_Assert(_circles.isMat() || _circles.isVector());
@ -1679,9 +1757,16 @@ static void HoughCircles( InputArray _image, OutputArray _circles,
switch( method ) switch( method )
{ {
case CV_HOUGH_GRADIENT: case CV_HOUGH_GRADIENT:
HoughCirclesGradient(_image, _circles, (float)dp, (float)minDist, if (type == CV_32FC3)
minRadius, maxRadius, cannyThresh, HoughCirclesGradient<Vec3f>(_image, _circles, (float)dp, (float)minDist,
accThresh, maxCircles, kernelSize, centersOnly); minRadius, maxRadius, cannyThresh,
accThresh, maxCircles, kernelSize, centersOnly);
else if (type == CV_32FC4)
HoughCirclesGradient<Vec4f>(_image, _circles, (float)dp, (float)minDist,
minRadius, maxRadius, cannyThresh,
accThresh, maxCircles, kernelSize, centersOnly);
else
CV_Error(Error::StsError, "Internal error");
break; break;
default: default:
CV_Error( Error::StsBadArg, "Unrecognized method id. Actually only CV_HOUGH_GRADIENT is supported." ); CV_Error( Error::StsBadArg, "Unrecognized method id. Actually only CV_HOUGH_GRADIENT is supported." );
@ -1764,12 +1849,12 @@ cvHoughLines2( CvArr* src_image, void* lineStorage, int method,
switch( method ) switch( method )
{ {
case CV_HOUGH_STANDARD: case CV_HOUGH_STANDARD:
HoughLinesStandard( image, (float)rho, HoughLinesStandard( image, l2, CV_32FC2, (float)rho,
(float)theta, threshold, l2, linesMax, min_theta, max_theta ); (float)theta, threshold, linesMax, min_theta, max_theta );
break; break;
case CV_HOUGH_MULTI_SCALE: case CV_HOUGH_MULTI_SCALE:
HoughLinesSDiv( image, (float)rho, (float)theta, HoughLinesSDiv( image, l2, CV_32FC2, (float)rho, (float)theta,
threshold, iparam1, iparam2, l2, linesMax, min_theta, max_theta ); threshold, iparam1, iparam2, linesMax, min_theta, max_theta );
break; break;
case CV_HOUGH_PROBABILISTIC: case CV_HOUGH_PROBABILISTIC:
HoughLinesProbabilistic( image, (float)rho, (float)theta, HoughLinesProbabilistic( image, (float)rho, (float)theta,

@ -2119,4 +2119,79 @@ TEST(Imgproc_MorphEx, hitmiss_zero_kernel)
ASSERT_DOUBLE_EQ(cvtest::norm(dst, src, NORM_INF), 0.); ASSERT_DOUBLE_EQ(cvtest::norm(dst, src, NORM_INF), 0.);
} }
TEST(Imgproc_Filter2D, dftFilter2d_regression_10683)
{
uchar src_[24*24] = {
0, 40, 0, 0, 255, 0, 0, 78, 131, 0, 196, 0, 255, 0, 0, 0, 0, 255, 70, 0, 255, 0, 0, 0,
0, 0, 255, 204, 0, 0, 255, 93, 255, 0, 0, 255, 12, 0, 0, 0, 255, 121, 0, 255, 0, 0, 0, 255,
0, 178, 0, 25, 67, 0, 165, 0, 255, 0, 0, 181, 151, 175, 0, 0, 32, 0, 0, 255, 165, 93, 0, 255,
255, 255, 0, 0, 255, 126, 0, 0, 0, 0, 133, 29, 9, 0, 220, 255, 0, 142, 255, 255, 255, 0, 255, 0,
255, 32, 255, 0, 13, 237, 0, 0, 0, 0, 0, 19, 90, 0, 0, 85, 122, 62, 95, 29, 255, 20, 0, 0,
0, 0, 166, 41, 0, 48, 70, 0, 68, 0, 255, 0, 139, 7, 63, 144, 0, 204, 0, 0, 0, 98, 114, 255,
105, 0, 0, 0, 0, 255, 91, 0, 73, 0, 255, 0, 0, 0, 255, 198, 21, 0, 0, 0, 255, 43, 153, 128,
0, 98, 26, 0, 101, 0, 0, 0, 255, 0, 0, 0, 255, 77, 56, 0, 241, 0, 169, 132, 0, 255, 186, 255,
255, 87, 0, 1, 0, 0, 10, 39, 120, 0, 23, 69, 207, 0, 0, 0, 0, 84, 0, 0, 0, 0, 255, 0,
255, 0, 0, 136, 255, 77, 247, 0, 67, 0, 15, 255, 0, 143, 0, 243, 255, 0, 0, 238, 255, 0, 255, 8,
42, 0, 0, 255, 29, 0, 0, 0, 255, 255, 255, 75, 0, 0, 0, 255, 0, 0, 255, 38, 197, 0, 255, 87,
0, 123, 17, 0, 234, 0, 0, 149, 0, 0, 255, 16, 0, 0, 0, 255, 0, 255, 0, 38, 0, 114, 255, 76,
0, 0, 8, 0, 255, 0, 0, 0, 220, 0, 11, 255, 0, 0, 55, 98, 0, 0, 0, 255, 0, 175, 255, 110,
235, 0, 175, 0, 255, 227, 38, 206, 0, 0, 255, 246, 0, 0, 123, 183, 255, 0, 0, 255, 0, 156, 0, 54,
0, 255, 0, 202, 0, 0, 0, 0, 157, 0, 255, 63, 0, 0, 0, 0, 0, 255, 132, 0, 255, 0, 0, 0,
0, 0, 0, 255, 0, 0, 128, 126, 0, 243, 46, 7, 0, 211, 108, 166, 0, 0, 162, 227, 0, 204, 0, 51,
255, 216, 0, 0, 43, 0, 255, 40, 188, 188, 255, 0, 0, 255, 34, 0, 0, 168, 0, 0, 0, 35, 0, 0,
0, 80, 131, 255, 0, 255, 10, 0, 0, 0, 180, 255, 209, 255, 173, 34, 0, 66, 0, 49, 0, 255, 83, 0,
0, 204, 0, 91, 0, 0, 0, 205, 84, 0, 0, 0, 92, 255, 91, 0, 126, 0, 185, 145, 0, 0, 9, 0,
255, 0, 0, 255, 255, 0, 0, 255, 0, 0, 216, 0, 187, 221, 0, 0, 141, 0, 0, 209, 0, 0, 255, 0,
255, 0, 0, 154, 150, 0, 0, 0, 148, 0, 201, 255, 0, 255, 16, 0, 0, 160, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 255, 0, 255, 0, 255, 0, 255, 198, 255, 147, 131, 0, 255, 202, 0, 0, 0, 0, 255, 0,
0, 0, 0, 164, 181, 0, 0, 0, 69, 255, 31, 0, 255, 195, 0, 0, 255, 164, 109, 0, 0, 202, 0, 206,
0, 0, 61, 235, 33, 255, 77, 0, 0, 0, 0, 85, 0, 228, 0, 0, 0, 0, 255, 0, 0, 5, 255, 255
};
Mat_<uchar> src(24, 24, src_);
Mat dst = Mat::zeros(src.size(), src.type());
int sz = 12, size2 = sz * sz;
Mat kernel = Mat::ones(sz, sz, CV_32F) / size2;
uchar expected_[24*24] = {
83, 83, 77, 80, 76, 76, 76, 75, 71, 67, 72, 71, 73, 70, 80, 83, 86, 84, 89, 88, 88, 96, 99, 98,
83, 83, 77, 80, 76, 76, 76, 75, 71, 67, 72, 71, 73, 70, 80, 83, 86, 84, 89, 88, 88, 96, 99, 98,
82, 82, 77, 80, 77, 75, 74, 75, 70, 68, 71, 72, 72, 72, 82, 84, 88, 88, 93, 92, 93, 100, 105, 104,
76, 76, 72, 77, 73, 74, 73, 74, 69, 68, 71, 71, 73, 72, 82, 81, 86, 87, 92, 91, 92, 98, 103, 102,
75, 75, 72, 77, 73, 72, 75, 76, 74, 71, 73, 75, 76, 72, 81, 80, 85, 87, 90, 89, 90, 97, 102, 97,
74, 74, 71, 77, 72, 74, 77, 76, 74, 72, 74, 76, 77, 76, 84, 83, 85, 87, 90, 92, 93, 100, 102, 99,
72, 72, 69, 71, 68, 73, 73, 73, 70, 69, 74, 72, 75, 75, 81, 82, 85, 87, 90, 94, 96, 103, 102, 101,
71, 71, 68, 70, 68, 71, 73, 71, 69, 68, 74, 72, 73, 73, 81, 80, 84, 89, 91, 99, 102, 107, 106, 105,
74, 74, 70, 69, 67, 73, 76, 72, 69, 70, 79, 75, 74, 75, 82, 83, 88, 91, 92, 100, 104, 108, 106, 105,
75, 75, 71, 70, 67, 75, 76, 71, 67, 68, 75, 72, 72, 75, 81, 83, 87, 89, 89, 97, 102, 107, 103, 103,
69, 69, 67, 67, 65, 72, 74, 71, 70, 70, 75, 74, 74, 75, 80, 80, 84, 85, 85, 92, 96, 100, 97, 97,
67, 67, 67, 68, 67, 77, 79, 75, 74, 76, 81, 78, 81, 80, 84, 81, 84, 83, 83, 91, 94, 95, 93, 93,
73, 73, 71, 73, 70, 80, 82, 79, 80, 83, 85, 82, 82, 82, 87, 84, 88, 87, 84, 91, 93, 94, 93, 92,
72, 72, 74, 75, 71, 80, 81, 79, 80, 82, 82, 80, 82, 84, 88, 83, 87, 87, 83, 88, 88, 89, 90, 90,
78, 78, 81, 80, 74, 84, 86, 82, 85, 86, 85, 81, 83, 83, 86, 84, 85, 84, 78, 85, 82, 83, 85, 84,
81, 81, 84, 81, 75, 86, 90, 85, 89, 91, 89, 84, 86, 87, 90, 87, 89, 85, 78, 84, 79, 80, 81, 81,
76, 76, 80, 79, 73, 86, 90, 87, 92, 95, 92, 87, 91, 92, 93, 87, 89, 84, 77, 81, 76, 74, 76, 76,
77, 77, 80, 77, 72, 83, 86, 86, 93, 95, 91, 87, 92, 92, 93, 87, 90, 84, 79, 79, 75, 72, 75, 72,
80, 80, 81, 79, 72, 82, 86, 86, 95, 97, 89, 87, 89, 89, 91, 85, 88, 84, 79, 80, 73, 69, 74, 73,
82, 82, 82, 80, 74, 83, 86, 87, 98, 100, 90, 90, 93, 94, 94, 89, 90, 84, 82, 79, 71, 68, 72, 69,
76, 76, 77, 76, 70, 81, 83, 88, 99, 102, 92, 91, 97, 97, 97, 90, 90, 86, 83, 81, 70, 67, 70, 68,
75, 75, 76, 74, 69, 79, 84, 88, 102, 106, 95, 94, 99, 98, 98, 90, 89, 86, 82, 79, 67, 62, 65, 62,
80, 80, 82, 78, 71, 82, 87, 90, 105, 108, 96, 94, 99, 98, 97, 88, 88, 85, 81, 79, 65, 61, 65, 60,
77, 77, 80, 75, 66, 76, 81, 87, 102, 105, 92, 91, 95, 97, 96, 88, 89, 88, 84, 81, 67, 63, 68, 63
};
Mat_<uchar> expected(24, 24, expected_);
for(int r = 0; r < src.rows / 3; ++r)
{
for(int c = 0; c < src.cols / 3; ++c)
{
cv::Rect region(c * 3, r * 3, 3, 3);
Mat roi_i(src, region);
Mat roi_o(dst, region);
cv::filter2D(roi_i, roi_o, -1, kernel);
}
}
EXPECT_LE(cvtest::norm(dst, expected, NORM_INF), 2);
}
}} // namespace }} // namespace

@ -49,6 +49,8 @@ namespace opencv_test { namespace {
#define DEBUG_IMAGES 0 #define DEBUG_IMAGES 0
#endif #endif
//#define GENERATE_DATA // generate data in debug mode via CPU code path (without IPP / OpenCL and other accelerators)
using namespace cv; using namespace cv;
using namespace std; using namespace std;
@ -109,7 +111,8 @@ public:
{ {
} }
void run_test() template <typename CircleType>
void run_test(const char* xml_name)
{ {
string test_case_name = getTestCaseName(picture_name, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius); string test_case_name = getTestCaseName(picture_name, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
string filename = cvtest::TS::ptr()->get_data_path() + picture_name; string filename = cvtest::TS::ptr()->get_data_path() + picture_name;
@ -118,7 +121,7 @@ public:
GaussianBlur(src, src, Size(9, 9), 2, 2); GaussianBlur(src, src, Size(9, 9), 2, 2);
vector<Vec3f> circles; vector<CircleType> circles;
const double dp = 1.0; const double dp = 1.0;
HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius); HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
@ -127,31 +130,37 @@ public:
highlightCircles(filename, circles, imgProc + test_case_name + ".png"); highlightCircles(filename, circles, imgProc + test_case_name + ".png");
#endif #endif
string xml = imgProc + "HoughCircles.xml"; string xml = imgProc + xml_name;
FileStorage fs(xml, FileStorage::READ); #ifdef GENERATE_DATA
FileNode node = fs[test_case_name];
if (node.empty())
{ {
fs.release(); FileStorage fs(xml, FileStorage::READ);
fs.open(xml, FileStorage::APPEND); ASSERT_TRUE(!fs.isOpened() || fs[test_case_name].empty());
}
{
FileStorage fs(xml, FileStorage::APPEND);
EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml; EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml;
fs << test_case_name << circles; fs << test_case_name << circles;
fs.release();
fs.open(xml, FileStorage::READ);
EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml;
} }
#else
vector<Vec3f> exp_circles; FileStorage fs(xml, FileStorage::READ);
read(fs[test_case_name], exp_circles, vector<Vec3f>()); FileNode node = fs[test_case_name];
ASSERT_FALSE(node.empty()) << "Missing test data: " << test_case_name << std::endl << "XML: " << xml;
vector<CircleType> exp_circles;
read(fs[test_case_name], exp_circles, vector<CircleType>());
fs.release(); fs.release();
EXPECT_EQ(exp_circles.size(), circles.size()); EXPECT_EQ(exp_circles.size(), circles.size());
#endif
} }
}; };
TEST_P(HoughCirclesTestFixture, regression) TEST_P(HoughCirclesTestFixture, regression)
{ {
run_test(); run_test<Vec3f>("HoughCircles.xml");
}
TEST_P(HoughCirclesTestFixture, regression4f)
{
run_test<Vec4f>("HoughCircles4f.xml");
} }
INSTANTIATE_TEST_CASE_P(ImgProc, HoughCirclesTestFixture, testing::Combine( INSTANTIATE_TEST_CASE_P(ImgProc, HoughCirclesTestFixture, testing::Combine(
@ -186,7 +195,9 @@ TEST(HoughCirclesTest, DefaultMaxRadius)
GaussianBlur(src, src, Size(9, 9), 2, 2); GaussianBlur(src, src, Size(9, 9), 2, 2);
vector<Vec3f> circles; vector<Vec3f> circles;
vector<Vec4f> circles4f;
HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius); HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(src, circles4f, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
#if DEBUG_IMAGES #if DEBUG_IMAGES
string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/"; string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/";
@ -220,7 +231,9 @@ TEST(HoughCirclesTest, CentersOnly)
GaussianBlur(src, src, Size(9, 9), 2, 2); GaussianBlur(src, src, Size(9, 9), 2, 2);
vector<Vec3f> circles; vector<Vec3f> circles;
vector<Vec4f> circles4f;
HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius); HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(src, circles4f, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
#if DEBUG_IMAGES #if DEBUG_IMAGES
string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/"; string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/";
@ -231,6 +244,9 @@ TEST(HoughCirclesTest, CentersOnly)
for (size_t i = 0; i < circles.size(); ++i) for (size_t i = 0; i < circles.size(); ++i)
{ {
EXPECT_EQ(circles[i][2], 0.0f) << "Did not ask for radius"; EXPECT_EQ(circles[i][2], 0.0f) << "Did not ask for radius";
EXPECT_EQ(circles[i][0], circles4f[i][0]);
EXPECT_EQ(circles[i][1], circles4f[i][1]);
EXPECT_EQ(circles[i][2], circles4f[i][2]);
} }
} }
@ -249,7 +265,9 @@ TEST(HoughCirclesTest, ManySmallCircles)
EXPECT_FALSE(src.empty()) << "Invalid test image: " << filename; EXPECT_FALSE(src.empty()) << "Invalid test image: " << filename;
vector<Vec3f> circles; vector<Vec3f> circles;
vector<Vec4f> circles4f;
HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius); HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
HoughCircles(src, circles4f, CV_HOUGH_GRADIENT, dp, minDist, edgeThreshold, accumThreshold, minRadius, maxRadius);
#if DEBUG_IMAGES #if DEBUG_IMAGES
string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/"; string imgProc = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/";
@ -258,6 +276,7 @@ TEST(HoughCirclesTest, ManySmallCircles)
#endif #endif
EXPECT_GT(circles.size(), size_t(3000)) << "Should find a lot of circles"; EXPECT_GT(circles.size(), size_t(3000)) << "Should find a lot of circles";
EXPECT_EQ(circles.size(), circles4f.size());
} }
}} // namespace }} // namespace

@ -43,6 +43,8 @@
#include "test_precomp.hpp" #include "test_precomp.hpp"
//#define GENERATE_DATA // generate data in debug mode via CPU code path (without IPP / OpenCL and other accelerators)
namespace opencv_test { namespace { namespace opencv_test { namespace {
template<typename T> template<typename T>
@ -52,30 +54,36 @@ struct SimilarWith
float theta_eps; float theta_eps;
float rho_eps; float rho_eps;
SimilarWith<T>(T val, float e, float r_e): value(val), theta_eps(e), rho_eps(r_e) { }; SimilarWith<T>(T val, float e, float r_e): value(val), theta_eps(e), rho_eps(r_e) { };
bool operator()(T other); bool operator()(const T& other);
}; };
template<> template<>
bool SimilarWith<Vec2f>::operator()(Vec2f other) bool SimilarWith<Vec2f>::operator()(const Vec2f& other)
{
return std::abs(other[0] - value[0]) < rho_eps && std::abs(other[1] - value[1]) < theta_eps;
}
template<>
bool SimilarWith<Vec3f>::operator()(const Vec3f& other)
{ {
return std::abs(other[0] - value[0]) < rho_eps && std::abs(other[1] - value[1]) < theta_eps; return std::abs(other[0] - value[0]) < rho_eps && std::abs(other[1] - value[1]) < theta_eps;
} }
template<> template<>
bool SimilarWith<Vec4i>::operator()(Vec4i other) bool SimilarWith<Vec4i>::operator()(const Vec4i& other)
{ {
return cv::norm(value, other) < theta_eps; return cv::norm(value, other) < theta_eps;
} }
template <typename T> template <typename T>
int countMatIntersection(Mat expect, Mat actual, float eps, float rho_eps) int countMatIntersection(const Mat& expect, const Mat& actual, float eps, float rho_eps)
{ {
int count = 0; int count = 0;
if (!expect.empty() && !actual.empty()) if (!expect.empty() && !actual.empty())
{ {
for (MatIterator_<T> it=expect.begin<T>(); it!=expect.end<T>(); it++) for (MatConstIterator_<T> it=expect.begin<T>(); it!=expect.end<T>(); it++)
{ {
MatIterator_<T> f = std::find_if(actual.begin<T>(), actual.end<T>(), SimilarWith<T>(*it, eps, rho_eps)); MatConstIterator_<T> f = std::find_if(actual.begin<T>(), actual.end<T>(), SimilarWith<T>(*it, eps, rho_eps));
if (f != actual.end<T>()) if (f != actual.end<T>())
count++; count++;
} }
@ -99,7 +107,8 @@ class BaseHoughLineTest
public: public:
enum {STANDART = 0, PROBABILISTIC}; enum {STANDART = 0, PROBABILISTIC};
protected: protected:
void run_test(int type); template<typename LinesType, typename LineType>
void run_test(int type, const char* xml_name);
string picture_name; string picture_name;
double rhoStep; double rhoStep;
@ -162,60 +171,63 @@ public:
} }
}; };
void BaseHoughLineTest::run_test(int type) template<typename LinesType, typename LineType>
void BaseHoughLineTest::run_test(int type, const char* xml_name)
{ {
string filename = cvtest::TS::ptr()->get_data_path() + picture_name; string filename = cvtest::TS::ptr()->get_data_path() + picture_name;
Mat src = imread(filename, IMREAD_GRAYSCALE); Mat src = imread(filename, IMREAD_GRAYSCALE);
EXPECT_FALSE(src.empty()) << "Invalid test image: " << filename; ASSERT_FALSE(src.empty()) << "Invalid test image: " << filename;
string xml; string xml = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/" + xml_name;
if (type == STANDART)
xml = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/HoughLines.xml";
else if (type == PROBABILISTIC)
xml = string(cvtest::TS::ptr()->get_data_path()) + "imgproc/HoughLinesP.xml";
Mat dst; Mat dst;
Canny(src, dst, 100, 150, 3); Canny(src, dst, 100, 150, 3);
EXPECT_FALSE(dst.empty()) << "Failed Canny edge detector"; ASSERT_FALSE(dst.empty()) << "Failed Canny edge detector";
Mat lines; LinesType lines;
if (type == STANDART) if (type == STANDART)
HoughLines(dst, lines, rhoStep, thetaStep, threshold, 0, 0); HoughLines(dst, lines, rhoStep, thetaStep, threshold, 0, 0);
else if (type == PROBABILISTIC) else if (type == PROBABILISTIC)
HoughLinesP(dst, lines, rhoStep, thetaStep, threshold, minLineLength, maxGap); HoughLinesP(dst, lines, rhoStep, thetaStep, threshold, minLineLength, maxGap);
String test_case_name = format("lines_%s_%.0f_%.2f_%d_%d_%d", picture_name.c_str(), rhoStep, thetaStep, String test_case_name = format("lines_%s_%.0f_%.2f_%d_%d_%d", picture_name.c_str(), rhoStep, thetaStep,
threshold, minLineLength, maxGap); threshold, minLineLength, maxGap);
test_case_name = getTestCaseName(test_case_name); test_case_name = getTestCaseName(test_case_name);
FileStorage fs(xml, FileStorage::READ); #ifdef GENERATE_DATA
FileNode node = fs[test_case_name];
if (node.empty())
{ {
fs.release(); FileStorage fs(xml, FileStorage::READ);
fs.open(xml, FileStorage::APPEND); ASSERT_TRUE(!fs.isOpened() || fs[test_case_name].empty());
EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml; }
fs << test_case_name << lines; {
fs.release(); FileStorage fs(xml, FileStorage::APPEND);
fs.open(xml, FileStorage::READ);
EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml; EXPECT_TRUE(fs.isOpened()) << "Cannot open sanity data file: " << xml;
fs << test_case_name << Mat(lines);
} }
#else
FileStorage fs(xml, FileStorage::READ);
FileNode node = fs[test_case_name];
ASSERT_FALSE(node.empty()) << "Missing test data: " << test_case_name << std::endl << "XML: " << xml;
Mat exp_lines; Mat exp_lines_;
read( fs[test_case_name], exp_lines, Mat() ); read(fs[test_case_name], exp_lines_, Mat());
fs.release(); fs.release();
LinesType exp_lines;
exp_lines_.copyTo(exp_lines);
int count = -1; int count = -1;
if (type == STANDART) if (type == STANDART)
count = countMatIntersection<Vec2f>(exp_lines, lines, (float) thetaStep + FLT_EPSILON, (float) rhoStep + FLT_EPSILON); count = countMatIntersection<LineType>(Mat(exp_lines), Mat(lines), (float) thetaStep + FLT_EPSILON, (float) rhoStep + FLT_EPSILON);
else if (type == PROBABILISTIC) else if (type == PROBABILISTIC)
count = countMatIntersection<Vec4i>(exp_lines, lines, 1e-4f, 0.f); count = countMatIntersection<LineType>(Mat(exp_lines), Mat(lines), 1e-4f, 0.f);
#if defined HAVE_IPP && IPP_VERSION_X100 >= 810 && !IPP_DISABLE_HOUGH #if defined HAVE_IPP && IPP_VERSION_X100 >= 810 && !IPP_DISABLE_HOUGH
EXPECT_GE( count, (int) (exp_lines.total() * 0.8) ); EXPECT_LE(std::abs((double)count - Mat(exp_lines).total()), Mat(exp_lines).total() * 0.25)
<< "count=" << count << " expected=" << Mat(exp_lines).total();
#else #else
EXPECT_EQ( count, (int) exp_lines.total()); EXPECT_EQ(count, (int)Mat(exp_lines).total());
#endif #endif
#endif // GENERATE_DATA
} }
void HoughLinesPointSetTest::run_test(void) void HoughLinesPointSetTest::run_test(void)
@ -264,12 +276,22 @@ void HoughLinesPointSetTest::run_test(void)
TEST_P(StandartHoughLinesTest, regression) TEST_P(StandartHoughLinesTest, regression)
{ {
run_test(STANDART); run_test<Mat, Vec2f>(STANDART, "HoughLines.xml");
} }
TEST_P(ProbabilisticHoughLinesTest, regression) TEST_P(ProbabilisticHoughLinesTest, regression)
{ {
run_test(PROBABILISTIC); run_test<Mat, Vec4i>(PROBABILISTIC, "HoughLinesP.xml");
}
TEST_P(StandartHoughLinesTest, regression_Vec2f)
{
run_test<std::vector<Vec2f>, Vec2f>(STANDART, "HoughLines2f.xml");
}
TEST_P(StandartHoughLinesTest, regression_Vec3f)
{
run_test<std::vector<Vec3f>, Vec3f>(STANDART, "HoughLines3f.xml");
} }
TEST_P(HoughLinesPointSetTest, regression) TEST_P(HoughLinesPointSetTest, regression)

@ -357,20 +357,22 @@ void CV_RotatedRectangleIntersectionTest::test13()
void CV_RotatedRectangleIntersectionTest::test14() void CV_RotatedRectangleIntersectionTest::test14()
{ {
const int kNumTests = 100; const int kNumTests = 100;
const int kWidth = 5; const float kWidth = 5;
const int kHeight = 5; const float kHeight = 5;
RotatedRect rects[2]; RotatedRect rects[2];
std::vector<Point2f> inter; std::vector<Point2f> inter;
cv::RNG& rng = cv::theRNG();
for (int i = 0; i < kNumTests; ++i) for (int i = 0; i < kNumTests; ++i)
{ {
for (int j = 0; j < 2; ++j) for (int j = 0; j < 2; ++j)
{ {
rects[j].center = Point2f((float)(rand() % kWidth), (float)(rand() % kHeight)); rects[j].center = Point2f(rng.uniform(0.0f, kWidth), rng.uniform(0.0f, kHeight));
rects[j].size = Size2f(rand() % kWidth + 1.0f, rand() % kHeight + 1.0f); rects[j].size = Size2f(rng.uniform(1.0f, kWidth), rng.uniform(1.0f, kHeight));
rects[j].angle = (float)(rand() % 360); rects[j].angle = rng.uniform(0.0f, 360.0f);
} }
rotatedRectangleIntersection(rects[0], rects[1], inter); int res = rotatedRectangleIntersection(rects[0], rects[1], inter);
ASSERT_TRUE(inter.size() < 4 || isContourConvex(inter)); EXPECT_TRUE(res == INTERSECT_NONE || res == INTERSECT_PARTIAL || res == INTERSECT_FULL) << res;
ASSERT_TRUE(inter.size() < 4 || isContourConvex(inter)) << inter;
} }
} }

@ -259,7 +259,7 @@ public:
res_pyr[lvl - 1] += up; res_pyr[lvl - 1] += up;
} }
dst.create(size, CV_32FCC); dst.create(size, CV_32FCC);
res_pyr[0].copyTo(dst.getMat()); res_pyr[0].copyTo(dst);
} }
float getContrastWeight() const CV_OVERRIDE { return wcon; } float getContrastWeight() const CV_OVERRIDE { return wcon; }

@ -189,11 +189,6 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
// bail out to let the user know that it is not available // bail out to let the user know that it is not available
if (pref) break; if (pref) break;
#ifdef HAVE_MSMF
case CAP_MSMF:
TRY_OPEN(capture, cvCreateCameraCapture_MSMF(index))
if (pref) break;
#endif
case CAP_VFW: // or CAP_V4L or CAP_V4L2 case CAP_VFW: // or CAP_V4L or CAP_V4L2
#ifdef HAVE_VFW #ifdef HAVE_VFW
TRY_OPEN(capture, cvCreateCameraCapture_VFW(index)) TRY_OPEN(capture, cvCreateCameraCapture_VFW(index))
@ -304,12 +299,6 @@ CV_IMPL CvCapture * cvCreateFileCaptureWithPreference (const char * filename, in
if (apiPreference) break; if (apiPreference) break;
#endif #endif
#ifdef HAVE_MSMF
case CAP_MSMF:
TRY_OPEN(result, cvCreateFileCapture_MSMF (filename))
if (apiPreference) break;
#endif
#ifdef HAVE_VFW #ifdef HAVE_VFW
case CAP_VFW: case CAP_VFW:
TRY_OPEN(result, cvCreateFileCapture_VFW (filename)) TRY_OPEN(result, cvCreateFileCapture_VFW (filename))
@ -378,11 +367,6 @@ static CvVideoWriter* cvCreateVideoWriterWithPreference(const char* filename, in
default: default:
//exit if the specified API is unavaliable //exit if the specified API is unavaliable
if (apiPreference != CAP_ANY) break; if (apiPreference != CAP_ANY) break;
#ifdef HAVE_MSMF
case CAP_MSMF:
TRY_OPEN(result, cvCreateVideoWriter_MSMF(filename, fourcc, fps, frameSize, is_color))
if (apiPreference != CAP_ANY) break;
#endif
#ifdef HAVE_VFW #ifdef HAVE_VFW
case CAP_VFW: case CAP_VFW:
TRY_OPEN(result, cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, is_color)) TRY_OPEN(result, cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, is_color))
@ -441,6 +425,9 @@ static Ptr<IVideoCapture> IVideoCapture_create(int index)
#ifdef HAVE_GSTREAMER #ifdef HAVE_GSTREAMER
CAP_GSTREAMER, CAP_GSTREAMER,
#endif #endif
#ifdef HAVE_MSMF
CAP_MSMF,
#endif
#ifdef HAVE_DSHOW #ifdef HAVE_DSHOW
CAP_DSHOW, CAP_DSHOW,
#endif #endif
@ -469,6 +456,7 @@ static Ptr<IVideoCapture> IVideoCapture_create(int index)
for (int i = 0; domains[i] >= 0; i++) for (int i = 0; domains[i] >= 0; i++)
{ {
#if defined(HAVE_GSTREAMER) || \ #if defined(HAVE_GSTREAMER) || \
defined(HAVE_MSMF) || \
defined(HAVE_DSHOW) || \ defined(HAVE_DSHOW) || \
defined(HAVE_INTELPERC) || \ defined(HAVE_INTELPERC) || \
defined(HAVE_LIBREALSENSE) || \ defined(HAVE_LIBREALSENSE) || \
@ -484,6 +472,11 @@ static Ptr<IVideoCapture> IVideoCapture_create(int index)
capture = createGStreamerCapture(index); capture = createGStreamerCapture(index);
break; break;
#endif #endif
#ifdef HAVE_MSMF
case CAP_MSMF:
capture = cvCreateCapture_MSMF(index);
break; // CAP_MSMF
#endif
#ifdef HAVE_DSHOW #ifdef HAVE_DSHOW
case CAP_DSHOW: case CAP_DSHOW:
capture = makePtr<VideoCapture_DShow>(index); capture = makePtr<VideoCapture_DShow>(index);
@ -549,6 +542,14 @@ static Ptr<IVideoCapture> IVideoCapture_create(const String& filename, int apiPr
return capture; return capture;
} }
#endif #endif
#ifdef HAVE_MSMF
if (useAny || apiPreference == CAP_MSMF)
{
capture = cvCreateCapture_MSMF(filename);
if (capture && capture->isOpened())
return capture;
}
#endif
#ifdef HAVE_GPHOTO2 #ifdef HAVE_GPHOTO2
if (useAny || apiPreference == CAP_GPHOTO2) if (useAny || apiPreference == CAP_GPHOTO2)
{ {
@ -587,6 +588,14 @@ static Ptr<IVideoWriter> IVideoWriter_create(const String& filename, int apiPref
return iwriter; return iwriter;
} }
#endif #endif
#ifdef HAVE_MSMF
if (apiPreference == CAP_MSMF || apiPreference == CAP_ANY)
{
iwriter = cvCreateVideoWriter_MSMF(filename, _fourcc, fps, frameSize, isColor);
if (!iwriter.empty())
return iwriter;
}
#endif
#ifdef HAVE_MFX #ifdef HAVE_MFX
if (apiPreference == CAP_INTEL_MFX || apiPreference == CAP_ANY) if (apiPreference == CAP_INTEL_MFX || apiPreference == CAP_ANY)
{ {

@ -125,7 +125,7 @@ private:
gst_init(NULL, NULL); gst_init(NULL, NULL);
guint major, minor, micro, nano; guint major, minor, micro, nano;
gst_version(&major, &minor, &micro, &nano); gst_version(&major, &minor, &micro, &nano);
if (GST_VERSION_MAJOR == major) if (GST_VERSION_MAJOR != major)
{ {
CV_WARN("incompatible gstreamer version"); CV_WARN("incompatible gstreamer version");
} }
@ -268,7 +268,6 @@ bool GStreamerCapture::grabFrame()
sample = gst_app_sink_pull_sample(GST_APP_SINK(sink)); sample = gst_app_sink_pull_sample(GST_APP_SINK(sink));
if(!sample) if(!sample)
return false; return false;
gst_sample_ref(sample);
#endif #endif
if (isPosFramesEmulated) if (isPosFramesEmulated)

@ -681,7 +681,7 @@ void MediaType::Clear()
} }
/******* Capturing video from camera or file via Microsoft Media Foundation **********/ /******* Capturing video from camera or file via Microsoft Media Foundation **********/
class CvCapture_MSMF : public CvCapture class CvCapture_MSMF : public cv::IVideoCapture
{ {
public: public:
typedef enum { typedef enum {
@ -689,14 +689,17 @@ public:
MODE_HW = 1 MODE_HW = 1
} MSMFCapture_Mode; } MSMFCapture_Mode;
CvCapture_MSMF(); CvCapture_MSMF();
CvCapture_MSMF(int);
CvCapture_MSMF(const cv::String&);
virtual ~CvCapture_MSMF(); virtual ~CvCapture_MSMF();
virtual bool open(int index); virtual bool open(int);
virtual bool open(const char* filename); virtual bool open(const cv::String&);
virtual void close(); virtual void close();
virtual double getProperty(int) const CV_OVERRIDE; virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE; virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE; virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int) CV_OVERRIDE; virtual bool retrieveFrame(int, cv::OutputArray) CV_OVERRIDE;
virtual bool isOpened() const CV_OVERRIDE { return isOpen; }
virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_MSMF; } // Return the type of the capture object: CV_CAP_VFW, etc... virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_MSMF; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected: protected:
double getFramerate(MediaType MT) const; double getFramerate(MediaType MT) const;
@ -723,8 +726,7 @@ protected:
LONGLONG frameStep; LONGLONG frameStep;
_ComPtr<IMFSample> videoSample; _ComPtr<IMFSample> videoSample;
LONGLONG sampleTime; LONGLONG sampleTime;
IplImage* frame; bool isOpen;
bool isOpened;
}; };
CvCapture_MSMF::CvCapture_MSMF(): CvCapture_MSMF::CvCapture_MSMF():
@ -743,10 +745,11 @@ CvCapture_MSMF::CvCapture_MSMF():
aspectN(1), aspectN(1),
aspectD(1), aspectD(1),
sampleTime(0), sampleTime(0),
frame(NULL), isOpen(false)
isOpened(false)
{ {
} }
CvCapture_MSMF::CvCapture_MSMF(int index) : CvCapture_MSMF() { open(index); }
CvCapture_MSMF::CvCapture_MSMF(const cv::String& _filename) : CvCapture_MSMF() { open(_filename); }
CvCapture_MSMF::~CvCapture_MSMF() CvCapture_MSMF::~CvCapture_MSMF()
{ {
@ -755,15 +758,13 @@ CvCapture_MSMF::~CvCapture_MSMF()
void CvCapture_MSMF::close() void CvCapture_MSMF::close()
{ {
if (isOpened) if (isOpen)
{ {
isOpened = false; isOpen = false;
if (videoSample) if (videoSample)
videoSample.Reset(); videoSample.Reset();
if (videoFileSource) if (videoFileSource)
videoFileSource.Reset(); videoFileSource.Reset();
if (frame)
cvReleaseImage(&frame);
camid = -1; camid = -1;
filename = ""; filename = "";
} }
@ -775,7 +776,7 @@ bool CvCapture_MSMF::configureHW(bool enable)
if ((enable && D3DMgr && D3DDev) || (!enable && !D3DMgr && !D3DDev)) if ((enable && D3DMgr && D3DDev) || (!enable && !D3DMgr && !D3DDev))
return true; return true;
bool reopen = isOpened; bool reopen = isOpen;
int prevcam = camid; int prevcam = camid;
cv::String prevfile = filename; cv::String prevfile = filename;
close(); close();
@ -973,7 +974,7 @@ bool CvCapture_MSMF::open(int _index)
#endif #endif
if (SUCCEEDED(MFCreateSourceReaderFromMediaSource(mSrc.Get(), srAttr.Get(), &videoFileSource))) if (SUCCEEDED(MFCreateSourceReaderFromMediaSource(mSrc.Get(), srAttr.Get(), &videoFileSource)))
{ {
isOpened = true; isOpen = true;
duration = 0; duration = 0;
if (configureOutput(0, 0, 0, aspectN, aspectD, outputFormat, convertFormat)) if (configureOutput(0, 0, 0, aspectN, aspectD, outputFormat, convertFormat))
{ {
@ -992,13 +993,13 @@ bool CvCapture_MSMF::open(int _index)
CoTaskMemFree(ppDevices); CoTaskMemFree(ppDevices);
} }
return isOpened; return isOpen;
} }
bool CvCapture_MSMF::open(const char* _filename) bool CvCapture_MSMF::open(const cv::String& _filename)
{ {
close(); close();
if (!_filename) if (_filename.empty())
return false; return false;
// Set source reader parameters // Set source reader parameters
@ -1014,11 +1015,11 @@ bool CvCapture_MSMF::open(const char* _filename)
if(D3DMgr) if(D3DMgr)
srAttr->SetUnknown(MF_SOURCE_READER_D3D_MANAGER, D3DMgr.Get()); srAttr->SetUnknown(MF_SOURCE_READER_D3D_MANAGER, D3DMgr.Get());
#endif #endif
cv::AutoBuffer<wchar_t> unicodeFileName(strlen(_filename) + 1); cv::AutoBuffer<wchar_t> unicodeFileName(_filename.length() + 1);
MultiByteToWideChar(CP_ACP, 0, _filename, -1, unicodeFileName, (int)strlen(_filename) + 1); MultiByteToWideChar(CP_ACP, 0, _filename.c_str(), -1, unicodeFileName, (int)_filename.length() + 1);
if (SUCCEEDED(MFCreateSourceReaderFromURL(unicodeFileName, srAttr.Get(), &videoFileSource))) if (SUCCEEDED(MFCreateSourceReaderFromURL(unicodeFileName, srAttr.Get(), &videoFileSource)))
{ {
isOpened = true; isOpen = true;
sampleTime = 0; sampleTime = 0;
if (configureOutput(0, 0, 0, aspectN, aspectD, outputFormat, convertFormat)) if (configureOutput(0, 0, 0, aspectN, aspectD, outputFormat, convertFormat))
{ {
@ -1039,12 +1040,12 @@ bool CvCapture_MSMF::open(const char* _filename)
} }
} }
return isOpened; return isOpen;
} }
bool CvCapture_MSMF::grabFrame() bool CvCapture_MSMF::grabFrame()
{ {
if (isOpened) if (isOpen)
{ {
DWORD streamIndex, flags; DWORD streamIndex, flags;
if (videoSample) if (videoSample)
@ -1112,7 +1113,7 @@ bool CvCapture_MSMF::grabFrame()
return false; return false;
} }
IplImage* CvCapture_MSMF::retrieveFrame(int) bool CvCapture_MSMF::retrieveFrame(int, cv::OutputArray frame)
{ {
DWORD bcnt; DWORD bcnt;
if (videoSample && SUCCEEDED(videoSample->GetBufferCount(&bcnt)) && bcnt > 0) if (videoSample && SUCCEEDED(videoSample->GetBufferCount(&bcnt)) && bcnt > 0)
@ -1128,56 +1129,46 @@ IplImage* CvCapture_MSMF::retrieveFrame(int)
{ {
if ((unsigned int)cursize == captureFormat.MF_MT_SAMPLE_SIZE) if ((unsigned int)cursize == captureFormat.MF_MT_SAMPLE_SIZE)
{ {
if (!frame || (int)captureFormat.width != frame->width || (int)captureFormat.height != frame->height)
{
cvReleaseImage(&frame);
unsigned int bytes = outputFormat == CV_CAP_MODE_GRAY || !convertFormat ? 1 : outputFormat == CV_CAP_MODE_YUYV ? 2 : 3;
frame = cvCreateImage(cvSize(captureFormat.width, captureFormat.height), 8, bytes);
}
switch (outputFormat) switch (outputFormat)
{ {
case CV_CAP_MODE_YUYV: case CV_CAP_MODE_YUYV:
memcpy(frame->imageData, ptr, cursize); cv::Mat(captureFormat.height, captureFormat.width, CV_8UC2, ptr).copyTo(frame);
break; break;
case CV_CAP_MODE_BGR: case CV_CAP_MODE_BGR:
if (captureMode == MODE_HW) if (captureMode == MODE_HW)
cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC4, ptr), cv::cvarrToMat(frame), cv::COLOR_BGRA2BGR); cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC4, ptr), frame, cv::COLOR_BGRA2BGR);
else else
memcpy(frame->imageData, ptr, cursize); cv::Mat(captureFormat.height, captureFormat.width, CV_8UC3, ptr).copyTo(frame);
break; break;
case CV_CAP_MODE_RGB: case CV_CAP_MODE_RGB:
if (captureMode == MODE_HW) if (captureMode == MODE_HW)
cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC4, ptr), cv::cvarrToMat(frame), cv::COLOR_BGRA2BGR); cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC4, ptr), frame, cv::COLOR_BGRA2BGR);
else else
cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC3, ptr), cv::cvarrToMat(frame), cv::COLOR_BGR2RGB); cv::cvtColor(cv::Mat(captureFormat.height, captureFormat.width, CV_8UC3, ptr), frame, cv::COLOR_BGR2RGB);
break; break;
case CV_CAP_MODE_GRAY: case CV_CAP_MODE_GRAY:
memcpy(frame->imageData, ptr, captureFormat.height*captureFormat.width); cv::Mat(captureFormat.height, captureFormat.width, CV_8UC1, ptr).copyTo(frame);
break; break;
default: default:
cvReleaseImage(&frame); frame.release();
break; break;
} }
} }
else else
cvReleaseImage(&frame); frame.release();
} }
else else
{ {
if (!frame || frame->width != (int)cursize || frame->height != 1) cv::Mat(1, cursize, CV_8UC1, ptr).copyTo(frame);
{
cvReleaseImage(&frame);
frame = cvCreateImage(cvSize(cursize, 1), 8, 1);
}
memcpy(frame->imageData, ptr, cursize);
} }
buf->Unlock(); buf->Unlock();
return frame; return !frame.empty();
} }
} }
} }
return NULL; frame.release();
return false;
} }
double CvCapture_MSMF::getFramerate(MediaType MT) const double CvCapture_MSMF::getFramerate(MediaType MT) const
@ -1227,7 +1218,7 @@ double CvCapture_MSMF::getProperty( int property_id ) const
return aspectN; return aspectN;
else if (property_id == CV_CAP_PROP_SAR_DEN) else if (property_id == CV_CAP_PROP_SAR_DEN)
return aspectD; return aspectD;
else if (isOpened) else if (isOpen)
switch (property_id) switch (property_id)
{ {
case CV_CAP_PROP_FRAME_WIDTH: case CV_CAP_PROP_FRAME_WIDTH:
@ -1521,7 +1512,7 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
// image capture properties // image capture properties
if (property_id == CV_CAP_PROP_FORMAT) if (property_id == CV_CAP_PROP_FORMAT)
{ {
if (isOpened) if (isOpen)
return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), aspectN, aspectD, (int)cvRound(value), convertFormat); return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), aspectN, aspectD, (int)cvRound(value), convertFormat);
else else
outputFormat = (int)cvRound(value); outputFormat = (int)cvRound(value);
@ -1541,7 +1532,7 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
} }
else if (property_id == CV_CAP_PROP_CONVERT_RGB) else if (property_id == CV_CAP_PROP_CONVERT_RGB)
{ {
if (isOpened) if (isOpen)
return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), aspectN, aspectD, outputFormat, value != 0); return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), aspectN, aspectD, outputFormat, value != 0);
else else
convertFormat = value != 0; convertFormat = value != 0;
@ -1549,7 +1540,7 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
} }
else if (property_id == CV_CAP_PROP_SAR_NUM && value > 0) else if (property_id == CV_CAP_PROP_SAR_NUM && value > 0)
{ {
if (isOpened) if (isOpen)
return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), (UINT32)cvRound(value), aspectD, outputFormat, convertFormat); return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), (UINT32)cvRound(value), aspectD, outputFormat, convertFormat);
else else
aspectN = (UINT32)cvRound(value); aspectN = (UINT32)cvRound(value);
@ -1557,13 +1548,13 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
} }
else if (property_id == CV_CAP_PROP_SAR_DEN && value > 0) else if (property_id == CV_CAP_PROP_SAR_DEN && value > 0)
{ {
if (isOpened) if (isOpen)
return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), aspectN, (UINT32)cvRound(value), outputFormat, convertFormat); return configureOutput(captureFormat.width, captureFormat.height, getFramerate(nativeFormat), aspectN, (UINT32)cvRound(value), outputFormat, convertFormat);
else else
aspectD = (UINT32)cvRound(value); aspectD = (UINT32)cvRound(value);
return true; return true;
} }
else if (isOpened) else if (isOpen)
switch (property_id) switch (property_id)
{ {
case CV_CAP_PROP_FRAME_WIDTH: case CV_CAP_PROP_FRAME_WIDTH:
@ -1778,41 +1769,20 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
return false; return false;
} }
CvCapture* cvCreateCameraCapture_MSMF( int index ) cv::Ptr<cv::IVideoCapture> cv::cvCreateCapture_MSMF( int index )
{ {
CvCapture_MSMF* capture = new CvCapture_MSMF; cv::Ptr<CvCapture_MSMF> capture = cv::makePtr<CvCapture_MSMF>(index);
try if (capture && capture->isOpened())
{ return capture;
if( capture->open( index )) return cv::Ptr<cv::IVideoCapture>();
return capture;
}
catch(...)
{
delete capture;
throw;
}
delete capture;
return 0;
} }
CvCapture* cvCreateFileCapture_MSMF (const char* filename) cv::Ptr<cv::IVideoCapture> cv::cvCreateCapture_MSMF (const cv::String& filename)
{ {
CvCapture_MSMF* capture = new CvCapture_MSMF; cv::Ptr<CvCapture_MSMF> capture = cv::makePtr<CvCapture_MSMF>(filename);
try if (capture && capture->isOpened())
{ return capture;
if( capture->open(filename) ) return cv::Ptr<cv::IVideoCapture>();
return capture;
else
{
delete capture;
return NULL;
}
}
catch(...)
{
delete capture;
throw;
}
} }
// //
@ -1821,15 +1791,21 @@ CvCapture* cvCreateFileCapture_MSMF (const char* filename)
// //
// //
class CvVideoWriter_MSMF : public CvVideoWriter class CvVideoWriter_MSMF : public cv::IVideoWriter
{ {
public: public:
CvVideoWriter_MSMF(); CvVideoWriter_MSMF();
CvVideoWriter_MSMF(const cv::String& filename, int fourcc,
double fps, cv::Size frameSize, bool isColor);
virtual ~CvVideoWriter_MSMF(); virtual ~CvVideoWriter_MSMF();
virtual bool open(const char* filename, int fourcc, virtual bool open(const cv::String& filename, int fourcc,
double fps, CvSize frameSize, bool isColor); double fps, cv::Size frameSize, bool isColor);
virtual void close(); virtual void close();
virtual bool writeFrame(const IplImage* img); virtual void write(cv::InputArray);
virtual double getProperty(int) const { return 0; }
virtual bool setProperty(int, double) { return false; }
virtual bool isOpened() const { return initiated; }
private: private:
Media_Foundation& MF; Media_Foundation& MF;
@ -1857,6 +1833,7 @@ CvVideoWriter_MSMF::CvVideoWriter_MSMF():
initiated(false) initiated(false)
{ {
} }
CvVideoWriter_MSMF::CvVideoWriter_MSMF(const cv::String& filename, int fourcc, double fps, cv::Size frameSize, bool isColor) : CvVideoWriter_MSMF() { open(filename, fourcc, fps, frameSize, isColor); }
CvVideoWriter_MSMF::~CvVideoWriter_MSMF() CvVideoWriter_MSMF::~CvVideoWriter_MSMF()
{ {
@ -1916,8 +1893,8 @@ const GUID CvVideoWriter_MSMF::FourCC2GUID(int fourcc)
} }
} }
bool CvVideoWriter_MSMF::open( const char* filename, int fourcc, bool CvVideoWriter_MSMF::open( const cv::String& filename, int fourcc,
double _fps, CvSize _frameSize, bool /*isColor*/ ) double _fps, cv::Size _frameSize, bool /*isColor*/ )
{ {
if (initiated) if (initiated)
close(); close();
@ -1956,8 +1933,8 @@ bool CvVideoWriter_MSMF::open( const char* filename, int fourcc,
) )
{ {
// Create the sink writer // Create the sink writer
cv::AutoBuffer<wchar_t> unicodeFileName(strlen(filename) + 1); cv::AutoBuffer<wchar_t> unicodeFileName(filename.length() + 1);
MultiByteToWideChar(CP_ACP, 0, filename, -1, unicodeFileName, (int)strlen(filename) + 1); MultiByteToWideChar(CP_ACP, 0, filename.c_str(), -1, unicodeFileName, (int)filename.length() + 1);
HRESULT hr = MFCreateSinkWriterFromURL(unicodeFileName, NULL, spAttr.Get(), &sinkWriter); HRESULT hr = MFCreateSinkWriterFromURL(unicodeFileName, NULL, spAttr.Get(), &sinkWriter);
if (SUCCEEDED(hr)) if (SUCCEEDED(hr))
{ {
@ -1987,12 +1964,12 @@ void CvVideoWriter_MSMF::close()
} }
} }
bool CvVideoWriter_MSMF::writeFrame(const IplImage* img) void CvVideoWriter_MSMF::write(cv::InputArray img)
{ {
if (!img || if (img.empty() ||
(img->nChannels != 1 && img->nChannels != 3 && img->nChannels != 4) || (img.channels() != 1 && img.channels() != 3 && img.channels() != 4) ||
(UINT32)img->width != videoWidth || (UINT32)img->height != videoHeight) (UINT32)img.cols() != videoWidth || (UINT32)img.rows() != videoHeight)
return false; return;
const LONG cbWidth = 4 * videoWidth; const LONG cbWidth = 4 * videoWidth;
const DWORD cbBuffer = cbWidth * videoHeight; const DWORD cbBuffer = cbWidth * videoHeight;
@ -2014,27 +1991,23 @@ bool CvVideoWriter_MSMF::writeFrame(const IplImage* img)
SUCCEEDED(buffer->Lock(&pData, NULL, NULL))) SUCCEEDED(buffer->Lock(&pData, NULL, NULL)))
{ {
// Copy the video frame to the buffer. // Copy the video frame to the buffer.
cv::cvtColor(cv::cvarrToMat(img), cv::Mat(videoHeight, videoWidth, CV_8UC4, pData, cbWidth), img->nChannels > 1 ? cv::COLOR_BGR2BGRA : cv::COLOR_GRAY2BGRA); cv::cvtColor(img.getMat(), cv::Mat(videoHeight, videoWidth, CV_8UC4, pData, cbWidth), img.channels() > 1 ? cv::COLOR_BGR2BGRA : cv::COLOR_GRAY2BGRA);
buffer->Unlock(); buffer->Unlock();
// Send media sample to the Sink Writer. // Send media sample to the Sink Writer.
if (SUCCEEDED(sinkWriter->WriteSample(streamIndex, sample.Get()))) if (SUCCEEDED(sinkWriter->WriteSample(streamIndex, sample.Get())))
{ {
rtStart += rtDuration; rtStart += rtDuration;
return true;
} }
} }
return false;
} }
CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc, cv::Ptr<cv::IVideoWriter> cv::cvCreateVideoWriter_MSMF( const cv::String& filename, int fourcc,
double fps, CvSize frameSize, int isColor ) double fps, cv::Size frameSize, int isColor )
{ {
CvVideoWriter_MSMF* writer = new CvVideoWriter_MSMF; cv::Ptr<CvVideoWriter_MSMF> writer = cv::makePtr<CvVideoWriter_MSMF>(filename, fourcc, fps, frameSize, isColor != 0);
if( writer->open( filename, fourcc, fps, frameSize, isColor != 0 )) if (writer && writer->isOpened())
return writer; return writer;
delete writer; return cv::Ptr<cv::IVideoWriter>();
return NULL;
} }
#endif #endif

@ -244,26 +244,26 @@ private:
ApproximateSynchronizerBase(_approxSyncGrabber) ApproximateSynchronizerBase(_approxSyncGrabber)
{} {}
virtual bool isSpinContinue() const virtual bool isSpinContinue() const CV_OVERRIDE
{ {
int maxBufferSize = approxSyncGrabber.getMaxBufferSize(); int maxBufferSize = approxSyncGrabber.getMaxBufferSize();
return (maxBufferSize <= 0) || (static_cast<int>(depthQueue.size()) < maxBufferSize && return (maxBufferSize <= 0) || (static_cast<int>(depthQueue.size()) < maxBufferSize &&
static_cast<int>(imageQueue.size()) < maxBufferSize); // "<" to may push static_cast<int>(imageQueue.size()) < maxBufferSize); // "<" to may push
} }
virtual inline void pushDepthMetaData( xn::DepthMetaData& depthMetaData ) virtual inline void pushDepthMetaData( xn::DepthMetaData& depthMetaData ) CV_OVERRIDE
{ {
cv::Ptr<xn::DepthMetaData> depthPtr = cv::makePtr<xn::DepthMetaData>(); cv::Ptr<xn::DepthMetaData> depthPtr = cv::makePtr<xn::DepthMetaData>();
depthPtr->CopyFrom(depthMetaData); depthPtr->CopyFrom(depthMetaData);
depthQueue.push(depthPtr); depthQueue.push(depthPtr);
} }
virtual inline void pushImageMetaData( xn::ImageMetaData& imageMetaData ) virtual inline void pushImageMetaData( xn::ImageMetaData& imageMetaData ) CV_OVERRIDE
{ {
cv::Ptr<xn::ImageMetaData> imagePtr = cv::makePtr<xn::ImageMetaData>(); cv::Ptr<xn::ImageMetaData> imagePtr = cv::makePtr<xn::ImageMetaData>();
imagePtr->CopyFrom(imageMetaData); imagePtr->CopyFrom(imageMetaData);
imageQueue.push(imagePtr); imageQueue.push(imagePtr);
} }
virtual inline bool popDepthMetaData( xn::DepthMetaData& depthMetaData ) virtual inline bool popDepthMetaData( xn::DepthMetaData& depthMetaData ) CV_OVERRIDE
{ {
if( depthQueue.empty() ) if( depthQueue.empty() )
return false; return false;
@ -272,7 +272,7 @@ private:
depthQueue.pop(); depthQueue.pop();
return true; return true;
} }
virtual inline bool popImageMetaData( xn::ImageMetaData& imageMetaData ) virtual inline bool popImageMetaData( xn::ImageMetaData& imageMetaData ) CV_OVERRIDE
{ {
if( imageQueue.empty() ) if( imageQueue.empty() )
return false; return false;

@ -116,10 +116,6 @@ CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc,
CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc, CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color ); double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_DShow( int index ); CvCapture* cvCreateCameraCapture_DShow( int index );
CvCapture* cvCreateCameraCapture_MSMF( int index );
CvCapture* cvCreateFileCapture_MSMF (const char* filename);
CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_OpenNI( int index ); CvCapture* cvCreateCameraCapture_OpenNI( int index );
CvCapture* cvCreateCameraCapture_OpenNI2( int index ); CvCapture* cvCreateCameraCapture_OpenNI2( int index );
CvCapture* cvCreateFileCapture_OpenNI( const char* filename ); CvCapture* cvCreateFileCapture_OpenNI( const char* filename );
@ -195,6 +191,10 @@ namespace cv
Ptr<cv::IVideoCapture> cvCreateFileCapture_FFMPEG_proxy(const String& filename); Ptr<cv::IVideoCapture> cvCreateFileCapture_FFMPEG_proxy(const String& filename);
Ptr<IVideoWriter> cvCreateVideoWriter_FFMPEG_proxy(const String& filename, int fourcc, double fps, Size frameSize, int isColor); Ptr<IVideoWriter> cvCreateVideoWriter_FFMPEG_proxy(const String& filename, int fourcc, double fps, Size frameSize, int isColor);
Ptr<IVideoCapture> cvCreateCapture_MSMF(int index);
Ptr<IVideoCapture> cvCreateCapture_MSMF(const String& filename);
Ptr<IVideoWriter> cvCreateVideoWriter_MSMF(const String& filename, int fourcc, double fps, Size frameSize, int is_color);
} }
#endif /* __VIDEOIO_H_ */ #endif /* __VIDEOIO_H_ */

@ -17,37 +17,35 @@ using namespace std;
*/ */
int main( int argc, char** argv ) int main( int argc, char** argv )
{ {
Mat src, dst; //! [Load image]
CommandLineParser parser( argc, argv, "{@input | ../data/lena.jpg | input image}" );
const char* source_window = "Source image"; Mat src = imread( parser.get<String>( "@input" ), IMREAD_COLOR );
const char* equalized_window = "Equalized Image"; if( src.empty() )
{
/// Load image cout << "Could not open or find the image!\n" << endl;
CommandLineParser parser( argc, argv, "{@input | ../data/lena.jpg | input image}" ); cout << "Usage: " << argv[0] << " <Input image>" << endl;
src = imread( parser.get<String>( "@input" ), IMREAD_COLOR ); return -1;
if( src.empty() ) }
{ //! [Load image]
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl; //! [Convert to grayscale]
return -1; cvtColor( src, src, COLOR_BGR2GRAY );
} //! [Convert to grayscale]
/// Convert to grayscale //! [Apply Histogram Equalization]
cvtColor( src, src, COLOR_BGR2GRAY ); Mat dst;
equalizeHist( src, dst );
/// Apply Histogram Equalization //! [Apply Histogram Equalization]
equalizeHist( src, dst );
//! [Display results]
/// Display results imshow( "Source image", src );
namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( "Equalized Image", dst );
namedWindow( equalized_window, WINDOW_AUTOSIZE ); //! [Display results]
imshow( source_window, src ); //! [Wait until user exits the program]
imshow( equalized_window, dst ); waitKey();
//! [Wait until user exits the program]
/// Wait until user exits the program
waitKey(0); return 0;
return 0;
} }

@ -14,79 +14,93 @@ using namespace cv;
using namespace std; using namespace std;
/// Global Variables /// Global Variables
Mat src; Mat hsv; Mat hue; Mat hue;
int bins = 25; int bins = 25;
/// Function Headers /// Function Headers
void Hist_and_Backproj(int, void* ); void Hist_and_Backproj(int, void* );
/** /**
* @function main * @function main
*/ */
int main( int, char** argv ) int main( int argc, char* argv[] )
{ {
/// Read the image //! [Read the image]
src = imread( argv[1], IMREAD_COLOR ); CommandLineParser parser( argc, argv, "{@input | | input image}" );
Mat src = imread( parser.get<String>( "@input" ) );
if( src.empty() ) if( src.empty() )
{ cout<<"Usage: ./calcBackProject_Demo1 <path_to_image>"<<endl; {
return -1; cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
} }
//! [Read the image]
/// Transform it to HSV
cvtColor( src, hsv, COLOR_BGR2HSV ); //! [Transform it to HSV]
Mat hsv;
/// Use only the Hue value cvtColor( src, hsv, COLOR_BGR2HSV );
hue.create( hsv.size(), hsv.depth() ); //! [Transform it to HSV]
int ch[] = { 0, 0 };
mixChannels( &hsv, 1, &hue, 1, ch, 1 ); //! [Use only the Hue value]
hue.create(hsv.size(), hsv.depth());
/// Create Trackbar to enter the number of bins int ch[] = { 0, 0 };
const char* window_image = "Source image"; mixChannels( &hsv, 1, &hue, 1, ch, 1 );
namedWindow( window_image, WINDOW_AUTOSIZE ); //! [Use only the Hue value]
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
Hist_and_Backproj(0, 0); //! [Create Trackbar to enter the number of bins]
const char* window_image = "Source image";
/// Show the image namedWindow( window_image );
imshow( window_image, src ); createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
Hist_and_Backproj(0, 0);
/// Wait until user exits the program //! [Create Trackbar to enter the number of bins]
waitKey(0);
return 0; //! [Show the image]
imshow( window_image, src );
// Wait until user exits the program
waitKey();
//! [Show the image]
return 0;
} }
/** /**
* @function Hist_and_Backproj * @function Hist_and_Backproj
* @brief Callback to Trackbar * @brief Callback to Trackbar
*/ */
void Hist_and_Backproj(int, void* ) void Hist_and_Backproj(int, void* )
{ {
MatND hist; //! [initialize]
int histSize = MAX( bins, 2 ); int histSize = MAX( bins, 2 );
float hue_range[] = { 0, 180 }; float hue_range[] = { 0, 180 };
const float* ranges = { hue_range }; const float* ranges = { hue_range };
//! [initialize]
/// Get the Histogram and normalize it
calcHist( &hue, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false ); //! [Get the Histogram and normalize it]
normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() ); Mat hist;
calcHist( &hue, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false );
/// Get Backprojection normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() );
MatND backproj; //! [Get the Histogram and normalize it]
calcBackProject( &hue, 1, 0, hist, backproj, &ranges, 1, true );
//! [Get Backprojection]
/// Draw the backproj Mat backproj;
imshow( "BackProj", backproj ); calcBackProject( &hue, 1, 0, hist, backproj, &ranges, 1, true );
//! [Get Backprojection]
/// Draw the histogram
int w = 400; int h = 400; //! [Draw the backproj]
int bin_w = cvRound( (double) w / histSize ); imshow( "BackProj", backproj );
Mat histImg = Mat::zeros( w, h, CV_8UC3 ); //! [Draw the backproj]
for( int i = 0; i < bins; i ++ ) //! [Draw the histogram]
{ rectangle( histImg, Point( i*bin_w, h ), Point( (i+1)*bin_w, h - cvRound( hist.at<float>(i)*h/255.0 ) ), Scalar( 0, 0, 255 ), -1 ); } int w = 400, h = 400;
int bin_w = cvRound( (double) w / histSize );
imshow( "Histogram", histImg ); Mat histImg = Mat::zeros( h, w, CV_8UC3 );
for (int i = 0; i < bins; i++)
{
rectangle( histImg, Point( i*bin_w, h ), Point( (i+1)*bin_w, h - cvRound( hist.at<float>(i)*h/255.0 ) ),
Scalar( 0, 0, 255 ), FILLED );
}
imshow( "Histogram", histImg );
//! [Draw the histogram]
} }

@ -14,10 +14,9 @@ using namespace cv;
using namespace std; using namespace std;
/// Global Variables /// Global Variables
Mat src; Mat hsv; Mat src, hsv, mask;
Mat mask;
int lo = 20; int up = 20; int low = 20, up = 20;
const char* window_image = "Source image"; const char* window_image = "Source image";
/// Function Headers /// Function Headers
@ -29,23 +28,24 @@ void pickPoint (int event, int x, int y, int, void* );
*/ */
int main( int, char** argv ) int main( int, char** argv )
{ {
/// Read the image /// Read the image
src = imread( argv[1], IMREAD_COLOR ); src = imread( argv[1] );
/// Transform it to HSV
cvtColor( src, hsv, COLOR_BGR2HSV ); /// Transform it to HSV
cvtColor( src, hsv, COLOR_BGR2HSV );
/// Show the image
namedWindow( window_image, WINDOW_AUTOSIZE ); /// Show the image
imshow( window_image, src ); namedWindow( window_image );
imshow( window_image, src );
/// Set Trackbars for floodfill thresholds
createTrackbar( "Low thresh", window_image, &lo, 255, 0 ); /// Set Trackbars for floodfill thresholds
createTrackbar( "High thresh", window_image, &up, 255, 0 ); createTrackbar( "Low thresh", window_image, &low, 255, 0 );
/// Set a Mouse Callback createTrackbar( "High thresh", window_image, &up, 255, 0 );
setMouseCallback( window_image, pickPoint, 0 ); /// Set a Mouse Callback
setMouseCallback( window_image, pickPoint, 0 );
waitKey(0);
return 0; waitKey();
return 0;
} }
/** /**
@ -53,25 +53,27 @@ int main( int, char** argv )
*/ */
void pickPoint (int event, int x, int y, int, void* ) void pickPoint (int event, int x, int y, int, void* )
{ {
if( event != EVENT_LBUTTONDOWN ) if( event != EVENT_LBUTTONDOWN )
{ return; } {
return;
}
// Fill and get the mask // Fill and get the mask
Point seed = Point( x, y ); Point seed = Point( x, y );
int newMaskVal = 255; int newMaskVal = 255;
Scalar newVal = Scalar( 120, 120, 120 ); Scalar newVal = Scalar( 120, 120, 120 );
int connectivity = 8; int connectivity = 8;
int flags = connectivity + (newMaskVal << 8 ) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY; int flags = connectivity + (newMaskVal << 8 ) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
Mat mask2 = Mat::zeros( src.rows + 2, src.cols + 2, CV_8UC1 ); Mat mask2 = Mat::zeros( src.rows + 2, src.cols + 2, CV_8U );
floodFill( src, mask2, seed, newVal, 0, Scalar( lo, lo, lo ), Scalar( up, up, up), flags ); floodFill( src, mask2, seed, newVal, 0, Scalar( low, low, low ), Scalar( up, up, up), flags );
mask = mask2( Range( 1, mask2.rows - 1 ), Range( 1, mask2.cols - 1 ) ); mask = mask2( Range( 1, mask2.rows - 1 ), Range( 1, mask2.cols - 1 ) );
imshow( "Mask", mask ); imshow( "Mask", mask );
Hist_and_Backproj( ); Hist_and_Backproj( );
} }
/** /**
@ -79,26 +81,25 @@ void pickPoint (int event, int x, int y, int, void* )
*/ */
void Hist_and_Backproj( ) void Hist_and_Backproj( )
{ {
MatND hist; Mat hist;
int h_bins = 30; int s_bins = 32; int h_bins = 30; int s_bins = 32;
int histSize[] = { h_bins, s_bins }; int histSize[] = { h_bins, s_bins };
float h_range[] = { 0, 179 };
float s_range[] = { 0, 255 };
const float* ranges[] = { h_range, s_range };
int channels[] = { 0, 1 }; float h_range[] = { 0, 180 };
float s_range[] = { 0, 256 };
const float* ranges[] = { h_range, s_range };
/// Get the Histogram and normalize it int channels[] = { 0, 1 };
calcHist( &hsv, 1, channels, mask, hist, 2, histSize, ranges, true, false );
normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() ); /// Get the Histogram and normalize it
calcHist( &hsv, 1, channels, mask, hist, 2, histSize, ranges, true, false );
/// Get Backprojection normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() );
MatND backproj;
calcBackProject( &hsv, 1, channels, hist, backproj, ranges, 1, true );
/// Draw the backproj /// Get Backprojection
imshow( "BackProj", backproj ); Mat backproj;
calcBackProject( &hsv, 1, channels, hist, backproj, ranges, 1, true );
/// Draw the backproj
imshow( "BackProj", backproj );
} }

@ -17,72 +17,73 @@ using namespace cv;
*/ */
int main(int argc, char** argv) int main(int argc, char** argv)
{ {
Mat src, dst; //! [Load image]
CommandLineParser parser( argc, argv, "{@input | ../data/lena.jpg | input image}" );
/// Load image Mat src = imread( parser.get<String>( "@input" ), IMREAD_COLOR );
String imageName( "../data/lena.jpg" ); // by default if( src.empty() )
{
if (argc > 1) return -1;
{ }
imageName = argv[1]; //! [Load image]
}
//! [Separate the image in 3 places ( B, G and R )]
src = imread( imageName, IMREAD_COLOR ); vector<Mat> bgr_planes;
split( src, bgr_planes );
if( src.empty() ) //! [Separate the image in 3 places ( B, G and R )]
{ return -1; }
//! [Establish the number of bins]
/// Separate the image in 3 places ( B, G and R ) int histSize = 256;
vector<Mat> bgr_planes; //! [Establish the number of bins]
split( src, bgr_planes );
//! [Set the ranges ( for B,G,R) )]
/// Establish the number of bins float range[] = { 0, 256 }; //the upper boundary is exclusive
int histSize = 256; const float* histRange = { range };
//! [Set the ranges ( for B,G,R) )]
/// Set the ranges ( for B,G,R) )
float range[] = { 0, 256 } ; //! [Set histogram param]
const float* histRange = { range }; bool uniform = true, accumulate = false;
//! [Set histogram param]
bool uniform = true; bool accumulate = false;
//! [Compute the histograms]
Mat b_hist, g_hist, r_hist; Mat b_hist, g_hist, r_hist;
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );
/// Compute the histograms: calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate ); calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate ); //! [Compute the histograms]
calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
//! [Draw the histograms for B, G and R]
// Draw the histograms for B, G and R int hist_w = 512, hist_h = 400;
int hist_w = 512; int hist_h = 400; int bin_w = cvRound( (double) hist_w/histSize );
int bin_w = cvRound( (double) hist_w/histSize );
Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) ); //! [Draw the histograms for B, G and R]
/// Normalize the result to [ 0, histImage.rows ] //! [Normalize the result to ( 0, histImage.rows )]
normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() ); normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() ); normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() ); normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
//! [Normalize the result to ( 0, histImage.rows )]
/// Draw for each channel
for( int i = 1; i < histSize; i++ ) //! [Draw for each channel]
{ for( int i = 1; i < histSize; i++ )
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ) , {
Point( bin_w*(i), hist_h - cvRound(b_hist.at<float>(i)) ), line( histImage, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ),
Scalar( 255, 0, 0), 2, 8, 0 ); Point( bin_w*(i), hist_h - cvRound(b_hist.at<float>(i)) ),
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(g_hist.at<float>(i-1)) ) , Scalar( 255, 0, 0), 2, 8, 0 );
Point( bin_w*(i), hist_h - cvRound(g_hist.at<float>(i)) ), line( histImage, Point( bin_w*(i-1), hist_h - cvRound(g_hist.at<float>(i-1)) ),
Scalar( 0, 255, 0), 2, 8, 0 ); Point( bin_w*(i), hist_h - cvRound(g_hist.at<float>(i)) ),
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(r_hist.at<float>(i-1)) ) , Scalar( 0, 255, 0), 2, 8, 0 );
Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ), line( histImage, Point( bin_w*(i-1), hist_h - cvRound(r_hist.at<float>(i-1)) ),
Scalar( 0, 0, 255), 2, 8, 0 ); Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ),
} Scalar( 0, 0, 255), 2, 8, 0 );
}
/// Display //! [Draw for each channel]
namedWindow("calcHist Demo", WINDOW_AUTOSIZE );
imshow("calcHist Demo", histImage ); //! [Display]
imshow("Source image", src );
waitKey(0); imshow("calcHist Demo", histImage );
waitKey();
return 0; //! [Display]
return 0;
} }

@ -12,42 +12,43 @@
using namespace std; using namespace std;
using namespace cv; using namespace cv;
const char* keys =
"{ help h| | Print help message. }"
"{ input1 | | Path to input image 1. }"
"{ input2 | | Path to input image 2. }"
"{ input3 | | Path to input image 3. }";
/** /**
* @function main * @function main
*/ */
int main( int argc, char** argv ) int main( int argc, char** argv )
{ {
Mat src_base, hsv_base; //! [Load three images with different environment settings]
Mat src_test1, hsv_test1; CommandLineParser parser( argc, argv, keys );
Mat src_test2, hsv_test2; Mat src_base = imread( parser.get<String>("input1") );
Mat hsv_half_down; Mat src_test1 = imread( parser.get<String>("input2") );
Mat src_test2 = imread( parser.get<String>("input3") );
/// Load three images with different environment settings if( src_base.empty() || src_test1.empty() || src_test2.empty() )
if( argc < 4 )
{ {
printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_settings1> <image_settings2>\n"); cout << "Could not open or find the images!\n" << endl;
parser.printMessage();
return -1; return -1;
} }
//! [Load three images with different environment settings]
src_base = imread( argv[1], IMREAD_COLOR ); //! [Convert to HSV]
src_test1 = imread( argv[2], IMREAD_COLOR ); Mat hsv_base, hsv_test1, hsv_test2;
src_test2 = imread( argv[3], IMREAD_COLOR );
if(src_base.empty() || src_test1.empty() || src_test2.empty())
{
cout << "Can't read one of the images" << endl;
return -1;
}
/// Convert to HSV
cvtColor( src_base, hsv_base, COLOR_BGR2HSV ); cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV ); cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV ); cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
//! [Convert to HSV]
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) ); //! [Convert to HSV half]
Mat hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows ), Range( 0, hsv_base.cols ) );
//! [Convert to HSV half]
/// Using 50 bins for hue and 60 for saturation //! [Using 50 bins for hue and 60 for saturation]
int h_bins = 50; int s_bins = 60; int h_bins = 50, s_bins = 60;
int histSize[] = { h_bins, s_bins }; int histSize[] = { h_bins, s_bins };
// hue varies from 0 to 179, saturation from 0 to 255 // hue varies from 0 to 179, saturation from 0 to 255
@ -56,17 +57,13 @@ int main( int argc, char** argv )
const float* ranges[] = { h_ranges, s_ranges }; const float* ranges[] = { h_ranges, s_ranges };
// Use the o-th and 1-st channels // Use the 0-th and 1-st channels
int channels[] = { 0, 1 }; int channels[] = { 0, 1 };
//! [Using 50 bins for hue and 60 for saturation]
//! [Calculate the histograms for the HSV images]
Mat hist_base, hist_half_down, hist_test1, hist_test2;
/// Histograms
MatND hist_base;
MatND hist_half_down;
MatND hist_test1;
MatND hist_test2;
/// Calculate the histograms for the HSV images
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false ); calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() ); normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
@ -78,20 +75,21 @@ int main( int argc, char** argv )
calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false ); calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false );
normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() ); normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() );
//! [Calculate the histograms for the HSV images]
/// Apply the histogram comparison methods //! [Apply the histogram comparison methods]
for( int i = 0; i < 4; i++ ) for( int compare_method = 0; compare_method < 4; compare_method++ )
{ {
int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method ); double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method ); double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method ); double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method ); double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 ); cout << "Method " << compare_method << " Perfect, Base-Half, Base-Test(1), Base-Test(2) : "
<< base_base << " / " << base_half << " / " << base_test1 << " / " << base_test2 << endl;
} }
//! [Apply the histogram comparison methods]
printf( "Done \n" ); cout << "Done \n";
return 0; return 0;
} }

@ -12,77 +12,71 @@
using namespace cv; using namespace cv;
using namespace std; using namespace std;
/// Global variables
const char* source_window = "Source image";
const char* warp_window = "Warp";
const char* warp_rotate_window = "Warp + Rotate";
/** /**
* @function main * @function main
*/ */
int main( int argc, char** argv ) int main( int argc, char** argv )
{ {
Point2f srcTri[3]; //! [Load the image]
Point2f dstTri[3]; CommandLineParser parser( argc, argv, "{@input | ../data/lena.jpg | input image}" );
Mat src = imread( parser.get<String>( "@input" ) );
Mat rot_mat( 2, 3, CV_32FC1 ); if( src.empty() )
Mat warp_mat( 2, 3, CV_32FC1 ); {
Mat src, warp_dst, warp_rotate_dst; cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
/// Load the image return -1;
CommandLineParser parser( argc, argv, "{@input | ../data/lena.jpg | input image}" ); }
src = imread( parser.get<String>( "@input" ), IMREAD_COLOR ); //! [Load the image]
if( src.empty() )
{ //! [Set your 3 points to calculate the Affine Transform]
cout << "Could not open or find the image!\n" << endl; Point2f srcTri[3];
cout << "Usage: " << argv[0] << " <Input image>" << endl; srcTri[0] = Point2f( 0.f, 0.f );
return -1; srcTri[1] = Point2f( src.cols - 1.f, 0.f );
} srcTri[2] = Point2f( 0.f, src.rows - 1.f );
/// Set the dst image the same type and size as src Point2f dstTri[3];
warp_dst = Mat::zeros( src.rows, src.cols, src.type() ); dstTri[0] = Point2f( 0.f, src.rows*0.33f );
dstTri[1] = Point2f( src.cols*0.85f, src.rows*0.25f );
/// Set your 3 points to calculate the Affine Transform dstTri[2] = Point2f( src.cols*0.15f, src.rows*0.7f );
srcTri[0] = Point2f( 0,0 ); //! [Set your 3 points to calculate the Affine Transform]
srcTri[1] = Point2f( src.cols - 1.f, 0 );
srcTri[2] = Point2f( 0, src.rows - 1.f ); //! [Get the Affine Transform]
Mat warp_mat = getAffineTransform( srcTri, dstTri );
dstTri[0] = Point2f( src.cols*0.0f, src.rows*0.33f ); //! [Get the Affine Transform]
dstTri[1] = Point2f( src.cols*0.85f, src.rows*0.25f );
dstTri[2] = Point2f( src.cols*0.15f, src.rows*0.7f ); //! [Apply the Affine Transform just found to the src image]
/// Set the dst image the same type and size as src
/// Get the Affine Transform Mat warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
warp_mat = getAffineTransform( srcTri, dstTri );
warpAffine( src, warp_dst, warp_mat, warp_dst.size() );
/// Apply the Affine Transform just found to the src image //! [Apply the Affine Transform just found to the src image]
warpAffine( src, warp_dst, warp_mat, warp_dst.size() );
/** Rotating the image after Warp */
/** Rotating the image after Warp */
//! [Compute a rotation matrix with respect to the center of the image]
/// Compute a rotation matrix with respect to the center of the image Point center = Point( warp_dst.cols/2, warp_dst.rows/2 );
Point center = Point( warp_dst.cols/2, warp_dst.rows/2 ); double angle = -50.0;
double angle = -50.0; double scale = 0.6;
double scale = 0.6; //! [Compute a rotation matrix with respect to the center of the image]
/// Get the rotation matrix with the specifications above //! [Get the rotation matrix with the specifications above]
rot_mat = getRotationMatrix2D( center, angle, scale ); Mat rot_mat = getRotationMatrix2D( center, angle, scale );
//! [Get the rotation matrix with the specifications above]
/// Rotate the warped image
warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() ); //! [Rotate the warped image]
Mat warp_rotate_dst;
warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );
/// Show what you got //! [Rotate the warped image]
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src ); //! [Show what you got]
imshow( "Source image", src );
namedWindow( warp_window, WINDOW_AUTOSIZE ); imshow( "Warp", warp_dst );
imshow( warp_window, warp_dst ); imshow( "Warp + Rotate", warp_rotate_dst );
//! [Show what you got]
namedWindow( warp_rotate_window, WINDOW_AUTOSIZE );
imshow( warp_rotate_window, warp_rotate_dst ); //! [Wait until user exits the program]
waitKey();
/// Wait until user exits the program //! [Wait until user exits the program]
waitKey(0);
return 0;
return 0;
} }

@ -64,36 +64,51 @@ removedNodes = []
# Detect unfused batch normalization nodes and fuse them. # Detect unfused batch normalization nodes and fuse them.
def fuse_batch_normalization(): def fuse_batch_normalization():
pattern = ['Add', 'Rsqrt', 'Mul', 'Mul', 'Mul', 'Sub', 'Add'] # Add_0 <-- moving_variance, add_y
candidates = [] # Rsqrt <-- Add_0
# Mul_0 <-- Rsqrt, gamma
for node in graph_def.node: # Mul_1 <-- input, Mul_0
if node.op == pattern[len(candidates)]: # Mul_2 <-- moving_mean, Mul_0
candidates.append(node) # Sub_0 <-- beta, Mul_2
# Add_1 <-- Mul_1, Sub_0
nodesMap = {node.name: node for node in graph_def.node}
subgraph = ['Add',
['Mul', 'input', ['Mul', ['Rsqrt', ['Add', 'moving_variance', 'add_y']], 'gamma']],
['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]]
def checkSubgraph(node, targetNode, inputs, fusedNodes):
op = targetNode[0]
if node.op == op and (len(node.input) >= len(targetNode) - 1):
fusedNodes.append(node)
for i, inpOp in enumerate(targetNode[1:]):
if isinstance(inpOp, list):
if not node.input[i] in nodesMap or \
not checkSubgraph(nodesMap[node.input[i]], inpOp, inputs, fusedNodes):
return False
else:
inputs[inpOp] = node.input[i]
return True
else: else:
candidates = [] return False
if len(candidates) == len(pattern):
inp = candidates[3].input[0]
gamma = candidates[2].input[1]
beta = candidates[5].input[0]
moving_mean = candidates[4].input[0]
moving_variance = candidates[0].input[0]
nodesToRemove = []
for node in graph_def.node:
inputs = {}
fusedNodes = []
if checkSubgraph(node, subgraph, inputs, fusedNodes):
name = node.name name = node.name
node.Clear() node.Clear()
node.name = name node.name = name
node.op = 'FusedBatchNorm' node.op = 'FusedBatchNorm'
node.input.append(inp) node.input.append(inputs['input'])
node.input.append(gamma) node.input.append(inputs['gamma'])
node.input.append(beta) node.input.append(inputs['beta'])
node.input.append(moving_mean) node.input.append(inputs['moving_mean'])
node.input.append(moving_variance) node.input.append(inputs['moving_variance'])
text_format.Merge('f: 0.001', node.attr["epsilon"]) text_format.Merge('f: 0.001', node.attr["epsilon"])
nodesToRemove += fusedNodes[1:]
for candidate in candidates[:-1]: for node in nodesToRemove:
graph_def.node.remove(candidate) graph_def.node.remove(node)
candidates = []
fuse_batch_normalization() fuse_batch_normalization()

@ -0,0 +1,173 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;
import java.util.Arrays;
import java.util.List;
import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
class CalcBackProject1 {
private Mat hue;
private Mat histImg = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
private static final int MAX_SLIDER = 180;
private int bins = 25;
public CalcBackProject1(String[] args) {
//! [Read the image]
if (args.length != 1) {
System.err.println("You must supply one argument that corresponds to the path to the image.");
System.exit(0);
}
Mat src = Imgcodecs.imread(args[0]);
if (src.empty()) {
System.err.println("Empty image: " + args[0]);
System.exit(0);
}
//! [Read the image]
//! [Transform it to HSV]
Mat hsv = new Mat();
Imgproc.cvtColor(src, hsv, Imgproc.COLOR_BGR2HSV);
//! [Transform it to HSV]
//! [Use only the Hue value]
hue = new Mat(hsv.size(), hsv.depth());
Core.mixChannels(Arrays.asList(hsv), Arrays.asList(hue), new MatOfInt(0, 0));
//! [Use only the Hue value]
// Create and set up the window.
frame = new JFrame("Back Projection 1 demo");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// Set up the content pane.
Image img = HighGui.toBufferedImage(src);
addComponentsToPane(frame.getContentPane(), img);
//! [Show the image]
// Use the content pane's default BorderLayout. No need for
// setLayout(new BorderLayout());
// Display the window.
frame.pack();
frame.setVisible(true);
//! [Show the image]
}
private void addComponentsToPane(Container pane, Image img) {
if (!(pane.getLayout() instanceof BorderLayout)) {
pane.add(new JLabel("Container doesn't use BorderLayout!"));
return;
}
//! [Create Trackbar to enter the number of bins]
JPanel sliderPanel = new JPanel();
sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));
sliderPanel.add(new JLabel("* Hue bins: "));
JSlider slider = new JSlider(0, MAX_SLIDER, bins);
slider.setMajorTickSpacing(25);
slider.setMinorTickSpacing(5);
slider.setPaintTicks(true);
slider.setPaintLabels(true);
slider.addChangeListener(new ChangeListener() {
@Override
public void stateChanged(ChangeEvent e) {
JSlider source = (JSlider) e.getSource();
bins = source.getValue();
update();
}
});
sliderPanel.add(slider);
pane.add(sliderPanel, BorderLayout.PAGE_START);
//! [Create Trackbar to enter the number of bins]
JPanel imgPanel = new JPanel();
imgLabel = new JLabel(new ImageIcon(img));
imgPanel.add(imgLabel);
backprojLabel = new JLabel();
imgPanel.add(backprojLabel);
histImgLabel = new JLabel();
imgPanel.add(histImgLabel);
pane.add(imgPanel, BorderLayout.CENTER);
}
private void update() {
//! [initialize]
int histSize = Math.max(bins, 2);
float[] hueRange = {0, 180};
//! [initialize]
//! [Get the Histogram and normalize it]
Mat hist = new Mat();
List<Mat> hueList = Arrays.asList(hue);
Imgproc.calcHist(hueList, new MatOfInt(0), new Mat(), hist, new MatOfInt(histSize), new MatOfFloat(hueRange), false);
Core.normalize(hist, hist, 0, 255, Core.NORM_MINMAX);
//! [Get the Histogram and normalize it]
//! [Get Backprojection]
Mat backproj = new Mat();
Imgproc.calcBackProject(hueList, new MatOfInt(0), hist, backproj, new MatOfFloat(hueRange), 1);
//! [Get Backprojection]
//! [Draw the backproj]
Image backprojImg = HighGui.toBufferedImage(backproj);
backprojLabel.setIcon(new ImageIcon(backprojImg));
//! [Draw the backproj]
//! [Draw the histogram]
int w = 400, h = 400;
int binW = (int) Math.round((double) w / histSize);
histImg = Mat.zeros(h, w, CvType.CV_8UC3);
float[] histData = new float[(int) (hist.total() * hist.channels())];
hist.get(0, 0, histData);
for (int i = 0; i < bins; i++) {
Imgproc.rectangle(histImg, new Point(i * binW, h),
new Point((i + 1) * binW, h - Math.round(histData[i] * h / 255.0)), new Scalar(0, 0, 255), Core.FILLED);
}
Image histImage = HighGui.toBufferedImage(histImg);
histImgLabel.setIcon(new ImageIcon(histImage));
//! [Draw the histogram]
frame.repaint();
frame.pack();
}
}
public class CalcBackProjectDemo1 {
public static void main(String[] args) {
// Load the native OpenCV library
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
// Schedule a job for the event dispatch thread:
// creating and showing this application's GUI.
javax.swing.SwingUtilities.invokeLater(new Runnable() {
@Override
public void run() {
new CalcBackProject1(args);
}
});
}
}

@ -0,0 +1,189 @@
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Image;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
import java.util.Arrays;
import java.util.List;
import javax.swing.BoxLayout;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.Point;
import org.opencv.core.Range;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
class CalcBackProject2 {
private Mat src;
private Mat hsv = new Mat();
private Mat mask = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel maskImgLabel;
private static final int MAX_SLIDER = 255;
private int low = 20;
private int up = 20;
public CalcBackProject2(String[] args) {
/// Read the image
if (args.length != 1) {
System.err.println("You must supply one argument that corresponds to the path to the image.");
System.exit(0);
}
src = Imgcodecs.imread(args[0]);
if (src.empty()) {
System.err.println("Empty image: " + args[0]);
System.exit(0);
}
/// Transform it to HSV
Imgproc.cvtColor(src, hsv, Imgproc.COLOR_BGR2HSV);
// Create and set up the window.
frame = new JFrame("Back Projection 2 demo");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// Set up the content pane.
Image img = HighGui.toBufferedImage(src);
addComponentsToPane(frame.getContentPane(), img);
// Use the content pane's default BorderLayout. No need for
// setLayout(new BorderLayout());
// Display the window.
frame.pack();
frame.setVisible(true);
}
private void addComponentsToPane(Container pane, Image img) {
if (!(pane.getLayout() instanceof BorderLayout)) {
pane.add(new JLabel("Container doesn't use BorderLayout!"));
return;
}
/// Set Trackbars for floodfill thresholds
JPanel sliderPanel = new JPanel();
sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));
sliderPanel.add(new JLabel("Low thresh"));
JSlider slider = new JSlider(0, MAX_SLIDER, low);
slider.setMajorTickSpacing(20);
slider.setMinorTickSpacing(10);
slider.setPaintTicks(true);
slider.setPaintLabels(true);
slider.addChangeListener(new ChangeListener() {
@Override
public void stateChanged(ChangeEvent e) {
JSlider source = (JSlider) e.getSource();
low = source.getValue();
}
});
sliderPanel.add(slider);
pane.add(sliderPanel, BorderLayout.PAGE_START);
sliderPanel.add(new JLabel("High thresh"));
slider = new JSlider(0, MAX_SLIDER, up);
slider.setMajorTickSpacing(20);
slider.setMinorTickSpacing(10);
slider.setPaintTicks(true);
slider.setPaintLabels(true);
slider.addChangeListener(new ChangeListener() {
@Override
public void stateChanged(ChangeEvent e) {
JSlider source = (JSlider) e.getSource();
up = source.getValue();
}
});
sliderPanel.add(slider);
pane.add(sliderPanel, BorderLayout.PAGE_START);
JPanel imgPanel = new JPanel();
imgLabel = new JLabel(new ImageIcon(img));
/// Set a Mouse Callback
imgLabel.addMouseListener(new MouseAdapter() {
@Override
public void mousePressed(MouseEvent e) {
update(e.getX(), e.getY());
}
});
imgPanel.add(imgLabel);
maskImgLabel = new JLabel();
imgPanel.add(maskImgLabel);
backprojLabel = new JLabel();
imgPanel.add(backprojLabel);
pane.add(imgPanel, BorderLayout.CENTER);
}
private void update(int x, int y) {
    // Flood fill from the clicked point to build a selection mask.
    Point seedPoint = new Point(x, y);
    int maskFillValue = 255;
    Scalar fillColor = new Scalar(120, 120, 120);
    int connectivity = 8;
    int floodFlags = connectivity + (maskFillValue << 8) + Imgproc.FLOODFILL_FIXED_RANGE + Imgproc.FLOODFILL_MASK_ONLY;
    // floodFill requires a mask two pixels larger than the image in each dimension.
    Mat borderedMask = Mat.zeros(src.rows() + 2, src.cols() + 2, CvType.CV_8U);
    Imgproc.floodFill(src, borderedMask, seedPoint, fillColor, new Rect(), new Scalar(low, low, low),
            new Scalar(up, up, up), floodFlags);
    // Strip the one-pixel border again before using the mask.
    mask = borderedMask.submat(new Range(1, borderedMask.rows() - 1), new Range(1, borderedMask.cols() - 1));
    maskImgLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(mask)));

    // Build a 2-D hue/saturation histogram restricted to the flood-filled region.
    int hueBins = 30, satBins = 32;
    MatOfInt histSize = new MatOfInt(hueBins, satBins);
    MatOfFloat ranges = new MatOfFloat(0, 180, 0, 256);
    MatOfInt channels = new MatOfInt(0, 1);
    Mat hist = new Mat();
    List<Mat> hsvImages = Arrays.asList(hsv);
    Imgproc.calcHist(hsvImages, channels, mask, hist, histSize, ranges, false);
    Core.normalize(hist, hist, 0, 255, Core.NORM_MINMAX);

    // Back project the histogram onto the HSV image and refresh the display.
    Mat backproj = new Mat();
    Imgproc.calcBackProject(hsvImages, channels, hist, backproj, ranges, 1);
    backprojLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(backproj)));
    frame.repaint();
    frame.pack();
}
}
public class CalcBackProjectDemo2 {
    public static void main(String[] args) {
        // The native OpenCV library must be loaded before any OpenCV call.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Build and show the GUI on the Swing event dispatch thread.
        Runnable guiStarter = new Runnable() {
            @Override
            public void run() {
                new CalcBackProject2(args);
            }
        };
        javax.swing.SwingUtilities.invokeLater(guiStarter);
    }
}

@ -0,0 +1,99 @@
import java.util.ArrayList;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
class CalcHist {
    /**
     * Computes a 256-bin histogram for each BGR channel of the input image,
     * draws the three histograms as polylines and displays the result.
     */
    public void run(String[] args) {
        //! [Load image]
        String filename = args.length > 0 ? args[0] : "../data/lena.jpg";
        Mat src = Imgcodecs.imread(filename);
        if (src.empty()) {
            System.err.println("Cannot read image: " + filename);
            System.exit(0);
        }
        //! [Load image]

        //! [Separate the image in 3 places ( B, G and R )]
        List<Mat> bgrPlanes = new ArrayList<>();
        Core.split(src, bgrPlanes);
        //! [Separate the image in 3 places ( B, G and R )]

        //! [Establish the number of bins]
        int histSize = 256;
        //! [Establish the number of bins]

        //! [Set the ranges ( for B,G,R) )]
        MatOfFloat histRange = new MatOfFloat(0, 256); // the upper boundary is exclusive
        //! [Set the ranges ( for B,G,R) )]

        //! [Set histogram param]
        boolean accumulate = false;
        //! [Set histogram param]

        //! [Compute the histograms]
        // One histogram per channel, indexed 0 = B, 1 = G, 2 = R.
        Mat[] hists = { new Mat(), new Mat(), new Mat() };
        for (int ch = 0; ch < hists.length; ch++) {
            Imgproc.calcHist(bgrPlanes, new MatOfInt(ch), new Mat(), hists[ch], new MatOfInt(histSize), histRange,
                    accumulate);
        }
        //! [Compute the histograms]

        //! [Draw the histograms for B, G and R]
        int histW = 512, histH = 400;
        int binW = (int) Math.round((double) histW / histSize);
        Mat histImage = new Mat(histH, histW, CvType.CV_8UC3, new Scalar(0, 0, 0));
        //! [Draw the histograms for B, G and R]

        //! [Normalize the result to ( 0, histImage.rows )]
        for (Mat hist : hists) {
            Core.normalize(hist, hist, 0, histImage.rows(), Core.NORM_MINMAX);
        }
        //! [Normalize the result to ( 0, histImage.rows )]

        //! [Draw for each channel]
        // Pull the bin counts into plain float arrays for fast indexed access.
        float[][] histData = new float[hists.length][];
        for (int ch = 0; ch < hists.length; ch++) {
            histData[ch] = new float[(int) (hists[ch].total() * hists[ch].channels())];
            hists[ch].get(0, 0, histData[ch]);
        }
        Scalar[] lineColors = { new Scalar(255, 0, 0), new Scalar(0, 255, 0), new Scalar(0, 0, 255) };
        for (int i = 1; i < histSize; i++) {
            // Connect consecutive bins with a line segment, one polyline per channel.
            for (int ch = 0; ch < hists.length; ch++) {
                Imgproc.line(histImage, new Point(binW * (i - 1), histH - Math.round(histData[ch][i - 1])),
                        new Point(binW * (i), histH - Math.round(histData[ch][i])), lineColors[ch], 2);
            }
        }
        //! [Draw for each channel]

        //! [Display]
        HighGui.imshow( "Source image", src );
        HighGui.imshow( "calcHist Demo", histImage );
        HighGui.waitKey(0);
        //! [Display]

        System.exit(0);
    }
}
public class CalcHistDemo {
    public static void main(String[] args) {
        // The native OpenCV library must be loaded before any OpenCV call.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        CalcHist demo = new CalcHist();
        demo.run(args);
    }
}

@ -0,0 +1,91 @@
import java.util.Arrays;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.Range;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
class CompareHist {
    /**
     * Compares the hue/saturation histogram of a base image against itself,
     * its lower half and two test images using the first four histogram
     * comparison methods, printing one result line per method.
     */
    public void run(String[] args) {
        //! [Load three images with different environment settings]
        if (args.length != 3) {
            System.err.println("You must supply 3 arguments that correspond to the paths to 3 images.");
            System.exit(0);
        }
        Mat srcBase = Imgcodecs.imread(args[0]);
        Mat srcTest1 = Imgcodecs.imread(args[1]);
        Mat srcTest2 = Imgcodecs.imread(args[2]);
        if (srcBase.empty() || srcTest1.empty() || srcTest2.empty()) {
            System.err.println("Cannot read the images");
            System.exit(0);
        }
        //! [Load three images with different environment settings]

        //! [Convert to HSV]
        Mat hsvBase = new Mat(), hsvTest1 = new Mat(), hsvTest2 = new Mat();
        Imgproc.cvtColor(srcBase, hsvBase, Imgproc.COLOR_BGR2HSV);
        Imgproc.cvtColor(srcTest1, hsvTest1, Imgproc.COLOR_BGR2HSV);
        Imgproc.cvtColor(srcTest2, hsvTest2, Imgproc.COLOR_BGR2HSV);
        //! [Convert to HSV]

        //! [Convert to HSV half]
        Mat hsvHalfDown = hsvBase.submat(new Range(hsvBase.rows() / 2, hsvBase.rows() - 1),
                new Range(0, hsvBase.cols() - 1));
        //! [Convert to HSV half]

        //! [Using 50 bins for hue and 60 for saturation]
        int hBins = 50, sBins = 60;
        MatOfInt histSize = new MatOfInt(hBins, sBins);
        // hue varies from 0 to 179, saturation from 0 to 255
        MatOfFloat ranges = new MatOfFloat(0, 180, 0, 256);
        // Use the 0-th and 1-st channels
        MatOfInt channels = new MatOfInt(0, 1);
        //! [Using 50 bins for hue and 60 for saturation]

        //! [Calculate the histograms for the HSV images]
        Mat histBase = new Mat(), histHalfDown = new Mat(), histTest1 = new Mat(), histTest2 = new Mat();
        // Same histogram recipe for every image: compute, then normalize to [0, 1].
        Mat[] hsvImages = { hsvBase, hsvHalfDown, hsvTest1, hsvTest2 };
        Mat[] histograms = { histBase, histHalfDown, histTest1, histTest2 };
        for (int i = 0; i < hsvImages.length; i++) {
            Imgproc.calcHist(Arrays.asList(hsvImages[i]), channels, new Mat(), histograms[i], histSize, ranges, false);
            Core.normalize(histograms[i], histograms[i], 0, 1, Core.NORM_MINMAX);
        }
        //! [Calculate the histograms for the HSV images]

        //! [Apply the histogram comparison methods]
        for (int compareMethod = 0; compareMethod < 4; compareMethod++) {
            double baseBase = Imgproc.compareHist(histBase, histBase, compareMethod);
            double baseHalf = Imgproc.compareHist(histBase, histHalfDown, compareMethod);
            double baseTest1 = Imgproc.compareHist(histBase, histTest1, compareMethod);
            double baseTest2 = Imgproc.compareHist(histBase, histTest2, compareMethod);
            System.out.println("Method " + compareMethod + " Perfect, Base-Half, Base-Test(1), Base-Test(2) : "
                    + baseBase + " / " + baseHalf + " / " + baseTest1 + " / " + baseTest2);
        }
        //! [Apply the histogram comparison methods]
    }
}
public class CompareHistDemo {
    public static void main(String[] args) {
        // The native OpenCV library must be loaded before any OpenCV call.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        CompareHist demo = new CompareHist();
        demo.run(args);
    }
}

@ -0,0 +1,49 @@
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
class EqualizeHist {
    /**
     * Loads an image, converts it to grayscale, equalizes its histogram and
     * shows the source next to the equalized result.
     */
    public void run(String[] args) {
        //! [Load image]
        String imagePath = args.length > 0 ? args[0] : "../data/lena.jpg";
        Mat source = Imgcodecs.imread(imagePath);
        if (source.empty()) {
            System.err.println("Cannot read image: " + imagePath);
            System.exit(0);
        }
        //! [Load image]

        //! [Convert to grayscale]
        Imgproc.cvtColor(source, source, Imgproc.COLOR_BGR2GRAY);
        //! [Convert to grayscale]

        //! [Apply Histogram Equalization]
        Mat equalized = new Mat();
        Imgproc.equalizeHist(source, equalized);
        //! [Apply Histogram Equalization]

        //! [Display results]
        HighGui.imshow("Source image", source);
        HighGui.imshow("Equalized Image", equalized);
        //! [Display results]

        //! [Wait until user exits the program]
        HighGui.waitKey(0);
        //! [Wait until user exits the program]

        System.exit(0);
    }
}
public class EqualizeHistDemo {
    public static void main(String[] args) {
        // The native OpenCV library must be loaded before any OpenCV call.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        EqualizeHist demo = new EqualizeHist();
        demo.run(args);
    }
}

@ -1,9 +1,8 @@
import java.awt.BorderLayout; import java.awt.BorderLayout;
import java.awt.Container; import java.awt.Container;
import java.awt.Image;
import java.awt.event.ActionEvent; import java.awt.event.ActionEvent;
import java.awt.event.ActionListener; import java.awt.event.ActionListener;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import javax.swing.BoxLayout; import javax.swing.BoxLayout;
import javax.swing.ImageIcon; import javax.swing.ImageIcon;
@ -19,6 +18,7 @@ import org.opencv.core.Core;
import org.opencv.core.Mat; import org.opencv.core.Mat;
import org.opencv.core.Point; import org.opencv.core.Point;
import org.opencv.core.Size; import org.opencv.core.Size;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs; import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc; import org.opencv.imgproc.Imgproc;
@ -46,7 +46,7 @@ public class MorphologyDemo1 {
frame = new JFrame("Erosion and dilatation demo"); frame = new JFrame("Erosion and dilatation demo");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// Set up the content pane. // Set up the content pane.
BufferedImage img = toBufferedImage(matImgSrc); Image img = HighGui.toBufferedImage(matImgSrc);
addComponentsToPane(frame.getContentPane(), img); addComponentsToPane(frame.getContentPane(), img);
// Use the content pane's default BorderLayout. No need for // Use the content pane's default BorderLayout. No need for
// setLayout(new BorderLayout()); // setLayout(new BorderLayout());
@ -55,7 +55,7 @@ public class MorphologyDemo1 {
frame.setVisible(true); frame.setVisible(true);
} }
private void addComponentsToPane(Container pane, BufferedImage img) { private void addComponentsToPane(Container pane, Image img) {
if (!(pane.getLayout() instanceof BorderLayout)) { if (!(pane.getLayout() instanceof BorderLayout)) {
pane.add(new JLabel("Container doesn't use BorderLayout!")); pane.add(new JLabel("Container doesn't use BorderLayout!"));
return; return;
@ -115,20 +115,6 @@ public class MorphologyDemo1 {
pane.add(imgLabel, BorderLayout.CENTER); pane.add(imgLabel, BorderLayout.CENTER);
} }
private BufferedImage toBufferedImage(Mat matrix) {
int type = BufferedImage.TYPE_BYTE_GRAY;
if (matrix.channels() > 1) {
type = BufferedImage.TYPE_3BYTE_BGR;
}
int bufferSize = matrix.channels() * matrix.cols() * matrix.rows();
byte[] buffer = new byte[bufferSize];
matrix.get(0, 0, buffer); // get all the pixels
BufferedImage image = new BufferedImage(matrix.cols(), matrix.rows(), type);
final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
System.arraycopy(buffer, 0, targetPixels, 0, buffer.length);
return image;
}
private void update() { private void update() {
Mat element = Imgproc.getStructuringElement(elementType, new Size(2 * kernelSize + 1, 2 * kernelSize + 1), Mat element = Imgproc.getStructuringElement(elementType, new Size(2 * kernelSize + 1, 2 * kernelSize + 1),
new Point(kernelSize, kernelSize)); new Point(kernelSize, kernelSize));
@ -138,7 +124,7 @@ public class MorphologyDemo1 {
} else { } else {
Imgproc.dilate(matImgSrc, matImgDst, element); Imgproc.dilate(matImgSrc, matImgDst, element);
} }
BufferedImage img = toBufferedImage(matImgDst); Image img = HighGui.toBufferedImage(matImgDst);
imgLabel.setIcon(new ImageIcon(img)); imgLabel.setIcon(new ImageIcon(img));
frame.repaint(); frame.repaint();
} }

@ -1,9 +1,8 @@
import java.awt.BorderLayout; import java.awt.BorderLayout;
import java.awt.Container; import java.awt.Container;
import java.awt.Image;
import java.awt.event.ActionEvent; import java.awt.event.ActionEvent;
import java.awt.event.ActionListener; import java.awt.event.ActionListener;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import javax.swing.BoxLayout; import javax.swing.BoxLayout;
import javax.swing.ImageIcon; import javax.swing.ImageIcon;
@ -19,6 +18,7 @@ import org.opencv.core.Core;
import org.opencv.core.Mat; import org.opencv.core.Mat;
import org.opencv.core.Point; import org.opencv.core.Point;
import org.opencv.core.Size; import org.opencv.core.Size;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs; import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc; import org.opencv.imgproc.Imgproc;
@ -48,7 +48,7 @@ public class MorphologyDemo2 {
frame = new JFrame("Morphology Transformations demo"); frame = new JFrame("Morphology Transformations demo");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// Set up the content pane. // Set up the content pane.
BufferedImage img = toBufferedImage(matImgSrc); Image img = HighGui.toBufferedImage(matImgSrc);
addComponentsToPane(frame.getContentPane(), img); addComponentsToPane(frame.getContentPane(), img);
// Use the content pane's default BorderLayout. No need for // Use the content pane's default BorderLayout. No need for
// setLayout(new BorderLayout()); // setLayout(new BorderLayout());
@ -57,7 +57,7 @@ public class MorphologyDemo2 {
frame.setVisible(true); frame.setVisible(true);
} }
private void addComponentsToPane(Container pane, BufferedImage img) { private void addComponentsToPane(Container pane, Image img) {
if (!(pane.getLayout() instanceof BorderLayout)) { if (!(pane.getLayout() instanceof BorderLayout)) {
pane.add(new JLabel("Container doesn't use BorderLayout!")); pane.add(new JLabel("Container doesn't use BorderLayout!"));
return; return;
@ -117,26 +117,12 @@ public class MorphologyDemo2 {
pane.add(imgLabel, BorderLayout.CENTER); pane.add(imgLabel, BorderLayout.CENTER);
} }
private BufferedImage toBufferedImage(Mat matrix) {
int type = BufferedImage.TYPE_BYTE_GRAY;
if (matrix.channels() > 1) {
type = BufferedImage.TYPE_3BYTE_BGR;
}
int bufferSize = matrix.channels() * matrix.cols() * matrix.rows();
byte[] buffer = new byte[bufferSize];
matrix.get(0, 0, buffer); // get all the pixels
BufferedImage image = new BufferedImage(matrix.cols(), matrix.rows(), type);
final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
System.arraycopy(buffer, 0, targetPixels, 0, buffer.length);
return image;
}
private void update() { private void update() {
Mat element = Imgproc.getStructuringElement(elementType, new Size(2 * kernelSize + 1, 2 * kernelSize + 1), Mat element = Imgproc.getStructuringElement(elementType, new Size(2 * kernelSize + 1, 2 * kernelSize + 1),
new Point(kernelSize, kernelSize)); new Point(kernelSize, kernelSize));
Imgproc.morphologyEx(matImgSrc, matImgDst, morphOpType, element); Imgproc.morphologyEx(matImgSrc, matImgDst, morphOpType, element);
BufferedImage img = toBufferedImage(matImgDst); Image img = HighGui.toBufferedImage(matImgDst);
imgLabel.setIcon(new ImageIcon(img)); imgLabel.setIcon(new ImageIcon(img));
frame.repaint(); frame.repaint();
} }

@ -1,18 +1,24 @@
import org.opencv.core.*; import java.awt.GridLayout;
import org.opencv.core.Point; import java.awt.Image;
import org.opencv.imgcodecs.Imgcodecs; import java.util.Hashtable;
import org.opencv.imgproc.Imgproc;
import javax.swing.ImageIcon;
import javax.swing.*; import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JSlider;
import javax.swing.event.ChangeEvent; import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener; import javax.swing.event.ChangeListener;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.util.*; import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
class MatchTemplateDemoRun implements ChangeListener{ class MatchTemplateDemoRun implements ChangeListener {
//! [declare] //! [declare]
/// Global Variables /// Global Variables
@ -26,9 +32,7 @@ class MatchTemplateDemoRun implements ChangeListener{
//! [declare] //! [declare]
public void run(String[] args) { public void run(String[] args) {
if (args.length < 2) {
if (args.length < 2)
{
System.out.println("Not enough parameters"); System.out.println("Not enough parameters");
System.out.println("Program arguments:\n<image_name> <template_name> [<mask_name>]"); System.out.println("Program arguments:\n<image_name> <template_name> [<mask_name>]");
System.exit(-1); System.exit(-1);
@ -36,117 +40,100 @@ class MatchTemplateDemoRun implements ChangeListener{
//! [load_image] //! [load_image]
/// Load image and template /// Load image and template
img = Imgcodecs.imread( args[0], Imgcodecs.IMREAD_COLOR ); img = Imgcodecs.imread(args[0], Imgcodecs.IMREAD_COLOR);
templ = Imgcodecs.imread( args[1], Imgcodecs.IMREAD_COLOR ); templ = Imgcodecs.imread(args[1], Imgcodecs.IMREAD_COLOR);
//! [load_image] //! [load_image]
if(args.length > 2) { if (args.length > 2) {
use_mask = true; use_mask = true;
mask = Imgcodecs.imread( args[2], Imgcodecs.IMREAD_COLOR ); mask = Imgcodecs.imread(args[2], Imgcodecs.IMREAD_COLOR);
} }
if(img.empty() || templ.empty() || (use_mask && mask.empty())) if (img.empty() || templ.empty() || (use_mask && mask.empty())) {
{
System.out.println("Can't read one of the images"); System.out.println("Can't read one of the images");
System.exit(-1); System.exit(-1);
} }
matchingMethod(); matchingMethod();
createJFrame(); createJFrame();
} }
private void matchingMethod() { private void matchingMethod() {
Mat result = new Mat(); Mat result = new Mat();
//! [copy_source] //! [copy_source]
/// Source image to display /// Source image to display
Mat img_display = new Mat(); Mat img_display = new Mat();
img.copyTo( img_display ); img.copyTo(img_display);
//! [copy_source] //! [copy_source]
//! [create_result_matrix] //! [create_result_matrix]
/// Create the result matrix /// Create the result matrix
int result_cols = img.cols() - templ.cols() + 1; int result_cols = img.cols() - templ.cols() + 1;
int result_rows = img.rows() - templ.rows() + 1; int result_rows = img.rows() - templ.rows() + 1;
result.create( result_rows, result_cols, CvType.CV_32FC1 ); result.create(result_rows, result_cols, CvType.CV_32FC1);
//! [create_result_matrix] //! [create_result_matrix]
//! [match_template] //! [match_template]
/// Do the Matching and Normalize /// Do the Matching and Normalize
Boolean method_accepts_mask = (Imgproc.TM_SQDIFF == match_method || Boolean method_accepts_mask = (Imgproc.TM_SQDIFF == match_method || match_method == Imgproc.TM_CCORR_NORMED);
match_method == Imgproc.TM_CCORR_NORMED); if (use_mask && method_accepts_mask) {
if (use_mask && method_accepts_mask) Imgproc.matchTemplate(img, templ, result, match_method, mask);
{ Imgproc.matchTemplate( img, templ, result, match_method, mask); } } else {
else Imgproc.matchTemplate(img, templ, result, match_method);
{ Imgproc.matchTemplate( img, templ, result, match_method); } }
//! [match_template] //! [match_template]
//! [normalize] //! [normalize]
Core.normalize( result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat() ); Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
//! [normalize] //! [normalize]
//! [best_match] //! [best_match]
/// Localizing the best match with minMaxLoc /// Localizing the best match with minMaxLoc
double minVal; double maxVal;
Point matchLoc; Point matchLoc;
Core.MinMaxLocResult mmr = Core.minMaxLoc( result ); Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
//! [best_match] //! [best_match]
//! [match_loc] //! [match_loc]
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. /// For SQDIFF and SQDIFF_NORMED, the best matches are lower values.
// For all the other methods, the higher the better /// For all the other methods, the higher the better
if( match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED ) if (match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED) {
{ matchLoc = mmr.minLoc; } matchLoc = mmr.minLoc;
else } else {
{ matchLoc = mmr.maxLoc; } matchLoc = mmr.maxLoc;
}
//! [match_loc] //! [match_loc]
//! [imshow] //! [imshow]
/// Show me what you got /// Show me what you got
Imgproc.rectangle(img_display, matchLoc, new Point(matchLoc.x + templ.cols(), Imgproc.rectangle(img_display, matchLoc, new Point(matchLoc.x + templ.cols(), matchLoc.y + templ.rows()),
matchLoc.y + templ.rows()), new Scalar(0, 0, 0), 2, 8, 0); new Scalar(0, 0, 0), 2, 8, 0);
Imgproc.rectangle(result, matchLoc, new Point(matchLoc.x + templ.cols(), Imgproc.rectangle(result, matchLoc, new Point(matchLoc.x + templ.cols(), matchLoc.y + templ.rows()),
matchLoc.y + templ.rows()), new Scalar(0, 0, 0), 2, 8, 0); new Scalar(0, 0, 0), 2, 8, 0);
Image tmpImg = toBufferedImage(img_display); Image tmpImg = HighGui.toBufferedImage(img_display);
ImageIcon icon = new ImageIcon(tmpImg); ImageIcon icon = new ImageIcon(tmpImg);
imgDisplay.setIcon(icon); imgDisplay.setIcon(icon);
result.convertTo(result, CvType.CV_8UC1, 255.0); result.convertTo(result, CvType.CV_8UC1, 255.0);
tmpImg = toBufferedImage(result); tmpImg = HighGui.toBufferedImage(result);
icon = new ImageIcon(tmpImg); icon = new ImageIcon(tmpImg);
resultDisplay.setIcon(icon); resultDisplay.setIcon(icon);
//! [imshow] //! [imshow]
} }
@Override
public void stateChanged(ChangeEvent e) { public void stateChanged(ChangeEvent e) {
JSlider source = (JSlider) e.getSource(); JSlider source = (JSlider) e.getSource();
if (!source.getValueIsAdjusting()) { if (!source.getValueIsAdjusting()) {
match_method = (int)source.getValue(); match_method = source.getValue();
matchingMethod(); matchingMethod();
} }
} }
public Image toBufferedImage(Mat m) {
int type = BufferedImage.TYPE_BYTE_GRAY;
if ( m.channels() > 1 ) {
type = BufferedImage.TYPE_3BYTE_BGR;
}
int bufferSize = m.channels()*m.cols()*m.rows();
byte [] b = new byte[bufferSize];
m.get(0,0,b); // get all the pixels
BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
System.arraycopy(b, 0, targetPixels, 0, b.length);
return image;
}
private void createJFrame() { private void createJFrame() {
String title = "Source image; Control; Result image"; String title = "Source image; Control; Result image";
JFrame frame = new JFrame(title); JFrame frame = new JFrame(title);
frame.setLayout(new GridLayout(2, 2)); frame.setLayout(new GridLayout(2, 2));
@ -164,14 +151,14 @@ class MatchTemplateDemoRun implements ChangeListener{
slider.setMinorTickSpacing(1); slider.setMinorTickSpacing(1);
// Customizing the labels // Customizing the labels
Hashtable labelTable = new Hashtable(); Hashtable<Integer, JLabel> labelTable = new Hashtable<>();
labelTable.put( new Integer( 0 ), new JLabel("0 - SQDIFF") ); labelTable.put(new Integer(0), new JLabel("0 - SQDIFF"));
labelTable.put( new Integer( 1 ), new JLabel("1 - SQDIFF NORMED") ); labelTable.put(new Integer(1), new JLabel("1 - SQDIFF NORMED"));
labelTable.put( new Integer( 2 ), new JLabel("2 - TM CCORR") ); labelTable.put(new Integer(2), new JLabel("2 - TM CCORR"));
labelTable.put( new Integer( 3 ), new JLabel("3 - TM CCORR NORMED") ); labelTable.put(new Integer(3), new JLabel("3 - TM CCORR NORMED"));
labelTable.put( new Integer( 4 ), new JLabel("4 - TM COEFF") ); labelTable.put(new Integer(4), new JLabel("4 - TM COEFF"));
labelTable.put( new Integer( 5 ), new JLabel("5 - TM COEFF NORMED : (Method)") ); labelTable.put(new Integer(5), new JLabel("5 - TM COEFF NORMED : (Method)"));
slider.setLabelTable( labelTable ); slider.setLabelTable(labelTable);
slider.addChangeListener(this); slider.addChangeListener(this);
@ -184,8 +171,7 @@ class MatchTemplateDemoRun implements ChangeListener{
} }
} }
public class MatchTemplateDemo public class MatchTemplateDemo {
{
public static void main(String[] args) { public static void main(String[] args) {
// load the native OpenCV library // load the native OpenCV library
System.loadLibrary(Core.NATIVE_LIBRARY_NAME); System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

@ -0,0 +1,80 @@
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
class GeometricTransforms {
    /**
     * Demonstrates affine warping: maps three corner points of the source image
     * onto a new triangle, then rotates and scales the warped result.
     */
    public void run(String[] args) {
        //! [Load the image]
        String imagePath = args.length > 0 ? args[0] : "../data/lena.jpg";
        Mat src = Imgcodecs.imread(imagePath);
        if (src.empty()) {
            System.err.println("Cannot read image: " + imagePath);
            System.exit(0);
        }
        //! [Load the image]

        //! [Set your 3 points to calculate the Affine Transform]
        // Three reference corners of the source and where they should land.
        Point[] srcTri = {
                new Point(0, 0),
                new Point(src.cols() - 1, 0),
                new Point(0, src.rows() - 1)
        };
        Point[] dstTri = {
                new Point(0, src.rows() * 0.33),
                new Point(src.cols() * 0.85, src.rows() * 0.25),
                new Point(src.cols() * 0.15, src.rows() * 0.7)
        };
        //! [Set your 3 points to calculate the Affine Transform]

        //! [Get the Affine Transform]
        Mat warpMat = Imgproc.getAffineTransform(new MatOfPoint2f(srcTri), new MatOfPoint2f(dstTri));
        //! [Get the Affine Transform]

        //! [Apply the Affine Transform just found to the src image]
        Mat warpDst = Mat.zeros(src.rows(), src.cols(), src.type());
        Imgproc.warpAffine(src, warpDst, warpMat, warpDst.size());
        //! [Apply the Affine Transform just found to the src image]

        /* Rotating the image after Warp */
        //! [Compute a rotation matrix with respect to the center of the image]
        Point center = new Point(warpDst.cols() / 2, warpDst.rows() / 2);
        double angle = -50.0;
        double scale = 0.6;
        //! [Compute a rotation matrix with respect to the center of the image]

        //! [Get the rotation matrix with the specifications above]
        Mat rotMat = Imgproc.getRotationMatrix2D(center, angle, scale);
        //! [Get the rotation matrix with the specifications above]

        //! [Rotate the warped image]
        Mat warpRotateDst = new Mat();
        Imgproc.warpAffine(warpDst, warpRotateDst, rotMat, warpDst.size());
        //! [Rotate the warped image]

        //! [Show what you got]
        HighGui.imshow( "Source image", src );
        HighGui.imshow( "Warp", warpDst );
        HighGui.imshow( "Warp + Rotate", warpRotateDst );
        //! [Show what you got]

        //! [Wait until user exits the program]
        HighGui.waitKey(0);
        //! [Wait until user exits the program]

        System.exit(0);
    }
}
public class GeometricTransformsDemo {
    public static void main(String[] args) {
        // The native OpenCV library must be loaded before any OpenCV call.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        GeometricTransforms demo = new GeometricTransforms();
        demo.run(args);
    }
}

@ -0,0 +1,71 @@
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
def Hist_and_Backproj(val):
    """Trackbar callback: rebuild the hue histogram (reads global `hue`) and
    display both its back projection and a bar chart of the histogram."""
    ## [initialize]
    bins = val
    histSize = max(bins, 2)  # calcHist needs at least 2 bins
    ranges = [0, 180] # hue_range
    ## [initialize]

    ## [Get the Histogram and normalize it]
    hist = cv.calcHist([hue], [0], None, [histSize], ranges, accumulate=False)
    cv.normalize(hist, hist, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
    ## [Get the Histogram and normalize it]

    ## [Get Backprojection]
    backproj = cv.calcBackProject([hue], [0], hist, ranges, scale=1)
    ## [Get Backprojection]

    ## [Draw the backproj]
    cv.imshow('BackProj', backproj)
    ## [Draw the backproj]

    ## [Draw the histogram]
    w, h = 400, 400
    bin_w = int(round(w / histSize))
    histImg = np.zeros((h, w, 3), dtype=np.uint8)
    for i in range(bins):
        # Bars grow upward from the bottom edge; counts are scaled from [0, 255].
        bar_height = int(round(hist[i] * h / 255.0))
        cv.rectangle(histImg, (i * bin_w, h), ((i + 1) * bin_w, h - bar_height), (0, 0, 255), cv.FILLED)
    cv.imshow('Histogram', histImg)
    ## [Draw the histogram]
## [Read the image]
parser = argparse.ArgumentParser(description='Code for Back Projection tutorial.')
parser.add_argument('--input', help='Path to input image.')
args = parser.parse_args()
src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Read the image]

## [Transform it to HSV]
hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)
## [Transform it to HSV]

## [Use only the Hue value]
# Copy channel 0 (hue) of `hsv` into channel 0 of `hue`; the (0, 0) pair is
# mixChannels' from->to channel mapping. Only channel 0 of `hue` is used below.
ch = (0, 0)
hue = np.empty(hsv.shape, hsv.dtype)
cv.mixChannels([hsv], [hue], ch)
## [Use only the Hue value]

## [Create Trackbar to enter the number of bins]
window_image = 'Source image'
cv.namedWindow(window_image)
bins = 25
# The trackbar drives Hist_and_Backproj; call it once so the histogram and
# back-projection windows appear before any user interaction.
cv.createTrackbar('* Hue bins: ', window_image, bins, 180, Hist_and_Backproj )
Hist_and_Backproj(bins)
## [Create Trackbar to enter the number of bins]

## [Show the image]
cv.imshow(window_image, src)
cv.waitKey()
## [Show the image]

@ -0,0 +1,79 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
# Flood-fill thresholds shared with the trackbar callbacks below: `low` and
# `up` are passed to cv.floodFill as the lower/upper color difference in
# pickPoint.
low = 20
up = 20

def callback_low(val):
    """Trackbar callback: store the new lower flood-fill threshold."""
    global low
    low = val

def callback_up(val):
    """Trackbar callback: store the new upper flood-fill threshold."""
    global up
    up = val
def pickPoint(event, x, y, flags, param):
    """Mouse callback: flood fill from the clicked pixel (using globals `src`,
    `low`, `up`) and back-project the resulting mask."""
    if event != cv.EVENT_LBUTTONDOWN:
        return

    # Flood fill from the clicked seed; the result lands in a padded mask,
    # which floodFill requires to be 2 pixels larger than the image.
    seed_point = (x, y)
    mask_fill_value = 255
    fill_color = (120, 120, 120)
    connectivity = 8
    fill_flags = connectivity + (mask_fill_value << 8 ) + cv.FLOODFILL_FIXED_RANGE + cv.FLOODFILL_MASK_ONLY
    padded_mask = np.zeros((src.shape[0] + 2, src.shape[1] + 2), dtype=np.uint8)
    print('low:', low, 'up:', up)
    cv.floodFill(src, padded_mask, seed_point, fill_color, (low, low, low), (up, up, up), fill_flags)

    # Strip the one-pixel border again before using the mask.
    mask = padded_mask[1:-1, 1:-1]
    cv.imshow('Mask', mask)
    Hist_and_Backproj(mask)
def Hist_and_Backproj(mask):
    """Compute a masked 2-D hue/saturation histogram of the global `hsv` image
    and display its back projection."""
    # Histogram layout: 30 hue bins x 32 saturation bins over H in [0, 180)
    # and S in [0, 256).
    histSize = [30, 32]
    ranges = [0, 180, 0, 256]  # hue range followed by saturation range
    channels = [0, 1]

    # Get the histogram (restricted to `mask`) and normalize it to [0, 255]
    hist = cv.calcHist([hsv], channels, mask, histSize, ranges, accumulate=False)
    cv.normalize(hist, hist, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)

    # Back project the histogram onto the HSV image and display the result
    backproj = cv.calcBackProject([hsv], channels, hist, ranges, scale=1)
    cv.imshow('BackProj', backproj)
# Read the image
parser = argparse.ArgumentParser(description='Code for Back Projection tutorial.')
parser.add_argument('--input', help='Path to input image.')
args = parser.parse_args()
src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# Transform it to HSV (pickPoint and Hist_and_Backproj read `hsv` as a global)
hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)

# Show the image
window_image = 'Source image'
cv.namedWindow(window_image)
cv.imshow(window_image, src)

# Set Trackbars for floodfill thresholds; the callbacks update the globals
# `low` and `up` used by pickPoint
cv.createTrackbar('Low thresh', window_image, low, 255, callback_low)
cv.createTrackbar('High thresh', window_image, up, 255, callback_up)

# Set a Mouse Callback: a left click triggers flood fill + back projection
cv.setMouseCallback(window_image, pickPoint)

cv.waitKey()

@ -0,0 +1,71 @@
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
## [Load image]
# NOTE: the paired '## [...]' comments are Doxygen snippet markers referenced
# by the tutorial markdown -- keep them intact when editing.
## [Load image]
# Read the image whose per-channel intensity histograms will be plotted.
parser = argparse.ArgumentParser(description='Code for Histogram Calculation tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg')
args = parser.parse_args()
src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Load image]
## [Separate the image in 3 places ( B, G and R )]
# Split the BGR image into three single-channel planes.
bgr_planes = cv.split(src)
## [Separate the image in 3 places ( B, G and R )]
## [Establish the number of bins]
# One bin per possible 8-bit intensity value.
histSize = 256
## [Establish the number of bins]
## [Set the ranges ( for B,G,R) )]
histRange = (0, 256) # the upper boundary is exclusive
## [Set the ranges ( for B,G,R) )]
## [Set histogram param]
# Start each histogram from zero rather than accumulating across calls.
accumulate = False
## [Set histogram param]
## [Compute the histograms]
b_hist = cv.calcHist(bgr_planes, [0], None, [histSize], histRange, accumulate=accumulate)
g_hist = cv.calcHist(bgr_planes, [1], None, [histSize], histRange, accumulate=accumulate)
r_hist = cv.calcHist(bgr_planes, [2], None, [histSize], histRange, accumulate=accumulate)
## [Compute the histograms]
## [Draw the histograms for B, G and R]
# Plot canvas: 512 px wide, 400 px tall; each bin occupies bin_w columns.
hist_w = 512
hist_h = 400
bin_w = int(round( hist_w/histSize ))
histImage = np.zeros((hist_h, hist_w, 3), dtype=np.uint8)
## [Draw the histograms for B, G and R]
## [Normalize the result to ( 0, histImage.rows )]
# Rescale each histogram in place so bin counts fit the plot height.
cv.normalize(b_hist, b_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)
cv.normalize(g_hist, g_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)
cv.normalize(r_hist, r_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)
## [Normalize the result to ( 0, histImage.rows )]
## [Draw for each channel]
# Walk the bins left to right, connecting consecutive bin heights with a line
# per channel (y grows downward, hence "hist_h - value").
# float() unwraps the 1-element array calcHist returns for each bin so that
# round() also works on Python 3, where numpy.ndarray has no __round__.
for i in range(1, histSize):
    cv.line(histImage, ( bin_w*(i-1), hist_h - int(round(float(b_hist[i-1]))) ),
            ( bin_w*(i), hist_h - int(round(float(b_hist[i]))) ),
            ( 255, 0, 0), thickness=2)
    cv.line(histImage, ( bin_w*(i-1), hist_h - int(round(float(g_hist[i-1]))) ),
            ( bin_w*(i), hist_h - int(round(float(g_hist[i]))) ),
            ( 0, 255, 0), thickness=2)
    cv.line(histImage, ( bin_w*(i-1), hist_h - int(round(float(r_hist[i-1]))) ),
            ( bin_w*(i), hist_h - int(round(float(r_hist[i]))) ),
            ( 0, 0, 255), thickness=2)
## [Draw for each channel]
## [Display]
cv.imshow('Source image', src)
cv.imshow('calcHist Demo', histImage)
cv.waitKey()  # block until a key is pressed
## [Display]

@ -0,0 +1,69 @@
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
## [Load three images with different environment settings]
# Three views of a scene; histogram comparison should rank their similarity.
parser = argparse.ArgumentParser(description='Code for Histogram Comparison tutorial.')
parser.add_argument('--input1', help='Path to input image 1.')
parser.add_argument('--input2', help='Path to input image 2.')
parser.add_argument('--input3', help='Path to input image 3.')
args = parser.parse_args()
src_base = cv.imread(args.input1)
src_test1 = cv.imread(args.input2)
src_test2 = cv.imread(args.input3)
if src_base is None or src_test1 is None or src_test2 is None:
    print('Could not open or find the images!')
    exit(0)
## [Load three images with different environment settings]
## [Convert to HSV]
# Compare hue/saturation rather than raw BGR, which is more robust to
# lighting differences between the shots.
hsv_base = cv.cvtColor(src_base, cv.COLOR_BGR2HSV)
hsv_test1 = cv.cvtColor(src_test1, cv.COLOR_BGR2HSV)
hsv_test2 = cv.cvtColor(src_test2, cv.COLOR_BGR2HSV)
## [Convert to HSV]
## [Convert to HSV half]
# Lower half of the base image: expected to still match the full base closely.
hsv_half_down = hsv_base[hsv_base.shape[0]//2:,:]
## [Convert to HSV half]
## [Using 50 bins for hue and 60 for saturation]
h_bins = 50
s_bins = 60
histSize = [h_bins, s_bins]
# hue varies from 0 to 179, saturation from 0 to 255
h_ranges = [0, 180]
s_ranges = [0, 256]
ranges = h_ranges + s_ranges # concat lists
# Use the 0-th and 1-st channels
channels = [0, 1]
## [Using 50 bins for hue and 60 for saturation]
## [Calculate the histograms for the HSV images]
# 2-D H-S histograms, min-max scaled to [0, 1] so the comparison scores do
# not depend on image size.
hist_base = cv.calcHist([hsv_base], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_base, hist_base, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
hist_half_down = cv.calcHist([hsv_half_down], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_half_down, hist_half_down, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
hist_test1 = cv.calcHist([hsv_test1], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_test1, hist_test1, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
hist_test2 = cv.calcHist([hsv_test2], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_test2, hist_test2, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
## [Calculate the histograms for the HSV images]
## [Apply the histogram comparison methods]
# Methods 0..3 correspond to OpenCV's HistCompMethods: 0=Correlation,
# 1=Chi-Square, 2=Intersection, 3=Bhattacharyya. Comparing the base
# histogram with itself yields each method's "perfect" score.
for compare_method in range(4):
    base_base = cv.compareHist(hist_base, hist_base, compare_method)
    base_half = cv.compareHist(hist_base, hist_half_down, compare_method)
    base_test1 = cv.compareHist(hist_base, hist_test1, compare_method)
    base_test2 = cv.compareHist(hist_base, hist_test2, compare_method)
    print('Method:', compare_method, 'Perfect, Base-Half, Base-Test(1), Base-Test(2) :',\
        base_base, '/', base_half, '/', base_test1, '/', base_test2)
## [Apply the histogram comparison methods]

@ -0,0 +1,31 @@
from __future__ import print_function
import cv2 as cv
import argparse
## [Load image]
# Read the image to equalize (defaults to the sample lena.jpg).
parser = argparse.ArgumentParser(description='Code for Histogram Equalization tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg')
args = parser.parse_args()
src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Load image]
## [Convert to grayscale]
# equalizeHist operates on single-channel 8-bit images.
src = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
## [Convert to grayscale]
## [Apply Histogram Equalization]
# Spread the intensity distribution to improve global contrast.
# (Removed a stray C-style trailing semicolon -- not idiomatic Python.)
dst = cv.equalizeHist(src)
## [Apply Histogram Equalization]
## [Display results]
cv.imshow('Source image', src)
cv.imshow('Equalized Image', dst)
## [Display results]
## [Wait until user exits the program]
cv.waitKey()
## [Wait until user exits the program]

@ -0,0 +1,54 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
## [Load the image]
# Read the image to be warped (defaults to the sample lena.jpg).
parser = argparse.ArgumentParser(description='Code for Affine Transformations tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg')
args = parser.parse_args()
src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Load the image]
## [Set your 3 points to calculate the Affine Transform]
# Points are (x, y): x spans the width (shape[1]), y the height (shape[0]).
# Source triangle: top-left, top-right and bottom-left image corners.
srcTri = np.array( [[0, 0], [src.shape[1] - 1, 0], [0, src.shape[0] - 1]] ).astype(np.float32)
# Destination triangle, matching the C++ tutorial:
# (0, rows*0.33), (cols*0.85, rows*0.25), (cols*0.15, rows*0.7).
# The first point's y-coordinate must scale the height (shape[0]); the
# original code used shape[1] (width), which is wrong for non-square images.
dstTri = np.array( [[0, src.shape[0]*0.33], [src.shape[1]*0.85, src.shape[0]*0.25], [src.shape[1]*0.15, src.shape[0]*0.7]] ).astype(np.float32)
## [Set your 3 points to calculate the Affine Transform]
## [Get the Affine Transform]
warp_mat = cv.getAffineTransform(srcTri, dstTri)
## [Get the Affine Transform]
## [Apply the Affine Transform just found to the src image]
# dsize is (width, height)
warp_dst = cv.warpAffine(src, warp_mat, (src.shape[1], src.shape[0]))
## [Apply the Affine Transform just found to the src image]
# Rotating the image after Warp
## [Compute a rotation matrix with respect to the center of the image]
# Rotate about the image centre by -50 degrees while scaling to 60%.
center = (warp_dst.shape[1]//2, warp_dst.shape[0]//2)
angle = -50
scale = 0.6
## [Compute a rotation matrix with respect to the center of the image]
## [Get the rotation matrix with the specifications above]
rot_mat = cv.getRotationMatrix2D( center, angle, scale )
## [Get the rotation matrix with the specifications above]
## [Rotate the warped image]
# dsize is (width, height), kept equal to the warped image's size.
warp_rotate_dst = cv.warpAffine(warp_dst, rot_mat, (warp_dst.shape[1], warp_dst.shape[0]))
## [Rotate the warped image]
## [Show what you got]
# Show the original, the affine-warped and the warped+rotated results.
cv.imshow('Source image', src)
cv.imshow('Warp', warp_dst)
cv.imshow('Warp + Rotate', warp_rotate_dst)
## [Show what you got]
## [Wait until user exits the program]
cv.waitKey()
## [Wait until user exits the program]
Loading…
Cancel
Save